daniel-dona committed on
Commit
0787b7f
·
1 Parent(s): b34d81a
Files changed (1) hide show
  1. app.py +18 -6
app.py CHANGED
@@ -44,21 +44,30 @@ def submit_query(msg):
44
  )
45
 
46
  msg = response.message.content
 
 
 
 
 
 
 
 
 
47
 
48
  try:
49
 
50
  json_data_test = json.loads(msg.split("```json")[1].split("```")[0])
51
 
52
- return json.dumps(json_data_test, indent=2)
53
 
54
  except:
55
- return "{}"
56
 
57
  except:
58
 
59
  raise gradio.Error("Error: LLM request timed out!", duration=5)
60
 
61
- return "{}"
62
 
63
 
64
  test_dataset = [
@@ -114,8 +123,8 @@ with gradio.Blocks() as demo:
114
 
115
  query = gradio.Textbox(render=False, label="Query", placeholder="Write a query and press Enter.", submit_btn="Send query")
116
 
117
-
118
  code_llm = gradio.Code(render=False, label="LLM output", interactive=False, language="json")
 
119
  code_ref = gradio.Code(render=False, label="Expected output", interactive=False, language="json")
120
 
121
  #chat = gradio.Chatbot(render=False, value=history, label="LLM output", type="messages")
@@ -137,9 +146,12 @@ with gradio.Blocks() as demo:
137
 
138
  with gradio.Row():
139
  code_llm.render()
 
 
 
140
 
141
- query.submit(submit_query, inputs=[query], outputs=[code_llm])
142
  #query.change(submit_query, inputs=[query], outputs=[code_llm])
143
- query.change(clean_output, inputs=[], outputs=[code_llm]).then(submit_query, inputs=[query], outputs=[code_llm])
144
 
145
  demo.launch()
 
44
  )
45
 
46
  msg = response.message.content
47
+
48
+ debug = {
49
+ k:v for k,v in response.dict().items()
50
+ }
51
+
52
+ debug["eval_t/s"] = debug["eval_count"]/(debug["eval_duration"]/1_000_000_000)
53
+ debug["prompt_eval_t/s"] = debug["prompt_eval_count"]/(debug["prompt_eval_duration"]/1_000_000_000)
54
+
55
+ print(response)
56
 
57
  try:
58
 
59
  json_data_test = json.loads(msg.split("```json")[1].split("```")[0])
60
 
61
+ return [json.dumps(json_data_test, indent=2), json.dumps(debug, indent=2)]
62
 
63
  except:
64
+ return [json.dumps({}, indent=2), json.dumps({"error": "Unable to process result."}, indent=2)]
65
 
66
  except:
67
 
68
  raise gradio.Error("Error: LLM request timed out!", duration=5)
69
 
70
+ return [json.dumps({}, indent=2),json.dumps({"error": "No query"}, indent=2)]
71
 
72
 
73
  test_dataset = [
 
123
 
124
  query = gradio.Textbox(render=False, label="Query", placeholder="Write a query and press Enter.", submit_btn="Send query")
125
 
 
126
  code_llm = gradio.Code(render=False, label="LLM output", interactive=False, language="json")
127
+ metadata_llm = gradio.Code(render=False, label="LLM request metadata", interactive=False, language="json")
128
  code_ref = gradio.Code(render=False, label="Expected output", interactive=False, language="json")
129
 
130
  #chat = gradio.Chatbot(render=False, value=history, label="LLM output", type="messages")
 
146
 
147
  with gradio.Row():
148
  code_llm.render()
149
+
150
+ with gradio.Row():
151
+ metadata_llm.render()
152
 
153
+ query.submit(submit_query, inputs=[query], outputs=[code_llm, metadata_llm])
154
  #query.change(submit_query, inputs=[query], outputs=[code_llm])
155
+ query.change(clean_output, inputs=[], outputs=[code_llm]).then(submit_query, inputs=[query], outputs=[code_llm, metadata_llm])
156
 
157
  demo.launch()