openfree committed on
Commit f5c4480 · verified · 1 Parent(s): ef60744

Update app.py

Files changed (1):
  1. app.py +80 -66
app.py CHANGED
@@ -6,7 +6,7 @@ import spaces
 import transformers
 from transformers import pipeline
 
-# 모델과 토크나이저 로딩
+# Loading model and tokenizer
 model_name = "meta-llama/Llama-3.1-8B-Instruct"
 if gr.NO_RELOAD:
     pipe = pipeline(
@@ -16,33 +16,33 @@ if gr.NO_RELOAD:
         torch_dtype="auto",
     )
 
-# 최종 답변을 감지하기 위한 마커
-ANSWER_MARKER = "**답변**"
+# Marker for detecting final answer
+ANSWER_MARKER = "**Answer**"
 
-# 단계별 추론을 시작하는 문장들
+# Sentences to start step-by-step reasoning
 rethink_prepends = [
-    "자, 이제 다음을 파악해야 합니다 ",
-    "제 생각에는 ",
-    "다음 사항이 맞는지 확인해 보겠습니다 ",
-    "또한 기억해야 할 것은 ",
-    "또 다른 주목할 점은 ",
-    "그리고 저는 다음과 같은 사실도 기억합니다 ",
-    "이제 충분히 이해했다고 생각합니다 ",
+    "Now, I need to understand the following ",
+    "In my opinion ",
+    "Let me verify if the following is correct ",
+    "Also, I should remember that ",
+    "Another point to note is ",
+    "And I also remember the following fact ",
+    "Now I think I understand sufficiently ",
 ]
 
-# 최종 답변 생성을 위한 프롬프트 추가
+# Prompt addition for generating final answer
 final_answer_prompt = """
-지금까지의 추론 과정을 바탕으로, 원래 질문에 사용된 언어로 답변하겠습니다:
+Based on my reasoning process so far, I will answer the original question in the language it was asked:
 {question}
 
-아래는 내가 추론한 결론입니다:
+Here is the conclusion I've reasoned:
 {reasoning_conclusion}
 
-위 추론을 기반으로 최종 답변:
+Based on the above reasoning, my final answer:
 {ANSWER_MARKER}
 """
 
-# 수식 표시 문제 해결을 위한 설정
+# Settings for displaying formulas
 latex_delimiters = [
     {"left": "$$", "right": "$$", "display": True},
     {"left": "$", "right": "$", "display": False},
@@ -50,9 +50,9 @@ latex_delimiters = [
 
 
 def reformat_math(text):
-    """Gradio 구문(Katex)을 사용하도록 MathJax 구분 기호 수정.
-    이것은 Gradio에서 수학 공식을 표시하기 위한 임시 해결책입니다. 현재로서는
-    다른 latex_delimiters를 사용하여 예상대로 작동하게 하는 방법을 찾지 못했습니다...
+    """Modify MathJax delimiters to use Gradio syntax (Katex).
+    This is a temporary fix for displaying math formulas in Gradio. Currently,
+    I haven't found a way to make it work as expected with other latex_delimiters...
     """
     text = re.sub(r"\\\[\s*(.*?)\s*\\\]", r"$$\1$$", text, flags=re.DOTALL)
     text = re.sub(r"\\\(\s*(.*?)\s*\\\)", r"$\1$", text, flags=re.DOTALL)
@@ -60,7 +60,7 @@ def reformat_math(text):
 
 
 def user_input(message, history_original, history_thinking):
-    """사용자 입력을 히스토리에 추가하고 입력 텍스트 상자 비우기"""
+    """Add user input to history and clear input text box"""
     return "", history_original + [
         gr.ChatMessage(role="user", content=message.replace(ANSWER_MARKER, ""))
     ], history_thinking + [
@@ -69,7 +69,7 @@ def user_input(message, history_original, history_thinking):
 
 
 def rebuild_messages(history: list):
-    """중간 생각 과정 없이 모델이 사용할 히스토리에서 메시지 재구성"""
+    """Reconstruct messages from history for model use without intermediate thinking process"""
     messages = []
     for h in history:
         if isinstance(h, dict) and not h.get("metadata", {}).get("title", False):
@@ -90,16 +90,16 @@ def bot_original(
     do_sample: bool,
     temperature: float,
 ):
-    """원본 모델이 질문에 답변하도록 하기 (추론 과정 없이)"""
+    """Make the original model answer questions (without reasoning process)"""
 
-    # 나중에 스레드에서 토큰을 스트림으로 가져오기 위함
+    # For streaming tokens from thread later
     streamer = transformers.TextIteratorStreamer(
         pipe.tokenizer, # pyright: ignore
         skip_special_tokens=True,
         skip_prompt=True,
     )
 
-    # 보조자 메시지 준비
+    # Prepare assistant message
     history.append(
         gr.ChatMessage(
             role="assistant",
@@ -107,10 +107,10 @@
         )
     )
 
-    # 현재 채팅에 표시될 메시지
-    messages = rebuild_messages(history[:-1]) # 마지막 빈 메시지 제외
+    # Messages to be displayed in current chat
+    messages = rebuild_messages(history[:-1]) # Excluding last empty message
 
-    # 원본 모델은 추론 없이 바로 답변
+    # Original model answers directly without reasoning
     t = threading.Thread(
         target=pipe,
         args=(messages,),
@@ -140,34 +140,34 @@ def bot_thinking(
     do_sample: bool,
     temperature: float,
 ):
-    """추론 과정을 포함하여 모델이 질문에 답변하도록 하기"""
+    """Make the model answer questions with reasoning process"""
 
-    # 나중에 스레드에서 토큰을 스트림으로 가져오기 위함
+    # For streaming tokens from thread later
    streamer = transformers.TextIteratorStreamer(
         pipe.tokenizer, # pyright: ignore
         skip_special_tokens=True,
         skip_prompt=True,
     )
 
-    # 필요한 경우 추론에 질문을 다시 삽입하기 위함
+    # For reinserting the question into reasoning if needed
     question = history[-1]["content"]
 
-    # 보조자 메시지 준비
+    # Prepare assistant message
     history.append(
         gr.ChatMessage(
             role="assistant",
             content=str(""),
-            metadata={"title": "🧠 생각 중...", "status": "pending"},
+            metadata={"title": "🧠 Thinking...", "status": "pending"},
         )
     )
 
-    # 현재 채팅에 표시될 추론 과정
+    # Reasoning process to be displayed in current chat
     messages = rebuild_messages(history)
 
-    # 전체 추론 과정을 저장할 변수
+    # Variable to store the entire reasoning process
     full_reasoning = ""
 
-    # 추론 단계 실행
+    # Run reasoning steps
     for i, prepend in enumerate(rethink_prepends):
         if i > 0:
             messages[-1]["content"] += "\n\n"
@@ -185,7 +185,7 @@
         )
         t.start()
 
-        # 새 내용으로 히스토리 재구성
+        # Reconstruct history with new content
         history[-1].content += prepend.format(question=question)
         for token in streamer:
             history[-1].content += token
@@ -193,21 +193,21 @@
             yield history
         t.join()
 
-        # 각 추론 단계의 결과를 full_reasoning에 저장
+        # Save the result of each reasoning step to full_reasoning
         full_reasoning = history[-1].content
 
-    # 추론 완료, 이제 최종 답변을 생성
-    history[-1].metadata = {"title": "💭 사고 과정", "status": "done"}
+    # Reasoning complete, now generate final answer
+    history[-1].metadata = {"title": "💭 Thought Process", "status": "done"}
 
-    # 추론 과정에서 결론 부분을 추출 (마지막 1-2 문단 정도)
+    # Extract conclusion part from reasoning process (approximately last 1-2 paragraphs)
     reasoning_parts = full_reasoning.split("\n\n")
     reasoning_conclusion = "\n\n".join(reasoning_parts[-2:]) if len(reasoning_parts) > 2 else full_reasoning
 
-    # 최종 답변 메시지 추가
+    # Add final answer message
     history.append(gr.ChatMessage(role="assistant", content=""))
 
-    # 최종 답변을 위한 메시지 구성
-    final_messages = rebuild_messages(history[:-1]) # 마지막 빈 메시지 제외
+    # Construct message for final answer
+    final_messages = rebuild_messages(history[:-1]) # Excluding last empty message
     final_prompt = final_answer_prompt.format(
         question=question,
         reasoning_conclusion=reasoning_conclusion,
@@ -215,7 +215,7 @@
     )
     final_messages[-1]["content"] += final_prompt
 
-    # 최종 답변 생성
+    # Generate final answer
     t = threading.Thread(
         target=pipe,
         args=(final_messages,),
@@ -228,7 +228,7 @@
     )
     t.start()
 
-    # 최종 답변 스트리밍
+    # Stream final answer
     for token in streamer:
         history[-1].content += token
         history[-1].content = reformat_math(history[-1].content)
@@ -238,10 +238,21 @@
         yield history
 
 
-with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
-    # 제목과 설명
-    gr.Markdown("# Vidraft ThinkFlow")
-    gr.Markdown("### 추론 기능이 없는 LLM 모델의 수정 없이도 추론 기능을 자동으로 적용하는 LLM 추론 생성 플랫폼")
+with gr.Blocks(fill_height=True, title="ThinkFlow") as demo:
+    # Title and description
+    gr.Markdown("# ThinkFlow")
+    gr.Markdown("### An LLM reasoning generation platform that automatically applies reasoning capabilities to LLM models without modification")
+
+    # Features and benefits section
+    with gr.Accordion("✨ Features & Benefits", open=True):
+        gr.Markdown("""
+        - **Enhanced Reasoning**: Transform any LLM into a step-by-step reasoning engine without model modifications
+        - **Transparency**: Visualize the model's thought process alongside direct answers
+        - **Improved Accuracy**: See how guided reasoning leads to more accurate solutions for complex problems
+        - **Educational Tool**: Perfect for teaching critical thinking and problem-solving approaches
+        - **Versatile Application**: Works with mathematical problems, logical puzzles, and complex questions
+        - **Side-by-Side Comparison**: Compare standard model responses with reasoning-enhanced outputs
+        """)
 
     with gr.Row(scale=1):
         with gr.Column(scale=2):
@@ -263,36 +274,36 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
             )
 
     with gr.Row():
-        # msg 텍스트박스를 먼저 정의
+        # Define msg textbox first
         msg = gr.Textbox(
             submit_btn=True,
             label="",
             show_label=False,
-            placeholder="여기에 질문을 입력하세요.",
+            placeholder="Enter your question here.",
             autofocus=True,
         )
 
-    # 예제 섹션 - msg 변수 정의 이후에 배치
+    # Examples section - placed after msg variable definition
    with gr.Accordion("EXAMPLES", open=False):
         examples = gr.Examples(
             examples=[
-                "[출처: MATH-500)] 처음 100개의 양의 정수 중에서 3, 4, 5로 나누어 떨어지는 수는 몇 개입니까?",
-                "[출처: MATH-500)] 잉크의 땅에서 돈 시스템은 독특합니다. 트링킷 1개는 블링킷 4개와 같고, 블링킷 3개는 드링크 7개와 같습니다. 트링킷에서 드링크 56개의 가치는 얼마입니까?",
-                "[출처: MATH-500)] 에이미, 벤, 크리스의 평균 나이는 6살입니다. 4년 전 크리스는 지금 에이미와 같은 나이였습니다. 4년 후 벤의 나이는 그때 에이미의 나이의 $\\frac{3}{5}$가 될 것입니다. 크리스는 지금 몇 살입니까?",
-                "[출처: MATH-500)] 노란색과 파란색 구슬이 들어 있는 가방이 있습니다. 현재 파란색 구슬과 노란색 구슬의 비율은 4:3입니다. 파란색 구슬 5개를 더하고 노란색 구슬 3개를 제거하면 비율은 7:3이 됩니다. 더 넣기 전에 가방에 파란색 구슬이 몇 개 있었습니까?"
+                "[Source: MATH-500)] How many numbers among the first 100 positive integers are divisible by 3, 4, and 5?",
+                "[Source: MATH-500)] In the land of Ink, the money system is unique. 1 trinket equals 4 blinkets, and 3 blinkets equal 7 drinkits. What is the value of 56 drinkits in trinkets?",
+                "[Source: MATH-500)] The average age of Amy, Ben, and Chris is 6 years. Four years ago, Chris was the same age as Amy is now. Four years from now, Ben's age will be $\\frac{3}{5}$ of Amy's age at that time. How old is Chris now?",
+                "[Source: MATH-500)] A bag contains yellow and blue marbles. Currently, the ratio of blue marbles to yellow marbles is 4:3. After adding 5 blue marbles and removing 3 yellow marbles, the ratio becomes 7:3. How many blue marbles were in the bag before any were added?"
             ],
             inputs=msg
         )
 
     with gr.Row():
         with gr.Column():
-            gr.Markdown("""## 매개변수 조정""")
+            gr.Markdown("""## Parameter Adjustment""")
             num_tokens = gr.Slider(
                 50,
                 4000,
                 2000,
                 step=1,
-                label="추론 단계당 최대 토큰 수",
+                label="Maximum tokens per reasoning step",
                 interactive=True,
             )
             final_num_tokens = gr.Slider(
@@ -300,17 +311,20 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
                 4000,
                 2000,
                 step=1,
-                label="최종 답변의 최대 토큰 수",
+                label="Maximum tokens for final answer",
                 interactive=True,
             )
-            do_sample = gr.Checkbox(True, label="샘플링 사용")
-            temperature = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="온도")
+            do_sample = gr.Checkbox(True, label="Use sampling")
+            temperature = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="Temperature")
+
+    # Community link at the bottom
+    gr.Markdown("<p style='font-size: 12px;'>Community: <a href='https://discord.gg/openfreeai' target='_blank'>https://discord.gg/openfreeai</a></p>")
 
-    # 사용자가 메시지를 제출하면 두 봇이 동시에 응답합니다
+    # When user submits a message, both bots respond simultaneously
     msg.submit(
         user_input,
-        [msg, chatbot_original, chatbot_thinking], # 입력
-        [msg, chatbot_original, chatbot_thinking], # 출력
+        [msg, chatbot_original, chatbot_thinking], # inputs
+        [msg, chatbot_original, chatbot_thinking], # outputs
     ).then(
         bot_original,
         [
@@ -319,7 +333,7 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
             do_sample,
             temperature,
         ],
-        chatbot_original, # 출력에서 새 히스토리 저장
+        chatbot_original, # save new history in outputs
     ).then(
         bot_thinking,
         [
@@ -329,7 +343,7 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
             do_sample,
             temperature,
         ],
-        chatbot_thinking, # 출력에서 새 히스토리 저장
+        chatbot_thinking, # save new history in outputs
     )
 
 if __name__ == "__main__":
 