Spaces: Running on Zero

Update app.py

app.py CHANGED
```diff
@@ -6,7 +6,7 @@ import spaces
 import transformers
 from transformers import pipeline
 
-#
+# Loading model and tokenizer
 model_name = "meta-llama/Llama-3.1-8B-Instruct"
 if gr.NO_RELOAD:
     pipe = pipeline(
```
```diff
@@ -16,33 +16,33 @@ if gr.NO_RELOAD:
         torch_dtype="auto",
     )
 
-#
-ANSWER_MARKER = "
+# Marker for detecting final answer
+ANSWER_MARKER = "**Answer**"
 
-#
+# Sentences to start step-by-step reasoning
 rethink_prepends = [
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "Now, I need to understand the following ",
+    "In my opinion ",
+    "Let me verify if the following is correct ",
+    "Also, I should remember that ",
+    "Another point to note is ",
+    "And I also remember the following fact ",
+    "Now I think I understand sufficiently ",
 ]
 
-#
+# Prompt addition for generating final answer
 final_answer_prompt = """
-
+Based on my reasoning process so far, I will answer the original question in the language it was asked:
 {question}
 
-
+Here is the conclusion I've reasoned:
 {reasoning_conclusion}
 
-
+Based on the above reasoning, my final answer:
 {ANSWER_MARKER}
 """
 
-#
+# Settings for displaying formulas
 latex_delimiters = [
     {"left": "$$", "right": "$$", "display": True},
     {"left": "$", "right": "$", "display": False},
```
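The three pieces this hunk introduces (marker, reasoning prepends, final prompt) combine with plain `str.format`. A minimal sketch of the assembly; note that `{ANSWER_MARKER}` is itself a format field, so the call site has to supply it. The later hunk cuts off before that argument, so passing `ANSWER_MARKER=ANSWER_MARKER` is an assumption here:

```python
ANSWER_MARKER = "**Answer**"

final_answer_prompt = """
Based on my reasoning process so far, I will answer the original question in the language it was asked:
{question}

Here is the conclusion I've reasoned:
{reasoning_conclusion}

Based on the above reasoning, my final answer:
{ANSWER_MARKER}
"""

print(final_answer_prompt.format(
    question="What is 2 + 2?",
    reasoning_conclusion="Adding two and two gives four.",
    ANSWER_MARKER=ANSWER_MARKER,  # assumed: the diff cuts off before this keyword
))
```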
```diff
@@ -50,9 +50,9 @@ latex_delimiters = [
 
 
 def reformat_math(text):
-    """
-
-
+    """Modify MathJax delimiters to use Gradio syntax (Katex).
+    This is a temporary fix for displaying math formulas in Gradio. Currently,
+    I haven't found a way to make it work as expected with other latex_delimiters...
     """
     text = re.sub(r"\\\[\s*(.*?)\s*\\\]", r"$$\1$$", text, flags=re.DOTALL)
     text = re.sub(r"\\\(\s*(.*?)\s*\\\)", r"$\1$", text, flags=re.DOTALL)
```
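For reference, a self-contained run of the rewrite the new docstring describes (the `return text` line falls outside the hunk and is assumed):

```python
import re

def reformat_math(text):
    # \[ ... \] becomes $$ ... $$ (display math); \( ... \) becomes $ ... $ (inline)
    text = re.sub(r"\\\[\s*(.*?)\s*\\\]", r"$$\1$$", text, flags=re.DOTALL)
    text = re.sub(r"\\\(\s*(.*?)\s*\\\)", r"$\1$", text, flags=re.DOTALL)
    return text

print(reformat_math(r"Euler: \[ e^{i\pi} + 1 = 0 \] and inline \( x^2 \)."))
# Euler: $$e^{i\pi} + 1 = 0$$ and inline $x^2$.
```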
```diff
@@ -60,7 +60,7 @@ def reformat_math(text):
 
 
 def user_input(message, history_original, history_thinking):
-    """
+    """Add user input to history and clear input text box"""
     return "", history_original + [
         gr.ChatMessage(role="user", content=message.replace(ANSWER_MARKER, ""))
     ], history_thinking + [
```
```diff
@@ -69,7 +69,7 @@ def user_input(message, history_original, history_thinking):
 
 
 def rebuild_messages(history: list):
-    """
+    """Reconstruct messages from history for model use without intermediate thinking process"""
     messages = []
     for h in history:
         if isinstance(h, dict) and not h.get("metadata", {}).get("title", False):
```
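Only the filter condition is visible in this hunk: any turn whose metadata carries a `title` (the thinking bubbles) is skipped. A sketch of the likely body, where the appended `{role, content}` shape is an assumption:

```python
def rebuild_messages(history: list):
    # Keep plain user/assistant turns; drop bubbles marked with a metadata title
    messages = []
    for h in history:
        if isinstance(h, dict) and not h.get("metadata", {}).get("title", False):
            messages.append({"role": h["role"], "content": h["content"]})
    return messages

history = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "step 1 ...", "metadata": {"title": "🧠 Thinking..."}},
    {"role": "assistant", "content": "Hello!"},
]
print(rebuild_messages(history))  # the titled thinking bubble is dropped
```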
```diff
@@ -90,16 +90,16 @@ def bot_original(
     do_sample: bool,
     temperature: float,
 ):
-    """
+    """Make the original model answer questions (without reasoning process)"""
 
-    #
+    # For streaming tokens from thread later
     streamer = transformers.TextIteratorStreamer(
         pipe.tokenizer,  # pyright: ignore
         skip_special_tokens=True,
         skip_prompt=True,
     )
 
-    #
+    # Prepare assistant message
     history.append(
         gr.ChatMessage(
             role="assistant",
@@ -107,10 +107,10 @@ def bot_original(
         )
     )
 
-    #
-    messages = rebuild_messages(history[:-1])  #
+    # Messages to be displayed in current chat
+    messages = rebuild_messages(history[:-1])  # Excluding last empty message
 
-    #
+    # Original model answers directly without reasoning
     t = threading.Thread(
         target=pipe,
         args=(messages,),
```
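The streamer-plus-thread arrangement here is the standard `transformers` streaming pattern: generation blocks until it finishes, so it runs on a worker thread while the handler drains tokens as they arrive. A self-contained sketch using `gpt2` as a stand-in model:

```python
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tok("The quick brown fox", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True, skip_special_tokens=True)

# generate() blocks, so it runs on a worker thread; the main thread
# consumes decoded text chunks from the streamer as they are produced
thread = Thread(target=model.generate,
                kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for piece in streamer:
    print(piece, end="", flush=True)
thread.join()
```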
```diff
@@ -140,34 +140,34 @@ def bot_thinking(
     do_sample: bool,
     temperature: float,
 ):
-    """
+    """Make the model answer questions with reasoning process"""
 
-    #
+    # For streaming tokens from thread later
    streamer = transformers.TextIteratorStreamer(
         pipe.tokenizer,  # pyright: ignore
         skip_special_tokens=True,
         skip_prompt=True,
     )
 
-    #
+    # For reinserting the question into reasoning if needed
     question = history[-1]["content"]
 
-    #
+    # Prepare assistant message
     history.append(
         gr.ChatMessage(
             role="assistant",
             content=str(""),
-            metadata={"title": "🧠
+            metadata={"title": "🧠 Thinking...", "status": "pending"},
         )
     )
 
-    #
+    # Reasoning process to be displayed in current chat
     messages = rebuild_messages(history)
 
-    #
+    # Variable to store the entire reasoning process
     full_reasoning = ""
 
-    #
+    # Run reasoning steps
     for i, prepend in enumerate(rethink_prepends):
         if i > 0:
             messages[-1]["content"] += "\n\n"
```
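Each entry of `rethink_prepends` seeds one more generation round, so a single reasoning transcript grows step by step. A toy version of the loop with a stubbed generator (`fake_generate` is illustrative; the real code streams from the pipeline in a thread):

```python
rethink_prepends = [
    "Now, I need to understand the following ",
    "In my opinion ",
    "Let me verify if the following is correct ",
]

def fake_generate(step: int) -> str:
    # stand-in for the threaded pipeline call in the real app
    return f"<model continuation #{step + 1}>"

question = "How old is Chris now?"
transcript = ""
for i, prepend in enumerate(rethink_prepends):
    if i > 0:
        transcript += "\n\n"
    transcript += prepend.format(question=question)  # no-op unless "{question}" is present
    transcript += fake_generate(i)
print(transcript)
```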
```diff
@@ -185,7 +185,7 @@ def bot_thinking(
         )
         t.start()
 
-        #
+        # Reconstruct history with new content
         history[-1].content += prepend.format(question=question)
         for token in streamer:
             history[-1].content += token
```
```diff
@@ -193,21 +193,21 @@ def bot_thinking(
             yield history
         t.join()
 
-        #
+        # Save the result of each reasoning step to full_reasoning
         full_reasoning = history[-1].content
 
-    #
-    history[-1].metadata = {"title": "π
+    # Reasoning complete, now generate final answer
+    history[-1].metadata = {"title": "π Thought Process", "status": "done"}
 
-    #
+    # Extract conclusion part from reasoning process (approximately last 1-2 paragraphs)
     reasoning_parts = full_reasoning.split("\n\n")
     reasoning_conclusion = "\n\n".join(reasoning_parts[-2:]) if len(reasoning_parts) > 2 else full_reasoning
 
-    #
+    # Add final answer message
     history.append(gr.ChatMessage(role="assistant", content=""))
 
-    #
-    final_messages = rebuild_messages(history[:-1])  #
+    # Construct message for final answer
+    final_messages = rebuild_messages(history[:-1])  # Excluding last empty message
     final_prompt = final_answer_prompt.format(
         question=question,
         reasoning_conclusion=reasoning_conclusion,
```
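The conclusion extraction keeps roughly the last two paragraphs of the accumulated reasoning, falling back to the full text when there are fewer than three paragraphs:

```python
full_reasoning = "Step one.\n\nStep two.\n\nStep three.\n\nTherefore, the answer is 42."

reasoning_parts = full_reasoning.split("\n\n")
reasoning_conclusion = (
    "\n\n".join(reasoning_parts[-2:]) if len(reasoning_parts) > 2 else full_reasoning
)
print(reasoning_conclusion)
# Step three.
#
# Therefore, the answer is 42.
```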
```diff
@@ -215,7 +215,7 @@ def bot_thinking(
     )
     final_messages[-1]["content"] += final_prompt
 
-    #
+    # Generate final answer
     t = threading.Thread(
         target=pipe,
         args=(final_messages,),
```
```diff
@@ -228,7 +228,7 @@ def bot_thinking(
     )
     t.start()
 
-    #
+    # Stream final answer
     for token in streamer:
         history[-1].content += token
         history[-1].content = reformat_math(history[-1].content)
```
```diff
@@ -238,10 +238,21 @@ def bot_thinking(
     yield history
 
 
-with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
-    #
-    gr.Markdown("#
-    gr.Markdown("###
+with gr.Blocks(fill_height=True, title="ThinkFlow") as demo:
+    # Title and description
+    gr.Markdown("# ThinkFlow")
+    gr.Markdown("### An LLM reasoning generation platform that automatically applies reasoning capabilities to LLM models without modification")
+
+    # Features and benefits section
+    with gr.Accordion("✨ Features & Benefits", open=True):
+        gr.Markdown("""
+        - **Enhanced Reasoning**: Transform any LLM into a step-by-step reasoning engine without model modifications
+        - **Transparency**: Visualize the model's thought process alongside direct answers
+        - **Improved Accuracy**: See how guided reasoning leads to more accurate solutions for complex problems
+        - **Educational Tool**: Perfect for teaching critical thinking and problem-solving approaches
+        - **Versatile Application**: Works with mathematical problems, logical puzzles, and complex questions
+        - **Side-by-Side Comparison**: Compare standard model responses with reasoning-enhanced outputs
+        """)
 
     with gr.Row(scale=1):
         with gr.Column(scale=2):
```
```diff
@@ -263,36 +274,36 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
         )
 
     with gr.Row():
-        # msg
+        # Define msg textbox first
         msg = gr.Textbox(
             submit_btn=True,
             label="",
             show_label=False,
-            placeholder="
+            placeholder="Enter your question here.",
             autofocus=True,
         )
 
-    #
+    # Examples section - placed after msg variable definition
     with gr.Accordion("EXAMPLES", open=False):
         examples = gr.Examples(
             examples=[
-                "[
-                "[
-                "[
-                "[
+                "[Source: MATH-500)] How many numbers among the first 100 positive integers are divisible by 3, 4, and 5?",
+                "[Source: MATH-500)] In the land of Ink, the money system is unique. 1 trinket equals 4 blinkets, and 3 blinkets equal 7 drinkits. What is the value of 56 drinkits in trinkets?",
+                "[Source: MATH-500)] The average age of Amy, Ben, and Chris is 6 years. Four years ago, Chris was the same age as Amy is now. Four years from now, Ben's age will be $\\frac{3}{5}$ of Amy's age at that time. How old is Chris now?",
+                "[Source: MATH-500)] A bag contains yellow and blue marbles. Currently, the ratio of blue marbles to yellow marbles is 4:3. After adding 5 blue marbles and removing 3 yellow marbles, the ratio becomes 7:3. How many blue marbles were in the bag before any were added?"
             ],
             inputs=msg
         )
 
     with gr.Row():
         with gr.Column():
-            gr.Markdown("""##
+            gr.Markdown("""## Parameter Adjustment""")
             num_tokens = gr.Slider(
                 50,
                 4000,
                 2000,
                 step=1,
-                label="
+                label="Maximum tokens per reasoning step",
                 interactive=True,
             )
             final_num_tokens = gr.Slider(
```
```diff
@@ -300,17 +311,20 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
                 4000,
                 2000,
                 step=1,
-                label="
+                label="Maximum tokens for final answer",
                 interactive=True,
             )
-            do_sample = gr.Checkbox(True, label="
-            temperature = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="
+            do_sample = gr.Checkbox(True, label="Use sampling")
+            temperature = gr.Slider(0.1, 1.0, 0.7, step=0.1, label="Temperature")
+
+    # Community link at the bottom
+    gr.Markdown("<p style='font-size: 12px;'>Community: <a href='https://discord.gg/openfreeai' target='_blank'>https://discord.gg/openfreeai</a></p>")
 
-    #
+    # When user submits a message, both bots respond simultaneously
     msg.submit(
         user_input,
-        [msg, chatbot_original, chatbot_thinking],  #
-        [msg, chatbot_original, chatbot_thinking],  #
+        [msg, chatbot_original, chatbot_thinking],  # inputs
+        [msg, chatbot_original, chatbot_thinking],  # outputs
     ).then(
         bot_original,
         [
```
```diff
@@ -319,7 +333,7 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
             do_sample,
             temperature,
         ],
-        chatbot_original,  #
+        chatbot_original,  # save new history in outputs
     ).then(
         bot_thinking,
         [
```
```diff
@@ -329,7 +343,7 @@ with gr.Blocks(fill_height=True, title="Vidraft ThinkFlow") as demo:
             do_sample,
             temperature,
         ],
-        chatbot_thinking,  #
+        chatbot_thinking,  # save new history in outputs
     )
 
 if __name__ == "__main__":
```
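The wiring in the last hunks relies on Gradio event chaining: `.then()` runs each handler after the previous one finishes, so a single submit records the user turn and then updates both chatbots in sequence. A minimal sketch of the same pattern (component and function names are illustrative, not the app's):

```python
import gradio as gr

with gr.Blocks() as demo:
    chatbot_a = gr.Chatbot(type="messages")
    chatbot_b = gr.Chatbot(type="messages")
    msg = gr.Textbox()

    def add_user(message, hist_a, hist_b):
        turn = {"role": "user", "content": message}
        return "", hist_a + [turn], hist_b + [turn]

    def bot_a(history):
        yield history + [{"role": "assistant", "content": "direct answer"}]

    def bot_b(history):
        yield history + [{"role": "assistant", "content": "reasoned answer"}]

    # .then() chains handlers on the same submit event: user turn first,
    # then each bot streams its update in turn
    msg.submit(
        add_user, [msg, chatbot_a, chatbot_b], [msg, chatbot_a, chatbot_b]
    ).then(bot_a, chatbot_a, chatbot_a).then(bot_b, chatbot_b, chatbot_b)

if __name__ == "__main__":
    demo.launch()
```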