Upload 3 files
Browse files
- app (34).py +240 -0
- apt.txt +2 -0
- requirements (15).txt +9 -0
app (34).py
ADDED
@@ -0,0 +1,240 @@
+import os
+import time
+import gc
+import threading
+from itertools import islice
+from datetime import datetime
+import gradio as gr
+import torch
+from transformers import pipeline, TextIteratorStreamer
+from duckduckgo_search import DDGS
+import spaces  # Import spaces early to enable ZeroGPU support
+
+# Optional: Disable GPU visibility if you wish to force CPU usage
+# os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
+# ------------------------------
+# Global Cancellation Event
+# ------------------------------
+cancel_event = threading.Event()
+
+# ------------------------------
+# Torch-Compatible Model Definitions with Adjusted Descriptions
+# ------------------------------
+MODELS = {
+
+    "Qwen3-8B": {"repo_id": "Qwen/Qwen3-8B", "description": "Qwen3-8B"},
+    "Qwen3-4B": {"repo_id": "Qwen/Qwen3-4B", "description": "Qwen3-4B"},
+    "Qwen3-1.7B": {"repo_id": "Qwen/Qwen3-1.7B", "description": "Qwen3-1.7B"},
+    "Qwen3-0.6B": {"repo_id": "Qwen/Qwen3-0.6B", "description": "Qwen3-0.6B"},
+    "Gemma-3-4B-IT": {"repo_id": "unsloth/gemma-3-4b-it", "description": "Gemma-3-4B-IT"},
+    "SmolLM2-135M-Instruct-TaiwanChat": {"repo_id": "Luigi/SmolLM2-135M-Instruct-TaiwanChat", "description": "SmolLM2‑135M Instruct fine-tuned on TaiwanChat"},
+    "SmolLM2-135M-Instruct": {"repo_id": "HuggingFaceTB/SmolLM2-135M-Instruct", "description": "Original SmolLM2‑135M Instruct"},
+    "SmolLM2-360M-Instruct-TaiwanChat": {"repo_id": "Luigi/SmolLM2-360M-Instruct-TaiwanChat", "description": "SmolLM2‑360M Instruct fine-tuned on TaiwanChat"},
+    "Llama-3.2-Taiwan-3B-Instruct": {"repo_id": "lianghsun/Llama-3.2-Taiwan-3B-Instruct", "description": "Llama-3.2-Taiwan-3B-Instruct"},
+    "MiniCPM3-4B": {"repo_id": "openbmb/MiniCPM3-4B", "description": "MiniCPM3-4B"},
+    "Qwen2.5-3B-Instruct": {"repo_id": "Qwen/Qwen2.5-3B-Instruct", "description": "Qwen2.5-3B-Instruct"},
+    "Qwen2.5-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-7B-Instruct", "description": "Qwen2.5-7B-Instruct"},
+    "Phi-4-mini-Instruct": {"repo_id": "unsloth/Phi-4-mini-instruct", "description": "Phi-4-mini-Instruct"},
+    "Meta-Llama-3.1-8B-Instruct": {"repo_id": "MaziyarPanahi/Meta-Llama-3.1-8B-Instruct", "description": "Meta-Llama-3.1-8B-Instruct"},
+    "DeepSeek-R1-Distill-Llama-8B": {"repo_id": "unsloth/DeepSeek-R1-Distill-Llama-8B", "description": "DeepSeek-R1-Distill-Llama-8B"},
+    "Mistral-7B-Instruct-v0.3": {"repo_id": "MaziyarPanahi/Mistral-7B-Instruct-v0.3", "description": "Mistral-7B-Instruct-v0.3"},
+    "Qwen2.5-Coder-7B-Instruct": {"repo_id": "Qwen/Qwen2.5-Coder-7B-Instruct", "description": "Qwen2.5-Coder-7B-Instruct"},
+}
+
+# Global cache for pipelines to avoid re-loading.
+PIPELINES = {}
+
+def load_pipeline(model_name):
+    """
+    Load and cache a transformers pipeline for text generation.
+    Tries bfloat16, falls back to float16 or float32 if unsupported.
+    """
+    global PIPELINES
+    if model_name in PIPELINES:
+        return PIPELINES[model_name]
+    repo = MODELS[model_name]["repo_id"]
+    for dtype in (torch.bfloat16, torch.float16, torch.float32):
+        try:
+            pipe = pipeline(
+                task="text-generation",
+                model=repo,
+                tokenizer=repo,
+                trust_remote_code=True,
+                torch_dtype=dtype,
+                device_map="auto"
+            )
+            PIPELINES[model_name] = pipe
+            return pipe
+        except Exception:
+            continue
+    # Final fallback
+    pipe = pipeline(
+        task="text-generation",
+        model=repo,
+        tokenizer=repo,
+        trust_remote_code=True,
+        device_map="auto"
+    )
+    PIPELINES[model_name] = pipe
+    return pipe
+
+
+def retrieve_context(query, max_results=6, max_chars=600):
+    """
+    Retrieve search snippets from DuckDuckGo (runs in background).
+    Returns a list of result strings.
+    """
+    try:
+        with DDGS() as ddgs:
+            return [f"{i+1}. {r.get('title','No Title')} - {r.get('body','')[:max_chars]}"
+                    for i, r in enumerate(islice(ddgs.text(query, region="wt-wt", safesearch="off", timelimit="y"), max_results))]
+    except Exception:
+        return []
+
+
+def format_conversation(history, system_prompt):
+    """
+    Flatten chat history and system prompt into a single string.
+    """
+    prompt = system_prompt.strip() + "\n"
+    for msg in history:
+        if msg['role'] == 'user':
+            prompt += "User: " + msg['content'].strip() + "\n"
+        elif msg['role'] == 'assistant':
+            prompt += "Assistant: " + msg['content'].strip() + "\n"
+        else:
+            prompt += msg['content'].strip() + "\n"
+    if not prompt.strip().endswith("Assistant:"):
+        prompt += "Assistant: "
+    return prompt
+
+@spaces.GPU(duration=60)
+def chat_response(user_msg, chat_history, system_prompt,
+                  enable_search, max_results, max_chars,
+                  model_name, max_tokens, temperature,
+                  top_k, top_p, repeat_penalty):
+    """
+    Generates streaming chat responses, optionally with background web search.
+    """
+    cancel_event.clear()
+    history = list(chat_history or [])
+    history.append({'role': 'user', 'content': user_msg})
+
+    # Launch web search if enabled
+    debug = ''
+    search_results = []
+    if enable_search:
+        debug = 'Search task started.'
+        thread_search = threading.Thread(
+            target=lambda: search_results.extend(
+                retrieve_context(user_msg, int(max_results), int(max_chars))
+            )
+        )
+        thread_search.daemon = True
+        thread_search.start()
+    else:
+        debug = 'Web search disabled.'
+
+    # Prepare assistant placeholder
+    history.append({'role': 'assistant', 'content': ''})
+
+    try:
+
+        # wait up to 1s for snippets, then replace debug with them
+        if enable_search:
+            thread_search.join(timeout=1.0)
+            if search_results:
+                debug = "### Search results merged into prompt\n\n" + "\n".join(
+                    f"- {r}" for r in search_results
+                )
+            else:
+                debug = "*No web search results found.*"
+
+        # merge fetched snippets into the system prompt
+        if search_results:
+            enriched = system_prompt.strip() + "\n\nRelevant context:\n" + "\n".join(search_results)
+        else:
+            enriched = system_prompt
+
+        prompt = format_conversation(history, enriched)
+
+        pipe = load_pipeline(model_name)
+        streamer = TextIteratorStreamer(pipe.tokenizer,
+                                        skip_prompt=True,
+                                        skip_special_tokens=True)
+        gen_thread = threading.Thread(
+            target=pipe,
+            args=(prompt,),
+            kwargs={
+                'max_new_tokens': max_tokens,
+                'temperature': temperature,
+                'top_k': top_k,
+                'top_p': top_p,
+                'repetition_penalty': repeat_penalty,
+                'streamer': streamer,
+                'return_full_text': False
+            }
+        )
+        gen_thread.start()
+
+        assistant_text = ''
+        for chunk in streamer:
+            if cancel_event.is_set():
+                break
+            assistant_text += chunk
+            history[-1]['content'] = assistant_text
+            # Stream the growing reply; debug stays constant after setup
+            yield history, debug
+        gen_thread.join()
+    except Exception as e:
+        history[-1]['content'] = f"Error: {e}"
+        yield history, debug
+    finally:
+        gc.collect()
+
+
+def cancel_generation():
+    cancel_event.set()
+    return 'Generation cancelled.'
+
+
+def update_default_prompt(enable_search):
+    today = datetime.now().strftime('%Y-%m-%d')
+    return f"You are a helpful assistant. Today is {today}."
+
+# ------------------------------
+# Gradio UI
+# ------------------------------
+with gr.Blocks(title="LLM Inference with ZeroGPU") as demo:
+    gr.Markdown("## 🧠 ZeroGPU LLM Inference with Web Search")
+    gr.Markdown("Interact with the model. Select parameters and chat below.")
+    with gr.Row():
+        with gr.Column(scale=3):
+            model_dd = gr.Dropdown(label="Select Model", choices=list(MODELS.keys()), value=list(MODELS.keys())[0])
+            search_chk = gr.Checkbox(label="Enable Web Search", value=True)
+            sys_prompt = gr.Textbox(label="System Prompt", lines=3, value=update_default_prompt(search_chk.value))
+            gr.Markdown("### Generation Parameters")
+            max_tok = gr.Slider(64, 1024, value=512, step=32, label="Max Tokens")
+            temp = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
+            k = gr.Slider(1, 100, value=40, step=1, label="Top-K")
+            p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-P")
+            rp = gr.Slider(1.0, 2.0, value=1.1, step=0.1, label="Repetition Penalty")
+            gr.Markdown("### Web Search Settings")
+            mr = gr.Number(value=6, precision=0, label="Max Results")
+            mc = gr.Number(value=600, precision=0, label="Max Chars/Result")
+            clr = gr.Button("Clear Chat")
+            cnl = gr.Button("Cancel Generation")
+        with gr.Column(scale=7):
+            chat = gr.Chatbot(type="messages")
+            txt = gr.Textbox(placeholder="Type your message and press Enter...")
+            dbg = gr.Markdown()
+
+    search_chk.change(fn=update_default_prompt, inputs=search_chk, outputs=sys_prompt)
+    clr.click(fn=lambda: ([], "", ""), outputs=[chat, txt, dbg])
+    cnl.click(fn=cancel_generation, outputs=dbg)
+    txt.submit(fn=chat_response,
+               inputs=[txt, chat, sys_prompt, search_chk, mr, mc,
+                       model_dd, max_tok, temp, k, p, rp],
+               outputs=[chat, dbg])
+demo.launch()
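Note: chat_response above hinges on one pattern: generation runs in a worker thread while TextIteratorStreamer hands decoded chunks back to the consuming loop. Below is a minimal standalone sketch of that pattern, reusing the SmolLM2-135M repo from the MODELS table; the model choice and prompt text are illustrative only.

import threading
from transformers import pipeline, TextIteratorStreamer

# Load a small instruct model; any repo_id from MODELS above works the same way.
pipe = pipeline("text-generation", model="HuggingFaceTB/SmolLM2-135M-Instruct")
streamer = TextIteratorStreamer(pipe.tokenizer, skip_prompt=True, skip_special_tokens=True)

# Generation blocks, so it runs in a background thread; the streamer is consumed here.
worker = threading.Thread(
    target=pipe,
    args=("User: Say hi in one sentence.\nAssistant: ",),
    kwargs={"max_new_tokens": 32, "streamer": streamer, "return_full_text": False},
)
worker.start()
for piece in streamer:  # yields decoded text chunks as they are produced
    print(piece, end="", flush=True)
worker.join()

This thread-plus-streamer shape is what lets chat_response yield partial history updates to Gradio while tokens are still being generated.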
apt.txt
ADDED
@@ -0,0 +1,2 @@
+rustc
+cargo
requirements (15).txt
ADDED
@@ -0,0 +1,9 @@
+wheel
+streamlit
+duckduckgo_search
+gradio>=4.0.0
+torch==2.4.0
+transformers
+spaces
+sentencepiece
+accelerate