Spaces: Running on Zero
玙珲 committed · Commit 65588dd
Parent(s): f7211cd
modified

app.py CHANGED
@@ -215,18 +215,6 @@ def toggle_media_input(choice: str) -> Tuple:
     else: # Video
         return gr.update(visible=False, value=None), gr.update(visible=True, value=None), gr.update(visible=False), gr.update(visible=True)
 
-# # --- MODIFIED: New function to handle chat state and input clearing ---
-# def process_and_clear(chatbot: List, image_input: PIL.Image.Image, video_input: str, prompt: str, do_sample: bool, max_new_tokens: int, enable_thinking: bool):
-#     """
-#     This function now takes the chatbot state as input to maintain conversation history
-#     and clears the prompt box after submission.
-#     """
-#     # Create a generator by calling the main run_inference function
-#     generator = run_inference(chatbot, image_input, video_input, prompt, do_sample, max_new_tokens, enable_thinking)
-#     # Yield from the generator
-#     for chatbot_state, _ in generator:
-#         yield chatbot_state, ""  # Clear prompt after first yield
-
 
 # --- Build Gradio Application ---
 # @spaces.GPU
@@ -282,7 +270,7 @@ def build_demo(model_path: str):
 
         with gr.Accordion("Generation Settings", open=True):
             do_sample = gr.Checkbox(label="Enable Sampling (Do Sample)", value=True)
-            max_new_tokens = gr.Slider(minimum=32, maximum=4096, value=
+            max_new_tokens = gr.Slider(minimum=32, maximum=4096, value=2048, step=32, label="Max New Tokens")
             enable_thinking = gr.Checkbox(label="Enable Deep Thinking", value=False)
 
 
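For reference, a minimal, self-contained sketch of the updated "Generation Settings" controls as they might sit inside a Gradio Blocks layout. It only restates the widgets visible in this diff; the bare gr.Blocks() context, the simplified build_demo() signature, and the launch() call are assumptions for illustration, not the actual wiring in app.py.

import gradio as gr

def build_demo():
    # Assumed minimal layout; the real build_demo(model_path) connects these
    # controls to the inference function, which is omitted here.
    with gr.Blocks() as demo:
        with gr.Accordion("Generation Settings", open=True):
            do_sample = gr.Checkbox(label="Enable Sampling (Do Sample)", value=True)
            # New default of 2048 tokens, adjustable in steps of 32.
            max_new_tokens = gr.Slider(
                minimum=32, maximum=4096, value=2048, step=32, label="Max New Tokens"
            )
            enable_thinking = gr.Checkbox(label="Enable Deep Thinking", value=False)
    return demo

if __name__ == "__main__":
    build_demo().launch()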