#
# SPDX-FileCopyrightText: Hadad <[email protected]>
# SPDX-License-Identifier: Apache-2.0
#
import gradio as gr
import asyncio
from pathlib import Path
from src.config import *
from src.cores.session import create_session, ensure_stop_event, get_model_key
from src.main.file_extractors import extract_file_content
from src.cores.client import chat_with_model_async

async def respond_async(multi, history, model_display, sess, custom_prompt, deep_search):
    """
    Main async handler for user input submission.
    Supports text and file uploads (multimodal input).
    Extracts file content and appends it to the user input.
    Streams AI responses back to the UI, updating the chat history live.
    Allows stopping response generation gracefully.
    """
    ensure_stop_event(sess)
    sess.stop_event.clear()
    sess.cancel_token["cancelled"] = False
    # Extract text and files from the multimodal input
    msg_input = {"text": multi.get("text", "").strip(), "files": multi.get("files", [])}
    # If there is no input, reset the UI state and return
    if not msg_input["text"] and not msg_input["files"]:
        yield history, gr.update(value="", interactive=True, submit_btn=True, stop_btn=False), sess
        return
    # Initialize input with extracted file contents
    inp = ""
    for f in msg_input["files"]:
        # Support dict entries or direct file paths
        fp = f.get("data", f.get("name", "")) if isinstance(f, dict) else f
        inp += f"{Path(fp).name}\n\n{extract_file_content(fp)}\n\n"
    # Append the user's text input, if any
    if msg_input["text"]:
        inp += msg_input["text"]
    # Append the user input to the chat history with a placeholder response
    history.append([inp, RESPONSES["RESPONSE_8"]])
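    # Lock the input box and show the stop button while the model generates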
    yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
    queue = asyncio.Queue()
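    # The queue hands streamed chunks from the background task to the UI loop;
    # a None item is the end-of-stream sentinel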

    # Background async task to fetch streamed AI responses
    async def background():
        reasoning = ""
        responses = ""
        content_started = False
        ignore_reasoning = False
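        # "reasoning" chunks are rendered inside <think> tags until the first
        # "content" chunk arrives; after that, further reasoning is ignored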
        async for typ, chunk in chat_with_model_async(history, inp, model_display, sess, custom_prompt, deep_search):
            if sess.stop_event.is_set() or sess.cancel_token["cancelled"]:
                break
            if typ == "reasoning":
                if ignore_reasoning:
                    continue
                reasoning += chunk
                await queue.put(("reasoning", f"<think>\n{reasoning}\n\n</think>\n\n"))
            elif typ == "content":
                if not content_started:
                    content_started = True
                    ignore_reasoning = True
                    responses = chunk
                    await queue.put(("reasoning", ""))  # Clear reasoning on content start
                    await queue.put(("replace", responses))
                else:
                    responses += chunk
                    # "responses" already holds the full text so far; the consumer
                    # overwrites the last message rather than appending
                    await queue.put(("append", responses))
        await queue.put(None)
        return responses

    bg_task = asyncio.create_task(background())
    stop_task = asyncio.create_task(sess.stop_event.wait())
    pending_tasks = {bg_task, stop_task}
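    # Race the stop event against queue reads so a user-initiated stop
    # interrupts streaming without waiting for the next chunk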
    try:
        while True:
            queue_task = asyncio.create_task(queue.get())
            pending_tasks.add(queue_task)
            done, _ = await asyncio.wait({stop_task, queue_task}, return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                pending_tasks.discard(task)
                if task is stop_task:
                    # User requested stop: cancel the background task and update the UI
                    sess.cancel_token["cancelled"] = True
                    bg_task.cancel()
                    try:
                        await bg_task
                    except asyncio.CancelledError:
                        pass
                    history[-1][1] = RESPONSES["RESPONSE_1"]
                    yield history, gr.update(value="", interactive=True, submit_btn=True, stop_btn=False), sess
                    return
                result = task.result()
                if result is None:
                    # End-of-stream sentinel from the background task
                    raise StopAsyncIteration
                action, text = result
                # Update the last message content in history with the streamed text
                history[-1][1] = text
                yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
    except StopAsyncIteration:
        pass
    finally:
        # Cancel any tasks that are still pending before leaving the handler
        for task in pending_tasks:
            task.cancel()
        await asyncio.gather(*pending_tasks, return_exceptions=True)
    # Unlock the input box once generation has finished
    yield history, gr.update(value="", interactive=True, submit_btn=True, stop_btn=False), sess

def toggle_deep_search(deep_search_value, history, sess, prompt, model):
    """
    Toggle the deep search checkbox. Keeps the chat intact for production compatibility.
    """
    return history, sess, prompt, model, gr.update(value=deep_search_value)

def change_model(new):
    """
    Handler to change the selected AI model.
    Resets the chat history and session.
    Updates the system instructions and deep search checkbox visibility accordingly.
    Deep search is only available for the default model.
    """
    visible = new == MODEL_CHOICES[0]
    default_prompt = SYSTEM_PROMPT_MAPPING.get(get_model_key(new, MODEL_MAPPING, DEFAULT_MODEL_KEY), SYSTEM_PROMPT_DEFAULT)
    # On model change: clear the chat, create a new session, and reset the deep
    # search checkbox (value and visibility) in a single update
    return [], create_session(), new, default_prompt, gr.update(value=False, visible=visible)

def stop_response(history, sess):
    """
    Handler to stop ongoing AI response generation.
    Sets cancellation flags and updates the last message to a cancellation notice.
    """
    ensure_stop_event(sess)
    sess.stop_event.set()
    sess.cancel_token["cancelled"] = True
    if history:
        history[-1][1] = RESPONSES["RESPONSE_1"]
    return history, None, create_session()

def launch_ui():
    # ============================
    # System Setup
    # ============================
    # Install Tesseract OCR and dependencies for text extraction from images.
    # Note: apt-get requires root privileges in the runtime container.
    import os
    os.system(
        "apt-get update -q -y && "
        "apt-get install -q -y tesseract-ocr "
        "tesseract-ocr-eng tesseract-ocr-ind "
        "libleptonica-dev libtesseract-dev"
    )
    with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as jarvis:
        user_history = gr.State([])
        user_session = gr.State(create_session())
        selected_model = gr.State(MODEL_CHOICES[0] if MODEL_CHOICES else "")
        J_A_R_V_I_S = gr.State("")
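        # Per-session states: user_history (chat log), user_session (session object),
        # selected_model (active model name), J_A_R_V_I_S (custom system prompt)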
        # Chatbot UI
        with gr.Column():
            chatbot = gr.Chatbot(label=AI_TYPES["AI_TYPE_1"], show_copy_button=True, scale=1, elem_id=AI_TYPES["AI_TYPE_2"], examples=JARVIS_INIT, allow_tags=["think"])
        # User input
        msg = gr.MultimodalTextbox(show_label=False, placeholder=RESPONSES["RESPONSE_5"], interactive=True, file_count=None, file_types=None, sources=[])
        # Sidebar for selecting the AI model and toggling deep search
        with gr.Sidebar(open=False):
            deep_search = gr.Checkbox(label=AI_TYPES["AI_TYPE_8"], value=False, info=AI_TYPES["AI_TYPE_9"], visible=True)
            deep_search.change(fn=toggle_deep_search, inputs=[deep_search, user_history, user_session, J_A_R_V_I_S, selected_model], outputs=[chatbot, user_session, J_A_R_V_I_S, selected_model, deep_search])
            model_radio = gr.Radio(show_label=False, choices=MODEL_CHOICES, value=MODEL_CHOICES[0])
            gr.Markdown(NOTICES)
        # Model change
        model_radio.change(fn=change_model, inputs=[model_radio], outputs=[user_history, user_session, selected_model, J_A_R_V_I_S, deep_search])
        # Initial welcome messages: selecting an example fills the input box, then submits it
        def on_example_select(evt: gr.SelectData):
            return evt.value
        chatbot.example_select(fn=on_example_select, inputs=[], outputs=[msg]).then(fn=respond_async, inputs=[msg, user_history, selected_model, user_session, J_A_R_V_I_S, deep_search], outputs=[chatbot, msg, user_session])
        # Clear chat
        def clear_chat(history, sess, prompt, model):
            return [], create_session(), prompt, model
        chatbot.clear(fn=clear_chat, inputs=[user_history, user_session, J_A_R_V_I_S, selected_model], outputs=[chatbot, user_session, J_A_R_V_I_S, selected_model])
        # Submit message (also exposed as a named API endpoint via api_name)
        msg.submit(fn=respond_async, inputs=[msg, user_history, selected_model, user_session, J_A_R_V_I_S, deep_search], outputs=[chatbot, msg, user_session], api_name=INTERNAL_AI_GET_SERVER)
        # Stop message
        msg.stop(fn=stop_response, inputs=[user_history, user_session], outputs=[chatbot, msg, user_session])
    # Launch with queuing enabled; mcp_server=True also exposes the endpoints over MCP
    jarvis.queue(default_concurrency_limit=2).launch(max_file_size="1mb", mcp_server=True)