import os

import gradio as gr
from loguru import logger

from core.central_ai_hub import CentralAIHub

# Global Central AI Hub instance, initialized lazily on the first request.
hub = None


async def initialize_hub():
    """Create and start the Central AI Hub if it has not been initialized yet."""
    global hub
    if hub is None:
        logger.info("Initializing Central AI Hub...")
        # Check if a local model path is provided as an environment variable.
        model_path = os.getenv("LOCAL_MODEL_PATH")
        hub = CentralAIHub(model_path=model_path)
        await hub.start()
        logger.info("Central AI Hub initialized.")


async def process_task(task_type, task_content, task_requirements):
    """Delegate a task to the hub and return its status as a display string."""
    await initialize_hub()
    task = {
        'type': task_type,
        'content': task_content,
        'requirements': task_requirements.split(',') if task_requirements else []
    }
    task_id = await hub.delegate_task(task)
    status = await hub.get_task_status(task_id)
    return f"Task ID: {task_id}, Status: {status['status']}, Result: {status.get('result', 'N/A')}"


if __name__ == "__main__":
    # Build the Gradio UI: task inputs, a submit button, and a status output.
    with gr.Blocks() as demo:
        gr.Markdown("# Central AI Hub")
        with gr.Row():
            task_type = gr.Dropdown(
                choices=['code_analysis', 'code_generation', 'error_fixing'],
                label="Task Type"
            )
            task_content = gr.Textbox(label="Task Content")
            task_requirements = gr.Textbox(label="Task Requirements (comma separated)")
        submit_button = gr.Button("Submit Task")
        output_text = gr.Textbox(label="Task Status")

        # Gradio runs async callbacks on its own event loop, so process_task
        # can be registered directly as the click handler.
        submit_button.click(
            process_task,
            inputs=[task_type, task_content, task_requirements],
            outputs=[output_text]
        )

    demo.launch(server_name="0.0.0.0", server_port=7860)