| """ | |
| Advanced Agentic System Interface | |
| ------------------------------- | |
| Provides a chat interface to interact with the autonomous agent teams: | |
| - Team A: Coders (App/Software Developers) | |
| - Team B: Business (Entrepreneurs) | |
| - Team C: Research (Deep Online Research) | |
| - Team D: Crypto & Sports Trading | |
| """ | |
| import gradio as gr | |
| from fastapi import FastAPI, HTTPException | |
| from fastapi.middleware.cors import CORSMiddleware | |
| import uvicorn | |
| from typing import Dict, Any, List, Tuple, Optional | |
| import logging | |
| from pathlib import Path | |
| import asyncio | |
| from datetime import datetime | |
| import json | |
| from requests.adapters import HTTPAdapter, Retry | |
| from urllib3.util.retry import Retry | |
| import time | |
| from agentic_system import AgenticSystem | |
| from team_management import TeamManager, TeamType, TeamObjective | |
| from orchestrator import AgentOrchestrator | |
| from reasoning import UnifiedReasoningEngine as ReasoningEngine | |
| # Configure logging | |
| logging.basicConfig(level=logging.INFO) | |
| logger = logging.getLogger(__name__) | |
| # Configure network settings | |
| TIMEOUT = int(os.getenv('REQUESTS_TIMEOUT', '30')) | |
| MAX_RETRIES = 5 | |
| RETRY_BACKOFF = 1 | |
def setup_requests_session():
    """Configure a requests session with automatic retries."""
    session = requests.Session()
    retry_strategy = Retry(
        total=MAX_RETRIES,
        backoff_factor=RETRY_BACKOFF,
        status_forcelist=[408, 429, 500, 502, 503, 504],
        allowed_methods=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    return session
def check_network(max_attempts=3):
    """Check network connectivity with retries."""
    session = setup_requests_session()
    for attempt in range(max_attempts):
        try:
            # Verify DNS resolution of the Hugging Face host first
            socket.gethostbyname('huggingface.co')

            # Test connection to Hugging Face
            response = session.get('https://huggingface.co/api/health',
                                   timeout=TIMEOUT)
            if response.status_code == 200:
                return True
        except (requests.RequestException, socket.gaierror) as e:
            logger.warning(f"Network check attempt {attempt + 1} failed: {e}")
            if attempt < max_attempts - 1:
                time.sleep(RETRY_BACKOFF * (attempt + 1))
                continue
    logger.error("Network connectivity check failed after all attempts")
    return False
class ChatInterface:
    def __init__(self):
        # Check network connectivity
        if not check_network():
            logger.warning("Network connectivity issues detected - continuing with degraded functionality")

        # Initialize core components with consistent configuration
        config = {
            "min_confidence": 0.7,
            "parallel_threshold": 3,
            "learning_rate": 0.1,
            "strategy_weights": {
                "LOCAL_LLM": 0.8,
                "CHAIN_OF_THOUGHT": 0.6,
                "TREE_OF_THOUGHTS": 0.5,
                "META_LEARNING": 0.4
            }
        }
        self.orchestrator = AgentOrchestrator(config)
        self.agentic_system = AgenticSystem(config)
        self.team_manager = TeamManager(self.orchestrator)
        self.chat_history = []
        self.active_objectives = {}

        # Set up network session
        self.session = setup_requests_session()

        # Initialize teams
        asyncio.run(self.team_manager.initialize_team_agents())
    async def process_message(
        self,
        message: str,
        history: List[List[str]]
    ) -> Tuple[str, List[List[str]]]:
        """Process an incoming chat message."""
        try:
            # Update chat history
            self.chat_history = history

            # Process message
            response = await self._handle_message(message)

            # Update history
            if response:
                history.append([message, response])

            return response, history
        except Exception as e:
            logger.error(f"Error processing message: {str(e)}")
            error_msg = "I apologize, but I encountered an error. Please try again."
            history.append([message, error_msg])
            return error_msg, history
    async def _handle_message(self, message: str) -> str:
        """Handle message processing with error recovery."""
        try:
            # Analyze intent
            intent = await self._analyze_intent(message)
            intent_type = self._get_intent_type(intent)

            # Route to the appropriate handler
            if intent_type == "query":
                return await self._handle_query(message)
            elif intent_type == "objective":
                return await self._handle_objective(message)
            elif intent_type == "status":
                return await self._handle_status_request(message)
            else:
                return await self._handle_general_chat(message)
        except Exception as e:
            logger.error(f"Error in message handling: {str(e)}")
            return "I apologize, but I encountered an error processing your message. Please try again."

    def _get_intent_type(self, intent) -> str:
        """Safely extract the intent type from various result formats."""
        if isinstance(intent, dict):
            return intent.get("type", "general")
        return "general"
    async def _analyze_intent(self, message: str) -> Dict[str, Any]:
        """Analyze user message intent with error handling."""
        try:
            # Use the reasoning engine to analyze intent
            analysis = await self.orchestrator.reasoning_engine.reason(
                query=message,
                context={
                    "chat_history": self.chat_history,
                    "active_objectives": self.active_objectives
                }
            )
            return {
                "type": analysis.get("intent_type", "general"),
                "confidence": analysis.get("confidence", 0.5),
                "entities": analysis.get("entities", []),
                "action_required": analysis.get("action_required", False)
            }
        except Exception as e:
            logger.error(f"Error analyzing intent: {str(e)}")
            return {"type": "general", "confidence": 0.5}
    async def _handle_query(self, message: str) -> str:
        """Handle information queries."""
        try:
            # Get relevant teams for the query
            recommended_teams = await self.team_manager.get_team_recommendations(message)

            # Get responses from relevant teams
            responses = []
            for team_type in recommended_teams:
                response = await self._get_team_response(team_type, message)
                if response:
                    responses.append(response)

            if not responses:
                return "I apologize, but I couldn't find a relevant answer to your query."

            # Combine and format responses
            return self._format_team_responses(responses)
        except Exception as e:
            logger.error(f"Error handling query: {str(e)}")
            return "I apologize, but I encountered an error processing your query. Please try again."

    async def _handle_objective(self, message: str) -> str:
        """Handle new objective creation."""
        try:
            # Create a new objective
            objective_id = await self.team_manager.create_objective(message)
            if not objective_id:
                return "I apologize, but I couldn't create the objective. Please try again."

            # Format and return response
            return self._format_objective_creation(objective_id)
        except Exception as e:
            logger.error(f"Error creating objective: {str(e)}")
            return "I apologize, but I encountered an error creating the objective. Please try again."
    async def _handle_status_request(self, message: str) -> str:
        """Handle status check requests."""
        try:
            # Get system status
            system_status = await self.agentic_system.get_system_status()

            # Get team status
            team_status = {}
            for team_id, team in self.team_manager.teams.items():
                team_status[team.name] = await self.team_manager.monitor_objective_progress(team_id)

            # Get objective status
            objective_status = {}
            for obj_id, obj in self.active_objectives.items():
                objective_status[obj_id] = await self.team_manager.monitor_objective_progress(obj_id)

            return self._format_status_response(system_status, team_status, objective_status)
        except Exception as e:
            logger.error(f"Error getting status: {str(e)}")
            return "I apologize, but I encountered an error getting the status. Please try again."

    async def _handle_general_chat(self, message: str) -> str:
        """Handle general chat interactions with error recovery."""
        try:
            # Use the reasoning engine for response generation
            response = await self.orchestrator.reasoning_engine.reason(
                query=message,
                context={
                    "chat_history": self.chat_history,
                    "system_state": await self.agentic_system.get_system_status()
                }
            )
            if not response or not response.get("response"):
                return "I apologize, but I couldn't generate a meaningful response. Please try again."
            return response["response"]
        except Exception as e:
            logger.error(f"Error in general chat: {str(e)}")
            return "I apologize, but I encountered an error processing your message. Please try again."
    async def _get_team_response(self, team_type: TeamType, query: str) -> Optional[Dict[str, Any]]:
        """Get a response from a specific team, or None if none is available."""
        try:
            team = self.team_manager.teams.get(team_type.value)
            if not team:
                return None

            # Get responses from the team's agents
            responses = []
            for agent in team.agents:
                response = await agent.process_query(query)
                if response:
                    responses.append(response)

            if not responses:
                return None

            # Return the best response
            return self._combine_agent_responses(responses)
        except Exception as e:
            logger.error(f"Error getting team response: {str(e)}")
            return None

    def _combine_agent_responses(self, responses: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """Combine multiple agent responses into a single coherent response."""
        try:
            # Keep only successful responses that contain content
            valid_responses = [
                r for r in responses
                if r.get("success", False) and r.get("response")
            ]
            if not valid_responses:
                return None

            # Sort by confidence and take the highest-confidence response
            sorted_responses = sorted(
                valid_responses,
                key=lambda x: x.get("confidence", 0),
                reverse=True
            )
            return sorted_responses[0]
        except Exception as e:
            logger.error(f"Error combining responses: {str(e)}")
            return None
    def _format_team_responses(self, responses: List[Dict[str, Any]]) -> str:
        """Format team responses into a readable message."""
        try:
            if not responses:
                return "No team responses available."

            formatted = []
            for resp in responses:
                if resp and resp.get("response"):
                    team_name = resp.get("team_name", "Unknown Team")
                    confidence = resp.get("confidence", 0)
                    formatted.append(
                        f"\n{team_name} (Confidence: {confidence:.2%}):\n{resp['response']}"
                    )

            if not formatted:
                return "No valid team responses available."

            return "\n".join(formatted)
        except Exception as e:
            logger.error(f"Error formatting responses: {str(e)}")
            return "Error formatting team responses."

    def _format_objective_creation(self, objective_id: str) -> str:
        """Format the objective-creation response."""
        try:
            obj = self.active_objectives.get(objective_id)
            if not obj:
                return "Objective created but details not available."

            return "\n".join([
                "New Objective Created:",
                f"Description: {obj['description']}",
                f"Status: {obj['status']}",
                f"Assigned Teams: {', '.join(t.value for t in obj['teams'])}"
            ])
        except Exception as e:
            logger.error(f"Error formatting objective: {str(e)}")
            return "Error formatting objective details."
    def _format_status_response(
        self,
        system_status: Dict[str, Any],
        team_status: Dict[str, Any],
        objective_status: Dict[str, Any]
    ) -> str:
        """Format the status response."""
        try:
            # Format system status
            status = [
                "System Status:",
                f"- State: {system_status['state']}",
                f"- Active Agents: {system_status['agent_count']}",
                f"- Active Tasks: {system_status['active_tasks']}",
                "\nTeam Status:"
            ]

            # Add team status
            for team_name, team_info in team_status.items():
                status.extend([
                    f"\n{team_name}:",
                    f"- Active Agents: {team_info['active_agents']}",
                    f"- Completion Rate: {team_info['completion_rate']:.2%}",
                    f"- Collaboration Score: {team_info['collaboration_score']:.2f}"
                ])

            # Add objective status
            if objective_status:
                status.append("\nActive Objectives:")
                for obj_id, obj_info in objective_status.items():
                    obj = self.active_objectives[obj_id]
                    status.extend([
                        f"\n{obj['description']}:",
                        f"- Status: {obj['status']}",
                        f"- Teams: {', '.join(t.value for t in obj['teams'])}",
                        f"- Progress: {sum(t['completion_rate'] for t in obj_info.values()) / len(obj_info):.2%}"
                    ])

            return "\n".join(status)
        except Exception as e:
            logger.error(f"Error formatting status: {str(e)}")
            return "Error formatting status information."
class VentureUI:
    def __init__(self, app):
        self.app = app

    def create_interface(self):
        """Create the Gradio interface."""
        with gr.Blocks(
            theme=gr.themes.Soft(),
            analytics_enabled=False,
            title="Advanced Agentic System"
        ) as interface:
            # Header (shows the running Gradio version)
            gr.Markdown(f"""
            # Advanced Agentic System Chat Interface v{gr.__version__}

            Chat with our autonomous agent teams:
            - Team A: Coders (App/Software Developers)
            - Team B: Business (Entrepreneurs)
            - Team C: Research (Deep Online Research)
            - Team D: Crypto & Sports Trading

            You can:
            1. Ask questions
            2. Create new objectives
            3. Check status of teams and objectives
            4. Get insights and recommendations
            """)
            chatbot = gr.Chatbot(
                label="Chat History",
                height=400,
                bubble_full_width=False,
                show_copy_button=True,
                render_markdown=True
            )

            with gr.Row():
                msg = gr.Textbox(
                    label="Message",
                    placeholder="Chat with the Agentic System...",
                    lines=2,
                    scale=9,
                    autofocus=True,
                    container=True
                )
                submit = gr.Button(
                    "Send",
                    scale=1,
                    variant="primary"
                )

            with gr.Row():
                clear = gr.ClearButton(
                    [msg, chatbot],
                    value="Clear Chat",
                    variant="secondary",
                    scale=1
                )
                retry = gr.Button(
                    "Retry Last",
                    variant="secondary",
                    scale=1
                )
            async def respond(message, history):
                try:
                    # Convert history to the format expected by process_message
                    history_list = [[x, y] for x, y in history] if history else []
                    response, history_list = await self.app(message, history_list)

                    # Update the displayed history
                    if history is None:
                        history = []
                    history.append((message, response))

                    return "", history
                except Exception as e:
                    logger.error(f"Error in chat response: {str(e)}")
                    error_msg = "I apologize, but I encountered an error. Please try again."
                    if history is None:
                        history = []
                    history.append((message, error_msg))
                    return "", history

            async def retry_last(history):
                if not history:
                    return history
                last_user_msg = history[-1][0]
                history = history[:-1]  # Remove the last exchange
                # respond() returns (textbox_value, history); only the history
                # is wired to the chatbot output below
                _, history = await respond(last_user_msg, history)
                return history
            msg.submit(
                respond,
                [msg, chatbot],
                [msg, chatbot],
                api_name="chat"
            ).then(
                # Re-enable both the textbox and the send button after a response
                lambda: (gr.update(interactive=True), gr.update(interactive=True)),
                None,
                [msg, submit],
                queue=False
            )

            submit.click(
                respond,
                [msg, chatbot],
                [msg, chatbot],
                api_name="submit"
            ).then(
                lambda: (gr.update(interactive=True), gr.update(interactive=True)),
                None,
                [msg, submit],
                queue=False
            )

            retry.click(
                retry_last,
                [chatbot],
                [chatbot],
                api_name="retry"
            )
            # Event handlers for better UX: disable Send while the textbox is empty
            msg.change(lambda x: gr.update(interactive=bool(x.strip())), [msg], [submit])

            # Add example inputs
            gr.Examples(
                examples=[
                    "What can Team A (Coders) help me with?",
                    "Create a new objective: Analyze market trends",
                    "What's the status of all teams?",
                    "Give me insights about recent developments"
                ],
                inputs=msg,
                label="Example Queries"
            )

        return interface
def create_chat_interface() -> gr.Blocks:
    """Create the Gradio chat interface."""
    chat = ChatInterface()
    ui = VentureUI(chat.process_message)
    return ui.create_interface()


# Initialize FastAPI
app = FastAPI(
    title="Advanced Agentic System",
    description="Venture Strategy Optimizer with OpenAI-compatible API",
    version="1.0.0"
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Include OpenAI-compatible routes
from api.openai_compatible import OpenAICompatibleAPI

reasoning_engine = ReasoningEngine()
openai_api = OpenAICompatibleAPI(reasoning_engine)
app.include_router(openai_api.router, tags=["OpenAI Compatible"])
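
# Illustrative only: a minimal client-side sketch of how the OpenAI-compatible
# route mounted above (/v1/chat/completions, as advertised by health_check below)
# could be exercised once the server is running on port 7860. The payload fields
# follow the standard OpenAI chat-completions convention; the exact schema accepted
# by OpenAICompatibleAPI is an assumption here. The helper is defined but never
# called by this module.
def _example_openai_compatible_request(base_url: str = "http://localhost:7860") -> Dict[str, Any]:
    payload = {
        "model": "default",  # assumed placeholder model name
        "messages": [{"role": "user", "content": "What can Team A help me with?"}]
    }
    # Reuse the module-level request timeout for the example call
    resp = requests.post(f"{base_url}/v1/chat/completions", json=payload, timeout=TIMEOUT)
    resp.raise_for_status()
    return resp.json()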
# Original API routes
# NOTE: the route decorators and the VentureAPI import below are assumptions;
# only /api/venture appears in the endpoint map returned by health_check(), so
# adjust the paths and module path to match the project's actual layout.
from api.venture_api import VentureAPI  # assumed module path within the project's `api` package


@app.get("/api/health")
async def health_check():
    """Health check endpoint."""
    return {
        "status": "healthy",
        "version": "1.0.0",
        "endpoints": {
            "openai_compatible": "/v1/chat/completions",
            "venture": "/api/venture",
            "ui": "/"
        }
    }


@app.post("/api/reason")
async def reason(query: str, context: Optional[Dict[str, Any]] = None):
    """Reasoning endpoint."""
    try:
        result = await reasoning_engine.reason(query, context or {})
        return result
    except Exception as e:
        logger.error(f"Reasoning error: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/venture")
async def analyze_venture(
    venture_type: str,
    description: str,
    metrics: Optional[Dict[str, Any]] = None
):
    """Venture analysis endpoint."""
    try:
        result = await VentureAPI(reasoning_engine).analyze_venture(
            venture_type=venture_type,
            description=description,
            metrics=metrics or {}
        )
        return result
    except Exception as e:
        logger.error(f"Analysis error: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/api/venture/types")
async def get_venture_types():
    """Get available venture types."""
    return VentureAPI(reasoning_engine).get_venture_types()
# Create Gradio interface
interface = create_chat_interface()

# Mount Gradio app onto FastAPI
app = gr.mount_gradio_app(app, interface, path="/")
| if __name__ == "__main__": | |
| # Run with uvicorn when called directly | |
| uvicorn.run( | |
| "app:app", | |
| host="0.0.0.0", | |
| port=7860, | |
| reload=True, | |
| workers=4 | |
| ) | |
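
# A minimal sketch of launching the same ASGI app from the command line instead of
# `python app.py`; this assumes the module is saved as app.py, matching the
# "app:app" import string above:
#
#   uvicorn app:app --host 0.0.0.0 --port 7860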