import asyncio
import os
import time
from typing import Dict, List

import gradio as gr
import pandas as pd
import requests
from dotenv import load_dotenv
from langchain_community.chat_models import ChatHuggingFace
from langchain_community.llms import HuggingFaceEndpoint
from langchain_core.messages import AIMessage, HumanMessage

from tools import (absolute, add, divide, exponential, floor_divide,
                   get_current_time_in_timezone, logarithm, modulus, multiply,
                   power, roman_calculator_converter, square_root, subtract,
                   web_search)
# --- Constants ---
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
MAX_AGENT_ITERATIONS = 15
MAX_CONCURRENT_REQUESTS = 5  # Limit concurrent requests to avoid overwhelming the API

load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN") or os.getenv("HF_TOKEN")

# Quick check that a token is available in the environment.
print(f"Available env vars: {[k for k in os.environ.keys() if 'TOKEN' in k or 'HF' in k]}")

# Global cache for answers
answer_cache = {}
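
# Both run paths below call make_request_with_retry(), which the original file
# never defined. A minimal sketch follows; the retry count and linear backoff
# schedule are assumptions, not part of the original code.
def make_request_with_retry(url, method="get", json_data=None, timeout=15, retries=3):
    """GET or POST `url`, retrying on transient request errors with linear backoff."""
    last_error = None
    for attempt in range(1, retries + 1):
        try:
            if method.lower() == "post":
                response = requests.post(url, json=json_data, timeout=timeout)
            else:
                response = requests.get(url, timeout=timeout)
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            last_error = e
            print(f"Request attempt {attempt}/{retries} failed: {e}")
            if attempt < retries:
                time.sleep(2 * attempt)  # back off before retrying
    raise last_error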

class ImprovedAgent:
    def __init__(self):
        if not HUGGINGFACEHUB_API_TOKEN:
            raise ValueError("Missing Hugging Face API token. Please set HUGGINGFACEHUB_API_TOKEN.")
        print("ImprovedAgent initialized.")

        # Initialize the LLM with conservative generation parameters
        self.llm = HuggingFaceEndpoint(
            repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
            huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
            temperature=0.1,  # Lower temperature for more consistent responses
            max_new_tokens=1024,
            timeout=30,
        )
        self.chat = ChatHuggingFace(llm=self.llm, verbose=False)

        # Initialize tools
        self.tools = [
            multiply, add, subtract, power, divide, modulus,
            square_root, floor_divide, absolute, logarithm,
            exponential, web_search, roman_calculator_converter,
            get_current_time_in_timezone,
        ]
        self.chat_with_tools = self.chat.bind_tools(self.tools)
        print(f"Total tools available: {len(self.tools)}")

        # Create a tool mapping for easier access
        self.tool_map = {tool.name: tool for tool in self.tools}
    def _extract_tool_calls(self, response) -> List[Dict]:
        """Extract tool calls from the response."""
        tool_calls = []
        if hasattr(response, 'tool_calls') and response.tool_calls:
            for tool_call in response.tool_calls:
                tool_calls.append({
                    'name': tool_call['name'],
                    'args': tool_call['args'],
                })
        return tool_calls

    def _execute_tool_calls(self, tool_calls: List[Dict]) -> List[str]:
        """Execute tool calls and return their results as strings."""
        results = []
        for tool_call in tool_calls:
            tool_name = tool_call['name']
            tool_args = tool_call['args']
            if tool_name in self.tool_map:
                try:
                    tool = self.tool_map[tool_name]
                    result = tool.invoke(tool_args)
                    results.append(f"Tool {tool_name} result: {result}")
                except Exception as e:
                    results.append(f"Tool {tool_name} error: {str(e)}")
            else:
                results.append(f"Unknown tool: {tool_name}")
        return results
    async def answer(self, question: str) -> str:
        """Answer a question, executing tool calls for up to a few iterations."""
        print(f"Processing question: {question[:100]}...")
        try:
            # System prompt for better instruction following
            system_prompt = """You are a helpful AI assistant with access to various tools.
When answering questions, use the appropriate tools when needed and provide clear, concise answers.
If you need to perform calculations, use the math tools available.
If you need current information, use the web search tool.
Always provide a final answer after using tools."""

            messages = [
                HumanMessage(content=f"{system_prompt}\n\nQuestion: {question}")
            ]

            # Initial response
            response = await asyncio.to_thread(self.chat_with_tools.invoke, messages)

            # Handle tool calls if present
            max_iterations = 3
            iteration = 0
            while iteration < max_iterations:
                tool_calls = self._extract_tool_calls(response)
                if not tool_calls:
                    break

                # Execute tool calls
                tool_results = self._execute_tool_calls(tool_calls)

                # Add tool results to the conversation
                messages.append(AIMessage(content=response.content))
                messages.append(HumanMessage(content=f"Tool results: {'; '.join(tool_results)}. Please provide a final answer based on these results."))

                # Get the next response
                response = await asyncio.to_thread(self.chat_with_tools.invoke, messages)
                iteration += 1

            # Extract the final answer
            final_answer = response.content.strip()

            # Clean up the response: remove any tool-call artifacts
            if "Tool " in final_answer and "result:" in final_answer:
                # Keep only the last non-empty line that is not a tool result
                lines = final_answer.split('\n')
                for line in reversed(lines):
                    if line.strip() and not line.startswith('Tool ') and 'result:' not in line:
                        final_answer = line.strip()
                        break

            return final_answer
        except Exception as e:
            print(f"Error in answer method: {e}")
            return f"Error processing question: {str(e)}"
    def answer_sync(self, question: str) -> str:
        """Synchronous wrapper around the async answer method."""
        try:
            return asyncio.run(self.answer(question))
        except Exception as e:
            print(f"Error in sync answer: {e}")
            return f"Error: {str(e)}"

async def process_questions_batch(agent, questions_batch, semaphore):
    """Process a batch of questions, rate-limited by the shared semaphore."""
    results = []

    async def process_single_question(task_id, question):
        async with semaphore:
            try:
                # Check the cache first
                cache_key = f"{task_id}_{hash(question)}"
                if cache_key in answer_cache:
                    print(f"Using cached answer for task {task_id}")
                    return task_id, question, answer_cache[cache_key], None
                answer = await agent.answer(question)
                # Cache the result
                answer_cache[cache_key] = answer
                return task_id, question, answer, None
            except Exception as e:
                print(f"Error processing task {task_id}: {e}")
                return task_id, question, None, str(e)

    # Build one task per valid question
    tasks = []
    for item in questions_batch:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if task_id and question_text is not None:
            tasks.append(process_single_question(task_id, question_text))

    if tasks:
        results = await asyncio.gather(*tasks, return_exceptions=True)
    return results

async def run_agent_async_improved(agent, questions_data):
    """Improved async processing with batching and caching."""
    results_log, answers_payload = [], []

    # Semaphore shared across all batches for rate limiting
    semaphore = asyncio.Semaphore(MAX_CONCURRENT_REQUESTS)

    # Process questions in batches
    batch_size = 10
    batches = [questions_data[i:i + batch_size] for i in range(0, len(questions_data), batch_size)]
    print(f"Processing {len(questions_data)} questions in {len(batches)} batches...")

    for i, batch in enumerate(batches):
        print(f"Processing batch {i+1}/{len(batches)} ({len(batch)} questions)...")
        try:
            batch_results = await process_questions_batch(agent, batch, semaphore)
            for result in batch_results:
                if isinstance(result, Exception):
                    print(f"Batch processing error: {result}")
                    continue
                task_id, question, answer, error = result
                if error:
                    print(f"Error in task {task_id}: {error}")
                    results_log.append({
                        "Task ID": task_id,
                        "Question": question[:100] + "..." if len(question) > 100 else question,
                        "Submitted Answer": f"ERROR: {error}"
                    })
                else:
                    answers_payload.append({"task_id": task_id, "submitted_answer": answer})
                    results_log.append({
                        "Task ID": task_id,
                        "Question": question[:100] + "..." if len(question) > 100 else question,
                        "Submitted Answer": answer[:200] + "..." if len(answer) > 200 else answer
                    })
            # Small delay between batches to be respectful to the API
            if i < len(batches) - 1:
                await asyncio.sleep(1)
        except Exception as e:
            print(f"Error processing batch {i+1}: {e}")
            # Continue with the next batch
            continue

    return results_log, answers_payload

def cache_answers(profile: gr.OAuthProfile | None):
    """Run the agent on all questions and cache answers without submitting."""
    if not profile:
        return "Please log in to Hugging Face first.", None
    username = profile.username
    print(f"Caching answers for user: {username}")

    # Fetch questions
    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        if not questions_data:
            return "No questions found.", None
        print(f"Fetched {len(questions_data)} questions for caching.")

        # Initialize the agent
        try:
            agent = ImprovedAgent()
        except Exception as e:
            print(f"Full error details: {e}")
            return f"Error initializing agent: {e}", None

        # Process all questions
        results_log, answers_payload = asyncio.run(run_agent_async_improved(agent, questions_data))

        # Store in the global cache, keyed by username
        answer_cache[f"user_{username}"] = answers_payload

        status = f"Cached {len(answers_payload)} answers for user {username}. Ready to submit!"
        results_df = pd.DataFrame(results_log)
        return status, results_df
    except Exception as e:
        print(f"Error caching answers: {e}")
        return f"Error caching answers: {e}", None

def submit_cached_answers(profile: gr.OAuthProfile | None):
    """Submit previously cached answers."""
    if not profile:
        return "Please log in to Hugging Face first.", None
    username = profile.username
    cache_key = f"user_{username}"

    if cache_key not in answer_cache:
        return "No cached answers found. Please run 'Cache Answers' first.", None
    answers_payload = answer_cache[cache_key]
    if not answers_payload:
        return "No answers to submit.", None

    # Get space info
    space_id = os.getenv("SPACE_ID")
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main" if space_id else "Unknown"

    # Submit
    api_url = DEFAULT_API_URL
    submit_url = f"{api_url}/submit"
    submission_data = {
        "username": username.strip(),
        "agent_code": agent_code,
        "answers": answers_payload
    }
    try:
        print(f"Submitting {len(answers_payload)} cached answers...")
        response = requests.post(submit_url, json=submission_data, timeout=60)
        response.raise_for_status()
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        # Clear the cache after a successful submission
        if cache_key in answer_cache:
            del answer_cache[cache_key]
        return final_status, None
    except Exception as e:
        print(f"Submission error: {e}")
        return f"Submission failed: {e}", None

def run_and_submit_all(profile: gr.OAuthProfile | None):
    """
    Fetches all questions, runs the ImprovedAgent on them, submits all answers,
    and displays the results.
    """
    # --- Determine HF Space Runtime URL and Repo URL ---
    space_id = os.getenv("SPACE_ID")  # SPACE_ID is used to link back to the code

    if profile:
        username = profile.username
        print(f"User logged in: {username}")
    else:
        print("User not logged in.")
        return "Please login to Hugging Face with the button.", None

    api_url = DEFAULT_API_URL
    questions_url = f"{api_url}/questions"
    submit_url = f"{api_url}/submit"

    # 1. Instantiate the agent (modify this part to create your own agent)
    try:
        agent = ImprovedAgent()
    except Exception as e:
        print(f"Error instantiating agent: {e}")
        return f"Error initializing agent: {e}", None
    # When the app runs as a Hugging Face Space, this link points to your codebase
    # (useful for others, so please keep it public)
    agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
    print(agent_code)

    # 2. Fetch Questions
    print(f"Fetching questions from: {questions_url}")
    try:
        # Use the retry helper instead of a direct request
        response = make_request_with_retry(questions_url)
        questions_data = response.json()
        if not questions_data:
            print("Fetched questions list is empty.")
            return "Fetched questions list is empty or invalid format.", None
        print(f"Fetched {len(questions_data)} questions.")
    except requests.exceptions.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
    except requests.exceptions.JSONDecodeError as e:
        print(f"Error decoding JSON response from questions endpoint: {e}")
        print(f"Response text: {response.text[:500]}")
        return f"Error decoding server response for questions: {e}", None
    except Exception as e:
        print(f"An unexpected error occurred fetching questions: {e}")
        return f"An unexpected error occurred fetching questions: {e}", None

    # 3. Run your Agent
    results_log = []
    answers_payload = []
    print(f"Running agent on {len(questions_data)} questions...")
    for item in questions_data:
        task_id = item.get("task_id")
        question_text = item.get("question")
        if not task_id or question_text is None:
            print(f"Skipping item with missing task_id or question: {item}")
            continue
        try:
            # ImprovedAgent has no __call__, so use the synchronous wrapper
            submitted_answer = agent.answer_sync(question_text)
            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
        except Exception as e:
            print(f"Error running agent on task {task_id}: {e}")
            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})

    if not answers_payload:
        print("Agent did not produce any answers to submit.")
        return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)

    # 4. Prepare Submission
    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
    print(status_update)

    # 5. Submit
    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
    try:
        # Use the retry helper for submission as well
        response = make_request_with_retry(submit_url, method="post", json_data=submission_data, timeout=60)
        result_data = response.json()
        final_status = (
            f"Submission Successful!\n"
            f"User: {result_data.get('username')}\n"
            f"Overall Score: {result_data.get('score', 'N/A')}% "
            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
            f"Message: {result_data.get('message', 'No message received.')}"
        )
        print("Submission successful.")
        results_df = pd.DataFrame(results_log)
        return final_status, results_df
    except requests.exceptions.HTTPError as e:
        error_detail = f"Server responded with status {e.response.status_code}."
        try:
            error_json = e.response.json()
            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
        except requests.exceptions.JSONDecodeError:
            error_detail += f" Response: {e.response.text[:500]}"
        status_message = f"Submission Failed: {error_detail}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.Timeout:
        status_message = "Submission Failed: The request timed out."
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except requests.exceptions.RequestException as e:
        status_message = f"Submission Failed: Network error - {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df
    except Exception as e:
        status_message = f"An unexpected error occurred during submission: {e}"
        print(status_message)
        results_df = pd.DataFrame(results_log)
        return status_message, results_df

# --- Build Gradio Interface using Blocks ---
with gr.Blocks() as demo:
    gr.Markdown("# Basic Agent Evaluation Runner")
    gr.Markdown(
        """
        **Instructions:**

        1. Clone this space, then modify the code to define your agent's logic, the tools, the necessary packages, etc.
        2. Log in to your Hugging Face account using the button below. Your HF username is used for submission.
        3. Click 'Run Evaluation & Submit All Answers' to fetch questions, run your agent, submit answers, and see the score.

        ---
        **Disclaimers:**
        Once you click the "submit" button, it can take quite some time (this is the agent working through all the questions).
        This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance, to avoid the long-running submit button, you could cache the answers and submit them in a separate action, or even answer the questions asynchronously.
        """
    )

    gr.LoginButton()

    run_button = gr.Button("Run Evaluation & Submit All Answers")

    status_output = gr.Textbox(label="Run Status / Submission Result", lines=5, interactive=False)
    results_table = gr.DataFrame(label="Questions and Agent Answers", wrap=True)

    run_button.click(
        fn=run_and_submit_all,
        outputs=[status_output, results_table]
    )
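
    # Optional two-step workflow wired to the caching helpers defined above
    # (a sketch: cache answers first, then submit in a separate action, as the
    # disclaimer suggests). The one-click button above still works on its own.
    cache_button = gr.Button("Cache Answers (no submission)")
    submit_cached_button = gr.Button("Submit Cached Answers")
    cache_button.click(fn=cache_answers, outputs=[status_output, results_table])
    submit_cached_button.click(fn=submit_cached_answers, outputs=[status_output, results_table])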

if __name__ == "__main__":
    print("\n" + "-"*30 + " App Starting " + "-"*30)
    # Check for SPACE_HOST and SPACE_ID at startup for information
    space_host_startup = os.getenv("SPACE_HOST")
    space_id_startup = os.getenv("SPACE_ID")  # Get SPACE_ID at startup

    if space_host_startup:
        print(f"✅ SPACE_HOST found: {space_host_startup}")
        print(f"   Runtime URL should be: https://{space_host_startup}.hf.space")
    else:
        print("ℹ️  SPACE_HOST environment variable not found (running locally?).")

    if space_id_startup:  # Print repo URLs if SPACE_ID is found
        print(f"✅ SPACE_ID found: {space_id_startup}")
        print(f"   Repo URL: https://huggingface.co/spaces/{space_id_startup}")
        print(f"   Repo Tree URL: https://huggingface.co/spaces/{space_id_startup}/tree/main")
    else:
        print("ℹ️  SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")

    print("-"*(60 + len(" App Starting ")) + "\n")
    print("Launching Gradio Interface for Basic Agent Evaluation...")
    demo.launch(debug=True, share=False)