import json

import gradio as gr
import requests

from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel

from final_answer import FinalAnswerTool

# Endpoints of the Agents Course Unit 4 scoring API.
api_url = "https://agents-course-unit4-scoring.hf.space"
questions_url = f"{api_url}/questions"
submit_url = f"{api_url}/submit"

class BasicAgent:
    """Placeholder agent from the course template: it ignores the question and
    returns a fixed answer. It is not used by run_once below."""

    def __init__(self):
        print("BasicAgent initialized.")

    def __call__(self, question: str) -> str:
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        fixed_answer = "This is a default answer."
        print(f"Agent returning fixed answer: {fixed_answer}")
        return fixed_answer

def load_questions_from_file(filepath="questions.json"):
    """Load cached questions from a local JSON file.

    Returns a (status_message, questions_data) tuple; questions_data is None
    when the file is missing, empty, or invalid.
    """
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            questions_data = json.load(f)
        if not questions_data:
            print("Loaded file is empty.")
            return "Loaded file is empty.", None
        print(f"Loaded {len(questions_data)} questions from file.")
        return "Loaded questions successfully.", questions_data
    except FileNotFoundError:
        print("File not found. Please run the API fetch first.")
        return "File not found.", None
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        return f"Error decoding JSON: {e}", None
    except Exception as e:
        print(f"Unexpected error: {e}")
        return f"Unexpected error: {e}", None
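
# load_questions_from_file() expects a cached questions.json, but nothing in this
# script writes that file. The helper below is a minimal sketch of the fetch step;
# the name fetch_questions_from_api is not part of the original script, and it
# assumes the /questions endpoint returns a JSON payload that can be cached as-is.
def fetch_questions_from_api(filepath="questions.json"):
    """Fetch questions from the scoring API and cache them locally (sketch)."""
    try:
        response = requests.get(questions_url, timeout=15)
        response.raise_for_status()
        questions_data = response.json()
        with open(filepath, "w", encoding="utf-8") as f:
            json.dump(questions_data, f, ensure_ascii=False, indent=2)
        print(f"Fetched {len(questions_data)} questions and saved them to {filepath}.")
        return "Fetched questions successfully.", questions_data
    except requests.RequestException as e:
        print(f"Error fetching questions: {e}")
        return f"Error fetching questions: {e}", None
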
# Model and tools for the CodeAgent that actually answers questions.
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

final_answer = FinalAnswerTool()
duckDuckGoSearch = DuckDuckGoSearchTool()

agent_codeagent = CodeAgent(
    model=model,
    tools=[final_answer, duckDuckGoSearch],
    max_steps=3,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
)

def run_once(state):
    """Run the agent on the first cached question, at most once per session.

    `state` is the gr.State value: None until the agent has run, then the last output.
    """
    if state is not None:
        return "Already run once. Refresh to rerun.", state

    status_message, questions_data = load_questions_from_file()
    if questions_data is None or len(questions_data) == 0:
        return "No questions found or failed to load.", None

    question = questions_data[0]
    question_text = question["question"]
    task_id = question["task_id"]
    print(f"\nTask ID: {task_id}")
    print(f"Question: {question_text}")

    try:
        # .run() is the documented smolagents entry point for executing a task.
        answer = agent_codeagent.run(question_text)
        output = f"Answer to task {task_id}:\n{answer}"
        return output, output
    except Exception as e:
        return f"Error running agent: {e}", None
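
# submit_url is defined above but the script never posts anything to it. The helper
# below is a hedged sketch of that submission step: submit_answers is not part of
# the original script, and the payload fields (username, agent_code, and the answers
# list of {"task_id", "submitted_answer"} objects) are an assumption about what the
# scoring endpoint expects, so verify them against the course API before relying on it.
def submit_answers(username, agent_code_url, answers):
    """POST collected answers to the scoring API and return its JSON response (sketch)."""
    payload = {
        "username": username,          # assumed field: Hugging Face username
        "agent_code": agent_code_url,  # assumed field: link to the Space's code
        "answers": answers,            # assumed shape: [{"task_id": ..., "submitted_answer": ...}]
    }
    try:
        response = requests.post(submit_url, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        print(f"Error submitting answers: {e}")
        return None
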
# Minimal Gradio UI: one button that triggers a single agent run per session.
with gr.Blocks() as demo:
    gr.Markdown("## Run AI Agent Once")

    output_text = gr.Textbox(label="Agent Output", lines=10)
    run_button = gr.Button("Run Agent")
    state = gr.State()

    run_button.click(fn=run_once, inputs=state, outputs=[output_text, state])

demo.launch()