Abraham E. Tavarez committed · Commit 2125ce6 · Parent(s): 81917a3

Astro final challenge agent
Files changed:
- README.md +1 -1
- agents/__init__.py +5 -0
- agents/hf_code_agent.py +69 -0
- agents/hf_tool_calling_agent.py +67 -0
- agents/orchestrator_agent.py +56 -0
- app.py +6 -4
- data/__init__.py +6 -0
- data/sample_questions.py +6 -0
- requirements.txt +12 -1
- tools/__init__.py +6 -0
- tools/transcriber.py +30 -0
- tools/visit_website.py +46 -0
- tools/wikipedia_summarize.py +21 -0
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title:
+title: Astro Agent
 emoji: 🕵🏻♂️
 colorFrom: indigo
 colorTo: indigo
agents/__init__.py ADDED
@@ -0,0 +1,5 @@
+from .hf_code_agent import hf_code_agent
+from .hf_tool_calling_agent import hf_tool_calling_agent
+
+
+__all__ = ["hf_tool_calling_agent", "hf_code_agent"]
agents/hf_code_agent.py ADDED
@@ -0,0 +1,69 @@
+import sys
+import os
+
+# Add the project root to sys.path
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+
+from smolagents import (
+    HfApiModel,
+    CodeAgent,
+    load_tool,
+    Tool,
+    InferenceClientModel,
+    ToolCallingAgent,
+    FinalAnswerTool,
+    DuckDuckGoSearchTool,
+    VisitWebpageTool,
+    GoogleSearchTool,
+    PythonInterpreterTool,
+)
+from huggingface_hub import login
+from dotenv import load_dotenv
+from data.sample_questions import QUESTIONS
+
+# from tools.visit_website import VisitWebpageTool
+
+load_dotenv()
+login(os.environ["HF_API_KEY"])
+
+# Tools
+
+# wikipedia = Tool.from_langchain(load_tool("wikipedia", trust_remote_code=True))
+
+tools = [
+    # DuckDuckGoSearchTool(),
+    # VisitWebpageTool(),
+    PythonInterpreterTool(),
+    FinalAnswerTool(),
+    # wikipedia
+]
+
+question = QUESTIONS[0]
+
+# LLM model
+model = HfApiModel(
+    "deepseek-ai/DeepSeek-R1",
+    provider="together",
+    # max_tokens=40096,
+    # temperature=0.1,
+    # token=get_huggingface_token(),
+)
+
+# Code agent
+hf_code_agent = CodeAgent(
+    model=model,
+    tools=tools,
+    max_steps=20,
+    additional_authorized_imports=["pandas", "numpy", "time", "bs4"],
+    verbosity_level=2,
+    name="python_interpreter_agent",
+    description="Can run and execute Python code.",
+)
+
+# Keep rich console output narrow enough for the Space's log pane
+hf_code_agent.logger.console.width = 66
+
+
+if __name__ == "__main__":
+    answer = hf_code_agent.run(question)
+    print(answer)
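Note on the pattern above: a smolagents `CodeAgent` answers by writing and executing Python, so `additional_authorized_imports` is effectively an import allow-list for the generated code, on top of the default safe builtins. A minimal sketch of the same setup (model choice and task are illustrative, not part of this commit):

from smolagents import CodeAgent, HfApiModel, FinalAnswerTool

agent = CodeAgent(
    model=HfApiModel("Qwen/Qwen2.5-72B-Instruct"),  # placeholder model id
    tools=[FinalAnswerTool()],
    additional_authorized_imports=["pandas"],  # generated code may `import pandas`;
                                               # other non-default imports are refused
)
print(agent.run("Use pandas to compute the mean of [1, 2, 3]."))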
agents/hf_tool_calling_agent.py ADDED
@@ -0,0 +1,67 @@
+from smolagents import (
+    HfApiModel,
+    CodeAgent,
+    load_tool,
+    Tool,
+    InferenceClientModel,
+    ToolCallingAgent,
+    FinalAnswerTool,
+    DuckDuckGoSearchTool,
+    VisitWebpageTool,
+    GoogleSearchTool,
+    PythonInterpreterTool,
+)
+import os
+from huggingface_hub import login
+from dotenv import load_dotenv
+# from langchain.agents import load_tools
+from langchain_community.agent_toolkits.load_tools import load_tools
+from tools.transcriber import transcribe_audio
+from data.sample_questions import QUESTIONS
+
+load_dotenv()
+login(os.environ["HF_API_KEY"])
+
+# Tools
+
+wikipedia = Tool.from_langchain(load_tools(["wikipedia"])[0])
+
+tools = [
+    # GoogleSearchTool(),
+    DuckDuckGoSearchTool(),
+    VisitWebpageTool(),
+    PythonInterpreterTool(),
+    FinalAnswerTool(),
+    wikipedia,
+    transcribe_audio,
+]
+
+# LLM model
+model = HfApiModel(
+    "Qwen/Qwen2.5-72B-Instruct",
+    # "deepseek-ai/DeepSeek-R1",
+    provider="together",
+    # max_tokens=40096,
+    temperature=0.1,
+    # token=get_huggingface_token(),
+)
+
+# Tool-calling agent
+llm = HfApiModel("meta-llama/Llama-3.2-3B-Instruct", temperature=0)  # currently unused
+
+hf_tool_calling_agent = ToolCallingAgent(
+    model=model,
+    tools=tools,
+    max_steps=10,
+    name="web_search_tool_calling_agent",
+    description="Can perform web searches and visit websites.",
+    verbosity_level=2,
+)
+
+hf_tool_calling_agent.logger.console.width = 66
+
+if __name__ == "__main__":
+    question = QUESTIONS[0]
+    answer = hf_tool_calling_agent.run(question)
+    print(answer)
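The `Tool.from_langchain` line above is the bridge between ecosystems: `load_tools(["wikipedia"])` returns LangChain tool objects, and smolagents wraps the first one so it can sit in the same `tools` list as the native tools. A standalone sketch (assumes `langchain-community` and `wikipedia` are installed, as pinned in requirements.txt):

from langchain_community.agent_toolkits.load_tools import load_tools
from smolagents import Tool

lc_tool = load_tools(["wikipedia"])[0]    # LangChain's Wikipedia query tool
wikipedia = Tool.from_langchain(lc_tool)  # now a smolagents Tool
print(wikipedia.name, "-", wikipedia.description)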
agents/orchestrator_agent.py ADDED
@@ -0,0 +1,56 @@
+from smolagents import (
+    HfApiModel,
+    CodeAgent,
+    Tool,
+    InferenceClientModel,
+    ToolCallingAgent,
+    FinalAnswerTool,
+    DuckDuckGoSearchTool,
+    VisitWebpageTool,
+    GoogleSearchTool,
+    PythonInterpreterTool,
+)
+import os
+from huggingface_hub import login
+from dotenv import load_dotenv
+
+load_dotenv()
+login(os.environ["HF_API_KEY"])
+
+tools = [
+    # DuckDuckGoSearchTool(),
+    # VisitWebpageTool(),
+    # PythonInterpreterTool(),
+    FinalAnswerTool(),
+    # wikipedia
+]
+
+# LLM model
+model = HfApiModel(
+    "deepseek-ai/DeepSeek-R1",
+    provider="together",
+    max_tokens=8096,
+    # temperature=0.1,
+    # token=get_huggingface_token(),
+)
+
+# Managed sub-agents
+from agents.hf_code_agent import hf_code_agent
+from agents.hf_tool_calling_agent import hf_tool_calling_agent
+
+orchestrator_agent = CodeAgent(
+    model=model,
+    tools=tools,
+    managed_agents=[hf_tool_calling_agent],
+    additional_authorized_imports=["pandas", "numpy", "time"],
+    planning_interval=5,
+    verbosity_level=2,
+    # final_answer_checks=[FinalAnswerTool()],
+    max_steps=10,
+    # name="Orchestrator Agent",
+)
+
+orchestrator_agent.logger.console.width = 66
+
+if __name__ == "__main__":
+    question = ""
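How the delegation works: each agent listed in `managed_agents` is exposed to the orchestrator's generated code as a callable named after its `name` attribute, taking a task string. Roughly (the task and the generated snippet are illustrative, not from the commit):

from agents.orchestrator_agent import orchestrator_agent

answer = orchestrator_agent.run("Find Taishō Tamai's jersey number as of July 2023.")
# Internally, the orchestrator's generated Python may contain a call like:
#   result = web_search_tool_calling_agent(task="Look up Taishō Tamai's number")
print(answer)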
app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
 import requests
 import inspect
 import pandas as pd
+from agents.orchestrator_agent import orchestrator_agent
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -14,10 +15,11 @@ class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
     def __call__(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        fixed_answer = "This is a default answer."
-        print(f"Agent returning fixed answer: {fixed_answer}")
-        return fixed_answer
+        return orchestrator_agent.run(question)
+        # print(f"Agent received question (first 50 chars): {question[:50]}...")
+        # fixed_answer = "This is a default answer."
+        # print(f"Agent returning fixed answer: {fixed_answer}")
+        # return fixed_answer
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
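With this change, every question the evaluation harness sends through `BasicAgent` is answered by the orchestrator. In effect (a sketch; the question is a placeholder):

agent = BasicAgent()              # prints "BasicAgent initialized."
answer = agent("What is 2 + 2?")  # __call__ forwards to orchestrator_agent.run
print(answer)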
data/__init__.py ADDED
@@ -0,0 +1,6 @@
+from .sample_questions import QUESTIONS
+
+
+__all__ = [
+    "QUESTIONS"
+]
data/sample_questions.py ADDED
@@ -0,0 +1,6 @@
+QUESTIONS = [
+    """
+    I'm making a grocery list for my mom, but she's a professor of botany and she's a real stickler when it comes to categorizing things. I need to add different foods to different categories on the grocery list, but if I make a mistake, she won't buy anything inserted in the wrong category. Here's the list I have so far:\n\nmilk, eggs, flour, whole bean coffee, Oreos, sweet potatoes, fresh basil, plums, green beans, rice, corn, bell pepper, whole allspice, acorns, broccoli, celery, zucchini, lettuce, peanuts\n\nI need to make headings for the fruits and vegetables. Could you please create a list of just the vegetables from my list? If you could do that, then I can figure out how to categorize the rest of the list into the appropriate categories. But remember that my mom is a real stickler, so make sure that no botanical fruits end up on the vegetable list, or she won't get them when she's at the store. Please alphabetize the list of vegetables, and place each item in a comma separated list.
+    """,
+    "Who are the pitchers with the number before and after Taishō Tamai's number as of July 2023? Give them to me in the form Pitcher Before, Pitcher After, use their last names only, in Roman characters.",
+]
requirements.txt CHANGED
@@ -1,2 +1,13 @@
 gradio
-
+gradio[oauth]
+requests
+smolagents
+transformers
+python-dotenv
+duckduckgo-search
+markdownify
+pandas
+numpy
+langchain
+langchain-community
+wikipedia
tools/__init__.py ADDED
@@ -0,0 +1,6 @@
+from .transcriber import transcribe_audio
+
+
+__all__ = [
+    "transcribe_audio",
+]
tools/transcriber.py ADDED
@@ -0,0 +1,30 @@
+# Use a pipeline as a high-level helper
+from transformers import pipeline
+from smolagents import tool
+import os
+
+# print(os.getcwd() + "/audio/interview.mp3")
+transcriber_pipeline = pipeline(
+    "automatic-speech-recognition", model="facebook/wav2vec2-base-960h"
+)
+
+
+@tool
+def transcribe_audio(audio_file_path: str) -> str:
+    """Transcribe an audio file into text.
+
+    Args:
+        audio_file_path: The path to the audio file to transcribe.
+
+    Returns:
+        The transcribed text.
+    """
+    if not os.path.isfile(audio_file_path):
+        return f"Error: Audio file not found: {audio_file_path}"
+    return transcriber_pipeline(audio_file_path)["text"]
+
+
+# file = os.getcwd() + "/audio/interview.mp3"
+# result = transcribe_audio(file)
+# print(result)
+# transcribe_audio_tool = transcribe_audio.push_to_hub()
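Because `@tool` turns `transcribe_audio` into a smolagents `Tool`, the docstring's Args/Returns sections double as the schema the model sees, and the resulting object stays directly callable. Direct use mirrors the commented-out test above (the mp3 path is a placeholder):

from tools.transcriber import transcribe_audio

text = transcribe_audio("audio/interview.mp3")  # placeholder path
print(text)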
tools/visit_website.py ADDED
@@ -0,0 +1,46 @@
+from typing import Any, Optional
+from smolagents.tools import Tool
+import requests
+import markdownify
+import smolagents
+
+class VisitWebpageTool(Tool):
+    name = "visit_webpage"
+    description = "Visits a webpage at the given url and reads its content as a markdown string. Use this to browse webpages."
+    inputs = {"url": {"type": "string", "description": "The url of the webpage to visit."}}
+    output_type = "string"
+
+    def forward(self, url: str) -> str:
+        try:
+            import re
+
+            import requests
+            from markdownify import markdownify
+            from requests.exceptions import RequestException
+            from smolagents.utils import truncate_content
+        except ImportError as e:
+            raise ImportError(
+                "You must install packages `markdownify` and `requests` to run this tool: for instance run `pip install markdownify requests`."
+            ) from e
+        try:
+            # Send a GET request to the URL with a 20-second timeout
+            response = requests.get(url, timeout=20)
+            response.raise_for_status()  # Raise an exception for bad status codes
+
+            # Convert the HTML content to Markdown
+            markdown_content = markdownify(response.text).strip()
+
+            # Remove multiple line breaks
+            markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
+
+            return truncate_content(markdown_content, 10000)
+
+        except requests.exceptions.Timeout:
+            return "The request timed out. Please try again later or check the URL."
+        except RequestException as e:
+            return f"Error fetching the webpage: {str(e)}"
+        except Exception as e:
+            return f"An unexpected error occurred: {str(e)}"
+
+    def __init__(self, *args, **kwargs):
+        self.is_initialized = False
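This class follows the stock smolagents `VisitWebpageTool` export pattern (imports inside `forward`, an `__init__` that only sets `is_initialized`). It also works standalone; a quick sketch (the URL is illustrative):

from tools.visit_website import VisitWebpageTool

page = VisitWebpageTool()
md = page("https://en.wikipedia.org/wiki/Main_Page")  # fetch and convert to Markdown
print(md[:300])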
tools/wikipedia_summarize.py ADDED
@@ -0,0 +1,21 @@
+from smolagents import tool
+import requests
+
+
+@tool
+def wiki_summarize(topic: str) -> str:
+    """Get the first-paragraph summary for a Wikipedia topic.
+
+    Args:
+        topic: The Wikipedia page title to summarize.
+
+    Returns:
+        A short summary of the page.
+    """
+    url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{topic}"
+    resp = requests.get(url, timeout=20)
+    if resp.status_code == 200:
+        data = resp.json()
+        return data.get("extract", "No summary available.")
+    else:
+        return f"Error fetching Wikipedia summary (status {resp.status_code})."
+
+
+# wiki_summarize_tool = wiki_summarize.push_to_hub()
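One caveat worth knowing: the function interpolates `topic` into the REST URL without URL-encoding it, so multi-word page titles are safest written with underscores. A usage sketch (the title is an example):

from tools.wikipedia_summarize import wiki_summarize

print(wiki_summarize("Albert_Einstein"))  # prints the page's lead-section extract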