Update app.py

app.py CHANGED
@@ -3,6 +3,8 @@ import time
 import streamlit as st
 from smolagents import CodeAgent, HfApiModel, tool
 from huggingface_hub import InferenceClient
+import requests
+from bs4 import BeautifulSoup
 
 # Retrieve Hugging Face token
 hf_token = os.getenv("HF_TOKEN")
@@ -15,20 +17,56 @@ client = InferenceClient(token=hf_token)
 # Custom tools for SmolAgents
 @tool
 def search_harry_potter_lore(query: str) -> str:
-    """Search for Harry Potter-related lore or facts
+    """Search for Harry Potter-related lore or facts across the entire Harry Potter Fandom site.
+
     Args:
         query: A specific question or topic about Harry Potter lore.
+
     Returns:
         A concise and informative response based on the query.
     """
-
-    #
-
-
-
-
-
-
+    headers = {"User-Agent": "Mozilla/5.0"}
+    # Construct the search URL for the Harry Potter Fandom site.
+    search_url = f"https://harrypotter.fandom.com/wiki/Special:Search?query={query}"
+
+    try:
+        # Fetch the search results page.
+        search_response = requests.get(search_url, headers=headers)
+        if search_response.status_code != 200:
+            return f"Error: Received status code {search_response.status_code} from search."
+
+        search_soup = BeautifulSoup(search_response.text, 'html.parser')
+
+        # Look for the first link that appears to be an article.
+        article_url = None
+        for a in search_soup.find_all("a", href=True):
+            href = a["href"]
+            # We want links that start with /wiki/ but skip those that contain "Special:"
+            if href.startswith("/wiki/") and "Special:" not in href:
+                article_url = "https://harrypotter.fandom.com" + href
+                break
+
+        if not article_url:
+            return "No results found for your query."
+
+        # Fetch the article page.
+        article_response = requests.get(article_url, headers=headers)
+        if article_response.status_code != 200:
+            return f"Error: Received status code {article_response.status_code} from the article page."
+
+        article_soup = BeautifulSoup(article_response.text, 'html.parser')
+
+        # Extract the first meaningful paragraph.
+        paragraphs = article_soup.find_all("p")
+        for p in paragraphs:
+            text = p.get_text().strip()
+            if len(text) > 50:  # A simple threshold to ensure the paragraph is informative.
+                return text
+
+        return "Couldn't extract detailed lore from the article."
+
+    except Exception as e:
+        return f"An error occurred: {str(e)}"
 
 # Initialize the SmolAgent model
 model = HfApiModel(model_id="meta-llama/Llama-3.2-3B-Instruct", token=hf_token)
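For context on how the new tool gets invoked, a minimal sketch of the agent wiring that the imports and the model line imply is below. The CodeAgent construction, the example prompt, and the st.write call are assumptions about the rest of app.py, not part of this diff.

# Hypothetical wiring, inferred from the imports above; not shown in this commit.
agent = CodeAgent(tools=[search_harry_potter_lore], model=model)

# Example query an app like this might forward from the Streamlit UI.
answer = agent.run("Who forged the Sword of Gryffindor?")
st.write(answer)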
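One caveat worth flagging on the new tool: the raw query is interpolated directly into the search URL, and neither requests.get call sets a timeout, so a stalled Fandom response can hang the Space. A sketch of the more defensive pattern, using only the standard library's urllib.parse; the 10-second timeout is an arbitrary choice, not something this commit specifies.

from urllib.parse import quote_plus

import requests

query = "Elder Wand"  # example input; in the tool this is the function argument
headers = {"User-Agent": "Mozilla/5.0"}

# Percent-encode the query so spaces and punctuation survive the URL.
search_url = "https://harrypotter.fandom.com/wiki/Special:Search?query=" + quote_plus(query)

# Bound the network call so a stalled response raises instead of blocking forever.
search_response = requests.get(search_url, headers=headers, timeout=10)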