import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from duckduckgo_search import DDGS
import os
import requests
import json
import re
# Load environment variables
load_dotenv()
API_KEY = os.getenv("OPENAI_API_KEY")
MODEL_NAME = os.getenv("MODEL_NAME")
BASE_URL = os.getenv("BASE_URL")
IMAGE_SEARCH_ENDPOINT = os.getenv("IMAGE_SEARCH_ENDPOINT")
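# Example .env for the variables read above (a sketch; all values are
# placeholders, and the model/endpoint names depend on whichever
# OpenAI-compatible backend and captioning service you point this app at):
#
#   OPENAI_API_KEY=sk-your-key-here
#   MODEL_NAME=your-gemma-model-id
#   BASE_URL=https://your-openai-compatible-endpoint/v1
#   IMAGE_SEARCH_ENDPOINT=https://your-captioning-service/analyze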
client = OpenAI(
    api_key=API_KEY,
    base_url=BASE_URL,
)
# MCP TOOL: Search via DuckDuckGo
def search(query: str) -> list[dict]:
    """Run a DuckDuckGo text search and return up to 5 results."""
    with DDGS() as ddgs:
        results = ddgs.text(query, max_results=5)
    return results
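# Each item returned by DDGS.text() is typically a dict with keys such as
# "title", "href", and "body"; the exact shape can vary across
# duckduckgo_search versions.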
# MCP TOOL: Analyze Image URL to get caption for further searching
def analyze_image(data: str) -> str:
    """Send an image URL to the captioning endpoint and return its caption (or an error string)."""
    try:
        response = requests.post(IMAGE_SEARCH_ENDPOINT, json={"image_url": data}, timeout=30)
        if response.status_code == 200:
            return response.json().get("caption", "No caption found")
        else:
            return f"Image analysis failed: {response.status_code}"
    except Exception as e:
        return f"Error during image analysis: {str(e)}"
# Helper to extract tool_code from model response
def extract_tool_code(text):
    match = re.search(r"```tool_code\n(.*?)```", text, re.DOTALL)
    return match.group(1).strip() if match else None
# Helper to format tool output back to model
def format_tool_output(output):
    return f"```tool_output\n{json.dumps(output)}\n```"
# CHAT HANDLER
def chat_with_gemma(history, message, image_url):
    messages = [
        {"role": "system", "content": "You are a helpful assistant who helps users find products online using search and image analysis. Wrap tool use in ```tool_code``` and return results in ```tool_output```."}
    ]
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    if image_url:
        image_caption = analyze_image(image_url)
        message = f"Image URL: {image_url}\nCaption: {image_caption}\nUser says: {message}"
    messages.append({"role": "user", "content": message})
    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=0.7,
            max_tokens=512,
        )
        reply = response.choices[0].message.content.strip()
        tool_code = extract_tool_code(reply)
        if tool_code:
            tool_result = eval(tool_code)  # Note: Only safe in dev/testing
            tool_output = format_tool_output(tool_result)
            messages.append({"role": "user", "content": tool_output})
            response2 = client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                temperature=0.7,
                max_tokens=512,
            )
            reply = response2.choices[0].message.content.strip()
    except Exception as e:
        reply = f"⚠️ Error: {str(e)}"
    history.append((message, reply))
    return history, "", ""
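# OPTIONAL (a sketch, not wired into chat_with_gemma above): instead of eval(),
# tool calls of the simple form tool_name("single string argument") could be
# parsed and dispatched only to whitelisted functions. The names below
# (TOOL_REGISTRY, dispatch_tool_call) are illustrative additions, not part of
# the original app.
TOOL_REGISTRY = {"search": search, "analyze_image": analyze_image}

def dispatch_tool_call(tool_code: str):
    # Match e.g. search("red nike shoes under 4000")
    match = re.match(r'^(\w+)\(\s*["\'](.*)["\']\s*\)\s*$', tool_code.strip())
    if not match:
        return {"error": f"Unrecognized tool call: {tool_code}"}
    name, arg = match.group(1), match.group(2)
    if name not in TOOL_REGISTRY:
        return {"error": f"Unknown tool: {name}"}
    return TOOL_REGISTRY[name](arg)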
# GRADIO UI
with gr.Blocks(title="Gemma Product Finder - MCP Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    <h1 style='text-align: center; color: #4e73df;'>Gemma Product Finder</h1>
    <p style='text-align: center; color: #6c757d;'>Find Amazon & Flipkart products with AI</p>
    """)
    with gr.Row():
        chatbot = gr.Chatbot(height=420, label="Chat with Gemma", bubble_full_width=False)
    with gr.Row():
        msg = gr.Textbox(label="Ask something", placeholder="e.g. Red Nike shoes under 4000", scale=3)
        image_url = gr.Textbox(label="Optional image URL", placeholder="Paste image URL here", scale=2)
    with gr.Row():
        clear = gr.Button("Clear Chat", variant="secondary")
    state = gr.State([])
    msg.submit(chat_with_gemma, [state, msg, image_url], [chatbot, msg, image_url])
    # Reset the stored history as well as the visible widgets when clearing
    clear.click(lambda: ([], [], "", ""), outputs=[chatbot, state, msg, image_url])
if __name__ == "__main__":
    demo.launch(mcp_server=True)
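# Running this file directly launches the Gradio app. With mcp_server=True,
# Gradio also exposes the app's event handlers over the Model Context
# Protocol so MCP clients can call them as tools; this assumes a recent
# Gradio 5.x install with the MCP extra (e.g. pip install "gradio[mcp]").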