Spaces:
Runtime error
Runtime error
avinash
committed on
Commit
·
cf929e7
1
Parent(s):
e5ce70e
added mcp logic
Browse files- __pycache__/app.cpython-313.pyc +0 -0
- app.py +75 -14
- requirements.txt +3 -1
__pycache__/app.cpython-313.pyc
ADDED
Binary file (7.43 kB). View file
|
|
app.py
CHANGED
@@ -1,26 +1,64 @@
|
|
1 |
import gradio as gr
|
2 |
from dotenv import load_dotenv
|
3 |
from openai import OpenAI
|
|
|
4 |
import os
|
|
|
|
|
|
|
5 |
|
|
|
6 |
load_dotenv()
|
7 |
|
8 |
API_KEY = os.getenv("OPENAI_API_KEY")
|
9 |
MODEL_NAME = os.getenv("MODEL_NAME")
|
10 |
BASE_URL = os.getenv("BASE_URL")
|
|
|
11 |
|
12 |
client = OpenAI(
|
13 |
api_key=API_KEY,
|
14 |
base_url=BASE_URL,
|
15 |
)
|
16 |
|
|
|
|
|
|
|
|
|
|
|
17 |
|
18 |
-
|
19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
for user_msg, bot_msg in history:
|
21 |
messages.append({"role": "user", "content": user_msg})
|
22 |
messages.append({"role": "assistant", "content": bot_msg})
|
23 |
-
|
|
|
|
|
|
|
|
|
|
|
24 |
|
25 |
try:
|
26 |
response = client.chat.completions.create(
|
@@ -29,29 +67,52 @@ def chat_with_gemma(history, message):
|
|
29 |
temperature=0.7,
|
30 |
max_tokens=512,
|
31 |
)
|
|
|
32 |
reply = response.choices[0].message.content.strip()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
except Exception as e:
|
34 |
reply = f"β οΈ Error: {str(e)}"
|
35 |
|
36 |
-
# Return updated history
|
37 |
history.append((message, reply))
|
38 |
-
return history, ""
|
39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
40 |
|
41 |
-
with gr.Blocks(title="Gemma Chat - MCP Tool") as demo:
|
42 |
-
gr.Markdown("## π§ Gemma LLM Chatbot via Modal (MCP Tool)")
|
43 |
with gr.Row():
|
44 |
-
chatbot = gr.Chatbot()
|
|
|
45 |
with gr.Row():
|
46 |
-
msg = gr.Textbox(label="
|
|
|
|
|
47 |
with gr.Row():
|
48 |
-
clear = gr.Button("π Clear Chat")
|
49 |
|
50 |
state = gr.State([])
|
51 |
|
52 |
-
msg.submit(chat_with_gemma, [state, msg], [chatbot, msg])
|
53 |
-
clear.click(lambda: ([], ""), outputs=[chatbot, msg])
|
54 |
|
55 |
-
# β
Launch the app
|
56 |
if __name__ == "__main__":
|
57 |
-
demo.launch(
|
|
|
# Standard library.
import json
import os
import re

# Third party: HTTP client, UI framework, .env loader, web search, LLM client.
import requests
import gradio as gr
from dotenv import load_dotenv
from duckduckgo_search import DDGS
from openai import OpenAI

# Pull configuration from a local .env file (no-op if the file is absent).
load_dotenv()

# OpenAI-compatible endpoint settings for the Gemma deployment.
API_KEY = os.getenv("OPENAI_API_KEY")
MODEL_NAME = os.getenv("MODEL_NAME")
BASE_URL = os.getenv("BASE_URL")
# HTTP service that returns a caption for a posted image URL (see analyze_image).
IMAGE_SEARCH_ENDPOINT = os.getenv("IMAGE_SEARCH_ENDPOINT")

client = OpenAI(
    api_key=API_KEY,
    base_url=BASE_URL,
)
# MCP TOOL: Search via DuckDuckGo
def search(query: str) -> list[dict]:
    """Run a DuckDuckGo text search for *query* and return up to 5 result dicts."""
    with DDGS() as ddg_session:
        hits = ddg_session.text(query, max_results=5)
    return hits
# MCP TOOL: Analyze Image URL to get caption for further searching
def analyze_image(data: str) -> str:
    """POST an image URL to the captioning service and return the caption.

    Args:
        data: Publicly reachable image URL.

    Returns:
        The caption string on success, otherwise a human-readable error
        message — this function never raises; all failures come back as text.
    """
    try:
        # NOTE(review): IMAGE_SEARCH_ENDPOINT comes from the environment and
        # may be None if unset; requests.post would then raise, caught below.
        # The timeout is the fix: without it a dead endpoint hangs the whole
        # Gradio handler indefinitely.
        response = requests.post(
            IMAGE_SEARCH_ENDPOINT, json={"image_url": data}, timeout=30
        )
        if response.status_code == 200:
            return response.json().get("caption", "No caption found")
        else:
            return f"Image analysis failed: {response.status_code}"
    except Exception as e:
        return f"Error during image analysis: {str(e)}"
# Helper to extract tool_code from model response
def extract_tool_code(text):
    """Return the code inside a ```tool_code``` fenced block, or None.

    Bug fix: the pattern previously used a raw string containing ``\\n``,
    which matches a literal backslash followed by 'n' — never an actual
    newline — so every tool call in the model's reply was silently ignored.
    """
    match = re.search(r"```tool_code\n(.*?)```", text, re.DOTALL)
    return match.group(1).strip() if match else None
# Helper to format tool output back to model
def format_tool_output(output):
    """Serialise *output* as JSON and wrap it in a ```tool_output``` fence."""
    payload = json.dumps(output)
    return "```tool_output\n" + payload + "\n```"
# CHAT HANDLER
def chat_with_gemma(history, message, image_url):
    """Run one chat turn: build context, query the model, execute any tool call.

    Args:
        history: List of (user, assistant) tuples held in gr.State.
        message: The user's new text input.
        image_url: Optional image URL; when non-empty it is captioned via
            analyze_image and prepended to the message text.

    Returns:
        (updated history, "", "") — the two empty strings clear both textboxes.
    """
    messages = [
        {"role": "system", "content": "You are a helpful assistant who helps users find products online using search and image analysis. Wrap tool use in ```tool_code``` and return results in ```tool_output```."} ]
    # Replay prior turns so the model sees the full conversation.
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})

    if image_url:
        # Caption the image so the model can reason about it as text.
        image_caption = analyze_image(image_url)
        message = f"Image URL: {image_url}\nCaption: {image_caption}\nUser says: {message}"

    messages.append({"role": "user", "content": message})

    try:
        response = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            temperature=0.7,
            max_tokens=512,
        )
        reply = response.choices[0].message.content.strip()

        tool_code = extract_tool_code(reply)
        if tool_code:
            # SECURITY: evaluating model output is code injection waiting to
            # happen. Restrict the namespace to the two known tools with no
            # builtins so model-generated code cannot reach os/import/open.
            # Even so, a real parser/dispatcher should replace eval entirely.
            tool_result = eval(
                tool_code,
                {"__builtins__": {}},
                {"search": search, "analyze_image": analyze_image},
            )
            tool_output = format_tool_output(tool_result)

            # Feed the tool result back so the model can compose a final answer.
            messages.append({"role": "user", "content": tool_output})

            response2 = client.chat.completions.create(
                model=MODEL_NAME,
                messages=messages,
                temperature=0.7,
                max_tokens=512,
            )
            reply = response2.choices[0].message.content.strip()

    except Exception as e:
        reply = f"β οΈ Error: {str(e)}"

    history.append((message, reply))
    return history, "", ""
# GRADIO UI
with gr.Blocks(title="π§ Gemma Product Finder - MCP Tool", theme=gr.themes.Soft()) as demo:
    # Page banner.
    gr.Markdown("""
    <h1 style='text-align: center; color: #4e73df;'>ποΈ Gemma Product Finder</h1>
    <p style='text-align: center; color: #6c757d;'>Find Amazon & Flipkart products with AI</p>
    """)

    # Conversation view.
    with gr.Row():
        chat_window = gr.Chatbot(height=420, label="π§ Chat with Gemma", bubble_full_width=False)

    # Text prompt plus optional image-URL input.
    with gr.Row():
        user_box = gr.Textbox(label="π¬ Ask something", placeholder="e.g. Red Nike shoes under 4000", scale=3)
        image_box = gr.Textbox(label="π Optional image URL", placeholder="Paste image URL here", scale=2)

    with gr.Row():
        clear_btn = gr.Button("π Clear Chat", variant="secondary")

    # Server-side conversation history.
    chat_state = gr.State([])

    # Enter in the textbox runs a turn; clear resets chat and both inputs.
    user_box.submit(chat_with_gemma, [chat_state, user_box, image_box], [chat_window, user_box, image_box])
    clear_btn.click(lambda: ([], "", ""), outputs=[chat_window, user_box, image_box])

if __name__ == "__main__":
    # mcp_server=True also exposes the app's functions as MCP tools.
    demo.launch(mcp_server=True)
requirements.txt
CHANGED
@@ -1,4 +1,6 @@
|
|
1 |
gradio[mcp]
|
2 |
modal
|
3 |
openai
|
4 |
-
python-dotenv
|
|
|
|
|
|
1 |
gradio[mcp]
|
2 |
modal
|
3 |
openai
|
4 |
+
python-dotenv
|
5 |
+
duckduckgo_search
|
6 |
+
requests
|