Update app.py
app.py CHANGED
@@ -1,8 +1,11 @@
 # app.py
 # Hugging Face Space: Gradio app that chats about Gradio docs via the Gradio Docs MCP server.
-#
-#
-#
+# Requirements:
+# - gradio
+# - huggingface_hub
+#
+# Space secret needed:
+# - HUGGING_FACE_HUB_TOKEN (or HF_TOKEN)
 
 import os
 import asyncio
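The new header documents the dependencies and the required Space secret. As a side note, a minimal sketch of how such a token is typically resolved at startup; the two variable names come from the comment above, while the lookup order and the error message are assumptions:

```python
import os

# Assumption: prefer HUGGING_FACE_HUB_TOKEN and fall back to HF_TOKEN,
# matching the two names listed in the header comment.
HF_TOKEN = os.environ.get("HUGGING_FACE_HUB_TOKEN") or os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise RuntimeError("Missing Space secret: set HUGGING_FACE_HUB_TOKEN or HF_TOKEN.")
```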
@@ -70,23 +73,25 @@ async def ensure_init():
 
 
 # ----------------------------
-#
+# Helpers for messages
 # ----------------------------
-def
+def to_llm_messages(history_msgs: List[Dict[str, Any]], user_msg: str) -> List[Dict[str, Any]]:
     """
-    Convert
+    Convert Chatbot messages list (role/content dicts) to the LLM format,
+    with a system message prepended and the new user message appended.
     """
-
-
-
-
-
-
-
-
-
-
-
+    msgs: List[Dict[str, Any]] = [{"role": "system", "content": SYSTEM_PROMPT}]
+    # Keep only role/content keys
+    for m in history_msgs:
+        role = m.get("role")
+        content = m.get("content")
+        if role in ("user", "assistant") and isinstance(content, str):
+            msgs.append({"role": role, "content": content})
+    msgs.append({"role": "user", "content": user_msg})
+    return msgs
+
+
+async def stream_answer(messages_for_llm: List[Dict[str, Any]]) -> Iterable[str]:
     """
     Stream deltas and tool logs from MCPClient.process_single_turn_with_tools.
     """
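To make the helper concrete, a hypothetical call showing what `to_llm_messages` returns (`SYSTEM_PROMPT` is defined earlier in app.py; the history values here are made up):

```python
history = [
    {"role": "user", "content": "What is gr.Blocks?"},
    {"role": "assistant", "content": "A flexible layout and event API."},
]
msgs = to_llm_messages(history, "How do I stream a reply?")
# msgs == [
#     {"role": "system", "content": SYSTEM_PROMPT},
#     {"role": "user", "content": "What is gr.Blocks?"},
#     {"role": "assistant", "content": "A flexible layout and event API."},
#     {"role": "user", "content": "How do I stream a reply?"},
# ]
```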
@@ -102,38 +107,26 @@ async def stream_answer(messages: List[Dict[str, Any]]) -> Iterable[str]:
         return
 
     try:
-
-        async for chunk in client.process_single_turn_with_tools(messages):
+        async for chunk in client.process_single_turn_with_tools(messages_for_llm):
             # chunk is a dict describing text deltas and tool activity
             if isinstance(chunk, dict):
                 ctype = chunk.get("type")
                 if ctype == "tool_log":
                     name = chunk.get("tool", "tool")
                     status = chunk.get("status", "")
-
-                    partial += text
-                    yield text
+                    yield f"\n\n_(using **{name}** {status})_"
                 elif ctype == "text_delta":
-
-                    partial += delta
-                    yield delta
+                    yield chunk.get("delta", "")
                 elif ctype == "text":
-
-                    partial += text
-                    yield text
+                    yield chunk.get("text", "")
                 elif ctype == "tool_result":
                     content = chunk.get("content")
                     if isinstance(content, str) and content.strip():
-
-                        partial += text
-                        yield text
+                        yield f"\n\n**Result:**\n{content}"
             else:
                 # Fallback if provider yields plain strings
-
-                partial += s
-                yield s
+                yield str(chunk)
     except Exception as e:
-        # Surface common errors nicely
         msg = str(e)
         if "401" in msg or "Unauthorized" in msg:
             yield (
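The dispatch loop above expects dict chunks keyed by `type` (`tool_log`, `text_delta`, `text`, `tool_result`). A small hypothetical stub that mimics those shapes can exercise the loop without a live MCP connection:

```python
import asyncio

async def fake_turn(_messages):
    # Hypothetical stub emitting the chunk shapes stream_answer handles.
    yield {"type": "tool_log", "tool": "search_docs", "status": "started"}
    yield {"type": "text_delta", "delta": "Streaming "}
    yield {"type": "text_delta", "delta": "works."}
    yield {"type": "tool_result", "content": "...docs snippet..."}

async def demo():
    async for chunk in fake_turn([]):
        print(chunk)

asyncio.run(demo())
```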
@@ -146,12 +139,6 @@ async def stream_answer(messages: List[Dict[str, Any]]) -> Iterable[str]:
         yield f"❌ Error: {msg}"
 
 
-async def respond(user_msg: str, history: List[List[str]]):
-    messages = to_messages(history, user_msg)
-    async for piece in stream_answer(messages):
-        yield piece
-
-
 # ----------------------------
 # Gradio UI
 # ----------------------------
@@ -161,11 +148,11 @@ with gr.Blocks(fill_height=True) as demo:
         "Ask anything about **Gradio**. Answers are grounded in the official docs via MCP."
     )
 
+    # Using the new messages format
     chat = gr.Chatbot(
         label="Gradio Docs Assistant",
         height=520,
-        type="messages",
-        avatar_images=(None, None)
+        type="messages",  # expects a list of dicts: {"role": "...", "content": "..."}
     )
 
     with gr.Row():
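With `type="messages"`, the Chatbot value is a flat list of role/content dicts rather than the legacy list of `[user, bot]` pairs, which is why the submit handler below receives and yields dicts:

```python
# Legacy tuples format (pre-"messages"):
history_tuples = [["Hi", "Hello!"]]

# "messages" format used by this app:
history_messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]
```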
@@ -179,18 +166,26 @@ with gr.Blocks(fill_height=True) as demo:
     with gr.Row():
         clear = gr.ClearButton(components=[chat, msg], value="Clear")
         info = gr.Markdown(
-            f"**Model:** `{MODEL_ID}` · **Provider:** `{PROVIDER}` · "
-            f"**MCP:** Gradio Docs SSE",
-            elem_classes=["text-sm", "opacity-70"],
+            f"**Model:** `{MODEL_ID}` · **Provider:** `{PROVIDER}` · **MCP:** Gradio Docs SSE",
         )
 
-    async def on_submit(user_msg: str,
-
-
-
-
-
-
+    async def on_submit(user_msg: str, history_msgs: List[Dict[str, Any]]):
+        """
+        history_msgs is a list of {"role": ..., "content": ...} dicts.
+        We append the user's message, then stream the assistant reply by
+        updating the last assistant message's content.
+        """
+        history_msgs = (history_msgs or []) + [{"role": "user", "content": user_msg}]
+        # Add a placeholder assistant message to stream into
+        history_msgs.append({"role": "assistant", "content": ""})
+        yield history_msgs
+
+        # Build LLM messages (drop the new user msg and placeholder: to_llm_messages re-appends the user msg)
+        messages_for_llm = to_llm_messages(history_msgs[:-2], user_msg)
+        async for delta in stream_answer(messages_for_llm):
+            # Append the delta to the last assistant message
+            history_msgs[-1]["content"] += delta
+            yield history_msgs
 
     # Wire both Enter and button click
     msg.submit(on_submit, inputs=[msg, chat], outputs=chat, queue=True)
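The hunk ends at the Enter-key wiring; per the comment, the send button is wired the same way just below. A sketch, with the button's variable name (`send`) assumed since its definition falls outside the visible hunks:

```python
# Hypothetical: "send" is an assumed name for the button created in the earlier gr.Row().
send.click(on_submit, inputs=[msg, chat], outputs=chat, queue=True)
```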