Update app.py
app.py (changed)
Lines removed from the previous version of app.py ("-" lines in the diff; "…" marks removed lines whose content the diff view does not show):

@@ -1,41 +1,49 @@
-from typing import Iterator, List, Tuple, Optional
-import time
-# --- Safety: add a check that the key is present ---
-…
-# ---
-used_model = "gemma-3-27b-it"
-#
-#
-…
-print(f"add model {used_model} complete\n")
-    Formats the chat history into a structure
-        # Skip empty messages or messages without content (just in case)

@@ -47,27 +55,30 @@
-…
-            "role": "model",
-…
-# ---
-    """
-    Removes the last user message and the last assistant response.
-    Returns the updated history and the pair that was removed (for potential redo).
-    """
-        #

@@ -77,36 +88,26 @@
-# -----------------------------
-# ---
-    """
-    Restores the last undone user/assistant pair.
-    Returns the updated history and clears the undone pair state.
-    """
-    if undone_pair:
-        undone_pair = []  #
-        print("No undone pair available to redo.")
-# -----------------------------
-# ---
-    """
-    Removes the last assistant response and returns the history ending
-    with the last user message, and the content of that user message.
-    Also returns an empty list to clear the undone_state.
-    """
-    history_for_regen = history[:]  #
-    #

@@ -114,126 +115,137 @@
-…
-        history_for_regen = history  #
-…
-    # Regeneration invalidates the previous undo
-    return history_for_regen, last_user_message_content, []  # Return the history and the user content, clear undone_state
-# ----------------------------------
-    """
-    Streams response with conversation history support for text input only.
-    (The "Thinking" logic was removed for simplicity and compatibility)
-    """
-    # --- Added a check for None or an empty string ---
-        print("Empty or None user message received. Skipping API call.")
-…
-        # messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message."))
-        yield messages  # Just return the current state
-…
-        print(f"\n=== New Request
-        print(f"User message: {user_message}") #
-…
-        print("
-…
-        #
-        yield messages #
-…
-                #
-…
-                #
-                print(f"Warning:
-…
-                messages[-1] = ChatMessage(role="assistant", content=response_buffer)
-…
-            except (AttributeError, IndexError, ValueError) as chunk_err:
-                # Catch attribute/index access errors or other chunk problems
-                print(f"Error processing chunk: {chunk_err}")
-                # Could stop or continue after adding an error message
-                    content=response_buffer + f"\n\n[
-                return  #
-        # -------------------------------------------
-            # time.sleep(0.05)  #
-            yield messages  #
-…
-        print(f"\n=== Final Response ===\n{response_buffer}")
-        # Check for an empty final response (if the model returned nothing)
-        if not response_buffer.strip() and len(messages) > 0 and messages[-1].role == "assistant":
-            messages[-1] = ChatMessage(
-                role="assistant",
-                content="[The model gave no response]"
-            )
-            yield messages
-    # --- Improved API error handling ---
-        error_message = f"
-        print(f"\n=== Error ===\n{error_message}")
-        #
-        if messages and
-            # If the last element is an empty assistant message, replace it with the error
-            # Otherwise append a new message with the error
-    # -------------------------------------
-…

@@ -241,138 +253,141 @@
-    #
-    # --------------------------------------
-        [],  #
-…
-        editable=False,
-        height=600
-    with gr.Row(equal_height=False):
-            scale=4
-…
-        submit_button = gr.Button("Submit", scale=1, variant="primary")  # Highlight the primary button
-…
-    # --- Renamed the buttons for clarity ---
-    # undo_button = test_button
-    # redo_button = test1_button
-    # regenerate_button = test2_button
-    # ----------------------------------------
-…
-    # Add example prompts
-        ["If a train leaves Chicago at 6am traveling at 60mph, and another train leaves New York at 8am traveling at 80mph, at what time will they meet?"],
-…
-        label="Examples:
-        examples_per_page=
-    #
-    msg_store = gr.State("")
-    # ---
-        user_message,
-        outputs=[input_box, chatbot, undone_state]  #
-…
-        stream_gemini_response,
-        inputs=[msg_store, chatbot],
-    # ---
-        user_message,
-        outputs=[input_box, chatbot, undone_state]
-…
-        outputs=[msg_store,
-…
-        stream_gemini_response, # 3.
-    # ---
-        lambda: ([], "", [], ""),  #
-        queue=False
-    # ---
-        outputs=[chatbot, undone_state],
-        queue=False
-    # ---
-        inputs=[chatbot, undone_state],
-        outputs=[chatbot, undone_state],
-        queue=False
-    # ---
-        regenerate_response, # 1.
-        outputs=[chatbot, msg_store, undone_state]  #
-        stream_gemini_response, # 2.
-        inputs=[msg_store, chatbot],
-    # -------------------------------------
-    **Try out the example prompts

@@ -386,11 +401,9 @@
-…
-    # demo.launch(debug=True) #
-    demo.launch(share=True) #
-…
-# -----------------------------------------------------------------

app.py after the change:
import os
import gradio as gr
from gradio import ChatMessage
from typing import Iterator, List, Tuple, Optional
import google.generativeai as genai
import time

print("import library complete")
print("add API key")

# get Gemini API Key from the environ variable
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    # If the key is not found, either raise an error or try to run without it (if the model allows it)
    print("Warning: GEMINI_API_KEY environment variable not set!")
    # raise ValueError("GEMINI_API_KEY environment variable not set!")  # Uncomment to require the key
    # In case the model in use does not require a key, or for local UI tests
    genai.configure(api_key="DUMMY_KEY_IF_NEEDED")  # Placeholder if an API key is not required for the model/library in use
else:
    genai.configure(api_key=GEMINI_API_KEY)

print("add API key complete ")
print("add model")

# --- Restore the model from the user's log ---
used_model = "gemma-3-27b-it"
# -------------------------------------------
# --- Model initialization ---
try:
    model = genai.GenerativeModel(used_model)
    print(f"add model {used_model} complete\n")
except Exception as model_init_error:
    print(f"Error initializing model {used_model}: {model_init_error}")
    # Could exit here or fall back to another model
    raise model_init_error  # Stop if the model could not be initialized
# ----------------------------

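# A minimal sketch (not part of app.py): a one-off check that the key and model configured
# above actually respond. It assumes a valid GEMINI_API_KEY; `probe` is an illustrative name only.
#
# probe = model.generate_content("Reply with the single word: ok")
# print(probe.text)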
def format_chat_history(messages: list) -> list:
    print("\nstart format history")
    """
    Formats the chat history into a structure the API expects.
    NOTE: Gemma models might expect 'assistant' role instead of 'model'.
    Adjust if you encounter issues with history context.
    """
    formatted_history = []
    for message in messages:
        content = message.get("content")
        if not content:
            continue
        # ... (lines 50-54 unchanged, not shown in the diff) ...
            "parts": [content]
            })
        elif role == "assistant":
            if not message.get("metadata"):  # Skip potential metadata-only messages
                # --- 'model' is used here - the standard for google-generativeai. ---
                # --- If Gemma requires 'assistant', change it here. ---
                formatted_history.append({
                    "role": "model",
                    "parts": [content]
                })
                # --- Alternative for Gemma, if 'model' does not work: ---
                # formatted_history.append({
                #     "role": "assistant",
                #     "parts": [content]
                # })
                # ----------------------------------------------------------
    print(f"Formatted history length: {len(formatted_history)}")
    # print(f"Formatted history content: {formatted_history}")  # Debug: inspect the history
    print("return formatted history")
    return formatted_history

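# A minimal sketch (not part of app.py) of how the formatted history is consumed by the API,
# assuming the configuration above succeeded; `example_history` and `reply` are illustrative names.
#
# example_history = format_chat_history([
#     {"role": "user", "content": "Hello"},
#     {"role": "assistant", "content": "Hi! How can I help?"},
# ])
# chat = model.start_chat(history=example_history)
# reply = chat.send_message("What did I just say?")
# print(reply.text)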
# --- Undo function ---
def undo_last(history: List[ChatMessage]) -> Tuple[List[ChatMessage], List[ChatMessage]]:
    print("\nAttempting Undo")
    undone_pair = []
    if len(history) >= 2:
        # Ensure the last two are user and assistant in that order
        if history[-2].role == "user" and history[-1].role == "assistant":
            print("Found user/assistant pair to undo.")
            undone_pair = history[-2:]
            # ... (lines 85-87 unchanged, not shown in the diff) ...
    else:
        print("Not enough messages in history to undo.")
    return history, undone_pair

# --- Redo function ---
def redo_last(history: List[ChatMessage], undone_pair: List[ChatMessage]) -> Tuple[List[ChatMessage], List[ChatMessage]]:
    print("\nAttempting Redo")
    if undone_pair and len(undone_pair) == 2:
        print("Found undone pair to redo.")
        history.extend(undone_pair)
        undone_pair = []  # Clear the state after redoing
    else:
        print("No valid undone pair available to redo.")
        undone_pair = []  # Ensure it's cleared even if invalid
    return history, undone_pair

# --- Regenerate function ---
def regenerate_response(history: List[ChatMessage]) -> Tuple[List[ChatMessage], Optional[str], List[ChatMessage]]:
    print("\nAttempting Regenerate")
    last_user_message_content = None
    history_for_regen = history[:]  # Work on a copy

    # Find the index of the last user message
    last_user_index = -1
    for i in range(len(history_for_regen) - 1, -1, -1):
        if history_for_regen[i].role == "user":
            # ... (line 114 unchanged, not shown in the diff) ...
            break

    if last_user_index != -1:
        # Check if the very next message is an assistant message (the one to remove)
        if last_user_index + 1 < len(history_for_regen) and history_for_regen[last_user_index + 1].role == "assistant":
            print(f"Found last user message at {last_user_index}, removing assistant response at {last_user_index + 1}")
            last_user_message_content = history_for_regen[last_user_index].content
            # Trim history up to and including the last user message
            history_for_regen = history_for_regen[:last_user_index + 1]
            print("Prepared history for regeneration.")
        elif last_user_index == len(history_for_regen) - 1:
            # Last message IS the user message, nothing to remove after it
            print(f"Last message is the user message at {last_user_index}. Ready for regeneration.")
            last_user_message_content = history_for_regen[last_user_index].content
            # History is already correct
        else:
            print(f"Found last user message at {last_user_index}, but the next message is not an assistant response. Cannot regenerate.")
            history_for_regen = history  # Revert to original history
            last_user_message_content = None
    else:
        print("No user message found to regenerate from.")
        history_for_regen = history  # Revert to original history

    # Regeneration invalidates the previous undo state
    return history_for_regen, last_user_message_content, []

# --- Response streaming function ---
def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
    print("start model response stream")
    if not user_message or not user_message.strip():
        print("Empty or None user message received in stream_gemini_response. Skipping API call.")
        yield messages
        return

    # Ensure messages list is not empty before formatting/sending
    if not messages:
        print("Error: Message history is empty before sending to API.")
        messages.append(ChatMessage(role="assistant", content="Error: History is empty."))
        yield messages
        return

    try:
        print(f"\n=== New Request ===")
        print(f"User message to send: '{user_message}'")  # This is the trigger message
        print(f"Current history length before API call: {len(messages)}")

        # Format history *up to the last user message*
        # The API takes the history via start_chat and the latest message via send_message
        # Important: Ensure the history sent doesn't contain the assistant message we are about to generate
        history_to_send = format_chat_history(messages[:-1] if messages[-1].role == "user" else messages)  # Send history *before* the current user trigger

        print(f"Formatted history length for API: {len(history_to_send)}")

        print("Initializing chat with history...")
        chat = model.start_chat(history=history_to_send)
        print(f"Sending message '{user_message}' to model...")
        response = chat.send_message(user_message, stream=True)

        response_buffer = ""
        # Add an empty assistant message placeholder to update
        messages.append(ChatMessage(role="assistant", content=""))
        yield messages  # Show the placeholder immediately

        print("Streaming response...")
        chunk_count = 0
        for chunk in response:
            chunk_count += 1
            try:
                # Defensive coding: Check structure exists before accessing
                current_chunk_text = ""
                if chunk.candidates and \
                   chunk.candidates[0].content and \
                   chunk.candidates[0].content.parts:
                    current_chunk_text = chunk.candidates[0].content.parts[0].text
                elif hasattr(chunk, 'text'):  # Fallback for simpler text responses
                    current_chunk_text = chunk.text

                if current_chunk_text:
                    # print(f"Chunk {chunk_count}: '{current_chunk_text}'")  # Debug chunk content
                    response_buffer += current_chunk_text
                    messages[-1] = ChatMessage(
                        role="assistant",
                        content=response_buffer
                    )
                else:
                    # Handle cases where a chunk might be empty or have unexpected structure
                    print(f"Warning: Received empty or unexpected chunk structure at chunk {chunk_count}: {chunk}")

            except (AttributeError, IndexError, ValueError, StopIteration) as chunk_err:
                print(f"Error processing chunk {chunk_count}: {chunk_err}")
                print(f"Problematic chunk data: {chunk}")
                messages[-1] = ChatMessage(
                    role="assistant",
                    content=response_buffer + f"\n\n[Error processing response part: {chunk_err}]"
                )
                yield messages
                return  # Stop streaming on chunk error

            # time.sleep(0.05)  # Optional delay for debugging
            yield messages  # Update UI with the latest buffer content

        print(f"\n=== Final Response ({chunk_count} chunks received) ===\n{response_buffer}")
        if chunk_count == 0 or not response_buffer.strip():
            # Handle case where the stream finished but buffer is empty
            print("Warning: Stream finished but response buffer is empty.")
            if messages and messages[-1].role == "assistant":
                messages[-1] = ChatMessage(role="assistant", content="[Model provided no response]")
        yield messages

    except Exception as e:
        error_message = f"Error calling Gemini API: {str(e)}"
        print(f"\n=== API Error ===\n{error_message}")
        # Attempt to add or replace the last message with the error
        if messages and messages[-1].role == "assistant" and messages[-1].content == "":
            messages[-1] = ChatMessage(role="assistant", content=error_message)
        else:
            messages.append(ChatMessage(role="assistant", content=error_message))
        yield messages

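# A minimal sketch (not part of app.py) of driving the generator above outside Gradio.
# It assumes GEMINI_API_KEY is set so the module-level genai/model setup succeeded;
# the names `demo_history` and `snapshot` are illustrative only.
#
# if __name__ == "__main__":
#     demo_history = [ChatMessage(role="user", content="Say hello in one word.")]
#     for snapshot in stream_gemini_response("Say hello in one word.", demo_history):
#         pass  # each snapshot is the full message list, with the assistant reply built up so far
#     print(demo_history[-1].content)  # final assistant text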
# --- Function for adding the user message ---
def user_message(msg: str, history: list) -> tuple[str, list, list]:
    """Adds user message to chat history and clears the undone state."""
    print(f"\nUser message added: '{msg}'")
    if msg and msg.strip():  # Only add non-empty messages
        history.append(ChatMessage(role="user", content=msg))
    else:
        print("Skipping empty user message.")
    # New user message invalidates previous undo state
    return "", history, []  # Return empty string to clear textbox, updated history, empty undone_state

# ==================================
# --- Gradio Interface Definition ---
# ==================================
with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
    gr.Markdown("# Chat with " + used_model)

    # ... (line 252 unchanged, not shown in the diff) ...
        <img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FGemini-2&countColor=%23263759" />
        </a>""")

    # State for Undo/Redo
    undone_state = gr.State([])

    chatbot = gr.Chatbot(
        [],  # Start empty
        type="messages",
        label=used_model + " Chatbot (Streaming Output)",
        render_markdown=True,
        scale=1,
        editable=False,  # Keep False to avoid conflicts with history management
        avatar_images=(None,"https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu"),
        height=600
    )

    with gr.Row(equal_height=False):
        input_box = gr.Textbox(
            lines=1,
            label="Chat Message",
            placeholder="Type your message here...",
            scale=4,
            container=False  # Better alignment potentially
        )

        with gr.Column(scale=1, min_width=150):
            submit_button = gr.Button("Submit", scale=1, variant="primary")
    with gr.Row():
        undo_button = gr.Button("Undo", scale=1)
        redo_button = gr.Button("Redo", scale=1)
        regenerate_button = gr.Button("Regenerate", scale=1)
        clear_button = gr.Button("Clear Chat", scale=1)

    # Examples
    example_prompts = [
        ["Write a short poem about the sunset."],
        ["Explain the theory of relativity in simple terms."],
        ["Summarize the plot of Hamlet."],
        ["Write a haiku about a cat."]
    ]
    gr.Examples(
        examples=example_prompts,
        inputs=input_box,
        label="Examples:",
        examples_per_page=4
    )

    # State to hold the message content for streaming
    msg_store = gr.State("")

    # --- Event Handlers ---

    # --- Helper function to get last user message content ---
    def get_last_user_message(history: List[ChatMessage]) -> Optional[str]:
        if history and history[-1].role == "user":
            return history[-1].content
        print("Warning: Could not find last user message in history for get_last_user_message")
        return None  # Return None if history is empty or last message isn't user

    # --- Handler for Enter key in input_box ---
    input_box.submit(
        user_message,
        inputs=[input_box, chatbot],
        outputs=[input_box, chatbot, undone_state]  # Clears input, updates history, clears undo
    ).then(
        # No positional lambda here! Use fn=
        fn=get_last_user_message,  # 2. Get content of the message just added
        inputs=[chatbot],
        outputs=[msg_store],  # Store it for streaming
        queue=False  # Getting content is fast
    ).then(
        stream_gemini_response,  # 3. Stream response using stored message and updated history
        inputs=[msg_store, chatbot],
        outputs=[chatbot]
        # queue=True by default for potentially long API calls
    )

    # --- Handler for Submit button click ---
    submit_button.click(
        user_message,
        inputs=[input_box, chatbot],
        outputs=[input_box, chatbot, undone_state]  # Clears input, updates history, clears undo
    ).then(
        # No positional lambda here! Use fn=
        fn=get_last_user_message,  # 2. Get content of the message just added
        inputs=[chatbot],
        outputs=[msg_store],  # Store it for streaming
        queue=False  # Getting content is fast
    ).then(
        stream_gemini_response,  # 3. Stream response using stored message and updated history
        inputs=[msg_store, chatbot],
        outputs=[chatbot]
        # queue=True by default
    )

    # --- Handler for Clear button ---
    clear_button.click(
        lambda: ([], "", [], ""),  # Clear chatbot, input_box, msg_store, undone_state
        outputs=[chatbot, input_box, msg_store, undone_state],
        queue=False
    )

    # --- Handler for Undo button ---
    undo_button.click(
        undo_last,
        inputs=[chatbot],
        outputs=[chatbot, undone_state],
        queue=False
    )

    # --- Handler for Redo button ---
    redo_button.click(
        redo_last,
        inputs=[chatbot, undone_state],
        outputs=[chatbot, undone_state],
        queue=False
    )

    # --- Handler for Regenerate button ---
    regenerate_button.click(
        regenerate_response,  # 1. Prepare history, get last user msg, clear undo
        inputs=[chatbot],
        outputs=[chatbot, msg_store, undone_state]  # Update history, put user msg in store, clear undo
        # queue=False  # Preparation should be fast
    ).then(
        stream_gemini_response,  # 2. Stream new response using msg from store and prepared history
        inputs=[msg_store, chatbot],
        outputs=[chatbot]
        # queue=True by default
    )

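    # A minimal sketch (not part of app.py) of what the Regenerate chain above does, in plain Python,
    # using the functions defined earlier; `history` and `last_msg` are illustrative names only.
    #
    # history, last_msg, _ = regenerate_response(history)   # drop the old assistant reply
    # if last_msg:
    #     for snapshot in stream_gemini_response(last_msg, history):
    #         pass                                          # stream the replacement reply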
    gr.Markdown(
        """
        <br><br><br>
        ---
        ### About this Chatbot
        **Try out the example prompts!**
        **Key Features:**
        * Powered by Google's **""" + used_model + """** model.
        * Supports **conversation history**, **undo**, **redo**, and **regenerate**.
        ... (lines 394-400 unchanged, not shown in the diff) ...
        """
    )

# Launch the interface
if __name__ == "__main__":
    print("Starting Gradio Interface...")
    # demo.launch(debug=True) # For detailed debugging
    # demo.launch(share=True) # To get a public link
    demo.launch()  # Standard local launch