zelk12 committed on
Commit 8e0ac9f · verified · 1 Parent(s): 282078c

Update app.py

Files changed (1)
  1. app.py +216 -307
app.py CHANGED
@@ -1,409 +1,318 @@
  import os
  import gradio as gr
  from gradio import ChatMessage
- from typing import Iterator, List, Tuple, Optional
  import google.generativeai as genai
- import time

  print("import library complete")
  print("add API key")

  # get Gemini API Key from the environ variable
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
- if not GEMINI_API_KEY:
-     # If the key is not found, either raise an error or try to run without it (if the model allows that)
-     print("Warning: GEMINI_API_KEY environment variable not set!")
-     # raise ValueError("GEMINI_API_KEY environment variable not set!")  # Uncomment to require the key
-     # In case a model that does not require a key is used, or for local UI tests
-     genai.configure(api_key="DUMMY_KEY_IF_NEEDED")  # Stub for when the API key is optional for the model/library in use
- else:
-     genai.configure(api_key=GEMINI_API_KEY)

  print("add API key complete ")
  print("add model")

- # --- Restore the model named in the user's log ---
  used_model = "gemma-3-27b-it"
- # -------------------------------------------
- # --- Model initialization ---
- try:
-     model = genai.GenerativeModel(used_model)
-     print(f"add model {used_model} complete\n")
- except Exception as model_init_error:
-     print(f"Error initializing model {used_model}: {model_init_error}")
-     # We could exit here or fall back to a spare model
-     raise model_init_error  # Abort if the model could not be initialized
- # ----------------------------

  def format_chat_history(messages: list) -> list:
      print("\nstart format history")
      """
-     Formats the chat history into a structure the API expects.
-     NOTE: Gemma models might expect the 'assistant' role instead of 'model'.
-     Adjust if you encounter issues with history context.
      """
      formatted_history = []
      for message in messages:
-         content = message.get("content")
-         if not content:
-             continue
-
-         role = message.get("role")
-         if role == "user":
              formatted_history.append({
-                 "role": "user",
-                 "parts": [content]
              })
-         elif role == "assistant":
-             if not message.get("metadata"):  # Skip potential metadata-only messages
-                 # --- 'model' is the standard role for google-generativeai. ---
-                 # --- If Gemma requires 'assistant', change it here. ---
-                 formatted_history.append({
-                     "role": "model",
-                     "parts": [content]
-                 })
-                 # --- Alternative for Gemma, if 'model' does not work: ---
-                 # formatted_history.append({
-                 #     "role": "assistant",
-                 #     "parts": [content]
-                 # })
-                 # ----------------------------------------------------------
-     print(f"Formatted history length: {len(formatted_history)}")
-     # print(f"Formatted history content: {formatted_history}")  # Debug: inspect the history
      print("return formatted history")
      return formatted_history
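  # Illustrative shape of the returned structure (assumed, matching the
  # google-generativeai chat-history format):
  #     [{"role": "user", "parts": ["Hi"]},
  #      {"role": "model", "parts": ["Hello! How can I help?"]}]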

- # --- Undo function ---
- def undo_last(history: List[ChatMessage]) -> Tuple[List[ChatMessage], List[ChatMessage]]:
-     print("\nAttempting Undo")
-     undone_pair = []
-     if len(history) >= 2:
-         # Ensure the last two are user and assistant, in that order
-         if history[-2].role == "user" and history[-1].role == "assistant":
-             print("Found user/assistant pair to undo.")
-             undone_pair = history[-2:]
-             history = history[:-2]
-         else:
-             print("Last two messages are not a user/assistant pair. Cannot undo.")
-     else:
-         print("Not enough messages in history to undo.")
-     return history, undone_pair
-
- # --- Redo function ---
- def redo_last(history: List[ChatMessage], undone_pair: List[ChatMessage]) -> Tuple[List[ChatMessage], List[ChatMessage]]:
-     print("\nAttempting Redo")
-     if undone_pair and len(undone_pair) == 2:
-         print("Found undone pair to redo.")
-         history.extend(undone_pair)
-         undone_pair = []  # Clear the state after redoing
-     else:
-         print("No valid undone pair available to redo.")
-         undone_pair = []  # Ensure it is cleared even if invalid
-     return history, undone_pair
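-
- # Illustrative state flow: undo_last pops the trailing (user, assistant) pair
- # into undone_pair; redo_last appends that pair back and clears the stored
- # state, so a second redo is a no-op.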
-
- # --- Regenerate function ---
- def regenerate_response(history: List[ChatMessage]) -> Tuple[List[ChatMessage], Optional[str], List[ChatMessage]]:
-     print("\nAttempting Regenerate")
-     last_user_message_content = None
-     history_for_regen = history[:]  # Work on a copy
-
-     # Find the index of the last user message
-     last_user_index = -1
-     for i in range(len(history_for_regen) - 1, -1, -1):
-         if history_for_regen[i].role == "user":
-             last_user_index = i
-             break
-
-     if last_user_index != -1:
-         # Check whether the very next message is an assistant message (the one to remove)
-         if last_user_index + 1 < len(history_for_regen) and history_for_regen[last_user_index + 1].role == "assistant":
-             print(f"Found last user message at {last_user_index}, removing assistant response at {last_user_index + 1}")
-             last_user_message_content = history_for_regen[last_user_index].content
-             # Trim history up to and including the last user message
-             history_for_regen = history_for_regen[:last_user_index + 1]
-             print("Prepared history for regeneration.")
-         elif last_user_index == len(history_for_regen) - 1:
-             # The last message IS the user message; nothing to remove after it
-             print(f"Last message is the user message at {last_user_index}. Ready for regeneration.")
-             last_user_message_content = history_for_regen[last_user_index].content
-             # History is already correct
-         else:
-             print(f"Found last user message at {last_user_index}, but the next message is not an assistant response. Cannot regenerate.")
-             history_for_regen = history  # Revert to the original history
-             last_user_message_content = None
-     else:
-         print("No user message found to regenerate from.")
-         history_for_regen = history  # Revert to the original history
-
-     # Regeneration invalidates the previous undo state
-     return history_for_regen, last_user_message_content, []
-
- # --- Response streaming function ---
  def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
      print("start model response stream")
-     if not user_message or not user_message.strip():
-         print("Empty or None user message received in stream_gemini_response. Skipping API call.")
          yield messages
          return

-     # Ensure the messages list is not empty before formatting/sending
-     if not messages:
-         print("Error: Message history is empty before sending to API.")
-         messages.append(ChatMessage(role="assistant", content="Error: History is empty."))
-         yield messages
-         return
-
      try:
-         print(f"\n=== New Request ===")
-         print(f"User message to send: '{user_message}'")  # This is the trigger message
-         print(f"Current history length before API call: {len(messages)}")
-
-         # Format the history *up to the last user message*.
-         # The API takes the history via start_chat and the latest message via send_message.
-         # Important: ensure the history sent does not contain the assistant message we are about to generate.
-         history_to_send = format_chat_history(messages[:-1] if messages[-1].role == "user" else messages)  # Send the history *before* the current user trigger
-
-         print(f"Formatted history length for API: {len(history_to_send)}")
-
-         print("Initializing chat with history...")
-         chat = model.start_chat(history=history_to_send)
-         print(f"Sending message '{user_message}' to model...")
          response = chat.send_message(user_message, stream=True)

          response_buffer = ""
-         # Add an empty assistant message placeholder to update
-         messages.append(ChatMessage(role="assistant", content=""))
-         yield messages  # Show the placeholder immediately

-         print("Streaming response...")
-         chunk_count = 0
          for chunk in response:
-             chunk_count += 1
-             try:
-                 # Defensive coding: check that the structure exists before accessing it
-                 current_chunk_text = ""
-                 if chunk.candidates and \
-                    chunk.candidates[0].content and \
-                    chunk.candidates[0].content.parts:
-                     current_chunk_text = chunk.candidates[0].content.parts[0].text
-                 elif hasattr(chunk, 'text'):  # Fallback for simpler text responses
-                     current_chunk_text = chunk.text
-
-                 if current_chunk_text:
-                     # print(f"Chunk {chunk_count}: '{current_chunk_text}'")  # Debug chunk content
-                     response_buffer += current_chunk_text
-                     messages[-1] = ChatMessage(
                          role="assistant",
                          content=response_buffer
                      )
-                 else:
-                     # Handle cases where a chunk is empty or has an unexpected structure
-                     print(f"Warning: Received empty or unexpected chunk structure at chunk {chunk_count}: {chunk}")
-
-             except (AttributeError, IndexError, ValueError, StopIteration) as chunk_err:
-                 print(f"Error processing chunk {chunk_count}: {chunk_err}")
-                 print(f"Problematic chunk data: {chunk}")
-                 messages[-1] = ChatMessage(
-                     role="assistant",
-                     content=response_buffer + f"\n\n[Error processing response part: {chunk_err}]"
-                 )
-                 yield messages
-                 return  # Stop streaming on chunk error
-
-             # time.sleep(0.05)  # Optional delay for debugging
-             yield messages  # Update the UI with the latest buffer content
-
-         print(f"\n=== Final Response ({chunk_count} chunks received) ===\n{response_buffer}")
-         if chunk_count == 0 or not response_buffer.strip():
-             # Handle the case where the stream finished but the buffer is empty
-             print("Warning: Stream finished but the response buffer is empty.")
-             if messages and messages[-1].role == "assistant":
-                 messages[-1] = ChatMessage(role="assistant", content="[Model provided no response]")
-             yield messages

      except Exception as e:
-         error_message = f"Error calling Gemini API: {str(e)}"
-         print(f"\n=== API Error ===\n{error_message}")
-         # Try to replace the empty placeholder with the error, or append it
-         if messages and messages[-1].role == "assistant" and messages[-1].content == "":
-             messages[-1] = ChatMessage(role="assistant", content=error_message)
-         else:
-             messages.append(ChatMessage(role="assistant", content=error_message))
          yield messages
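-
- # Minimal sketch of the streaming contract assumed above (google-generativeai):
- #     chat = model.start_chat(history=[{"role": "user", "parts": ["Hi"]}])
- #     for chunk in chat.send_message("Hello", stream=True):
- #         print(chunk.text)  # each chunk carries an incremental slice of the reply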

- # --- Function that appends the user message ---
- def user_message(msg: str, history: list) -> tuple[str, list, list]:
-     """Adds the user message to the chat history and clears the undone state."""
-     print(f"\nUser message added: '{msg}'")
-     if msg and msg.strip():  # Only add non-empty messages
-         history.append(ChatMessage(role="user", content=msg))
-     else:
-         print("Skipping empty user message.")
-     # A new user message invalidates the previous undo state
-     return "", history, []  # Return an empty string to clear the textbox, the updated history, and an empty undone_state
-
-
- # ==================================
- # --- Gradio Interface Definition ---
- # ==================================
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:
      gr.Markdown("# Chat with " + used_model)

      gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FGemini-2">
      <img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FGemini-2&countColor=%23263759" />
      </a>""")

-     # State for Undo/Redo
-     undone_state = gr.State([])
-
      chatbot = gr.Chatbot(
-         [],  # Start empty
          type="messages",
-         label=used_model + " Chatbot (Streaming Output)",
          render_markdown=True,
          scale=1,
-         editable=False,  # Keep False to avoid conflicts with history management
-         avatar_images=(None, "https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu"),
-         height=600
      )

-     with gr.Row(equal_height=False):
          input_box = gr.Textbox(
              lines=1,
              label="Chat Message",
              placeholder="Type your message here...",
-             scale=4,
-             container=False  # Potentially better alignment
          )

-         with gr.Column(scale=1, min_width=150):
-             submit_button = gr.Button("Submit", scale=1, variant="primary")
-             with gr.Row():
-                 undo_button = gr.Button("Undo", scale=1)
-                 redo_button = gr.Button("Redo", scale=1)
-             regenerate_button = gr.Button("Regenerate", scale=1)
-             clear_button = gr.Button("Clear Chat", scale=1)

-     # Examples
      example_prompts = [
          ["Write a short poem about the sunset."],
          ["Explain the theory of relativity in simple terms."],
          ["Summarize the plot of Hamlet."],
          ["Write a haiku about a cat."]
      ]
      gr.Examples(
          examples=example_prompts,
          inputs=input_box,
-         label="Examples:",
-         examples_per_page=4
      )

-     # State to hold the message content for streaming
-     msg_store = gr.State("")
-
-     # --- Event Handlers ---

-     # --- Helper function to get the last user message's content ---
-     def get_last_user_message(history: List[ChatMessage]) -> Optional[str]:
-         if history and history[-1].role == "user":
-             return history[-1].content
-         print("Warning: Could not find last user message in history for get_last_user_message")
-         return None  # Return None if the history is empty or the last message is not a user message
-
-     # --- Handler for the Enter key in input_box ---
      input_box.submit(
-         user_message,
-         inputs=[input_box, chatbot],
-         outputs=[input_box, chatbot, undone_state]  # Clears input, updates history, clears undo
      ).then(
-         # No positional lambda here! Use fn=
-         fn=get_last_user_message,  # 2. Get the content of the message just added
-         inputs=[chatbot],
-         outputs=[msg_store],  # Store it for streaming
-         queue=False  # Getting the content is fast
      ).then(
-         stream_gemini_response,  # 3. Stream the response using the stored message and updated history
          inputs=[msg_store, chatbot],
-         outputs=[chatbot]
-         # queue=True by default for potentially long API calls
      )
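-
-     # Note on the chain: user_message runs first and appends the turn,
-     # get_last_user_message then copies it into msg_store, and
-     # stream_gemini_response is a generator, so Gradio renders each
-     # `yield messages` as an incremental Chatbot update.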

-     # --- Handler for the Submit button click ---
      submit_button.click(
-         user_message,
-         inputs=[input_box, chatbot],
-         outputs=[input_box, chatbot, undone_state]  # Clears input, updates history, clears undo
      ).then(
-         # No positional lambda here! Use fn=
-         fn=get_last_user_message,  # 2. Get the content of the message just added
-         inputs=[chatbot],
-         outputs=[msg_store],  # Store it for streaming
-         queue=False  # Getting the content is fast
      ).then(
-         stream_gemini_response,  # 3. Stream the response using the stored message and updated history
          inputs=[msg_store, chatbot],
-         outputs=[chatbot]
-         # queue=True by default
      )

-     # --- Handler for the Clear button ---
      clear_button.click(
-         lambda: ([], "", [], ""),  # Clear chatbot, input_box, msg_store, undone_state
-         outputs=[chatbot, input_box, msg_store, undone_state],
-         queue=False
-     )
-
-     # --- Handler for the Undo button ---
-     undo_button.click(
-         undo_last,
-         inputs=[chatbot],
-         outputs=[chatbot, undone_state],
          queue=False
      )

-     # --- Handler for the Redo button ---
-     redo_button.click(
-         redo_last,
-         inputs=[chatbot, undone_state],
-         outputs=[chatbot, undone_state],
-         queue=False
-     )
-
-     # --- Handler for the Regenerate button ---
-     regenerate_button.click(
-         regenerate_response,  # 1. Prepare the history, get the last user message, clear undo
-         inputs=[chatbot],
-         outputs=[chatbot, msg_store, undone_state]  # Update history, put the user message in the store, clear undo
-         # queue=False  # Preparation should be fast
-     ).then(
-         stream_gemini_response,  # 2. Stream a new response using the stored message and prepared history
-         inputs=[msg_store, chatbot],
-         outputs=[chatbot]
-         # queue=True by default
-     )
-
-     gr.Markdown(
          """
-         <br><br><br>
          ---
          ### About this Chatbot
-         **Try out the example prompts!**
          **Key Features:**
-         * Powered by Google's **""" + used_model + """** model.
-         * Supports **conversation history**, **undo**, **redo**, and **regenerate**.
-         * Uses **streaming** for responses.
          **Instructions:**
-         1. Type your message or select an example.
-         2. Press Enter or click Submit.
-         3. Use Undo/Redo to correct mistakes or explore alternatives.
-         4. Use Regenerate to get a different response to your last message.
-         5. Use Clear Chat to start over.
          """
      )

  # Launch the interface
  if __name__ == "__main__":
-     print("Starting Gradio Interface...")
-     # demo.launch(debug=True)  # For detailed debugging
-     # demo.launch(share=True)  # To get a public link
-     demo.launch()  # Standard local launch
 
  import os
  import gradio as gr
  from gradio import ChatMessage
+ from typing import Iterator
  import google.generativeai as genai
+ import time  # Imported for an optional debugging delay while streaming

  print("import library complete")
  print("add API key")

  # get Gemini API Key from the environ variable
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+ genai.configure(api_key=GEMINI_API_KEY)

  print("add API key complete ")
  print("add model")

  used_model = "gemma-3-27b-it"
+
+ # we will be using the Gemma 3 27B instruction-tuned model
+ model = genai.GenerativeModel(used_model)
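+ # Note: constructing GenerativeModel is assumed to be a purely local operation
+ # in google-generativeai; an invalid model name would only surface on the first API call.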
+
+ print(f"add model {used_model} complete\n")

  def format_chat_history(messages: list) -> list:
      print("\nstart format history")
      """
+     Formats the chat history into a structure Gemini can understand.
      """
      formatted_history = []
      for message in messages:
+         # print(f"t1 {message}")
+         # Skip thinking messages (messages with metadata)
+         # if not (message.get("role") == "assistant" and "metadata" in message):
+         #     print(f"t2 {message}")
+         #     formatted_history.append({
+         #         "role": "user" if message.get("role") == "user" else "assistant",
+         #         "parts": [message.get("content", "")]
+         #     })
+
+         # print(f"t2 {message}")
+
+         if message.get("role") == "user":
+             formatted_history.append({
+                 "role": "user",
+                 "parts": [message.get("content", "")]
+             })
+         elif message.get("role") == "assistant":
              formatted_history.append({
+                 "role": "model",
+                 "parts": [message.get("content", "")]
              })
+
+     # print(f"t3 {formatted_history}")
      print("return formatted history")
      return formatted_history

  def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
      print("start model response stream")
+     """
+     Streams thoughts and the response, with conversation history support, for text input only.
+     """
+     if not user_message.strip():  # Robust check: reject an empty or whitespace-only text message
+         messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed."))  # More specific message
          yield messages
+         print("Empty text message")
          return

      try:
+         print(f"\n=== New Request (Text) ===")
+         print(f"User message: {user_message}")
+
+         # Format the chat history for Gemini
+         chat_history = format_chat_history(messages)
+
+         # print(f"hist {chat_history}")
+
+         # Initialize the Gemini chat
+         print("Chat parameter")
+         chat = model.start_chat(history=chat_history)
+         print("Start response")
          response = chat.send_message(user_message, stream=True)

+         # Initialize buffers and flags
+         thought_buffer = ""
          response_buffer = ""
+         # thinking_complete = False
+
+         # Add the initial thinking message
+         # messages.append(
+         #     ChatMessage(
+         #         role="assistant",
+         #         content="",
+         #         metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+         #     )
+         # )
+
+         messages.append(
+             ChatMessage(
+                 role="assistant",
+                 content=response_buffer
+             )
+         )
+         # print(f"mes {messages} \n\nhis {chat_history}")
+
+         thinking_complete = True

          for chunk in response:
+             print("chunk start")
+             parts = chunk.candidates[0].content.parts
+             current_chunk = parts[0].text
+
+             print(f"\n=========\nparts len: {len(parts)}\n\nparts: {parts}\n\ncurrent chunk: {current_chunk}\n=========\n")
+
+             if len(parts) == 2 and not thinking_complete:
+                 # Complete the thought and start the response
+                 thought_buffer += current_chunk
+                 print(f"\n=== Complete Thought ===\n{thought_buffer}")
+
+                 messages[-1] = ChatMessage(
+                     role="assistant",
+                     content=thought_buffer,
+                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                 )
+                 yield messages
+
+                 # Start the response
+                 response_buffer = parts[1].text
+                 print(f"\n=== Starting Response ===\n{response_buffer}")
+
+                 messages.append(
+                     ChatMessage(
                          role="assistant",
                          content=response_buffer
                      )
+                 )
+                 thinking_complete = True
+
+             elif thinking_complete:
+                 # Stream the response
+                 response_buffer += current_chunk
+                 print(f"\n=== Response Chunk ===\n{current_chunk}")
+
+                 messages[-1] = ChatMessage(
+                     role="assistant",
+                     content=response_buffer
+                 )
+
+             else:
+                 # Stream the thinking
+                 thought_buffer += current_chunk
+                 print(f"\n=== Thinking Chunk ===\n{current_chunk}")
+
+                 messages[-1] = ChatMessage(
+                     role="assistant",
+                     content=thought_buffer,
+                     metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
+                 )
+             # time.sleep(0.05)  # Optional: uncomment for a slight delay to debug/visualize streaming; remove for the final version
+             print("Response end")
+             yield messages
+
+         print(f"\n=== Final Response ===\n{response_buffer}")

      except Exception as e:
+         print(f"\n=== Error ===\n{str(e)}")
+         messages.append(
+             ChatMessage(
+                 role="assistant",
+                 content=f"I apologize, but I encountered an error: {str(e)}"
+             )
+         )
          yield messages
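+
+ # Assumed chunk shape from google-generativeai: each streamed chunk exposes
+ # candidates[0].content.parts; a thinking-capable model may emit two parts
+ # (thought, answer) in one chunk, which the len(parts) == 2 branch handles.
+ # Because thinking_complete is forced to True above, only the plain response
+ # branch runs for this Gemma model.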

+ def user_message(msg: str, history: list) -> tuple[str, list]:
+     """Adds the user message to the chat history"""
+     history.append(ChatMessage(role="user", content=msg))
+     return "", history
+
+
+ # Create the Gradio interface
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal", secondary_hue="slate", neutral_hue="neutral")) as demo:  # Using the Soft theme with adjusted hues for a refined look
      gr.Markdown("# Chat with " + used_model)

+
      gr.HTML("""<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FGemini-2">
      <img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fzelk12%2FGemini-2&countColor=%23263759" />
      </a>""")

+
      chatbot = gr.Chatbot(
          type="messages",
+         label=used_model + " Chatbot (Streaming Output)",  # The label now indicates streaming
          render_markdown=True,
          scale=1,
+         editable="all",
+         avatar_images=(None, "https://lh3.googleusercontent.com/oxz0sUBF0iYoN4VvhqWTmux-cxfD1rxuYkuFEfm1SFaseXEsjjE4Je_C_V3UQPuJ87sImQK3HfQ3RXiaRnQetjaZbjJJUkiPL5jFJ1WRl5FKJZYibUA=w214-h214-n-nu")
      )

+     with gr.Row(equal_height=True):
          input_box = gr.Textbox(
              lines=1,
              label="Chat Message",
              placeholder="Type your message here...",
+             scale=4
          )

+         with gr.Column(scale=1):
+             submit_button = gr.Button("Submit", scale=1)
+             clear_button = gr.Button("Clear Chat", scale=1)
+
+     with gr.Row(equal_height=True):
+         test_button = gr.Button("test", scale=1)
+         test1_button = gr.Button("test1", scale=1)
+         test2_button = gr.Button("test2", scale=1)
+         test3_button = gr.Button("test3", scale=1)

+     # Add example prompts; file-upload examples were removed, keeping text-focused ones
      example_prompts = [
          ["Write a short poem about the sunset."],
          ["Explain the theory of relativity in simple terms."],
+         ["If a train leaves Chicago at 6am traveling at 60mph, and another train leaves New York at 8am traveling at 80mph, at what time will they meet?"],
          ["Summarize the plot of Hamlet."],
          ["Write a haiku about a cat."]
      ]
+
      gr.Examples(
          examples=example_prompts,
          inputs=input_box,
+         label="Examples: Try these prompts!",
+         examples_per_page=5  # Adjust as needed
      )

+     # Created by gemini-2.5-pro-exp-03-25
+     # def process_message(msg):
+     #     """Processes the user message: stores it, displays it, and generates a response."""
+     #     msg_store_val, _, _ = lambda msg: (msg, msg, "")(msg)  # Store the message and clear the input (inline lambda)
+     #     input_box_val, chatbot_val = user_message(msg_store_val, chatbot)  # Add the user message to the chat
+     #     chatbot_val_final = stream_gemini_response(msg_store_val, chatbot_val)  # Generate and stream the response
+     #     return msg_store_val, input_box_val, chatbot_val_final
+     #
+     # input_box.submit(
+     #     process_message,
+     #     inputs=[input_box],
+     #     outputs=[msg_store, input_box, chatbot],  # Fixed outputs to include chatbot
+     #     queue=False
+     # )
+
+     # submit_button.click(
+     #     process_message,
+     #     inputs=[input_box],
+     #     outputs=[msg_store, input_box, chatbot],  # Fixed outputs to include chatbot
+     #     queue=False
+     # )
+
+     # Set up event handlers
+     msg_store = gr.State("")  # Store for preserving the user message

      input_box.submit(
+         lambda msg: (msg, msg, ""),  # Store the message and clear the input
+         inputs=[input_box],
+         outputs=[msg_store, input_box, input_box],
+         queue=False
      ).then(
+         user_message,  # Add the user message to the chat
+         inputs=[msg_store, chatbot],
+         outputs=[input_box, chatbot],
+         queue=False
      ).then(
+         stream_gemini_response,  # Generate and stream the response
          inputs=[msg_store, chatbot],
+         outputs=chatbot
      )
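+
+     # Note: the three .then() steps run in order; queue=False keeps the quick
+     # state updates synchronous, while the streaming step is left on the
+     # default queue because the API call can be long-running.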

      submit_button.click(
+         lambda msg: (msg, msg, ""),  # Store the message and clear the input
+         inputs=[input_box],
+         outputs=[msg_store, input_box, input_box],
+         queue=False
      ).then(
+         user_message,  # Add the user message to the chat
+         inputs=[msg_store, chatbot],
+         outputs=[input_box, chatbot],
+         queue=False
      ).then(
+         stream_gemini_response,  # Generate and stream the response
          inputs=[msg_store, chatbot],
+         outputs=chatbot
      )

      clear_button.click(
+         lambda: ([], "", ""),
+         outputs=[chatbot, input_box, msg_store],
          queue=False
      )

+     gr.Markdown(  # Description moved to the bottom; updated for text-only input
          """
+         <br><br><br> <!-- Add some vertical space -->
          ---
          ### About this Chatbot
+         **Try out the example prompts below!**
          **Key Features:**
+         * Powered by Google's **gemma-3-27b-it** model.
+         * Supports **conversation history** for multi-turn chats.
+         * Uses **streaming** for a more interactive experience.
          **Instructions:**
+         1. Type your message in the input box below or select an example.
+         2. Press Enter or click Submit to send.
+         3. Watch the response stream into the chat window.
+         4. Use the "Clear Chat" button to start a new conversation.
          """
      )

+
  # Launch the interface
  if __name__ == "__main__":
+     demo.launch(debug=True)
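+     # debug=True surfaces detailed errors in the console; assumed useful while
+     # iterating on the Space. Use demo.launch() for a standard launch.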