zelk12 committed on
Commit
464e08f
·
verified ·
1 Parent(s): 370c76a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -4
app.py CHANGED
@@ -5,17 +5,25 @@ from typing import Iterator
5
  import google.generativeai as genai
6
  import time # Import time module for potential debugging/delay
7
 
 
 
 
8
  # get Gemini API Key from the environ variable
9
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
10
  genai.configure(api_key=GEMINI_API_KEY)
11
 
 
 
 
12
  used_model = "gemini-2.0-flash-thinking-exp-01-21"
13
 
14
  # we will be using the Gemini 2.0 Flash model with Thinking capabilities
15
  model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
16
 
 
17
 
18
  def format_chat_history(messages: list) -> list:
 
19
  """
20
  Formats the chat history into a structure Gemini can understand
21
  """
@@ -31,21 +39,31 @@ def format_chat_history(messages: list) -> list:
31
  # })
32
 
33
  #print(f"t2 {message}")
34
- formatted_history.append({
35
- "role": "user" if message.get("role") == "user" else "model",
 
 
36
  "parts": [message.get("content", "")]
37
- })
 
 
 
 
 
38
 
39
  #print(f"t3 {formatted_history}")
 
40
  return formatted_history
41
 
42
  def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
 
43
  """
44
  Streams thoughts and response with conversation history support for text input only.
45
  """
46
  if not user_message.strip(): # Robust check: if text message is empty or whitespace
47
  messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed.")) # More specific message
48
  yield messages
 
49
  return
50
 
51
  try:
@@ -56,7 +74,9 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
56
  chat_history = format_chat_history(messages)
57
 
58
  # Initialize Gemini chat
 
59
  chat = model.start_chat(history=chat_history)
 
60
  response = chat.send_message(user_message, stream=True)
61
 
62
  # Initialize buffers and flags
@@ -132,7 +152,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
132
  metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
133
  )
134
  #time.sleep(0.05) #Optional: Uncomment this line to add a slight delay for debugging/visualization of streaming. Remove for final version
135
-
136
  yield messages
137
 
138
  print(f"\n=== Final Response ===\n{response_buffer}")
 
5
  import google.generativeai as genai
6
  import time # Import time module for potential debugging/delay
7
 
8
+ print("import library complete")
9
+ print("add API key")
10
+
11
  # get Gemini API Key from the environ variable
12
  GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
13
  genai.configure(api_key=GEMINI_API_KEY)
14
 
15
+ print("add API key complete")
16
+ print("add model")
17
+
18
  used_model = "gemini-2.0-flash-thinking-exp-01-21"
19
 
20
  # we will be using the Gemini 2.0 Flash model with Thinking capabilities
21
  model = genai.GenerativeModel("gemini-2.0-flash-thinking-exp-01-21")
22
 
23
+ print(f"add model {used_model} colplete")
24
 
25
  def format_chat_history(messages: list) -> list:
26
+ print("start format history")
27
  """
28
  Formats the chat history into a structure Gemini can understand
29
  """
 
39
  # })
40
 
41
  #print(f"t2 {message}")
42
+
43
+ if message.get("role") == "user" :
44
+ formatted_history.append({
45
+ "role": "user",
46
  "parts": [message.get("content", "")]
47
+ })
48
+ elif message.get("role") == "model" :
49
+ formatted_history.append({
50
+ "role": "model",
51
+ "parts": [message.get("content", "")]
52
+ })
53
 
54
  #print(f"t3 {formatted_history}")
55
+ print("return formatted history")
56
  return formatted_history
57
 
58
  def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
59
+ print("start model response stream")
60
  """
61
  Streams thoughts and response with conversation history support for text input only.
62
  """
63
  if not user_message.strip(): # Robust check: if text message is empty or whitespace
64
  messages.append(ChatMessage(role="assistant", content="Please provide a non-empty text message. Empty input is not allowed.")) # More specific message
65
  yield messages
66
+ print("Empty text message")
67
  return
68
 
69
  try:
 
74
  chat_history = format_chat_history(messages)
75
 
76
  # Initialize Gemini chat
77
+ print("Chat parameter")
78
  chat = model.start_chat(history=chat_history)
79
+ print("Start response")
80
  response = chat.send_message(user_message, stream=True)
81
 
82
  # Initialize buffers and flags
 
152
  metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
153
  )
154
  #time.sleep(0.05) #Optional: Uncomment this line to add a slight delay for debugging/visualization of streaming. Remove for final version
155
+ print("Response end")
156
  yield messages
157
 
158
  print(f"\n=== Final Response ===\n{response_buffer}")