Update app.py

app.py CHANGED
@@ -45,7 +45,7 @@ def format_chat_history(messages: list) -> list:
                 "role": "user",
                 "parts": [message.get("content", "")]
             })
-        elif message.get("role") == "
+        elif message.get("role") == "assistant" :
             formatted_history.append({
                 "role": "model",
                 "parts": [message.get("content", "")]
@@ -95,7 +95,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
 
         messages.append(
             ChatMessage(
-                role="
+                role="assistant",
                 content=response_buffer
             )
         )
@@ -113,7 +113,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
             print(f"\n=== Complete Thought ===\n{thought_buffer}")
 
             messages[-1] = ChatMessage(
-                role="
+                role="assistant",
                 content=thought_buffer,
                 metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
             )
@@ -125,7 +125,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
 
             messages.append(
                 ChatMessage(
-                    role="
+                    role="assistant",
                     content=response_buffer
                 )
             )
@@ -137,7 +137,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
             print(f"\n=== Response Chunk ===\n{current_chunk}")
 
             messages[-1] = ChatMessage(
-                role="
+                role="assistant",
                 content=response_buffer
             )
 
@@ -147,7 +147,7 @@ def stream_gemini_response(user_message: str, messages: list) -> Iterator[list]:
             print(f"\n=== Thinking Chunk ===\n{current_chunk}")
 
             messages[-1] = ChatMessage(
-                role="
+                role="assistant",
                 content=thought_buffer,
                 metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
             )
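For context, a minimal sketch of the patched format_chat_history. Only the elif branch and the two dict literals are confirmed by the diff above; the enclosing loop, the user-branch condition, and the return statement are assumed scaffolding. The function remaps Gradio's "assistant" role onto the "model" role that the Gemini API expects:

def format_chat_history(messages: list) -> list:
    # Gemini's content format accepts only the roles "user" and "model",
    # so Gradio's "assistant" messages are remapped to "model" here.
    formatted_history = []
    for message in messages:
        if message.get("role") == "user":
            formatted_history.append({
                "role": "user",
                "parts": [message.get("content", "")]
            })
        elif message.get("role") == "assistant":
            formatted_history.append({
                "role": "model",
                "parts": [message.get("content", "")]
            })
    return formatted_history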
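All of the stream_gemini_response hunks apply the same streaming pattern: append one placeholder ChatMessage, then keep overwriting messages[-1] as chunks arrive so the chatbot re-renders the growing text. Below is a minimal sketch of that update step using Gradio's ChatMessage dataclass; the apply_chunk helper and its buffer arguments are hypothetical, introduced here only to isolate the pattern:

from gradio import ChatMessage

def apply_chunk(messages: list, thought_buffer: str, response_buffer: str,
                current_chunk: str, is_thought: bool):
    # Overwrite the last message in place; yielding `messages` after this
    # call makes gr.Chatbot re-render the partially streamed text.
    if is_thought:
        thought_buffer += current_chunk
        # A ChatMessage carrying a metadata "title" renders as a collapsible
        # "thought" bubble in gr.Chatbot rather than as a normal reply.
        messages[-1] = ChatMessage(
            role="assistant",
            content=thought_buffer,
            metadata={"title": "⚙️ Thinking: *The thoughts produced by the model are experimental"}
        )
    else:
        response_buffer += current_chunk
        messages[-1] = ChatMessage(
            role="assistant",
            content=response_buffer
        )
    return messages, thought_buffer, response_buffer

Replacing messages[-1] rather than appending keeps each streamed response (or thought) in a single chat bubble, and the metadata "title" is what makes Gradio render the thought text as a collapsible section.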