Refactor chat history management in the process_input function of app.py to use dictionaries for user and assistant messages, improving clarity and structure. Update error handling so that messages appended to the chat history keep the same format.
app.py CHANGED
@@ -189,11 +189,15 @@ def process_input(image, audio, video, text, chat_history, voice_type, enable_au
             if not isinstance(chat_history, list):
                 chat_history = []
 
+            # Convert the current messages to the proper format
+            user_message = {"role": "user", "content": user_message_for_display}
+            assistant_message = {"role": "assistant", "content": text_response}
+
             # Find the last incomplete message pair if it exists
-            if chat_history and isinstance(chat_history[-1], …
-                chat_history…
+            if chat_history and isinstance(chat_history[-1], dict) and chat_history[-1]["role"] == "user":
+                chat_history.append(assistant_message)
             else:
-                chat_history.…
+                chat_history.extend([user_message, assistant_message])
 
             # Clear GPU memory after processing
             if torch.cuda.is_available():
@@ -209,7 +213,9 @@ def process_input(image, audio, video, text, chat_history, voice_type, enable_au
         except Exception as e:
             print(f"Error during generation: {str(e)}")
             error_msg = "I apologize, but I encountered an error processing your request. Please try again."
-            chat_history.append(…
+            chat_history.append(
+                {"role": "assistant", "content": error_msg}
+            )
             return chat_history, error_msg, None
 
     except Exception as e:
@@ -217,7 +223,10 @@ def process_input(image, audio, video, text, chat_history, voice_type, enable_au
         if not isinstance(chat_history, list):
             chat_history = []
         error_msg = "I apologize, but I encountered an error processing your request. Please try again."
-        chat_history.…
+        chat_history.extend([
+            {"role": "user", "content": user_message_for_display},
+            {"role": "assistant", "content": error_msg}
+        ])
         return chat_history, error_msg, None
 
 def user_input_to_content(user_input):
@@ -364,7 +373,7 @@ def create_demo():
         if not display_message.strip():
             display_message = "Multimodal content"
 
-        return […
+        return [{"role": "user", "content": display_message}]
 
     multimodal_submit.click(
         fn=prepare_multimodal_input,