Fixed: AI responded with the LLM even if the answer was found in the dataset
Browse files
Main.py
CHANGED
@@ -31,12 +31,12 @@ def generate_response(user_input, chat_history):
|
|
31 |
dataset_response = search_dataset(user_input)
|
32 |
|
33 |
if dataset_response:
|
34 |
-
# Add user and assistant responses to the chat history
|
35 |
chat_history.append({"role": "user", "content": user_input})
|
36 |
-
chat_history.append({"role": "assistant", "content":
|
37 |
-
return chat_history
|
38 |
|
39 |
-
#
|
40 |
outputs = pipeline(user_input, max_new_tokens=512)
|
41 |
|
42 |
# Generate the assistant's response
|
|
|
31 |
dataset_response = search_dataset(user_input)
|
32 |
|
33 |
if dataset_response:
|
34 |
+
# Add user and assistant responses to the chat history from dataset match
|
35 |
chat_history.append({"role": "user", "content": user_input})
|
36 |
+
chat_history.append({"role": "assistant", "content": dataset_response})
|
37 |
+
return chat_history # Return early to avoid generating a response from the LLM
|
38 |
|
39 |
+
# If no match found in dataset, generate the response from the LLM
|
40 |
outputs = pipeline(user_input, max_new_tokens=512)
|
41 |
|
42 |
# Generate the assistant's response
|