FraRy committed on
Commit
37b4b2f
·
verified ·
1 Parent(s): af0aef7

fixed: AI responded with LLM even if answer was found in dataset

Browse files
Files changed (1) hide show
  1. Main.py +4 -4
Main.py CHANGED
@@ -31,12 +31,12 @@ def generate_response(user_input, chat_history):
31
  dataset_response = search_dataset(user_input)
32
 
33
  if dataset_response:
34
- # Add user and assistant responses to the chat history
35
  chat_history.append({"role": "user", "content": user_input})
36
- chat_history.append({"role": "assistant", "content": "You are a heplfull chatbot who specializes in Cisco switch and router configurations" + assistant_response})
37
- return chat_history
38
 
39
- # Generate the response from the LLM
40
  outputs = pipeline(user_input, max_new_tokens=512)
41
 
42
  # Generate the assistant's response
 
31
  dataset_response = search_dataset(user_input)
32
 
33
  if dataset_response:
34
+ # Add user and assistant responses to the chat history from dataset match
35
  chat_history.append({"role": "user", "content": user_input})
36
+ chat_history.append({"role": "assistant", "content": dataset_response})
37
+ return chat_history # Return early to avoid generating a response from the LLM
38
 
39
+ # If no match found in dataset, generate the response from the LLM
40
  outputs = pipeline(user_input, max_new_tokens=512)
41
 
42
  # Generate the assistant's response