Update app.py
app.py CHANGED
@@ -241,7 +241,7 @@ Classify as "knowledge_base" if the query:
     if LLMmodel == "groq":
         response = groq_manager.get_next_client().chat.completions.create(
             messages=messages,
-            model="llama-3.
+            model="llama-3.3-70b-versatile",
             temperature=temperature,
             max_tokens=10,
             stream=False
@@ -303,7 +303,7 @@ Guidelines:
     if LLMmodel == "groq":
         response = groq_manager.get_next_client().chat.completions.create(
             messages=messages,
-            model="llama-3.
+            model="llama-3.3-70b-versatile",
             temperature=temperature,
             stream=False
         )
@@ -390,7 +390,7 @@ Please rephrase this query into a complete, contextual search query following th
     if LLMmodel == "groq":
         response = groq_manager.get_next_client().chat.completions.create(
             messages=messages,
-            model="llama-3.
+            model="llama-3.3-70b-versatile",
             temperature=temperature,
             max_tokens=200,
             stream=False
@@ -682,7 +682,7 @@ Instructions:
     if LLMmodel == "groq":
         response = groq_manager.get_next_client().chat.completions.create(
             messages=messages,
-            model="llama-3.
+            model="llama-3.3-70b-versatile",
             temperature=temperature,
             max_tokens=5000,
             stream=False
@@ -820,7 +820,7 @@ class ChatBot:
     if LLMmodel == "groq":
         response = groq_manager.get_next_client().chat.completions.create(
             messages=messages,
-            model="llama-3.
+            model="llama-3.3-70b-versatile",
             temperature=temperature,
             max_tokens=5000,
             stream=False
@@ -1085,7 +1085,7 @@ class ChatBot:
     synthesis_response = groq_manager.execute_groq_api_call(
         groq_manager.get_next_client().chat.completions.create,
         messages=messages,
-        model="llama-3.
+        model="llama-3.3-70b-versatile",
         temperature=temperature,
         max_tokens=1000,
         stream=False
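For reference, a minimal sketch of what each updated call site amounts to at runtime, assuming the official groq Python SDK and a GROQ_API_KEY in the environment; the messages, temperature, and max_tokens values below are placeholders, not taken from app.py:

    from groq import Groq  # pip install groq

    client = Groq()  # reads GROQ_API_KEY from the environment

    response = client.chat.completions.create(
        messages=[{"role": "user", "content": "Classify this query."}],
        model="llama-3.3-70b-versatile",  # the new model id from this commit
        temperature=0.0,
        max_tokens=10,
        stream=False,
    )
    print(response.choices[0].message.content)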
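The same model literal is edited at six call sites, and the diff references a groq_manager exposing get_next_client() and execute_groq_api_call(...), whose implementation is not part of this commit. A hypothetical sketch of both ideas follows: a single module-level constant so the next model bump is a one-line change, and a minimal round-robin manager matching the call shapes above. Everything here except those two method names is invented for illustration (the class name, the GROQ_API_KEYS variable, the wrapper's behavior):

    import itertools
    import os

    from groq import Groq

    # One place to bump the model id instead of six separate call sites.
    GROQ_MODEL = "llama-3.3-70b-versatile"

    class GroqClientManager:
        """Hypothetical round-robin rotation over several API keys."""

        def __init__(self, api_keys):
            self._clients = itertools.cycle([Groq(api_key=k) for k in api_keys])

        def get_next_client(self):
            # Hand out clients in turn so requests spread across keys.
            return next(self._clients)

        def execute_groq_api_call(self, fn, **kwargs):
            # Thin pass-through matching the call shape at line 1085; a real
            # implementation would likely add retry/rate-limit handling here.
            return fn(**kwargs)

    groq_manager = GroqClientManager(os.environ["GROQ_API_KEYS"].split(","))

    response = groq_manager.get_next_client().chat.completions.create(
        messages=[{"role": "user", "content": "..."}],
        model=GROQ_MODEL,
        temperature=0.7,
        max_tokens=1000,
        stream=False,
    )

With a constant like GROQ_MODEL, a future model deprecation would be a one-line edit rather than another six-hunk diff.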