added EvalDataset Generation
app.py CHANGED
@@ -397,20 +397,19 @@ class BSIChatbot:
         return query, context
 
     def queryRemoteLLM(self, systemPrompt, query, summary):
-
-
-
-
-
-
-
-
-
-
-
-
-
-        return "AnswerqueryRemoteLLM" #new
+        if summary != True:
+            chat_completion = self.llm_client.chat.completions.create(
+                messages=[{"role": "system", "content": systemPrompt},
+                          {"role": "user", "content": "Step-Back Frage, die neu gestellt werden soll: " + query}],
+                model=self.llm_remote_model,
+            )
+        if summary == True:
+            chat_completion = self.llm_client.chat.completions.create(
+                messages=[{"role": "system", "content": systemPrompt},
+                          {"role": "user", "content": query}],
+                model=self.llm_remote_model,
+            )
+        return chat_completion.choices[0].message.content
 
     def stepBackPrompt(self, query):
         systemPrompt = """
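The new `queryRemoteLLM` body assumes that `self.llm_client` is an OpenAI-compatible chat client and that `self.llm_remote_model` holds a model id; neither is set up in this hunk. Below is a minimal standalone sketch of that assumed wiring, with the two mutually exclusive `if` branches collapsed into a single call; the endpoint, API key, and model id are placeholders, not values from the commit.

    from openai import OpenAI

    # Placeholder wiring for the attributes the committed method relies on;
    # any OpenAI-compatible endpoint would work here.
    llm_client = OpenAI(
        base_url="https://api.example.com/v1",  # placeholder endpoint
        api_key="YOUR_API_KEY",                 # placeholder credential
    )
    llm_remote_model = "example/remote-model"   # placeholder model id

    def query_remote_llm(system_prompt: str, query: str, summary: bool) -> str:
        # Mirrors the committed branches: summary=False prefixes the step-back
        # marker ("Step-Back Frage, die neu gestellt werden soll:" = "step-back
        # question to be asked again:"); summary=True sends the query unchanged.
        user_content = query if summary else (
            "Step-Back Frage, die neu gestellt werden soll: " + query
        )
        chat_completion = llm_client.chat.completions.create(
            messages=[{"role": "system", "content": system_prompt},
                      {"role": "user", "content": user_content}],
            model=llm_remote_model,
        )
        return chat_completion.choices[0].message.content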
@@ -510,12 +509,12 @@ class BSIChatbot:
             return stream
 
         else:
-
-
-
-
+            answer = self.llm_client.chat.completions.create(
+                messages=final_prompt,
+                model=self.llm_remote_model,
+                stream=False
+            )
             self.cleanResources()
-            answer = "Answer" #new
             return answer, context
 
     def returnImages(self):
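Note that with `stream=False` the `create()` call returns a ChatCompletion object rather than plain text, so the `answer` in the returned `(answer, context)` tuple still needs unwrapping on the caller's side. A hedged sketch of that path, assuming `final_prompt` is already an OpenAI-style message list (client settings and message contents are placeholders, not from the commit):

    from openai import OpenAI

    llm_client = OpenAI(base_url="https://api.example.com/v1", api_key="YOUR_API_KEY")
    llm_remote_model = "example/remote-model"  # placeholder model id

    # `final_prompt` must already have the chat-message shape expected by
    # chat.completions.create; placeholder contents shown here.
    final_prompt = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Example question"},
    ]

    answer = llm_client.chat.completions.create(
        messages=final_prompt,
        model=llm_remote_model,
        stream=False,
    )
    # `answer` is a ChatCompletion object, not a string; the text lives at:
    text = answer.choices[0].message.content
    print(text)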