MikeMann committed on
Commit
57c5e88
·
1 Parent(s): 7273ef0

added EvalDataset Generation

Browse files
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -448,9 +448,9 @@ class BSIChatbot:
448
 
449
  if stepBackPrompt == True:
450
  stepBackQuery = self.stepBackPrompt(query)
451
- print("DBG stepBackQuery:" + stepBackQuery)
452
  stepBackQuery, stepBackContext = self.retrieval(stepBackQuery, rerankingStep, True)
453
- print("DBG stepBackContext:" + stepBackContext)
454
  sysPrompt = """
455
  You are an helpful Chatbot for the BSI IT-Grundschutz. Using the information contained in the context,
456
  give a comprehensive answer to the question.
@@ -459,7 +459,7 @@ class BSIChatbot:
459
  If the answer cannot be deduced from the context, do not give an answer.
460
  """
461
  stepBackAnswer = self.queryRemoteLLM(sysPrompt, stepBackQuery, True)
462
- print("DBG stepBackAnswer:" + stepBackAnswer)
463
  context += "Übergreifende Frage:" + stepBackQuery + "Übergreifender Context:" + stepBackAnswer
464
 
465
  #def queryRemoteLLM(self, systemPrompt, query, summary):
@@ -539,9 +539,9 @@ class BSIChatbot:
539
  retrieved_chunks_text.append(
540
  f"The Document is: '{chunk.metadata['source']}'\nImage Description is: ':{chunk.page_content}")
541
  i=1
542
- for chunk in retrieved_chunks_text:
543
- print(f"Retrieved Chunk number {i}:\n{chunk}")
544
- i=i+1
545
 
546
  if rerankingStep==True:
547
  if rerankingModel == None:
@@ -553,9 +553,9 @@ class BSIChatbot:
553
  retrieved_chunks_text=[chunk["content"] for chunk in retrieved_chunks_text]
554
 
555
  i = 1
556
- for chunk in retrieved_chunks_text:
557
- print(f"Reranked Chunk number {i}:\n{chunk}")
558
- i = i + 1
559
 
560
  context = "\nExtracted documents:\n"
561
  context += "".join([doc for i, doc in enumerate(retrieved_chunks_text)])
@@ -638,7 +638,7 @@ class BSIChatbot:
638
  f"The Document is: '{chunk.metadata['source']}'\nImage Description is: ':{chunk.page_content}")
639
  i=1
640
  for chunk in retrieved_chunks_text:
641
- print(f"Retrieved Chunk number {i}:\n{chunk}")
642
  i=i+1
643
 
644
  if rerankingStep==True:
@@ -649,9 +649,9 @@ class BSIChatbot:
649
  rerankingModel
650
  retrieved_chunks_text=rerankingModel.rerank(query, retrieved_chunks_text,k=15)
651
  #REVERSE Rerank results!
652
- print("DBG: Rankorder:")
653
- for chunk in reversed(retrieved_chunks_text):
654
- print(chunk.rank)
655
  retrieved_chunks_text=[chunk["content"] for chunk in reversed(retrieved_chunks_text)]
656
 
657
  i = 1
@@ -704,7 +704,8 @@ class BSIChatbot:
704
  for data in data_list:
705
  data["Context"] = None
706
  data["Answer"] = None
707
- print(data)
 
708
 
709
  i=1
710
  #for data in data_list[:3]:
 
448
 
449
  if stepBackPrompt == True:
450
  stepBackQuery = self.stepBackPrompt(query)
451
+ #newprint("DBG stepBackQuery:" + stepBackQuery)
452
  stepBackQuery, stepBackContext = self.retrieval(stepBackQuery, rerankingStep, True)
453
+ #newprint("DBG stepBackContext:" + stepBackContext)
454
  sysPrompt = """
455
  You are an helpful Chatbot for the BSI IT-Grundschutz. Using the information contained in the context,
456
  give a comprehensive answer to the question.
 
459
  If the answer cannot be deduced from the context, do not give an answer.
460
  """
461
  stepBackAnswer = self.queryRemoteLLM(sysPrompt, stepBackQuery, True)
462
+ #newprint("DBG stepBackAnswer:" + stepBackAnswer)
463
  context += "Übergreifende Frage:" + stepBackQuery + "Übergreifender Context:" + stepBackAnswer
464
 
465
  #def queryRemoteLLM(self, systemPrompt, query, summary):
 
539
  retrieved_chunks_text.append(
540
  f"The Document is: '{chunk.metadata['source']}'\nImage Description is: ':{chunk.page_content}")
541
  i=1
542
+ #newfor chunk in retrieved_chunks_text:
543
+ #newprint(f"Retrieved Chunk number {i}:\n{chunk}")
544
+ #newi=i+1
545
 
546
  if rerankingStep==True:
547
  if rerankingModel == None:
 
553
  retrieved_chunks_text=[chunk["content"] for chunk in retrieved_chunks_text]
554
 
555
  i = 1
556
+ #newfor chunk in retrieved_chunks_text:
557
+ #newprint(f"Reranked Chunk number {i}:\n{chunk}")
558
+ #newi = i + 1
559
 
560
  context = "\nExtracted documents:\n"
561
  context += "".join([doc for i, doc in enumerate(retrieved_chunks_text)])
 
638
  f"The Document is: '{chunk.metadata['source']}'\nImage Description is: ':{chunk.page_content}")
639
  i=1
640
  for chunk in retrieved_chunks_text:
641
+ #newprint(f"Retrieved Chunk number {i}:\n{chunk}")
642
  i=i+1
643
 
644
  if rerankingStep==True:
 
649
  rerankingModel
650
  retrieved_chunks_text=rerankingModel.rerank(query, retrieved_chunks_text,k=15)
651
  #REVERSE Rerank results!
652
+ #newprint("DBG: Rankorder:")
653
+ #newfor chunk in reversed(retrieved_chunks_text):
654
+ #newprint(chunk.rank)
655
  retrieved_chunks_text=[chunk["content"] for chunk in reversed(retrieved_chunks_text)]
656
 
657
  i = 1
 
704
  for data in data_list:
705
  data["Context"] = None
706
  data["Answer"] = None
707
+
708
+ print(data_list)
709
 
710
  i=1
711
  #for data in data_list[:3]: