Upload 4 files
- retriever/chat_manager.py +100 -43
- retriever/document_manager.py +10 -3
- retriever/llm_manager.py +195 -2
- retriever/vector_store_manager.py +1 -1
retriever/chat_manager.py
CHANGED
@@ -1,57 +1,114 @@
 from datetime import datetime
 import logging
 from typing import List
-from globals import app_config
-
-def chat_response(query: str, selected_docs: List[str], history: List[dict]) -> List[dict]:
-    """
-    Generate a chat response based on the user's query and selected documents.
-
-        List[dict]: Updated chat history with the new response in 'messages' format.
-    """
-    timestamp = datetime.now().strftime("%H:%M:%S")
+
+class ChatManager:
+    def __init__(self, documentManager, llmManager):
+        """
+        Initialize the ChatManager.
+        """
+        self.doc_manager = documentManager
+        self.llm_manager = llmManager
+
+        logging.info("ChatManager initialized")
+
+    def generate_chat_response(self, query: str, selected_docs: List[str], history: List[dict]) -> List[dict]:
+        """
+        Generate a chat response based on the user's query and selected documents.
+
+        Args:
+            query (str): The user's query.
+            selected_docs (List[str]): List of selected document filenames from the dropdown.
+            history (List[dict]): The chat history as a list of {'role': str, 'content': str} dictionaries.
+
+        Returns:
+            List[dict]: Updated chat history with the new response in 'messages' format.
+        """
+        timestamp = datetime.now().strftime("%H:%M:%S")
+        logging.info(f"Generating chat response for query: {query} at {timestamp}")
+
+        # Handle empty query
+        if not query:
+            logging.warning("Empty query received")
+            return history + [{"role": "assistant", "content": "Please enter a query."}]
+
+        # Handle no selected documents
+        if not selected_docs:
+            logging.warning("No documents selected")
+            return history + [{"role": "assistant", "content": "Please select at least one document."}]
+
+        # Retrieve the top 5 chunks based on the query and selected documents
+        try:
+            top_k_results = self.doc_manager.retrieve_top_k(query, selected_docs, k=5)
+        except Exception as e:
+            logging.error(f"Error retrieving chunks: {str(e)}")
+            return history + [
+                {"role": "user", "content": f"{query}"},
+                {"role": "assistant", "content": f"Error retrieving chunks: {str(e)}"}
+            ]
+
+        if not top_k_results:
+            logging.info("No relevant chunks found")
+            return history + [
+                {"role": "user", "content": f"{query}"},
+                {"role": "assistant", "content": "No relevant information found in the selected documents."}
+            ]
+
+        # Send the top K results to the LLM to generate a response
+        try:
+            llm_response, source_docs = self.llm_manager.generate_response(query, top_k_results)
+        except Exception as e:
+            logging.error(f"Error generating LLM response: {str(e)}")
+            return history + [
+                {"role": "user", "content": f"{query}"},
+                {"role": "assistant", "content": f"Error generating response: {str(e)}"}
+            ]
+
+        # Format the response
+        response = llm_response
+        # Uncomment to include source docs in response (optional)
+        # for i, doc in enumerate(source_docs, 1):
+        #     doc_id = doc.metadata.get('doc_id', 'Unknown')
+        #     filename = next((name for name, d_id in self.doc_manager.document_ids.items() if d_id == doc_id), 'Unknown')
+        #     response += f"\n{i}. {filename}: {doc.page_content[:100]}..."
+
+        logging.info("Chat response generated successfully")
+        # Return updated history with new user query and LLM response
         return history + [
             {"role": "user", "content": f"{query}"},
-            {"role": "assistant", "content":
+            {"role": "assistant", "content": response}
         ]
+
+    def generate_summary(self, chunks: any, summary_type: str = "medium") -> str:
+        """
+        Generate a summary of the selected documents.
+
+        Args:
+            selected_docs (List[str]): List of selected document filenames.
+            summary_type (str): Type of summary ("small", "medium", "detailed").
+            k (int): Number of chunks to retrieve from DocumentManager.
+            include_toc (bool): Whether to include the table of contents (if available).
+
+        Returns:
+            str: Generated summary.
+
+        Raises:
+            ValueError: If summary_type is invalid or DocumentManager/LLM is not available.
+        """
+        if summary_type not in ["small", "medium", "detailed"]:
+            raise ValueError("summary_type must be 'small', 'medium', or 'detailed'")
+
+        if not chunks:
+            logging.warning("No documents selected for summarization")
+            return "Please select at least one document."
+
+        llm_summary_response = self.llm_manager.generate_summary_v0(chunks=chunks)
+        # logging.info(f"Summary response {llm_summary_response}")
+
+        return llm_summary_response
+
+    def generate_sample_questions(self, chunks: any):
+        questions = self.llm_manager.generate_questions(chunks=chunks)
+        return questions
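For orientation, here is a minimal sketch of how the new ChatManager might be driven. The stub manager classes below are hypothetical stand-ins for the real DocumentManager and LLMManager (and the import path is an assumption); they are not part of this commit.

# Hypothetical wiring of ChatManager (illustration only, not in this commit).
from typing import List
from retriever.chat_manager import ChatManager  # assumed import path

class StubDocManager:
    document_ids = {"report.pdf": "doc-1"}
    def retrieve_top_k(self, query: str, selected_docs: List[str], k: int = 5):
        # Stand-in for the FAISS-backed retrieval in DocumentManager.
        return [{"text": "example chunk", "metadata": {"doc_id": "doc-1"}, "score": 0.91}]

class StubLLMManager:
    def generate_response(self, query: str, top_k_results):
        # Stand-in for the Groq-backed QA chain; returns (answer, source_docs).
        return "example answer", []

chat = ChatManager(StubDocManager(), StubLLMManager())
history = chat.generate_chat_response("What does the report cover?", ["report.pdf"], history=[])
print(history[-1]["content"])  # "example answer"

Note that invalid input short-circuits before retrieval: an empty query or an empty document selection returns the history with an assistant prompt instead of raising.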
retriever/document_manager.py
CHANGED
@@ -24,7 +24,7 @@ class DocumentManager:
         """
         try:
             if file is None:
-                return "No file uploaded",
+                return "No file uploaded", None, None

             logging.info(f"Processing file: {file}")

@@ -51,7 +51,6 @@ class DocumentManager:

         return (
             f"Successfully loaded {filename} with {len(page_list)} pages",
-            page_list,
             filename,
             doc_id
         )

@@ -104,11 +103,19 @@ class DocumentManager:
         top_k_results = all_results[:k]

         # Log the list of retrieved documents
-        logging.info(f"Result from search :{all_results} ")
+        #logging.info(f"Result from search :{all_results} ")
         logging.info(f"Retrieved top {k} documents:")
         for i, result in enumerate(top_k_results, 1):
             doc_id = result['metadata'].get('doc_id', 'Unknown')
             filename = next((name for name, d_id in self.document_ids.items() if d_id == doc_id), 'Unknown')
             logging.info(f"{i}. Filename: {filename}, Doc ID: {doc_id}, Score: {result['score']:.4f}, Text: {result['text'][:200]}...")

+        return top_k_results
+
+    def retrieve_summary_chunks(self, query: str, doc_id: str, k: int = 10):
+        logging.info(f"Retrieving {k} chunks for summary: {query}, Document Id: {doc_id}")
+        results = self.vector_manager.search(query, doc_id, k=k)
+        top_k_results = results[:k]
+        logging.info(f"Retrieved {len(top_k_results)} chunks for summary")
+
         return top_k_results
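A short sketch of what a caller of the new retrieve_summary_chunks might look like. The chunk dictionary shape ('text', 'metadata', 'score') is inferred from the logging in retrieve_top_k; doc_manager and the doc_id value are assumptions, not part of this commit.

# Hypothetical caller (illustration only); assumes doc_manager is an initialized
# DocumentManager and "doc-1" is a doc_id previously returned by load_document.
chunks = doc_manager.retrieve_summary_chunks(query="overall summary", doc_id="doc-1", k=10)
for chunk in chunks:
    print(chunk["metadata"].get("doc_id"), round(chunk["score"], 4), chunk["text"][:80])

Also worth noting from this diff: load_document now returns a three-item tuple (status message, filename, doc_id) with page_list dropped, and the no-file branch returns matching None placeholders.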
retriever/llm_manager.py
CHANGED
@@ -5,6 +5,8 @@ from langchain_groq import ChatGroq
 from langchain.chains import RetrievalQA
 from langchain_core.documents import Document
 from langchain_core.retrievers import BaseRetriever
+from langchain.chains.summarize import load_summarize_chain
+from langchain.prompts import PromptTemplate

 class LLMManager:
     DEFAULT_MODEL = "gemma2-9b-it"  # Set the default model name

@@ -30,7 +32,7 @@ class LLMManager:
         Raises:
             ValueError: If GROQ_API_KEY is not set.
         """
-        api_key =
+        api_key = os.getenv("GROQ_API_KEY")
         if not api_key:
             raise ValueError("GROQ_API_KEY is not set. Please add it in your environment variables.")

@@ -113,4 +115,195 @@ class LLMManager:
             return response, source_docs
         except Exception as e:
             logging.error(f"Error during QA chain invocation: {str(e)}")
-            raise e
+            raise e
+
+    def generate_summary_v0(self, chunks: any):
+        logging.info("Generating summary ...")
+
+        # Limit the number of chunks (for example, top 30 chunks)
+        limited_chunks = chunks[:30]
+
+        # Combine text from the selected chunks
+        full_text = "\n".join(chunk['text'] for chunk in limited_chunks)
+        text_length = len(full_text)
+        logging.info(f"Total text length (characters): {text_length}")
+
+        # Define a maximum character limit to fit in a 1024-token context.
+        # For many models, roughly 3200 characters is a safe limit.
+        MAX_CHAR_LIMIT = 3200
+        if text_length > MAX_CHAR_LIMIT:
+            logging.warning(f"Input text too long ({text_length} chars), truncating to {MAX_CHAR_LIMIT} chars.")
+            full_text = full_text[:MAX_CHAR_LIMIT]
+
+        # Define a custom prompt to instruct concise summarization in bullet points.
+        custom_prompt_template = """
+        You are an expert summarizer. Summarize the following text into a concise summary using bullet points.
+        Ensure that the final summary is no longer than 20-30 bullet points and fits within 15-20 lines.
+        Focus only on the most critical points.
+
+        Text to summarize:
+        {text}
+
+        Summary:
+        """
+        prompt = PromptTemplate(input_variables=["text"], template=custom_prompt_template)
+
+        # Use the 'stuff' chain type to send a single LLM request with our custom prompt.
+        chain = load_summarize_chain(self.generation_llm, chain_type="stuff", prompt=prompt)
+
+        # Wrap the full text in a single Document object (chain expects a list of Documents)
+        docs = [Document(page_content=full_text)]
+
+        # Generate the summary
+        summary = chain.invoke(docs)
+        return summary['output_text']
+
+    def generate_questions(self, chunks: any):
+        logging.info("Generating sample questions ...")
+
+        # Use the top 30 chunks or fewer
+        limited_chunks = chunks[:30]
+
+        # Combine text from chunks
+        full_text = "\n".join(chunk['text'] for chunk in limited_chunks)
+        text_length = len(full_text)
+        logging.info(f"Total text length for questions: {text_length}")
+
+        MAX_CHAR_LIMIT = 3200
+        if text_length > MAX_CHAR_LIMIT:
+            logging.warning(f"Input text too long ({text_length} chars), truncating to {MAX_CHAR_LIMIT} chars.")
+            full_text = full_text[:MAX_CHAR_LIMIT]
+
+        # Prompt template for generating questions
+        question_prompt_template = """
+        You are an AI expert at creating questions from documents.
+
+        Based on the text below, generate not less than 20 insightful and highly relevant sample questions that a user might ask to better understand the content.
+
+        **Instructions:**
+        - Questions must be specific to the document's content and context.
+        - Avoid generic questions like 'What is this document about?'
+        - Do not include numbers, prefixes (e.g., '1.', '2.'), or explanations (e.g., '(Clarifies...)').
+        - Each question should be a single, clear sentence ending with a question mark.
+        - Focus on key concepts, processes, components, or use cases mentioned in the text.
+
+        Text:
+        {text}
+
+        Output format:
+        What is the purpose of the Communication Server in Collateral Management?
+        How does the system handle data encryption for secure communication?
+        ...
+        """
+        prompt = PromptTemplate(input_variables=["text"], template=question_prompt_template)
+
+        chain = load_summarize_chain(self.generation_llm, chain_type="stuff", prompt=prompt)
+        docs = [Document(page_content=full_text)]
+
+        try:
+            result = chain.invoke(docs)
+            question_output = result.get("output_text", "").strip()
+
+            # Clean and parse the output into a list of questions
+            questions = []
+            for line in question_output.split("\n"):
+                # Remove any leading/trailing whitespace, numbers, or bullet points
+                cleaned_line = line.strip().strip("-*1234567890. ").rstrip(".")
+                # Remove any explanation in parentheses
+                cleaned_line = cleaned_line.split("(")[0].strip()
+                # Ensure the line is a valid question (ends with '?' and is not empty)
+                if cleaned_line and cleaned_line.endswith("?"):
+                    questions.append(cleaned_line)
+
+            # Limit to 10 questions
+            questions = questions[:10]
+            logging.info(f"Generated questions: {questions}")
+            return questions
+        except Exception as e:
+            logging.error(f"Error generating questions: {e}")
+            return []
+
+    def generate_summary(self, chunks: Any, toc_text: Any, summary_type: str = "medium") -> str:
+        """
+        Generate a summary of the document using LangChain's summarization chains.
+
+        Args:
+            vector_store_manager: Instance of VectorStoreManager with a FAISS vector store.
+            summary_type (str): Type of summary ("small", "medium", "detailed").
+            k (int): Number of chunks to retrieve from the vector store.
+            include_toc (bool): Whether to include the table of contents (if available).
+
+        Returns:
+            str: Generated summary.
+
+        Raises:
+            ValueError: If summary_type is invalid or vector store is not initialized.
+        """
+
+        # Define chunk retrieval parameters based on summary type
+        if summary_type == "small":
+            k = min(k, 3)  # Fewer chunks for small summary
+            chain_type = "stuff"  # Use stuff for small summaries
+            word_count = "50-100"
+        elif summary_type == "medium":
+            k = min(k, 10)
+            chain_type = "map_reduce"  # Use map-reduce for medium summaries
+            word_count = "200-400"
+        else:  # detailed
+            k = min(k, 20)
+            chain_type = "map_reduce"  # Use map-reduce for detailed summaries
+            word_count = "500-1000"
+
+        # Define prompts
+        if chain_type == "stuff":
+            prompt = PromptTemplate(
+                input_variables=["text"],
+                template=(
+                    "Generate a {summary_type} summary ({word_count} words) of the following document excerpts. "
+                    "Focus on key points and ensure clarity. Stick strictly to the provided text:\n\n"
+                    "{toc_prompt}{text}"
+                ).format(
+                    summary_type=summary_type,
+                    word_count=word_count,
+                    toc_prompt="Table of Contents:\n{toc_text}\n\n" if toc_text else ""
+                )
+            )
+            chain = load_summarize_chain(
+                llm=self.generation_llm,
+                chain_type="stuff",
+                prompt=prompt
+            )
+        else:  # map_reduce
+            map_prompt = PromptTemplate(
+                input_variables=["text"],
+                template=(
+                    "Summarize the following document excerpt in 1-2 sentences, focusing on key points. "
+                    "Consider the document's structure from this table of contents:\n\n"
+                    "Table of Contents:\n{toc_text}\n\nExcerpt:\n{text}"
+                ).format(toc_text=toc_text if toc_text else "Not provided")
+            )
+            combine_prompt = PromptTemplate(
+                input_variables=["text"],
+                template=(
+                    "Combine the following summaries into a cohesive {summary_type} summary "
+                    "({word_count} words) of the document. Ensure clarity, avoid redundancy, and "
+                    "organize by key themes or sections if applicable:\n\n{text}"
+                ).format(summary_type=summary_type, word_count=word_count)
+            )
+            chain = load_summarize_chain(
+                llm=self.generation_llm,
+                chain_type="map_reduce",
+                map_prompt=map_prompt,
+                combine_prompt=combine_prompt,
+                return_intermediate_steps=False
+            )
+
+        # Run the chain
+        try:
+            logging.info(f"Generating {summary_type} summary with {len(chunks)} chunks")
+            summary = chain.run(chunks)
+            logging.info(f"{summary_type.capitalize()} summary generated successfully")
+            return summary
+        except Exception as e:
+            logging.error(f"Error generating summary: {str(e)}")
+            return f"Error generating summary: {str(e)}"
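A minimal sketch of how the new summarization and question helpers might be exercised. It assumes llm_manager is an LLMManager whose generation_llm is already initialized; the chunk list mirrors the {'text': ...} dictionaries both helpers iterate over and is made up for illustration.

# Hypothetical usage (illustration only, not part of this commit).
chunks = [
    {"text": "The system ingests PDF documents and splits them into pages."},
    {"text": "Embeddings are stored in a FAISS index keyed by doc_id."},
]

summary = llm_manager.generate_summary_v0(chunks=chunks)    # bullet-point summary string
questions = llm_manager.generate_questions(chunks=chunks)   # list of up to 10 parsed questions
print(summary)
print(questions)

Both helpers cap input at 30 chunks and 3200 characters so the single "stuff" chain call stays inside the model's context window; the new generate_summary method, by contrast, switches between "stuff" and "map_reduce" chains depending on the requested summary length.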
retriever/vector_store_manager.py
CHANGED
@@ -63,7 +63,7 @@ class VectorStoreManager:
         self.vector_store.save_local(self.embedding_path)
         logging.info(f"Vector store updated and saved to {self.embedding_path}")

-    def search(self, query, doc_id, k=
+    def search(self, query, doc_id, k=10):
         """
         Search the vector store for relevant chunks, filtered by doc_id.
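The only change here gives search a default of k=10. A hedged sketch of the call this enables; vector_manager and the doc_id value are assumptions, not part of the commit.

# Hypothetical calls (illustration only).
results = vector_manager.search("collateral workflow", doc_id="doc-1")        # k defaults to 10
top_three = vector_manager.search("collateral workflow", doc_id="doc-1", k=3)  # explicit override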