Update app.py

app.py CHANGED
@@ -144,96 +144,23 @@ st.write("📌 **Vector Store Created:**", st.session_state.vector_created)
 
 
 # ----------------- Query Input -----------------
-query = st.text_input("🔍 Ask a question about the document:")
-
 if query:
     with st.spinner("🔄 Retrieving relevant context..."):
         retriever = st.session_state.vector_store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
         retrieved_docs = retriever.invoke(query)
-
-
-        st.write("🔹 Retrieved Documents:", retrieved_docs)
-
-    if not retrieved_docs:
-        st.error("❌ No relevant documents retrieved! Try a different query.")
-    else:
-        # Ensure extracted content is formatted correctly
-        context = [d.page_content for d in retrieved_docs]
-
-        if isinstance(context, list):  # Convert list to string for LLMChain
-            context_str = "\n".join(context)
-        else:
-            context_str = str(context)
-
-        st.success("✅ Context retrieved successfully!")
-        st.write("🔹 Extracted Context:", context_str)
+        context = [d.page_content for d in retrieved_docs]
+        st.success("✅ Context retrieved successfully!")
 
 # ----------------- Run Individual Chains Explicitly -----------------
+    context_relevancy_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["retriever_query", "context"], template=relevancy_prompt), output_key="relevancy_response")
+    relevant_context_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["relevancy_response"], template=relevant_context_picker_prompt), output_key="context_number")
+    relevant_contexts_chain = LLMChain(llm=llm_judge, prompt=PromptTemplate(input_variables=["context_number", "context"], template=response_synth), output_key="relevant_contexts")
+    response_chain = LLMChain(llm=rag_llm, prompt=PromptTemplate(input_variables=["query", "context"], template=rag_prompt), output_key="final_response")
 
-
-        context_relevancy_chain = LLMChain(
-            llm=llm_judge,
-            prompt=PromptTemplate(
-                input_variables=["retriever_query", "context"],
-                template=relevancy_prompt
-            ),
-            output_key="relevancy_response"
-        )
-
-        relevant_context_chain = LLMChain(
-            llm=llm_judge,
-            prompt=PromptTemplate(
-                input_variables=["relevancy_response"],
-                template=relevant_context_picker_prompt
-            ),
-            output_key="context_number"
-        )
-
-        relevant_contexts_chain = LLMChain(
-            llm=llm_judge,
-            prompt=PromptTemplate(
-                input_variables=["context_number", "context"],
-                template=response_synth
-            ),
-            output_key="relevant_contexts"
-        )
-
-        response_chain = LLMChain(
-            llm=rag_llm,
-            prompt=PromptTemplate(
-                input_variables=["query", "context"],
-                template=rag_prompt
-            ),
-            output_key="final_response"
-        )
-
-        # ----------------- Fix: Ensuring All Keys Exist -----------------
-
-        response_crisis = context_relevancy_chain.invoke({
-            "context": context_str,
-            "retriever_query": query
-        })
-
-        # Debugging: Show intermediate response
-        st.write("🔍 Context Relevancy Response:", response_crisis["relevancy_response"])
-
-        relevant_response = relevant_context_chain.invoke({
-            "relevancy_response": response_crisis["relevancy_response"]
-        })
-
-        st.write("🔍 Picked Relevant Contexts:", relevant_response["context_number"])
-
-        contexts = relevant_contexts_chain.invoke({
-            "context_number": relevant_response["context_number"],
-            "context": context_str  # Ensure correct format
-        })
-
-        st.write("🔍 Extracted Relevant Contexts:", contexts["relevant_contexts"])
-
-        final_response = response_chain.invoke({
-            "query": query,
-            "context": contexts["relevant_contexts"]
-        })
+    response_crisis = context_relevancy_chain.invoke({"context": context, "retriever_query": query})
+    relevant_response = relevant_context_chain.invoke({"relevancy_response": response_crisis["relevancy_response"]})
+    contexts = relevant_contexts_chain.invoke({"context_number": relevant_response["context_number"], "context": context})
+    final_response = response_chain.invoke({"query": query, "context": contexts["relevant_contexts"]})
 
 # ----------------- Display All Outputs -----------------
 st.markdown("### Context Relevancy Evaluation")
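A note on this hunk: the old revision joined the retrieved page contents into `context_str` before invoking any chain, while the rewrite passes the raw `context` list straight into `invoke`. `PromptTemplate` interpolates variables via `str()`, so a list renders inside the prompt with brackets and quotes. If the judge prompts expect plain text, re-joining first preserves the old behavior; a minimal sketch, assuming the names defined in the diff above:

    # Sketch only: retrieved_docs, context_relevancy_chain, and query are
    # the objects defined in the diff above.
    context_str = "\n".join(doc.page_content for doc in retrieved_docs)
    response_crisis = context_relevancy_chain.invoke(
        {"context": context_str, "retriever_query": query}
    )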
@@ -246,4 +173,13 @@ if query:
     st.json(contexts["relevant_contexts"])
 
     st.subheader("context_relevancy_evaluation_chain Statement")
+    st.json(final_response["relevancy_response"])
+
+    st.subheader("pick_relevant_context_chain Statement")
+    st.json(final_response["context_number"])
+
+    st.subheader("relevant_contexts_chain Statement")
+    st.json(final_response["relevant_contexts"])
+
+    st.subheader("RAG Response Statement")
     st.json(final_response["final_response"])
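One likely runtime problem in this hunk: every statement is read from `final_response`, but an `LLMChain.invoke` call returns only that chain's inputs plus its own `output_key`. Here `final_response` carries the key `final_response` (plus `query` and `context`), not `relevancy_response`, `context_number`, or `relevant_contexts`, so the first three `st.json` lookups would raise `KeyError`. A minimal sketch of a fix, reading each key from the result dict of the chain that produced it (names as defined in the first hunk):

    # Each invoke() result exposes only its own output_key, so pull each
    # statement from the corresponding result dict.
    st.subheader("context_relevancy_evaluation_chain Statement")
    st.json(response_crisis["relevancy_response"])

    st.subheader("pick_relevant_context_chain Statement")
    st.json(relevant_response["context_number"])

    st.subheader("relevant_contexts_chain Statement")
    st.json(contexts["relevant_contexts"])

    st.subheader("RAG Response Statement")
    st.json(final_response["final_response"])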