Update app.py
app.py CHANGED
@@ -56,7 +56,7 @@ if pdf_source == "Upload a PDF file":
 elif pdf_source == "Enter a PDF URL":
     pdf_url = st.text_input("Enter PDF URL:", value="https://arxiv.org/pdf/2406.06998", key="pdf_url")
 
-    # Button to manually trigger the download
+    # ✅ Button to manually trigger the download
     if st.button("Download and Process PDF") or pdf_url.strip() != "" and st.session_state.get("pdf_path") is None:
         with st.spinner("Downloading PDF..."):
             try:
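
Note on the condition in this hunk: Python's "and" binds more tightly than "or", so the download runs whenever the button is clicked, or when a URL is present and no PDF has been processed yet. An explicitly parenthesized sketch of the same condition (the parentheses and the helper variable are ours, added purely for readability):

    should_download = st.button("Download and Process PDF") or (
        pdf_url.strip() != "" and st.session_state.get("pdf_path") is None
    )
    if should_download:
        with st.spinner("Downloading PDF..."):
            ...  # download and process the PDF
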
@@ -198,14 +198,10 @@ if query:
 
 QUERY:
 {retriever_query}
-
-
-
-
-[
-{"content": 1, "score": 0, "reasoning": "The content is relevant."},
-{"content": 2, "score": 1, "reasoning": "The content is irrelevant."}
-]
+Provide your verdict in JSON format with a single key 'score' and no preamble or explanation:
+[{{"content:1,"score": <your score either 0 or 1>,"Reasoning":<why you have chose the score as 0 or 1>}},
+{{"content:2,"score": <your score either 0 or 1>,"Reasoning":<why you have chose the score as 0 or 1>}},
+...]
 """
 
 context_relevancy_checker_prompt = PromptTemplate(input_variables=["retriever_query","context"],template=relevancy_prompt)
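
The old literal JSON example used single braces inside the template string; LangChain's PromptTemplate defaults to f-string formatting, where single braces mark input variables, which is presumably why the replacement text doubles its braces. A toy check of that escaping (illustrative only, not code from the app):

    from langchain.prompts import PromptTemplate

    # Single braces are template variables; doubled braces render as literal braces.
    toy = PromptTemplate(
        input_variables=["retriever_query"],
        template='QUERY: {retriever_query}\n[{{"content": 1, "score": 0}}]',
    )
    print(toy.format(retriever_query="what is agentic RAG?"))
    # QUERY: what is agentic RAG?
    # [{"content": 1, "score": 0}]
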
@@ -265,28 +261,6 @@ if query:
     context_relevancy_evaluation_chain = LLMChain(llm=llm_judge, prompt=context_relevancy_checker_prompt, output_key="relevancy_response")
 
     response_crisis = context_relevancy_evaluation_chain.invoke({"context":context,"retriever_query":query})
-
-    # Debug Raw LLM Output
-    st.write("🛠️ Debugging: Raw LLM Response for Relevancy:", response_crisis['relevancy_response'])
-
-    # Extract raw JSON response
-    raw_response = response_crisis['relevancy_response']
-
-    # Sanitize the response by removing `<think>` or unwanted text
-    if "<think>" in raw_response:
-        raw_response = raw_response.split("<think>")[-1]  # Keep only JSON part
-    if "</think>" in raw_response:
-        raw_response = raw_response.split("</think>")[0]  # Remove trailing text
-
-    # Try parsing the JSON safely
-    try:
-        relevancy_response = json.loads(raw_response)
-        st.write("✅ Successfully parsed JSON:", relevancy_response)  # Debugging output
-    except json.JSONDecodeError as e:
-        st.error(f"❌ Failed to parse JSON: {e}")
-        st.write("🔍 Raw LLM Response Before Parsing:", raw_response)  # Debugging output
-        relevancy_response = None  # Prevent breaking the pipeline
-
 
     pick_relevant_context_chain = LLMChain(llm=llm_judge, prompt=relevant_prompt, output_key="context_number")
 
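
The removed block stripped <think>...</think> reasoning tags (emitted by some judge models) from the raw response before parsing it as JSON. If that sanitization is still needed elsewhere, it condenses to a small helper; a sketch based on the removed lines (the function name is ours, the logic mirrors the deleted code):

    import json

    def extract_json_verdict(raw_response: str):
        # Strip <think>...</think> reasoning tags, then try to parse the rest as JSON.
        if "<think>" in raw_response:
            raw_response = raw_response.split("<think>")[-1]
        if "</think>" in raw_response:
            raw_response = raw_response.split("</think>")[0]
        try:
            return json.loads(raw_response)
        except json.JSONDecodeError:
            return None  # caller decides how to handle an unparseable verdict
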
@@ -312,13 +286,13 @@ if query:
     final_output = context_management_chain({"context":context,"retriever_query":query,"query":query})
 
     st.subheader('final_output["relevancy_response"]')
-    st.
+    st.json(final_output["relevancy_response"] )
 
     st.subheader('final_output["context_number"]')
-    st.
+    st.json(final_output["context_number"])
 
     st.subheader('final_output["relevant_contexts"]')
-    st.
+    st.json(final_output["relevant_contexts"])
 
     st.subheader('final_output["final_response"]')
-    st.
+    st.json(final_output["final_response"])
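
st.json accepts a dict, a list, or a JSON-formatted string. Since these final_output values are typically raw strings returned by LLMChain, a defensive variant (purely illustrative, not part of the commit) would fall back to plain text when a value does not parse as JSON:

    import json
    import streamlit as st

    def show_as_json_or_text(label: str, value):
        # Render with st.json when the value parses as JSON, otherwise with st.write.
        st.subheader(label)
        if isinstance(value, str):
            try:
                value = json.loads(value)
            except json.JSONDecodeError:
                st.write(value)
                return
        st.json(value)
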