Update app.py
app.py
CHANGED
@@ -699,7 +699,7 @@ chain_gemini,memory_gemini = custom_ConversationalRetrievalChain(
 memory_gemini.clear()
 """
 
-
+
 chain = ConversationalRetrievalChain.from_llm(
 condense_question_prompt=standalone_question_prompt,
 combine_docs_chain_kwargs={'prompt': answer_prompt},
@@ -715,7 +715,7 @@ chain = ConversationalRetrievalChain.from_llm(
 verbose= False,
 return_source_documents=True
 )
-
+
 
 """
 # let's invoke the chain
@@ -869,6 +869,7 @@ with gr.Blocks(css=css) as demo:
 gr.Markdown("""## Ask questions of *needs assessment* experts,
 ## get responses from a *needs assessment experts*
 Ask questions of all of them, or pick your expert below.
+Experts only reply based on their prior writings!
 """ ,
 elem_id="header")
 
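
For context, the code these hunks touch builds a LangChain ConversationalRetrievalChain. The sketch below shows that call pattern with the keyword arguments visible in the diff; llm, retriever, standalone_question_prompt, and answer_prompt are placeholders standing in for objects defined elsewhere in app.py, so treat this as an illustration of the pattern rather than the file's exact code.

# Minimal sketch of the pattern around app.py lines 703-717.
# llm, retriever, standalone_question_prompt, and answer_prompt are assumed
# to be created earlier in the file; they are placeholders here.
from langchain.chains import ConversationalRetrievalChain

chain = ConversationalRetrievalChain.from_llm(
    llm=llm,                                   # assumed: the chat model used by the Space
    retriever=retriever,                       # assumed: the vector-store retriever
    condense_question_prompt=standalone_question_prompt,
    combine_docs_chain_kwargs={"prompt": answer_prompt},
    verbose=False,
    return_source_documents=True,
)

# "let's invoke the chain": with no memory attached, the chain expects the
# running chat history to be passed in explicitly on each call.
result = chain.invoke(                         # or chain({...}) on older LangChain releases
    {"question": "What is a needs assessment?", "chat_history": []}
)
print(result["answer"])
print(len(result["source_documents"]))         # sources returned because return_source_documents=True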