added rest

app.py CHANGED

@@ -22,20 +22,20 @@ from langchain_community.document_loaders import PyPDFLoader
 load_dotenv()
 
 
-
+RAG_PROMPT = """
 
-
-
+CONTEXT:
+{context}
 
-
-
+QUERY:
+{question}
 
-
-
+You are a house builder and can only provide answers from the context.
+You can only provide a response in Danish.
 
-
+Don't mention in your response that you are getting it from the context.
 
-
+"""
 
 
 text_splitter = RecursiveCharacterTextSplitter(
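Hunk two (below) binds these placeholders through ChatPromptTemplate. As a quick illustration of how the template renders once both variables are filled, here is a minimal sketch; it assumes langchain-core's ChatPromptTemplate, and the Danish context/question values are invented for the example:

from langchain_core.prompts import ChatPromptTemplate

RAG_PROMPT = """
CONTEXT:
{context}

QUERY:
{question}

You are a house builder and can only provide answers from the context.
You can only provide a response in Danish.
Don't mention in your response that you are getting it from the context.
"""

rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)

# Illustrative values only; in the app the Pinecone retriever supplies
# `context` and the incoming Chainlit message supplies `question`.
print(rag_prompt.format(
    context="Kap. 5: Taghældningen må ikke overstige 45 grader.",
    question="Hvad er den maksimale taghældning?",
))

format() returns the fully rendered prompt string; in the app itself, the retrieval stage in hunk two populates {context} before the model is called.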
@@ -87,33 +87,33 @@ loader = UnstructuredPDFLoader("br_femogfirs.pdf", strategy="fast")
 data = loader.load_and_split(text_splitter)
 # data = loader.load()
 
-
+embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
 
-
-
+vector_store = Pinecone.from_documents(data, embedding_model, index_name="bygnings-regl-rag-1")
+retriever = vector_store.as_retriever()
 
-
+rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
 
-
+model = ChatOpenAI(model="gpt-3.5-turbo")
 
 @cl.on_chat_start
 async def main():
     mecanic_qa_chain = ""
-
-
-
-
-
+    mecanic_qa_chain = (
+        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
+        | RunnablePassthrough.assign(context=itemgetter("context"))
+        | rag_prompt | model | StrOutputParser()
+    )
 
     cl.user_session.set("runnable", mecanic_qa_chain)
 
 @cl.on_message
 async def on_message(message: cl.Message):
     runnable = cl.user_session.get("runnable")
-
+    msg = cl.Message(content="")
 
-
-
-
-
-
+    async for chunk in runnable.astream(
+        {"question": message.content},
+        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
+    ):
+        await msg.stream_token(chunk)
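Pieced together, app.py after this commit is shaped roughly like the sketch below. This is a reconstruction, not the actual file: the diff shows only two hunks, so the import block, the RecursiveCharacterTextSplitter arguments (truncated at the hunk boundary), and the closing msg.send() are assumptions inferred from the identifiers the hunks use and from typical Chainlit/LangChain usage; exact import paths vary by LangChain version.

from operator import itemgetter

import chainlit as cl
from dotenv import load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import UnstructuredPDFLoader
from langchain_community.vectorstores import Pinecone
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, RunnablePassthrough
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

load_dotenv()

RAG_PROMPT = """
CONTEXT:
{context}

QUERY:
{question}

You are a house builder and can only provide answers from the context.
You can only provide a response in Danish.

Don't mention in your response that you are getting it from the context.
"""

# Chunk the building-regulation PDF; the splitter's arguments are cut off
# in the diff, so defaults are used here.
text_splitter = RecursiveCharacterTextSplitter()
loader = UnstructuredPDFLoader("br_femogfirs.pdf", strategy="fast")
data = loader.load_and_split(text_splitter)

# Embed the chunks and index them in Pinecone.
embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
vector_store = Pinecone.from_documents(data, embedding_model, index_name="bygnings-regl-rag-1")
retriever = vector_store.as_retriever()

rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
model = ChatOpenAI(model="gpt-3.5-turbo")


@cl.on_chat_start
async def main():
    # Route the user's question into the retriever, keep both the retrieved
    # context and the original question, then format, generate, and parse.
    mecanic_qa_chain = (
        {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
        | RunnablePassthrough.assign(context=itemgetter("context"))
        | rag_prompt
        | model
        | StrOutputParser()
    )
    cl.user_session.set("runnable", mecanic_qa_chain)


@cl.on_message
async def on_message(message: cl.Message):
    runnable = cl.user_session.get("runnable")
    msg = cl.Message(content="")

    # Stream tokens into the Chainlit UI as the chain produces them.
    async for chunk in runnable.astream(
        {"question": message.content},
        config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
    ):
        await msg.stream_token(chunk)

    # Not shown in the hunk above, but typically required to finalize
    # the streamed message in Chainlit.
    await msg.send()

The itemgetter("question") | retriever stage is what turns the incoming {"question": ...} dict into a Pinecone similarity search, and StrOutputParser at the end yields plain-text chunks, which is what msg.stream_token expects.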