svijayanand committed
Commit 0e0157a · verified · 1 Parent(s): 6569693

Update app.py

Files changed (1)
  1. app.py +45 -91
app.py CHANGED
@@ -1,115 +1,69 @@
  import asyncio
- import logging
- from dotenv import load_dotenv
- from pathlib import Path
- from ingest_data import download_data_and_create_embedding
+ import chainlit as cl

  from langchain_community.vectorstores import FAISS
+ from langchain_openai import OpenAIEmbeddings
  from langchain_core.runnables.passthrough import RunnablePassthrough
  from langchain_core.output_parsers import StrOutputParser
  from langchain_core.prompts import ChatPromptTemplate
  from langchain_openai import ChatOpenAI
- from ingest_data import underlying_embeddings, openai_api_key
-
- from langchain.prompts import ChatPromptTemplate
- from langchain.schema import StrOutputParser
-
- import chainlit as cl
-
- logging.basicConfig(level=logging.DEBUG)
- logger = logging.getLogger(__name__)
+ from langchain.schema.runnable import Runnable, RunnablePassthrough, RunnableConfig
+ from langchain.prompts import PromptTemplate

- # load env variables
- load_dotenv()
-
- # Specify the path to the file you want to check
- file_path = Path('./faiss_index/index.faiss')
+ @cl.on_chat_start
+ async def on_chat_start():

- # Check if the file exists
- if file_path.exists():
      print("Embeddings already done, use the saved index")
      # Combine the retrieved data with the output of the LLM
      vector_store = FAISS.load_local(
          "faiss_index", underlying_embeddings, allow_dangerous_deserialization=True
      )
- else:
-     vector_store = download_data_and_create_embedding()
-
-
- # create a prompt template to send to our LLM that will incorporate the documents from our retriever with the
- # question we ask the chat model
- prompt_template = ChatPromptTemplate.from_template(
-     "Answer the {question} based on the following {context}."
- )
-
- # create a retriever for our documents
- retriever = vector_store.as_retriever()
-
- # create a chat model / LLM
- chat_model = ChatOpenAI(
-     model="gpt-4o-2024-05-13", temperature=0, api_key=openai_api_key
- )
-
- # create a parser to parse the output of our LLM
- parser = StrOutputParser()
-
- # 💻 Create the sequence (recipe)
- runnable_chain = (
-     # TODO: How do we chain the output of our retriever, prompt, model and model output parser so that we can get a good answer to our query?
-     {"context": retriever, "question": RunnablePassthrough()}
-     | prompt_template
-     | chat_model
-     | StrOutputParser()
- )
-
-
- # Asynchronous execution (e.g., for a better a chatbot user experience)
- async def call_chain_async(question):
-     output_chunks = await runnable_chain.ainvoke(question)
-     return output_chunks
-
-
- # output_stream = asyncio.run(call_chain_async("What are some good sci-fi movies from the 1980s?"))
- # print("".join(output_stream))
-
- @cl.on_chat_start
- async def on_chat_start():
-     model = ChatOpenAI(streaming=True)
-     prompt = ChatPromptTemplate.from_messages(
-         [
-             (
-                 "system",
-                 "You're a very knowledgeable historian who provides accurate and eloquent answers to historical questions.",
-             ),
-             ("human", "{question}"),
-         ]
+
+     # create a prompt template to send to our LLM that will incorporate the documents from our retriever with the
+     # question we ask the chat model
+     prompt_template = ChatPromptTemplate.from_template(
+         "Answer the {question} based on the following {context}."
+     )
+
+     # create a retriever for our documents
+     retriever = vector_store.as_retriever()
+
+     # create a chat model / LLM
+     chat_model = ChatOpenAI(
+         model="gpt-4o-2024-05-13", temperature=0, api_key=openai_api_key
+     )
+
+     # create a parser to parse the output of our LLM
+     parser = StrOutputParser()
+
+     # 💻 Create the sequence (recipe)
+     runnable_chain = (
+         # TODO: How do we chain the output of our retriever, prompt, model and model output parser so that we can get a good answer to our query?
+         {"context": retriever, "question": RunnablePassthrough()}
+         | prompt_template
+         | chat_model
+         | StrOutputParser()
      )
-     runnable = prompt | model | StrOutputParser()
+
      cl.user_session.set("runnable", runnable)


  @cl.on_message
  async def on_message(message: cl.Message):
-     try:
-         logger.info('Starting application')
-         # Your main application logic here
-         runnable = cl.user_session.get("runnable")  # type: Runnable
-
-         msg = cl.Message(content="")
-
-         async for chunk in runnable.astream(
-             {"question": message.content},
+     logger.info('Starting application')
+     # Your main application logic here
+     runnable = cl.user_session.get("runnable")  # type: Runnable
+
+     msg = cl.Message(content="")
+
+     async with cl.Step(type="run", name="QA Assistant"):
+
+         await msg.stream_token("OAI says: ")
+
+         async for chunk in runn.astream(
+             message.content,
              config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
          ):
              await msg.stream_token(chunk)
-
-         await cl.Message(content=response).send()
-         logger.info('Application finished successfully')
-     except Exception as e:
-         logger.exception("Unhandled exception: %s", e)
-

- # @cl.on_message
- # async def main(question):
- #     response = await call_chain_async(question.content)
- #     await cl.Message(content=response).send()
+     await msg.send()
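
Editor's note on the new version as committed: the removed imports still back several names the new app.py uses (underlying_embeddings and openai_api_key in on_chat_start, logger in on_message), the chain is built as runnable_chain but stored under the name runnable, and the streaming loop reads from an undefined runn. A minimal sketch of the extra lines that would make the new file self-contained follows; it assumes ingest_data still exports underlying_embeddings and openai_api_key as the previous revision did, and the log level and comment wording are editorial assumptions, not part of this commit.

# Hypothetical additions on top of the committed app.py (not part of this commit).
import logging

from dotenv import load_dotenv
from ingest_data import underlying_embeddings, openai_api_key  # assumed still exported, as in the previous revision

# Define the logger that on_message calls
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Restore loading of OPENAI_API_KEY and friends from .env, as the previous revision did
load_dotenv()

# In on_chat_start, store the chain that was actually built:
#     cl.user_session.set("runnable", runnable_chain)
#
# In on_message, stream from the stored chain instead of the undefined `runn`;
# the {"context": retriever, "question": RunnablePassthrough()} mapping expects the raw question string:
#     async for chunk in runnable.astream(
#         message.content,
#         config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
#     ):
#         await msg.stream_token(chunk)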