Spaces · Runtime error
Victor Hom committed
Commit c867e5d · 1 parent: 286bd5e

try passing prompt with chat template to check response
app.py CHANGED
@@ -14,6 +14,12 @@ from langchain.embeddings import CacheBackedEmbeddings
 from langchain.storage import LocalFileStore
 from langchain_community.vectorstores import FAISS
 from datasets import load_dataset
+from langchain_core.runnables.base import RunnableSequence
+from langchain_core.runnables.passthrough import RunnablePassthrough
+from langchain_core.output_parsers import StrOutputParser
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_openai import ChatOpenAI
+import asyncio
 
 
 
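Review note (not part of this commit): of the six new imports, only StrOutputParser, ChatPromptTemplate, and ChatOpenAI are used below; RunnableSequence, RunnablePassthrough, and asyncio are never referenced. The `|` operator used later already builds a RunnableSequence, so the explicit import is redundant unless the class is referenced directly. A minimal sketch, assuming OPENAI_API_KEY is set in the environment:

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.prompts import ChatPromptTemplate
    from langchain_core.runnables.base import RunnableSequence
    from langchain_openai import ChatOpenAI

    # Piping runnables together composes them into a RunnableSequence.
    chain = (
        ChatPromptTemplate.from_template("Say hello to {name}.")
        | ChatOpenAI(temperature=0)
        | StrOutputParser()
    )
    assert isinstance(chain, RunnableSequence)
    print(chain.invoke({"name": "IMDB"}))  # a plain str, thanks to StrOutputParser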
@@ -24,9 +30,20 @@ system_template = """You are a helpful assistant who always speaks in a pleasant
 """
 
 user_template = """{input}
+
+
+
+
 Think through your response step by step.
 """
 
+# used for imdb chat
+template = """Answer the question based only on the following context:
+{context}
+
+Question: {question}
+"""
+
 def setup():
     dataset = load_dataset("ShubhamChoksi/IMDB_Movies")
     print(dataset['train'][0])
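Review note (not part of this commit): the new `template` declares {context} and {question} placeholders, but nothing in this commit fills them; the prompt built from it in input_query below is assigned and never used. A sketch of how the template would normally be rendered:

    from langchain_core.prompts import ChatPromptTemplate

    prompt = ChatPromptTemplate.from_template(
        "Answer the question based only on the following context:\n"
        "{context}\n\nQuestion: {question}\n"
    )
    # Both variables must be supplied when the prompt is invoked.
    messages = prompt.invoke({
        "context": "High Noon (1952) is a tense, classic western.",
        "question": "What are some good western movies?",
    }).to_messages()
    print(messages[0].content)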
@@ -67,19 +84,61 @@ def setup():
 
     vector_store.save_local("./vector_store")
 
+def input_query(query):
+    openai_api_key = os.getenv("OPENAI_API_KEY")
+    embedding_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
+
+    store = LocalFileStore("./cache/")
+    embedder = CacheBackedEmbeddings.from_bytes_store(
+        embedding_model, store, namespace=embedding_model.model
+    )
     vector_store = FAISS.load_local("./vector_store", embedder, allow_dangerous_deserialization=True)
 
     retriever = vector_store.as_retriever()
 
-    query = "What are some good westerns movies?"
+    # query = "What are some good westerns movies?"
+    # embedded_query = embedding_model.embed_query(query)
+    # similar_documents = vector_store.similarity_search_by_vector(embedded_query)
+    # for page in similar_documents:
+    #     # TODO: Print the similar documents that the similarity search returns?
+    #     print(page)
+    #     print("00-----0000")
+    #     print(page)
+    #     print("-------------")
     embedded_query = embedding_model.embed_query(query)
     similar_documents = vector_store.similarity_search_by_vector(embedded_query)
-    for page in similar_documents:
-        # TODO: Print the similar documents that the similarity search returns?
-        print(page)
-        print("00-----0000")
-        print(page)
-        print("-------------")
+    similar_documents_for_prompt = list(map(lambda page: ("assistant", page.page_content), similar_documents))
+    # print(similar_documents_for_prompt)
+
+    similar_documents_for_prompt.append(("human", query))
+    # print(similar_documents_for_prompt)
+
+    # Create the components (chefs)
+    # prompt_template = # TODO: How do we create a prompt template to send to our LLM that will incorporate the documents from our retriever with the question we ask the chat model?
+    alternative_prompt = ChatPromptTemplate.from_messages(
+        similar_documents_for_prompt
+    )
+    print("alternative prompt")
+    print(alternative_prompt.messages)
+
+
+    prompt = ChatPromptTemplate.from_template(template)
+
+
+    #retriever = # TODO: How to we create a retriever for our documents?
+    retriever = vector_store.as_retriever()
+
+
+    #chat_model = # TODO: How do we create a chat model / LLM?
+    chat_model = ChatOpenAI(openai_api_key=openai_api_key, temperature=0)
+
+
+
+    #parser = # TODO: How do we create a parser to parse the output of our LLM?
+    parser = StrOutputParser()
+    runnable_chain = alternative_prompt | chat_model | parser
+    return alternative_prompt, chat_model, parser
+
 
 @cl.on_chat_start # marks a function that will be executed at the start of a user session
 async def start_chat():
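Review note (not part of this commit): three things stand out in input_query. First, ChatPromptTemplate.from_messages parses each (role, text) tuple as an f-string template, so a literal { or } inside a retrieved page_content will raise a missing-variable error when the chain is invoked; braces need to be doubled. Second, runnable_chain is assembled and immediately discarded; the function returns the three parts instead. Third, the second `retriever = vector_store.as_retriever()` and the from_template `prompt` are dead code. A sketch of the brace-escaping fix, using a hypothetical docs_to_prompt helper:

    from langchain_core.prompts import ChatPromptTemplate

    def _escape_braces(text: str) -> str:
        # Doubled braces are treated as literal characters, not variables.
        return text.replace("{", "{{").replace("}", "}}")

    def docs_to_prompt(similar_documents, query):
        messages = [("assistant", _escape_braces(doc.page_content))
                    for doc in similar_documents]
        messages.append(("human", _escape_braces(query)))
        return ChatPromptTemplate.from_messages(messages)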
@@ -94,7 +153,7 @@ async def start_chat():
     setup()
     cl.user_session.set("settings", settings)
 
-
+# need to pass the query to the input_query function
 @cl.on_message # marks a function that should be run each time the chatbot receives a message from a user
 async def main(message: cl.Message):
     settings = cl.user_session.get("settings")
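Review note (not part of this commit): input_query rebuilds the embedder and reloads the FAISS index on every incoming message. Since start_chat already runs once per session, the retriever could be built there and stashed in the user session instead. A sketch under that assumption, reusing app.py's existing imports (os, chainlit as cl, OpenAIEmbeddings, LocalFileStore, CacheBackedEmbeddings, FAISS):

    @cl.on_chat_start
    async def start_chat():
        openai_api_key = os.getenv("OPENAI_API_KEY")
        embedding_model = OpenAIEmbeddings(openai_api_key=openai_api_key)
        store = LocalFileStore("./cache/")
        embedder = CacheBackedEmbeddings.from_bytes_store(
            embedding_model, store, namespace=embedding_model.model
        )
        vector_store = FAISS.load_local(
            "./vector_store", embedder, allow_dangerous_deserialization=True
        )
        # Built once per session; handlers fetch it with cl.user_session.get.
        cl.user_session.set("retriever", vector_store.as_retriever())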
@@ -102,24 +161,32 @@ async def main(message: cl.Message):
     client = AsyncOpenAI()
 
     print(message.content)
+    # message.content is the input query from the user
+    prompt, model, parser = input_query(message.content)
+
+    # prompt = Prompt(
+    #     provider=ChatOpenAI.id,
+    #     messages=[
+    #         PromptMessage(
+    #             role="system",
+    #             template=system_template,
+    #             formatted=system_template,
+    #         ),
+    #         PromptMessage(
+    #             role="user",
+    #             template=user_template,
+    #             formatted=user_template.format(input=message.content),
+    #         ),
+    #     ],
+    #     inputs={"input": message.content},
+    #     settings=settings,
+    # )
+
+    runnable_chain = prompt | model | parser
+    output_chunks = runnable_chain.invoke({})
+    print(''.join(output_chunks))
+    print("output chunks")
 
-    prompt = Prompt(
-        provider=ChatOpenAI.id,
-        messages=[
-            PromptMessage(
-                role="system",
-                template=system_template,
-                formatted=system_template,
-            ),
-            PromptMessage(
-                role="user",
-                template=user_template,
-                formatted=user_template.format(input=message.content),
-            ),
-        ],
-        inputs={"input": message.content},
-        settings=settings,
-    )
 
     print([m.to_openai() for m in prompt.messages])
 
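Review note (not part of this commit), and the likely source of the Space's "Runtime error" status: the trailing context line print([m.to_openai() for m in prompt.messages]) still runs, but prompt is now a ChatPromptTemplate whose .messages are prompt-template objects with no to_openai() method, so every incoming message should raise AttributeError. Also, StrOutputParser returns a single str, so ''.join(output_chunks) merely copies it, and the answer is only printed to the server log, never sent back to the user. A minimal sketch of the handler under those assumptions:

    @cl.on_message
    async def main(message: cl.Message):
        # message.content is the user's query.
        prompt, model, parser = input_query(message.content)
        runnable_chain = prompt | model | parser
        # input_query already baked the retrieved documents and the query into
        # the prompt, so there are no template variables left to supply.
        output = runnable_chain.invoke({})  # a str, via StrOutputParser
        await cl.Message(content=output).send()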