rohan112 commited on
Commit
b7edee3
·
verified ·
1 Parent(s): 1d72a01

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +82 -81
app.py CHANGED
@@ -1,82 +1,83 @@
1
- import streamlit as st
2
- import random
3
- import time
4
- import os
5
- from langchain_together import ChatTogether
6
- from langchain_text_splitters import RecursiveCharacterTextSplitter
7
- from langchain_community.document_loaders import TextLoader
8
- from langchain_core.prompts import ChatPromptTemplate
9
- from langchain_community.vectorstores import FAISS
10
- from langchain_core.output_parsers import StrOutputParser
11
- from langchain_core.runnables import RunnablePassthrough
12
- from langchain_together import TogetherEmbeddings
13
-
14
- os.environ["TOGETHER_API_KEY"] = "6216ce36aadcb06c35436e7d6bbbc18b354d8140f6e805db485d70ecff4481d0"
15
-
16
- #load
17
- loader = TextLoader("Resume_data.txt")
18
- documents = loader.load()
19
-
20
- # split it into chunks
21
- text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
22
- docs = text_splitter.split_documents(documents)
23
- vectorstore = FAISS.from_documents(docs,
24
- TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval")
25
- )
26
-
27
- retriever = vectorstore.as_retriever()
28
- print("assigning model")
29
- model = ChatTogether(
30
- model="meta-llama/Llama-3-70b-chat-hf",
31
- temperature=0.0,
32
- max_tokens=500,)
33
-
34
- prompt = ChatPromptTemplate([
35
- ("system", "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. answer as if person is responding. and if user greets then greet back"),
36
- ("user", "context : {context}, Question: {question}")
37
- ])
38
-
39
- chain = (
40
- {"context": retriever, "question": RunnablePassthrough()}
41
- | prompt
42
- | model
43
- | StrOutputParser()
44
- )
45
-
46
-
47
- st.title("Chat with me")
48
-
49
- # Initialize chat history
50
- if "messages" not in st.session_state:
51
- st.session_state.messages = []
52
-
53
- # Display chat messages from history on app rerun
54
- for message in st.session_state.messages:
55
- with st.chat_message(message["role"]):
56
- st.markdown(message["content"])
57
-
58
- # Accept user input
59
- if prompt := st.chat_input("What is up?"):
60
- # Display user message in chat message container
61
- with st.chat_message("user"):
62
- st.markdown(prompt)
63
- # Add user message to chat history
64
- st.session_state.messages.append({"role": "user", "content": prompt})
65
-
66
- ############################################
67
- # Streamed response emulator
68
- def response_generator():
69
- query = f"{prompt}"
70
- if query != "None":
71
- for m in chain.stream(query):
72
- yield m
73
- time.sleep(0.05)
74
- else:
75
- yield "How can i help you?"
76
-
77
- ###########################################
78
- # Display assistant response in chat message container
79
- with st.chat_message("assistant"):
80
- response = st.write_stream(response_generator())
81
- # Add assistant response to chat history
 
82
  st.session_state.messages.append({"role": "assistant", "content": response})
 
1
+ import streamlit as st
2
+ import random
3
+ import time
4
+ import os
5
+ from langchain_together import ChatTogether
6
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
7
+ from langchain_community.document_loaders import TextLoader
8
+ from langchain_core.prompts import ChatPromptTemplate
9
+ from langchain_community.vectorstores import FAISS
10
+ from langchain_core.output_parsers import StrOutputParser
11
+ from langchain_core.runnables import RunnablePassthrough
12
+ from langchain_together import TogetherEmbeddings
13
+
14
# SECURITY: never commit API keys to source control. Both the key previously
# hardcoded here and the older one left in a comment are publicly exposed in
# this repository's commit history and must be revoked/rotated. Load the key
# from the deployment environment (e.g. a Hugging Face Space secret exported
# as an env var) and fail fast with a clear message if it is missing.
if not os.environ.get("TOGETHER_API_KEY"):
    raise RuntimeError(
        "TOGETHER_API_KEY is not set. Configure it as an environment "
        "variable (or a Space/Streamlit secret) before starting the app."
    )
16
+
17
# ---------------------------------------------------------------------------
# Retrieval-augmented generation (RAG) pipeline over the resume text.
#
# Streamlit re-executes this script top-to-bottom on every user interaction,
# so building the FAISS index (which embeds every chunk via the Together API)
# at module level repeats that expensive work on each rerun. Wrapping the
# construction in an @st.cache_resource factory builds the index, model and
# chain exactly once per server process.
# ---------------------------------------------------------------------------
@st.cache_resource
def _build_chain():
    """Load the resume, index it in FAISS, and return the RAG chain.

    Returns a runnable that maps a question string to a plain-string answer:
    question -> (retrieved context + question) -> chat model -> str.
    """
    # Load the raw resume text.
    documents = TextLoader("Resume_data.txt").load()

    # Split into ~1000-character chunks (no overlap) for embedding.
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    docs = splitter.split_documents(documents)

    # Embed the chunks and build an in-memory FAISS vector store.
    vectorstore = FAISS.from_documents(
        docs,
        TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval"),
    )
    retriever = vectorstore.as_retriever()

    # Deterministic (temperature=0.0), bounded-length chat model.
    model = ChatTogether(
        model="meta-llama/Llama-3-70b-chat-hf",
        temperature=0.0,
        max_tokens=500,
    )

    prompt = ChatPromptTemplate([
        ("system", "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. answer as if person is responding. and if user greets then greet back"),
        ("user", "context : {context}, Question: {question}"),
    ])

    # LCEL pipeline: the incoming question feeds both the retriever (as
    # "context") and the prompt (as "question"), then the model, then a
    # parser that unwraps the message into a plain string.
    return (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
    )


chain = _build_chain()
46
+
47
+
48
st.title("Chat with me")

# Initialize chat history (st.session_state persists across Streamlit reruns).
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input. The walrus target is named `user_input` (not `prompt`)
# so it cannot shadow the module-level ChatPromptTemplate of the same name —
# the original code silently rebound `prompt` here.
if user_input := st.chat_input("What is up?"):
    # Echo the user's message and record it in the history.
    with st.chat_message("user"):
        st.markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    def response_generator():
        """Stream the RAG chain's answer token-by-token.

        st.chat_input only enters this branch with a non-empty string (the
        walrus guard above), so the query is always forwarded to the chain;
        the old `query != "None"` string comparison was dead code.
        """
        for token in chain.stream(user_input):
            yield token
            # Small delay smooths the visual streaming effect.
            time.sleep(0.05)

    # Stream the assistant's reply into the chat container, then persist the
    # fully-assembled response returned by st.write_stream.
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator())
    st.session_state.messages.append({"role": "assistant", "content": response})