ababio committed
Commit 8c67ed3
1 Parent(s): 2f80c9f

Update app.py

Files changed (1)
  1. app.py +65 -59

app.py CHANGED
@@ -1,81 +1,87 @@
--- a/app.py
  import os
  from getpass import getpass
- import gradio as gr
- from pinecone.grpc import PineconeGRPC
- from pinecone import ServerlessSpec
-
- from llama_index.vector_stores import PineconeVectorStore
- from llama_index import VectorStoreIndex
- from llama_index.retrievers import VectorIndexRetriever
  from llama_index.node_parser import SemanticSplitterNodeParser
  from llama_index.embeddings import OpenAIEmbedding
  from llama_index.ingestion import IngestionPipeline
  from llama_index.query_engine import RetrieverQueryEngine

- pinecone_api_key = os.getenv("PINECONE_API_KEY")
  openai_api_key = os.getenv("OPENAI_API_KEY")

- # This will be the model we use both for Node parsing and for vectorization
  embed_model = OpenAIEmbedding(api_key=openai_api_key)
-
- # Define the initial pipeline
  pipeline = IngestionPipeline(
      transformations=[
          SemanticSplitterNodeParser(
              buffer_size=1,
              breakpoint_percentile_threshold=95,
              embed_model=embed_model,
-         ),
          embed_model,
-     ],
- )
-
- # Initialize connection to Pinecone
- pc = PineconeGRPC(api_key=pinecone_api_key)
- index_name = os.getenv("INDEX_NAME")
-
- # Initialize your index
- pinecone_index = pc.Index(index_name)
-
- # Initialize VectorStore
- vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
-
- pinecone_index.describe_index_stats()
-
- # Due to how LlamaIndex works here, if your OpenAI API key was
- # not set as an environment variable before, you have to set it at this point
- if not os.getenv('OPENAI_API_KEY'):
-     os.environ['OPENAI_API_KEY'] = openai_api_key
-
- # Instantiate VectorStoreIndex object from our vector_store object
  vector_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
-
- # Grab 5 search results
  retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=5)
-
- # Pass in your retriever from above, which is configured to return the top 5 results
  query_engine = RetrieverQueryEngine(retriever=retriever)

- # Define the function to handle user input and return the query response
- def query_annual_report(summary_request):
-     llm_query = query_engine.query(summary_request)
-     return llm_query.response
-
- # Create the Gradio interface
- iface = gr.Interface(
-     fn=query_annual_report,
-     inputs="text",
-     outputs="text",
-     title="Annual Report Summary Query",
-     description="Enter your query to get the summary of the annual report."
- )
-
- # Launch the Gradio app
- iface.launch()
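
The removed version is a self-contained Gradio app over a Pinecone-backed LlamaIndex query engine. Distilled to a minimal standalone sketch (assuming PINECONE_API_KEY, OPENAI_API_KEY, and INDEX_NAME are set in the environment and the index already holds embedded annual-report chunks), the query path it serves is:

    import os
    from pinecone.grpc import PineconeGRPC
    from llama_index.vector_stores import PineconeVectorStore
    from llama_index import VectorStoreIndex
    from llama_index.retrievers import VectorIndexRetriever
    from llama_index.query_engine import RetrieverQueryEngine

    # Wrap the existing Pinecone index in a LlamaIndex vector store
    pc = PineconeGRPC(api_key=os.getenv("PINECONE_API_KEY"))
    vector_store = PineconeVectorStore(pinecone_index=pc.Index(os.getenv("INDEX_NAME")))

    # Index over the vector store; retrieve the top 5 chunks per query
    vector_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
    retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=5)
    query_engine = RetrieverQueryEngine(retriever=retriever)

    print(query_engine.query("Summarize the key revenue drivers.").response)

The rewritten Streamlit version follows.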
+++ b/app.py
  import os
  from getpass import getpass
+ import streamlit as st
+ from dotenv import load_dotenv
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.llms import OpenAI
+ from langchain.vectorstores import FAISS
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from pinecone import Pinecone
+ from llama_index import VectorStoreIndex
+ from llama_index.retrievers import VectorIndexRetriever
+ from llama_index.vector_stores import PineconeVectorStore
  from llama_index.node_parser import SemanticSplitterNodeParser
  from llama_index.embeddings import OpenAIEmbedding
  from llama_index.ingestion import IngestionPipeline
  from llama_index.query_engine import RetrieverQueryEngine
+ from htmlTemplates import css, bot_template, user_template  # assumed local module with the chat HTML/CSS templates

+ # Load environment variables
+ load_dotenv()
+ pinecone_api_key = os.getenv("PINECONE_API_KEY")
  openai_api_key = os.getenv("OPENAI_API_KEY")
+ index_name = os.getenv("INDEX_NAME")

+ # Initialize the Pinecone client and wrap the existing index in a vector store
+ pc = Pinecone(api_key=pinecone_api_key)
+ pinecone_index = pc.Index(index_name)
+ vector_store = PineconeVectorStore(pinecone_index=pinecone_index)

+ # Initialize LlamaIndex components
  embed_model = OpenAIEmbedding(api_key=openai_api_key)
  pipeline = IngestionPipeline(
      transformations=[
          SemanticSplitterNodeParser(
              buffer_size=1,
              breakpoint_percentile_threshold=95,
              embed_model=embed_model,
+         ),
          embed_model,
+     ],
+ )

  vector_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
  retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=5)
  query_engine = RetrieverQueryEngine(retriever=retriever)

+ def get_vectorstore(text_chunks):
+     # Embed raw text chunks into an in-memory FAISS store
+     embeddings = OpenAIEmbeddings()
+     vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
+     return vectorstore
+
+ def get_conversation_chain(vectorstore):
+     # Conversational retrieval chain that carries chat history between turns
+     llm = OpenAI()
+     memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
+     conversation_chain = ConversationalRetrievalChain.from_llm(
+         llm=llm,
+         retriever=vectorstore.as_retriever(),
+         memory=memory
+     )
+     return conversation_chain
+
+ def handle_userinput(user_question):
+     if st.session_state.conversation is None:
+         st.error("The conversation chain has not been initialized yet.")
+         return
+     response = st.session_state.conversation({'question': user_question})
+     st.session_state.chat_history = response['chat_history']
+
+     # Even indices are user turns, odd indices are bot replies
+     for i, message in enumerate(st.session_state.chat_history):
+         if i % 2 == 0:
+             st.write(user_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
+         else:
+             st.write(bot_template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
+
+ def main():
+     load_dotenv()
+     st.set_page_config(page_title="Chat with Annual Reports", page_icon=":books:")
+     st.write(css, unsafe_allow_html=True)
+
+     if "conversation" not in st.session_state:
+         # Never populated in this version; see the wiring sketch after the diff
+         st.session_state.conversation = None
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = None
+
+     st.header("Chat with Annual Report Documents")
+     user_question = st.text_input("Ask a question about your documents:")
+     if user_question:
+         handle_userinput(user_question)
+
+ if __name__ == "__main__":
+     main()
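
As committed, nothing ever assigns st.session_state.conversation (hence the guard in handle_userinput), so get_vectorstore and get_conversation_chain are never called. A minimal wiring sketch for main(), assuming the classic LangChain APIs imported above; load_report_chunks is a hypothetical placeholder for your own document loading and splitting step, not part of the repo:

    def load_report_chunks():
        # Placeholder: replace with real document loading and splitting
        return [
            "Revenue grew 12% year over year, driven by subscription sales.",
            "Operating margin improved to 18% on lower logistics costs.",
        ]

    def main():
        load_dotenv()
        st.set_page_config(page_title="Chat with Annual Reports", page_icon=":books:")
        st.write(css, unsafe_allow_html=True)

        if "conversation" not in st.session_state:
            # Build the FAISS store and conversational chain once per session
            vectorstore = get_vectorstore(load_report_chunks())
            st.session_state.conversation = get_conversation_chain(vectorstore)
        if "chat_history" not in st.session_state:
            st.session_state.chat_history = None

        st.header("Chat with Annual Report Documents")
        user_question = st.text_input("Ask a question about your documents:")
        if user_question:
            handle_userinput(user_question)

With the chain created up front, handle_userinput works as written: ConversationBufferMemory returns the accumulated chat_history with each response, and the loop renders alternating user and bot turns.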