pdfReport / app.py
import os

import streamlit as st
from dotenv import load_dotenv

# Pinecone + LlamaIndex: ingestion and retrieval against the hosted index
from pinecone import Pinecone
from llama_index.core import VectorStoreIndex
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SemanticSplitterNodeParser
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.pinecone import PineconeVectorStore

# LangChain: chat model, memory, and the conversational retrieval chain
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Chat bubble HTML/CSS; assumed to live in a local htmlTemplates.py module
from htmlTemplates import css, bot_template, user_template
# Load environment variables
load_dotenv()
pinecone_api_key = os.getenv("PINECONE_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
index_name = os.getenv("INDEX_NAME")
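# Expected .env entries (values illustrative):
#   PINECONE_API_KEY=...
#   OPENAI_API_KEY=...
#   INDEX_NAME=annual-reports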
# Initialize Pinecone and wrap the index as a LlamaIndex vector store.
# (LangChain and LlamaIndex read OPENAI_API_KEY from the environment, so no
# global OpenAI client needs to be configured here.)
pinecone_client = Pinecone(api_key=pinecone_api_key)
pinecone_index = pinecone_client.Index(index_name)
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
# Initialize LlamaIndex components
embed_model = OpenAIEmbedding(api_key=openai_api_key)

# Ingestion pipeline: semantic chunking first, then embedding of each node.
# Attaching the vector store lets run() upsert embedded nodes into Pinecone.
pipeline = IngestionPipeline(
    transformations=[
        SemanticSplitterNodeParser(
            buffer_size=1,                       # sentences per comparison window
            breakpoint_percentile_threshold=95,  # split at the top 5% of semantic distance
            embed_model=embed_model,
        ),
        embed_model,
    ],
    vector_store=vector_store,
)
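# A minimal ingestion sketch, not wired into the UI: the directory path and
# reader are assumptions (any LlamaIndex loader would do). With the vector
# store attached above, pipeline.run() embeds the nodes and upserts them into
# Pinecone directly.
def ingest_reports(input_dir: str):
    from llama_index.core import SimpleDirectoryReader

    documents = SimpleDirectoryReader(input_dir).load_data()
    return pipeline.run(documents=documents)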
# Read-only index over the existing Pinecone data, with a top-5 retriever
vector_index = VectorStoreIndex.from_vector_store(
    vector_store=vector_store, embed_model=embed_model
)
retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=5)
query_engine = RetrieverQueryEngine(retriever=retriever)
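# Hedged usage sketch: the query engine answers one-off questions straight
# from Pinecone, independent of the Streamlit chat below (question text is
# illustrative):
#
#   response = query_engine.query("What was total revenue last fiscal year?")
#   print(response)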
def get_vectorstore(text_chunks):
    """Build an in-memory FAISS vector store from raw text chunks."""
    embeddings = OpenAIEmbeddings()
    vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
    return vectorstore
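# Usage sketch (chunk strings are placeholders): the returned store's
# .as_retriever() is what get_conversation_chain() below consumes.
#
#   vectorstore = get_vectorstore(["chunk one", "chunk two"])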
def get_conversation_chain(vectorstore):
    """Wire the chat model, buffer memory, and retriever into one chain."""
    llm = ChatOpenAI()
    memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
    conversation_chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vectorstore.as_retriever(),
        memory=memory,
    )
    return conversation_chain
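# Sketch of the wiring main() would need once documents are ingested; the
# text_chunks list is a placeholder for text extracted from uploaded PDFs:
#
#   st.session_state.conversation = get_conversation_chain(get_vectorstore(text_chunks))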
def handle_userinput(user_question):
    response = st.session_state.conversation.invoke({'question': user_question})
    st.session_state.chat_history = response['chat_history']
    # Alternate chat bubbles: even indices are user turns, odd are bot turns
    for i, message in enumerate(st.session_state.chat_history):
        template = user_template if i % 2 == 0 else bot_template
        st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
def main():
    load_dotenv()
    st.set_page_config(page_title="Chat with Annual Reports", page_icon=":books:")
    st.write(css, unsafe_allow_html=True)

    if "conversation" not in st.session_state:
        st.session_state.conversation = None
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = None

    st.header("Chat with Annual Report Documents")
    user_question = st.text_input("Ask a question about your documents:")
    # Guard: the chain is only callable once a conversation has been created
    if user_question:
        if st.session_state.conversation is not None:
            handle_userinput(user_question)
        else:
            st.warning("Load and ingest documents first to start a conversation.")


if __name__ == "__main__":
    main()