import os

import streamlit as st

# llama_index imports below use the pre-0.10 ("legacy") package layout.
from llama_index import VectorStoreIndex
from llama_index.node_parser import SemanticSplitterNodeParser
from llama_index.embeddings import OpenAIEmbedding
from llama_index.ingestion import IngestionPipeline
from llama_index.vector_stores import PineconeVectorStore
from llama_index.retrievers import VectorIndexRetriever
from llama_index.query_engine import RetrieverQueryEngine

from pinecone.grpc import PineconeGRPC
from pinecone import ServerlessSpec
|
# Read API keys from the environment; set PINECONE_API_KEY and
# OPENAI_API_KEY before launching the app.
pinecone_api_key = os.getenv("PINECONE_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")
|
# Connect to Pinecone over gRPC; "anualreport" is the name of the
# pre-existing index that holds the annual-report embeddings.
pc = PineconeGRPC(api_key=pinecone_api_key)
index_name = "anualreport"
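
# This script assumes the index already exists. If it does not, it could be
# created first; a minimal sketch (the dimension, cloud, and region here are
# assumptions, with 1536 matching OpenAI's default ada-002 embeddings):
#
#   pc.create_index(
#       name=index_name,
#       dimension=1536,
#       metric="cosine",
#       spec=ServerlessSpec(cloud="aws", region="us-east-1"),
#   )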
|
pinecone_index = pc.Index(index_name)

# Wrap the Pinecone index in a LlamaIndex vector store.
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)

# Optional sanity check: confirm the index is reachable and print its stats.
print(pinecone_index.describe_index_stats())
|
# Build a query engine over the existing vectors: the retriever fetches the
# top 5 most similar chunks, and the engine synthesizes an answer with the
# default LlamaIndex LLM (OpenAI, picked up via OPENAI_API_KEY).
vector_index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=5)
query_engine = RetrieverQueryEngine(retriever=retriever)
|
# Ingestion pipeline: split documents into chunks at semantic breakpoints
# (found with the embedding model), then embed each resulting chunk.
embed_model = OpenAIEmbedding(api_key=openai_api_key)
pipeline = IngestionPipeline(
    transformations=[
        SemanticSplitterNodeParser(
            buffer_size=1,
            breakpoint_percentile_threshold=95,
            embed_model=embed_model,
        ),
        embed_model,
    ],
)
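
# The pipeline above is only defined, never run: the app assumes the index
# was populated beforehand. A minimal (hypothetical) ingestion pass over
# documents stored under ./data could look like:
#
#   from llama_index import SimpleDirectoryReader
#   documents = SimpleDirectoryReader("./data").load_data()
#   vector_store.add(pipeline.run(documents=documents))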
|

def query_annual_report(query):
    """Send the query through the retriever-backed engine and return the answer text."""
    response = query_engine.query(query)
    return response.response
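
# Example (hypothetical) standalone call:
#   print(query_annual_report("What was total revenue last year?"))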
|
st.title("ChatGPT-like Clone with Pinecone Integration")
|
# Initialize the chat history on first load.
if "messages" not in st.session_state:
    st.session_state.messages = []
|
# Replay the conversation so far.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
|
# Handle new input: record and echo the user message, then answer it from
# the annual-report index and append the reply to the history.
if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    with st.chat_message("assistant"):
        response = query_annual_report(prompt)
        st.markdown(response)
    st.session_state.messages.append({"role": "assistant", "content": response})
|