# HiPerGator-Docs / app.py
import os

import streamlit as st
from langchain_anthropic import ChatAnthropic
from langchain_community.retrievers import PineconeHybridSearchRetriever
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough, Runnable
from langchain_openai import OpenAIEmbeddings
from pinecone import Pinecone
from pinecone_text.sparse import SpladeEncoder
# Streamlit page configuration (st.set_page_config must be the first Streamlit call)
st.set_page_config(page_title="Chat with HiPerGator Docs", page_icon="🟩")
st.markdown("<h1 style='text-align: center;'>How can I help you?</h1>", unsafe_allow_html=True)
st.sidebar.title("Options")
model_name = "claude-3-haiku-20240307"
# ========== PART 1 ==========
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
PINE_API_KEY = os.getenv("PINE_API_KEY")
embed = OpenAIEmbeddings(
    model='text-embedding-3-small',
    openai_api_key=OPENAI_API_KEY,
    dimensions=768,
)
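# text-embedding-3-small natively returns 1536-dim vectors; the 'dimensions'
# parameter shortens them to 768, presumably to match the dimension of the
# Pinecone index used below.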
# ========== PART 2 ==========
index_name = 'splade'
namespace_name = 'HiPerGator'
pc = Pinecone(api_key=PINE_API_KEY)
index = pc.Index(index_name)
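# Note: hybrid (dense + sparse) queries require a Pinecone index created with
# the dotproduct similarity metric; this assumes the 'splade' index was built that way.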
# ========== PART 3 ==========
splade_encoder = SpladeEncoder()
retriever = PineconeHybridSearchRetriever(
    embeddings=embed, sparse_encoder=splade_encoder, index=index
)
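# The retriever blends dense (OpenAI) and sparse (SPLADE) relevance scores.
# A hedged tuning sketch, using the parameters LangChain documents for this
# retriever (namespace_name is defined above but not passed in; if the vectors
# live in the 'HiPerGator' namespace, it would be supplied here):
# retriever = PineconeHybridSearchRetriever(
#     embeddings=embed, sparse_encoder=splade_encoder, index=index,
#     namespace=namespace_name,
#     alpha=0.5,  # dense/sparse weighting: 0 = sparse only, 1 = dense only
#     top_k=4,    # number of documents to return
# )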
# ========== PART 4 ==========
# RAG prompt
template = """You are an expert on HiPerGator (the University of Florida's supercomputer) with access to its dense documentation. Please use the given context from the documentation to assist the user with their question:
Question: {question}
{context}
"""
rag_prompt = ChatPromptTemplate.from_template(template)
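# The {question} and {context} placeholders are filled by the RunnableParallel
# dict assembled below, so its keys must match these names.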
# Claude 3 Haiku (reuses model_name set above)
model = ChatAnthropic(temperature=0, anthropic_api_key=ANTHROPIC_API_KEY, model_name=model_name)
class SourceDedup(Runnable):
    """Drop retrieved documents whose 'source' metadata duplicates an earlier one."""
    def invoke(self, input, config=None):
        assert isinstance(input, dict)
        documents = input["context"]
        unique_sources = set()
        unique_documents = []
        for doc in documents:
            source = doc.metadata["source"]
            if source not in unique_sources:
                unique_sources.add(source)
                unique_documents.append(doc)
        input["context"] = unique_documents
        return input
class PassParentContent(Runnable):
    """Replace each document's content with its parent document's content when available."""
    def invoke(self, input, config=None):
        assert isinstance(input, dict)
        documents = input["context"]
        for doc in documents:
            if "parent_content" in doc.metadata:
                doc.page_content = doc.metadata["parent_content"]
        return input
rag_chain = (
    RunnablePassthrough()
    # | SourceDedup()
    # | PassParentContent()
    | rag_prompt
    | model
    | StrOutputParser()
)
rag_chain_with_source = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
).assign(answer=rag_chain)
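# Streaming this chain yields single-key dicts ({'question': ...}, {'context': [...]},
# and incremental {'answer': ...} pieces); generate_response below filters on those keys.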
def generate_response(question):
    # Stream the RAG chain's output, accumulating the answer in session state
    # so the conversation can be re-rendered on later Streamlit reruns.
    start = "Answer: "
    st.session_state['generated'].append(start)
    yield start
    for chunk in rag_chain_with_source.stream(question):
        if 'answer' in chunk:
            st.session_state['generated'][-1] += chunk['answer']
            yield chunk['answer']
        elif 'context' in chunk:
            # Sources do not work the same with this code; removing for now.
            pass
# ==================== THE REST OF THE STREAMLIT APP ====================
# Initialize session state variables if they don't exist
if 'generated' not in st.session_state:
    st.session_state['generated'] = []
if 'past' not in st.session_state:
    st.session_state['past'] = []
if 'messages' not in st.session_state:
    st.session_state['messages'] = [{"role": "system", "content": "You are a helpful assistant."}]
if 'total_cost' not in st.session_state:
    st.session_state['total_cost'] = 0.0
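# Note: 'messages' and 'total_cost' are initialized but not currently used by the
# RAG chain above; they appear to be scaffolding for history and cost tracking.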
def refresh_text():
    # Re-render the full conversation history (user prompts and assistant answers).
    with response_container:
        for i in range(len(st.session_state['past'])):
            st.chat_message("user").write(st.session_state['past'][i])
            if i < len(st.session_state['generated']):
                st.chat_message("assistant").write(st.session_state['generated'][i])
response_container = st.container()
if prompt := st.chat_input("Ask a question..."):
    st.session_state['past'].append(prompt)
    refresh_text()
    st.session_state['messages'].append({"role": "user", "content": prompt})
    with response_container:
        my_generator = generate_response(prompt)
        message = st.chat_message("assistant")
        message.write_stream(my_generator)
if __name__ == "__main__":
    # result = retriever.get_relevant_documents("foo")
    # print(result[0].page_content)
    pass