import os

import streamlit as st
from langchain_anthropic import ChatAnthropic
from langchain_community.retrievers import PineconeHybridSearchRetriever
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import Runnable, RunnableParallel, RunnablePassthrough
from langchain_openai import OpenAIEmbeddings
from pinecone import Pinecone
from pinecone_text.sparse import SpladeEncoder
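
# Pipeline overview: Streamlit chat UI -> Pinecone hybrid retrieval (dense
# OpenAI embeddings + sparse SPLADE) -> Claude 3 Haiku -> streamed answer.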
# Streamlit page configuration (st.set_page_config must be the first Streamlit call)
st.set_page_config(page_title="Chat with HiPerGator Docs", page_icon="🟩")
st.markdown("<h1 style='text-align: center;'>Welcome to the HiPerGator Bot. Please type your question below:</h1>", unsafe_allow_html=True)
model_name = "claude-3-haiku-20240307"
# ========== PART 1: API keys and dense embeddings ==========
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
PINE_API_KEY = os.getenv("PINE_API_KEY")
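# These keys are read from the environment; nothing is prompted for at runtime.
# Typical setup before launching the app (shell, variable names as used above):
#   export OPENAI_API_KEY="sk-..."
#   export ANTHROPIC_API_KEY="sk-ant-..."
#   export PINE_API_KEY="..."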
embed = OpenAIEmbeddings(
    model="text-embedding-3-small",
    openai_api_key=OPENAI_API_KEY,
    dimensions=768,
)
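# Note: text-embedding-3-small natively emits 1536-dimensional vectors; the
# dimensions=768 argument shortens them, and 768 must match the dimension the
# Pinecone index was created with (assumption: the 'splade' index uses 768 dims).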
# ========== PART 2: Pinecone index ==========
index_name = "splade"
namespace_name = "HiperGator"
pc = Pinecone(api_key=PINE_API_KEY)
index = pc.Index(index_name)
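# Per Pinecone's sparse-dense docs, hybrid queries require an index created
# with the dotproduct metric; cosine/euclidean indexes reject sparse values.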
# ========== PART 3: SPLADE sparse encoder + hybrid retriever ==========
splade_encoder = SpladeEncoder()
retriever = PineconeHybridSearchRetriever(
    embeddings=embed, sparse_encoder=splade_encoder, index=index, namespace=namespace_name
)
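# A minimal sanity check (assumed usage; retrievers are Runnables, so .invoke
# takes the query string and returns a list of Documents):
#   docs = retriever.invoke("How do I request GPU resources?")
#   print(docs[0].page_content, docs[0].metadata.get("source"))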
# LangSmith tracing reads these variables from the environment directly;
# pulling them into locals here only surfaces the values for inspection.
LANGCHAIN_TRACING_V2 = os.getenv("LANGCHAIN_TRACING_V2")
LANGCHAIN_ENDPOINT = os.getenv("LANGCHAIN_ENDPOINT")
LANGCHAIN_PROJECT = os.getenv("LANGCHAIN_PROJECT")
LANGCHAIN_API_KEY = os.getenv("LANGCHAIN_API_KEY")
# ========== PART 4: RAG prompt and model ==========
template = """You are an expert in HiPerGator, the University of Florida's supercomputer, with access to its dense technical documentation. Your purpose is to provide a concise, accurate answer to the user's specific question using only the context provided from the official HiPerGator documentation.
Restrictions and guidelines:
- Focus solely on answering the user's direct question. Do not deviate to tangential topics.
- Base your response entirely on the provided documentation context. If the question cannot be answered from the given context, state that you do not have enough information to answer based on the excerpt provided.
- Refrain from making assumptions or inferences, and from providing information beyond what is explicitly stated in the documentation.
- Use precise technical language from the documentation. Avoid oversimplification.
- Do not mention being an AI language model or refer to your own training or knowledge cutoff.
- Format any code examples, commands, or file paths appropriately.
- Let the user know if additional context is needed for a more complete answer.
User's Question:
{question}
Documentation context:
{context}
"""
prompt = ChatPromptTemplate.from_template(template)
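# When the chain runs, {context} is filled with the (deduplicated) list of
# retrieved Documents and {question} with the user's query, e.g. (assumed shape):
#   prompt.invoke({"context": docs, "question": "How do I load modules?"})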
# Claude 3 Haiku chat model (reuses model_name defined above)
model = ChatAnthropic(temperature=0, anthropic_api_key=ANTHROPIC_API_KEY, model_name=model_name)
class SourceDedup(Runnable):
    """Drop retrieved documents whose `source` metadata duplicates an earlier hit."""

    def invoke(self, input, config=None):
        assert isinstance(input, dict)
        documents = input["context"]
        unique_sources = set()
        unique_documents = []
        for doc in documents:
            source = doc.metadata["source"]
            if source not in unique_sources:
                unique_sources.add(source)
                unique_documents.append(doc)
        input["context"] = unique_documents
        return input
class PassParentContent(Runnable):
    """Swap each child chunk's text for its parent document's text when the
    retriever stored it under `parent_content` (parent-document retrieval)."""

    def invoke(self, input, config=None):
        assert isinstance(input, dict)
        documents = input["context"]
        for doc in documents:
            if "parent_content" in doc.metadata:
                doc.page_content = doc.metadata["parent_content"]
        return input
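# Expected document shape for the parent-content convention (an assumption,
# based on how the index appears to have been built):
#   Document(page_content="<child chunk>",
#            metadata={"source": "<doc path>", "parent_content": "<full parent text>"})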
rag_chain = (
    RunnablePassthrough()
    | SourceDedup()
    | PassParentContent()
    | prompt
    | model
    | StrOutputParser()
)

rag_chain_with_source = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
).assign(answer=rag_chain)
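# Streaming rag_chain_with_source yields incremental dict chunks, typically one
# key per chunk, e.g. (assumed shapes):
#   {"question": "..."}            # the passthrough input
#   {"context": [Document, ...]}   # the retrieved documents, once
#   {"answer": "partial text"}     # many small answer deltas
# generate_response below relies on this shape.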
def generate_response(question):
    # Seed an empty entry in session state; answer tokens are appended as they stream.
    start = ""
    st.session_state['generated'].append(start)
    yield start
    all_sources = []
    for chunk in rag_chain_with_source.stream(question):
        if "answer" in chunk:
            st.session_state['generated'][-1] += chunk["answer"]
            yield chunk["answer"]
        elif "context" in chunk:
            # NOTE: source display does not work the same with this chain yet;
            # collect the source paths so they can be listed after the answer.
            sources = [doc.metadata["source"] for doc in chunk["context"]]
            all_sources.extend(sources)
    formatted_response = "\n\nSources:\n" + "\n".join(all_sources)
    yield formatted_response
# generate_response is a generator, so it is consumed chunk by chunk, e.g.:
#   question = "How can I do hybrid search with a pinecone database?"
#   for piece in generate_response(question):
#       print(piece, end="")
# (it touches st.session_state, so this only works inside the running app)
# ==================== THE REST OF THE STREAMLIT APP ====================
# Initialize session state variables if they don't exist
if 'generated' not in st.session_state:
    st.session_state['generated'] = []
if 'past' not in st.session_state:
    st.session_state['past'] = []
if 'messages' not in st.session_state:
    st.session_state['messages'] = [{"role": "system", "content": "You are a helpful assistant."}]
if 'total_cost' not in st.session_state:
    st.session_state['total_cost'] = 0.0
def refresh_text():
    # Replay the stored conversation (user turns in 'past', bot turns in 'generated').
    with response_container:
        for i in range(len(st.session_state['past'])):
            try:
                user_message_content = st.session_state["past"][i]
                message = st.chat_message("user")
                message.write(user_message_content)
            except IndexError:
                print("Past error")
            try:
                ai_message_content = st.session_state["generated"][i]
                message = st.chat_message("assistant")
                message.write(ai_message_content)
            except IndexError:
                print("Generated error")
response_container = st.container()
if user_input := st.chat_input("Ask a question..."):
    st.session_state['past'].append(user_input)
    refresh_text()
    st.session_state['messages'].append({"role": "user", "content": user_input})
    with response_container:
        my_generator = generate_response(user_input)
        message = st.chat_message("assistant")
        message.write_stream(my_generator)
if __name__ == "__main__":
    # result = retriever.get_relevant_documents("foo")
    # print(result[0].page_content)
    pass