import os

import numpy as np
import streamlit as st
from openai import OpenAI

# Assuming chromadb and TruLens are correctly installed and configured
import chromadb
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from trulens_eval import Tru, Feedback, Select, TruCustomApp
from trulens_eval.feedback import Groundedness
from trulens_eval.feedback.provider.openai import OpenAI as fOpenAI
from trulens_eval.tru_custom_app import instrument

# The OpenAI API key is read from the environment and reused for ChromaDB's embedding function
api_key = os.environ["OPENAI_API_KEY"]

tru = Tru()
# Define university information
university_info = """
The University of Washington, founded in 1861 in Seattle, is a public research university
with over 45,000 students across three campuses in Seattle, Tacoma, and Bothell.
As the flagship institution of the six public universities in Washington state,
UW encompasses over 500 buildings and 20 million square feet of space,
including one of the largest library systems in the world.
"""
# Initialize the OpenAI client and create an embedding for the document.
# The result is not stored here: the ChromaDB embedding function configured
# below re-embeds the document when it is added to the collection.
oai_client = OpenAI()
oai_client.embeddings.create(
    model="text-embedding-ada-002",
    input=university_info
)
# Set up ChromaDB with an OpenAI embedding function and index the document
embedding_function = OpenAIEmbeddingFunction(api_key=api_key,
                                             model_name="text-embedding-ada-002")
chroma_client = chromadb.Client()
vector_store = chroma_client.get_or_create_collection(name="Universities",
                                                      embedding_function=embedding_function)
vector_store.add(ids=["uni_info"], documents=[university_info])
# Define a minimal RAG application with instrumented retrieval, generation, and query steps
class RAG_from_scratch:
    @instrument
    def retrieve(self, query: str) -> list:
        """Retrieve the most relevant context chunks from the vector store."""
        results = vector_store.query(
            query_texts=query,
            n_results=2
        )
        return results['documents'][0]

    @instrument
    def generate_completion(self, query: str, context_str: list) -> str:
        """Generate an answer to the query using the retrieved context."""
        completion = oai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            temperature=0,
            messages=[
                {"role": "user", "content":
                    f"We have provided context information below. \n"
                    f"---------------------\n"
                    f"{context_str}"
                    f"\n---------------------\n"
                    f"Given this information, please answer the question: {query}"}
            ]
        ).choices[0].message.content
        return completion

    @instrument
    def query(self, query: str) -> str:
        """End-to-end RAG: retrieve context, then generate a grounded answer."""
        context_str = self.retrieve(query)
        completion = self.generate_completion(query, context_str)
        return completion
rag = RAG_from_scratch()
# Initialize feedback and evaluation mechanisms
fopenai = fOpenAI()
grounded = Groundedness(groundedness_provider=fopenai)

# Groundedness: are the statements in the answer supported by the retrieved context?
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
    .on(Select.RecordCalls.retrieve.rets.collect())
    .on_output()
    .aggregate(grounded.grounded_statements_aggregator)
)

# Answer relevance: does the final answer address the user's question?
f_qa_relevance = (
    Feedback(fopenai.relevance_with_cot_reasons, name="Answer Relevance")
    .on(Select.RecordCalls.retrieve.args.query)
    .on_output()
)

# Context relevance: is each retrieved chunk relevant to the question?
f_context_relevance = (
    Feedback(fopenai.qs_relevance_with_cot_reasons, name="Context Relevance")
    .on(Select.RecordCalls.retrieve.args.query)
    .on(Select.RecordCalls.retrieve.rets.collect())
    .aggregate(np.mean)
)

# Wrap the RAG app with TruLens instrumentation and the three feedback functions
tru_rag = TruCustomApp(rag,
                       app_id='RAG v1',
                       feedbacks=[f_groundedness, f_qa_relevance, f_context_relevance])
# tru.run_dashboard()
# Streamlit interface
st.title("TruLens Query with Feedback")
query = st.text_input("Ask a question about the University of Washington:")
"""
Example: When was the University of Washington founded?
"""
if st.button("Submit"):
    with st.spinner('Searching for information...'):
        # Record the RAG invocation so the feedback functions can evaluate it
        with tru_rag as recording:
            answer = rag.query(query)
        final_tru = tru.get_leaderboard(app_ids=["RAG v1"])
    st.write(answer)
    # Display feedback metrics from the TruLens leaderboard
    st.subheader("Feedback Metrics")
    st.write(final_tru)
"""
The RAG Triad
RAGs have become the standard architecture for providing LLMs with context in order to avoid hallucinations. However even RAGs can suffer from hallucination, as is often the case when the retrieval fails to retrieve sufficient context or even retrieves irrelevant context that is then weaved into the LLM’s response.
TruEra has innovated the RAG triad to evaluate for hallucinations along each edge of the RAG architecture, shown below:
"""
st.image('triad.png', caption='The RAG Triad')
"""
RAG Triad
The RAG triad is made up of 3 evaluations: context relevance, groundedness and answer relevance. Satisfactory evaluations on each provides us confidence that our LLM app is free form hallucination.
### Context Relevance

The first step of any RAG application is retrieval; to verify the quality of our retrieval, we want to make sure that each chunk of context is relevant to the input query. This is critical because this context will be used by the LLM to form an answer, so any irrelevant information in the context could be woven into a hallucination. TruLens enables you to evaluate context relevance by using the structure of the serialized record.
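
For reference, the context relevance feedback used by this app (defined earlier in this file) scores each retrieved chunk against the question captured from the instrumented `retrieve` call, then averages the scores:

```python
f_context_relevance = (
    Feedback(fopenai.qs_relevance_with_cot_reasons, name="Context Relevance")
    .on(Select.RecordCalls.retrieve.args.query)       # the user's question
    .on(Select.RecordCalls.retrieve.rets.collect())   # the retrieved context chunks
    .aggregate(np.mean)                               # average relevance across chunks
)
```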
### Groundedness

After the context is retrieved, it is then formed into an answer by an LLM. LLMs are often prone to stray from the facts provided, exaggerating or expanding to a correct-sounding answer. To verify the groundedness of our application, we can separate the response into individual claims and independently search for evidence that supports each within the retrieved context.
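
In this app, groundedness is measured by collecting the retrieved context as evidence and checking the generated answer against it, statement by statement (as defined earlier in this file):

```python
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
    .on(Select.RecordCalls.retrieve.rets.collect())   # evidence: the retrieved context
    .on_output()                                      # claims: the generated answer
    .aggregate(grounded.grounded_statements_aggregator)
)
```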
### Answer Relevance

Last, our response still needs to helpfully answer the original question. We can verify this by evaluating the relevance of the final response to the user input.
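
The answer relevance feedback used here simply compares the original question with the final answer (as defined earlier in this file):

```python
f_qa_relevance = (
    Feedback(fopenai.relevance_with_cot_reasons, name="Answer Relevance")
    .on(Select.RecordCalls.retrieve.args.query)   # the user's question
    .on_output()                                  # the final answer
)
```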
### Putting it together

By reaching satisfactory evaluations for this triad, we can make a nuanced statement about our application's correctness; our application is verified to be hallucination-free up to the limit of its knowledge base. In other words, if the vector database contains only accurate information, then the answers provided by the RAG are also accurate.
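
Concretely, this app wraps the RAG with `TruCustomApp` and attaches all three feedbacks; every query recorded inside the context manager is then scored on the full triad:

```python
tru_rag = TruCustomApp(rag,
                       app_id='RAG v1',
                       feedbacks=[f_groundedness, f_qa_relevance, f_context_relevance])

with tru_rag as recording:
    rag.query("When was the University of Washington founded?")
```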
"""