|
import streamlit as st |
|
from langchain.document_loaders import WikipediaLoader |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain.vectorstores import FAISS |
|
from langchain.embeddings import HuggingFaceEmbeddings |
|
from langchain.llms import Ollama |
|
from langchain.chains import RetrievalQA |
|
|
|
|
|
# --- Page chrome and user inputs ------------------------------------------
st.set_page_config(page_title="Cultural Bias Explorer")

# NOTE(review): the original title began with mojibake ("π ") — almost
# certainly a UTF-8 emoji mangled in transit; replaced with an intact globe
# emoji. Confirm against the original intent.
st.title("🌍 Cultural Bias Explorer in LLMs (RAG + LangChain)")

st.markdown("Explore how answers vary across cultures using region-specific knowledge bases.")

# The selected region doubles as the Wikipedia search topic used to build
# the region-specific knowledge base (see build_retriever below).
region = st.selectbox("Choose Cultural Region:", ["India", "United States"])

prompt = st.text_input("Enter your question here:")
|
|
|
|
|
# --- Model backends --------------------------------------------------------
# Generation: a locally served Llama 3 via Ollama.
# Embeddings: a compact sentence-transformer for indexing retrieved passages.
_EMBED_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"

llm = Ollama(model="llama3")
embeddings = HuggingFaceEmbeddings(model_name=_EMBED_MODEL_NAME)
|
|
|
|
|
@st.cache_resource
def build_retriever(region_topic, max_docs=3):
    """Build (and cache) a FAISS-backed retriever over Wikipedia pages.

    Args:
        region_topic: Wikipedia search query — here, the selected region name.
        max_docs: Maximum number of Wikipedia documents to load. Defaults to 3
            (previously a hard-coded constant; exposed for reuse).

    Returns:
        A retriever over 500-character chunks with 100-character overlap.
    """
    # Fetch up to `max_docs` Wikipedia pages matching the topic.
    documents = WikipediaLoader(query=region_topic, load_max_docs=max_docs).load()

    # Split into small overlapping chunks so retrieval stays fine-grained.
    splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    chunks = splitter.split_documents(documents)

    # NOTE(review): uses the module-level `embeddings` object. st.cache_resource
    # keys only on the function arguments, so swapping the embedding model
    # requires clearing the cache — confirm this is acceptable.
    index = FAISS.from_documents(chunks, embeddings)
    return index.as_retriever()
|
|
|
|
|
if st.button("Generate Answer"):
    # Guard: require a non-empty prompt before doing any retrieval work.
    if not prompt.strip():
        st.warning("Please enter a prompt.")
    else:
        # Build (or fetch from the Streamlit cache) the region-specific
        # retriever, then wire it into a RetrievalQA chain around the LLM.
        retriever = build_retriever(region)
        qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
        with st.spinner("Thinking..."):
            answer = qa_chain.run(prompt)
        # NOTE(review): the original header contained mojibake ("β¨") — a
        # garbled UTF-8 emoji; replaced with an intact sparkles emoji.
        st.markdown("#### ✨ Region-Specific Answer:")
        st.success(answer)
|
|