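# Cultural Bias Explorer: a Streamlit RAG demo built on LangChain.
# Run with: streamlit run app.py (filename assumed; use this file's actual name).
# Requires: streamlit, langchain, wikipedia, faiss-cpu, sentence-transformers.
# Note: on langchain >= 0.1 these imports live in langchain_community, e.g.
# from langchain_community.document_loaders import WikipediaLoader.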
import streamlit as st
from langchain.document_loaders import WikipediaLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import Ollama # You can change to ChatOpenAI or DeepSeek
from langchain.chains import RetrievalQA
# Title
st.set_page_config(page_title="Cultural Bias Explorer")
st.title("π Cultural Bias Explorer in LLMs (RAG + LangChain)")
st.markdown("Explore how answers vary across cultures using region-specific knowledge bases.")
# Inputs
region = st.selectbox("Choose Cultural Region:", ["India", "United States"])
prompt = st.text_input("Enter your question here:")
# Load LLM
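# Assumes a local Ollama server is running with the model already pulled
# (ollama pull llama3); any chat-capable model can be substituted.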
llm = Ollama(model="llama3") # You can change to "deepseek-chat" if available
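# all-MiniLM-L6-v2 is a small, CPU-friendly sentence-transformers model,
# downloaded from the Hugging Face Hub on first use.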
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# Function to build retriever
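# st.cache_resource memoizes the result per region, so the Wikipedia fetch and
# embedding work happen once per session instead of on every Streamlit rerun.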
@st.cache_resource
def build_retriever(region_topic):
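    # Pull up to three Wikipedia pages for the region, split them into
    # 500-character chunks (100-character overlap), and index them in FAISS.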
    loader = WikipediaLoader(query=region_topic, load_max_docs=3)
    documents = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    docs = text_splitter.split_documents(documents)
    db = FAISS.from_documents(docs, embeddings)
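    # as_retriever() defaults to plain similarity search (top k=4 chunks in langchain).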
    return db.as_retriever()
# Run RAG if prompt submitted
if st.button("Generate Answer"):
    if prompt.strip() == "":
        st.warning("Please enter a prompt.")
    else:
        retriever = build_retriever(region)
        qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever)
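        # from_chain_type defaults to the "stuff" strategy: retrieved chunks are
        # inserted verbatim into the LLM prompt. .run() is the legacy call style;
        # langchain >= 0.1 prefers qa_chain.invoke({"query": prompt}).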
        with st.spinner("Thinking..."):
            answer = qa_chain.run(prompt)
        st.markdown("#### ✨ Region-Specific Answer:")
        st.success(answer)