Spaces: Build error
```python
import os

import chromadb
import requests
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_groq import ChatGroq
# PDFPlumberLoader moved out of the core `langchain` package in LangChain 0.2+
from langchain_community.document_loaders import PDFPlumberLoader
from langchain_experimental.text_splitter import SemanticChunker
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma

# rag_prompt is a plain template string with {context} and {query} (see the sketch below)
from prompts import rag_prompt

# Set API keys
os.environ["GROQ_API_KEY"] = st.secrets.get("GROQ_API_KEY", "")

# Load LLM models
llm_judge = ChatGroq(model="deepseek-r1-distill-llama-70b")
rag_llm = ChatGroq(model="mixtral-8x7b-32768")
llm_judge.verbose = True
rag_llm.verbose = True

# Clear ChromaDB's cached system client so Streamlit reruns don't fail with
# "Could not connect to tenant default_tenant"
chromadb.api.client.SharedSystemClient.clear_system_cache()
| st.title("Blah") | |

# Initialize session state variables
if "pdf_path" not in st.session_state:
    st.session_state.pdf_path = None
if "pdf_loaded" not in st.session_state:
    st.session_state.pdf_loaded = False
if "chunked" not in st.session_state:
    st.session_state.chunked = False
if "vector_created" not in st.session_state:
    st.session_state.vector_created = False
if "vector_store_path" not in st.session_state:
    st.session_state.vector_store_path = "./chroma_langchain_db"
if "vector_store" not in st.session_state:
    st.session_state.vector_store = None
if "documents" not in st.session_state:
    st.session_state.documents = None

# Step 1: Choose PDF source
pdf_source = st.radio(
    "Upload or provide a link to a PDF:",
    ["Upload a PDF file", "Enter a PDF URL"],
    index=0,
    horizontal=True,
)

if pdf_source == "Upload a PDF file":
    uploaded_file = st.file_uploader("Upload your PDF file", type="pdf")
    if uploaded_file:
        st.session_state.pdf_path = "temp.pdf"
        with open(st.session_state.pdf_path, "wb") as f:
            f.write(uploaded_file.getbuffer())
        # New file: force the downstream steps to run again
        st.session_state.pdf_loaded = False
        st.session_state.chunked = False
        st.session_state.vector_created = False

elif pdf_source == "Enter a PDF URL":
    pdf_url = st.text_input("Enter PDF URL:", value="https://arxiv.org/pdf/2406.06998")
    if pdf_url and not st.session_state.get("pdf_loaded", False):
        with st.spinner("Downloading PDF..."):
            try:
                response = requests.get(pdf_url, timeout=30)  # timeout so a dead URL can't hang the app
                if response.status_code == 200:
                    st.session_state.pdf_path = "temp.pdf"
                    with open(st.session_state.pdf_path, "wb") as f:
                        f.write(response.content)
                    st.session_state.pdf_loaded = False
                    st.session_state.chunked = False
                    st.session_state.vector_created = False
                    st.success("✅ PDF Downloaded Successfully!")
                else:
                    st.error("❌ Failed to download PDF. Check the URL.")
            except Exception as e:
                st.error(f"Error downloading PDF: {e}")

# Step 2: Process PDF
if st.session_state.pdf_path and not st.session_state.get("pdf_loaded", False):
    with st.spinner("Loading and processing PDF..."):
        loader = PDFPlumberLoader(st.session_state.pdf_path)
        docs = loader.load()
        st.session_state.documents = docs
        st.session_state.pdf_loaded = True  # ✅ prevent re-loading on rerun
        st.success(f"✅ **PDF Loaded!** Total Pages: {len(docs)}")

# Step 3: Chunking
if st.session_state.get("pdf_loaded", False) and not st.session_state.get("chunked", False):
    with st.spinner("Chunking the document..."):
        model_name = "nomic-ai/modernbert-embed-base"
        embedding_model = HuggingFaceEmbeddings(
            model_name=model_name,
            model_kwargs={"device": "cpu"},
            encode_kwargs={"normalize_embeddings": False},
        )
        text_splitter = SemanticChunker(embedding_model)
        documents = text_splitter.split_documents(st.session_state.documents)
        st.session_state.documents = documents  # ✅ store chunked docs
        st.session_state.chunked = True  # ✅ prevent re-chunking
        st.success(f"✅ **Document Chunked!** Total Chunks: {len(documents)}")

# Step 4: Set up vector store
if st.session_state.get("chunked", False) and not st.session_state.get("vector_created", False):
    with st.spinner("Creating vector store..."):
        model_name = "nomic-ai/modernbert-embed-base"
        embedding_model = HuggingFaceEmbeddings(
            model_name=model_name,
            model_kwargs={"device": "cpu"},
            encode_kwargs={"normalize_embeddings": False},
        )
        vector_store = Chroma(
            collection_name="deepseek_collection",
            collection_metadata={"hnsw:space": "cosine"},
            embedding_function=embedding_model,
            persist_directory=st.session_state.vector_store_path,
        )
        vector_store.add_documents(st.session_state.documents)
        num_documents = len(vector_store.get()["documents"])
        st.session_state.vector_store = vector_store
        st.session_state.vector_created = True  # ✅ prevent re-creating the vector store
        st.success(f"✅ **Vector Store Created!** Total documents stored: {num_documents}")

# Step 5: Query input (this should not re-trigger the previous steps!)
if st.session_state.get("vector_created", False) and st.session_state.get("vector_store", None):
    query = st.text_input("🔍 Enter a Query:")
    if query:
        with st.spinner("Retrieving relevant contexts..."):
            retriever = st.session_state.vector_store.as_retriever(
                search_type="similarity", search_kwargs={"k": 5}
            )
            contexts = retriever.invoke(query)
            context_texts = [doc.page_content for doc in contexts]
        st.success(f"✅ **Retrieved {len(context_texts)} Contexts!**")
        for i, text in enumerate(context_texts, 1):
            st.write(f"**Context {i}:** {text[:500]}...")

        # Step 6: Generate final response
        with st.spinner("Generating the final answer..."):
            final_prompt = PromptTemplate(input_variables=["query", "context"], template=rag_prompt)
            response_chain = LLMChain(llm=rag_llm, prompt=final_prompt, output_key="final_response")
            # Join the retrieved chunks so the prompt receives readable text,
            # not the repr of a Python list
            final_response = response_chain.invoke(
                {"query": query, "context": "\n\n".join(context_texts)}
            )
        st.subheader("🔥 RAG Final Response")
        st.success(final_response["final_response"])
```
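
If the Space fails at build time, the app code is rarely the culprit; it is usually the dependency list. Note that in LangChain 0.2+ `PDFPlumberLoader` must come from `langchain_community` (as in the imports above), and each `langchain_*` integration is its own PyPI package. A `requirements.txt` covering every import in this script might look like the following (left unpinned here for brevity; pin versions in a real Space):

```text
streamlit
requests
chromadb
pdfplumber
langchain
langchain-community
langchain-experimental
langchain-groq
langchain-huggingface
langchain-chroma
sentence-transformers
```

`pdfplumber` backs `PDFPlumberLoader` and `sentence-transformers` backs `HuggingFaceEmbeddings`; neither is imported directly in the script, so they are easy to miss.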
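
The script also imports `rag_prompt` from a local `prompts.py` that is not shown. Since it is passed to `PromptTemplate(input_variables=["query", "context"], ...)`, it must be a plain string containing `{context}` and `{query}` placeholders. A minimal stand-in (the wording is an assumption, not the original file):

```python
# prompts.py -- hypothetical stand-in; the original template is not shown.
# The only hard requirement is that both {context} and {query} appear in the string.
rag_prompt = """You are a helpful assistant. Answer the question using only the context below.
If the context does not contain the answer, say so instead of guessing.

Context:
{context}

Question: {query}

Answer:"""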