Upload 7 files

- .gitattributes +1 -0
- app.py +67 -0
- chain_setup.py +45 -0
- embedding.py +11 -0
- faiss_index/index.faiss +3 -0
- faiss_index/index.pkl +3 -0
- requirements.txt +16 -0
- vectorstore.py +30 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+faiss_index/index.faiss filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,67 @@
+import streamlit as st
+import os
+
+from embedding import load_embeddings
+from vectorstore import load_or_build_vectorstore
+from chain_setup import build_conversational_chain
+
+def main():
+    st.title("💬 المحادثة التفاعلية - إدارة البيانات وحماية البيانات الشخصية")
+
+    # Apply RTL custom CSS for right-to-left text alignment
+    st.markdown(
+        """
+        <style>
+        .rtl {
+            direction: rtl;
+            text-align: right;
+        }
+        </style>
+        """,
+        unsafe_allow_html=True
+    )
+
+    # Paths and constants
+    local_file = "Policies001.pdf"
+    index_folder = "faiss_index"
+
+    # Step 1: Load Arabic Embeddings
+    embeddings = load_embeddings()
+
+    # Step 2: Build or load the VectorStore
+    vectorstore = load_or_build_vectorstore(local_file, index_folder, embeddings)
+
+    # Step 3: Build the Conversational Retrieval Chain
+    qa_chain = build_conversational_chain(vectorstore)
+
+    # Step 4: Session State for UI Chat
+    if "messages" not in st.session_state:
+        st.session_state["messages"] = [
+            {"role": "assistant", "content": "👋 مرحبًا! اسألني أي شيء عن إدارة البيانات وحماية البيانات الشخصية!"}
+        ]
+
+    # Display existing messages with RTL styling
+    for msg in st.session_state["messages"]:
+        with st.chat_message(msg["role"]):
+            st.markdown(f'<div class="rtl">{msg["content"]}</div>', unsafe_allow_html=True)
+
+    # Step 5: Chat Input
+    user_input = st.chat_input("اكتب سؤالك هنا")
+
+    if user_input:
+        # a) Display user message
+        st.session_state["messages"].append({"role": "user", "content": user_input})
+        with st.chat_message("user"):
+            st.markdown(f'<div class="rtl">{user_input}</div>', unsafe_allow_html=True)
+
+        # b) Run chain
+        response = qa_chain({"question": user_input})
+        answer = response["answer"]
+
+        # c) Display assistant response
+        st.session_state["messages"].append({"role": "assistant", "content": answer})
+        with st.chat_message("assistant"):
+            st.markdown(f'<div class="rtl">{answer}</div>', unsafe_allow_html=True)
+
+if __name__ == "__main__":
+    main()
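Usage note (an assumption, not part of the commit): with the packages from requirements.txt installed, the app can be started locally with "streamlit run app.py". On startup, main() loads the bundled faiss_index/ folder, or rebuilds it from Policies001.pdf if the folder is missing, before the chat loop begins.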
chain_setup.py
ADDED
@@ -0,0 +1,45 @@
+from huggingface_hub import hf_hub_download
+from langchain.llms import LlamaCpp
+from langchain.chains import ConversationalRetrievalChain
+from langchain.memory import ConversationBufferMemory
+
+def load_llm():
+    """
+    Downloads the GGUF model for Arabic and loads it via llama-cpp.
+    """
+    model_file = hf_hub_download(
+        repo_id="Moe98/cohere-r7b-arabic-02-2025-GGUF",
+        filename="cohere-r7b-02-2025-Q8_0.gguf",
+        local_dir="./models",
+        local_dir_use_symlinks=False
+    )
+
+    llm = LlamaCpp(
+        model_path=model_file,
+        flash_attn=False,
+        n_ctx=2048,
+        n_batch=512,
+        chat_format="chatml"
+    )
+    return llm
+
+def build_conversational_chain(vectorstore):
+    """
+    Creates a ConversationalRetrievalChain using the local llama-cpp-based LLM
+    and a ConversationBufferMemory for multi-turn Q&A.
+    """
+    llm = load_llm()
+
+    memory = ConversationBufferMemory(
+        memory_key="chat_history",
+        return_messages=True
+    )
+
+    qa_chain = ConversationalRetrievalChain.from_llm(
+        llm=llm,
+        retriever=vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 5}),
+        memory=memory,
+        verbose=True
+    )
+
+    return qa_chain
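For a quick check outside Streamlit, the chain can be exercised directly. The sketch below is illustrative only (the Arabic questions are placeholders, and it assumes Policies001.pdf and the faiss_index/ folder sit in the working directory); it uses the same dict-call convention app.py relies on, where the chain returns an "answer" key and ConversationBufferMemory carries chat_history between turns.

# Minimal sketch: wire the three modules together and ask two related questions.
from embedding import load_embeddings
from vectorstore import load_or_build_vectorstore
from chain_setup import build_conversational_chain

embeddings = load_embeddings()
vectorstore = load_or_build_vectorstore("Policies001.pdf", "faiss_index", embeddings)
qa_chain = build_conversational_chain(vectorstore)

# First turn (placeholder question).
first = qa_chain({"question": "ما هي البيانات الشخصية؟"})
print(first["answer"])

# Second turn: the buffer memory supplies the first exchange as chat_history.
second = qa_chain({"question": "وكيف تتم حمايتها؟"})
print(second["answer"])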
embedding.py
ADDED
@@ -0,0 +1,11 @@
+from langchain_huggingface import HuggingFaceEmbeddings
+
+def load_embeddings():
+    """
+    Returns a HuggingFaceEmbeddings instance for Arabic.
+    """
+    embeddings = HuggingFaceEmbeddings(
+        model_name="CAMeL-Lab/bert-base-arabic-camelbert-mix",
+        model_kwargs={"trust_remote_code": True}
+    )
+    return embeddings
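A small sanity check for the embedding setup, sketched here with a placeholder query: HuggingFaceEmbeddings exposes embed_query for a single string and embed_documents for a batch, which is the interface both SemanticChunker and FAISS consume.

from embedding import load_embeddings

embeddings = load_embeddings()
vec = embeddings.embed_query("حماية البيانات الشخصية")  # placeholder Arabic query
print(len(vec))  # dimensionality of the CAMeL-BERT sentence vector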
faiss_index/index.faiss
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:126eb6b6953e61d0a121fb87a2856d2a716f75c5f7f6a60e3d6c05bb95d0c836
+size 1520685
faiss_index/index.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0a1548b887e6e79289fe03498c29322fac5030208daf71270666cca2e0293d19
+size 277539
requirements.txt
ADDED
@@ -0,0 +1,16 @@
+streamlit==1.25.0
+langchain
+langchain_community
+torch
+transformers
+sentence-transformers
+accelerate
+pypdf
+ollama
+langchain_experimental
+faiss-cpu
+langchain_huggingface
+einops
+huggingface-hub
+sentencepiece
+llama-cpp-python
vectorstore.py
ADDED
@@ -0,0 +1,30 @@
+import os
+from langchain_community.document_loaders import PyPDFLoader
+from langchain_experimental.text_splitter import SemanticChunker
+from langchain_community.vectorstores import FAISS
+
+def load_or_build_vectorstore(local_file: str, index_folder: str, embeddings):
+    """
+    Loads a local FAISS index if it exists; otherwise,
+    builds a new index from the specified PDF file.
+    """
+    if os.path.exists(index_folder):
+        print("Loading existing FAISS index from disk...")
+        vectorstore = FAISS.load_local(index_folder, embeddings, allow_dangerous_deserialization=True)
+    else:
+        print("Building a new FAISS index...")
+        loader = PyPDFLoader(local_file)
+        documents = loader.load()
+
+        text_splitter = SemanticChunker(
+            embeddings=embeddings,
+            breakpoint_threshold_type='percentile',
+            breakpoint_threshold_amount=90
+        )
+        chunked_docs = text_splitter.split_documents(documents)
+        print(f"Document split into {len(chunked_docs)} chunks.")
+
+        vectorstore = FAISS.from_documents(chunked_docs, embeddings)
+        vectorstore.save_local(index_folder)
+
+    return vectorstore
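To inspect retrieval quality before involving the LLM, the store can also be queried directly; a minimal sketch with a placeholder question, using the FAISS vectorstore's similarity_search_with_score method (lower scores mean closer matches under the default L2 metric).

from embedding import load_embeddings
from vectorstore import load_or_build_vectorstore

embeddings = load_embeddings()
vectorstore = load_or_build_vectorstore("Policies001.pdf", "faiss_index", embeddings)

# Print the five closest chunks to a placeholder question, with their distances.
for doc, score in vectorstore.similarity_search_with_score("ما هي البيانات الشخصية؟", k=5):
    print(f"{score:.3f}  {doc.page_content[:80]}")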