import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Setup
st.set_page_config(page_title="Noor-e-Hidayat 🌙", layout="centered")
st.markdown("<h1 style='text-align: center;'>🌙 Noor-e-Hidayat – Islamic Chatbot</h1>", unsafe_allow_html=True)
st.markdown("Ask anything based on the Qur’an. This assistant replies gently, spiritually, and with reference.")
# Load model once and cache it across Streamlit reruns; also move the model
# to the same device the inputs will be sent to (the original loaded it on
# CPU while sending inputs to CUDA, which raises a device-mismatch error)
model_id = "llm-soda/quran-qa-phi-2"
device = "cuda" if torch.cuda.is_available() else "cpu"

@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
    return tokenizer, model

tokenizer, model = load_model()
# Chat history in session
if "messages" not in st.session_state:
    st.session_state.messages = []
# Chat input box
user_input = st.chat_input("Type your question about Islam...")
if user_input:
    # Save user message
    st.session_state.messages.append({"role": "user", "content": user_input})
    # Generate bot reply
    with st.spinner("Answering with Qur’an wisdom..."):
        prompt = f"Answer the following Islamic question with Qur’an-based reasoning and reference:\nQuestion: {user_input}\nAnswer:"
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)
        outputs = model.generate(**inputs, max_new_tokens=300)
        # Decode only the newly generated tokens; slicing by input length is
        # more robust than string-replacing the prompt out of the output
        response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True).strip()
    # Save bot message
    st.session_state.messages.append({"role": "assistant", "content": response})
# Display chat messages like ChatGPT; roles are "user" or "assistant",
# which st.chat_message accepts directly
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
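
# --- Running locally (a minimal sketch; dependency versions are an
# assumption, not pinned by the original app) ---
#   pip install streamlit transformers torch
#   streamlit run app.py
# The filename app.py is assumed here; it is the default entry point for
# Streamlit apps on Hugging Face Spaces.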