"""Noor-e-Hidayat – a Streamlit chatbot that answers questions with
Qur'an-based reasoning using a LoRA fine-tuned Qwen3-4B model."""

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

st.title("🌙 Noor-e-Hidayat – Islamic AI Chatbot")

# Hugging Face model repository for the fine-tuned checkpoint.
MODEL_ID = "Ellbendls/Qwen3-4b-Quran-LoRA-Fine-Tuned"


@st.cache_resource(show_spinner="Loading model…")
def load_model(model_id: str = MODEL_ID):
    """Load and cache the tokenizer/model pair.

    Streamlit re-executes the whole script on every user interaction;
    without caching, the multi-gigabyte model would be re-downloaded or
    re-instantiated on each rerun. ``st.cache_resource`` keeps a single
    shared instance for the process lifetime.

    Returns:
        tuple: (tokenizer, model) ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    return tokenizer, model


tokenizer, model = load_model()

# Input
user_input = st.text_input("🕊️ Ask a question based on Qur'an or Hadith:")

if user_input:
    st.write("🤖 Generating answer...")
    prompt = (
        "Answer the following with Qur'an-based reasoning:\n"
        f"Question: {user_input}\nAnswer:"
    )
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    outputs = model.generate(**inputs, max_new_tokens=300)
    # generate() returns prompt + completion; slice off the prompt tokens
    # so the user sees only the newly generated answer, not their question
    # echoed back.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(new_tokens, skip_special_tokens=True)
    st.markdown("### 📜 Answer:")
    st.write(response)