nagasaich123 committed
Commit b78672c · verified · 1 Parent(s): ce26465

Update app.py

Files changed (1):
  1. app.py +37 -38
app.py CHANGED
@@ -1,50 +1,49 @@
 import os
-import requests
-from llama_cpp import Llama
 import streamlit as st
+from llama_cpp import Llama
+from huggingface_hub import hf_hub_download
 
-MODEL_URL = "https://huggingface.co/MaziyarPanahi/BioMistral-7B-GGUF/resolve/main/BioMistral-7B.Q4_K_M.gguf"
-MODEL_PATH = "BioMistral-7B.Q4_K_M.gguf"
+# Hugging Face repo and model info
+REPO_ID = "MaziyarPanahi/BioMistral-7B-GGUF"
+MODEL_FILENAME = "BioMistral-7B.Q4_K_M.gguf"
 
-# Streamlit app title
+# Streamlit UI
 st.set_page_config(page_title="Medical Chatbot")
 st.title("🩺 Medical Chatbot using BioMistral-7B")
 
-# Download model manually if not present
-def download_model():
-    with st.spinner("🔄 Downloading model... please wait (few minutes)..."):
-        response = requests.get(MODEL_URL, stream=True)
-        if response.status_code == 200:
-            with open(MODEL_PATH, "wb") as f:
-                for chunk in response.iter_content(chunk_size=8192):
-                    if chunk:
-                        f.write(chunk)
-        else:
-            st.error(f"Failed to download model. Status: {response.status_code}")
-            st.stop()
-
-# Check and download
-if not os.path.exists(MODEL_PATH):
-    download_model()
-
-# Verify model exists
-if not os.path.exists(MODEL_PATH):
-    st.error("❌ Model file missing after download. Exiting.")
-    st.stop()
+# Download model via huggingface_hub
+with st.spinner("🔄 Checking/downloading model (this may take a few minutes)..."):
+    try:
+        model_path = hf_hub_download(
+            repo_id=REPO_ID,
+            filename=MODEL_FILENAME,
+            cache_dir="models"  # Optional: use your preferred directory
+        )
+    except Exception as e:
+        st.error(f"❌ Failed to download model: {e}")
+        st.stop()
 
 # Load model
 with st.spinner("⚙️ Loading BioMistral model..."):
-    llm = Llama(
-        model_path=MODEL_PATH,
-        n_ctx=4096,
-        n_threads=8,
-        n_gpu_layers=35  # Use 0 for CPU-only
-    )
+    try:
+        llm = Llama(
+            model_path=model_path,
+            n_ctx=4096,
+            n_threads=8,
+            n_gpu_layers=35  # Set to 0 for CPU
+        )
+    except Exception as e:
+        st.error(f"❌ Failed to load model: {e}")
+        st.stop()
 
-# UI
-query = st.text_input("💬 Enter your medical question:")
+# Chat UI
+query = st.text_input("💬 Ask a medical question:")
 if query:
-    with st.spinner("🧠 Thinking..."):
-        response = llm(query, max_tokens=512, stop=["</s>"])
-        st.markdown("**Answer:**")
-        st.write(response["choices"][0]["text"].strip())
+    with st.spinner("🧠 Generating answer..."):
+        try:
+            response = llm(query, max_tokens=512, stop=["</s>"])
+            answer = response["choices"][0]["text"].strip()
+            st.markdown("**🩺 Answer:**")
+            st.write(answer)
+        except Exception as e:
+            st.error(f"⚠️ Error generating response: {e}")
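A note on this revision (not part of the commit): Streamlit re-executes app.py from the top on every widget interaction, so as written the Llama model is re-initialized on each question. Wrapping the download-and-load step in st.cache_resource (a standard Streamlit API) keeps a single model instance alive across reruns; hf_hub_download itself already returns the cached local path on repeat calls instead of re-downloading. A minimal sketch, reusing the commit's REPO_ID and MODEL_FILENAME constants:

import streamlit as st
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

REPO_ID = "MaziyarPanahi/BioMistral-7B-GGUF"
MODEL_FILENAME = "BioMistral-7B.Q4_K_M.gguf"

@st.cache_resource  # cache the loaded model object across Streamlit reruns
def load_llm() -> Llama:
    # hf_hub_download skips the network fetch when the file is already cached
    model_path = hf_hub_download(
        repo_id=REPO_ID,
        filename=MODEL_FILENAME,
        cache_dir="models",
    )
    return Llama(
        model_path=model_path,
        n_ctx=4096,
        n_threads=8,
        n_gpu_layers=0,  # assumption: CPU-only host; raise if a GPU is present
    )

llm = load_llm()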
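The generation call is also still a raw completion (llm(query, ...)), with no instruction template around the question. BioMistral-7B is built on Mistral-7B-Instruct, so formatting the request as a chat turn usually produces better-formed answers; llama-cpp-python exposes this through create_chat_completion, which applies a chat template (taken from the GGUF metadata when one is present). A sketch, assuming llm is loaded as above and query comes from the text input:

with st.spinner("🧠 Generating answer..."):
    response = llm.create_chat_completion(
        messages=[{"role": "user", "content": query}],
        max_tokens=512,
    )
    answer = response["choices"][0]["message"]["content"].strip()
    st.write(answer)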