abdullahzunorain committed · verified
Commit 68841b4 · 1 Parent(s): 2eb66cf

Update app.py: replace the remote Gemma-2-9b-it-chat-doctor pipeline (and the hardcoded Hugging Face token) with a locally loaded Bio_ClinicalBERT model and a simpler gr.Interface front end.

Files changed (1):
  1. app.py +47 -35
app.py CHANGED
@@ -1,36 +1,48 @@
  import gradio as gr
- from transformers import pipeline
- import os
-
- # Set your Hugging Face API key
- os.environ["HUGGING_FACE_HUB_TOKEN"] = "your_huggingface_api_key_here"
-
- # Load the model using Hugging Face's Inference API
- model_name = "kingabzpro/Gemma-2-9b-it-chat-doctor"
- generator = pipeline("text-generation", model=model_name, use_auth_token=True)
-
- # Define the response generation function
- def generate_response(user_input):
-     # Generate response using the Hugging Face API
-     response = generator(user_input, max_length=500, do_sample=True, top_p=0.9, temperature=0.8)
-     return response[0]["generated_text"]
-
- # Define Gradio interface for the chatbot
- with gr.Blocks() as healthcare_chatbot:
-     gr.Markdown("<h1>Healthcare Chatbot</h1><p>Ask any healthcare-related questions!</p>")
-
-     chatbox = gr.Chatbot()
-     msg = gr.Textbox(placeholder="Type your healthcare question here...")
-     submit_button = gr.Button("Ask")
-
-     # Define the chatbot interaction function
-     def respond(chat_history, message):
-         response = generate_response(message)
-         chat_history.append((message, response))
-         return chat_history
-
-     # Set up the components for interaction
-     submit_button.click(respond, [chatbox, msg], chatbox)
-
- # Launch the Gradio interface
- healthcare_chatbot.launch()
+ # app.py
+
  import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForMaskedLM, pipeline
+ import torch
+
+ # Step 1: Set up the Bio_ClinicalBERT model for clinical text
+ device = 0 if torch.cuda.is_available() else -1  # 0 = first GPU, -1 = CPU
+ model_name = "emilyalsentzer/Bio_ClinicalBERT"  # BERT pretrained on clinical notes
+
+ # Load the tokenizer and model (half precision on GPU to manage memory usage)
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForMaskedLM.from_pretrained(
+     model_name,
+     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+ )
+
+ # Bio_ClinicalBERT is a masked language model (an encoder), so it cannot do
+ # free-form text generation; "fill-mask" is the pipeline task it supports.
+ chatbot = pipeline(
+     "fill-mask",
+     model=model,
+     tokenizer=tokenizer,
+     device=device,
+ )
+
+ # Step 2: Function to generate chatbot responses
+ def get_response(user_input):
+     # fill-mask requires a [MASK] token; append one if the user didn't type any
+     if tokenizer.mask_token not in user_input:
+         user_input = f"{user_input.rstrip('.')} {tokenizer.mask_token}."
+     predictions = chatbot(user_input, top_k=1)  # keep only the best completion
+     return predictions[0]["sequence"]
+
+ # Step 3: Create the Gradio interface
+ def chatbot_interface(user_input):
+     return get_response(user_input)
+
+ iface = gr.Interface(
+     fn=chatbot_interface,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your symptoms here..."),
+     outputs="text",
+     title="Health Chatbot",
+     description="Ask your symptoms and get advice!",
+     theme="default",
+ )
+
+ # Step 4: Launch the Gradio app
+ iface.launch()
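
Note on the model swap: Bio_ClinicalBERT is an encoder-only masked language model, so the updated app predicts tokens for a [MASK] slot rather than generating free-form answers the way the removed Gemma pipeline did. Below is a minimal standalone sketch of that fill-mask behavior, separate from the commit itself; the actual predictions depend on the model and are not guaranteed output.

# Standalone sketch (not part of the commit): how the fill-mask task behaves.
from transformers import pipeline

fill = pipeline("fill-mask", model="emilyalsentzer/Bio_ClinicalBERT")

# The pipeline fills exactly one [MASK] token per input string.
preds = fill("The patient reports fever and [MASK].", top_k=3)
for p in preds:
    # Each candidate carries a probability score, the predicted token,
    # and the input sentence with the mask filled in.
    print(f"{p['score']:.3f}  {p['token_str']:<12}  {p['sequence']}")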