# app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Step 1: Set up BioGPT model for medical text generation
device = 0 if torch.cuda.is_available() else -1  # 0 = first GPU, -1 = CPU (pipeline convention)
model_name = "microsoft/biogpt"  # BioGPT: a GPT-style model pretrained on biomedical literature

# Load tokenizer and model; use float16 on GPU to reduce memory, float32 on CPU
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)
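# Note: BioGPT's tokenizer uses Moses tokenization and requires the `sacremoses`
# package; if loading fails with an import error, run `pip install sacremoses`.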
# Create a text-generation pipeline with the loaded model
# (dtype is set at load time above; pipeline() only applies torch_dtype when
# it loads the model itself from a string id)
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=device,
)
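# Quick sanity check (a sketch; the prompt below is illustrative, not from the app):
# print(chatbot("COVID-19 is", max_new_tokens=20)[0]["generated_text"])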
# Step 2: Function to generate chatbot responses
def get_response(user_input):
    response = chatbot(
        user_input,
        max_new_tokens=75,       # Cap new tokens (max_length would also count the prompt)
        num_return_sequences=1,
        truncation=True,         # Truncate over-long inputs to the model's context window
    )
    # The text-generation pipeline returns the prompt followed by the continuation
    return response[0]["generated_text"]
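
# Optional variant (a sketch, not part of the original app): passing
# return_full_text=False makes the pipeline return only the newly generated
# text, without echoing the prompt back to the user.
def get_response_continuation_only(user_input):
    response = chatbot(
        user_input,
        max_new_tokens=75,
        truncation=True,
        return_full_text=False,  # drop the echoed prompt from the output
    )
    return response[0]["generated_text"]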
# Step 3: Create Gradio interface
def chatbot_interface(user_input):
    return get_response(user_input)

iface = gr.Interface(
    fn=chatbot_interface,
    inputs=gr.Textbox(lines=2, placeholder="Enter your symptoms here..."),
    outputs="text",
    title="Health Chatbot",
    description="Describe your symptoms and get general advice!",
    theme="default",
)
# Step 4: Launch the Gradio app
iface.launch(share=True)  # share=True creates a temporary public link (ignored when hosted on Spaces)
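# Deployment note (an assumption based on the Spaces context): a requirements.txt
# next to app.py would typically list gradio, transformers, torch, and sacremoses.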