abdullahzunorain
committed on
Update app.py
app.py CHANGED
@@ -1,16 +1,16 @@
 # app.py
 
 import gradio as gr
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import torch
 
-# Step 1: Set up
+# Step 1: Set up BioGPT model for medical text generation
 device = 0 if torch.cuda.is_available() else -1  # Set to 0 for GPU, -1 for CPU
-model_name = "
+model_name = "microsoft/BioGPT"  # Use BioGPT model for medical context
 
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model =
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Create a text generation pipeline with the loaded model
 chatbot = pipeline(
@@ -45,4 +45,4 @@ iface = gr.Interface(
 )
 
 # Step 4: Launch the Gradio app
-iface.launch()
+iface.launch(share=True)  # Enable sharing
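
The diff omits the middle of app.py (original lines 17-44), where the pipeline arguments and the Gradio interface are defined. Below is a minimal, self-contained sketch of how the visible pieces typically fit together; the respond function, the "text-generation" task string, and the gr.Interface arguments are illustrative assumptions, not the repo's actual code.

# sketch.py - assumed reconstruction of the full app; only the lines visible
# in the diff above are taken from the commit, the rest is illustrative.

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Step 1: Set up BioGPT model for medical text generation
device = 0 if torch.cuda.is_available() else -1  # Set to 0 for GPU, -1 for CPU
model_name = "microsoft/BioGPT"  # Use BioGPT model for medical context

# Load tokenizer and model (BioGPT's tokenizer also requires the
# sacremoses package to be installed)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Create a text generation pipeline with the loaded model
chatbot = pipeline(
    "text-generation",      # assumed task; consistent with AutoModelForCausalLM
    model=model,
    tokenizer=tokenizer,
    device=device,
)

# Steps 2-3 (assumed): generation function and Gradio interface
def respond(prompt: str) -> str:
    # Generate a continuation of the user's prompt
    outputs = chatbot(prompt, max_new_tokens=100, do_sample=True)
    return outputs[0]["generated_text"]

iface = gr.Interface(
    fn=respond,
    inputs=gr.Textbox(label="Your question"),
    outputs=gr.Textbox(label="Response"),
    title="BioGPT Medical Chatbot",  # hypothetical title
)

# Step 4: Launch the Gradio app
iface.launch(share=True)  # Enable sharing

On the one change whose effect is worth noting: launch(share=True) makes Gradio serve a temporary public gradio.live link in addition to the local URL, so the Space or script can be reached from outside the host machine.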