abdullahzunorain committed on
Commit 39ac574 · verified · 1 Parent(s): 68841b4

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -1,16 +1,16 @@
 # app.py
 
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForMaskedLM, pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 import torch
 
-# Step 1: Set up Bio_ClinicalBERT model for medical text generation
+# Step 1: Set up BioGPT model for medical text generation
 device = 0 if torch.cuda.is_available() else -1  # Set to 0 for GPU, -1 for CPU
-model_name = "emilyalsentzer/Bio_ClinicalBERT"  # Bio_ClinicalBERT model for medical context
+model_name = "microsoft/BioGPT"  # Use BioGPT model for medical context
 
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForMaskedLM.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Create a text generation pipeline with the loaded model
 chatbot = pipeline(
@@ -45,4 +45,4 @@ iface = gr.Interface(
 )
 
 # Step 4: Launch the Gradio app
-iface.launch()
+iface.launch(share=True)  # Enable sharing
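For context, here is a minimal sketch of how the full app.py could look after this commit. It is a reconstruction under assumptions: the pipeline arguments, the respond helper, and the gr.Interface arguments (the lines elided between the two hunks) are illustrative guesses, not taken from the actual file.

# app.py

import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# Step 1: Set up BioGPT model for medical text generation
device = 0 if torch.cuda.is_available() else -1  # Set to 0 for GPU, -1 for CPU
model_name = "microsoft/BioGPT"  # Use BioGPT model for medical context

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Create a text generation pipeline with the loaded model
chatbot = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=device,
)

# Step 2: Wrap the pipeline in a chat function (hypothetical helper;
# the real function body is not shown in this diff)
def respond(prompt):
    outputs = chatbot(prompt, max_new_tokens=128, do_sample=True)
    return outputs[0]["generated_text"]

# Step 3: Build the Gradio interface (argument choices are assumptions)
iface = gr.Interface(fn=respond, inputs="text", outputs="text")

# Step 4: Launch the Gradio app
iface.launch(share=True)  # Enable sharing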