abdulllah01 commited on
Commit
f9655e5
1 Parent(s): 594f70f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -8
app.py CHANGED
@@ -1,12 +1,25 @@
1
- from google.colab import drive
2
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
3
-
4
- # Mount Google Drive
5
- drive.mount('/content/drive')
6
 
7
- # Path to the saved model on Google Drive
8
- model_path = '/content/drive/My Drive/T5_samsum'
9
 
10
- # Load the tokenizer and model from the specified path
 
11
  tokenizer = AutoTokenizer.from_pretrained(model_path)
12
  model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
"""Streamlit app that summarizes user-provided text with a fine-tuned T5-family model."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Model checkpoint hosted on the Hugging Face Model Hub.
model_path = 'abdulllah01/mt5-Summarizer-FineTuned'


@st.cache_resource
def _load_model(path):
    """Load and cache the tokenizer/model pair for *path*.

    Streamlit re-runs this whole script on every widget interaction; without
    @st.cache_resource the model would be re-instantiated (and potentially
    re-downloaded) on every button click.
    """
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModelForSeq2SeqLM.from_pretrained(path)
    return tokenizer, model


tokenizer, model = _load_model(model_path)

# --- Streamlit UI ---
st.title("Summarization App")
st.write("This app summarizes text using a fine-tuned T5 model.")

# User input
user_input = st.text_area("Enter text to summarize", "")

if st.button("Summarize"):
    if user_input:
        # "summarize: " task prefix matches T5-style fine-tuning; truncate to
        # the 512-token encoder limit so long pastes don't error out.
        inputs = tokenizer.encode(
            "summarize: " + user_input,
            return_tensors="pt",
            max_length=512,
            truncation=True,
        )
        summary_ids = model.generate(
            inputs,
            max_length=150,
            min_length=40,
            length_penalty=2.0,
            num_beams=4,
            early_stopping=True,
        )
        summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
        st.write("Summary:")
        st.write(summary)
    else:
        st.write("Please enter some text to summarize.")