Spaces:
Sleeping
Sleeping
ahmadrocks
committed on
Commit
•
58c6588
1
Parent(s):
80b6635
Update app.py
Browse files
app.py
CHANGED
@@ -1,23 +1,53 @@
|
|
1 |
import streamlit as st
|
2 |
from transformers import GPT2LMHeadModel, GPT2Tokenizer
|
|
|
3 |
|
4 |
# Load the GPT-2 model and tokenizer
|
5 |
model_name = 'gpt2-large'
|
6 |
model = GPT2LMHeadModel.from_pretrained(model_name)
|
7 |
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
|
8 |
|
9 |
-
st.title("Article Generator")
|
10 |
|
11 |
# Input for the article title
|
12 |
title = st.text_input("Enter the title of the article")
|
13 |
|
|
|
|
|
|
|
|
|
14 |
# Generate the article
|
15 |
if st.button("Generate Article"):
|
16 |
if title:
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
else:
|
22 |
st.warning("Please enter a title to generate an article")
|
23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch


# Cache the model/tokenizer so Streamlit does not re-download / reload the
# ~3 GB gpt2-large weights on every script rerun (every widget interaction).
@st.cache_resource
def _load_model(model_name: str = 'gpt2-large'):
    """Load the GPT-2 model and tokenizer once and cache them for the session."""
    model = GPT2LMHeadModel.from_pretrained(model_name)
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model.eval()  # inference only — disable dropout
    return model, tokenizer


# Load the GPT-2 model and tokenizer
model, tokenizer = _load_model()

st.title("AI Article Generator")

# Input for the article title
title = st.text_input("Enter the title of the article")

# Parameters for text generation
max_length = st.slider("Maximum length of the article", min_value=50, max_value=1000, value=500, step=50)
temperature = st.slider("Temperature (creativity level)", min_value=0.7, max_value=1.5, value=1.0, step=0.1)

# Generate the article
if st.button("Generate Article"):
    if title:
        with st.spinner("Generating article..."):
            try:
                input_ids = tokenizer.encode(title, return_tensors='pt')
                # no_grad: generation is inference only; skip autograd bookkeeping.
                with torch.no_grad():
                    output = model.generate(
                        input_ids,
                        max_length=max_length,
                        temperature=temperature,
                        # do_sample=True is required for `temperature` to take
                        # effect — greedy decoding (the default) ignores it.
                        do_sample=True,
                        num_return_sequences=1,
                        no_repeat_ngram_size=2,
                        # GPT-2 has no pad token; use EOS to silence the
                        # generate-time warning.
                        pad_token_id=tokenizer.eos_token_id,
                        # NOTE: early_stopping=True was dropped — it only
                        # applies to beam search (num_beams > 1).
                    )
                article = tokenizer.decode(output[0], skip_special_tokens=True)
                st.success("Article generated successfully!")
                st.write(article)
            except Exception as e:
                # Surface the failure in the UI rather than crashing the app.
                st.error(f"An error occurred: {e}")
    else:
        st.warning("Please enter a title to generate an article")

# Page-layout CSS tweaks (center the report container, cap its width).
st.markdown("""
<style>
.reportview-container {
    flex-direction: column;
    align-items: center;
}
.css-1kyxreq {
    width: 100%;
    max-width: 700px;
    margin: auto;
}
</style>
""", unsafe_allow_html=True)