ibrahimgiki committed on
Commit
9b29191
·
verified ·
1 Parent(s): 919caf6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -8
app.py CHANGED
@@ -1,10 +1,31 @@
1
import streamlit as st
from transformers import pipeline

# Minimal Streamlit UI: run the default Hugging Face sentiment-analysis
# pipeline over whatever the user types and show the raw result as JSON.
classifier = pipeline('sentiment-analysis')
user_text = st.text_area('Enter some text')

# Only invoke the model once the text area is non-empty.
if user_text:
    st.json(classifier(user_text))
10
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Checkpoint used for text generation.
model_name = "gpt2-large"

# GPT-2 ships without a dedicated padding token, so reuse the EOS token
# as the pad token before any tokenization with padding is attempted.
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token

# Causal LM head for open-ended generation.
model = AutoModelForCausalLM.from_pretrained(model_name)
11
+
12
# Function to generate a blog post based on a topic title
def generate_blog(topic_title, max_length=300):
    """Generate a blog post continuation for *topic_title*.

    Uses the module-level ``tokenizer`` and ``model`` (GPT-2 large).

    Parameters
    ----------
    topic_title : str
        Seed text the model continues from.
    max_length : int, optional
        Total sequence length (prompt + generated tokens) passed to
        ``model.generate``.  Defaults to 300.

    Returns
    -------
    str
        The decoded generated text with special tokens stripped.
    """
    # Step 1: encode the input.  Calling the tokenizer directly supersedes
    # the deprecated ``encode_plus``; padding is harmless for a single
    # sequence and the attention mask is forwarded to generate().
    encoded = tokenizer(topic_title, return_tensors='pt', padding=True)

    # Step 2: generate.  Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        output_ids = model.generate(
            encoded['input_ids'],
            attention_mask=encoded['attention_mask'],
            max_length=max_length,
            num_return_sequences=1,
            # Silence the "no pad token" warning by padding with EOS.
            pad_token_id=tokenizer.eos_token_id,
        )

    # Step 3: decode the single returned sequence back to text.
    blog_post = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    return blog_post
26
+
27
# Example usage — guarded so importing this module does not block on stdin
# or trigger a generation run as a side effect.
if __name__ == "__main__":
    topic_title = input("Enter a topic title for the blog post: ")
    blog_post = generate_blog(topic_title)
    print("\nGenerated Blog Post:\n")
    print(blog_post)