skylersterling committed on
Commit 3682142 · verified · 1 Parent(s): ce142d1

Create app.py

Files changed (1)
  1. app.py +59 -0
app.py ADDED
@@ -0,0 +1,59 @@
+ import gradio as gr
+ import torch
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
+ import os
+
+ # Get the Hugging Face token from the environment variable
+ HF_TOKEN = os.environ.get("HF_TOKEN")
+
+ # Load the tokenizer and the fine-tuned model
+ tokenizer = GPT2Tokenizer.from_pretrained('gpt2', use_auth_token=HF_TOKEN)
+ model = GPT2LMHeadModel.from_pretrained('skylersterling/SentimentGPT', use_auth_token=HF_TOKEN)
+ model.eval()
+ model.to('cpu')
+
+ # Generate text from a prompt, streaming one token at a time
+ def generate_text(prompt, temperature, top_p):
+     prompt_with_eos = prompt + " > "  # Append the " > " marker that signals the end of the prompt
+     input_tokens = tokenizer.encode(prompt_with_eos, return_tensors='pt')
+
+     input_tokens = input_tokens.to('cpu')
+
+     generated_text = prompt_with_eos  # Start with the prompt plus the separator
+     prompt_length = len(generated_text)
+
+     for _ in range(80):  # Adjust the range to control the number of tokens generated
+         with torch.no_grad():
+             outputs = model(input_tokens)
+             predictions = outputs.logits[:, -1, :] / temperature
+             # Top-p (nucleus) filtering: mask every token outside the smallest set whose cumulative probability exceeds top_p
+             sorted_logits, sorted_indices = torch.sort(predictions, descending=True)
+             cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
+             sorted_indices_to_remove = cumulative_probs > top_p
+             sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
+             sorted_indices_to_remove[..., 0] = 0
+             indices_to_remove = sorted_indices[sorted_indices_to_remove]
+             predictions[:, indices_to_remove] = -float('Inf')
+             next_token = torch.multinomial(torch.softmax(predictions, dim=-1), 1)
+
+         input_tokens = torch.cat((input_tokens, next_token), dim=1)
+
+         decoded_token = tokenizer.decode(next_token.item())
+         generated_text += decoded_token  # Append the new token to the generated text
+         if decoded_token == "#":  # Stop once the "#" end-of-sequence marker is generated
+             break
+         yield generated_text[prompt_length:]  # Yield the completion so far, excluding the prompt and separator
+
+ # Create a Gradio interface with a text input, sliders for temperature and top_p, and a text output
+ interface = gr.Interface(
+     fn=generate_text,
+     inputs=[
+         gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.3, label="Temperature"),
+         gr.Slider(minimum=0.05, maximum=1.0, value=0.3, label="Top-p")
+     ],
+     outputs=gr.Textbox(),
+     live=False,
+     description="SentimentGPT processes the sequence and returns a reasonably accurate guess of whether the sentiment behind the input is positive or negative."
+ )
+
+ interface.launch()
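
A quick way to smoke-test the streaming generator without launching the Gradio UI is to drive it directly as a plain Python generator. The following is a minimal sketch, assuming generate_text from app.py is in scope and interface.launch() is temporarily commented out; the prompt string is purely illustrative:

# Minimal smoke test for the streaming generator (illustrative sketch).
# Assumes generate_text is defined as in app.py above and that
# interface.launch() is not executed on import.
prompt = "The movie was an absolute delight"  # hypothetical example prompt
for partial in generate_text(prompt, temperature=0.3, top_p=0.3):
    print(partial)  # prints the growing completion after each sampled token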