sbicy committed
Commit 60c95e2 · verified · 1 Parent(s): ecb3ee9

Update app.py

Files changed (1): app.py +75 -0
app.py CHANGED
@@ -0,0 +1,75 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import gradio as gr
+
+ # Set Device to CPU
+ device = torch.device('cpu')
+
+ # Load the Models
+ # Model 1: GPT-2 Medium
+ tokenizer1 = AutoTokenizer.from_pretrained('gpt2-medium')
+ model1 = AutoModelForCausalLM.from_pretrained('gpt2-medium')
+ model1.to(device)
+
+ # Model 2: GPT-Neo 125M
+ tokenizer2 = AutoTokenizer.from_pretrained('EleutherAI/gpt-neo-125M')
+ model2 = AutoModelForCausalLM.from_pretrained('EleutherAI/gpt-neo-125M')
+ model2.to(device)
+
+ # Define Text Generation Functions
+ def generate_text_model1(prompt, temperature, top_p):
+     inputs = tokenizer1(prompt, return_tensors='pt').to(device)
+     with torch.no_grad():
+         outputs = model1.generate(
+             **inputs,
+             max_new_tokens=30,
+             do_sample=True,
+             top_k=50,
+             top_p=top_p,
+             temperature=temperature
+         )
+     text = tokenizer1.decode(outputs[0], skip_special_tokens=True)
+     return text
+
+ def generate_text_model2(prompt, temperature, top_p):
+     inputs = tokenizer2(prompt, return_tensors='pt').to(device)
+     with torch.no_grad():
+         outputs = model2.generate(
+             **inputs,
+             max_new_tokens=30,
+             do_sample=True,
+             top_k=50,
+             top_p=top_p,
+             temperature=temperature
+         )
+     text = tokenizer2.decode(outputs[0], skip_special_tokens=True)
+     return text
+
+ def compare_models(prompt, temperature, top_p):
+     output1 = generate_text_model1(prompt, temperature, top_p)
+     output2 = generate_text_model2(prompt, temperature, top_p)
+
+     # Include the parameter settings in the outputs
+     output1_with_params = f"**Temperature:** {temperature}, **Top-p:** {top_p}\n\n{output1}"
+     output2_with_params = f"**Temperature:** {temperature}, **Top-p:** {top_p}\n\n{output2}"
+
+     return output1_with_params, output2_with_params
+
+ # Create Gradio Interface
+ iface = gr.Interface(
+     fn=compare_models,
+     inputs=[
+         gr.Textbox(lines=2, placeholder='Enter a prompt here...', label='Prompt'),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.8, label='Temperature'),
+         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, label='Top-p')
+     ],
+     outputs=[
+         gr.Markdown(label='GPT-2 Medium Output'),
+         gr.Markdown(label='GPT-Neo 125M Output')
+     ],
+     title='Compare Text Generation Models with Adjustable Parameters',
+     description='Enter a prompt and adjust the temperature and top-p parameters to see how they affect the generated text.'
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
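
Because iface.launch() sits behind the `if __name__ == "__main__"` guard, importing the module loads both models without starting the Gradio server, so the comparison logic can be exercised directly. A minimal smoke test, not part of this commit, assuming the file above is saved as app.py on the import path and torch, transformers, and gradio are installed (the prompt is an arbitrary example):

    # Hypothetical smoke test (not part of the commit): importing app loads
    # both models but does not launch the UI thanks to the __main__ guard.
    from app import compare_models

    # Run one side-by-side comparison with the interface's default settings.
    out_gpt2, out_neo = compare_models("The ocean at night", temperature=0.8, top_p=0.95)
    print(out_gpt2)  # GPT-2 Medium continuation, prefixed with the sampling settings
    print(out_neo)   # GPT-Neo 125M continuation, same prompt and settings

Running the file itself (python app.py) launches the side-by-side Gradio interface instead.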