tt
app.py CHANGED
@@ -1,15 +1,21 @@
-import gradio as gr
 import torch
-import
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import gradio as gr
+
+# Set device: GPU if available, else CPU
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Load two small models and their tokenizer (you can replace these with your models)
+model_name_a = "distilgpt2"
+model_name_b = "sshleifer/tiny-gpt2"  # very small GPT2 variant for demo
+
+tokenizer = AutoTokenizer.from_pretrained(model_name_a)
 
-
-
-
-
-
-)
-model.eval()
+model_a = AutoModelForCausalLM.from_pretrained(model_name_a).to(device)
+model_b = AutoModelForCausalLM.from_pretrained(model_name_b).to(device)
+
+model_a.eval()
+model_b.eval()
 
 def blend_generate(prompt, wa, wb):
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
@@ -24,35 +30,24 @@ def blend_generate(prompt, wa, wb):
     # Weighted sum of raw logits (before softmax)
     blended_logits = wa * logits_a + wb * logits_b
 
-    #
+    # Softmax to get probabilities
     probs = torch.softmax(blended_logits, dim=-1)
 
-    # Sample token from
+    # Sample one token from the blended distribution
    token = torch.multinomial(probs, 1)
     next_token_id = token.item()
-    next_token = tokenizer.decode([next_token_id])
 
-
+    next_token = tokenizer.decode([next_token_id])
+    return prompt + next_token
 
+# Gradio UI
 with gr.Blocks() as demo:
-    gr.
-
-
-
-    wB = gr.Slider(-5, 5, value=1.0, step=0.1, label="Weight B")
-    user_msg = gr.Textbox(label="User Message")
-    temp = gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
-    max_tokens = gr.Slider(1, 200, value=100, step=1, label="Max New Tokens")
-    output = gr.Textbox(label="Response")
+    prompt_input = gr.Textbox(label="Prompt", lines=2)
+    weight_a = gr.Slider(0, 1, value=0.5, label="Weight model A")
+    weight_b = gr.Slider(0, 1, value=0.5, label="Weight model B")
+    output_text = gr.Textbox(label="Output")
 
     btn = gr.Button("Generate")
-    btn.click(
-
-
-        output,
-        show_progress=True,
-    )
-
-if __name__ == "__main__":
-    demo.launch()
+    btn.click(blend_generate, inputs=[prompt_input, weight_a, weight_b], outputs=output_text)
+
+demo.launch()
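The second hunk resumes at new line 30, inside blend_generate, so new lines 22-29 (where logits_a and logits_b are computed) are not shown in this diff. A minimal sketch of what that elided region presumably contains, assuming one forward pass per model and last-position logits; only the names input_ids, model_a, model_b, logits_a, and logits_b come from the visible lines, everything else is an assumption:

```python
# Hypothetical reconstruction of the elided middle of blend_generate
# (new lines 22-29); not the committed code.
with torch.no_grad():
    # One forward pass per model over the same input ids
    logits_a = model_a(input_ids).logits[:, -1, :]  # (1, vocab) logits at the last position
    logits_b = model_b(input_ids).logits[:, -1, :]
```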
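Note that the blend happens in logit space, before the single softmax, which is not the same as averaging the two models' probability distributions. A self-contained toy check (illustrative only, not part of app.py):

```python
import torch

# Toy next-token logits over a 3-token vocabulary
logits_a = torch.tensor([2.0, 0.0, -1.0])
logits_b = torch.tensor([-1.0, 3.0, 0.0])
wa, wb = 0.5, 0.5

# What the app does: blend logits, then softmax once
p_logit_blend = torch.softmax(wa * logits_a + wb * logits_b, dim=-1)
# approx. [0.2447, 0.6652, 0.0900]

# The alternative: softmax each model, then blend probabilities
p_prob_blend = wa * torch.softmax(logits_a, dim=-1) + wb * torch.softmax(logits_b, dim=-1)
# approx. [0.4305, 0.5252, 0.0443]
```

Also worth noting: the new sliders keep wa and wb in [0, 1] but nothing forces wa + wb = 1, and scaling both weights up sharpens the blended softmax much like lowering a sampling temperature.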
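Finally, blend_generate returns after sampling a single token, so each click of Generate extends the prompt by exactly one token. A sketch of a multi-token variant built from the same pieces; the function name blend_generate_n and the max_new_tokens parameter are made up for illustration:

```python
def blend_generate_n(prompt, wa, wb, max_new_tokens=50):
    # Illustrative extension, not part of the committed app.py:
    # blend, sample, append, repeat until the budget or EOS is hit.
    text = prompt
    for _ in range(max_new_tokens):
        input_ids = tokenizer(text, return_tensors="pt").input_ids.to(device)
        with torch.no_grad():
            logits_a = model_a(input_ids).logits[:, -1, :]
            logits_b = model_b(input_ids).logits[:, -1, :]
        probs = torch.softmax(wa * logits_a + wb * logits_b, dim=-1)
        next_id = torch.multinomial(probs, 1).item()
        if next_id == tokenizer.eos_token_id:
            break
        text += tokenizer.decode([next_id])
    return text
```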