Amossofer committed
Commit 1782685 · 1 Parent(s): e45fce6
Files changed (1)
  1. app.py +28 -33
app.py CHANGED
@@ -1,15 +1,21 @@
-import gradio as gr
 import torch
-import torch.nn.functional as F
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import gradio as gr
+
+# Set device: GPU if available, else CPU
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# Load two small models and their tokenizer (you can replace these with your models)
+model_name_a = "distilgpt2"
+model_name_b = "sshleifer/tiny-gpt2"  # very small GPT2 variant for demo
+
+tokenizer = AutoTokenizer.from_pretrained(model_name_a)
 
-model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(
-    model_id,
-    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
-)
-model.eval()
+model_a = AutoModelForCausalLM.from_pretrained(model_name_a).to(device)
+model_b = AutoModelForCausalLM.from_pretrained(model_name_b).to(device)
+
+model_a.eval()
+model_b.eval()
 
 def blend_generate(prompt, wa, wb):
     input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
@@ -24,35 +30,24 @@ def blend_generate(prompt, wa, wb):
     # Weighted sum of raw logits (before softmax)
     blended_logits = wa * logits_a + wb * logits_b
 
-    # Apply softmax safely to get valid probability distribution
+    # Softmax to get probabilities
     probs = torch.softmax(blended_logits, dim=-1)
 
-    # Sample token from valid probability distribution
+    # Sample one token from the blended distribution
     token = torch.multinomial(probs, 1)
     next_token_id = token.item()
-    next_token = tokenizer.decode([next_token_id])
 
-    return next_token
+    next_token = tokenizer.decode([next_token_id])
+    return prompt + next_token
 
+# Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## Blended Prompt Chat (TinyLlama)")
-    sysA = gr.Textbox(label="System Prompt A", value="You are assistant A.")
-    sysB = gr.Textbox(label="System Prompt B", value="You are assistant B.")
-    wA = gr.Slider(-5, 5, value=1.0, step=0.1, label="Weight A")
-    wB = gr.Slider(-5, 5, value=1.0, step=0.1, label="Weight B")
-    user_msg = gr.Textbox(label="User Message")
-    temp = gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Temperature")
-    top_p = gr.Slider(0.1, 1.0, value=0.9, step=0.05, label="Top-p")
-    max_tokens = gr.Slider(1, 200, value=100, step=1, label="Max New Tokens")
-    output = gr.Textbox(label="Response")
+    prompt_input = gr.Textbox(label="Prompt", lines=2)
+    weight_a = gr.Slider(0, 1, value=0.5, label="Weight model A")
+    weight_b = gr.Slider(0, 1, value=0.5, label="Weight model B")
+    output_text = gr.Textbox(label="Output")
 
     btn = gr.Button("Generate")
-    btn.click(
-        blend_generate,
-        [sysA, sysB, wA, wB, user_msg, max_tokens, temp, top_p],
-        output,
-        show_progress=True,
-    )
-
-if __name__ == "__main__":
-    demo.launch()
+    btn.click(blend_generate, inputs=[prompt_input, weight_a, weight_b], outputs=output_text)
+
+demo.launch()
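
Note: the hunks above skip new lines 22-29 of app.py, where logits_a and logits_b are computed before being blended. A minimal sketch of what that elided middle of blend_generate could look like, assuming one forward pass per model with the logits taken at the last position; only the names input_ids, logits_a, logits_b, model_a, and model_b come from the visible diff, the rest is an assumption rather than the committed code:

    # Sketch (assumption, not the committed lines 22-29)
    with torch.no_grad():
        # One forward pass per model on the same input; keep only the
        # next-token logits (last position), shape [1, vocab_size]
        logits_a = model_a(input_ids).logits[:, -1, :]
        logits_b = model_b(input_ids).logits[:, -1, :]

Summing raw logits this way only lines up if both models share a vocabulary; distilgpt2 and sshleifer/tiny-gpt2 both use the GPT-2 vocabulary, which is why the single tokenizer loaded from model_name_a serves both. And since blend_generate returns the prompt plus one sampled token, a longer continuation comes from feeding the output back in, for example (hypothetical usage, not part of this commit):

    text = "The weather today is"
    for _ in range(20):  # one blended token per call
        text = blend_generate(text, wa=0.5, wb=0.5)
    print(text)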