Utiric committed on
Commit cc6f54d · verified · 1 Parent(s): 6e3133c

Update app.py

Files changed (1)
  1. app.py +72 -76
app.py CHANGED
@@ -1,99 +1,95 @@
-import spaces
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name = "daniel-dona/gemma-3-270m-it"
 
-#pipe = pipeline("text-generation", model=model, device="cuda")
 
-@spaces.GPU
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-
-    messages = [{"role": "system", "content": system_message}]
-
-    print("Got:", message)
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    """response = pipe(
-        messages,
         max_new_tokens=max_tokens,
-        temperature=temperature,
         top_p=top_p,
-        return_full_text=False,
-    )
-
-    generated_text = response[0]['generated_text']
-
-    yield generated_text"""
-
-    tokenizer = AutoTokenizer.from_pretrained(model_name)
-    model = AutoModelForCausalLM.from_pretrained(
-        model_name,
-        torch_dtype="auto",
-        device_map="auto"
     )
 
-    text = tokenizer.apply_chat_template(
-        messages,
-        tokenize=False,
-        add_generation_prompt=True
-    )
 
-    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
 
-    sample = True
 
-    if temperature == 0:
-        sample = False
 
-    # conduct text completion
-    generated_ids = model.generate(
-        **model_inputs,
-        max_new_tokens=max_tokens,
-        do_sample=sample,
-        top_p=top_p,
-        temperature=temperature
-    )
-    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
 
-    content = tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")
-
-    return content
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
     ],
 )
 
-
 if __name__ == "__main__":
     demo.launch()
+import time
+import torch
 import gradio as gr
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
+MODEL_NAME = "google/gemma-3-270m-it"
 
+# CPU optimizations
+torch.set_num_threads(torch.get_num_threads())  # use all available cores
+torch.set_float32_matmul_precision("high")      # speed up matmul
 
+# Load the model/tokenizer globally
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_NAME,
+    torch_dtype=torch.float32,  # float32 on CPU
+    device_map=None
+)
+model.eval()
+
+# Per-user KV cache
+sessions = {}  # {user_id: past_key_values}
+
+def build_prompt(message, history, system_message, max_ctx_tokens=1024):
+    msgs = [{"role": "system", "content": system_message}]
+    for u, a in history:
+        if u: msgs.append({"role": "user", "content": u})
+        if a: msgs.append({"role": "assistant", "content": a})
+    msgs.append({"role": "user", "content": message})
+
+    # Trim to the token budget
+    while True:
+        text = tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)
+        if len(tokenizer(text, add_special_tokens=False).input_ids) <= max_ctx_tokens:
+            return text
+        # Drop the oldest user+assistant pair
+        for i in range(1, len(msgs)):
+            if msgs[i]["role"] != "system":
+                del msgs[i:i+2]
+                break
+
+def respond(message, history, system_message, max_tokens, temperature, top_p):
+    user_id = "default"  # if you hook this up to an API, replace with a real user ID
+    past = sessions.get(user_id)
+
+    if past is None:
+        # First message: encode the full prompt
+        text = build_prompt(message, history, system_message)
+        inputs = tokenizer([text], return_tensors="pt").to(model.device)
+    else:
+        # Encode only the new message
+        inputs = tokenizer([message], return_tensors="pt").to(model.device)
+
+    do_sample = temperature > 0
+    gen_kwargs = dict(
         max_new_tokens=max_tokens,
+        do_sample=do_sample,
         top_p=top_p,
+        temperature=temperature if do_sample else None,
+        use_cache=True,
+        past_key_values=past
     )
 
+    start_time = time.time()
+    with torch.inference_mode():
+        outputs = model.generate(**inputs, **{k: v for k, v in gen_kwargs.items() if v is not None},
+                                 return_dict_in_generate=True, output_scores=False)
+    end_time = time.time()
 
+    # Update the KV cache
+    sessions[user_id] = outputs.past_key_values
 
+    # Decode the reply
+    new_tokens = outputs.sequences[0][inputs["input_ids"].shape[1]:]
+    content = tokenizer.decode(new_tokens, skip_special_tokens=True).strip("\n")
 
+    # Compute tokens/sec
+    token_count = len(new_tokens)
+    elapsed = end_time - start_time
+    tps = token_count / elapsed if elapsed > 0 else 0
 
+    return f"{content}\n\n⚡ **Speed:** {tps:.2f} tokens/sec"
 
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Slider(minimum=1, maximum=2048, value=256, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
     ],
 )
 
 if __name__ == "__main__":
     demo.launch()
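
A note on the cache-reuse branch above: on follow-up turns the code tokenizes the raw `message` string, so the chat template's turn markers never reach the model, and the tokenizer typically prepends a fresh BOS token on top of the cached prefix. Below is a minimal sketch of one way to keep the template intact when reusing `past_key_values`, assuming the `tokenizer` and `model` loaded above; `encode_follow_up_turn` is an illustrative helper, not part of this commit:

def encode_follow_up_turn(message):
    # Render only the newest user turn through the chat template so the
    # turn markers line up with what the cached prefix expects.
    turn_text = tokenizer.apply_chat_template(
        [{"role": "user", "content": message}],
        tokenize=False,
        add_generation_prompt=True,
    )
    # Gemma's template usually re-emits BOS at the start; drop it, since
    # the cached prefix already begins with one.
    if tokenizer.bos_token and turn_text.startswith(tokenizer.bos_token):
        turn_text = turn_text[len(tokenizer.bos_token):]
    return tokenizer([turn_text], return_tensors="pt",
                     add_special_tokens=False).to(model.device)

The `else` branch in `respond` could then call `encode_follow_up_turn(message)` instead of tokenizing `message` directly.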