Boning c committed on
Update app.py
app.py CHANGED
@@ -9,24 +9,41 @@ from html import escape
 PRIMARY_MODEL = "Smilyai-labs/Sam-reason-A3"
 FALLBACK_MODEL = "Smilyai-labs/Sam-reason-A1"
 USAGE_LIMIT = 5
-RESET_AFTER_SECONDS = 20 * 60
+RESET_AFTER_SECONDS = 20 * 60
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-# Globals
 primary_model = primary_tokenizer = None
 fallback_model = fallback_tokenizer = None
-usage_info = {}
+usage_info = {}
 
-# Load
+# Load models
 def load_models():
     global primary_model, primary_tokenizer, fallback_model, fallback_tokenizer
-    primary_tokenizer = AutoTokenizer.from_pretrained(PRIMARY_MODEL)
-    primary_model = AutoModelForCausalLM.from_pretrained(PRIMARY_MODEL).to(device).eval()
-    fallback_tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL)
-    fallback_model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL).to(device).eval()
+    primary_tokenizer = AutoTokenizer.from_pretrained(PRIMARY_MODEL, trust_remote_code=True)
+    primary_model = AutoModelForCausalLM.from_pretrained(PRIMARY_MODEL, torch_dtype=torch.float16).to(device).eval()
+    fallback_tokenizer = AutoTokenizer.from_pretrained(FALLBACK_MODEL, trust_remote_code=True)
+    fallback_model = AutoModelForCausalLM.from_pretrained(FALLBACK_MODEL, torch_dtype=torch.float16).to(device).eval()
     return f"✅ Loaded: {PRIMARY_MODEL} with fallback {FALLBACK_MODEL}"
 
-#
+# Format multi-turn history
+def build_chat_prompt(history, user_input, reasoning_enabled):
+    prefix = "/think " if reasoning_enabled else "/no_think "
+    prompt = ""
+    for user_msg, bot_msg in history:
+        prompt += f"<|user|>\n{user_msg}\n<|assistant|>\n{bot_msg}\n"
+    prompt += f"<|user|>\n{user_input}\n<|assistant|>\n"
+    return prefix + prompt
+
+# Collapse <think> block
+def format_thinking(text):
+    match = re.search(r"<think>(.*?)</think>", text, re.DOTALL)
+    if match:
+        reasoning = escape(match.group(1).strip())
+        visible = re.sub(r"<think>.*?</think>", "[thinking...]", text, flags=re.DOTALL).strip()
+        return f"{escape(visible)}<br><details><summary>🧠 Show reasoning</summary><pre>{reasoning}</pre></details>"
+    return escape(text)
+
+# Token stream generator
 def generate_stream(prompt, use_fallback=False, max_length=100, temperature=0.2, top_p=0.9):
     model = fallback_model if use_fallback else primary_model
     tokenizer = fallback_tokenizer if use_fallback else primary_tokenizer
@@ -39,7 +56,7 @@ def generate_stream(prompt, use_fallback=False, max_length=100, temperature=0.2,
         sorted_logits, sorted_indices = torch.sort(logits, descending=True)
         probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
         mask = probs > top_p
-        mask[..., 1:] = mask[..., :-1].clone()
+        mask[..., 1:] = mask[..., :-1].clone()
         mask[..., 0] = 0
         filtered = logits.clone()
         filtered[:, sorted_indices[mask]] = -float("Inf")
@@ -51,22 +68,12 @@ def generate_stream(prompt, use_fallback=False, max_length=100, temperature=0.2,
         if next_token.item() == tokenizer.eos_token_id:
             break
 
-#
-def format_thinking(text):
-    match = re.search(r"<think>(.*?)</think>", text, re.DOTALL)
-    if match:
-        reasoning = escape(match.group(1).strip())
-        visible = re.sub(r"<think>.*?</think>", "[thinking...]", text, flags=re.DOTALL).strip()
-        return f"{escape(visible)}<br><details><summary>🧠 Show reasoning</summary><pre>{reasoning}</pre></details>"
-    return escape(text)
-
-# Handle each message
+# Response pipeline
 def respond(message, history, reasoning_enabled, request: gr.Request):
     ip = request.client.host if request else "unknown"
     now = time.time()
     info = usage_info.get(ip, {"count": 0, "last_seen": 0})
 
-    # Reset after idle period
     if now - info["last_seen"] > RESET_AFTER_SECONDS:
         info["count"] = 0
 
@@ -76,10 +83,9 @@ def respond(message, history, reasoning_enabled, request: gr.Request):
 
     use_fallback = info["count"] > USAGE_LIMIT
     remaining = max(0, USAGE_LIMIT - info["count"])
-    model_used = "
+    model_used = "A3" if not use_fallback else "Fallback A1"
 
-
-    prompt = prefix + message.strip()
+    prompt = build_chat_prompt(history, message.strip(), reasoning_enabled)
     history = history + [[message, ""]]
 
     for output in generate_stream(prompt, use_fallback=use_fallback):
@@ -92,7 +98,7 @@ def clear_chat():
 
 # UI
 with gr.Blocks() as demo:
-    gr.Markdown("# 🤖 SamAI – Reasoning Chat")
+    gr.Markdown("# 🤖 SamAI – Reasoning Chat (Chat Mode Enabled)")
     model_status = gr.Textbox(interactive=False, label="Model Status")
     usage_counter = gr.Textbox(value="🧠 A3 messages left: 5", interactive=False, show_label=False)
     chat_box = gr.Chatbot(type="tuples")
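For anyone skimming the diff, here is a minimal standalone sketch of the prompt layout the new build_chat_prompt produces. The function body mirrors the logic added in this commit; the history and message values are invented purely for illustration.

# Standalone sketch of the prompt format introduced by build_chat_prompt.
# The conversation content below is made up for illustration only.
def build_chat_prompt(history, user_input, reasoning_enabled):
    prefix = "/think " if reasoning_enabled else "/no_think "
    prompt = ""
    for user_msg, bot_msg in history:
        prompt += f"<|user|>\n{user_msg}\n<|assistant|>\n{bot_msg}\n"
    prompt += f"<|user|>\n{user_input}\n<|assistant|>\n"
    return prefix + prompt

history = [["Hi!", "Hello, how can I help?"]]   # one earlier turn
print(build_chat_prompt(history, "What is 2 + 2?", reasoning_enabled=True))
# /think <|user|>
# Hi!
# <|assistant|>
# Hello, how can I help?
# <|user|>
# What is 2 + 2?
# <|assistant|>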
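Similarly, a quick sketch of what the format_thinking helper does to a reply containing a <think> block; the sample reply text is made up.

import re
from html import escape

# Mirrors the format_thinking helper in this commit: hide the <think> block
# behind a collapsible <details> element and escape everything for HTML display.
def format_thinking(text):
    match = re.search(r"<think>(.*?)</think>", text, re.DOTALL)
    if match:
        reasoning = escape(match.group(1).strip())
        visible = re.sub(r"<think>.*?</think>", "[thinking...]", text, flags=re.DOTALL).strip()
        return f"{escape(visible)}<br><details><summary>🧠 Show reasoning</summary><pre>{reasoning}</pre></details>"
    return escape(text)

print(format_thinking("<think>2 + 2 = 4</think>The answer is 4."))
# [thinking...]The answer is 4.<br><details><summary>🧠 Show reasoning</summary><pre>2 + 2 = 4</pre></details>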
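The changed masking line in generate_stream (mask[..., 1:] = mask[..., :-1].clone()) is the usual shift-right step of top-p (nucleus) filtering: it keeps the first token that crosses the cumulative-probability threshold. A toy sketch of the same steps on a made-up logits row, assuming the surrounding loop samples one token at a time:

import torch

# Toy nucleus (top-p) filtering, mirroring the masking lines in generate_stream.
# The logits values, top_p, and temperature are chosen only for illustration.
logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
top_p = 0.9

sorted_logits, sorted_indices = torch.sort(logits, descending=True)
probs = torch.softmax(sorted_logits, dim=-1).cumsum(dim=-1)
mask = probs > top_p                      # True once cumulative probability exceeds top_p
mask[..., 1:] = mask[..., :-1].clone()    # shift right so the token that crossed the threshold survives
mask[..., 0] = 0                          # always keep the most likely token
filtered = logits.clone()
filtered[:, sorted_indices[mask]] = -float("Inf")  # drop everything outside the nucleus

probs_final = torch.softmax(filtered / 0.2, dim=-1)    # temperature = 0.2, as in the app defaults
next_token = torch.multinomial(probs_final, num_samples=1)
print(filtered, next_token)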