Sam-2.5 Pro Solver

The Sam-2.5 Pro Solver is the final model in the Sam-2.5 series. It builds on the custom Sam2 architecture, optimized for efficiency and coherence, and fine-tuned extensively on Chain-of-Thought datasets for step-by-step reasoning and problem-solving.

Model Description

Architecture: Custom Sam2 decoder-only transformer

- Layers: 6
- Hidden size (d_model): 384
- Attention heads: 6
- Feedforward expansion: 4× (SwiGLU activation)
- Normalization: RMSNorm
- Dropout: 0.1
- Parameter sharing: Tied input/output embeddings
- Head type: Causal LM

The architecture was written from scratch in PyTorch rather than borrowed from a Hugging Face GPT implementation. It uses efficient components (RMSNorm and SwiGLU) and causal masking for autoregressive text generation.

Training

- Base checkpoint: sam2-epoch12-best.safetensors
- Fine-tuning objective: Chain-of-Thought reasoning
- Dataset size: 374,410 training examples; 26,929 validation examples
- Training run: resumed from epoch 9, then trained 3 more epochs (10–12)
- Loss: best validation loss before resume was 2.5403
- Frameworks: PyTorch 2.6 + CUDA 12.x, Hugging Face Hub integration
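For a rough sense of scale, here is a minimal sketch estimating the parameter count these hyperparameters imply. The vocabulary size below is an assumption for illustration only; the real value is read from the repository's config.json.

d_model, n_layers, ff_mult = 384, 6, 4.0
vocab_size = 50_000  # ASSUMPTION: actual value comes from config.json

attn = 4 * d_model * d_model                   # q/k/v/out projections per block
ff = 3 * d_model * int(ff_mult * d_model)      # SwiGLU uses three weight matrices
norms = 2 * d_model                            # two RMSNorms per block
embed = vocab_size * d_model                   # shared with the tied LM head
total = n_layers * (attn + ff + norms) + embed + d_model  # + final RMSNorm
print(f"~{total / 1e6:.1f}M parameters")       # ~33.4M under these assumptions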

Intended Use

- Chatbot with coherence over long conversations
- Problem-solving assistant (math, logic, structured Q&A)
- Reasoning-heavy generation with step-by-step explanations

Not recommended for real-time, mission-critical use or for generating unsafe or harmful content.

Limitations

- Edge-case reasoning still unstable
- Hallucination possible in fact-based tasks
- Text-only (no multimodal support)
- Efficiency focused on research use, not deployment at scale

Example Usage



import math
import requests
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from safetensors.torch import load_file
from transformers import AutoTokenizer

# -------------------------------
# 1) Local Sam-2 architecture
# -------------------------------
@dataclass
class Sam2Config:
    """Hyperparameters for the Sam2 architecture (defaults match the model card)."""
    vocab_size: int
    d_model: int = 384
    n_layers: int = 6
    n_heads: int = 6
    ff_mult: float = 4.0
    dropout: float = 0.1
    input_modality: str = "text"
    head_type: str = "causal_lm"
    version: str = "0.1"

class RMSNorm(nn.Module):
    """Root-mean-square layer norm (no mean subtraction, no bias)."""
    def __init__(self, d, eps=1e-6):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(d))

    def forward(self, x):
        return self.weight * x * (x.pow(2).mean(-1, keepdim=True) + self.eps).rsqrt()

class MHA(nn.Module):
    """Multi-head self-attention with a causal mask."""
    def __init__(self, d_model, n_heads, dropout=0.0):
        super().__init__()
        assert d_model % n_heads == 0
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads
        self.q_proj = nn.Linear(d_model, d_model, bias=False)
        self.k_proj = nn.Linear(d_model, d_model, bias=False)
        self.v_proj = nn.Linear(d_model, d_model, bias=False)
        self.out_proj = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, attn_mask=None):
        B, T, C = x.shape
        q = self.q_proj(x).view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        k = self.k_proj(x).view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        v = self.v_proj(x).view(B, T, self.n_heads, self.head_dim).transpose(1, 2)
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        # Causal mask: each position may attend only to itself and earlier positions.
        causal = torch.triu(torch.ones(T, T, device=x.device, dtype=torch.bool), diagonal=1)
        scores = scores.masked_fill(causal, float("-inf"))
        if attn_mask is not None:
            # attn_mask is (B, T); broadcast over heads and query positions to mask padded keys.
            scores = scores.masked_fill(~attn_mask.unsqueeze(1).unsqueeze(2).bool(), float("-inf"))
        attn = torch.softmax(scores, dim=-1)
        out = torch.matmul(self.dropout(attn), v).transpose(1, 2).contiguous().view(B, T, C)
        return self.out_proj(out)

class SwiGLU(nn.Module):
    """Gated feedforward: silu(w1(x)) * w2(x), projected back down by w3."""
    def __init__(self, d_model, d_ff, dropout=0.0):
        super().__init__()
        self.w1 = nn.Linear(d_model, d_ff, bias=False)
        self.w2 = nn.Linear(d_model, d_ff, bias=False)
        self.w3 = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.w3(self.dropout(F.silu(self.w1(x)) * self.w2(x)))

class Block(nn.Module):
    """Pre-norm transformer block: x + attn(norm(x)), then x + ff(norm(x))."""
    def __init__(self, d_model, n_heads, ff_mult, dropout=0.0):
        super().__init__()
        self.norm1 = RMSNorm(d_model)
        self.attn = MHA(d_model, n_heads, dropout=dropout)
        self.norm2 = RMSNorm(d_model)
        self.ff = SwiGLU(d_model, int(ff_mult * d_model), dropout=dropout)
        self.drop = nn.Dropout(dropout)

    def forward(self, x, attn_mask=None):
        x = x + self.drop(self.attn(self.norm1(x), attn_mask=attn_mask))
        x = x + self.drop(self.ff(self.norm2(x)))
        return x

class Sam2(nn.Module):
    def __init__(self, config: Sam2Config):
        super().__init__()
        self.config = config
        self.embed = nn.Embedding(config.vocab_size, config.d_model)
        self.blocks = nn.ModuleList([
            Block(config.d_model, config.n_heads, config.ff_mult, dropout=config.dropout)
            for _ in range(config.n_layers)
        ])
        self.norm = RMSNorm(config.d_model)
        self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
        self.lm_head.weight = self.embed.weight  # tied input/output embeddings

    def forward(self, input_ids, attention_mask=None):
        x = self.embed(input_ids)
        for blk in self.blocks:
            x = blk(x, attn_mask=attention_mask)
        x = self.norm(x)
        return self.lm_head(x)
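# Quick shape check (illustrative): ids of shape (B, T) map to logits of
# shape (B, T, vocab_size), e.g.:
#   Sam2(Sam2Config(vocab_size=100))(torch.randint(0, 100, (2, 8))).shape
#   # -> torch.Size([2, 8, 100])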

# -------------------------------
# 2) Load tokenizer, config & weights
# -------------------------------
hf_repo = "Smilyai-labs/Sam-2.5-PRO-SOLVER"
weights_filename = "sam2-epoch12-best.safetensors"

tokenizer = AutoTokenizer.from_pretrained(hf_repo)

config_url = f"https://huggingface.co/{hf_repo}/raw/main/config.json"
config_data = requests.get(config_url).json()
# Keep only the keys Sam2Config defines; config.json may carry extra metadata.
cfg = Sam2Config(**{k: v for k, v in config_data.items() if k in Sam2Config.__dataclass_fields__})

weights_url = f"https://huggingface.co/{hf_repo}/resolve/main/{weights_filename}"
resp = requests.get(weights_url)
resp.raise_for_status()  # fail loudly if the download did not succeed
with open(weights_filename, "wb") as f:
    f.write(resp.content)

model = Sam2(cfg)
state_dict = load_file(weights_filename)
model.load_state_dict(state_dict)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device).eval()

# Resolve the end-of-turn token id; fall back to eos if <|eot|> is not in the vocab.
# (A bare `or` would misfire when the resolved id is 0.)
_eot = tokenizer.convert_tokens_to_ids("<|eot|>")
EOT_ID = _eot if _eot is not None and _eot != tokenizer.unk_token_id else tokenizer.eos_token_id

# -------------------------------
# 3) Sampling function
# -------------------------------
def sample_next_token(
    logits,
    past_tokens,
    temperature=0.8,
    top_k=40,
    top_p=0.9,
    repetition_penalty=1.1,
    max_repeat=5,
    no_repeat_ngram_size=3
):
    if logits.dim() == 3:
        logits = logits[:, -1, :].clone()
    else:
        logits = logits.clone()
    batch_size, vocab_size = logits.size(0), logits.size(1)
    orig_logits = logits.clone()

    # Temperature scaling
    if temperature != 1.0:
        logits = logits / float(temperature)

    past_list = past_tokens.tolist() if isinstance(past_tokens, torch.Tensor) else list(past_tokens)

    # Repetition penalty (CTRL-style): shrink positive logits and amplify negative
    # ones, so tokens already in the context become less likely regardless of sign.
    for token_id in set(past_list):
        if 0 <= token_id < vocab_size:
            col = logits[:, token_id]
            logits[:, token_id] = torch.where(col > 0, col / repetition_penalty, col * repetition_penalty)

    # Block the last token once it has repeated max_repeat times in a row
    if len(past_list) >= max_repeat:
        last_token = past_list[-1]
        count = 1
        for i in reversed(past_list[:-1]):
            if i == last_token:
                count += 1
            else:
                break
        if count >= max_repeat:
            logits[:, last_token] = -float("inf")

    # N-gram blocking (default: no repeating 3-grams). Ban every token that would
    # recreate an n-gram already seen anywhere in the context.
    if no_repeat_ngram_size > 0 and len(past_list) >= no_repeat_ngram_size:
        prefix = tuple(past_list[-(no_repeat_ngram_size - 1):]) if no_repeat_ngram_size > 1 else ()
        banned = set()
        for i in range(len(past_list) - no_repeat_ngram_size + 1):
            ngram = tuple(past_list[i:i + no_repeat_ngram_size])
            if ngram[:-1] == prefix:
                banned.add(ngram[-1])
        for token_id in banned:
            logits[:, token_id] = -float("inf")

    # Top-k filtering
    if top_k is not None and top_k > 0:
        tk = min(max(1, int(top_k)), vocab_size)
        topk_vals, _ = torch.topk(logits, tk, dim=-1)
        min_topk = topk_vals[:, -1].unsqueeze(-1)
        logits[logits < min_topk] = -float("inf")

    # Top-p filtering
    if top_p is not None and 0.0 < top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
        sorted_probs = F.softmax(sorted_logits, dim=-1)
        cumulative_probs = torch.cumsum(sorted_probs, dim=-1)
        for b in range(batch_size):
            sorted_mask = cumulative_probs[b] > top_p
            if sorted_mask.numel() > 0:
                sorted_mask[0] = False
                tokens_to_remove = sorted_indices[b][sorted_mask]
                logits[b, tokens_to_remove] = -float("inf")

    # Fallback in case everything gets masked
    for b in range(batch_size):
        if torch.isneginf(logits[b]).all():
            logits[b] = orig_logits[b]

    probs = F.softmax(logits, dim=-1)
    if torch.isnan(probs).any():
        probs = torch.ones_like(logits) / logits.size(1)

    next_token = torch.multinomial(probs, num_samples=1)
    return next_token.to(device)
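# Illustrative sanity check: sampling from random logits should return a single
# valid token id, e.g.:
#   _logits = torch.randn(1, cfg.vocab_size, device=device)
#   print(sample_next_token(_logits, torch.tensor([1, 2, 3])).shape)  # torch.Size([1, 1])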


# -------------------------------
# 4) Chat with history
# -------------------------------
SPECIAL_TOKENS = {
    "bos": "<|bos|>",
    "eot": "<|eot|>",
    "user": "<|user|>",
    "assistant": "<|assistant|>",
    "system": "<|system|>",
}
chat_history = []

def generate_sam2_response_stream(
    user_input,
    system_prompt="You are Sam-2, a friendly and concise chatbot. Always give short, direct answers and avoid medical or legal advice.",
    max_new_tokens=256,
    **sample_kwargs,
):
    chat_history.append(f"{SPECIAL_TOKENS['user']} {user_input} {SPECIAL_TOKENS['eot']}")
    prompt = (
        f"{SPECIAL_TOKENS['system']} {system_prompt} {SPECIAL_TOKENS['eot']}\n"
        + "\n".join(chat_history)
        + f"\n{SPECIAL_TOKENS['assistant']}"
    )

    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    input_ids = inputs["input_ids"]
    attention_mask = inputs["attention_mask"]
    generated_ids = []  # collect the assistant's token ids so the full reply can be stored in history

    for _ in range(max_new_tokens):
        with torch.no_grad():
            logits = model(input_ids, attention_mask=attention_mask)
        next_token = sample_next_token(logits, input_ids[0], **sample_kwargs)
        input_ids = torch.cat([input_ids, next_token], dim=1)
        attention_mask = torch.cat(
            [attention_mask, torch.ones((attention_mask.size(0), 1), device=device, dtype=attention_mask.dtype)],
            dim=1,
        )

        token_id = int(next_token.squeeze().item())
        if token_id == EOT_ID:
            break
        generated_ids.append(token_id)
        token_str = tokenizer.decode([token_id], skip_special_tokens=True)
        yield token_str

    # append Sam's full response (all generated tokens, not just the last) to history
    response_text = tokenizer.decode(generated_ids, skip_special_tokens=True)
    chat_history.append(f"{SPECIAL_TOKENS['assistant']} {response_text} {SPECIAL_TOKENS['eot']}")

# -------------------------------
# 5) Interactive loop
# -------------------------------
print("-"*50)
print("   Welcome to the Interactive Sam-2.5-PRO-SOLVER Chat with memory!")
print("   Type 'quit' or 'exit' to end the session.")
print("-"*50)

sampling_defaults = dict(temperature=0.4, top_k=50, top_p=0.9, repetition_penalty=1.1)

while True:
    try:
        user_message = input("You: ")
        if user_message.lower().strip() in ['quit','exit']:
            print("Sam-2: Goodbye!"); break
        if not user_message.strip(): continue

        print("Sam-2: ", end="", flush=True)
        for token in generate_sam2_response_stream(user_message, max_new_tokens=256, **sampling_defaults):
            print(token, end="", flush=True)
        print("\n")

    except KeyboardInterrupt:
        print("\nSam-2: Goodbye!")
        break
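For non-interactive use, a minimal convenience wrapper is sketched below; it assumes the setup above and would replace the interactive loop:

def ask(question, **kwargs):
    # Join the streamed tokens into one reply string.
    return "".join(generate_sam2_response_stream(question, **kwargs))

# Example: answer = ask("What is 17 * 24? Think step by step.", **sampling_defaults)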

Citation

If you use this model, please cite: SmilyAI Labs. (2025). Sam-2.5 Pro Solver [Language model]. https://huggingface.co/Smilyai-labs/Sam-2.5-PRO-SOLVER
