# Import standard libraries
import math
from typing import Any, Dict, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
# Import Hugging Face Transformers modules
from transformers import (
    AutoTokenizer,
    PreTrainedModel,
    PretrainedConfig,
    GenerationMixin,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
    pipeline,
)
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings
from datasets import Dataset as HFDataset
from torch.utils.data import Dataset
_CONFIG_FOR_DOC = "TinyQwen3Config"
TINY_QWEN3_INPUTS_DOCSTRING = r"""
    TinyQwen3ForCausalLM input.

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
        attention_mask (`torch.FloatTensor`, *optional*):
            Mask to avoid performing attention on padding token indices.
        labels (`torch.LongTensor`, *optional*):
            Labels for computing the language modeling loss.
"""
# === Custom Multi-Head Attention to avoid SDPA warnings ===
class CustomMultiHeadAttention(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super().__init__()
        assert embed_dim % num_heads == 0
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scale = self.head_dim ** -0.5
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.dropout = nn.Dropout(dropout)
def forward(self, x, attention_mask=None):
batch_size, seq_len, embed_dim = x.size()
# Linear projections
q = self.q_proj(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
k = self.k_proj(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
v = self.v_proj(x).view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
# Scaled dot-product attention
scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
# Apply causal mask for autoregressive generation
causal_mask = torch.triu(torch.ones(seq_len, seq_len, device=x.device), diagonal=1).bool()
scores = scores.masked_fill(causal_mask.unsqueeze(0).unsqueeze(0), float('-inf'))
# Apply attention mask if provided
if attention_mask is not None:
attention_mask = attention_mask.unsqueeze(1).unsqueeze(1)
scores = scores.masked_fill(attention_mask == 0, float('-inf'))
attn_weights = F.softmax(scores, dim=-1)
attn_weights = self.dropout(attn_weights)
# Apply attention to values
out = torch.matmul(attn_weights, v)
out = out.transpose(1, 2).contiguous().view(batch_size, seq_len, embed_dim)
# Final projection
out = self.out_proj(out)
return out, attn_weights
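
# Shape sanity check for the attention block (an illustrative sketch, not part
# of the training script; the sizes are example assumptions):
#   attn = CustomMultiHeadAttention(embed_dim=128, num_heads=2)
#   out, weights = attn(torch.randn(2, 10, 128))
#   out.shape     -> torch.Size([2, 10, 128])    # [batch, seq, embed]
#   weights.shape -> torch.Size([2, 2, 10, 10])  # [batch, heads, seq, seq]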
# === Mixture of Experts Layer ===
class MoeLayer(nn.Module):
    def __init__(self, input_dim, hidden_dim, num_experts=4, k=1):
        super().__init__()
        self.num_experts = num_experts
        self.k = k
        self.gate = nn.Linear(input_dim, num_experts)
        self.experts = nn.ModuleList([
            nn.Sequential(
                nn.Linear(input_dim, hidden_dim),
                nn.GELU(),
                nn.Linear(hidden_dim, input_dim),
            )
            for _ in range(num_experts)
        ])
def forward(self, x):
batch_size, seq_len, embed_dim = x.shape
# Compute gate logits and select top-k experts
gate_logits = self.gate(x) # [batch_size, seq_len, num_experts]
weights, indices = torch.topk(gate_logits, self.k, dim=-1)
weights = torch.softmax(weights, dim=-1) # [batch_size, seq_len, k]
# Compute outputs from all experts
expert_outputs = []
for expert in self.experts:
expert_outputs.append(expert(x))
expert_outputs = torch.stack(expert_outputs, dim=-1) # [batch_size, seq_len, embed_dim, num_experts]
# Combine expert outputs
combined_output = torch.zeros_like(x)
for i in range(self.k):
expert_idx = indices[..., i] # [batch_size, seq_len]
weight = weights[..., i] # [batch_size, seq_len]
# Gather outputs from selected experts
selected_output = torch.gather(
expert_outputs,
-1,
expert_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, embed_dim, -1)
).squeeze(-1)
combined_output += selected_output * weight.unsqueeze(-1)
return combined_output
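
# Worked routing example (a sketch, assuming num_experts=4): with k=1, gate
# logits of [0.2, 1.5, -0.3, 0.1] select expert 1, and softmax over that single
# selected logit gives it weight 1.0, i.e. hard routing. With k=2, the top two
# logits (1.5 and 0.2) are softmax-renormalized against each other, so the
# token's output is a convex blend of the two selected experts.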
# === Tiny Transformer Block with MoE ===
class TinyMoETransformerBlock(nn.Module):
    def __init__(self, embed_dim, num_heads=2, num_experts=4, k=1):
        super().__init__()
        self.attn = CustomMultiHeadAttention(embed_dim, num_heads)
        self.moe = MoeLayer(embed_dim, embed_dim * 2, num_experts=num_experts, k=k)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
def forward(self, x, attention_mask=None):
attn_out, _ = self.attn(x, attention_mask)
x = self.norm1(x + attn_out)
moe_out = self.moe(x)
x = self.norm2(x + moe_out)
return x
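
# Note: this block applies LayerNorm after each residual addition (post-norm),
# as in the original Transformer; most modern decoder stacks, including Qwen3
# itself, place the normalization before each sublayer (pre-norm) instead.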
# === TinyQwen3 Model Config and Architecture ===
class TinyQwen3Config(PretrainedConfig):
    model_type = "tiny_qwen3"
def __init__(
self,
        vocab_size=151936,  # Qwen3-0.6B model vocab size (the tokenizer's vocab_size attribute is smaller)
embed_dim=128,
num_layers=3,
num_heads=2,
num_experts=4,
k=1,
max_position_embeddings=2048,
**kwargs
):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.embed_dim = embed_dim
self.num_layers = num_layers
self.num_heads = num_heads
self.num_experts = num_experts
self.k = k
self.max_position_embeddings = max_position_embeddings
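
# Example config round trip (an illustrative sketch; the directory name is
# arbitrary). save_pretrained writes a config.json whose model_type field is
# "tiny_qwen3":
#   cfg = TinyQwen3Config(embed_dim=64, num_layers=2)
#   cfg.save_pretrained("./tmp_config")
#   cfg = TinyQwen3Config.from_pretrained("./tmp_config")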
class TinyQwen3Simulator(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.token_emb = nn.Embedding(config.vocab_size, config.embed_dim)
        self.pos_emb = nn.Parameter(torch.randn(1, config.max_position_embeddings, config.embed_dim))
        self.layers = nn.ModuleList([
            TinyMoETransformerBlock(config.embed_dim, config.num_heads, config.num_experts, config.k)
            for _ in range(config.num_layers)
        ])
        self.final_norm = nn.LayerNorm(config.embed_dim)
def forward(self, input_ids, attention_mask=None):
batch_size, seq_len = input_ids.size()
# Clamp input_ids to valid range
input_ids = torch.clamp(input_ids, 0, self.token_emb.num_embeddings - 1)
        # Ensure sequence length doesn't exceed position embeddings
        seq_len = min(seq_len, self.pos_emb.size(1))
        input_ids = input_ids[:, :seq_len]
        if attention_mask is not None:
            attention_mask = attention_mask[:, :seq_len]  # keep the mask in sync after truncation
x = self.token_emb(input_ids) + self.pos_emb[:, :seq_len, :]
for layer in self.layers:
x = layer(x, attention_mask)
x = self.final_norm(x)
return x
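
# Rough parameter budget from the config defaults: the token embedding alone is
# 151936 * 128 = 19,447,808 (~19.4M) weights, and the untied lm_head adds the
# same again, so the vocabulary tables dwarf the tiny transformer layers.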
class TinyQwen3ForCausalLM(PreTrainedModel, GenerationMixin):
    config_class = TinyQwen3Config
    base_model_prefix = "model"
    main_input_name = "input_ids"
def __init__(self, config):
super().__init__(config)
self.model = TinyQwen3Simulator(config)
self.lm_head = nn.Linear(config.embed_dim, config.vocab_size, bias=False)
self.post_init()
def post_init(self):
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def get_input_embeddings(self):
return self.model.token_emb
def set_input_embeddings(self, value):
self.model.token_emb = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(TINY_QWEN3_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs
) -> Union[Tuple, CausalLMOutputWithPast]:
"""
Forward pass of the TinyQwen3 model for causal language modeling.
Returns:
CausalLMOutputWithPast: Model outputs including loss and logits.
"""
# Get hidden states from the model
hidden_states = self.model(input_ids, attention_mask)
# Apply language modeling head to get logits
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift labels for next token prediction
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss = F.cross_entropy(
shift_logits.view(-1, self.config.vocab_size),
shift_labels.view(-1),
ignore_index=-100
)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
)
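
    # Label-shift illustration: for tokens [t0, t1, t2, t3], the logits at
    # positions 0..2 are scored against labels t1..t3, so every position
    # predicts the next token; labels of -100 (e.g. padding) are ignored.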
    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
        # This model keeps no KV cache, so past_key_values is never populated
        # and the full sequence is re-encoded at every generation step.
        if past_key_values is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids}
# === Dataset: Use Tokenized Text ===
class TokenizedTextDataset(Dataset):
    def __init__(self, texts, tokenizer, max_length=128):
        self.texts = texts
        self.tokenizer = tokenizer
        self.max_length = max_length
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = self.texts[idx]
encodings = self.tokenizer(
text,
truncation=True,
padding="max_length",
max_length=self.max_length,
return_tensors="pt"
)
input_ids = encodings["input_ids"].squeeze(0)
        # Clamp token IDs to the embedding range to prevent out-of-range indexing.
        # len(tokenizer) counts added special tokens (e.g. pad/eos); tokenizer.vocab_size does not.
        input_ids = torch.clamp(input_ids, 0, len(self.tokenizer) - 1)
return {"input_ids": input_ids, "labels": input_ids.clone()}
# === Main Execution ===
if __name__ == "__main__":
    import os
    import warnings
# Suppress the sliding window attention warning
warnings.filterwarnings("ignore", message=".*Sliding Window Attention.*")
os.environ["CUDA_LAUNCH_BLOCKING"] = "1" # For better CUDA error tracing
os.environ["CUDA_VISIBLE_DEVICES"] = "" # Hide all CUDA devices
# Force CPU execution to avoid CUDA issues during debugging
device = torch.device("cpu")
torch.cuda.is_available = lambda: False # Force torch to think CUDA is not available
# Load Qwen3-0.6B tokenizer
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B", trust_remote_code=True)
# Add padding token if it doesn't exist
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
print(f"Tokenizer vocab size: {tokenizer.vocab_size}")
# Sample text for training
sample_texts = [
"Artificial intelligence is a wonderful field of study.",
"Deep learning enables machines to learn from data.",
"Transformers have revolutionized NLP.",
"Mixture of Experts makes large models efficient.",
"Qwen3 is a powerful language model."
]
# Test tokenization first
print("Testing tokenization...")
for i, text in enumerate(sample_texts[:2]):
tokens = tokenizer(text, return_tensors="pt")
print(f"Text {i}: {text}")
print(f"Tokens: {tokens['input_ids']}")
print(f"Max token ID: {tokens['input_ids'].max().item()}")
print()
# Create dataset
train_dataset = TokenizedTextDataset(sample_texts, tokenizer, max_length=64)
# Initialize TinyQwen3 model with Qwen3 vocab size
print("Initializing model...")
config = TinyQwen3Config(
        vocab_size=len(tokenizer),  # len() includes added special tokens; tokenizer.vocab_size omits them
embed_dim=128,
num_layers=2, # Reduced for debugging
num_heads=2,
num_experts=2, # Reduced for debugging
k=1,
max_position_embeddings=64 # Reduced for debugging
)
model = TinyQwen3ForCausalLM(config).to(device)
print(f"Model vocab size: {model.config.vocab_size}")
print(f"Model parameters: {sum(p.numel() for p in model.parameters()):,}")
# Test forward pass
print("Testing forward pass...")
test_input = torch.randint(0, min(1000, tokenizer.vocab_size), (1, 10)).to(device)
try:
with torch.no_grad():
output = model(test_input)
print(f"Forward pass successful! Output shape: {output.logits.shape}")
except Exception as e:
print(f"Forward pass failed: {e}")
exit(1)
# Create a simple training loop instead of using Trainer to avoid CUDA issues
print("Starting manual training loop...")
model.train()
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
# Create a simple DataLoader
from torch.utils.data import DataLoader
    # With mlm=False, the collator rebuilds labels from input_ids and sets
    # padding positions to -100 so the loss ignores them.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
train_dataloader = DataLoader(train_dataset, batch_size=1, shuffle=True, collate_fn=data_collator)
for epoch in range(1):
print(f"Epoch {epoch + 1}")
total_loss = 0
for step, batch in enumerate(train_dataloader):
# Move batch to device (CPU)
batch = {k: v.to(device) for k, v in batch.items()}
# Forward pass
outputs = model(**batch)
loss = outputs.loss
# Backward pass
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
if step % 2 == 0:
print(f"Step {step}, Loss: {loss.item():.4f}")
if step >= 5: # Train for just a few steps
break
print(f"Average loss: {total_loss / min(len(train_dataloader), 6):.4f}")
print("Training completed successfully!")
# Save model and tokenizer
print("Saving model...")
model.save_pretrained("./tiny_qwen3_model")
tokenizer.save_pretrained("./tiny_qwen3_model")
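
    # So the pipeline below can resolve the custom "tiny_qwen3" model_type found
    # in the saved config.json, register the classes with the Auto* factories.
    # A minimal sketch using the standard Transformers registration API (this
    # step is an addition, not part of the original script):
    from transformers import AutoConfig, AutoModelForCausalLM
    AutoConfig.register("tiny_qwen3", TinyQwen3Config)
    AutoModelForCausalLM.register(TinyQwen3Config, TinyQwen3ForCausalLM)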
# Test inference
print("Testing inference...")
try:
pipe = pipeline(
"text-generation",
model="./tiny_qwen3_model",
tokenizer="./tiny_qwen3_model",
trust_remote_code=True,
device=-1 # Force CPU
)
result = pipe("Explain the concept", max_new_tokens=20, do_sample=False)
print("Generated text:", result)
except Exception as e:
print(f"Inference failed: {e}")
# Try direct model inference
model.eval()
test_text = "Explain the concept"
inputs = tokenizer(test_text, return_tensors="pt").to(device)
with torch.no_grad():
outputs = model.generate(
inputs.input_ids,
max_new_tokens=10,
do_sample=False,
pad_token_id=tokenizer.pad_token_id
)
generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
print("Direct generation:", generated_text)