"""
Training script for the Dynamic Token-Aware Transformer (DTAT) on the enwik8 dataset.

Based on nanoGPT's training structure, with modifications for token-importance awareness.
"""

import os
import time
import math

import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
import matplotlib.pyplot as plt
import wandb
from tqdm import tqdm

from model_dtat import DTATTransformer
from config.dtat_config import get_config


def get_batch(data, block_size, batch_size, device):
    """Generate a small batch of input sequences x and shifted targets y."""
    ix = torch.randint(len(data) - block_size, (batch_size,))
    x = torch.stack([torch.from_numpy((data[i:i+block_size]).astype(np.int64)) for i in ix])
    y = torch.stack([torch.from_numpy((data[i+1:i+1+block_size]).astype(np.int64)) for i in ix])
    x, y = x.to(device), y.to(device)
    return x, y

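
# Note: get_batch draws independent random windows on every call, so training
# never performs a strict pass over enwik8; the "epochs" reported below are
# token-count estimates. Pinned host memory plus non_blocking=True copies is a
# common optional speedup for the .to(device) transfer, omitted here for brevity.
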
def compute_freq_table(data, vocab_size=256):
    """Compute the normalized byte-frequency table for the dataset."""
    freq = np.bincount(data, minlength=vocab_size)
    return freq / len(data)

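
# A rare byte (low frequency) is a natural candidate for a higher importance
# score. This script only computes the table; whether DTATTransformer actually
# consumes frequency information depends on model_dtat.py, not on this file.
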
def visualize_importance(tokens, importance_scores, iter_num):
    """Log a bar chart of per-position token importance scores to wandb."""
    plt.figure(figsize=(15, 5))

    scores = importance_scores.detach().squeeze().cpu()
    plt.bar(range(len(tokens)), scores)
    plt.title(f'Token Importance Scores (Iteration {iter_num})')
    plt.xlabel('Token Position')
    plt.ylabel('Importance Score')

    # Only label individual tokens when the sequence is short enough to read
    if len(tokens) <= 50:
        plt.xticks(range(len(tokens)), tokens, rotation=45)

    wandb.log({
        'importance_scores': wandb.Image(plt),
        'iter': iter_num
    })
    plt.close()


@torch.no_grad()
def estimate_loss(model, data, config):
    """Estimate the mean loss over config.eval_iters random batches."""
    model.eval()
    losses = torch.zeros(config.eval_iters)
    for k in range(config.eval_iters):
        X, Y = get_batch(data, config.block_size, config.batch_size, config.device)
        logits, loss, _ = model(X, Y)
        losses[k] = loss.item()
    model.train()
    return losses.mean()

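
# The result is a Monte Carlo estimate over random batches, so it is noisy;
# raising config.eval_iters tightens the estimate at the cost of eval time.
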
def get_lr(it, config):
    """Learning-rate schedule: linear warmup followed by cosine decay."""
    # 1) Linear warmup from 0 to learning_rate over warmup_iters
    if it < config.warmup_iters:
        return config.learning_rate * it / config.warmup_iters

    # 2) Cosine decay from learning_rate down to min_lr over lr_decay_iters
    if config.decay_lr:
        decay_ratio = (it - config.warmup_iters) / (config.lr_decay_iters - config.warmup_iters)
        decay_ratio = min(decay_ratio, 1.0)
        coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
        return config.min_lr + coeff * (config.learning_rate - config.min_lr)

    # 3) Constant learning rate if decay is disabled
    return config.learning_rate

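
# Worked example with hypothetical values learning_rate=6e-4, warmup_iters=2000,
# lr_decay_iters=100000, min_lr=6e-5:
#   it=1000    -> warmup: 6e-4 * 1000/2000 = 3e-4
#   it=2000    -> decay starts at the full 6e-4 (decay_ratio=0, coeff=1)
#   it=51000   -> decay_ratio=0.5, coeff=0.5, lr = 6e-5 + 0.5*(6e-4 - 6e-5) = 3.3e-4
#   it>=100000 -> decay_ratio clamps to 1, lr = min_lr = 6e-5
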

def main():
    # Load the config first: the DDP branch below reads and mutates config.batch_size.
    config = get_config()

    # Distributed setup (torchrun sets RANK/LOCAL_RANK/WORLD_SIZE)
    ddp = int(os.environ.get('RANK', -1)) != -1
    if ddp:
        init_process_group(backend='nccl')
        ddp_rank = int(os.environ['RANK'])
        ddp_local_rank = int(os.environ['LOCAL_RANK'])
        ddp_world_size = int(os.environ['WORLD_SIZE'])
        device = f'cuda:{ddp_local_rank}'
        torch.cuda.set_device(device)
        master_process = ddp_rank == 0
        seed_offset = ddp_rank
        # Split the global batch size evenly across processes
        assert config.batch_size % ddp_world_size == 0
        config.batch_size = config.batch_size // ddp_world_size
    else:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        master_process = True
        seed_offset = 0

    torch.manual_seed(1337 + seed_offset)
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True

    config.device = device

    # Experiment-specific overrides; set these before wandb snapshots the config
    config.warmup_iters = 2000
    config.learning_rate = 6e-4

    if master_process:
        wandb.init(project="enwik8-dtat")
        wandb.config.update(config.__dict__)
print("Loading data...") |
|
data_dir = os.path.join('data') |
|
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint8, mode='r') |
|
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint8, mode='r') |
|
|
|
|
|
freq_table = compute_freq_table(train_data) |
|
|
|
|
|
print("Initializing model...") |
|
model = DTATTransformer(config) |
|
model.to(device) |
|
|
|
|
|
optimizer = torch.optim.AdamW( |
|
model.parameters(), |
|
lr=config.learning_rate, |
|
betas=(config.beta1, config.beta2), |
|
weight_decay=config.weight_decay |
|
) |
|
|
|
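
    # Note: weight decay here applies to every parameter; nanoGPT-style setups
    # typically exclude biases and LayerNorm/embedding weights via parameter
    # groups. Kept as in the original script.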

    # Keep an unwrapped handle for checkpointing: DDP and torch.compile both
    # add wrapper prefixes ('module.', '_orig_mod.') to state-dict keys.
    raw_model = model

    if ddp:
        model = DDP(model, device_ids=[ddp_local_rank])

    # Optional compilation (PyTorch >= 2.0)
    if hasattr(torch, 'compile'):
        try:
            model = torch.compile(model)
            print("Using torch.compile() for faster training")
        except Exception as e:
            print(f"torch.compile() failed ({e}), falling back to the eager model")

    # Gradient scaler for mixed-precision training
    scaler = torch.cuda.amp.GradScaler(enabled=config.mixed_precision)

    # Let cuDNN pick the fastest kernels for our fixed input shapes
    torch.backends.cudnn.benchmark = True
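
    # Ordering note: this script wraps in DDP first and compiles second; nanoGPT
    # does the reverse (compile, then wrap). Both generally work on recent
    # PyTorch, but if compilation misbehaves under DDP, swapping the order is
    # the first thing to try.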

    checkpoint_dir = os.path.join('checkpoints', 'dtat')
    os.makedirs(checkpoint_dir, exist_ok=True)

    print("Starting training...")
    print(f"Saving checkpoints to: {checkpoint_dir}")

    # Approximate epoch count: total tokens processed divided by dataset size
    total_steps = config.max_iters
    batch_size = config.batch_size
    block_size = config.block_size
    total_epochs = (total_steps * batch_size * block_size) // len(train_data)

    pbar = tqdm(range(config.max_iters), desc=f"Training (0/{total_epochs} epochs)")

    best_val_loss = float('inf')
    no_improvement = 0

    t0 = time.time()

    for iter_num in pbar:
        # Early stopping
        if no_improvement >= config.patience:
            print(f"\nEarly stopping triggered after {iter_num} iterations")
            print(f"Best validation loss: {best_val_loss:.4f}")
            break

        # Set the learning rate for this iteration
        lr = get_lr(iter_num, config)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        X, Y = get_batch(train_data, config.block_size, config.batch_size, device)

        # Forward pass under autocast when mixed precision is enabled
        with torch.cuda.amp.autocast(enabled=config.mixed_precision):
            logits, loss, importance_scores = model(X, Y)

        # Backward pass: scale, unscale for clipping, then step
        optimizer.zero_grad(set_to_none=True)
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_clip)
        scaler.step(optimizer)
        scaler.update()
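
        # Why this ordering: backward() runs on a scaled loss so fp16 gradients
        # do not underflow; unscale_() restores true gradient magnitudes so
        # clip_grad_norm_ clips (and reports) real values; scaler.step() skips
        # the update if any gradient is inf/nan, and scaler.update() adapts the
        # scale factor for the next iteration.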

        if iter_num % config.log_interval == 0 and master_process:
            # Progress: tokens seen so far, expressed as approximate epochs
            current_tokens = (iter_num + 1) * batch_size * block_size
            current_epoch = current_tokens / len(train_data)

            # Reuse the gradient norm returned by the clipping call above
            grad_norm_val = grad_norm.item()
            importance_mean = importance_scores.mean().item()
            # Bits per character, assuming the model reports cross-entropy in nats
            bpc = loss.item() / math.log(2)

            # t0 was last reset config.log_interval iterations ago, so scale
            # the per-batch token count accordingly
            elapsed = max(time.time() - t0, 1e-8)
            tokens_per_sec = (config.log_interval * batch_size * block_size) / elapsed

            pbar.set_description(
                f"Training ({current_epoch:.1f}/{total_epochs} epochs) | "
                f"loss: {loss.item():.4f} | "
                f"bpc: {bpc:.2f} | "
                f"imp: {importance_mean:.2f} | "
                f"lr: {lr:.1e} | "
                f"tokens/sec: {tokens_per_sec:.1f}"
            )

            wandb.log({
                "iter": iter_num,
                "loss": loss.item(),
                "bpc": bpc,
                "lr": lr,
                "grad_norm": grad_norm_val,
                "importance_mean": importance_mean,
                "epoch": current_epoch,
                "tokens_per_sec": tokens_per_sec,
            })

            t0 = time.time()

        # Periodically log an importance bar chart for the first sequence in
        # the batch (tokens are raw byte values for enwik8)
        if iter_num % (config.log_interval * 10) == 0 and master_process:
            visualize_importance(
                X[0].cpu().numpy(),
                importance_scores[0],
                iter_num,
            )

        if iter_num > 0 and iter_num % config.eval_interval == 0:
            val_loss = estimate_loss(model, val_data, config).item()
            current_epoch = ((iter_num + 1) * batch_size * block_size) / len(train_data)

            # Early-stopping bookkeeping: patience is counted in eval intervals,
            # i.e. the budget is config.patience * config.eval_interval iterations
            if val_loss < best_val_loss - config.min_delta:
                best_val_loss = val_loss
                no_improvement = 0
                if master_process:
                    torch.save(raw_model.state_dict(), os.path.join(checkpoint_dir, 'best.pt'))
                    print(f"Saved best model at iteration {iter_num} with val_loss: {val_loss:.4f}")
            else:
                no_improvement += 1

            if master_process:
                wandb.log({
                    "iter": iter_num,
                    "val_loss": val_loss,
                    "val_bpc": val_loss / math.log(2),
                    "epoch": current_epoch,
                })

        if iter_num > 0 and iter_num % 1000 == 0 and master_process:
            checkpoint = {
                'model_state_dict': raw_model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'iter_num': iter_num,
                'best_val_loss': best_val_loss,
                'config': config,
            }
            checkpoint_path = os.path.join(checkpoint_dir, f'checkpoint_{iter_num:06d}.pt')
            torch.save(checkpoint, checkpoint_path)
            print(f"\nSaved checkpoint at iteration {iter_num} to {checkpoint_path}")

    if master_process:
        wandb.finish()
    if ddp:
        destroy_process_group()


if __name__ == '__main__':
    main()