# sft-v5 / train5.py
import argparse
import functools
import gc
import os
import time

import torch
import torch.distributed as dist
import torch.optim as optim
import wandb
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from transformers import AutoTokenizer

from dataset import *
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
)
from modelforseminat_v5 import *
from transformers import get_cosine_schedule_with_warmup
################################# FSDP Config #####################################
def setup():
# initialize the process group
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
dist.init_process_group(
backend='nccl',
init_method='env://',
)
def cleanup():
gc.collect()
torch.cuda.empty_cache()
dist.destroy_process_group()
def get_fsdp_device():
    # Call after each process has initialized the distributed environment.
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
device = torch.device(f"cuda:{local_rank}")
torch.cuda.set_device(device)
return device
def setup_model(
    model_name: str,                        # HF hub id or local directory
    dtype: str = "bf16",                    # "bf16" | "fp16" | "fp32"
    chunk_size_limit: int = 5,
    attn_impl: str = "flash_attention_2",
    load_model_dir: str | None = None,      # optional .pt checkpoint to load on top
    decoder_layers: int = 1,
    encoder_layer: int = 1,
    mlp: bool = False,
    position_embedding_type: str = "absolute",
    base: str | None = None,
    length_loss_type: str = "ce",
):
    # --- 1. Build the config ---------------------------------------------------
config = Olmo2ConfigForSemiNAT.from_pretrained(
model_name,
chunk_size_limit=chunk_size_limit,
attn_implementation=attn_impl,
decoder_layers=decoder_layers,
encoder_layer=encoder_layer,
mlp=mlp,
position_embedding_type=position_embedding_type,
length_loss_type=length_loss_type
)
    # --- 2. Resolve the torch dtype ---------------------------------------------
    dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
    torch_dtype = dtype_map.get(dtype, torch.float32)
    # --- 3. Load the base model --------------------------------------------------
    if base == "scratch":
        model = Olmo2ForCausalLMForSemiNAT(config).to(torch_dtype)
    elif base == "pretrained":
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name,
            config=config,
            torch_dtype=torch_dtype
        )
    else:
        raise ValueError(f"Unknown base: {base!r} (expected 'scratch' or 'pretrained')")
    # --- 4. Optionally load extra .pt weights -----------------------------------
if load_model_dir and os.path.isfile(load_model_dir) and load_model_dir.endswith(".pt"):
ckpt = torch.load(load_model_dir, map_location="cpu", weights_only=True)
missing_keys, unexpected_keys = model.load_state_dict(ckpt, strict=False)
print(f"[INFO] Loaded weights from {load_model_dir}")
print(
f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
)
if missing_keys:
print("Missing keys:", missing_keys)
if unexpected_keys:
print("Unexpected keys:", unexpected_keys)
    else:
        print("[INFO] No extra .pt weights loaded")
    # --- 5. Tokenizer ------------------------------------------------------------
    tokenizer = AutoTokenizer.from_pretrained(model_name)
return model, tokenizer
def reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
    # Sum the tensor across all ranks and divide by the world size.
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
tensor /= dist.get_world_size()
return tensor
def collate_fn(batch):
    # Drop samples the dataset returned as None.
    batch = [x for x in batch if x is not None]
    if len(batch) == 0:
        return None  # the whole batch was invalid
input_ids, labels, attention_mask, slice_arr, slice_label = zip(*batch)
return (
torch.stack(input_ids),
torch.stack(labels),
torch.stack(attention_mask),
torch.stack(slice_arr),
torch.stack(slice_label)
)
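# Note: collate_fn assumes each dataset item is a 5-tuple of equally sized tensors
# (input_ids, labels, attention_mask, slice_arr, slice_label) so that torch.stack works
# without extra padding; padding to max_length is expected to happen inside the dataset class.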
def fsdp_main(args):
local_rank = int(os.environ['LOCAL_RANK'])
rank = int(os.environ['RANK'])
world_size = int(os.environ['WORLD_SIZE'])
if args.use_wandb and rank == 0:
wandb.init(entity="SemiNAT", project=args.project_name, name=args.run_name)
    DEVICE = f"cuda:{local_rank}"
    model, tokenizer = setup_model(
        args.model_path,
        dtype=args.dtype,
        chunk_size_limit=args.chunk_size_limit,
        attn_impl=args.attn_implementation,
        load_model_dir=args.ptm_model_path,
        decoder_layers=args.decoder_layers,
        encoder_layer=args.encoder_layers,
        mlp=args.mlp,
        position_embedding_type=args.position_embedding_type,
        base=args.base,
        length_loss_type=args.length_loss_type,
    )
optimizer = optim.AdamW(
model.parameters(),
lr=args.lr,
betas=args.betas,
weight_decay=args.weight_decay,
eps=args.eps,
)
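    # The dataset class is resolved by name from whatever `from dataset import *` exposes;
    # it is assumed to take (tokenizer, data_path, max_length) as constructor arguments.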
train_dataset = eval(f"{args.data_type}")(
tokenizer,
args.data_path,
args.max_length
)
train_sampler = DistributedSampler(train_dataset,
rank=rank,
num_replicas=world_size,
shuffle=True,
drop_last=True)
train_dataloader = DataLoader(dataset=train_dataset,
sampler=train_sampler,
batch_size=args.batch_size,
num_workers=args.data_processess_num,
collate_fn=collate_fn)
    num_training_steps = args.epochs * len(train_dataloader)  # total number of optimizer steps
    num_warmup_steps = int(num_training_steps * args.warmup_ratio)
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps
)
    global_step = 0
    if args.resume_path:
        # Model weights can be restored before FSDP wrapping; the optimizer/scheduler
        # state from the same checkpoint is restored after wrapping (see below).
        checkpoint = torch.load(args.resume_path, map_location="cpu", weights_only=True)
        missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False)
        print(
            f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
        )
        if missing_keys:
            print("Missing keys:", missing_keys)
        if unexpected_keys:
            print("Unexpected keys:", unexpected_keys)
print(f"Size of train dataset: {len(train_dataset)}")
setup()
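    # Wrap each (SemiNAT) decoder layer as its own FSDP unit so parameters are gathered
    # and resharded layer by layer rather than for the whole model at once.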
Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
Olmo2DecoderLayer,
Olmo2DecoderLayerForSemiNAT
}
)
    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # FULL_SHARD == ZeRO-3; use SHARD_GRAD_OP for ZeRO-2
torch.cuda.set_device(local_rank)
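    # Keep parameters, gradient reduce-scatter, and buffers in bf16 during FSDP
    # computation and communication.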
mp_policy = MixedPrecision(
param_dtype=torch.bfloat16,
reduce_dtype=torch.bfloat16,
buffer_dtype=torch.bfloat16,
)
    model = FSDP(model,
                 auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
                 mixed_precision=mp_policy,
                 sharding_strategy=sharding_strategy,
                 device_id=torch.cuda.current_device(),
                 use_orig_params=True)
    if args.resume_path:
        # Restore optimizer/scheduler state from the resume checkpoint now that the model
        # is FSDP-wrapped; sharding the full optimizer state needs the wrapped module.
        sharded_state = FSDP.shard_full_optim_state_dict(checkpoint["optimizer"], model)
        optimizer.load_state_dict(sharded_state)
        scheduler.load_state_dict(checkpoint["scheduler"])
        global_step = checkpoint.get("global_step", 0)
    torch.autograd.set_detect_anomaly(True)  # debugging aid; slows training noticeably
loss1_list = []
loss2_list = []
loss_list = []
start_time = time.time()
for epoch in range(1, args.epochs + 1):
model.train()
if train_sampler:
train_sampler.set_epoch(epoch)
if rank == 0:
inner_pbar = tqdm(range(len(train_dataloader)),
colour="blue",
desc="r0 Training Epoch")
for batch in train_dataloader:
if batch is None:
continue
optimizer.zero_grad()
loss1, loss2 = model(input_ids=batch[0],
labels=batch[1],
attention_mask=batch[2],
slice_pos=batch[3],
slice_label=batch[4],
use_cache=False).loss
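            # Total objective: weighted length-prediction loss (L1) plus the NAT token loss (L2).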
loss = args.alpha * loss1 + loss2
loss1_scalar = reduce_mean(loss1.detach()).item()
loss2_scalar = reduce_mean(loss2.detach()).item()
total_loss_scalar = reduce_mean(loss.detach()).item()
loss.backward()
optimizer.step()
scheduler.step()
global_step += 1
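            # Periodic checkpointing: gather the full (unsharded) model and optimizer state
            # to CPU on rank 0 so the checkpoint can be reloaded independently of the
            # current sharding layout.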
if global_step % args.save_steps == 0:
save_policy = FullStateDictConfig(offload_to_cpu=True,
rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
save_policy):
cpu_state = model.state_dict()
full_optim_state = FSDP.full_optim_state_dict(model, optimizer, rank0_only=True)
if rank == 0:
print(f"--> steps: {str(global_step)} saving model ...")
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
print(f"--> saving as model name {save_name}")
save_path = os.path.join(args.save_path, save_name)
torch.save({
"model": cpu_state,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict() if scheduler else None,
"global_step": global_step,
"args": vars(args),
}, save_path)
if rank == 0:
loss1_list.append(loss1_scalar)
loss2_list.append(loss2_scalar)
loss_list.append(total_loss_scalar)
inner_pbar.update(1)
if args.use_wandb and rank == 0:
wandb.log({
"Length prediction loss (L1)": sum(loss1_list[-20:]) / len(loss1_list[-20:]),
"NAT loss (L2)": sum(loss2_list[-20:]) / len(loss2_list[-20:]),
f"Loss {args.alpha} * L1 + L2": sum(loss_list[-20:]) / len(loss_list[-20:]),
"lr": scheduler.get_last_lr()[0]
})
# avg_mem = sum(memories) / len(memories)
# print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")
# dist.all_reduce(loss, op=dist.ReduceOp.SUM)
if rank == 0:
inner_pbar.close()
end_time = time.time()
print(f"Training time: {end_time - start_time} seconds")
dist.barrier()
cleanup()
################################# FSDP Config #####################################
if __name__ == "__main__":
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size',
type=int,
default=4,
metavar='N',
                        help='input batch size for training (default: 4)')
parser.add_argument('--model_path', type=str)
parser.add_argument('--save_path', type=str)
parser.add_argument('--save_name', type=str)
parser.add_argument('--data_path', type=str)
parser.add_argument('--data_type', type=str)
parser.add_argument('--project_name', type=str)
parser.add_argument('--run_name', type=str)
parser.add_argument('--max_length', type=int)
parser.add_argument('--chunk_size_limit', type=int)
parser.add_argument('--save_steps', type=int, default=5000)
parser.add_argument('--data_processess_num', type=int, default=8)
parser.add_argument('--epochs',
type=int,
default=2,
metavar='N',
                        help='number of epochs to train (default: 2)')
parser.add_argument('--lr',
type=float,
default=.002,
metavar='LR',
help='learning rate (default: .002)')
parser.add_argument('--weight_decay', type=float)
parser.add_argument('--betas', type=float, nargs=2)
parser.add_argument('--eps', type=float)
parser.add_argument('--warmup_ratio', type=float)
parser.add_argument('--seed',
type=int,
default=1,
metavar='S',
help='random seed (default: 1)')
parser.add_argument('--use_lora', action='store_true', default=False)
parser.add_argument("--use_wandb",
action="store_true",
help="whether to use wandb")
parser.add_argument('--dtype', type=str)
    parser.add_argument('--resume_path', type=str, default=None)
    parser.add_argument('--attn_implementation', type=str)
    parser.add_argument('--ptm_model_path', type=str, default=None)
    parser.add_argument('--decoder_layers', type=int, default=1)
    parser.add_argument('--encoder_layers', type=int, default=1)
    parser.add_argument('--mlp', action='store_true', default=False)
    parser.add_argument('--position_embedding_type', type=str, default="absolute", choices=["absolute", "relative"])
    parser.add_argument('--base', type=str, default="scratch", choices=["scratch", "pretrained"])
    parser.add_argument('--length_loss_type', type=str, default="ce", choices=["mse", "ce"])
parser.add_argument('--alpha', type=float, default=0.4)
args = parser.parse_args()
torch.manual_seed(args.seed)
fsdp_main(args)
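
# Example launch (illustrative sketch only; the model id, dataset class name, paths, and
# hyperparameters below are placeholders, not values shipped with this script):
#
#   torchrun --nproc_per_node=8 train5.py \
#       --model_path allenai/OLMo-2-1124-7B \
#       --data_path data/train.jsonl --data_type SemiNATDataset \
#       --max_length 2048 --chunk_size_limit 5 \
#       --batch-size 4 --epochs 2 --lr 2e-5 --betas 0.9 0.95 \
#       --weight_decay 0.1 --eps 1e-8 --warmup_ratio 0.03 \
#       --dtype bf16 --attn_implementation flash_attention_2 \
#       --save_path checkpoints/ --save_name sft-v5 \
#       --use_wandb --project_name seminat --run_name sft-v5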