# sft-v5 / train4.py — uploaded via huggingface_hub by ykzhang721 (rev d437286)
import json
import pdb
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
import copy
from transformers.modeling_outputs import (
MoeCausalLMOutputWithPast,
MoeModelOutputWithPast,
)
from collections import defaultdict
import numpy as np
import math
from torch import nn
# import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass
# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
# from transformers.models.olmoe.modeling_olmoe import OlmoeMLP, OlmoeAttention, OlmoeFlashAttention2, OlmoeSdpaAttention, OlmoeRMSNorm, OlmoeSparseMoeBlock, apply_rotary_pos_emb, repeat_kv, OlmoeRotaryEmbedding
import os
import sys
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import transformers
import pickle
# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
from dataset import *
# from utils import flash_attn_forward, flash_attn_prepare_decoder_attention_mask, get_multiround_data
# from peft import (get_peft_model, PeftModel)
import random
# from config import *
from datasets import Dataset, DatasetDict, load_dataset
import wandb
import gc
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import functools
from torch.optim.lr_scheduler import StepLR
import torch.nn.functional as F
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data.distributed import DistributedSampler
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
checkpoint_wrapper, CheckpointImpl)
from torch.distributed.fsdp import (
FullyShardedDataParallel as FSDP,
MixedPrecision,
BackwardPrefetch,
ShardingStrategy,
FullStateDictConfig,
StateDictType,
)
from torch.distributed.fsdp.wrap import (
transformer_auto_wrap_policy,
enable_wrap,
wrap,
)
from functools import partial
from torch.utils.data import DataLoader
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union
from modelforseminat_v5 import *
from transformers import get_cosine_schedule_with_warmup
# from torch.optim.lr_scheduler import _LRScheduler
# class WarmupCosineScheduler(_LRScheduler):
# def __init__(self,
# optimizer,
# warmup_steps,
# total_steps,
# min_lr=0.0,
# last_epoch=-1):
# # self.warmup_steps = warmup_steps
# self.total_steps = total_steps
# self.min_lr = min_lr
# if isinstance(warmup_steps, float) and 0 < warmup_steps < 1:
# self.warmup_steps = int(warmup_steps * total_steps)
# else:
# self.warmup_steps = int(warmup_steps)
# super().__init__(optimizer, last_epoch)
# def get_lr(self):
# step = self.last_epoch + 1
# lrs = []
# for base_lr in self.base_lrs:
# if step < self.warmup_steps:
# # Linear warmup
# lr = base_lr * step / self.warmup_steps
# else:
# # Cosine decay
# progress = (step - self.warmup_steps) / max(
# 1, self.total_steps - self.warmup_steps)
# cosine_decay = 0.5 * (1 + math.cos(math.pi * progress))
# lr = self.min_lr + (base_lr - self.min_lr) * cosine_decay
# lrs.append(lr)
# return lrs
################################# FSDP Config #####################################
def setup():
    """Bind this process to its GPU and join the NCCL process group.

    Relies on the torchrun-provided LOCAL_RANK / MASTER_ADDR env vars.
    """
    device_index = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(device_index)
    dist.init_process_group(backend='nccl', init_method='env://')
def cleanup():
    """Release cached GPU memory and tear down the distributed process group."""
    # Collect Python garbage first so dangling tensor refs are dropped and
    # their CUDA blocks can actually be returned by empty_cache().
    gc.collect()
    torch.cuda.empty_cache()
    # Leave the collective group last, after all memory work is done.
    dist.destroy_process_group()
def get_fsdp_device():
    """Return this process's CUDA device and make it current.

    Call after the distributed environment is initialized in each process;
    LOCAL_RANK is set automatically by torchrun (defaults to 0 otherwise).
    """
    rank_on_node = int(os.environ.get("LOCAL_RANK", 0))
    cuda_device = torch.device(f"cuda:{rank_on_node}")
    torch.cuda.set_device(cuda_device)
    return cuda_device
# def load_trained_model(model_name):
# DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# olmo_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B"
# pt_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat/ckp/sft-v4-0616-1w-1e3-chunklimit5-jueduipos/sft-v4-1e3-len4-fc-chunklimit4-jueduipos-epoch_136.pt"
# config_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B/config.json"
# config = AutoConfig.from_pretrained(olmo_path)
# model = Olmo2ForCausalLMForSemiNAT.from_pretrained(olmo_path,
# config=config,
# torch_dtype=torch.bfloat16)
# state_dict = torch.load(pt_path, map_location=DEVICE, weights_only=True)
# missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
# print(
# f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
# )
# if missing_keys:
# print("Missing keys:", missing_keys)
# if unexpected_keys:
# print("Unexpected keys:", unexpected_keys)
# model = model.to(DEVICE)
# tokenizer = AutoTokenizer.from_pretrained(olmo_path)
# return model, tokenizer
# def setup_model(model_name,device):
# model = Olmo2ForCausalLMForSemiNAT.from_pretrained(model_name,torch_dtype=torch.bfloat16,device_map=device)
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# # config = AutoConfig.from_pretrained(model_name)
# # model = Olmo2ForCausalLMForSemiNAT(config) # 注意这里不用 from_pretrained
# # tokenizer = AutoTokenizer.from_pretrained(model_name)
# return model, tokenizer
def setup_model(model_name, type):
    """Load the SemiNAT causal LM and its tokenizer from a checkpoint.

    Args:
        model_name: HF hub id or local path of the pretrained checkpoint.
        type: precision flag — "bf16" or "fp16"; any other value loads the
            checkpoint in its default (fp32) dtype. (The name shadows the
            builtin `type` but is kept for backward compatibility with
            keyword callers.)

    Returns:
        (model, tokenizer) tuple.
    """
    # Dict dispatch replaces the original if/elif chain; unknown flags fall
    # through to the default-dtype load, exactly as before.
    dtype_by_flag = {"bf16": torch.bfloat16, "fp16": torch.float16}
    torch_dtype = dtype_by_flag.get(type)
    if torch_dtype is not None:
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name,
            torch_dtype=torch_dtype
        )
    else:
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer
def collate_fn(batch):
    """Drop invalid (None) samples and stack each field into a batch tensor.

    Each valid sample is a 5-tuple
    (input_ids, labels, attention_mask, slice_arr, slice_label);
    returns the stacked 5-tuple, or None when the whole batch was invalid.
    """
    valid_samples = [sample for sample in batch if sample is not None]
    if not valid_samples:
        # Nothing usable in this batch — the training loop skips None batches.
        return None
    ids, lbls, mask, s_pos, s_lbl = zip(*valid_samples)
    return (
        torch.stack(ids),
        torch.stack(lbls),
        torch.stack(mask),
        torch.stack(s_pos),
        torch.stack(s_lbl),
    )
def fsdp_main(args):
    """Run one FSDP SFT training job: load model and data, wrap with FSDP,
    train with a per-step cosine schedule, checkpoint every `save_steps`.

    Expects torchrun-style env vars (LOCAL_RANK / RANK / WORLD_SIZE).
    """
    import time  # defensive: `time` is otherwise only in scope via star imports

    local_rank = int(os.environ['LOCAL_RANK'])
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    if args.use_wandb and rank == 0:
        wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)

    # BUG FIX: setup_model's second positional parameter is the precision flag
    # ("bf16"/"fp16"), not a device. The original passed a device string, which
    # matched neither branch and silently loaded the model in fp32 while the
    # --dtype CLI flag went unused. Route args.dtype through instead (the
    # commented-out call in the original showed this intent).
    model, tokenizer = setup_model(args.model_path, args.dtype)
    model.config.chunk_size_limit = args.chunk_size_limit

    # NOTE(review): args.data_type is eval'd to pick a dataset class from the
    # `dataset` star import — unsafe for untrusted CLI input; a dict of
    # allowed classes would be safer.
    train_dataset = eval(f"{args.data_type}")(
        tokenizer,
        args.data_path,
        args.max_length
    )
    train_sampler = DistributedSampler(train_dataset,
                                       rank=rank,
                                       num_replicas=world_size,
                                       shuffle=True)
    train_dataloader = DataLoader(dataset=train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.batch_size,
                                  num_workers=args.data_processess_num,
                                  collate_fn=collate_fn)
    print(f"Size of train dataset: {len(train_dataset)}")
    setup()

    # Shard both the vanilla and the SemiNAT decoder layers.
    auto_wrap_policy = functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={
            Olmo2DecoderLayer,
            Olmo2DecoderLayerForSemiNAT
        }
    )
    # FULL_SHARD == ZeRO-3; use SHARD_GRAD_OP for ZeRO-2.
    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD
    torch.cuda.set_device(local_rank)
    mp_policy = MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.bfloat16,
        buffer_dtype=torch.bfloat16,
    )
    # Model stays on CPU until FSDP moves each shard onto its device.
    model = FSDP(model,
                 auto_wrap_policy=auto_wrap_policy,
                 mixed_precision=mp_policy,
                 sharding_strategy=sharding_strategy,
                 device_id=torch.cuda.current_device(),
                 use_orig_params=True)
    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        betas=args.betas,
        weight_decay=args.weight_decay,
        eps=args.eps,
    )

    # Cosine schedule with linear warmup, counted in OPTIMIZER STEPS.
    num_training_steps = args.epochs * len(train_dataloader)
    num_warmup_steps = num_training_steps * args.warmup_ratio
    scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps
    )

    # NOTE(review): anomaly detection slows training substantially; keep it
    # only while debugging NaN losses.
    torch.autograd.set_detect_anomaly(True)

    loss1_list = []
    loss2_list = []
    loss_list = []
    global_step = 0
    start_time = time.time()
    for epoch in range(1, args.epochs + 1):
        model.train()
        local_rank = int(os.environ['LOCAL_RANK'])
        if train_sampler:
            train_sampler.set_epoch(epoch)  # reshuffle shards each epoch
        if rank == 0:
            inner_pbar = tqdm(range(len(train_dataloader)),
                              colour="blue",
                              desc="r0 Training Epoch")
        memories = []
        for batch in train_dataloader:
            if batch is None:  # whole batch was filtered out by collate_fn
                continue
            optimizer.zero_grad()
            # The SemiNAT model packs (length-prediction loss, NAT loss) into
            # .loss — assumed from the unpack here; confirm in modelforseminat_v5.
            loss1, loss2 = model(input_ids=batch[0],
                                 labels=batch[1],
                                 attention_mask=batch[2],
                                 slice_pos=batch[3],
                                 slice_label=batch[4],
                                 use_cache=False).loss
            loss = loss1 + loss2
            loss1_list.append(loss1.item())
            loss2_list.append(loss2.item())
            loss_list.append(loss.item())
            loss.backward()
            optimizer.step()
            # BUG FIX: get_cosine_schedule_with_warmup expects one step() per
            # optimizer step (num_training_steps counts batches), but the
            # original stepped it once per EPOCH, so the LR stayed pinned in
            # early warmup for the whole run.
            scheduler.step()
            mem = torch.cuda.memory_allocated() / (1024 ** 2)
            memories.append(mem)
            global_step += 1
            if global_step % args.save_steps == 0:
                # Gather a full (unsharded) state dict on rank 0, offloaded to
                # CPU so it fits in host memory.
                save_policy = FullStateDictConfig(offload_to_cpu=True,
                                                  rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
                                          save_policy):
                    cpu_state = model.state_dict()
                if rank == 0:
                    print(f"--> steps: {str(global_step)} saving model ...")
                    if not os.path.exists(args.save_path):
                        os.makedirs(args.save_path)
                    save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
                    print(f"--> saving as model name {save_name}")
                    save_path = os.path.join(args.save_path, save_name)
                    torch.save(cpu_state, save_path)
            if rank == 0:
                inner_pbar.update(1)
            if args.use_wandb and rank == 0:
                # Log a 20-step moving average of each loss component.
                wandb.log({
                    "length prediction loss":
                    sum(loss1_list[-20:]) / len(loss1_list[-20:]),
                    "nat loss":
                    sum(loss2_list[-20:]) / len(loss2_list[-20:]),
                    "loss":
                    sum(loss_list[-20:]) / len(loss_list[-20:]),
                    "lr": scheduler.get_last_lr()[0]
                })
        # Guard against an epoch where every batch was filtered out.
        if memories:
            avg_mem = sum(memories) / len(memories)
            print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")
            # Sync ranks on the last batch's loss (result is not used further).
            dist.all_reduce(loss, op=dist.ReduceOp.SUM)
        if rank == 0:
            inner_pbar.close()
    end_time = time.time()
    print(f"Training time: {end_time - start_time} seconds")
    dist.barrier()
    cleanup()
################################# FSDP Config #####################################
if __name__ == "__main__":
    # Training settings — parsed from the command line; fsdp_main does the rest.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size',
                        type=int,
                        default=4,
                        metavar='N',
                        # BUG FIX: help text said "default: 64" but the actual
                        # default is 4.
                        help='input batch size for training (default: 4)')
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--save_path', type=str)
    parser.add_argument('--save_name', type=str)
    parser.add_argument('--data_path', type=str)
    parser.add_argument('--data_type', type=str)
    parser.add_argument('--run_name', type=str)
    parser.add_argument('--max_length', type=int)
    parser.add_argument('--chunk_size_limit', type=int)
    parser.add_argument('--save_steps', type=int, default=5000)
    parser.add_argument('--data_processess_num', type=int, default=8)
    parser.add_argument('--epochs',
                        type=int,
                        default=2,
                        metavar='N',
                        # BUG FIX: help text said "default: 3" but the actual
                        # default is 2.
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr',
                        type=float,
                        default=.002,
                        metavar='LR',
                        help='learning rate (default: .002)')
    parser.add_argument('--weight_decay', type=float)
    parser.add_argument('--betas', type=float, nargs=2)
    parser.add_argument('--eps', type=float)
    parser.add_argument('--warmup_ratio', type=float)
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--use_lora', action='store_true', default=False)
    parser.add_argument("--use_wandb",
                        action="store_true",
                        help="whether to use wandb")
    parser.add_argument('--dtype', type=str)
    args = parser.parse_args()
    torch.manual_seed(args.seed)
    fsdp_main(args)