import argparse
import copy
import functools
import gc
import json
import math
import os
import pdb
import pickle
import random
import sys
import time
from collections import defaultdict
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import List, Optional, Tuple, Type, Union

import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import transformers
import wandb
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    CheckpointImpl,
    checkpoint_wrapper,
)
from torch.distributed.fsdp import (
    BackwardPrefetch,
    FullStateDictConfig,
    FullyShardedDataParallel as FSDP,
    MixedPrecision,
    ShardingStrategy,
    StateDictType,
)
from torch.distributed.fsdp.wrap import (
    enable_wrap,
    transformer_auto_wrap_policy,
    wrap,
)
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from datasets import Dataset, DatasetDict, load_dataset
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    get_cosine_schedule_with_warmup,
)
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
)
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES

from dataset import *
from modelforseminat_v5 import *


def setup():
    # Bind this process to its local GPU and join the NCCL process group
    # using the environment variables set by the launcher (torchrun).
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
    )


def cleanup():
    # Release cached memory and tear down the process group after training.
    gc.collect()
    torch.cuda.empty_cache()
    dist.destroy_process_group()


def get_fsdp_device():
    # Resolve the CUDA device for this rank (defaults to GPU 0 when LOCAL_RANK is unset).
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    return device


def setup_model(model_name, dtype):
    # Load the SemiNAT model in the requested precision; anything other than
    # "bf16"/"fp16" falls back to the checkpoint's default dtype.
    if dtype == "bf16":
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16
        )
    elif dtype == "fp16":
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name,
            torch_dtype=torch.float16
        )
    else:
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name
        )
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    return model, tokenizer


def collate_fn(batch):
    # Drop samples the dataset failed to produce; skip the batch entirely if none remain.
    batch = [x for x in batch if x is not None]
    if len(batch) == 0:
        return None

    input_ids, labels, attention_mask, slice_arr, slice_label = zip(*batch)

    return (
        torch.stack(input_ids),
        torch.stack(labels),
        torch.stack(attention_mask),
        torch.stack(slice_arr),
        torch.stack(slice_label)
    )
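
# Note: collate_fn assumes each dataset item is a 5-tuple of equally sized tensors,
# (input_ids, labels, attention_mask, slice_arr, slice_label); this is inferred from
# the unpacking and stacking above and is not enforced here.
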
def fsdp_main(args):
    local_rank = int(os.environ['LOCAL_RANK'])
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    if args.use_wandb and rank == 0:
        wandb.init(entity="SemiNAT", project="SemiNAT-SFT", name=args.run_name)

    device = f"cuda:{local_rank}"

    # Load the model in the precision requested on the command line.
    model, tokenizer = setup_model(args.model_path, args.dtype)

    model.config.chunk_size_limit = args.chunk_size_limit

    # args.data_type names a dataset class exported by `from dataset import *`.
    train_dataset = eval(args.data_type)(
        tokenizer,
        args.data_path,
        args.max_length
    )
    train_sampler = DistributedSampler(train_dataset,
                                       rank=rank,
                                       num_replicas=world_size,
                                       shuffle=True)

    train_dataloader = DataLoader(dataset=train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.batch_size,
                                  num_workers=args.data_processess_num,
                                  collate_fn=collate_fn)

    print(f"Size of train dataset: {len(train_dataset)}")

    setup()

    Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={
            Olmo2DecoderLayer,
            Olmo2DecoderLayerForSemiNAT
        }
    )
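
    # With transformer_auto_wrap_policy, FSDP wraps every module of the classes listed
    # above into its own FSDP unit, so parameters are sharded and gathered one decoder
    # layer at a time rather than for the whole model at once.
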
    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD
    torch.cuda.set_device(local_rank)

    # Keep parameters, gradient reduction, and buffers in bfloat16 under FSDP.
    mp_policy = MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.bfloat16,
        buffer_dtype=torch.bfloat16,
    )

    model = FSDP(model,
                 auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
                 mixed_precision=mp_policy,
                 sharding_strategy=sharding_strategy,
                 device_id=torch.cuda.current_device(),
                 use_orig_params=True)

    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        betas=args.betas,
        weight_decay=args.weight_decay,
        eps=args.eps,
    )
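
    # Because the model is wrapped with use_orig_params=True, building the optimizer
    # from model.parameters() after wrapping still operates on the original
    # (unflattened) parameters rather than FSDP's internal flat parameters.
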
    # The schedule is defined in optimizer steps: one step per batch, over all epochs.
    num_training_steps = args.epochs * len(train_dataloader)
    num_warmup_steps = int(num_training_steps * args.warmup_ratio)

    scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps
    )
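
    # Illustrative numbers (not from any particular run): with 2 epochs over a
    # 1,000-batch dataloader and warmup_ratio=0.05, this gives 2,000 total steps
    # and 100 warmup steps before the cosine decay begins.
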
    # Anomaly detection helps localize NaN/Inf gradients but slows training noticeably.
    torch.autograd.set_detect_anomaly(True)

    loss1_list = []
    loss2_list = []
    loss_list = []

    global_step = 0

    start_time = time.time()

    for epoch in range(1, args.epochs + 1):

        model.train()

        if train_sampler:
            train_sampler.set_epoch(epoch)
        if rank == 0:
            inner_pbar = tqdm(range(len(train_dataloader)),
                              colour="blue",
                              desc="r0 Training Epoch")

        memories = []

        for batch in train_dataloader:
            if batch is None:
                continue
            optimizer.zero_grad()
            # Move the collated tensors onto this rank's GPU before the forward pass.
            batch = tuple(t.to(device) for t in batch)
            loss1, loss2 = model(input_ids=batch[0],
                                 labels=batch[1],
                                 attention_mask=batch[2],
                                 slice_pos=batch[3],
                                 slice_label=batch[4],
                                 use_cache=False).loss
            loss = loss1 + loss2

            loss1_list.append(loss1.item())
            loss2_list.append(loss2.item())
            loss_list.append(loss.item())

            loss.backward()
            optimizer.step()
            # Advance the LR schedule once per optimizer update, matching the
            # step count the scheduler was built with.
            scheduler.step()

            mem = torch.cuda.memory_allocated() / (1024 ** 2)
            memories.append(mem)

            global_step += 1

            if global_step % args.save_steps == 0:
                # Gather a full (unsharded) state dict onto CPU, materialized only on rank 0.
                save_policy = FullStateDictConfig(offload_to_cpu=True,
                                                  rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
                                          save_policy):
                    cpu_state = model.state_dict()

                if rank == 0:
                    print(f"--> steps: {str(global_step)} saving model ...")
                    if not os.path.exists(args.save_path):
                        os.makedirs(args.save_path)
                    save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
                    print(f"--> saving as model name {save_name}")
                    save_path = os.path.join(args.save_path, save_name)
                    torch.save(cpu_state, save_path)

            if rank == 0:
                inner_pbar.update(1)
            if args.use_wandb and rank == 0:
                # Log running means over the last (up to) 20 batches.
                wandb.log({
                    "length prediction loss":
                    sum(loss1_list[-20:]) / len(loss1_list[-20:]),
                    "nat loss":
                    sum(loss2_list[-20:]) / len(loss2_list[-20:]),
                    "loss":
                    sum(loss_list[-20:]) / len(loss_list[-20:]),
                    "lr": scheduler.get_last_lr()[0]
                })

        if memories:
            avg_mem = sum(memories) / len(memories)
            print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")

        # Synchronize ranks on the last batch loss (the reduced value is not used further).
        dist.all_reduce(loss, op=dist.ReduceOp.SUM)

        if rank == 0:
            inner_pbar.close()

    end_time = time.time()
    print(f"Training time: {end_time - start_time} seconds")

    dist.barrier()
    cleanup()


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size',
                        type=int,
                        default=4,
                        metavar='N',
                        help='input batch size for training (default: 4)')
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--save_path', type=str)
    parser.add_argument('--save_name', type=str)
    parser.add_argument('--data_path', type=str)
    parser.add_argument('--data_type', type=str)
    parser.add_argument('--run_name', type=str)
    parser.add_argument('--max_length', type=int)
    parser.add_argument('--chunk_size_limit', type=int)
    parser.add_argument('--save_steps', type=int, default=5000)
    parser.add_argument('--data_processess_num', type=int, default=8,
                        help='number of DataLoader worker processes')
    parser.add_argument('--epochs',
                        type=int,
                        default=2,
                        metavar='N',
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr',
                        type=float,
                        default=.002,
                        metavar='LR',
                        help='learning rate (default: .002)')
    parser.add_argument('--weight_decay', type=float)
    parser.add_argument('--betas', type=float, nargs=2)
    parser.add_argument('--eps', type=float)
    parser.add_argument('--warmup_ratio', type=float)
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--use_lora', action='store_true', default=False)
    parser.add_argument("--use_wandb",
                        action="store_true",
                        help="whether to use wandb")
    parser.add_argument('--dtype', type=str)
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    fsdp_main(args)
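
# Example launch (illustrative only; the script name, paths, dataset class name, and
# hyperparameter values below are placeholders, not values taken from any actual run):
#
#   torchrun --nproc_per_node=8 path/to/this_script.py \
#       --model_path /path/to/olmo2-seminat-checkpoint \
#       --data_path /path/to/sft_data.jsonl \
#       --data_type YourDatasetClass \
#       --save_path ./checkpoints --save_name seminat-sft \
#       --run_name seminat-sft-run --max_length 2048 --chunk_size_limit 8 \
#       --batch-size 4 --epochs 2 --lr 2e-5 --weight_decay 0.1 \
#       --betas 0.9 0.95 --eps 1e-8 --warmup_ratio 0.03 --dtype bf16 --use_wandb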