ykzhang721 committed
Commit d966684 · verified · 1 Parent(s): 7387b16

Upload train4.py with huggingface_hub

Files changed (1): train4.py (+539, -0)
train4.py ADDED
import json
import pdb
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from transformers.models.auto.modeling_auto import MODEL_FOR_IMAGE_TEXT_TO_TEXT_MAPPING_NAMES
import copy
from transformers.modeling_outputs import (
    MoeCausalLMOutputWithPast,
    MoeModelOutputWithPast,
)
from collections import defaultdict
import numpy as np
import math
import time  # needed for the wall-clock timing (time.time()) in fsdp_main
from torch import nn
# import pandas as pd
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from dataclasses import dataclass
# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
# from transformers.models.olmoe.modeling_olmoe import OlmoeMLP, OlmoeAttention, OlmoeFlashAttention2, OlmoeSdpaAttention, OlmoeRMSNorm, OlmoeSparseMoeBlock, apply_rotary_pos_emb, repeat_kv, OlmoeRotaryEmbedding
import os
import sys
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import transformers
import pickle

# from transformers.models.olmoe.configuration_olmoe import OlmoeConfig
from dataset import *
# from utils import flash_attn_forward, flash_attn_prepare_decoder_attention_mask, get_multiround_data
# from peft import (get_peft_model, PeftModel)
import random
# from config import *
from datasets import Dataset, DatasetDict, load_dataset
import wandb
import gc
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import functools
from torch.optim.lr_scheduler import StepLR
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    checkpoint_wrapper, CheckpointImpl)

from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    MixedPrecision,
    BackwardPrefetch,
    ShardingStrategy,
    FullStateDictConfig,
    StateDictType,
)
from torch.distributed.fsdp.wrap import (
    transformer_auto_wrap_policy,
    enable_wrap,
    wrap,
)
from functools import partial
from pathlib import Path
from typing import Type, List, Optional, Tuple, Union
from modelforseminat_v5 import *
from transformers import get_cosine_schedule_with_warmup
# from torch.optim.lr_scheduler import _LRScheduler

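# Note: `dataset` and `modelforseminat_v5` are local project modules. The wildcard
# imports above are assumed to provide the dataset classes referenced through
# `args.data_type`, as well as Olmo2ForCausalLMForSemiNAT and the decoder-layer
# classes (Olmo2DecoderLayer, Olmo2DecoderLayerForSemiNAT) used in the FSDP
# auto-wrap policy below.
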
# class WarmupCosineScheduler(_LRScheduler):

#     def __init__(self,
#                  optimizer,
#                  warmup_steps,
#                  total_steps,
#                  min_lr=0.0,
#                  last_epoch=-1):
#         # self.warmup_steps = warmup_steps
#         self.total_steps = total_steps
#         self.min_lr = min_lr
#         if isinstance(warmup_steps, float) and 0 < warmup_steps < 1:
#             self.warmup_steps = int(warmup_steps * total_steps)
#         else:
#             self.warmup_steps = int(warmup_steps)
#         super().__init__(optimizer, last_epoch)

#     def get_lr(self):
#         step = self.last_epoch + 1
#         lrs = []

#         for base_lr in self.base_lrs:
#             if step < self.warmup_steps:
#                 # Linear warmup
#                 lr = base_lr * step / self.warmup_steps
#             else:
#                 # Cosine decay
#                 progress = (step - self.warmup_steps) / max(
#                     1, self.total_steps - self.warmup_steps)
#                 cosine_decay = 0.5 * (1 + math.cos(math.pi * progress))
#                 lr = self.min_lr + (base_lr - self.min_lr) * cosine_decay

#             lrs.append(lr)

#         return lrs


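# The commented-out WarmupCosineScheduler above is kept for reference; the
# training loop below uses transformers' get_cosine_schedule_with_warmup instead.
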
################################# FSDP Config #####################################
def setup():
    # initialize the process group
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(
        backend='nccl',
        init_method='env://',
    )


def cleanup():
    gc.collect()
    torch.cuda.empty_cache()
    dist.destroy_process_group()


def get_fsdp_device():
    # Called in each process after the distributed environment is initialized.
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)
    return device


# def load_trained_model(model_name):
#     DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

#     olmo_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B"
#     pt_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat/ckp/sft-v4-0616-1w-1e3-chunklimit5-jueduipos/sft-v4-1e3-len4-fc-chunklimit4-jueduipos-epoch_136.pt"
#     config_path = "/AIRvePFS/ai4science/users/ai4science/users/zyk/seminat_backup/model/OLMo-2-0425-1B/config.json"


#     config = AutoConfig.from_pretrained(olmo_path)
#     model = Olmo2ForCausalLMForSemiNAT.from_pretrained(olmo_path,
#                                                        config=config,
#                                                        torch_dtype=torch.bfloat16)
#     state_dict = torch.load(pt_path, map_location=DEVICE, weights_only=True)
#     missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
#     print(
#         f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
#     )
#     if missing_keys:
#         print("Missing keys:", missing_keys)
#     if unexpected_keys:
#         print("Unexpected keys:", unexpected_keys)

#     model = model.to(DEVICE)

#     tokenizer = AutoTokenizer.from_pretrained(olmo_path)

#     return model, tokenizer


# def setup_model(model_name, device):
#     model = Olmo2ForCausalLMForSemiNAT.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map=device)
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     # config = AutoConfig.from_pretrained(model_name)
#     # model = Olmo2ForCausalLMForSemiNAT(config)  # note: not built with from_pretrained here
#     # tokenizer = AutoTokenizer.from_pretrained(model_name)
#     return model, tokenizer

def setup_model(model_name, dtype):
    # pdb.set_trace()
    if dtype == "bf16":
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16
        )
    elif dtype == "fp16":
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name,
            torch_dtype=torch.float16
        )
    else:
        model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
            model_name
        )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # config = AutoConfig.from_pretrained(model_name)
    # model = Olmo2ForCausalLMForSemiNAT(config)  # note: not built with from_pretrained here
    # tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

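# setup_model above maps the --dtype flag to a torch dtype: "bf16" -> torch.bfloat16,
# "fp16" -> torch.float16; any other value falls back to from_pretrained's default
# (fp32 unless overridden elsewhere).
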
def collate_fn(batch):
    # drop samples the dataset returned as None
    batch = [x for x in batch if x is not None]
    if len(batch) == 0:
        return None  # the whole batch is invalid

    input_ids, labels, attention_mask, slice_arr, slice_label = zip(*batch)

    return (
        torch.stack(input_ids),
        torch.stack(labels),
        torch.stack(attention_mask),
        torch.stack(slice_arr),
        torch.stack(slice_label)
    )

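# Each dataset sample is assumed to be a tuple of fixed-shape tensors
# (input_ids, labels, attention_mask, slice_arr, slice_label), so the collated
# batch is indexed as batch[0]..batch[4] in the training loop below.
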
def fsdp_main(args):
    local_rank = int(os.environ['LOCAL_RANK'])
    rank = int(os.environ['RANK'])
    world_size = int(os.environ['WORLD_SIZE'])
    if args.use_wandb and rank == 0:
        wandb.init(entity="SemiNAT", project="SemiNAT-Debug", name=args.run_name)

    device = f"cuda:{local_rank}"

    # model, tokenizer = setup_model(args.model_path, args.dtype, device)
    model, tokenizer = setup_model(args.model_path, args.dtype)

    # model, tokenizer = load_trained_model(args.model_path)

    model.config.chunk_size_limit = args.chunk_size_limit


    # if ".pkl" in args.data_path:
    #     train_dataset = pickle.load(open(args.data_path, "rb"))
    # else:
    #     datasets = pd.read_parquet(args.data_path)
    #     train_dataset = eval(f"{args.data_type}")(
    #         tokenizer,
    #         datasets,
    #         args.max_length,
    #         args.data_processess_num)

    # train_sampler = DistributedSampler(train_dataset,
    #                                    rank=rank,
    #                                    num_replicas=world_size,
    #                                    shuffle=True)
    # train_dataloader = DataLoader(dataset=train_dataset,
    #                               sampler=train_sampler,
    #                               batch_size=args.batch_size)

    # look up the dataset class named by --data_type (provided by `from dataset import *`)
    train_dataset = eval(f"{args.data_type}")(
        tokenizer,
        args.data_path,
        args.max_length
    )
    train_sampler = DistributedSampler(train_dataset,
                                       rank=rank,
                                       num_replicas=world_size,
                                       shuffle=True)

    train_dataloader = DataLoader(dataset=train_dataset,
                                  sampler=train_sampler,
                                  batch_size=args.batch_size,
                                  num_workers=args.data_processess_num,
                                  collate_fn=collate_fn)

    # pdb.set_trace()

    print(f"Size of train dataset: {len(train_dataset)}")

    setup()

    # Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
    #     transformer_auto_wrap_policy,
    #     transformer_layer_cls={
    #         Olmo2DecoderLayerForSemiNAT,
    #         NATEncoderForSemiNAT,
    #         NATDecoderForSemiNAT,
    #     })

    Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
        transformer_auto_wrap_policy,
        transformer_layer_cls={
            Olmo2DecoderLayer,
            Olmo2DecoderLayerForSemiNAT
        }
    )

    sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # SHARD_GRAD_OP for ZeRO-2, FULL_SHARD for ZeRO-3
    torch.cuda.set_device(local_rank)
    # local_rank = int(os.environ['LOCAL_RANK'])
    # device = torch.device(f"cuda:{local_rank}")
    # model = model.to(device)

    # if bf16_ready:
    mp_policy = MixedPrecision(
        param_dtype=torch.bfloat16,
        reduce_dtype=torch.bfloat16,
        buffer_dtype=torch.bfloat16,
    )
    # else:
    #     mp_policy = None  # defaults to fp32

    # if args.use_lora:
    #     model = get_peft_model(model, lora_config)

    # pdb.set_trace()
    # model is on CPU before input to FSDP
    model = FSDP(model,
                 auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
                 mixed_precision=mp_policy,
                 sharding_strategy=sharding_strategy,
                 device_id=torch.cuda.current_device(),
                 use_orig_params=True)

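    # FULL_SHARD shards parameters, gradients, and optimizer state across ranks
    # (ZeRO-3 style); use_orig_params=True keeps the original parameters visible
    # (e.g. through named_parameters), which is convenient for the optimizer and
    # for the commented-out per-parameter grad-norm logging below.
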
    optimizer = optim.AdamW(
        model.parameters(),
        lr=args.lr,
        betas=args.betas,
        weight_decay=args.weight_decay,
        eps=args.eps,
    )

    # pdb.set_trace()

    # scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    # scheduler = WarmupCosineScheduler(
    #     optimizer=optimizer,             # optimizer instance
    #     warmup_steps=args.warmup_steps,  # number (or ratio) of warmup steps
    #     total_steps=args.total_steps,    # total number of training steps
    #     min_lr=args.min_lr               # minimum learning rate
    # )

    num_training_steps = args.epochs * len(train_dataloader)  # total number of optimizer steps
    num_warmup_steps = num_training_steps * args.warmup_ratio

    scheduler = get_cosine_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps
    )

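    # get_cosine_schedule_with_warmup defines its schedule per optimizer step, so
    # scheduler.step() is called once per batch in the loop below; stepping it only
    # once per epoch would barely advance the schedule.
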
    torch.autograd.set_detect_anomaly(True)

    loss1_list = []
    loss2_list = []
    loss_list = []

    global_step = 0

    start_time = time.time()

    for epoch in range(1, args.epochs + 1):
        # t0 = time.time()
        model.train()
        local_rank = int(os.environ['LOCAL_RANK'])
        # fsdp_loss = torch.zeros(2).to(local_rank)

        if train_sampler:
            train_sampler.set_epoch(epoch)
        if rank == 0:
            inner_pbar = tqdm(range(len(train_dataloader)),
                              colour="blue",
                              desc="r0 Training Epoch")

        memories = []

        for batch in train_dataloader:
            if batch is None:
                continue
            # move inputs to this rank's GPU; FSDP does not move them automatically
            batch = tuple(t.to(local_rank) for t in batch)
            optimizer.zero_grad()
            loss1, loss2 = model(input_ids=batch[0],
                                 labels=batch[1],
                                 attention_mask=batch[2],
                                 slice_pos=batch[3],
                                 slice_label=batch[4],
                                 use_cache=False).loss
            loss = loss1 + loss2
            # loss = loss2
            loss1_list.append(loss1.item())
            loss2_list.append(loss2.item())
            loss_list.append(loss.item())
            # pdb.set_trace()

            # if torch.isnan(loss):
            #     print(f"Step {global_step}: loss is NaN, entering pdb …")
            #     pdb.set_trace()

            # print(f"loss1:{loss1},loss2:{loss2}")
            loss.backward()

            # per-parameter gradient-norm logging
            # for name, module in model.named_modules():
            #     total_norm = 0.0
            #     param_count = 0
            #     for param in module.parameters(recurse=False):
            #         if param.grad is not None:
            #             total_norm += param.grad.data.norm(2).item()**2
            #             param_count += 1
            #     if param_count > 0:
            #         if args.use_wandb and rank == 0:
            #             total_norm = total_norm**0.5
            #             wandb.log({f"grad_norm/{name}": total_norm},
            #                       step=global_step)

            optimizer.step()
            scheduler.step()  # the cosine schedule above is defined per optimizer step

            mem = torch.cuda.memory_allocated() / (1024 ** 2)
            memories.append(mem)

            global_step += 1

            if global_step % args.save_steps == 0:
                save_policy = FullStateDictConfig(offload_to_cpu=True,
                                                  rank0_only=True)
                with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
                                          save_policy):
                    cpu_state = model.state_dict()

                if rank == 0:
                    print(f"--> steps: {str(global_step)} saving model ...")
                    if not os.path.exists(args.save_path):
                        os.makedirs(args.save_path)
                    save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
                    print(f"--> saving as model name {save_name}")
                    save_path = os.path.join(args.save_path, save_name)
                    torch.save(cpu_state, save_path)

            if rank == 0:
                inner_pbar.update(1)
            if args.use_wandb and rank == 0:
                wandb.log({
                    "length prediction loss":
                    sum(loss1_list[-20:]) / len(loss1_list[-20:]),
                    "nat loss":
                    sum(loss2_list[-20:]) / len(loss2_list[-20:]),
                    "loss":
                    sum(loss_list[-20:]) / len(loss_list[-20:]),
                    "lr": scheduler.get_last_lr()[0]
                })

        avg_mem = sum(memories) / len(memories)
        print(f"Average memory usage over {len(memories)} steps: {avg_mem:.2f} MB")

        # reduce the last batch's loss across ranks
        dist.all_reduce(loss, op=dist.ReduceOp.SUM)

        if rank == 0:
            inner_pbar.close()

        # if rank == 0:
        #     print(f"--> entering save model state")

        # save_policy = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
        # with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
        #                           save_policy):
        #     cpu_state = model.state_dict()

        # if rank == 0:
        #     print(f"--> epoch: {str(epoch)} saving model ...")
        #     if not os.path.exists(args.save_path):
        #         os.makedirs(args.save_path)
        #     save_name = f"{args.save_name}-epoch_{str(epoch)}.pt"
        #     print(f"--> saving as model name {save_name}")
        #     save_path = os.path.join(args.save_path, save_name)
        #     torch.save(cpu_state, save_path)

    end_time = time.time()
    print(f"Training time: {end_time - start_time} seconds")

    dist.barrier()
    cleanup()


################################# FSDP Config #####################################

if __name__ == "__main__":
    # Training settings
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size',
                        type=int,
                        default=4,
                        metavar='N',
                        help='input batch size for training (default: 4)')
    parser.add_argument('--model_path', type=str)
    parser.add_argument('--save_path', type=str)
    parser.add_argument('--save_name', type=str)
    parser.add_argument('--data_path', type=str)
    parser.add_argument('--data_type', type=str)
    parser.add_argument('--run_name', type=str)
    parser.add_argument('--max_length', type=int)
    parser.add_argument('--chunk_size_limit', type=int)
    parser.add_argument('--save_steps', type=int, default=5000)
    parser.add_argument('--data_processess_num', type=int, default=8)
    parser.add_argument('--epochs',
                        type=int,
                        default=2,
                        metavar='N',
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr',
                        type=float,
                        default=.002,
                        metavar='LR',
                        help='learning rate (default: .002)')
    parser.add_argument('--weight_decay', type=float)
    parser.add_argument('--betas', type=float, nargs=2)
    parser.add_argument('--eps', type=float)
    parser.add_argument('--warmup_ratio', type=float)
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--use_lora', action='store_true', default=False)
    parser.add_argument("--use_wandb",
                        action="store_true",
                        help="whether to use wandb")
    parser.add_argument('--dtype', type=str)
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    fsdp_main(args)
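
# Example launch (illustrative only; the paths, dataset class name, and
# hyperparameter values below are placeholders, not values from this repository).
# torchrun sets the LOCAL_RANK / RANK / WORLD_SIZE environment variables that
# fsdp_main reads.
#
#   torchrun --nproc_per_node=8 train4.py \
#       --model_path /path/to/OLMo-2-0425-1B \
#       --data_path /path/to/train_data \
#       --data_type SemiNATDataset \
#       --save_path ./ckpt --save_name sft-run --run_name sft-run \
#       --max_length 2048 --chunk_size_limit 4 \
#       --batch-size 4 --epochs 2 --lr 1e-3 \
#       --weight_decay 0.1 --betas 0.9 0.95 --eps 1e-8 --warmup_ratio 0.03 \
#       --dtype bf16 --use_wandb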