sheep33333 committed on
Commit 74b5db6 · verified · 1 Parent(s): cdf3e93

Upload train_vanilla_speed.py with huggingface_hub

Files changed (1)
  1. train_vanilla_speed.py +402 -0
train_vanilla_speed.py ADDED
@@ -0,0 +1,402 @@
+ import pdb
+ from transformers import AutoTokenizer
+ from torch import nn
+ import os
+ import time
+ from torch.profiler import ProfilerActivity
+ from collections import defaultdict
+ import torch.distributed as dist
+ from tqdm import tqdm
+ from torch.utils.data import DataLoader
+ from torch.utils.data.distributed import DistributedSampler
+ from dataset import *
+ import wandb
+ import gc
+ import argparse
+ import torch
+ import torch.optim as optim
+ import functools
+ from torch.distributed.fsdp import (
+     FullyShardedDataParallel as FSDP,
+     MixedPrecision,
+     ShardingStrategy,
+     FullStateDictConfig,
+     StateDictType,
+ )
+ from torch.distributed.fsdp.wrap import (
+     transformer_auto_wrap_policy,
+ )
+ from modelforseminat_v5 import *
+ from transformers import get_cosine_schedule_with_warmup
+
+
+ ################################# FSDP Config #####################################
+ def setup():
+     # initialize the process group
+     local_rank = int(os.environ['LOCAL_RANK'])
+     torch.cuda.set_device(local_rank)
+     dist.init_process_group(
+         backend='nccl',
+         init_method='env://',
+     )
+
+
+ def cleanup():
+     gc.collect()
+     torch.cuda.empty_cache()
+     dist.destroy_process_group()
+
+
+ def get_fsdp_device():
+     # called by each process after the distributed environment has been initialized
+     local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set automatically by torchrun
+     device = torch.device(f"cuda:{local_rank}")
+     torch.cuda.set_device(device)
+     return device
+
+
+ def setup_model(
+     model_name: str,                      # HF hub id or local directory
+     dtype: str = "bf16",                  # "bf16" | "fp16" | "fp32"
+     chunk_size_limit: int = 5,
+     attn_impl: str = "flash_attention_2",
+     load_model_dir: str | None = None,    # optional extra .pt weight file
+     decoder_layers: int = 1,
+     encoder_layer: int = 1,
+     mlp: bool = False,
+     position_embedding_type: str = "absolute",
+     base: str = None,
+ ):
+     # --- 1. Build the config --------------------------------------------------
+     config = Olmo2ConfigForSemiNAT.from_pretrained(
+         model_name,
+         chunk_size_limit=chunk_size_limit,
+         attn_implementation=attn_impl,
+         decoder_layers=decoder_layers,
+         encoder_layer=encoder_layer,
+         mlp=mlp,
+         position_embedding_type=position_embedding_type,
+     )
+     # --- 2. Resolve the requested dtype ---------------------------------------
+     dtype_map = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}
+     torch_dtype = dtype_map.get(dtype, torch.float32)
+     # --- 3. Load the base model -----------------------------------------------
+     if base == "scratch":
+         model = Olmo2ForCausalLMForSemiNAT(config).to(torch_dtype)
+         # pdb.set_trace()
+     elif base == "pretrained":
+         model = Olmo2ForCausalLMForSemiNAT.from_pretrained(
+             model_name,
+             config=config,
+             torch_dtype=torch_dtype,
+         )
+         # pdb.set_trace()
+     # --- 4. If a .pt file is given, load its parameters -----------------------
+     if load_model_dir and os.path.isfile(load_model_dir) and load_model_dir.endswith(".pt"):
+         ckpt = torch.load(load_model_dir, map_location="cpu")
+         # pdb.set_trace()
+         missing_keys, unexpected_keys = model.load_state_dict(ckpt, strict=False)
+         print(f"[INFO] Loaded weights from {load_model_dir}")
+         print(
+             f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
+         )
+         if missing_keys:
+             print("Missing keys:", missing_keys)
+         if unexpected_keys:
+             print("Unexpected keys:", unexpected_keys)
+
+     else:
+         print("[INFO] No extra .pt weights loaded")
+     # --- 5. Tokenizer ----------------------------------------------------------
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+     # pdb.set_trace()
+
+     # config = AutoConfig.from_pretrained(model_name)
+     # model = Olmo2ForCausalLMForSemiNAT(config)  # note: intentionally not from_pretrained here
+     # tokenizer = AutoTokenizer.from_pretrained(model_name)
+     return model, tokenizer
+
+ def reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
+     # sum the tensor across all ranks and divide by the world size
+     dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
+     tensor /= dist.get_world_size()
+     return tensor
+
+ def collate_fn(batch):
+     # filter out None samples
+     batch = [x for x in batch if x is not None]
+     if len(batch) == 0:
+         return None  # the whole batch is invalid
+
+     input_ids, labels, attention_mask, slice_arr, slice_label = zip(*batch)
+
+     return (
+         torch.stack(input_ids),
+         torch.stack(labels),
+         torch.stack(attention_mask),
+         torch.stack(slice_arr),
+         torch.stack(slice_label)
+     )
+
+ def fsdp_main(args):
+     local_rank = int(os.environ['LOCAL_RANK'])
+     rank = int(os.environ['RANK'])
+     world_size = int(os.environ['WORLD_SIZE'])
+     if args.use_wandb and rank == 0:
+         wandb.init(entity="SemiNAT", project="SemiNAT-Debug", name=args.run_name)
+
+     DEVICE = f"cuda:{local_rank}"
+
+     # model, tokenizer = setup_model(args.model_path, args.dtype, args.chunk_size_limit, args.attn_implementation, args.ptm_model_path, args.decoder_layers, args.encoder_layers, args.mlp, args.position_embedding_type, args.base)
+     model = Olmo2ForCausalLM.from_pretrained(args.model_path, torch_dtype=torch.bfloat16)
+     tokenizer = AutoTokenizer.from_pretrained(args.model_path)
+
+
+     optimizer = optim.AdamW(
+         model.parameters(),
+         lr=args.lr,
+         betas=args.betas,
+         weight_decay=args.weight_decay,
+         eps=args.eps,
+     )
+
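+     # args.data_type names a dataset class defined in dataset.py (pulled in above via
+     # `from dataset import *`); eval() looks the class up by name and instantiates it.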
+     train_dataset = eval(f"{args.data_type}")(
+         tokenizer,
+         args.data_path,
+         args.max_length
+     )
+     train_sampler = DistributedSampler(train_dataset,
+                                        rank=rank,
+                                        num_replicas=world_size,
+                                        shuffle=True,
+                                        drop_last=True)
+
+     train_dataloader = DataLoader(dataset=train_dataset,
+                                   sampler=train_sampler,
+                                   batch_size=args.batch_size,
+                                   num_workers=args.data_processess_num,
+                                   collate_fn=collate_fn)
+
+
+     num_training_steps = args.epochs * len(train_dataloader)  # total number of training steps
+     num_warmup_steps = num_training_steps * args.warmup_ratio
+     scheduler = get_cosine_schedule_with_warmup(
+         optimizer,
+         num_warmup_steps=num_warmup_steps,
+         num_training_steps=num_training_steps
+     )
+
+
+     if args.resume_path:
+         checkpoint = torch.load(args.resume_path, map_location=DEVICE)
+         missing_keys, unexpected_keys = model.load_state_dict(checkpoint["model"], strict=False)
+         print(
+             f"Loaded with {len(missing_keys)} missing keys and {len(unexpected_keys)} unexpected keys."
+         )
+         if missing_keys:
+             print("Missing keys:", missing_keys)
+         if unexpected_keys:
+             print("Unexpected keys:", unexpected_keys)
+
+         optimizer.load_state_dict(checkpoint["optimizer"])
+         scheduler.load_state_dict(checkpoint["scheduler"])
+         global_step = checkpoint.get("global_step", 0)
+
+
+     print(f"Size of train dataset: {len(train_dataset)}")
+
+     setup()
+
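+     # transformer_auto_wrap_policy turns every module whose class appears in
+     # transformer_layer_cls (the Olmo2 decoder layers below) into its own FSDP unit,
+     # so parameters are sharded and all-gathered one decoder layer at a time.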
+     Olmo2DecoderLayerForSemiNAT_auto_wrap_policy = functools.partial(
+         transformer_auto_wrap_policy,
+         transformer_layer_cls={
+             Olmo2DecoderLayer,
+             Olmo2DecoderLayerForSemiNAT
+         }
+     )
+
+     sharding_strategy: ShardingStrategy = ShardingStrategy.FULL_SHARD  # SHARD_GRAD_OP for ZeRO-2, FULL_SHARD for ZeRO-3
+     torch.cuda.set_device(local_rank)
+
+
+     mp_policy = MixedPrecision(
+         param_dtype=torch.bfloat16,
+         reduce_dtype=torch.bfloat16,
+         buffer_dtype=torch.bfloat16,
+     )
+
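+     # use_orig_params=True keeps the original (named) parameters visible through the
+     # FSDP wrapper instead of exposing only the flattened FlatParameters.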
+     model = FSDP(model,
+                  auto_wrap_policy=Olmo2DecoderLayerForSemiNAT_auto_wrap_policy,
+                  mixed_precision=mp_policy,
+                  sharding_strategy=sharding_strategy,
+                  device_id=torch.cuda.current_device(),
+                  use_orig_params=True)
+
+
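+     # Note: anomaly detection makes autograd track where NaN/Inf gradients originate,
+     # which adds noticeable overhead; it is left enabled here for debugging.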
+     torch.autograd.set_detect_anomaly(True)
+
+     loss1_list = []
+     loss2_list = []
+     loss_list = []
+
+     global_step = 0
+
+
+     for epoch in range(1, args.epochs + 1):
+         model.train()
+         local_rank = int(os.environ['LOCAL_RANK'])
+
+         if train_sampler:
+             train_sampler.set_epoch(epoch)
+
+         if rank == 0:
+             inner_pbar = tqdm(range(len(train_dataloader)),
+                               colour="blue",
+                               desc="r0 Training Epoch")
+
+         # ----------- added: per-step timing and peak-memory statistics -----------
+         time_metrics = defaultdict(list)
+
+         for i, batch in enumerate(train_dataloader):
+             if batch is None:
+                 continue
+             torch.cuda.memory.reset_peak_memory_stats(torch.cuda.current_device())
+             torch.cuda.memory._record_memory_history()
+             start_time = time.time()
+
+             optimizer.zero_grad()
+             loss = model(input_ids=batch[0],
+                          labels=batch[1],
+                          attention_mask=batch[2],
+                          slice_pos=batch[3],
+                          slice_label=batch[4],
+                          use_cache=False).loss
+             # loss = loss1 + loss2
+             # loss1_scalar = reduce_mean(loss1.detach()).item()
+             # loss2_scalar = reduce_mean(loss2.detach()).item()
+             total_loss_scalar = reduce_mean(loss.detach()).item()
+             loss.backward()
+             optimizer.step()
+             scheduler.step()
+
+             # pdb.set_trace()
+             end_time = time.time()
+
+
+             time_metrics["time"].append(end_time - start_time)
+             time_metrics["peak_memory"].append(torch.cuda.max_memory_allocated(torch.cuda.current_device()) / 1024 / 1024)
+
+             torch.cuda.memory._record_memory_history(enabled=None)  # stop recording allocator history
+
+             global_step += 1
+
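+             # FULL_STATE_DICT with rank0_only=True gathers the complete, unsharded state
+             # dict, offloads it to CPU, and materializes it only on rank 0 before saving.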
+             if global_step % args.save_steps == 0:
+                 save_policy = FullStateDictConfig(offload_to_cpu=True,
+                                                   rank0_only=True)
+                 with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT,
+                                           save_policy):
+                     cpu_state = model.state_dict()
+
+                 if rank == 0:
+                     print(f"--> steps: {str(global_step)} saving model ...")
+                     if not os.path.exists(args.save_path):
+                         os.makedirs(args.save_path)
+                     save_name = f"{args.save_name}-steps_{str(global_step)}.pt"
+                     print(f"--> saving as model name {save_name}")
+                     save_path = os.path.join(args.save_path, save_name)
+                     torch.save({
+                         "model": cpu_state,
+                         "optimizer": optimizer.state_dict(),
+                         "scheduler": scheduler.state_dict() if scheduler else None,
+                         "global_step": global_step,
+                         "args": vars(args),
+                     }, save_path)
+
+             if rank == 0:
+                 # loss1_list.append(loss1_scalar)
+                 # loss2_list.append(loss2_scalar)
+                 loss_list.append(total_loss_scalar)
+                 inner_pbar.update(1)
+             if args.use_wandb and rank == 0:
+                 wandb.log({
+                     # "Length prediction loss": sum(loss1_list[-20:]) / len(loss1_list[-20:]),
+                     # "NAT loss": sum(loss2_list[-20:]) / len(loss2_list[-20:]),
+                     "Loss": sum(loss_list[-20:]) / len(loss_list[-20:]),
+                     "lr": scheduler.get_last_lr()[0]
+                 })
+         if rank == 0:
+             inner_pbar.close()
+         if rank == 0:
+             for key, times in time_metrics.items():
+                 times = times[1:]  # drop the first measurement (warm-up step)
+                 avg_time = sum(times) / len(times)
+                 std_time = (sum((t - avg_time) ** 2 for t in times) / len(times)) ** 0.5
+                 print(f"{key}: {avg_time:.2f} ± {std_time:.2f}")
+
+
+     dist.barrier()
+     cleanup()
+
+
+ ################################# FSDP Config #####################################
+
+ if __name__ == "__main__":
+     # Training settings
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--batch-size',
+                         type=int,
+                         default=4,
+                         metavar='N',
+                         help='input batch size for training (default: 4)')
+     parser.add_argument('--model_path', type=str)
+     parser.add_argument('--save_path', type=str)
+     parser.add_argument('--save_name', type=str)
+     parser.add_argument('--data_path', type=str)
+     parser.add_argument('--data_type', type=str)
+     parser.add_argument('--run_name', type=str)
+     parser.add_argument('--max_length', type=int)
+     parser.add_argument('--chunk_size_limit', type=int)
+     parser.add_argument('--save_steps', type=int, default=5000)
+     parser.add_argument('--data_processess_num', type=int, default=8)
+     parser.add_argument('--epochs',
+                         type=int,
+                         default=2,
+                         metavar='N',
+                         help='number of epochs to train (default: 2)')
+     parser.add_argument('--lr',
+                         type=float,
+                         default=.002,
+                         metavar='LR',
+                         help='learning rate (default: .002)')
+     parser.add_argument('--weight_decay', type=float)
+     parser.add_argument('--betas', type=float, nargs=2)
+     parser.add_argument('--eps', type=float)
+     parser.add_argument('--warmup_ratio', type=float)
+     parser.add_argument('--seed',
+                         type=int,
+                         default=1,
+                         metavar='S',
+                         help='random seed (default: 1)')
+     parser.add_argument('--use_lora', action='store_true', default=False)
+     parser.add_argument("--use_wandb",
+                         action="store_true",
+                         help="whether to use wandb")
+     parser.add_argument('--dtype', type=str)
+     parser.add_argument('--resume_path', type=str, default=None)
+     parser.add_argument('--attn_implementation', type=str)
+     parser.add_argument('--ptm_model_path', type=str, default=None)
+     parser.add_argument('--decoder_layers', type=int, default=1)
+     parser.add_argument('--encoder_layers', type=int, default=1)
+     parser.add_argument('--mlp', type=bool, default=False)
+     parser.add_argument('--position_embedding_type', type=str, default="absolute", choices=["absolute", "relative"])
+     parser.add_argument('--base', type=str, default="scratch", choices=["scratch", "pretrained"])
+     args = parser.parse_args()
+
+     torch.manual_seed(args.seed)
+
+     fsdp_main(args)