zheminh committed
Commit 823513a · verified · 1 Parent(s): c6ea044

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "</tool_call>": 151658,
+ "<tool_call>": 151657,
+ "<|box_end|>": 151649,
+ "<|box_start|>": 151648,
+ "<|endoftext|>": 151643,
+ "<|file_sep|>": 151664,
+ "<|fim_middle|>": 151660,
+ "<|fim_pad|>": 151662,
+ "<|fim_prefix|>": 151659,
+ "<|fim_suffix|>": 151661,
+ "<|im_end|>": 151645,
+ "<|im_start|>": 151644,
+ "<|image_pad|>": 151655,
+ "<|object_ref_end|>": 151647,
+ "<|object_ref_start|>": 151646,
+ "<|quad_end|>": 151651,
+ "<|quad_start|>": 151650,
+ "<|repo_name|>": 151663,
+ "<|video_pad|>": 151656,
+ "<|vision_end|>": 151653,
+ "<|vision_pad|>": 151654,
+ "<|vision_start|>": 151652
+ }
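For reference, a minimal sketch (not part of the commit) of how one could check that the uploaded tokenizer maps these added special tokens to the IDs above; "./checkpoint" is a hypothetical local clone of this repository:

```python
# Minimal sketch: confirm the special-token IDs recorded in added_tokens.json.
# "./checkpoint" is a hypothetical local download location for this repo.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# convert_tokens_to_ids should reproduce the IDs listed in added_tokens.json.
assert tokenizer.convert_tokens_to_ids("<|im_start|>") == 151644
assert tokenizer.convert_tokens_to_ids("<|im_end|>") == 151645
assert tokenizer.convert_tokens_to_ids("<|endoftext|>") == 151643
```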
args.json ADDED
@@ -0,0 +1,357 @@
+ {
+ "model": "Qwen/Qwen2.5-7B-Instruct",
+ "model_type": "qwen2_5",
+ "model_revision": null,
+ "task_type": "causal_lm",
+ "torch_dtype": "bfloat16",
+ "attn_impl": null,
+ "num_labels": null,
+ "rope_scaling": null,
+ "device_map": null,
+ "max_memory": {},
+ "local_repo_path": null,
+ "template": "qwen2_5",
+ "system": "You are one of the executors of an AI system that follows a systematic long thinking process to arrive at the precise and accurate answer to a math question specified within <Question> and </Question> tags. The system consists of a planner and multiple executors that can run in parallel. The solution is generated over multiple phases. At each phase, the planner thinks out loud and plans what needs to be done next to solve the question. It identifies tasks that can be executed in parallel and creates prompts which the executors need to follow to carry out the plan. The results from the executors are fed back into the planner to generate plans at the next phase. You will be provided with the math question, the outputs from the planner and executors in the previous phases, and the plan and prompt from the current phase that you need to execute. You need to follow the last mentioned plan and prompt and generate a clear and accurate execution result for it by thinking systematically. Your thoughts may involve detailed considerations such as analyzing the previous steps, verifying the accuracy of the current steps, or refining any errors. Provide your response within <think> and </think> tags. You MUST ONLY carry out the task specified in the prompt. Do NOT go beyond the specified task.",
+ "max_length": 5120,
+ "truncation_strategy": "delete",
+ "max_pixels": null,
+ "tools_prompt": "react_en",
+ "norm_bbox": null,
+ "response_prefix": null,
+ "padding_side": "right",
+ "loss_scale": "default",
+ "sequence_parallel_size": 1,
+ "use_chat_template": true,
+ "template_backend": "swift",
+ "dataset": [
+ "emilbiju/Execution-Data-Math:math"
+ ],
+ "val_dataset": [],
+ "split_dataset_ratio": 0.01,
+ "data_seed": 42,
+ "dataset_num_proc": 1,
+ "streaming": false,
+ "enable_cache": false,
+ "download_mode": "reuse_dataset_if_exists",
+ "columns": {},
+ "strict": false,
+ "remove_unused_columns": true,
+ "model_name": [
+ null,
+ null
+ ],
+ "model_author": [
+ null,
+ null
+ ],
+ "custom_dataset_info": [],
+ "quant_method": null,
+ "quant_bits": 4,
+ "hqq_axis": null,
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": true,
+ "bnb_4bit_quant_storage": "bfloat16",
+ "max_new_tokens": 64,
+ "temperature": 0.0,
+ "top_k": null,
+ "top_p": null,
+ "repetition_penalty": null,
+ "num_beams": 1,
+ "stream": false,
+ "stop_words": [],
+ "logprobs": false,
+ "top_logprobs": null,
+ "ckpt_dir": null,
+ "load_dataset_config": null,
+ "lora_modules": [],
+ "tuner_backend": "peft",
+ "train_type": "full",
+ "adapters": [],
+ "external_plugins": [],
+ "seed": 42,
+ "model_kwargs": {},
+ "load_args": false,
+ "load_data_args": false,
+ "use_hf": true,
+ "hub_token": null,
+ "custom_register_path": [],
+ "ignore_args_error": false,
+ "use_swift_lora": false,
+ "output_dir": "/home/ubuntu/output/v0-20250315-052746",
+ "overwrite_output_dir": false,
+ "do_train": false,
+ "do_eval": false,
+ "do_predict": false,
+ "eval_strategy": "steps",
+ "prediction_loss_only": false,
+ "per_device_train_batch_size": 1,
+ "per_device_eval_batch_size": 1,
+ "per_gpu_train_batch_size": null,
+ "per_gpu_eval_batch_size": null,
+ "gradient_accumulation_steps": 4,
+ "eval_accumulation_steps": null,
+ "eval_delay": 0,
+ "torch_empty_cache_steps": null,
+ "learning_rate": 1e-05,
+ "weight_decay": 0.1,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.999,
+ "adam_epsilon": 1e-08,
+ "max_grad_norm": 1.0,
+ "num_train_epochs": 3.0,
+ "max_steps": -1,
+ "lr_scheduler_type": "cosine",
+ "lr_scheduler_kwargs": null,
+ "warmup_ratio": 0.05,
+ "warmup_steps": 0,
+ "log_level": "passive",
+ "log_level_replica": "warning",
+ "log_on_each_node": true,
+ "logging_dir": "/home/ubuntu/output/v0-20250315-052746/runs",
+ "logging_strategy": "steps",
+ "logging_first_step": true,
+ "logging_steps": 5,
+ "logging_nan_inf_filter": true,
+ "save_strategy": "steps",
+ "save_steps": 100.0,
+ "save_total_limit": 5,
+ "save_safetensors": true,
+ "save_on_each_node": false,
+ "save_only_model": false,
+ "restore_callback_states_from_checkpoint": false,
+ "no_cuda": false,
+ "use_cpu": false,
+ "use_mps_device": false,
+ "jit_mode_eval": false,
+ "use_ipex": false,
+ "bf16": true,
+ "fp16": false,
+ "fp16_opt_level": "O1",
+ "half_precision_backend": "auto",
+ "bf16_full_eval": false,
+ "fp16_full_eval": false,
+ "tf32": null,
+ "local_rank": 0,
+ "ddp_backend": null,
+ "tpu_num_cores": null,
+ "tpu_metrics_debug": false,
+ "debug": null,
+ "dataloader_drop_last": false,
+ "eval_steps": 100.0,
+ "dataloader_num_workers": 4,
+ "dataloader_prefetch_factor": null,
+ "past_index": -1,
+ "run_name": null,
+ "disable_tqdm": null,
+ "label_names": null,
+ "load_best_model_at_end": false,
+ "metric_for_best_model": "loss",
+ "greater_is_better": false,
+ "ignore_data_skip": false,
+ "fsdp": "",
+ "fsdp_min_num_params": 0,
+ "fsdp_config": null,
+ "fsdp_transformer_layer_cls_to_wrap": null,
+ "accelerator_config": {
+ "dispatch_batches": false
+ },
+ "deepspeed": {
+ "fp16": {
+ "enabled": "auto",
+ "loss_scale": 0,
+ "loss_scale_window": 1000,
+ "initial_scale_power": 16,
+ "hysteresis": 2,
+ "min_loss_scale": 1
+ },
+ "bf16": {
+ "enabled": "auto"
+ },
+ "zero_optimization": {
+ "stage": 3,
+ "offload_optimizer": {
+ "device": "none",
+ "pin_memory": true
+ },
+ "offload_param": {
+ "device": "none",
+ "pin_memory": true
+ },
+ "overlap_comm": true,
+ "contiguous_gradients": true,
+ "sub_group_size": 1000000000.0,
+ "reduce_bucket_size": "auto",
+ "zero_quantized_weights": false,
+ "zero_quantized_gradients": false,
+ "stage3_prefetch_bucket_size": "auto",
+ "stage3_param_persistence_threshold": "auto",
+ "stage3_max_live_parameters": 1000000000.0,
+ "stage3_max_reuse_distance": 1000000000.0,
+ "stage3_gather_16bit_weights_on_model_save": true
+ },
+ "gradient_accumulation_steps": "auto",
+ "gradient_clipping": "auto",
+ "steps_per_print": 2000,
+ "train_batch_size": "auto",
+ "train_micro_batch_size_per_gpu": "auto",
+ "wall_clock_breakdown": false
+ },
+ "label_smoothing_factor": 0.0,
+ "optim": "adamw_torch",
+ "optim_args": null,
+ "adafactor": false,
+ "group_by_length": false,
+ "length_column_name": "length",
+ "report_to": [
+ "tensorboard"
+ ],
+ "ddp_find_unused_parameters": null,
+ "ddp_bucket_cap_mb": null,
+ "ddp_broadcast_buffers": null,
+ "dataloader_pin_memory": true,
+ "dataloader_persistent_workers": false,
+ "skip_memory_metrics": true,
+ "use_legacy_prediction_loop": false,
+ "push_to_hub": false,
+ "resume_from_checkpoint": null,
+ "hub_model_id": null,
+ "hub_strategy": "every_save",
+ "hub_private_repo": null,
+ "hub_always_push": false,
+ "gradient_checkpointing": true,
+ "gradient_checkpointing_kwargs": null,
+ "include_inputs_for_metrics": false,
+ "include_for_metrics": [],
+ "eval_do_concat_batches": true,
+ "fp16_backend": "auto",
+ "evaluation_strategy": "steps",
+ "push_to_hub_model_id": null,
+ "push_to_hub_organization": null,
+ "push_to_hub_token": null,
+ "mp_parameters": "",
+ "auto_find_batch_size": false,
+ "full_determinism": false,
+ "torchdynamo": null,
+ "ray_scope": "last",
+ "ddp_timeout": 1800,
+ "torch_compile": false,
+ "torch_compile_backend": null,
+ "torch_compile_mode": null,
+ "dispatch_batches": null,
+ "split_batches": null,
+ "include_tokens_per_second": false,
+ "include_num_input_tokens_seen": false,
+ "neftune_noise_alpha": null,
+ "optim_target_modules": null,
+ "batch_eval_metrics": false,
+ "eval_on_start": false,
+ "use_liger_kernel": false,
+ "eval_use_gather_object": false,
+ "average_tokens_across_devices": false,
+ "sortish_sampler": false,
+ "predict_with_generate": false,
+ "generation_max_length": null,
+ "generation_num_beams": null,
+ "generation_config": null,
+ "freeze_parameters": [],
+ "freeze_parameters_ratio": 0.0,
+ "trainable_parameters": [],
+ "freeze_llm": false,
+ "freeze_vit": true,
+ "freeze_aligner": true,
+ "target_modules": [
+ "all-linear"
+ ],
+ "target_regex": null,
+ "modules_to_save": [],
+ "lora_rank": 8,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
+ "lora_bias": "none",
+ "lora_dtype": null,
+ "lorap_lr_ratio": null,
+ "use_rslora": false,
+ "use_dora": false,
+ "lora_ga_batch_size": 2,
+ "lora_ga_iters": 2,
+ "lora_ga_max_length": 1024,
+ "lora_ga_direction": "ArB2r",
+ "lora_ga_scale": "stable",
+ "lora_ga_stable_gamma": 16,
+ "init_weights": true,
+ "fourier_n_frequency": 2000,
+ "fourier_scaling": 300.0,
+ "boft_block_size": 4,
+ "boft_block_num": 0,
+ "boft_n_butterfly_factor": 1,
+ "boft_dropout": 0.0,
+ "vera_rank": 256,
+ "vera_projection_prng_key": 0,
+ "vera_dropout": 0.0,
+ "vera_d_initial": 0.1,
+ "adapter_act": "gelu",
+ "adapter_length": 128,
+ "use_galore": false,
+ "galore_target_modules": null,
+ "galore_rank": 128,
+ "galore_update_proj_gap": 50,
+ "galore_scale": 1.0,
+ "galore_proj_type": "std",
+ "galore_optim_per_parameter": false,
+ "galore_with_embedding": false,
+ "galore_quantization": false,
+ "galore_proj_quant": false,
+ "galore_proj_bits": 4,
+ "galore_proj_group_size": 256,
+ "galore_cos_threshold": 0.4,
+ "galore_gamma_proj": 2,
+ "galore_queue_size": 5,
+ "adalora_target_r": 8,
+ "adalora_init_r": 12,
+ "adalora_tinit": 0,
+ "adalora_tfinal": 0,
+ "adalora_deltaT": 1,
+ "adalora_beta1": 0.85,
+ "adalora_beta2": 0.85,
+ "adalora_orth_reg_weight": 0.5,
+ "llamapro_num_new_blocks": 4,
+ "llamapro_num_groups": null,
+ "lisa_activated_layers": 0,
+ "lisa_step_interval": 20,
+ "reft_layer_key": null,
+ "reft_layers": null,
+ "reft_rank": 4,
+ "reft_intervention_type": "LoreftIntervention",
+ "reft_args": null,
+ "use_liger": false,
+ "model_layer_cls_name": null,
+ "metric_warmup_step": 0,
+ "fsdp_num": 1,
+ "acc_steps": 1,
+ "swanlab_token": null,
+ "swanlab_project": null,
+ "swanlab_workspace": null,
+ "swanlab_exp_name": null,
+ "swanlab_mode": "cloud",
+ "add_version": true,
+ "resume_only_model": false,
+ "check_model": true,
+ "create_checkpoint_symlink": false,
+ "packing": false,
+ "lazy_tokenize": false,
+ "loss_type": null,
+ "optimizer": null,
+ "metric": null,
+ "acc_strategy": "token",
+ "zero_hpz_partition_size": null,
+ "rank": 0,
+ "global_world_size": 8,
+ "local_world_size": 8,
+ "model_suffix": "Qwen2.5-7B-Instruct",
+ "model_info": "ModelInfo(model_type='qwen2_5', model_dir='/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-7B-Instruct/snapshots/a09a35458c702b33eeacc393d103063234e8bc28', torch_dtype=torch.bfloat16, max_model_len=32768, quant_method=None, quant_bits=None, rope_scaling=None, config=None, task_type='causal_lm', num_labels=None)",
+ "model_meta": "ModelMeta(model_type='qwen2_5', model_groups=[ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-0.5B-Instruct', hf_model_id='Qwen/Qwen2.5-0.5B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-1.5B-Instruct', hf_model_id='Qwen/Qwen2.5-1.5B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-3B-Instruct', hf_model_id='Qwen/Qwen2.5-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-7B-Instruct', hf_model_id='Qwen/Qwen2.5-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-14B-Instruct', hf_model_id='Qwen/Qwen2.5-14B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-32B-Instruct', hf_model_id='Qwen/Qwen2.5-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-72B-Instruct', hf_model_id='Qwen/Qwen2.5-72B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-0.5B', hf_model_id='Qwen/Qwen2.5-0.5B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-1.5B', hf_model_id='Qwen/Qwen2.5-1.5B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-3B', hf_model_id='Qwen/Qwen2.5-3B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-7B', hf_model_id='Qwen/Qwen2.5-7B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-14B', hf_model_id='Qwen/Qwen2.5-14B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-32B', hf_model_id='Qwen/Qwen2.5-32B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-72B', hf_model_id='Qwen/Qwen2.5-72B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-3B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-3B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-7B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-7B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-14B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-14B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-32B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-72B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-0.5B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-3B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-3B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-7B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-7B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-14B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-32B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-32B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-72B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-72B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-0.5B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-0.5B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-1.5B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-1.5B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-14B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-14B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-72B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-72B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=[]), ModelGroup(models=[Model(ms_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct', hf_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct', hf_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-3B-Instruct', hf_model_id='Qwen/Qwen2.5-Coder-3B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-7B-Instruct', hf_model_id='Qwen/Qwen2.5-Coder-7B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-14B-Instruct', hf_model_id='Qwen/Qwen2.5-Coder-14B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-32B-Instruct', hf_model_id='Qwen/Qwen2.5-Coder-32B-Instruct', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-0.5B', hf_model_id='Qwen/Qwen2.5-Coder-0.5B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-1.5B', hf_model_id='Qwen/Qwen2.5-Coder-1.5B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-3B', hf_model_id='Qwen/Qwen2.5-Coder-3B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-7B', hf_model_id='Qwen/Qwen2.5-Coder-7B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-14B', hf_model_id='Qwen/Qwen2.5-Coder-14B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-32B', hf_model_id='Qwen/Qwen2.5-Coder-32B', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-3B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-Coder-3B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-7B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-Coder-7B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-14B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-Coder-14B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-32B-Instruct-AWQ', hf_model_id='Qwen/Qwen2.5-Coder-32B-Instruct-AWQ', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-Coder-0.5B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-Coder-1.5B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-3B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-Coder-3B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-3B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-Coder-3B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-Coder-7B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-14B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-Coder-14B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-14B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-Coder-14B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-32B-Instruct-GPTQ-Int4', hf_model_id='Qwen/Qwen2.5-Coder-32B-Instruct-GPTQ-Int4', model_path=None, ms_revision=None, hf_revision=None), Model(ms_model_id='Qwen/Qwen2.5-Coder-32B-Instruct-GPTQ-Int8', hf_model_id='Qwen/Qwen2.5-Coder-32B-Instruct-GPTQ-Int8', model_path=None, ms_revision=None, hf_revision=None)], ignore_patterns=None, requires=None, tags=['coding'])], template='qwen2_5', get_function=<function get_model_tokenizer_with_flash_attn at 0x7bcae537eb60>, model_arch='llama', architectures=['Qwen2ForCausalLM'], additional_saved_files=[], torch_dtype=None, is_multimodal=False, is_reward=False, task_type=None, ignore_patterns=['*.zip', '*.gguf', '*.pth', '*.pt', 'consolidated*', 'onnx/*', '*.safetensors.md', '*.msgpack', '*.onnx', '*.ot', '*.h5', '*.bin', '*.safetensors'], requires=['transformers>=4.37'], tags=[])",
+ "model_dir": "/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-7B-Instruct/snapshots/a09a35458c702b33eeacc393d103063234e8bc28",
+ "hub": "<class 'swift.hub.hub.HFHub'>",
+ "training_args": "Seq2SeqTrainingArguments(output_dir='/home/ubuntu/output/v0-20250315-052746', overwrite_output_dir=False, do_train=False, do_eval=True, do_predict=False, eval_strategy=<IntervalStrategy.STEPS: 'steps'>, prediction_loss_only=False, per_device_train_batch_size=1, per_device_eval_batch_size=1, per_gpu_train_batch_size=None, per_gpu_eval_batch_size=None, gradient_accumulation_steps=4, eval_accumulation_steps=None, eval_delay=0, torch_empty_cache_steps=None, learning_rate=1e-05, weight_decay=0.1, adam_beta1=0.9, adam_beta2=0.999, adam_epsilon=1e-08, max_grad_norm=1.0, num_train_epochs=3.0, max_steps=-1, lr_scheduler_type=<SchedulerType.COSINE: 'cosine'>, lr_scheduler_kwargs=None, warmup_ratio=0.05, warmup_steps=0, log_level='passive', log_level_replica='warning', log_on_each_node=True, logging_dir='/home/ubuntu/output/v0-20250315-052746/runs', logging_strategy=<IntervalStrategy.STEPS: 'steps'>, logging_first_step=True, logging_steps=5, logging_nan_inf_filter=True, save_strategy=<SaveStrategy.STEPS: 'steps'>, save_steps=100, save_total_limit=5, save_safetensors=True, save_on_each_node=False, save_only_model=False, restore_callback_states_from_checkpoint=False, no_cuda=False, use_cpu=False, use_mps_device=False, seed=42, data_seed=42, jit_mode_eval=False, use_ipex=False, bf16=True, fp16=False, fp16_opt_level='O1', half_precision_backend='auto', bf16_full_eval=False, fp16_full_eval=False, tf32=None, local_rank=0, ddp_backend=None, tpu_num_cores=None, tpu_metrics_debug=False, debug=[], dataloader_drop_last=False, eval_steps=100, dataloader_num_workers=4, dataloader_prefetch_factor=None, past_index=-1, run_name='/home/ubuntu/output/v0-20250315-052746', disable_tqdm=False, remove_unused_columns=False, label_names=None, load_best_model_at_end=False, metric_for_best_model='loss', greater_is_better=False, ignore_data_skip=False, fsdp=[], fsdp_min_num_params=0, fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, fsdp_transformer_layer_cls_to_wrap=None, accelerator_config=AcceleratorConfig(split_batches=False, dispatch_batches=False, even_batches=True, use_seedable_sampler=True, non_blocking=False, gradient_accumulation_kwargs=None, use_configured_state=False), deepspeed={'fp16': {'enabled': 'auto', 'loss_scale': 0, 'loss_scale_window': 1000, 'initial_scale_power': 16, 'hysteresis': 2, 'min_loss_scale': 1}, 'bf16': {'enabled': 'auto'}, 'zero_optimization': {'stage': 3, 'offload_optimizer': {'device': 'none', 'pin_memory': True}, 'offload_param': {'device': 'none', 'pin_memory': True}, 'overlap_comm': True, 'contiguous_gradients': True, 'sub_group_size': 1000000000.0, 'reduce_bucket_size': 'auto', 'zero_quantized_weights': False, 'zero_quantized_gradients': False, 'stage3_prefetch_bucket_size': 'auto', 'stage3_param_persistence_threshold': 'auto', 'stage3_max_live_parameters': 1000000000.0, 'stage3_max_reuse_distance': 1000000000.0, 'stage3_gather_16bit_weights_on_model_save': True}, 'gradient_accumulation_steps': 'auto', 'gradient_clipping': 'auto', 'steps_per_print': 2000, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'wall_clock_breakdown': False}, label_smoothing_factor=0.0, optim=<OptimizerNames.ADAMW_TORCH: 'adamw_torch'>, optim_args=None, adafactor=False, group_by_length=False, length_column_name='length', report_to=['tensorboard'], ddp_find_unused_parameters=None, ddp_bucket_cap_mb=None, ddp_broadcast_buffers=None, dataloader_pin_memory=True, dataloader_persistent_workers=False, skip_memory_metrics=True, use_legacy_prediction_loop=False, push_to_hub=False, resume_from_checkpoint=None, hub_model_id=None, hub_strategy=<HubStrategy.EVERY_SAVE: 'every_save'>, hub_token=None, hub_private_repo=None, hub_always_push=False, gradient_checkpointing=True, gradient_checkpointing_kwargs=None, include_inputs_for_metrics=False, include_for_metrics=[], eval_do_concat_batches=True, fp16_backend='auto', evaluation_strategy='steps', push_to_hub_model_id=None, push_to_hub_organization=None, push_to_hub_token=None, mp_parameters='', auto_find_batch_size=False, full_determinism=False, torchdynamo=None, ray_scope='last', ddp_timeout=1800, torch_compile=False, torch_compile_backend=None, torch_compile_mode=None, dispatch_batches=None, split_batches=None, include_tokens_per_second=None, include_num_input_tokens_seen=None, neftune_noise_alpha=None, optim_target_modules=None, batch_eval_metrics=False, eval_on_start=False, use_liger_kernel=False, eval_use_gather_object=False, average_tokens_across_devices=None, sortish_sampler=False, predict_with_generate=False, generation_max_length=None, generation_num_beams=None, generation_config=None, acc_strategy='token', sequence_parallel_size=1, check_model=True, train_sampler_random=True, is_encoder_decoder=False, metric_warmup_step=0, train_dataset_sample=-1, fsdp_num=1, acc_steps=1, train_type='full', optimizer=None, local_repo_path=None, galore_config=None)"
+ }
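Taken together, args.json records full-parameter SFT of Qwen/Qwen2.5-7B-Instruct with ms-swift on the emilbiju/Execution-Data-Math:math dataset, under DeepSpeed ZeRO-3 on 8 GPUs; the effective batch size works out to 1 × 4 × 8 = 32 sequences per optimizer step. A minimal sketch (not part of the commit) that recovers those numbers from the file, assuming a hypothetical local copy of args.json:

```python
# Minimal sketch: summarize the training recipe stored in args.json.
import json

with open("args.json") as f:  # hypothetical local path
    args = json.load(f)

# 1 per-device batch x 4 grad-accumulation steps x 8 ranks = 32 sequences/step
effective_batch = (args["per_device_train_batch_size"]
                   * args["gradient_accumulation_steps"]
                   * args["global_world_size"])

print(args["model"], args["train_type"])                               # Qwen/Qwen2.5-7B-Instruct, full
print("ZeRO stage:", args["deepspeed"]["zero_optimization"]["stage"])  # 3
print("effective batch size:", effective_batch)                        # 32
print("lr / epochs:", args["learning_rate"], args["num_train_epochs"]) # 1e-05 / 3.0
```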
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "_name_or_path": "/home/ubuntu/.cache/huggingface/hub/models--Qwen--Qwen2.5-7B-Instruct/snapshots/a09a35458c702b33eeacc393d103063234e8bc28",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "eos_token_id": 151645,
+ "hidden_act": "silu",
+ "hidden_size": 3584,
+ "initializer_range": 0.02,
+ "intermediate_size": 18944,
+ "max_position_embeddings": 32768,
+ "max_window_layers": 28,
+ "model_type": "qwen2",
+ "num_attention_heads": 28,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 4,
+ "pad_token_id": 151643,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 1000000.0,
+ "sliding_window": 131072,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.49.0",
+ "use_cache": false,
+ "use_sliding_window": false,
+ "vocab_size": 152064
+ }
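The architecture numbers above imply a head dimension of 3584 / 28 = 128 and grouped-query attention with 28 / 4 = 7 query heads per KV head. A minimal sketch (not part of the commit) that derives this from config.json, assuming a hypothetical local copy:

```python
# Minimal sketch: derive the attention geometry from config.json.
import json

with open("config.json") as f:  # hypothetical local path
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]           # 3584 // 28 = 128
gqa_ratio = cfg["num_attention_heads"] // cfg["num_key_value_heads"]  # 28 // 4 = 7
print(f"head_dim={head_dim}, query heads per KV head={gqa_ratio}")
```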
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": true,
+ "eos_token_id": [
+ 151645,
+ 151643
+ ],
+ "pad_token_id": 151643,
+ "repetition_penalty": 1.05,
+ "temperature": 0.7,
+ "top_k": 20,
+ "top_p": 0.8,
+ "transformers_version": "4.49.0"
+ }
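These are the sampling defaults that model.generate() picks up automatically when the checkpoint is loaded; a minimal sketch (not part of the commit) of inspecting them explicitly, again with a hypothetical "./checkpoint" path:

```python
# Minimal sketch: load and inspect the shipped generation defaults.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./checkpoint")  # hypothetical path
print(gen_cfg.temperature, gen_cfg.top_p, gen_cfg.top_k)    # 0.7 0.8 20
# model.generate(**inputs, generation_config=gen_cfg) would apply them explicitly.
```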
latest ADDED
@@ -0,0 +1 @@
+ global_step800
merges.txt ADDED
The diff for this file is too large to render.
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33c684cb62b22c3bd0b31cf3e39596278a2b2870de276e2d4a045522e8b969f7
+ size 4877660776
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d729b8098b3d231f181b2adffb3509ece0372bcaa6a69f771cc562a4d3d3cde
+ size 4932751008
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9c12be66b587ae36c7c729d9d920161577377891ca580cfe67b564653f5ca95
+ size 4330865200
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b38eb226a4e2c8b01958929346cabe5eff22ce283e254f349565e176978d8496
+ size 1089994880
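Each pointer above records the SHA-256 of the actual shard, so a download can be verified locally. A minimal sketch (not part of the commit), checking the fourth shard against its pointer oid with a hypothetical local filename:

```python
# Minimal sketch: verify a downloaded shard against its Git LFS pointer oid.
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file in 1 MiB chunks and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "b38eb226a4e2c8b01958929346cabe5eff22ce283e254f349565e176978d8496"
assert sha256_of("model-00004-of-00004.safetensors") == expected  # hypothetical path
```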
model.safetensors.index.json ADDED
@@ -0,0 +1,346 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "metadata": {
3
+ "total_size": 15231233024
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00004-of-00004.safetensors",
7
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
8
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
19
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
21
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
22
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
30
+ "model.layers.1.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
31
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
32
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
33
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
34
+ "model.layers.10.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
35
+ "model.layers.10.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
36
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
37
+ "model.layers.10.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
41
+ "model.layers.10.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
42
+ "model.layers.10.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
43
+ "model.layers.10.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
46
+ "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
47
+ "model.layers.11.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
49
+ "model.layers.11.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
50
+ "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.11.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
53
+ "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
54
+ "model.layers.11.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
55
+ "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
56
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
57
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.12.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
60
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
61
+ "model.layers.12.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
62
+ "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
63
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
64
+ "model.layers.12.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
65
+ "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.12.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
67
+ "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
68
+ "model.layers.13.input_layernorm.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.13.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
70
+ "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
71
+ "model.layers.13.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.13.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
74
+ "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
75
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.13.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
77
+ "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
78
+ "model.layers.13.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
79
+ "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
80
+ "model.layers.14.input_layernorm.weight": "model-00002-of-00004.safetensors",
81
+ "model.layers.14.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
82
+ "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
83
+ "model.layers.14.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
84
+ "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
85
+ "model.layers.14.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
86
+ "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
87
+ "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
88
+ "model.layers.14.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
89
+ "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.14.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
91
+ "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
92
+ "model.layers.15.input_layernorm.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.15.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
94
+ "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
95
+ "model.layers.15.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
96
+ "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
97
+ "model.layers.15.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
98
+ "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
99
+ "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
100
+ "model.layers.15.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
101
+ "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
102
+ "model.layers.15.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
103
+ "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
104
+ "model.layers.16.input_layernorm.weight": "model-00002-of-00004.safetensors",
105
+ "model.layers.16.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
106
+ "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
107
+ "model.layers.16.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
108
+ "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
109
+ "model.layers.16.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
110
+ "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
111
+ "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
112
+ "model.layers.16.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
113
+ "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
114
+ "model.layers.16.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
115
+ "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
116
+ "model.layers.17.input_layernorm.weight": "model-00002-of-00004.safetensors",
117
+ "model.layers.17.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
118
+ "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
119
+ "model.layers.17.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
120
+ "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
121
+ "model.layers.17.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
122
+ "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
123
+ "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
124
+ "model.layers.17.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
125
+ "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
126
+ "model.layers.17.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
127
+ "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
128
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
130
+ "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
131
+ "model.layers.18.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
132
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
133
+ "model.layers.18.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
134
+ "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
135
+ "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
136
+ "model.layers.18.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
137
+ "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
138
+ "model.layers.18.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
139
+ "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
140
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
141
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.19.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.19.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
144
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
145
+ "model.layers.19.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
146
+ "model.layers.19.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
147
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
148
+ "model.layers.19.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
149
+ "model.layers.19.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.19.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
151
+ "model.layers.19.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
152
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
153
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
154
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
155
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
156
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
157
+ "model.layers.2.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
158
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
159
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
160
+ "model.layers.2.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
161
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
162
+ "model.layers.2.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
163
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
164
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
165
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
166
+ "model.layers.20.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.20.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
168
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
169
+ "model.layers.20.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
170
+ "model.layers.20.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
171
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
172
+ "model.layers.20.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
173
+ "model.layers.20.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
174
+ "model.layers.20.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
175
+ "model.layers.20.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
176
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
178
+ "model.layers.21.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
179
+ "model.layers.21.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
180
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
181
+ "model.layers.21.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
182
+ "model.layers.21.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
183
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
184
+ "model.layers.21.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
185
+ "model.layers.21.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
186
+ "model.layers.21.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
187
+ "model.layers.21.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
188
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
189
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
190
+ "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
191
+ "model.layers.22.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
193
+ "model.layers.22.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
194
+ "model.layers.22.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
196
+ "model.layers.22.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
197
+ "model.layers.22.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.22.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
199
+ "model.layers.22.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
200
+ "model.layers.23.input_layernorm.weight": "model-00003-of-00004.safetensors",
201
+ "model.layers.23.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
203
+ "model.layers.23.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
204
+ "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.23.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
206
+ "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
207
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
208
+ "model.layers.23.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
209
+ "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
210
+ "model.layers.23.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
211
+ "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.24.input_layernorm.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.24.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
214
+ "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
215
+ "model.layers.24.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
217
+ "model.layers.24.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
218
+ "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
219
+ "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
220
+ "model.layers.24.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
221
+ "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
222
+ "model.layers.24.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
223
+ "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
224
+ "model.layers.25.input_layernorm.weight": "model-00003-of-00004.safetensors",
225
+ "model.layers.25.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.25.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
228
+ "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
229
+ "model.layers.25.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
230
+ "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
231
+ "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
232
+ "model.layers.25.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
233
+ "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
234
+ "model.layers.25.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
235
+ "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
236
+ "model.layers.26.input_layernorm.weight": "model-00003-of-00004.safetensors",
237
+ "model.layers.26.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
238
+ "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
239
+ "model.layers.26.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
241
+ "model.layers.26.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
242
+ "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
243
+ "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.26.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
245
+ "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
246
+ "model.layers.26.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
247
+ "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.input_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.mlp.up_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.bias": "model-00003-of-00004.safetensors",
+ "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00004.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.bias": "model-00001-of-00004.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00004.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.bias": "model-00002-of-00004.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00002-of-00004.safetensors",
+ "model.norm.weight": "model-00003-of-00004.safetensors"
+ }
+ }
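The weight_map above pairs every tensor name with the shard file that stores it, so a loader can open only the shard it needs. A minimal sketch of resolving one tensor through such an index (the checkpoint directory path is a placeholder, not taken from this repo):

```python
import json
from safetensors import safe_open

CKPT_DIR = "/path/to/checkpoint"  # hypothetical local path

# The index maps each tensor name to the shard that contains it.
with open(f"{CKPT_DIR}/model.safetensors.index.json") as f:
    weight_map = json.load(f)["weight_map"]

name = "model.layers.27.self_attn.q_proj.weight"
shard = weight_map[name]  # e.g. "model-00003-of-00004.safetensors"

# Open only that shard and read the tensor lazily.
with safe_open(f"{CKPT_DIR}/{shard}", framework="pt") as f:
    tensor = f.get_tensor(name)
print(tensor.shape)
```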
rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae9162e03c562553a5d9d13120f544d3c47ea71bb39aa44e18253675e17ed4a4
+ size 15984
rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4809456871b3a40c8db7e0926a9db11b01149a1d483fb29b16fc69dabaf36c6f
+ size 15984
rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4bb6bcf25ff148b74eea7dd4895fc42e9433538fff5d75f0d2ae6cb0c2fdadf0
+ size 15984
rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f00ea04cd1a52c539d9cc948ac8a04676d6b99702acd09149565f781806f63f
+ size 15984
rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5571fb2fc1b413792b01ac691c759786855573992bab1d14875faccdaf8c881e
+ size 15984
rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59019ba23ead9c15851cb4349397254458ce50ea3c2987090404f4f3842c6d8f
+ size 15984
rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:45fdffda57fda4a555da7a5de6fc6ec7324e0dae048b92519af6c4f6a1bc7412
+ size 15984
rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62fb2c13e63aba83c4505fae1639f79a33853d8f1bebe20cecb73bf53c8e7c46
+ size 15984
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cbeaea61b81590fe73403f785c0568861f88d2c83ede8c36e87bd0f862ba83d
+ size 1064
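The rng_state_*.pth, scheduler.pt, and tokenizer.json entries in this commit are Git LFS pointer files rather than the binary payloads themselves; each pointer carries only a spec version, a sha256 oid, and a byte size. A small sketch of parsing one such pointer (the filename is illustrative):

```python
def parse_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its key/value fields."""
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. {'version': 'https://git-lfs.github.com/spec/v1',
#       'oid': 'sha256:...', 'size': '15984'}
print(parse_lfs_pointer("rng_state_0.pth"))
```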
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
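special_tokens_map.json pins <|im_end|> as the end-of-sequence token and <|endoftext|> as padding. One way to confirm the mapping after downloading the checkpoint (the local path is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("/path/to/checkpoint")  # hypothetical path
assert tok.eos_token == "<|im_end|>"
assert tok.pad_token == "<|endoftext|>"
```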
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c5ae00e602b8860cbd784ba82a8aa14e8feecec692e7076590d014d7b7fdafa
+ size 11421896
tokenizer_config.json ADDED
@@ -0,0 +1,208 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- else %}\n {{- '<|im_start|>system\\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) or (message.role == \"assistant\" and not message.tool_calls) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + message.content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n{%- endif %}\n",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
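The chat_template field above is a Jinja template that serializes a message list into the ChatML-style <|im_start|>/<|im_end|> format the model was trained on. A minimal usage sketch (the checkpoint path is a placeholder):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("/path/to/checkpoint")  # hypothetical path
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]
# Renders the <|im_start|>...<|im_end|> blocks and appends the assistant header.
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```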
trainer_state.json ADDED
@@ -0,0 +1,1715 @@
+ {
+ "best_metric": 0.40588063,
+ "best_model_checkpoint": "/home/ubuntu/output/v0-20250315-052746/checkpoint-800",
+ "epoch": 0.9549388242315726,
+ "eval_steps": 100,
+ "global_step": 800,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.001193673530289466,
+ "grad_norm": 22.298568401591066,
+ "learning_rate": 7.936507936507937e-08,
+ "loss": 1.0442615747451782,
+ "memory(GiB)": 30.7,
+ "step": 1,
+ "token_acc": 0.7699836867862969,
+ "train_speed(iter/s)": 0.093757
+ },
+ {
+ "epoch": 0.005968367651447329,
+ "grad_norm": 19.63376949197587,
+ "learning_rate": 3.9682539682539683e-07,
+ "loss": 0.9844925403594971,
+ "memory(GiB)": 36.92,
+ "step": 5,
+ "token_acc": 0.7549315386400557,
+ "train_speed(iter/s)": 0.149789
+ },
+ {
+ "epoch": 0.011936735302894658,
+ "grad_norm": 16.540823173599176,
+ "learning_rate": 7.936507936507937e-07,
+ "loss": 0.9816521644592285,
+ "memory(GiB)": 36.92,
+ "step": 10,
+ "token_acc": 0.7844917012448133,
+ "train_speed(iter/s)": 0.162277
+ },
+ {
+ "epoch": 0.017905102954341987,
+ "grad_norm": 7.069008652065498,
+ "learning_rate": 1.1904761904761906e-06,
+ "loss": 0.8435724258422852,
+ "memory(GiB)": 36.92,
+ "step": 15,
+ "token_acc": 0.7823455233291299,
+ "train_speed(iter/s)": 0.176636
+ },
+ {
+ "epoch": 0.023873470605789315,
+ "grad_norm": 5.623338707216517,
+ "learning_rate": 1.5873015873015873e-06,
+ "loss": 0.7347106456756591,
+ "memory(GiB)": 36.92,
+ "step": 20,
+ "token_acc": 0.8210757409440176,
+ "train_speed(iter/s)": 0.177914
+ },
+ {
+ "epoch": 0.029841838257236644,
+ "grad_norm": 4.003256651554691,
+ "learning_rate": 1.984126984126984e-06,
+ "loss": 0.654999828338623,
+ "memory(GiB)": 36.92,
+ "step": 25,
+ "token_acc": 0.7765267826680314,
+ "train_speed(iter/s)": 0.183726
+ },
+ {
+ "epoch": 0.03581020590868397,
+ "grad_norm": 3.4287088973952624,
+ "learning_rate": 2.380952380952381e-06,
+ "loss": 0.5874819755554199,
+ "memory(GiB)": 36.92,
+ "step": 30,
+ "token_acc": 0.8138706921105098,
+ "train_speed(iter/s)": 0.184018
+ },
+ {
+ "epoch": 0.0417785735601313,
+ "grad_norm": 2.8169959397852256,
+ "learning_rate": 2.7777777777777783e-06,
+ "loss": 0.6081454753875732,
+ "memory(GiB)": 36.92,
+ "step": 35,
+ "token_acc": 0.81794500723589,
+ "train_speed(iter/s)": 0.183529
+ },
+ {
+ "epoch": 0.04774694121157863,
+ "grad_norm": 3.325157673054176,
+ "learning_rate": 3.1746031746031746e-06,
+ "loss": 0.5828543663024902,
+ "memory(GiB)": 36.92,
+ "step": 40,
+ "token_acc": 0.8394230769230769,
+ "train_speed(iter/s)": 0.184431
+ },
+ {
+ "epoch": 0.05371530886302596,
+ "grad_norm": 2.7879997870731166,
+ "learning_rate": 3.5714285714285718e-06,
+ "loss": 0.5575875282287598,
+ "memory(GiB)": 36.92,
+ "step": 45,
+ "token_acc": 0.8293939393939394,
+ "train_speed(iter/s)": 0.187841
+ },
+ {
+ "epoch": 0.05968367651447329,
+ "grad_norm": 2.778252157541432,
+ "learning_rate": 3.968253968253968e-06,
+ "loss": 0.5551129341125488,
+ "memory(GiB)": 36.92,
+ "step": 50,
+ "token_acc": 0.8260184559981995,
+ "train_speed(iter/s)": 0.189026
+ },
+ {
+ "epoch": 0.06565204416592062,
+ "grad_norm": 3.0281694802961816,
+ "learning_rate": 4.365079365079366e-06,
+ "loss": 0.5619981765747071,
+ "memory(GiB)": 36.92,
+ "step": 55,
+ "token_acc": 0.8104107766505904,
+ "train_speed(iter/s)": 0.188889
+ },
+ {
+ "epoch": 0.07162041181736795,
+ "grad_norm": 3.102265765306523,
+ "learning_rate": 4.761904761904762e-06,
+ "loss": 0.5332321166992188,
+ "memory(GiB)": 36.92,
+ "step": 60,
+ "token_acc": 0.8470640768028578,
+ "train_speed(iter/s)": 0.189797
+ },
+ {
+ "epoch": 0.07758877946881527,
+ "grad_norm": 2.8144373694536444,
+ "learning_rate": 5.15873015873016e-06,
+ "loss": 0.5349865436553956,
+ "memory(GiB)": 36.92,
+ "step": 65,
+ "token_acc": 0.8722838137472284,
+ "train_speed(iter/s)": 0.190181
+ },
+ {
+ "epoch": 0.0835571471202626,
+ "grad_norm": 3.337016219899744,
+ "learning_rate": 5.555555555555557e-06,
+ "loss": 0.5452562808990479,
+ "memory(GiB)": 36.92,
+ "step": 70,
+ "token_acc": 0.8432369942196531,
+ "train_speed(iter/s)": 0.189336
+ },
+ {
+ "epoch": 0.08952551477170993,
+ "grad_norm": 3.06136632501639,
+ "learning_rate": 5.9523809523809525e-06,
+ "loss": 0.49980897903442384,
+ "memory(GiB)": 36.92,
+ "step": 75,
+ "token_acc": 0.8338361568809468,
+ "train_speed(iter/s)": 0.190084
+ },
+ {
+ "epoch": 0.09549388242315726,
+ "grad_norm": 2.4877794848384633,
+ "learning_rate": 6.349206349206349e-06,
+ "loss": 0.539669418334961,
+ "memory(GiB)": 36.92,
+ "step": 80,
+ "token_acc": 0.8455654331197023,
+ "train_speed(iter/s)": 0.189397
+ },
+ {
+ "epoch": 0.10146225007460459,
+ "grad_norm": 3.221405214526254,
+ "learning_rate": 6.746031746031747e-06,
+ "loss": 0.5160573959350586,
+ "memory(GiB)": 36.92,
+ "step": 85,
+ "token_acc": 0.8462370242214533,
+ "train_speed(iter/s)": 0.190557
+ },
+ {
+ "epoch": 0.10743061772605192,
+ "grad_norm": 3.2868066256101085,
+ "learning_rate": 7.1428571428571436e-06,
+ "loss": 0.4913814067840576,
+ "memory(GiB)": 36.92,
+ "step": 90,
+ "token_acc": 0.8452227659026526,
+ "train_speed(iter/s)": 0.190372
+ },
+ {
+ "epoch": 0.11339898537749925,
+ "grad_norm": 3.051253318495505,
+ "learning_rate": 7.53968253968254e-06,
+ "loss": 0.49474325180053713,
+ "memory(GiB)": 36.92,
+ "step": 95,
+ "token_acc": 0.8567275747508306,
+ "train_speed(iter/s)": 0.190208
+ },
+ {
+ "epoch": 0.11936735302894658,
+ "grad_norm": 2.912617922195808,
+ "learning_rate": 7.936507936507936e-06,
+ "loss": 0.4725308418273926,
+ "memory(GiB)": 36.92,
+ "step": 100,
+ "token_acc": 0.8608458390177354,
+ "train_speed(iter/s)": 0.190477
+ },
+ {
+ "epoch": 0.11936735302894658,
+ "eval_loss": 0.45056208968162537,
+ "eval_runtime": 10.9299,
+ "eval_samples_per_second": 24.611,
+ "eval_steps_per_second": 3.111,
+ "eval_token_acc": 0.8447598692022433,
+ "step": 100
+ },
+ {
+ "epoch": 0.12533572068039392,
+ "grad_norm": 2.8012333103883647,
+ "learning_rate": 8.333333333333334e-06,
+ "loss": 0.5016227722167969,
+ "memory(GiB)": 36.92,
+ "step": 105,
+ "token_acc": 0.842248243559719,
+ "train_speed(iter/s)": 0.17126
+ },
+ {
+ "epoch": 0.13130408833184123,
+ "grad_norm": 2.912876340051396,
+ "learning_rate": 8.730158730158731e-06,
+ "loss": 0.518134593963623,
+ "memory(GiB)": 36.92,
+ "step": 110,
+ "token_acc": 0.8509636604384287,
+ "train_speed(iter/s)": 0.172461
+ },
+ {
+ "epoch": 0.13727245598328858,
+ "grad_norm": 3.3516293509261117,
+ "learning_rate": 9.126984126984127e-06,
+ "loss": 0.5215555191040039,
+ "memory(GiB)": 36.92,
+ "step": 115,
+ "token_acc": 0.8062340503098797,
+ "train_speed(iter/s)": 0.172888
+ },
+ {
+ "epoch": 0.1432408236347359,
+ "grad_norm": 3.137187651305806,
+ "learning_rate": 9.523809523809525e-06,
+ "loss": 0.4938058853149414,
+ "memory(GiB)": 36.92,
+ "step": 120,
+ "token_acc": 0.8689788053949904,
+ "train_speed(iter/s)": 0.173312
+ },
+ {
+ "epoch": 0.14920919128618323,
+ "grad_norm": 2.9778302322354255,
+ "learning_rate": 9.920634920634922e-06,
+ "loss": 0.47763543128967284,
+ "memory(GiB)": 36.92,
+ "step": 125,
+ "token_acc": 0.8223684210526315,
+ "train_speed(iter/s)": 0.173984
+ },
+ {
+ "epoch": 0.15517755893763055,
+ "grad_norm": 2.454176368796001,
+ "learning_rate": 9.999930596405254e-06,
+ "loss": 0.5025428771972656,
+ "memory(GiB)": 36.92,
+ "step": 130,
+ "token_acc": 0.8584961515689757,
+ "train_speed(iter/s)": 0.17494
+ },
+ {
+ "epoch": 0.1611459265890779,
+ "grad_norm": 2.308752936530914,
+ "learning_rate": 9.999648647603774e-06,
+ "loss": 0.4561060905456543,
+ "memory(GiB)": 36.92,
+ "step": 135,
+ "token_acc": 0.8763222131814483,
+ "train_speed(iter/s)": 0.175709
+ },
+ {
+ "epoch": 0.1671142942405252,
+ "grad_norm": 3.2296962246861276,
+ "learning_rate": 9.999149828091632e-06,
+ "loss": 0.5205905437469482,
+ "memory(GiB)": 36.92,
+ "step": 140,
+ "token_acc": 0.8126463700234192,
+ "train_speed(iter/s)": 0.176096
+ },
+ {
+ "epoch": 0.17308266189197255,
+ "grad_norm": 2.8523931115518915,
+ "learning_rate": 9.998434159506211e-06,
+ "loss": 0.4669060230255127,
+ "memory(GiB)": 36.92,
+ "step": 145,
+ "token_acc": 0.8612167300380228,
+ "train_speed(iter/s)": 0.176748
+ },
+ {
+ "epoch": 0.17905102954341987,
+ "grad_norm": 2.689699959591659,
+ "learning_rate": 9.997501672891208e-06,
+ "loss": 0.4870173454284668,
+ "memory(GiB)": 36.92,
+ "step": 150,
+ "token_acc": 0.8250571369208394,
+ "train_speed(iter/s)": 0.177191
+ },
+ {
+ "epoch": 0.1850193971948672,
+ "grad_norm": 2.8278119407117517,
+ "learning_rate": 9.99635240869527e-06,
+ "loss": 0.47814245223999025,
+ "memory(GiB)": 36.92,
+ "step": 155,
+ "token_acc": 0.819718309859155,
+ "train_speed(iter/s)": 0.177751
+ },
+ {
+ "epoch": 0.19098776484631452,
+ "grad_norm": 3.0394183749078887,
+ "learning_rate": 9.99498641677025e-06,
+ "loss": 0.5187320232391357,
+ "memory(GiB)": 36.92,
+ "step": 160,
+ "token_acc": 0.8540501094624179,
+ "train_speed(iter/s)": 0.178072
+ },
+ {
+ "epoch": 0.19695613249776187,
+ "grad_norm": 2.4913040205595696,
+ "learning_rate": 9.993403756369037e-06,
+ "loss": 0.471418571472168,
+ "memory(GiB)": 36.92,
+ "step": 165,
+ "token_acc": 0.8507351108896087,
+ "train_speed(iter/s)": 0.178675
+ },
+ {
+ "epoch": 0.20292450014920918,
+ "grad_norm": 2.5344653445390937,
+ "learning_rate": 9.991604496142997e-06,
+ "loss": 0.5218185901641845,
+ "memory(GiB)": 36.92,
+ "step": 170,
+ "token_acc": 0.8207423580786026,
+ "train_speed(iter/s)": 0.179206
+ },
+ {
+ "epoch": 0.20889286780065652,
+ "grad_norm": 3.1475346404756737,
+ "learning_rate": 9.989588714138977e-06,
+ "loss": 0.4809536933898926,
+ "memory(GiB)": 36.92,
+ "step": 175,
+ "token_acc": 0.8179710144927537,
+ "train_speed(iter/s)": 0.179609
+ },
+ {
+ "epoch": 0.21486123545210384,
+ "grad_norm": 2.9443708297446927,
+ "learning_rate": 9.987356497795944e-06,
+ "loss": 0.5137897491455078,
+ "memory(GiB)": 36.92,
+ "step": 180,
+ "token_acc": 0.838412017167382,
+ "train_speed(iter/s)": 0.179743
+ },
+ {
+ "epoch": 0.22082960310355118,
+ "grad_norm": 2.3691979165448864,
+ "learning_rate": 9.984907943941164e-06,
+ "loss": 0.47942285537719725,
+ "memory(GiB)": 36.92,
+ "step": 185,
+ "token_acc": 0.8178681677864537,
+ "train_speed(iter/s)": 0.179758
+ },
+ {
+ "epoch": 0.2267979707549985,
+ "grad_norm": 2.7655733596568806,
+ "learning_rate": 9.98224315878603e-06,
+ "loss": 0.4850442886352539,
+ "memory(GiB)": 36.92,
+ "step": 190,
+ "token_acc": 0.8557343020238714,
+ "train_speed(iter/s)": 0.180113
+ },
+ {
+ "epoch": 0.23276633840644584,
+ "grad_norm": 2.9415771183832837,
+ "learning_rate": 9.979362257921428e-06,
+ "loss": 0.4999836921691895,
+ "memory(GiB)": 36.92,
+ "step": 195,
+ "token_acc": 0.851006381934217,
+ "train_speed(iter/s)": 0.180236
+ },
+ {
+ "epoch": 0.23873470605789315,
+ "grad_norm": 3.1074760910693797,
+ "learning_rate": 9.976265366312746e-06,
+ "loss": 0.5033563137054443,
+ "memory(GiB)": 36.92,
+ "step": 200,
+ "token_acc": 0.8293048128342246,
+ "train_speed(iter/s)": 0.180618
+ },
+ {
+ "epoch": 0.23873470605789315,
+ "eval_loss": 0.4415110647678375,
+ "eval_runtime": 10.926,
+ "eval_samples_per_second": 24.62,
+ "eval_steps_per_second": 3.112,
+ "eval_token_acc": 0.8473173672384502,
+ "step": 200
+ },
+ {
+ "epoch": 0.2447030737093405,
+ "grad_norm": 2.7742402422517016,
+ "learning_rate": 9.972952618294442e-06,
+ "loss": 0.48658447265625,
+ "memory(GiB)": 36.92,
+ "step": 205,
+ "token_acc": 0.8399616256759114,
+ "train_speed(iter/s)": 0.171547
+ },
+ {
+ "epoch": 0.25067144136078784,
+ "grad_norm": 2.9146485975442946,
+ "learning_rate": 9.969424157564215e-06,
+ "loss": 0.48202037811279297,
+ "memory(GiB)": 36.92,
+ "step": 210,
+ "token_acc": 0.8229777256740914,
+ "train_speed(iter/s)": 0.172058
+ },
+ {
+ "epoch": 0.25663980901223515,
+ "grad_norm": 2.6037700192849007,
+ "learning_rate": 9.965680137176778e-06,
+ "loss": 0.4780398368835449,
+ "memory(GiB)": 36.92,
+ "step": 215,
+ "token_acc": 0.8451862602806,
+ "train_speed(iter/s)": 0.172776
+ },
+ {
+ "epoch": 0.26260817666368247,
+ "grad_norm": 2.4624431066871555,
+ "learning_rate": 9.961720719537217e-06,
+ "loss": 0.46450080871582033,
+ "memory(GiB)": 36.92,
+ "step": 220,
+ "token_acc": 0.8089250493096647,
+ "train_speed(iter/s)": 0.173186
+ },
+ {
+ "epoch": 0.26857654431512984,
+ "grad_norm": 2.6192496099911624,
+ "learning_rate": 9.957546076393944e-06,
+ "loss": 0.44403810501098634,
+ "memory(GiB)": 36.92,
+ "step": 225,
+ "token_acc": 0.8560982743492249,
+ "train_speed(iter/s)": 0.173308
+ },
+ {
+ "epoch": 0.27454491196657715,
+ "grad_norm": 2.5789044914565227,
+ "learning_rate": 9.953156388831246e-06,
+ "loss": 0.4940804481506348,
+ "memory(GiB)": 36.92,
+ "step": 230,
+ "token_acc": 0.8385935769656699,
+ "train_speed(iter/s)": 0.173656
+ },
+ {
+ "epoch": 0.28051327961802447,
+ "grad_norm": 2.3813674364243984,
+ "learning_rate": 9.948551847261439e-06,
+ "loss": 0.4587420463562012,
+ "memory(GiB)": 36.92,
+ "step": 235,
+ "token_acc": 0.8549975381585426,
+ "train_speed(iter/s)": 0.173976
+ },
+ {
+ "epoch": 0.2864816472694718,
+ "grad_norm": 2.7610173702743865,
+ "learning_rate": 9.943732651416597e-06,
+ "loss": 0.4972860336303711,
+ "memory(GiB)": 36.92,
+ "step": 240,
+ "token_acc": 0.8406979379107183,
+ "train_speed(iter/s)": 0.174337
+ },
+ {
+ "epoch": 0.29245001492091915,
+ "grad_norm": 2.390894612477832,
+ "learning_rate": 9.938699010339898e-06,
+ "loss": 0.4903904438018799,
+ "memory(GiB)": 36.92,
+ "step": 245,
+ "token_acc": 0.8545253863134658,
+ "train_speed(iter/s)": 0.174579
+ },
+ {
+ "epoch": 0.29841838257236647,
+ "grad_norm": 2.3711824949447546,
+ "learning_rate": 9.933451142376545e-06,
+ "loss": 0.4524253845214844,
+ "memory(GiB)": 37.05,
+ "step": 250,
+ "token_acc": 0.8489612577203818,
+ "train_speed(iter/s)": 0.174973
+ },
+ {
+ "epoch": 0.3043867502238138,
+ "grad_norm": 2.2514671634568364,
+ "learning_rate": 9.927989275164305e-06,
+ "loss": 0.48909597396850585,
+ "memory(GiB)": 37.05,
+ "step": 255,
+ "token_acc": 0.8518639633747548,
+ "train_speed(iter/s)": 0.175028
+ },
+ {
+ "epoch": 0.3103551178752611,
+ "grad_norm": 2.3714755110814005,
+ "learning_rate": 9.922313645623634e-06,
+ "loss": 0.4785162448883057,
+ "memory(GiB)": 37.05,
+ "step": 260,
+ "token_acc": 0.8465215082315454,
+ "train_speed(iter/s)": 0.175714
+ },
+ {
+ "epoch": 0.31632348552670847,
+ "grad_norm": 2.648679383955696,
+ "learning_rate": 9.916424499947395e-06,
+ "loss": 0.46675701141357423,
+ "memory(GiB)": 37.05,
+ "step": 265,
+ "token_acc": 0.8571428571428571,
+ "train_speed(iter/s)": 0.175927
+ },
+ {
+ "epoch": 0.3222918531781558,
+ "grad_norm": 2.579144815458166,
+ "learning_rate": 9.910322093590177e-06,
+ "loss": 0.47339348793029784,
+ "memory(GiB)": 37.05,
+ "step": 270,
+ "token_acc": 0.8505902192242834,
+ "train_speed(iter/s)": 0.176471
+ },
+ {
+ "epoch": 0.3282602208296031,
+ "grad_norm": 2.2898701347032793,
+ "learning_rate": 9.904006691257224e-06,
+ "loss": 0.49665226936340334,
+ "memory(GiB)": 37.05,
+ "step": 275,
+ "token_acc": 0.8427124366910523,
+ "train_speed(iter/s)": 0.17689
+ },
+ {
+ "epoch": 0.3342285884810504,
+ "grad_norm": 1.9441720928034771,
+ "learning_rate": 9.897478566892942e-06,
+ "loss": 0.44453701972961424,
+ "memory(GiB)": 37.05,
+ "step": 280,
+ "token_acc": 0.8629363449691991,
+ "train_speed(iter/s)": 0.177368
+ },
+ {
+ "epoch": 0.3401969561324978,
+ "grad_norm": 2.4637260658165,
+ "learning_rate": 9.890738003669029e-06,
+ "loss": 0.4563939094543457,
+ "memory(GiB)": 37.05,
+ "step": 285,
+ "token_acc": 0.8230596456201648,
+ "train_speed(iter/s)": 0.1776
+ },
+ {
+ "epoch": 0.3461653237839451,
+ "grad_norm": 2.287302517723748,
+ "learning_rate": 9.883785293972175e-06,
+ "loss": 0.504718017578125,
+ "memory(GiB)": 37.05,
+ "step": 290,
+ "token_acc": 0.7899543378995434,
+ "train_speed(iter/s)": 0.177582
+ },
+ {
+ "epoch": 0.3521336914353924,
+ "grad_norm": 2.328908891504034,
+ "learning_rate": 9.87662073939139e-06,
+ "loss": 0.4355961799621582,
+ "memory(GiB)": 37.05,
+ "step": 295,
+ "token_acc": 0.8636019960683502,
+ "train_speed(iter/s)": 0.177798
+ },
+ {
+ "epoch": 0.35810205908683973,
+ "grad_norm": 2.444070696496546,
+ "learning_rate": 9.869244650704924e-06,
+ "loss": 0.4655925750732422,
+ "memory(GiB)": 37.05,
+ "step": 300,
+ "token_acc": 0.8573033707865169,
+ "train_speed(iter/s)": 0.177836
+ },
+ {
+ "epoch": 0.35810205908683973,
+ "eval_loss": 0.4284290373325348,
+ "eval_runtime": 10.9831,
+ "eval_samples_per_second": 24.492,
+ "eval_steps_per_second": 3.096,
+ "eval_token_acc": 0.8515189711550757,
+ "step": 300
+ },
+ {
+ "epoch": 0.3640704267382871,
+ "grad_norm": 2.659611192736946,
+ "learning_rate": 9.861657347866778e-06,
+ "loss": 0.5253509521484375,
+ "memory(GiB)": 37.06,
+ "step": 305,
+ "token_acc": 0.828113750899928,
+ "train_speed(iter/s)": 0.171888
+ },
+ {
+ "epoch": 0.3700387943897344,
+ "grad_norm": 2.6984676971627226,
+ "learning_rate": 9.853859159992831e-06,
+ "loss": 0.47617392539978026,
+ "memory(GiB)": 37.06,
+ "step": 310,
+ "token_acc": 0.8316008316008316,
+ "train_speed(iter/s)": 0.172231
+ },
+ {
+ "epoch": 0.37600716204118173,
+ "grad_norm": 2.195598140600359,
+ "learning_rate": 9.845850425346563e-06,
+ "loss": 0.4360311508178711,
+ "memory(GiB)": 37.06,
+ "step": 315,
+ "token_acc": 0.848318462594372,
+ "train_speed(iter/s)": 0.172652
+ },
+ {
+ "epoch": 0.38197552969262905,
+ "grad_norm": 2.4976869303898597,
+ "learning_rate": 9.837631491324379e-06,
+ "loss": 0.46515851020812987,
+ "memory(GiB)": 37.06,
+ "step": 320,
+ "token_acc": 0.8522144522144522,
+ "train_speed(iter/s)": 0.172786
+ },
+ {
+ "epoch": 0.3879438973440764,
+ "grad_norm": 3.017469180784894,
+ "learning_rate": 9.829202714440544e-06,
+ "loss": 0.5420156478881836,
+ "memory(GiB)": 37.06,
+ "step": 325,
+ "token_acc": 0.8376052027543994,
+ "train_speed(iter/s)": 0.17318
+ },
+ {
+ "epoch": 0.39391226499552373,
+ "grad_norm": 2.5730220119384297,
+ "learning_rate": 9.820564460311719e-06,
+ "loss": 0.4916552543640137,
+ "memory(GiB)": 37.06,
+ "step": 330,
+ "token_acc": 0.8207920792079207,
+ "train_speed(iter/s)": 0.173365
+ },
+ {
+ "epoch": 0.39988063264697105,
+ "grad_norm": 2.798903385122773,
+ "learning_rate": 9.811717103641096e-06,
+ "loss": 0.4587296485900879,
+ "memory(GiB)": 37.06,
+ "step": 335,
+ "token_acc": 0.8592551001310126,
+ "train_speed(iter/s)": 0.173592
+ },
+ {
+ "epoch": 0.40584900029841836,
+ "grad_norm": 2.6409823275058653,
+ "learning_rate": 9.802661028202147e-06,
+ "loss": 0.48290514945983887,
+ "memory(GiB)": 37.06,
+ "step": 340,
+ "token_acc": 0.823793194407808,
+ "train_speed(iter/s)": 0.173952
+ },
+ {
+ "epoch": 0.41181736794986573,
+ "grad_norm": 3.0285812809146635,
+ "learning_rate": 9.79339662682198e-06,
+ "loss": 0.46567506790161134,
+ "memory(GiB)": 37.06,
+ "step": 345,
+ "token_acc": 0.8304556354916067,
+ "train_speed(iter/s)": 0.174192
+ },
+ {
+ "epoch": 0.41778573560131305,
+ "grad_norm": 2.4611578793858486,
+ "learning_rate": 9.783924301364297e-06,
+ "loss": 0.4647653579711914,
+ "memory(GiB)": 37.06,
+ "step": 350,
+ "token_acc": 0.8199260286638927,
+ "train_speed(iter/s)": 0.17443
+ },
+ {
+ "epoch": 0.42375410325276036,
+ "grad_norm": 2.154896994755901,
+ "learning_rate": 9.774244462711962e-06,
+ "loss": 0.4952418327331543,
+ "memory(GiB)": 37.06,
+ "step": 355,
+ "token_acc": 0.8217054263565892,
+ "train_speed(iter/s)": 0.174757
+ },
+ {
+ "epoch": 0.4297224709042077,
+ "grad_norm": 2.005838047714932,
+ "learning_rate": 9.764357530749178e-06,
+ "loss": 0.4674674034118652,
+ "memory(GiB)": 37.06,
+ "step": 360,
+ "token_acc": 0.841979596266551,
+ "train_speed(iter/s)": 0.174828
+ },
+ {
+ "epoch": 0.43569083855565505,
+ "grad_norm": 2.292609923640767,
+ "learning_rate": 9.754263934343272e-06,
+ "loss": 0.44636335372924807,
+ "memory(GiB)": 37.06,
+ "step": 365,
+ "token_acc": 0.8596112311015118,
+ "train_speed(iter/s)": 0.175118
+ },
+ {
+ "epoch": 0.44165920620710236,
+ "grad_norm": 2.477107058493794,
+ "learning_rate": 9.743964111326098e-06,
+ "loss": 0.4866192817687988,
+ "memory(GiB)": 37.06,
+ "step": 370,
+ "token_acc": 0.809440252675908,
+ "train_speed(iter/s)": 0.175357
+ },
+ {
+ "epoch": 0.4476275738585497,
+ "grad_norm": 2.3446291196746922,
+ "learning_rate": 9.733458508475038e-06,
+ "loss": 0.4887577533721924,
+ "memory(GiB)": 37.06,
+ "step": 375,
+ "token_acc": 0.8332948510736551,
+ "train_speed(iter/s)": 0.175371
+ },
+ {
+ "epoch": 0.453595941509997,
+ "grad_norm": 2.29799169108157,
+ "learning_rate": 9.722747581493625e-06,
+ "loss": 0.49045257568359374,
+ "memory(GiB)": 37.06,
+ "step": 380,
+ "token_acc": 0.8406266882766072,
+ "train_speed(iter/s)": 0.175414
+ },
+ {
+ "epoch": 0.45956430916144436,
+ "grad_norm": 2.563802674403576,
+ "learning_rate": 9.711831794991777e-06,
+ "loss": 0.4675490379333496,
+ "memory(GiB)": 37.06,
+ "step": 385,
+ "token_acc": 0.847358529964502,
+ "train_speed(iter/s)": 0.175567
+ },
+ {
+ "epoch": 0.4655326768128917,
+ "grad_norm": 2.480776446284018,
+ "learning_rate": 9.700711622465645e-06,
+ "loss": 0.4845867156982422,
+ "memory(GiB)": 37.06,
+ "step": 390,
+ "token_acc": 0.8422996998383745,
+ "train_speed(iter/s)": 0.17572
+ },
+ {
+ "epoch": 0.471501044464339,
+ "grad_norm": 2.721044012538843,
+ "learning_rate": 9.689387546277062e-06,
+ "loss": 0.46145071983337405,
+ "memory(GiB)": 37.06,
+ "step": 395,
+ "token_acc": 0.8513663630304377,
+ "train_speed(iter/s)": 0.175882
+ },
+ {
+ "epoch": 0.4774694121157863,
+ "grad_norm": 2.580126202957563,
+ "learning_rate": 9.677860057632642e-06,
+ "loss": 0.5093360424041748,
+ "memory(GiB)": 37.06,
+ "step": 400,
+ "token_acc": 0.8206378986866791,
+ "train_speed(iter/s)": 0.175987
+ },
+ {
+ "epoch": 0.4774694121157863,
+ "eval_loss": 0.42347872257232666,
+ "eval_runtime": 10.9358,
+ "eval_samples_per_second": 24.598,
+ "eval_steps_per_second": 3.109,
+ "eval_token_acc": 0.8527429166438318,
+ "step": 400
+ },
+ {
+ "epoch": 0.4834377797672337,
+ "grad_norm": 2.355447977882308,
+ "learning_rate": 9.66612965656245e-06,
+ "loss": 0.48992347717285156,
+ "memory(GiB)": 37.06,
+ "step": 405,
+ "token_acc": 0.8608419645840294,
+ "train_speed(iter/s)": 0.171561
+ },
+ {
+ "epoch": 0.489406147418681,
+ "grad_norm": 2.0174115419967773,
+ "learning_rate": 9.654196851898325e-06,
+ "loss": 0.4750755786895752,
+ "memory(GiB)": 37.06,
+ "step": 410,
+ "token_acc": 0.8274902615470228,
+ "train_speed(iter/s)": 0.171858
+ },
+ {
+ "epoch": 0.4953745150701283,
+ "grad_norm": 2.155026242929759,
+ "learning_rate": 9.642062161251807e-06,
+ "loss": 0.46627135276794435,
+ "memory(GiB)": 37.06,
+ "step": 415,
+ "token_acc": 0.8661600496277916,
+ "train_speed(iter/s)": 0.17197
+ },
+ {
+ "epoch": 0.5013428827215757,
+ "grad_norm": 2.8519922687228174,
+ "learning_rate": 9.62972611099168e-06,
+ "loss": 0.4620970726013184,
+ "memory(GiB)": 37.06,
+ "step": 420,
+ "token_acc": 0.8595988538681948,
+ "train_speed(iter/s)": 0.172268
+ },
+ {
+ "epoch": 0.5073112503730229,
+ "grad_norm": 2.5658438134794324,
+ "learning_rate": 9.617189236221143e-06,
+ "loss": 0.45318241119384767,
+ "memory(GiB)": 37.06,
+ "step": 425,
+ "token_acc": 0.8252274866645748,
+ "train_speed(iter/s)": 0.172438
+ },
+ {
+ "epoch": 0.5132796180244703,
+ "grad_norm": 2.2980368916312206,
+ "learning_rate": 9.604452080754601e-06,
+ "loss": 0.46477622985839845,
+ "memory(GiB)": 37.06,
+ "step": 430,
+ "token_acc": 0.8681318681318682,
+ "train_speed(iter/s)": 0.17271
+ },
+ {
+ "epoch": 0.5192479856759177,
+ "grad_norm": 2.3920351806796925,
+ "learning_rate": 9.591515197094064e-06,
+ "loss": 0.43802127838134763,
+ "memory(GiB)": 37.06,
+ "step": 435,
+ "token_acc": 0.8632865550022635,
+ "train_speed(iter/s)": 0.172963
+ },
+ {
+ "epoch": 0.5252163533273649,
+ "grad_norm": 2.3926322888936196,
+ "learning_rate": 9.578379146405202e-06,
+ "loss": 0.4414364814758301,
+ "memory(GiB)": 37.06,
+ "step": 440,
+ "token_acc": 0.8378196500672948,
+ "train_speed(iter/s)": 0.173049
+ },
+ {
+ "epoch": 0.5311847209788123,
+ "grad_norm": 2.5309415862721787,
+ "learning_rate": 9.565044498492984e-06,
+ "loss": 0.4737836837768555,
+ "memory(GiB)": 37.06,
+ "step": 445,
+ "token_acc": 0.8400094809196492,
+ "train_speed(iter/s)": 0.173413
+ },
+ {
+ "epoch": 0.5371530886302597,
+ "grad_norm": 2.574732220606661,
+ "learning_rate": 9.551511831776966e-06,
+ "loss": 0.4299252986907959,
+ "memory(GiB)": 37.06,
+ "step": 450,
+ "token_acc": 0.8394777265745008,
+ "train_speed(iter/s)": 0.173639
+ },
+ {
+ "epoch": 0.5431214562817069,
+ "grad_norm": 2.209862389780888,
+ "learning_rate": 9.53778173326621e-06,
+ "loss": 0.44927520751953126,
+ "memory(GiB)": 37.06,
+ "step": 455,
+ "token_acc": 0.8641338013627916,
+ "train_speed(iter/s)": 0.173751
+ },
+ {
+ "epoch": 0.5490898239331543,
+ "grad_norm": 2.524639918389781,
+ "learning_rate": 9.523854798533814e-06,
+ "loss": 0.44107656478881835,
+ "memory(GiB)": 37.06,
+ "step": 460,
+ "token_acc": 0.8868033496967946,
+ "train_speed(iter/s)": 0.174216
+ },
+ {
+ "epoch": 0.5550581915846016,
+ "grad_norm": 2.1182849441153215,
+ "learning_rate": 9.509731631691071e-06,
+ "loss": 0.43174285888671876,
+ "memory(GiB)": 37.06,
+ "step": 465,
+ "token_acc": 0.855464759959142,
+ "train_speed(iter/s)": 0.174365
+ },
+ {
+ "epoch": 0.5610265592360489,
+ "grad_norm": 2.2926487255366688,
+ "learning_rate": 9.495412845361279e-06,
+ "loss": 0.48258438110351565,
+ "memory(GiB)": 37.06,
+ "step": 470,
+ "token_acc": 0.8603872818551279,
+ "train_speed(iter/s)": 0.174664
+ },
+ {
+ "epoch": 0.5669949268874963,
+ "grad_norm": 2.192746026976168,
+ "learning_rate": 9.480899060653154e-06,
+ "loss": 0.4563854217529297,
+ "memory(GiB)": 37.06,
+ "step": 475,
+ "token_acc": 0.8394289067083904,
+ "train_speed(iter/s)": 0.17502
+ },
+ {
+ "epoch": 0.5729632945389436,
+ "grad_norm": 2.014209866578747,
+ "learning_rate": 9.466190907133901e-06,
+ "loss": 0.4754791259765625,
+ "memory(GiB)": 37.06,
+ "step": 480,
+ "token_acc": 0.8577712609970675,
+ "train_speed(iter/s)": 0.175025
+ },
+ {
+ "epoch": 0.5789316621903909,
+ "grad_norm": 2.559320864210838,
+ "learning_rate": 9.451289022801894e-06,
+ "loss": 0.47232685089111326,
+ "memory(GiB)": 37.06,
+ "step": 485,
+ "token_acc": 0.8380402225074882,
+ "train_speed(iter/s)": 0.175186
+ },
+ {
+ "epoch": 0.5849000298418383,
+ "grad_norm": 2.2053676509330433,
+ "learning_rate": 9.436194054058998e-06,
+ "loss": 0.4336155891418457,
+ "memory(GiB)": 37.06,
+ "step": 490,
+ "token_acc": 0.8529990167158309,
+ "train_speed(iter/s)": 0.175216
+ },
+ {
+ "epoch": 0.5908683974932856,
+ "grad_norm": 2.46940001428622,
+ "learning_rate": 9.420906655682553e-06,
+ "loss": 0.45275249481201174,
+ "memory(GiB)": 37.06,
+ "step": 495,
+ "token_acc": 0.8271080928126768,
+ "train_speed(iter/s)": 0.175432
+ },
+ {
+ "epoch": 0.5968367651447329,
+ "grad_norm": 2.3675730058319293,
+ "learning_rate": 9.405427490796941e-06,
+ "loss": 0.48803205490112306,
+ "memory(GiB)": 37.06,
+ "step": 500,
+ "token_acc": 0.8432593011741406,
+ "train_speed(iter/s)": 0.175539
+ },
+ {
+ "epoch": 0.5968367651447329,
+ "eval_loss": 0.4169776141643524,
+ "eval_runtime": 10.9599,
+ "eval_samples_per_second": 24.544,
+ "eval_steps_per_second": 3.102,
+ "eval_token_acc": 0.8532361484079575,
+ "step": 500
+ },
+ {
+ "epoch": 0.6028051327961802,
+ "grad_norm": 2.1414646330001217,
+ "learning_rate": 9.389757230844845e-06,
+ "loss": 0.46323652267456056,
+ "memory(GiB)": 37.06,
+ "step": 505,
+ "token_acc": 0.8552877345904119,
+ "train_speed(iter/s)": 0.159112
+ },
+ {
+ "epoch": 0.6087735004476276,
+ "grad_norm": 2.5503273386919667,
+ "learning_rate": 9.373896555558113e-06,
+ "loss": 0.4701972961425781,
+ "memory(GiB)": 37.06,
+ "step": 510,
+ "token_acc": 0.8592652620205294,
+ "train_speed(iter/s)": 0.159422
+ },
+ {
+ "epoch": 0.6147418680990749,
+ "grad_norm": 2.6125713791079996,
+ "learning_rate": 9.357846152928275e-06,
+ "loss": 0.4990544319152832,
+ "memory(GiB)": 37.06,
+ "step": 515,
+ "token_acc": 0.824811732065002,
+ "train_speed(iter/s)": 0.159707
+ },
+ {
+ "epoch": 0.6207102357505222,
+ "grad_norm": 1.9353177630019818,
+ "learning_rate": 9.341606719176695e-06,
+ "loss": 0.4381883144378662,
+ "memory(GiB)": 37.06,
+ "step": 520,
+ "token_acc": 0.867666063582321,
+ "train_speed(iter/s)": 0.159909
+ },
+ {
+ "epoch": 0.6266786034019696,
+ "grad_norm": 2.3284686918748667,
+ "learning_rate": 9.325178958724387e-06,
+ "loss": 0.45581645965576173,
+ "memory(GiB)": 37.06,
+ "step": 525,
+ "token_acc": 0.8706395348837209,
+ "train_speed(iter/s)": 0.160206
+ },
+ {
+ "epoch": 0.6326469710534169,
+ "grad_norm": 2.2369421417810926,
+ "learning_rate": 9.308563584161439e-06,
+ "loss": 0.4688922882080078,
+ "memory(GiB)": 37.06,
+ "step": 530,
+ "token_acc": 0.8338983050847457,
+ "train_speed(iter/s)": 0.160549
+ },
+ {
+ "epoch": 0.6386153387048642,
+ "grad_norm": 2.4187058758316202,
+ "learning_rate": 9.291761316216115e-06,
+ "loss": 0.43785710334777833,
+ "memory(GiB)": 37.06,
+ "step": 535,
+ "token_acc": 0.8175961715442666,
+ "train_speed(iter/s)": 0.160901
+ },
+ {
+ "epoch": 0.6445837063563116,
+ "grad_norm": 2.11230034988461,
+ "learning_rate": 9.274772883723587e-06,
+ "loss": 0.4285177707672119,
+ "memory(GiB)": 37.06,
+ "step": 540,
+ "token_acc": 0.8522423025435074,
+ "train_speed(iter/s)": 0.161093
+ },
+ {
+ "epoch": 0.6505520740077588,
+ "grad_norm": 2.340278397663115,
+ "learning_rate": 9.257599023594326e-06,
+ "loss": 0.4503736972808838,
+ "memory(GiB)": 37.06,
+ "step": 545,
+ "token_acc": 0.8704713049054184,
+ "train_speed(iter/s)": 0.161286
+ },
+ {
+ "epoch": 0.6565204416592062,
+ "grad_norm": 2.3913667503479705,
+ "learning_rate": 9.24024048078213e-06,
+ "loss": 0.42584834098815916,
+ "memory(GiB)": 37.06,
+ "step": 550,
+ "token_acc": 0.8828032979976443,
+ "train_speed(iter/s)": 0.161464
+ },
+ {
+ "epoch": 0.6624888093106536,
+ "grad_norm": 2.2991966974662628,
+ "learning_rate": 9.222698008251814e-06,
+ "loss": 0.48091468811035154,
+ "memory(GiB)": 37.06,
+ "step": 555,
+ "token_acc": 0.8286792452830188,
+ "train_speed(iter/s)": 0.161689
+ },
+ {
+ "epoch": 0.6684571769621008,
+ "grad_norm": 2.083499198931165,
+ "learning_rate": 9.204972366946546e-06,
+ "loss": 0.4586004734039307,
+ "memory(GiB)": 37.06,
+ "step": 560,
+ "token_acc": 0.8503009027081244,
+ "train_speed(iter/s)": 0.16188
+ },
+ {
+ "epoch": 0.6744255446135482,
+ "grad_norm": 2.475812664409812,
+ "learning_rate": 9.187064325754838e-06,
+ "loss": 0.4561641693115234,
+ "memory(GiB)": 37.06,
+ "step": 565,
+ "token_acc": 0.8384485031067596,
+ "train_speed(iter/s)": 0.162054
+ },
+ {
+ "epoch": 0.6803939122649956,
+ "grad_norm": 2.4413316196832984,
+ "learning_rate": 9.168974661477206e-06,
+ "loss": 0.43843851089477537,
+ "memory(GiB)": 37.06,
+ "step": 570,
+ "token_acc": 0.839965019676432,
+ "train_speed(iter/s)": 0.162185
+ },
+ {
+ "epoch": 0.6863622799164428,
+ "grad_norm": 2.1737549301105075,
+ "learning_rate": 9.150704158792456e-06,
+ "loss": 0.4771718502044678,
+ "memory(GiB)": 37.06,
+ "step": 575,
+ "token_acc": 0.8196035642844154,
+ "train_speed(iter/s)": 0.162359
+ },
+ {
+ "epoch": 0.6923306475678902,
+ "grad_norm": 2.1356874443108342,
+ "learning_rate": 9.13225361022366e-06,
+ "loss": 0.48221721649169924,
+ "memory(GiB)": 37.06,
+ "step": 580,
+ "token_acc": 0.8299897993879632,
+ "train_speed(iter/s)": 0.162445
+ },
+ {
+ "epoch": 0.6982990152193375,
+ "grad_norm": 2.3220256859553077,
+ "learning_rate": 9.113623816103775e-06,
+ "loss": 0.4806779384613037,
+ "memory(GiB)": 37.06,
+ "step": 585,
+ "token_acc": 0.8411007545494895,
+ "train_speed(iter/s)": 0.162682
+ },
+ {
+ "epoch": 0.7042673828707848,
+ "grad_norm": 2.069813477739464,
+ "learning_rate": 9.094815584540922e-06,
+ "loss": 0.4947704792022705,
+ "memory(GiB)": 37.06,
+ "step": 590,
+ "token_acc": 0.862796833773087,
+ "train_speed(iter/s)": 0.162845
+ },
+ {
+ "epoch": 0.7102357505222322,
+ "grad_norm": 2.252802103709778,
+ "learning_rate": 9.075829731383342e-06,
+ "loss": 0.4306300163269043,
+ "memory(GiB)": 37.06,
+ "step": 595,
+ "token_acc": 0.8425353797089894,
+ "train_speed(iter/s)": 0.163154
+ },
+ {
+ "epoch": 0.7162041181736795,
+ "grad_norm": 2.241419478853809,
+ "learning_rate": 9.056667080184004e-06,
+ "loss": 0.4567378520965576,
+ "memory(GiB)": 37.06,
+ "step": 600,
+ "token_acc": 0.8388354561996361,
+ "train_speed(iter/s)": 0.163286
+ },
+ {
+ "epoch": 0.7162041181736795,
+ "eval_loss": 0.41334930062294006,
+ "eval_runtime": 10.9312,
+ "eval_samples_per_second": 24.608,
+ "eval_steps_per_second": 3.11,
+ "eval_token_acc": 0.8542591476224403,
+ "step": 600
+ },
+ {
+ "epoch": 0.7221724858251268,
+ "grad_norm": 2.1208660287310384,
+ "learning_rate": 9.037328462164866e-06,
+ "loss": 0.44713678359985354,
+ "memory(GiB)": 37.06,
+ "step": 605,
+ "token_acc": 0.8356246777796872,
+ "train_speed(iter/s)": 0.151305
+ },
+ {
+ "epoch": 0.7281408534765742,
+ "grad_norm": 1.9420061515865858,
+ "learning_rate": 9.01781471618085e-06,
+ "loss": 0.45147147178649905,
+ "memory(GiB)": 37.06,
+ "step": 610,
+ "token_acc": 0.8882771277816013,
+ "train_speed(iter/s)": 0.151579
+ },
+ {
+ "epoch": 0.7341092211280215,
+ "grad_norm": 2.370549361627338,
+ "learning_rate": 8.998126688683423e-06,
+ "loss": 0.4287998199462891,
+ "memory(GiB)": 37.06,
+ "step": 615,
+ "token_acc": 0.8318122555410691,
+ "train_speed(iter/s)": 0.15183
+ },
+ {
+ "epoch": 0.7400775887794688,
+ "grad_norm": 2.003208951467392,
+ "learning_rate": 8.978265233683903e-06,
+ "loss": 0.4494300842285156,
+ "memory(GiB)": 37.06,
+ "step": 620,
+ "token_acc": 0.8252328878088295,
+ "train_speed(iter/s)": 0.15205
+ },
+ {
+ "epoch": 0.7460459564309161,
+ "grad_norm": 2.602367805333985,
+ "learning_rate": 8.9582312127164e-06,
+ "loss": 0.46652889251708984,
+ "memory(GiB)": 37.06,
+ "step": 625,
+ "token_acc": 0.8474077428118633,
+ "train_speed(iter/s)": 0.152311
+ },
+ {
+ "epoch": 0.7520143240823635,
+ "grad_norm": 2.3007477614457765,
+ "learning_rate": 8.938025494800454e-06,
+ "loss": 0.46235361099243166,
+ "memory(GiB)": 37.06,
+ "step": 630,
+ "token_acc": 0.8234998744664825,
+ "train_speed(iter/s)": 0.152632
+ },
+ {
+ "epoch": 0.7579826917338108,
+ "grad_norm": 2.403260011722763,
+ "learning_rate": 8.917648956403338e-06,
+ "loss": 0.4329329490661621,
+ "memory(GiB)": 37.06,
+ "step": 635,
+ "token_acc": 0.8512756689483509,
+ "train_speed(iter/s)": 0.152969
+ },
+ {
+ "epoch": 0.7639510593852581,
+ "grad_norm": 1.8459463363591184,
+ "learning_rate": 8.897102481402031e-06,
+ "loss": 0.45981664657592775,
+ "memory(GiB)": 37.06,
+ "step": 640,
+ "token_acc": 0.8598321614878657,
+ "train_speed(iter/s)": 0.153182
+ },
+ {
+ "epoch": 0.7699194270367055,
+ "grad_norm": 2.0204814112895044,
+ "learning_rate": 8.876386961044892e-06,
+ "loss": 0.46657752990722656,
+ "memory(GiB)": 37.06,
+ "step": 645,
+ "token_acc": 0.8745874587458746,
+ "train_speed(iter/s)": 0.153345
+ },
+ {
+ "epoch": 0.7758877946881528,
+ "grad_norm": 1.8481808083298177,
+ "learning_rate": 8.855503293912987e-06,
+ "loss": 0.4649078369140625,
+ "memory(GiB)": 37.06,
+ "step": 650,
+ "token_acc": 0.8592820512820513,
+ "train_speed(iter/s)": 0.153498
+ },
+ {
+ "epoch": 0.7818561623396001,
+ "grad_norm": 2.2884914044841698,
+ "learning_rate": 8.834452385881121e-06,
+ "loss": 0.4653633117675781,
+ "memory(GiB)": 37.06,
+ "step": 655,
+ "token_acc": 0.8515602216389618,
+ "train_speed(iter/s)": 0.153659
+ },
+ {
+ "epoch": 0.7878245299910475,
+ "grad_norm": 2.173340273942357,
+ "learning_rate": 8.813235150078532e-06,
+ "loss": 0.46648712158203126,
+ "memory(GiB)": 37.06,
+ "step": 660,
+ "token_acc": 0.8156269959548648,
+ "train_speed(iter/s)": 0.153953
+ },
+ {
+ "epoch": 0.7937928976424948,
+ "grad_norm": 2.2191296614587563,
+ "learning_rate": 8.791852506849301e-06,
+ "loss": 0.45751609802246096,
+ "memory(GiB)": 37.06,
+ "step": 665,
+ "token_acc": 0.8260312580066616,
+ "train_speed(iter/s)": 0.154161
+ },
+ {
+ "epoch": 0.7997612652939421,
+ "grad_norm": 2.2870388856485335,
+ "learning_rate": 8.770305383712407e-06,
+ "loss": 0.4709470748901367,
+ "memory(GiB)": 37.06,
+ "step": 670,
+ "token_acc": 0.842337607735968,
+ "train_speed(iter/s)": 0.154453
+ },
+ {
+ "epoch": 0.8057296329453895,
+ "grad_norm": 2.3046312751781866,
+ "learning_rate": 8.748594715321512e-06,
+ "loss": 0.44265017509460447,
1420
+ "memory(GiB)": 37.06,
1421
+ "step": 675,
1422
+ "token_acc": 0.8602195071443363,
1423
+ "train_speed(iter/s)": 0.154677
1424
+ },
1425
+ {
1426
+ "epoch": 0.8116980005968367,
1427
+ "grad_norm": 2.2464744707673985,
1428
+ "learning_rate": 8.726721443424409e-06,
1429
+ "loss": 0.4592324733734131,
1430
+ "memory(GiB)": 37.06,
1431
+ "step": 680,
1432
+ "token_acc": 0.8654945054945055,
1433
+ "train_speed(iter/s)": 0.154905
1434
+ },
1435
+ {
1436
+ "epoch": 0.8176663682482841,
1437
+ "grad_norm": 2.194092144648434,
1438
+ "learning_rate": 8.704686516822177e-06,
1439
+ "loss": 0.43160429000854494,
1440
+ "memory(GiB)": 37.06,
1441
+ "step": 685,
1442
+ "token_acc": 0.8649193548387096,
1443
+ "train_speed(iter/s)": 0.155078
1444
+ },
1445
+ {
1446
+ "epoch": 0.8236347358997315,
1447
+ "grad_norm": 2.247411516392796,
1448
+ "learning_rate": 8.682490891328016e-06,
1449
+ "loss": 0.45626983642578123,
1450
+ "memory(GiB)": 37.06,
1451
+ "step": 690,
1452
+ "token_acc": 0.8643364928909952,
1453
+ "train_speed(iter/s)": 0.155279
1454
+ },
1455
+ {
1456
+ "epoch": 0.8296031035511787,
1457
+ "grad_norm": 2.035754411138357,
1458
+ "learning_rate": 8.660135529725799e-06,
1459
+ "loss": 0.4315452575683594,
1460
+ "memory(GiB)": 37.06,
1461
+ "step": 695,
1462
+ "token_acc": 0.8554044380816035,
1463
+ "train_speed(iter/s)": 0.155502
1464
+ },
1465
+ {
1466
+ "epoch": 0.8355714712026261,
1467
+ "grad_norm": 2.292286762424394,
1468
+ "learning_rate": 8.6376214017283e-06,
1469
+ "loss": 0.4535685539245605,
1470
+ "memory(GiB)": 37.06,
1471
+ "step": 700,
1472
+ "token_acc": 0.833079268292683,
1473
+ "train_speed(iter/s)": 0.155636
1474
+ },
1475
+ {
1476
+ "epoch": 0.8355714712026261,
1477
+ "eval_loss": 0.4100053906440735,
1478
+ "eval_runtime": 10.9163,
1479
+ "eval_samples_per_second": 24.642,
1480
+ "eval_steps_per_second": 3.115,
1481
+ "eval_token_acc": 0.8548802542883762,
1482
+ "step": 700
1483
+ },
1484
+ {
1485
+ "epoch": 0.8415398388540735,
1486
+ "grad_norm": 2.6314360636405714,
1487
+ "learning_rate": 8.61494948393513e-06,
1488
+ "loss": 0.4539949417114258,
1489
+ "memory(GiB)": 37.06,
1490
+ "step": 705,
1491
+ "token_acc": 0.8583042973286876,
1492
+ "train_speed(iter/s)": 0.146478
1493
+ },
1494
+ {
1495
+ "epoch": 0.8475082065055207,
1496
+ "grad_norm": 2.1848010999728715,
1497
+ "learning_rate": 8.592120759790383e-06,
1498
+ "loss": 0.46171207427978517,
1499
+ "memory(GiB)": 37.06,
1500
+ "step": 710,
1501
+ "token_acc": 0.8417105263157895,
1502
+ "train_speed(iter/s)": 0.146671
1503
+ },
1504
+ {
1505
+ "epoch": 0.8534765741569681,
1506
+ "grad_norm": 2.447774461275868,
1507
+ "learning_rate": 8.56913621953997e-06,
1508
+ "loss": 0.4798592567443848,
1509
+ "memory(GiB)": 37.06,
1510
+ "step": 715,
1511
+ "token_acc": 0.8562048588312541,
1512
+ "train_speed(iter/s)": 0.146953
1513
+ },
1514
+ {
1515
+ "epoch": 0.8594449418084154,
1516
+ "grad_norm": 2.596951485691162,
1517
+ "learning_rate": 8.545996860188668e-06,
1518
+ "loss": 0.4231537342071533,
1519
+ "memory(GiB)": 37.06,
1520
+ "step": 720,
1521
+ "token_acc": 0.831799700406591,
1522
+ "train_speed(iter/s)": 0.147232
1523
+ },
1524
+ {
1525
+ "epoch": 0.8654133094598627,
1526
+ "grad_norm": 2.0232163854750027,
1527
+ "learning_rate": 8.522703685456866e-06,
1528
+ "loss": 0.44301156997680663,
1529
+ "memory(GiB)": 37.06,
1530
+ "step": 725,
1531
+ "token_acc": 0.8794139744552968,
1532
+ "train_speed(iter/s)": 0.1475
1533
+ },
1534
+ {
1535
+ "epoch": 0.8713816771113101,
1536
+ "grad_norm": 2.281907577430269,
1537
+ "learning_rate": 8.49925770573704e-06,
1538
+ "loss": 0.46319947242736814,
1539
+ "memory(GiB)": 37.06,
1540
+ "step": 730,
1541
+ "token_acc": 0.8430570505920344,
1542
+ "train_speed(iter/s)": 0.147765
1543
+ },
1544
+ {
1545
+ "epoch": 0.8773500447627574,
1546
+ "grad_norm": 2.190179810988922,
1547
+ "learning_rate": 8.475659938049912e-06,
1548
+ "loss": 0.4825079917907715,
1549
+ "memory(GiB)": 37.06,
1550
+ "step": 735,
1551
+ "token_acc": 0.839588377723971,
1552
+ "train_speed(iter/s)": 0.147996
1553
+ },
1554
+ {
1555
+ "epoch": 0.8833184124142047,
1556
+ "grad_norm": 2.014804370593861,
1557
+ "learning_rate": 8.45191140600034e-06,
1558
+ "loss": 0.454302978515625,
1559
+ "memory(GiB)": 37.06,
1560
+ "step": 740,
1561
+ "token_acc": 0.8007774538386784,
1562
+ "train_speed(iter/s)": 0.148279
1563
+ },
1564
+ {
1565
+ "epoch": 0.8892867800656521,
1566
+ "grad_norm": 2.1256355584342077,
1567
+ "learning_rate": 8.42801313973292e-06,
1568
+ "loss": 0.4445801258087158,
1569
+ "memory(GiB)": 37.06,
1570
+ "step": 745,
1571
+ "token_acc": 0.846286205907657,
1572
+ "train_speed(iter/s)": 0.148536
1573
+ },
1574
+ {
1575
+ "epoch": 0.8952551477170994,
1576
+ "grad_norm": 2.6544295779283575,
1577
+ "learning_rate": 8.403966175887293e-06,
1578
+ "loss": 0.4630784511566162,
1579
+ "memory(GiB)": 37.06,
1580
+ "step": 750,
1581
+ "token_acc": 0.8537764350453172,
1582
+ "train_speed(iter/s)": 0.148704
1583
+ },
1584
+ {
1585
+ "epoch": 0.9012235153685467,
1586
+ "grad_norm": 2.4745309667627255,
1587
+ "learning_rate": 8.379771557553184e-06,
1588
+ "loss": 0.43903446197509766,
1589
+ "memory(GiB)": 37.06,
1590
+ "step": 755,
1591
+ "token_acc": 0.8682237600922722,
1592
+ "train_speed(iter/s)": 0.148945
1593
+ },
1594
+ {
1595
+ "epoch": 0.907191883019994,
1596
+ "grad_norm": 2.167884085714607,
1597
+ "learning_rate": 8.355430334225159e-06,
1598
+ "loss": 0.445455265045166,
1599
+ "memory(GiB)": 37.06,
1600
+ "step": 760,
1601
+ "token_acc": 0.852589641434263,
1602
+ "train_speed(iter/s)": 0.149189
1603
+ },
1604
+ {
1605
+ "epoch": 0.9131602506714414,
1606
+ "grad_norm": 2.3516013470748116,
1607
+ "learning_rate": 8.330943561757092e-06,
1608
+ "loss": 0.44769630432128904,
1609
+ "memory(GiB)": 37.06,
1610
+ "step": 765,
1611
+ "token_acc": 0.8217955651703623,
1612
+ "train_speed(iter/s)": 0.149338
1613
+ },
1614
+ {
1615
+ "epoch": 0.9191286183228887,
1616
+ "grad_norm": 2.0619205640970506,
1617
+ "learning_rate": 8.30631230231637e-06,
1618
+ "loss": 0.46817874908447266,
1619
+ "memory(GiB)": 37.06,
1620
+ "step": 770,
1621
+ "token_acc": 0.8363870967741935,
1622
+ "train_speed(iter/s)": 0.149487
1623
+ },
1624
+ {
1625
+ "epoch": 0.925096985974336,
1626
+ "grad_norm": 2.3440589362137993,
1627
+ "learning_rate": 8.281537624337823e-06,
1628
+ "loss": 0.4982964038848877,
1629
+ "memory(GiB)": 37.06,
1630
+ "step": 775,
1631
+ "token_acc": 0.8594432314410481,
1632
+ "train_speed(iter/s)": 0.149779
1633
+ },
1634
+ {
1635
+ "epoch": 0.9310653536257834,
1636
+ "grad_norm": 2.0757541904974097,
1637
+ "learning_rate": 8.256620602477372e-06,
1638
+ "loss": 0.4509378433227539,
1639
+ "memory(GiB)": 37.06,
1640
+ "step": 780,
1641
+ "token_acc": 0.8259721555448872,
1642
+ "train_speed(iter/s)": 0.149971
1643
+ },
1644
+ {
1645
+ "epoch": 0.9370337212772307,
1646
+ "grad_norm": 2.086378932611534,
1647
+ "learning_rate": 8.231562317565412e-06,
1648
+ "loss": 0.43694629669189455,
1649
+ "memory(GiB)": 37.06,
1650
+ "step": 785,
1651
+ "token_acc": 0.856384262611634,
1652
+ "train_speed(iter/s)": 0.150204
1653
+ },
1654
+ {
1655
+ "epoch": 0.943002088928678,
1656
+ "grad_norm": 2.308538899901496,
1657
+ "learning_rate": 8.206363856559935e-06,
1658
+ "loss": 0.4430408477783203,
1659
+ "memory(GiB)": 37.06,
1660
+ "step": 790,
1661
+ "token_acc": 0.8422222222222222,
1662
+ "train_speed(iter/s)": 0.15035
1663
+ },
1664
+ {
1665
+ "epoch": 0.9489704565801254,
1666
+ "grad_norm": 1.8314796079076852,
1667
+ "learning_rate": 8.181026312499383e-06,
1668
+ "loss": 0.44437146186828613,
1669
+ "memory(GiB)": 37.06,
1670
+ "step": 795,
1671
+ "token_acc": 0.8529804865009356,
1672
+ "train_speed(iter/s)": 0.150549
1673
+ },
1674
+ {
1675
+ "epoch": 0.9549388242315726,
1676
+ "grad_norm": 2.2397424826021792,
1677
+ "learning_rate": 8.155550784455224e-06,
1678
+ "loss": 0.4815809726715088,
1679
+ "memory(GiB)": 37.06,
1680
+ "step": 800,
1681
+ "token_acc": 0.8588266107909901,
1682
+ "train_speed(iter/s)": 0.150753
1683
+ },
1684
+ {
1685
+ "epoch": 0.9549388242315726,
1686
+ "eval_loss": 0.4058806300163269,
1687
+ "eval_runtime": 11.0737,
1688
+ "eval_samples_per_second": 24.292,
1689
+ "eval_steps_per_second": 3.07,
1690
+ "eval_token_acc": 0.8572368060503096,
1691
+ "step": 800
1692
+ }
1693
+ ],
1694
+ "logging_steps": 5,
1695
+ "max_steps": 2511,
1696
+ "num_input_tokens_seen": 0,
1697
+ "num_train_epochs": 3,
1698
+ "save_steps": 100,
1699
+ "stateful_callbacks": {
1700
+ "TrainerControl": {
1701
+ "args": {
1702
+ "should_epoch_stop": false,
1703
+ "should_evaluate": false,
1704
+ "should_log": false,
1705
+ "should_save": true,
1706
+ "should_training_stop": false
1707
+ },
1708
+ "attributes": {}
1709
+ }
1710
+ },
1711
+ "total_flos": 88119181914112.0,
1712
+ "train_batch_size": 1,
1713
+ "trial_name": null,
1714
+ "trial_params": null
1715
+ }
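The log entries above follow the standard Hugging Face Trainer log_history layout: train records carry loss and token_acc, while the periodic eval records (steps 600, 700, 800 above) carry eval_loss instead. A minimal sketch of inspecting the file after download, assuming it is saved under its usual name trainer_state.json (the local path is an assumption, not part of this commit):

    import json

    # Load the trainer state written by the Hugging Face Trainer.
    with open("trainer_state.json") as f:
        state = json.load(f)

    # Eval records are distinguished by the presence of "eval_loss".
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"max_steps={state['max_steps']}, train points logged={len(train_log)}")
    for e in eval_log:
        print(f"step {e['step']}: eval_loss={e['eval_loss']:.4f}, "
              f"eval_token_acc={e['eval_token_acc']:.4f}")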
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:65baf90d8de25fc3cb2058e1aed844427e0cd325ca6a45cd1651b39d2f04e9ee
3
+ size 8120
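training_args.bin is stored as a Git LFS pointer; the 8 KB payload it resolves to is, by Hugging Face Trainer convention, a pickled TrainingArguments object. A hedged sketch of reading it back (transformers must be installed so the class can be unpickled, and recent PyTorch requires weights_only=False for pickled objects):

    import torch

    # training_args.bin holds a pickled TrainingArguments instance, so the
    # full object (not just tensors) must be allowed through torch.load.
    args = torch.load("training_args.bin", weights_only=False)
    print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)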
vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
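vocab.json holds the tokenizer's token-to-id map. Since the diff is unrendered here, a quick sanity check after download (the exact entry count is not asserted):

    import json

    # The BPE vocabulary is a flat {token_string: token_id} mapping.
    with open("vocab.json") as f:
        vocab = json.load(f)
    print(len(vocab))           # number of base vocabulary entries
    print(vocab.get("hello"))   # id of a common token, if present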
zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict
42
+ param_shapes: dict
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict
46
+ frozen_param_fragments: dict
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
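A quick illustration of why natural sorting matters for shard files: with plain lexicographic order, rank 10 would sort before rank 2. The file names below are made up for the demo:

    files = ["zero_pp_rank_10_mp_rank_00_optim_states.pt",
             "zero_pp_rank_2_mp_rank_00_optim_states.pt"]
    print(sorted(files))                    # lexicographic: ..._10_... first
    print(sorted(files, key=natural_keys))  # natural order: ..._2_... first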
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
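Taken together, the helpers above expect the tag folder to look roughly like this for a 2-rank ZeRO-3 run (file names are assumptions inferred from get_model_state_file and the glob patterns, not read from this checkpoint):

    global_step800/
        zero_pp_rank_0_mp_rank_00_model_states.pt
        zero_pp_rank_1_mp_rank_00_model_states.pt
        zero_pp_rank_0_mp_rank_00_optim_states.pt
        zero_pp_rank_1_mp_rank_00_optim_states.pt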
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the two potentially huge optimizer states, as we only care about the fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # an out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
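A worked example of the partition arithmetic above: a 10-element parameter sharded across 4 ranks gives ceil(10/4) = 3 elements per rank, and the last fragment carries 4 - (10 % 4) = 2 elements of padding, so 4 * 3 = 10 + 2:

    partitioned_numel, padding_numel = zero3_partitioned_param_info(10, world_size=4)
    assert (partitioned_numel, padding_numel) == (3, 2)
    assert 4 * partitioned_numel == 10 + padding_numel  # padded total across ranks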
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
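A minimal end-to-end sketch of the lazy-gather idea, runnable with the definitions above in scope (torch is already imported at the top of the file): a 5-element parameter split across two ranks with one element of padding is reassembled by contiguous():

    # world_size=2, one param group; each rank holds ceil(5/2) = 3 numels.
    full = torch.arange(5, dtype=torch.float32)
    padded = torch.cat([full, torch.zeros(1)])   # pad 5 -> 6 = 2 ranks * 3
    flat_groups = [[padded[:3]], [padded[3:]]]   # per-rank flat fp32 groups
    flat_groups_offset = [0, 3]                  # [0] + cumulative group numels

    t = GatheredTensor(flat_groups, flat_groups_offset, offset=0,
                       partitioned_numel=3, shape=torch.Size([5]))
    assert torch.equal(t.contiguous(), full)     # padding is sliced away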
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # an out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
491
+ exclude_frozen_parameters):
492
+ state_dict = OrderedDict()
493
+
494
+ # buffers
495
+ buffers = zero_model_states[0].buffers
496
+ state_dict.update(buffers)
497
+ if debug:
498
+ print(f"added {len(buffers)} buffers")
499
+
500
+ if not exclude_frozen_parameters:
501
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
502
+
503
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
504
+
505
+ # recover shared parameters
506
+ for pair in zero_model_states[0].shared_params:
507
+ if pair[1] in state_dict:
508
+ state_dict[pair[0]] = state_dict[pair[1]]
509
+
510
+ return state_dict
511
+
512
+
513
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
514
+ """
515
+ Convert a state_dict of GatheredTensor entries to plain torch tensors
516
+ """
517
+ torch_state_dict = {}
518
+ converted_tensors = {}
519
+ for name, tensor in state_dict.items():
520
+ tensor_id = id(tensor)
521
+ if tensor_id in converted_tensors: # shared tensors
522
+ shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
523
+ torch_state_dict[name] = shared_tensor
524
+ else:
525
+ converted_tensors[tensor_id] = name
526
+ if return_empty_tensor:
527
+ torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
528
+ else:
529
+ torch_state_dict[name] = tensor.contiguous()
530
+ return torch_state_dict
531
+
532
+
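The id()-based bookkeeping above is what keeps tied weights tied after conversion; a tiny synthetic check (not a real checkpoint), runnable with the definitions above in scope:

    w = torch.ones(2, 2)
    sd = to_torch_tensor({"embed.weight": w, "lm_head.weight": w})  # tied pair
    assert sd["embed.weight"] is sd["lm_head.weight"]  # sharing preserved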
533
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
534
+ tag=None,
535
+ exclude_frozen_parameters=False,
536
+ lazy_mode=False):
537
+ """
538
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
539
+ ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
540
+ via a model hub.
541
+
542
+ Args:
543
+ - ``checkpoint_dir``: path to the desired checkpoint folder
544
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the 'latest' file, e.g., ``global_step14``
545
+ - ``exclude_frozen_parameters``: exclude frozen parameters
546
+ - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
547
+ Convert a pseudo tensor to a torch tensor with ``.contiguous()``
548
+
549
+ Returns:
550
+ - pytorch ``state_dict``
551
+
552
+ A typical usage might be ::
553
+
554
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
555
+ # do the training and checkpoint saving
556
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu
557
+ model = model.cpu() # move to cpu
558
+ model.load_state_dict(state_dict)
559
+ # submit to model hub or save the model to share with others
560
+
561
+ In this example the ``model`` will no longer be usable in the deepspeed context of the same
562
+ application, i.e. you will need to re-initialize the deepspeed engine, since
563
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
564
+
565
+ If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
566
+
567
+ Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
568
+ You may need to use the offline approach with the ``zero_to_fp32.py`` script that is saved with
569
+ the checkpoint. Or you can load state_dict in lazy mode ::
570
+
571
+ from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
572
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu
573
+ for name, lazy_tensor in state_dict.items():
574
+ tensor = lazy_tensor.contiguous() # to cpu
575
+ print(name, tensor)
576
+ # del tensor to release memory if it is no longer in use
577
+ """
578
+ if tag is None:
579
+ latest_path = os.path.join(checkpoint_dir, 'latest')
580
+ if os.path.isfile(latest_path):
581
+ with open(latest_path, 'r') as fd:
582
+ tag = fd.read().strip()
583
+ else:
584
+ raise ValueError(f"Unable to find 'latest' file at {latest_path}")
585
+
586
+ ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
587
+
588
+ if not os.path.isdir(ds_checkpoint_dir):
589
+ raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
590
+
591
+ state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
592
+ if lazy_mode:
593
+ return state_dict
594
+ else:
595
+ return to_torch_tensor(state_dict)
596
+
597
+
598
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
599
+ output_dir,
600
+ max_shard_size="5GB",
601
+ safe_serialization=False,
602
+ tag=None,
603
+ exclude_frozen_parameters=False):
604
+ """
605
+ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
606
+ loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
607
+
608
+ Args:
609
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
610
+ - ``output_dir``: directory to the pytorch fp32 state_dict output files
611
+ - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
612
+ - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
613
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
614
+ - ``exclude_frozen_parameters``: exclude frozen parameters
615
+ """
616
+
617
+ # Dependency pre-check
618
+ if safe_serialization:
619
+ try:
620
+ from safetensors.torch import save_file
621
+ except ImportError:
622
+ print('If you want to use `safe_serialization`, please `pip install safetensors`')
623
+ raise
624
+ if max_shard_size is not None:
625
+ try:
626
+ from huggingface_hub import split_torch_state_dict_into_shards
627
+ except ImportError:
628
+ print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
629
+ raise
630
+
631
+ # Convert zero checkpoint to state_dict
632
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
633
+ tag,
634
+ exclude_frozen_parameters,
635
+ lazy_mode=True)
636
+
637
+ # Shard the model if it is too big.
638
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
639
+ if max_shard_size is not None:
640
+ filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
641
+ # a memory-efficient approach to sharding
642
+ empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
643
+ state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
644
+ filename_pattern=filename_pattern,
645
+ max_shard_size=max_shard_size)
646
+ else:
647
+ from collections import namedtuple
648
+ StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
649
+ state_dict_split = StateDictSplit(is_sharded=False,
650
+ filename_to_tensors={weights_name: list(state_dict.keys())})
651
+
652
+ # Save the model by shard
653
+ os.makedirs(output_dir, exist_ok=True)
654
+ filename_to_tensors = state_dict_split.filename_to_tensors.items()
655
+ for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
656
+ shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
657
+ shard_state_dict = to_torch_tensor(shard_state_dict)
658
+ output_path = os.path.join(output_dir, shard_file)
659
+ if safe_serialization:
660
+ save_file(shard_state_dict, output_path, metadata={"format": "pt"})
661
+ else:
662
+ torch.save(shard_state_dict, output_path)
663
+ # release the memory of current shard
664
+ for tensor_name in list(shard_state_dict.keys()):
665
+ del state_dict[tensor_name]
666
+ del shard_state_dict[tensor_name]
667
+ del shard_state_dict
668
+ gc.collect()
669
+
670
+ # Save index if sharded
671
+ if state_dict_split.is_sharded:
672
+ index = {
673
+ "metadata": state_dict_split.metadata,
674
+ "weight_map": state_dict_split.tensor_to_filename,
675
+ }
676
+ save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
677
+ save_index_file = os.path.join(output_dir, save_index_file)
678
+ with open(save_index_file, "w", encoding="utf-8") as f:
679
+ content = json.dumps(index, indent=2, sort_keys=True) + "\n"
680
+ f.write(content)
681
+
682
+
683
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
684
+ """
685
+ 1. Move the provided model to CPU
686
+ 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
687
+ 3. Load it into the provided model
688
+
689
+ Args:
690
+ - ``model``: the model object to update
691
+ - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
692
+ - ``tag``: checkpoint tag used as a unique identifier for the checkpoint. If not provided, the tag is read from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
693
+
694
+ Returns:
695
+ - ``model``: the modified model
696
+
697
+ Make sure you have plenty of CPU memory available before you call this function. If you don't
698
+ have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
699
+ conveniently placed for you in the checkpoint folder.
700
+
701
+ A typical usage might be ::
702
+
703
+ from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
704
+ model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
705
+ # submit to model hub or save the model to share with others
706
+
707
+ Note that once this has run, the ``model`` will no longer be usable in the deepspeed context
708
+ of the same application, i.e. you will need to re-initialize the deepspeed engine, since
709
+ ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
710
+
711
+ """
712
+ logger.info(f"Extracting fp32 weights")
713
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
714
+
715
+ logger.info(f"Overwriting model with fp32 weights")
716
+ model = model.cpu()
717
+ model.load_state_dict(state_dict, strict=False)
718
+
719
+ return model
720
+
721
+
722
+ if __name__ == "__main__":
723
+ parser = argparse.ArgumentParser()
724
+ parser.add_argument("checkpoint_dir",
725
+ type=str,
726
+ help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
727
+ parser.add_argument("output_dir",
728
+ type=str,
729
+ help="directory to the pytorch fp32 state_dict output files "
730
+ "(e.g. path/checkpoint-12-output/)")
731
+ parser.add_argument(
732
+ "--max_shard_size",
733
+ type=str,
734
+ default="5GB",
735
+ help="The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size "
736
+ "lower than this size. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
737
+ "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
738
+ "without CPU OOM issues.")
739
+ parser.add_argument(
740
+ "--safe_serialization",
741
+ default=False,
742
+ action='store_true',
743
+ help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
744
+ parser.add_argument("-t",
745
+ "--tag",
746
+ type=str,
747
+ default=None,
748
+ help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
749
+ parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
750
+ parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
751
+ args = parser.parse_args()
752
+
753
+ debug = args.debug
754
+
755
+ convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
756
+ args.output_dir,
757
+ max_shard_size=args.max_shard_size,
758
+ safe_serialization=args.safe_serialization,
759
+ tag=args.tag,
760
+ exclude_frozen_parameters=args.exclude_frozen_parameters)
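The same conversion can also be driven from Python instead of the CLI; a hedged example in which the paths are hypothetical, chosen to match the save_steps=100, step-800 checkpoint logged above:

    from zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict

    # Consolidate the ZeRO shards under checkpoint-800 into sharded
    # safetensors files that load without DeepSpeed.
    convert_zero_checkpoint_to_fp32_state_dict(
        "output/checkpoint-800",        # contains the global_step* tag folder
        "output/checkpoint-800-fp32",   # destination for model.safetensors* shards
        max_shard_size="5GB",
        safe_serialization=True,
    )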