{
  "best_global_step": 10000,
  "best_metric": 63.44155467990254,
  "best_model_checkpoint": "nllb-200-600M-dzo-eng-30k-checkpoints/checkpoint-10000",
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 12500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "grad_norm": 0.2576698958873749,
      "learning_rate": 2.88024e-05,
      "loss": 0.0753,
      "step": 500
    },
    {
      "epoch": 0.4,
      "grad_norm": 0.19992510974407196,
      "learning_rate": 2.7602400000000002e-05,
      "loss": 0.0735,
      "step": 1000
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.2977311909198761,
      "learning_rate": 2.64024e-05,
      "loss": 0.0713,
      "step": 1500
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.21389248967170715,
      "learning_rate": 2.52024e-05,
      "loss": 0.071,
      "step": 2000
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.2741245627403259,
      "learning_rate": 2.4002400000000002e-05,
      "loss": 0.0688,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_bleu": 62.03431583845565,
      "eval_loss": 0.06440743803977966,
      "eval_runtime": 243.3832,
      "eval_samples_per_second": 6.163,
      "eval_steps_per_second": 1.541,
      "step": 2500
    },
    {
      "epoch": 1.2,
      "grad_norm": 0.24694935977458954,
      "learning_rate": 2.28024e-05,
      "loss": 0.0553,
      "step": 3000
    },
    {
      "epoch": 1.4,
      "grad_norm": 0.26646921038627625,
      "learning_rate": 2.1602400000000002e-05,
      "loss": 0.0563,
      "step": 3500
    },
    {
      "epoch": 1.6,
      "grad_norm": 0.25563061237335205,
      "learning_rate": 2.04024e-05,
      "loss": 0.0538,
      "step": 4000
    },
    {
      "epoch": 1.8,
      "grad_norm": 0.25169041752815247,
      "learning_rate": 1.92024e-05,
      "loss": 0.0546,
      "step": 4500
    },
    {
      "epoch": 2.0,
      "grad_norm": 0.2201855629682541,
      "learning_rate": 1.80024e-05,
      "loss": 0.0543,
      "step": 5000
    },
    {
      "epoch": 2.0,
      "eval_bleu": 62.84538472555695,
      "eval_loss": 0.06339649111032486,
      "eval_runtime": 245.6581,
      "eval_samples_per_second": 6.106,
      "eval_steps_per_second": 1.527,
      "step": 5000
    },
    {
      "epoch": 2.2,
      "grad_norm": 0.20503699779510498,
      "learning_rate": 1.68024e-05,
      "loss": 0.0436,
      "step": 5500
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.18902115523815155,
      "learning_rate": 1.56024e-05,
      "loss": 0.0444,
      "step": 6000
    },
    {
      "epoch": 2.6,
      "grad_norm": 0.39264440536499023,
      "learning_rate": 1.4402400000000001e-05,
      "loss": 0.0434,
      "step": 6500
    },
    {
      "epoch": 2.8,
      "grad_norm": 0.22416116297245026,
      "learning_rate": 1.3202400000000001e-05,
      "loss": 0.0443,
      "step": 7000
    },
    {
      "epoch": 3.0,
      "grad_norm": 0.2807520925998688,
      "learning_rate": 1.20024e-05,
      "loss": 0.0444,
      "step": 7500
    },
    {
      "epoch": 3.0,
      "eval_bleu": 63.39396750181061,
      "eval_loss": 0.06433024257421494,
      "eval_runtime": 245.8516,
      "eval_samples_per_second": 6.101,
      "eval_steps_per_second": 1.525,
      "step": 7500
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.2532879710197449,
      "learning_rate": 1.08024e-05,
      "loss": 0.0371,
      "step": 8000
    },
    {
      "epoch": 3.4,
      "grad_norm": 0.20367200672626495,
      "learning_rate": 9.6024e-06,
      "loss": 0.036,
      "step": 8500
    },
    {
      "epoch": 3.6,
      "grad_norm": 0.24893485009670258,
      "learning_rate": 8.4024e-06,
      "loss": 0.0364,
      "step": 9000
    },
    {
      "epoch": 3.8,
      "grad_norm": 0.27603277564048767,
      "learning_rate": 7.2024e-06,
      "loss": 0.0366,
      "step": 9500
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.21856920421123505,
      "learning_rate": 6.0024e-06,
      "loss": 0.0377,
      "step": 10000
    },
    {
      "epoch": 4.0,
      "eval_bleu": 63.44155467990254,
      "eval_loss": 0.06584766507148743,
      "eval_runtime": 246.2205,
      "eval_samples_per_second": 6.092,
      "eval_steps_per_second": 1.523,
      "step": 10000
    },
    {
      "epoch": 4.2,
      "grad_norm": 0.21814288198947906,
      "learning_rate": 4.8024e-06,
      "loss": 0.0328,
      "step": 10500
    },
    {
      "epoch": 4.4,
      "grad_norm": 0.20107074081897736,
      "learning_rate": 3.6024000000000004e-06,
      "loss": 0.0321,
      "step": 11000
    },
    {
      "epoch": 4.6,
      "grad_norm": 0.2721525728702545,
      "learning_rate": 2.4024e-06,
      "loss": 0.0323,
      "step": 11500
    },
    {
      "epoch": 4.8,
      "grad_norm": 0.33091649413108826,
      "learning_rate": 1.2024e-06,
      "loss": 0.0329,
      "step": 12000
    },
    {
      "epoch": 5.0,
      "grad_norm": 0.2466353476047516,
      "learning_rate": 2.4000000000000004e-09,
      "loss": 0.0315,
      "step": 12500
    },
    {
      "epoch": 5.0,
      "eval_bleu": 63.062347513148154,
      "eval_loss": 0.06781521439552307,
      "eval_runtime": 251.1652,
      "eval_samples_per_second": 5.972,
      "eval_steps_per_second": 1.493,
      "step": 12500
    },
    {
      "epoch": 5.0,
      "step": 12500,
      "total_flos": 2.70888075264e+16,
      "train_loss": 0.04798975685119629,
      "train_runtime": 11153.1781,
      "train_samples_per_second": 8.966,
      "train_steps_per_second": 1.121
    }
  ],
  "logging_steps": 500,
  "max_steps": 12500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.70888075264e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}