{
  "best_global_step": 20,
  "best_metric": 1.0,
  "best_model_checkpoint": "/home/jupyter/wav2vec2-xls-r-300m-bfloat16-trained-test/checkpoints/checkpoint-20",
  "epoch": 4.2,
  "eval_steps": 4,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "grad_norm": 26.8810977935791,
      "learning_rate": 0.0,
      "loss": 23.4044,
      "step": 1
    },
    {
      "epoch": 0.1,
      "grad_norm": 13.647429466247559,
      "learning_rate": 6.666666666666667e-08,
      "loss": 15.7161,
      "step": 2
    },
    {
      "epoch": 0.15,
      "grad_norm": 9.556838035583496,
      "learning_rate": 1.3333333333333334e-07,
      "loss": 13.7542,
      "step": 3
    },
    {
      "epoch": 0.2,
      "grad_norm": 23.727840423583984,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 23.3498,
      "step": 4
    },
    {
      "epoch": 0.2,
      "eval_loss": 11.243566513061523,
      "eval_runtime": 5.1037,
      "eval_samples_per_second": 12.54,
      "eval_steps_per_second": 3.135,
      "eval_wer": 1.0029940119760479,
      "step": 4
    },
    {
      "epoch": 1.05,
      "grad_norm": 27.989702224731445,
      "learning_rate": 2.6666666666666667e-07,
      "loss": 28.0155,
      "step": 5
    },
    {
      "epoch": 1.1,
      "grad_norm": 11.38569450378418,
      "learning_rate": 3.3333333333333335e-07,
      "loss": 14.0151,
      "step": 6
    },
    {
      "epoch": 1.15,
      "grad_norm": 10.933781623840332,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 13.2631,
      "step": 7
    },
    {
      "epoch": 1.2,
      "grad_norm": 13.671080589294434,
      "learning_rate": 4.666666666666667e-07,
      "loss": 16.3699,
      "step": 8
    },
    {
      "epoch": 1.2,
      "eval_loss": 11.2361421585083,
      "eval_runtime": 5.3019,
      "eval_samples_per_second": 12.071,
      "eval_steps_per_second": 3.018,
      "eval_wer": 1.0029940119760479,
      "step": 8
    },
    {
      "epoch": 2.05,
      "grad_norm": 12.261064529418945,
      "learning_rate": 5.333333333333333e-07,
      "loss": 13.7451,
      "step": 9
    },
    {
      "epoch": 2.1,
      "grad_norm": 14.111751556396484,
      "learning_rate": 6.000000000000001e-07,
      "loss": 16.4979,
      "step": 10
    },
    {
      "epoch": 2.15,
      "grad_norm": 15.027251243591309,
      "learning_rate": 6.666666666666667e-07,
      "loss": 15.1573,
      "step": 11
    },
    {
      "epoch": 2.2,
      "grad_norm": 11.976419448852539,
      "learning_rate": 7.333333333333333e-07,
      "loss": 13.3081,
      "step": 12
    },
    {
      "epoch": 2.2,
      "eval_loss": 11.223762512207031,
      "eval_runtime": 5.1899,
      "eval_samples_per_second": 12.332,
      "eval_steps_per_second": 3.083,
      "eval_wer": 1.0029940119760479,
      "step": 12
    },
    {
      "epoch": 3.05,
      "grad_norm": 10.190653800964355,
      "learning_rate": 8.000000000000001e-07,
      "loss": 14.3285,
      "step": 13
    },
    {
      "epoch": 3.1,
      "grad_norm": 10.525323867797852,
      "learning_rate": 8.666666666666667e-07,
      "loss": 14.0348,
      "step": 14
    },
    {
      "epoch": 3.15,
      "grad_norm": 8.292677879333496,
      "learning_rate": 9.333333333333334e-07,
      "loss": 11.2145,
      "step": 15
    },
    {
      "epoch": 3.2,
      "grad_norm": 9.087738990783691,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 14.0511,
      "step": 16
    },
    {
      "epoch": 3.2,
      "eval_loss": 11.206008911132812,
      "eval_runtime": 5.1678,
      "eval_samples_per_second": 12.384,
      "eval_steps_per_second": 3.096,
      "eval_wer": 1.0029940119760479,
      "step": 16
    },
    {
      "epoch": 4.05,
      "grad_norm": 11.053936958312988,
      "learning_rate": 1.0666666666666667e-06,
      "loss": 13.6394,
      "step": 17
    },
    {
      "epoch": 4.1,
      "grad_norm": 12.242379188537598,
      "learning_rate": 1.1333333333333334e-06,
      "loss": 14.9119,
      "step": 18
    },
    {
      "epoch": 4.15,
      "grad_norm": 10.784377098083496,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 13.2801,
      "step": 19
    },
    {
      "epoch": 4.2,
      "grad_norm": 12.148858070373535,
      "learning_rate": 1.2666666666666667e-06,
      "loss": 13.7862,
      "step": 20
    },
    {
      "epoch": 4.2,
      "eval_loss": 11.182676315307617,
      "eval_runtime": 4.9755,
      "eval_samples_per_second": 12.863,
      "eval_steps_per_second": 3.216,
      "eval_wer": 1.0,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 4,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.224933794681436e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}