{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.999398677089597,
"eval_steps": 500,
"global_step": 7482,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.20044097013429546,
"grad_norm": 6.546669006347656,
"learning_rate": 0.0001335113484646195,
"loss": 4.9792,
"step": 500
},
{
"epoch": 0.4008819402685909,
"grad_norm": 12.99987506866455,
"learning_rate": 0.00019254418535571068,
"loss": 0.9756,
"step": 1000
},
{
"epoch": 0.6013229104028863,
"grad_norm": 5.031010627746582,
"learning_rate": 0.00017769196494875984,
"loss": 0.7285,
"step": 1500
},
{
"epoch": 0.8017638805371818,
"grad_norm": 5.174665451049805,
"learning_rate": 0.000162839744541809,
"loss": 0.6145,
"step": 2000
},
{
"epoch": 0.9997995590298657,
"eval_cer": 0.06818140975752038,
"eval_loss": 0.2314606010913849,
"eval_runtime": 598.1802,
"eval_samples_per_second": 59.596,
"eval_steps_per_second": 7.451,
"eval_wer": 0.08141925042668663,
"step": 2494
},
{
"epoch": 1.0022048506714774,
"grad_norm": 1.5678229331970215,
"learning_rate": 0.00014798752413485816,
"loss": 0.5526,
"step": 2500
},
{
"epoch": 1.2026458208057726,
"grad_norm": 1.3437103033065796,
"learning_rate": 0.00013313530372790732,
"loss": 0.4812,
"step": 3000
},
{
"epoch": 1.4030867909400682,
"grad_norm": 1.1184632778167725,
"learning_rate": 0.00011828308332095648,
"loss": 0.4596,
"step": 3500
},
{
"epoch": 1.6035277610743637,
"grad_norm": 1.1852930784225464,
"learning_rate": 0.00010343086291400565,
"loss": 0.4337,
"step": 4000
},
{
"epoch": 1.8039687312086592,
"grad_norm": 1.735134243965149,
"learning_rate": 8.857864250705481e-05,
"loss": 0.4098,
"step": 4500
},
{
"epoch": 2.0,
"eval_cer": 0.056794456555446654,
"eval_loss": 0.17900477349758148,
"eval_runtime": 419.809,
"eval_samples_per_second": 84.917,
"eval_steps_per_second": 10.617,
"eval_wer": 0.06738346920647913,
"step": 4989
},
{
"epoch": 2.0044097013429547,
"grad_norm": 0.9114906191825867,
"learning_rate": 7.372642210010397e-05,
"loss": 0.3887,
"step": 5000
},
{
"epoch": 2.20485067147725,
"grad_norm": 1.5065760612487793,
"learning_rate": 5.887420169315313e-05,
"loss": 0.3298,
"step": 5500
},
{
"epoch": 2.4052916416115453,
"grad_norm": 1.4358354806900024,
"learning_rate": 4.402198128620229e-05,
"loss": 0.3172,
"step": 6000
},
{
"epoch": 2.605732611745841,
"grad_norm": 0.6124329566955566,
"learning_rate": 2.9169760879251452e-05,
"loss": 0.3094,
"step": 6500
},
{
"epoch": 2.8061735818801363,
"grad_norm": 1.8411574363708496,
"learning_rate": 1.4317540472300609e-05,
"loss": 0.2986,
"step": 7000
},
{
"epoch": 2.999398677089597,
"eval_cer": 0.04912028966875449,
"eval_loss": 0.14919918775558472,
"eval_runtime": 448.6832,
"eval_samples_per_second": 79.453,
"eval_steps_per_second": 9.934,
"eval_wer": 0.0574731780364509,
"step": 7482
},
{
"epoch": 2.999398677089597,
"step": 7482,
"total_flos": 4.06242496282012e+19,
"train_loss": 0.7722781848474098,
"train_runtime": 4057.7267,
"train_samples_per_second": 59.015,
"train_steps_per_second": 1.844
}
],
"logging_steps": 500,
"max_steps": 7482,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.06242496282012e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}