{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 1682,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0011890606420927466,
"grad_norm": 115.31934356689453,
"learning_rate": 4.9970273483947685e-05,
"loss": 9.3546,
"step": 1
},
{
"epoch": 0.034482758620689655,
"grad_norm": 18.599306106567383,
"learning_rate": 4.913793103448276e-05,
"loss": 2.4091,
"step": 29
},
{
"epoch": 0.06896551724137931,
"grad_norm": 12.393567085266113,
"learning_rate": 4.827586206896552e-05,
"loss": 1.1241,
"step": 58
},
{
"epoch": 0.10344827586206896,
"grad_norm": 50.094051361083984,
"learning_rate": 4.741379310344828e-05,
"loss": 1.0125,
"step": 87
},
{
"epoch": 0.13793103448275862,
"grad_norm": 39.95038986206055,
"learning_rate": 4.655172413793104e-05,
"loss": 0.9273,
"step": 116
},
{
"epoch": 0.1724137931034483,
"grad_norm": 13.281441688537598,
"learning_rate": 4.5689655172413794e-05,
"loss": 0.8734,
"step": 145
},
{
"epoch": 0.20689655172413793,
"grad_norm": 29.820255279541016,
"learning_rate": 4.482758620689655e-05,
"loss": 0.9182,
"step": 174
},
{
"epoch": 0.2413793103448276,
"grad_norm": 14.038665771484375,
"learning_rate": 4.396551724137931e-05,
"loss": 0.9129,
"step": 203
},
{
"epoch": 0.27586206896551724,
"grad_norm": 13.406801223754883,
"learning_rate": 4.3103448275862066e-05,
"loss": 0.7973,
"step": 232
},
{
"epoch": 0.3103448275862069,
"grad_norm": 15.321793556213379,
"learning_rate": 4.224137931034483e-05,
"loss": 0.8415,
"step": 261
},
{
"epoch": 0.3448275862068966,
"grad_norm": 27.121288299560547,
"learning_rate": 4.1379310344827587e-05,
"loss": 0.7995,
"step": 290
},
{
"epoch": 0.3793103448275862,
"grad_norm": 36.715858459472656,
"learning_rate": 4.0517241379310344e-05,
"loss": 0.6301,
"step": 319
},
{
"epoch": 0.41379310344827586,
"grad_norm": 16.650150299072266,
"learning_rate": 3.965517241379311e-05,
"loss": 0.5675,
"step": 348
},
{
"epoch": 0.4482758620689655,
"grad_norm": 230.8839569091797,
"learning_rate": 3.8793103448275865e-05,
"loss": 0.6154,
"step": 377
},
{
"epoch": 0.4827586206896552,
"grad_norm": 6.414034366607666,
"learning_rate": 3.793103448275862e-05,
"loss": 0.6439,
"step": 406
},
{
"epoch": 0.5172413793103449,
"grad_norm": 9.441433906555176,
"learning_rate": 3.7068965517241385e-05,
"loss": 0.5776,
"step": 435
},
{
"epoch": 0.5517241379310345,
"grad_norm": 23.88970947265625,
"learning_rate": 3.620689655172414e-05,
"loss": 0.6126,
"step": 464
},
{
"epoch": 0.5862068965517241,
"grad_norm": 10.72873592376709,
"learning_rate": 3.53448275862069e-05,
"loss": 0.529,
"step": 493
},
{
"epoch": 0.6206896551724138,
"grad_norm": 22.285120010375977,
"learning_rate": 3.4482758620689657e-05,
"loss": 0.5418,
"step": 522
},
{
"epoch": 0.6551724137931034,
"grad_norm": 28.53759002685547,
"learning_rate": 3.3620689655172414e-05,
"loss": 0.4864,
"step": 551
},
{
"epoch": 0.6896551724137931,
"grad_norm": 15.215431213378906,
"learning_rate": 3.275862068965517e-05,
"loss": 0.5474,
"step": 580
},
{
"epoch": 0.7241379310344828,
"grad_norm": 5.726695537567139,
"learning_rate": 3.1896551724137935e-05,
"loss": 0.4492,
"step": 609
},
{
"epoch": 0.7586206896551724,
"grad_norm": 20.181598663330078,
"learning_rate": 3.103448275862069e-05,
"loss": 0.4925,
"step": 638
},
{
"epoch": 0.7931034482758621,
"grad_norm": 16.79625701904297,
"learning_rate": 3.017241379310345e-05,
"loss": 0.4405,
"step": 667
},
{
"epoch": 0.8275862068965517,
"grad_norm": 5.420645236968994,
"learning_rate": 2.9310344827586206e-05,
"loss": 0.5411,
"step": 696
},
{
"epoch": 0.8620689655172413,
"grad_norm": 6.218174457550049,
"learning_rate": 2.844827586206897e-05,
"loss": 0.5516,
"step": 725
},
{
"epoch": 0.896551724137931,
"grad_norm": 27.71928596496582,
"learning_rate": 2.7586206896551727e-05,
"loss": 0.4341,
"step": 754
},
{
"epoch": 0.9310344827586207,
"grad_norm": 31.43971824645996,
"learning_rate": 2.672413793103448e-05,
"loss": 0.3883,
"step": 783
},
{
"epoch": 0.9655172413793104,
"grad_norm": 7.160225868225098,
"learning_rate": 2.5862068965517244e-05,
"loss": 0.4214,
"step": 812
},
{
"epoch": 1.0,
"grad_norm": 7.139071464538574,
"learning_rate": 2.5e-05,
"loss": 0.3728,
"step": 841
},
{
"epoch": 1.0,
"eval_cer": 0.019845431962462048,
"eval_loss": 0.2980102300643921,
"eval_runtime": 887.0299,
"eval_samples_per_second": 1.896,
"eval_steps_per_second": 0.238,
"step": 841
},
{
"epoch": 1.0344827586206897,
"grad_norm": 14.740863800048828,
"learning_rate": 2.413793103448276e-05,
"loss": 0.3343,
"step": 870
},
{
"epoch": 1.0689655172413792,
"grad_norm": 22.295618057250977,
"learning_rate": 2.327586206896552e-05,
"loss": 0.3419,
"step": 899
},
{
"epoch": 1.103448275862069,
"grad_norm": 2.4551053047180176,
"learning_rate": 2.2413793103448276e-05,
"loss": 0.3123,
"step": 928
},
{
"epoch": 1.1379310344827587,
"grad_norm": 7.861540794372559,
"learning_rate": 2.1551724137931033e-05,
"loss": 0.349,
"step": 957
},
{
"epoch": 1.1724137931034484,
"grad_norm": 7.037637233734131,
"learning_rate": 2.0689655172413793e-05,
"loss": 0.2869,
"step": 986
},
{
"epoch": 1.206896551724138,
"grad_norm": 11.390913009643555,
"learning_rate": 1.9827586206896554e-05,
"loss": 0.2826,
"step": 1015
},
{
"epoch": 1.2413793103448276,
"grad_norm": 2.8627371788024902,
"learning_rate": 1.896551724137931e-05,
"loss": 0.2573,
"step": 1044
},
{
"epoch": 1.2758620689655173,
"grad_norm": 16.846853256225586,
"learning_rate": 1.810344827586207e-05,
"loss": 0.2752,
"step": 1073
},
{
"epoch": 1.3103448275862069,
"grad_norm": 3.9017491340637207,
"learning_rate": 1.7241379310344828e-05,
"loss": 0.2555,
"step": 1102
},
{
"epoch": 1.3448275862068966,
"grad_norm": 5.074669361114502,
"learning_rate": 1.6379310344827585e-05,
"loss": 0.2452,
"step": 1131
},
{
"epoch": 1.3793103448275863,
"grad_norm": 18.69165802001953,
"learning_rate": 1.5517241379310346e-05,
"loss": 0.2666,
"step": 1160
},
{
"epoch": 1.4137931034482758,
"grad_norm": 5.539306640625,
"learning_rate": 1.4655172413793103e-05,
"loss": 0.2741,
"step": 1189
},
{
"epoch": 1.4482758620689655,
"grad_norm": 5.758066654205322,
"learning_rate": 1.3793103448275863e-05,
"loss": 0.2365,
"step": 1218
},
{
"epoch": 1.4827586206896552,
"grad_norm": 4.248748779296875,
"learning_rate": 1.2931034482758622e-05,
"loss": 0.2521,
"step": 1247
},
{
"epoch": 1.5172413793103448,
"grad_norm": 11.129036903381348,
"learning_rate": 1.206896551724138e-05,
"loss": 0.2257,
"step": 1276
},
{
"epoch": 1.5517241379310345,
"grad_norm": 2.8656809329986572,
"learning_rate": 1.1206896551724138e-05,
"loss": 0.2106,
"step": 1305
},
{
"epoch": 1.5862068965517242,
"grad_norm": 2.041245460510254,
"learning_rate": 1.0344827586206897e-05,
"loss": 0.227,
"step": 1334
},
{
"epoch": 1.6206896551724137,
"grad_norm": 1.451377034187317,
"learning_rate": 9.482758620689655e-06,
"loss": 0.1961,
"step": 1363
},
{
"epoch": 1.6551724137931034,
"grad_norm": 4.332240104675293,
"learning_rate": 8.620689655172414e-06,
"loss": 0.195,
"step": 1392
},
{
"epoch": 1.6896551724137931,
"grad_norm": 5.428757667541504,
"learning_rate": 7.758620689655173e-06,
"loss": 0.1929,
"step": 1421
},
{
"epoch": 1.7241379310344827,
"grad_norm": 2.5995798110961914,
"learning_rate": 6.896551724137932e-06,
"loss": 0.1991,
"step": 1450
},
{
"epoch": 1.7586206896551724,
"grad_norm": 2.2909860610961914,
"learning_rate": 6.03448275862069e-06,
"loss": 0.168,
"step": 1479
},
{
"epoch": 1.793103448275862,
"grad_norm": 2.049956798553467,
"learning_rate": 5.172413793103448e-06,
"loss": 0.18,
"step": 1508
},
{
"epoch": 1.8275862068965516,
"grad_norm": 2.291414737701416,
"learning_rate": 4.310344827586207e-06,
"loss": 0.1642,
"step": 1537
},
{
"epoch": 1.8620689655172413,
"grad_norm": 6.125259876251221,
"learning_rate": 3.448275862068966e-06,
"loss": 0.197,
"step": 1566
},
{
"epoch": 1.896551724137931,
"grad_norm": 2.359379291534424,
"learning_rate": 2.586206896551724e-06,
"loss": 0.1615,
"step": 1595
},
{
"epoch": 1.9310344827586206,
"grad_norm": 2.1470141410827637,
"learning_rate": 1.724137931034483e-06,
"loss": 0.1669,
"step": 1624
},
{
"epoch": 1.9655172413793105,
"grad_norm": 13.040664672851562,
"learning_rate": 8.620689655172415e-07,
"loss": 0.1641,
"step": 1653
},
{
"epoch": 2.0,
"grad_norm": 1.5605217218399048,
"learning_rate": 0.0,
"loss": 0.1551,
"step": 1682
},
{
"epoch": 2.0,
"eval_cer": 0.003201766491857577,
"eval_loss": 0.17416498064994812,
"eval_runtime": 882.6649,
"eval_samples_per_second": 1.906,
"eval_steps_per_second": 0.239,
"step": 1682
},
{
"epoch": 2.0,
"step": 1682,
"total_flos": 1.9906356553640313e+19,
"train_loss": 0.47363961749354666,
"train_runtime": 3260.3696,
"train_samples_per_second": 4.125,
"train_steps_per_second": 0.516
}
],
"logging_steps": 29,
"max_steps": 1682,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.9906356553640313e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}