{
"best_global_step": 1000,
"best_metric": 0.7278133630752563,
"best_model_checkpoint": "/scratch/skscla001/experiments/datasets/results/whisper-medium-bigcgen-female-5hrs-62/checkpoint-1000",
"epoch": 5.373971578160059,
"eval_steps": 200,
"global_step": 1800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07479431563201197,
"grad_norm": 37.227020263671875,
"learning_rate": 4.2000000000000006e-07,
"loss": 3.586,
"step": 25
},
{
"epoch": 0.14958863126402394,
"grad_norm": 26.48501968383789,
"learning_rate": 9.200000000000001e-07,
"loss": 2.8319,
"step": 50
},
{
"epoch": 0.2243829468960359,
"grad_norm": 19.803409576416016,
"learning_rate": 1.42e-06,
"loss": 2.176,
"step": 75
},
{
"epoch": 0.2991772625280479,
"grad_norm": 22.034574508666992,
"learning_rate": 1.9200000000000003e-06,
"loss": 1.5827,
"step": 100
},
{
"epoch": 0.3739715781600598,
"grad_norm": 15.970525741577148,
"learning_rate": 2.42e-06,
"loss": 1.3234,
"step": 125
},
{
"epoch": 0.4487658937920718,
"grad_norm": 19.18708610534668,
"learning_rate": 2.9e-06,
"loss": 1.271,
"step": 150
},
{
"epoch": 0.5235602094240838,
"grad_norm": 25.591108322143555,
"learning_rate": 3.4000000000000005e-06,
"loss": 1.1523,
"step": 175
},
{
"epoch": 0.5983545250560958,
"grad_norm": 12.59317684173584,
"learning_rate": 3.900000000000001e-06,
"loss": 1.0539,
"step": 200
},
{
"epoch": 0.5983545250560958,
"eval_loss": 1.038666009902954,
"eval_runtime": 299.4494,
"eval_samples_per_second": 1.586,
"eval_steps_per_second": 0.795,
"eval_wer": 0.6708286038592508,
"step": 200
},
{
"epoch": 0.6731488406881077,
"grad_norm": 14.749096870422363,
"learning_rate": 4.4e-06,
"loss": 1.0803,
"step": 225
},
{
"epoch": 0.7479431563201197,
"grad_norm": 19.06480598449707,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.8961,
"step": 250
},
{
"epoch": 0.8227374719521316,
"grad_norm": 17.03508186340332,
"learning_rate": 5.400000000000001e-06,
"loss": 0.9066,
"step": 275
},
{
"epoch": 0.8975317875841436,
"grad_norm": 23.3944034576416,
"learning_rate": 5.9e-06,
"loss": 0.8802,
"step": 300
},
{
"epoch": 0.9723261032161555,
"grad_norm": 13.334399223327637,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.8396,
"step": 325
},
{
"epoch": 1.0448765893792071,
"grad_norm": 19.082271575927734,
"learning_rate": 6.9e-06,
"loss": 0.6707,
"step": 350
},
{
"epoch": 1.1196709050112192,
"grad_norm": 14.394790649414062,
"learning_rate": 7.4e-06,
"loss": 0.6364,
"step": 375
},
{
"epoch": 1.1944652206432311,
"grad_norm": 8.752716064453125,
"learning_rate": 7.9e-06,
"loss": 0.7105,
"step": 400
},
{
"epoch": 1.1944652206432311,
"eval_loss": 0.8432466387748718,
"eval_runtime": 288.242,
"eval_samples_per_second": 1.648,
"eval_steps_per_second": 0.826,
"eval_wer": 0.5763904653802497,
"step": 400
},
{
"epoch": 1.269259536275243,
"grad_norm": 14.430537223815918,
"learning_rate": 8.400000000000001e-06,
"loss": 0.6747,
"step": 425
},
{
"epoch": 1.3440538519072551,
"grad_norm": 9.003552436828613,
"learning_rate": 8.900000000000001e-06,
"loss": 0.7335,
"step": 450
},
{
"epoch": 1.418848167539267,
"grad_norm": 18.6229305267334,
"learning_rate": 9.4e-06,
"loss": 0.755,
"step": 475
},
{
"epoch": 1.493642483171279,
"grad_norm": 11.876631736755371,
"learning_rate": 9.9e-06,
"loss": 0.6344,
"step": 500
},
{
"epoch": 1.5684367988032908,
"grad_norm": 12.877355575561523,
"learning_rate": 9.955555555555556e-06,
"loss": 0.7283,
"step": 525
},
{
"epoch": 1.643231114435303,
"grad_norm": 12.778818130493164,
"learning_rate": 9.9e-06,
"loss": 0.7274,
"step": 550
},
{
"epoch": 1.718025430067315,
"grad_norm": 20.310653686523438,
"learning_rate": 9.844444444444446e-06,
"loss": 0.649,
"step": 575
},
{
"epoch": 1.792819745699327,
"grad_norm": 10.528993606567383,
"learning_rate": 9.78888888888889e-06,
"loss": 0.6344,
"step": 600
},
{
"epoch": 1.792819745699327,
"eval_loss": 0.7586492300033569,
"eval_runtime": 285.93,
"eval_samples_per_second": 1.661,
"eval_steps_per_second": 0.832,
"eval_wer": 0.5600454029511919,
"step": 600
},
{
"epoch": 1.8676140613313388,
"grad_norm": 15.298612594604492,
"learning_rate": 9.733333333333334e-06,
"loss": 0.6574,
"step": 625
},
{
"epoch": 1.9424083769633507,
"grad_norm": 12.833754539489746,
"learning_rate": 9.677777777777778e-06,
"loss": 0.6864,
"step": 650
},
{
"epoch": 2.0149588631264024,
"grad_norm": 10.866531372070312,
"learning_rate": 9.622222222222222e-06,
"loss": 0.5753,
"step": 675
},
{
"epoch": 2.0897531787584143,
"grad_norm": 7.472757816314697,
"learning_rate": 9.566666666666668e-06,
"loss": 0.3958,
"step": 700
},
{
"epoch": 2.164547494390426,
"grad_norm": 10.134075164794922,
"learning_rate": 9.511111111111112e-06,
"loss": 0.3771,
"step": 725
},
{
"epoch": 2.2393418100224385,
"grad_norm": 12.163872718811035,
"learning_rate": 9.455555555555557e-06,
"loss": 0.4046,
"step": 750
},
{
"epoch": 2.3141361256544504,
"grad_norm": 9.965895652770996,
"learning_rate": 9.4e-06,
"loss": 0.4393,
"step": 775
},
{
"epoch": 2.3889304412864623,
"grad_norm": 11.286267280578613,
"learning_rate": 9.344444444444446e-06,
"loss": 0.5221,
"step": 800
},
{
"epoch": 2.3889304412864623,
"eval_loss": 0.7457356452941895,
"eval_runtime": 291.4533,
"eval_samples_per_second": 1.63,
"eval_steps_per_second": 0.817,
"eval_wer": 0.5623155505107832,
"step": 800
},
{
"epoch": 2.463724756918474,
"grad_norm": 10.80130672454834,
"learning_rate": 9.28888888888889e-06,
"loss": 0.3969,
"step": 825
},
{
"epoch": 2.538519072550486,
"grad_norm": 15.953104019165039,
"learning_rate": 9.233333333333334e-06,
"loss": 0.4538,
"step": 850
},
{
"epoch": 2.6133133881824984,
"grad_norm": 11.013789176940918,
"learning_rate": 9.17777777777778e-06,
"loss": 0.4291,
"step": 875
},
{
"epoch": 2.6881077038145103,
"grad_norm": 7.998225688934326,
"learning_rate": 9.122222222222223e-06,
"loss": 0.3928,
"step": 900
},
{
"epoch": 2.762902019446522,
"grad_norm": 7.645500183105469,
"learning_rate": 9.066666666666667e-06,
"loss": 0.4126,
"step": 925
},
{
"epoch": 2.837696335078534,
"grad_norm": 14.570154190063477,
"learning_rate": 9.011111111111111e-06,
"loss": 0.4496,
"step": 950
},
{
"epoch": 2.912490650710546,
"grad_norm": 6.618656158447266,
"learning_rate": 8.955555555555555e-06,
"loss": 0.4808,
"step": 975
},
{
"epoch": 2.987284966342558,
"grad_norm": 13.067177772521973,
"learning_rate": 8.900000000000001e-06,
"loss": 0.4151,
"step": 1000
},
{
"epoch": 2.987284966342558,
"eval_loss": 0.7278133630752563,
"eval_runtime": 288.5371,
"eval_samples_per_second": 1.646,
"eval_steps_per_second": 0.825,
"eval_wer": 0.548921679909194,
"step": 1000
},
{
"epoch": 3.0598354525056095,
"grad_norm": 7.467724800109863,
"learning_rate": 8.844444444444445e-06,
"loss": 0.2844,
"step": 1025
},
{
"epoch": 3.1346297681376214,
"grad_norm": 11.162481307983398,
"learning_rate": 8.788888888888891e-06,
"loss": 0.2591,
"step": 1050
},
{
"epoch": 3.2094240837696333,
"grad_norm": 6.069427490234375,
"learning_rate": 8.733333333333333e-06,
"loss": 0.25,
"step": 1075
},
{
"epoch": 3.2842183994016456,
"grad_norm": 8.44675350189209,
"learning_rate": 8.677777777777779e-06,
"loss": 0.2259,
"step": 1100
},
{
"epoch": 3.3590127150336575,
"grad_norm": 7.666698932647705,
"learning_rate": 8.622222222222223e-06,
"loss": 0.2512,
"step": 1125
},
{
"epoch": 3.4338070306656694,
"grad_norm": 9.301329612731934,
"learning_rate": 8.566666666666667e-06,
"loss": 0.1777,
"step": 1150
},
{
"epoch": 3.5086013462976813,
"grad_norm": 8.964385986328125,
"learning_rate": 8.511111111111113e-06,
"loss": 0.2556,
"step": 1175
},
{
"epoch": 3.5833956619296936,
"grad_norm": 6.371069431304932,
"learning_rate": 8.455555555555555e-06,
"loss": 0.219,
"step": 1200
},
{
"epoch": 3.5833956619296936,
"eval_loss": 0.7980450987815857,
"eval_runtime": 286.8497,
"eval_samples_per_second": 1.656,
"eval_steps_per_second": 0.83,
"eval_wer": 0.5287173666288308,
"step": 1200
},
{
"epoch": 3.6581899775617055,
"grad_norm": 9.424444198608398,
"learning_rate": 8.400000000000001e-06,
"loss": 0.1956,
"step": 1225
},
{
"epoch": 3.7329842931937174,
"grad_norm": 8.646815299987793,
"learning_rate": 8.344444444444445e-06,
"loss": 0.2591,
"step": 1250
},
{
"epoch": 3.8077786088257293,
"grad_norm": 6.182277679443359,
"learning_rate": 8.288888888888889e-06,
"loss": 0.2631,
"step": 1275
},
{
"epoch": 3.882572924457741,
"grad_norm": 8.403949737548828,
"learning_rate": 8.233333333333335e-06,
"loss": 0.2357,
"step": 1300
},
{
"epoch": 3.957367240089753,
"grad_norm": 11.063973426818848,
"learning_rate": 8.177777777777779e-06,
"loss": 0.2359,
"step": 1325
},
{
"epoch": 4.029917726252805,
"grad_norm": 5.503140926361084,
"learning_rate": 8.122222222222223e-06,
"loss": 0.1751,
"step": 1350
},
{
"epoch": 4.104712041884817,
"grad_norm": 5.730655670166016,
"learning_rate": 8.066666666666667e-06,
"loss": 0.1246,
"step": 1375
},
{
"epoch": 4.1795063575168285,
"grad_norm": 7.960923194885254,
"learning_rate": 8.011111111111113e-06,
"loss": 0.1226,
"step": 1400
},
{
"epoch": 4.1795063575168285,
"eval_loss": 0.8193269371986389,
"eval_runtime": 295.7162,
"eval_samples_per_second": 1.606,
"eval_steps_per_second": 0.805,
"eval_wer": 0.5362088535754824,
"step": 1400
},
{
"epoch": 4.25430067314884,
"grad_norm": 7.057325839996338,
"learning_rate": 7.955555555555557e-06,
"loss": 0.1117,
"step": 1425
},
{
"epoch": 4.329094988780852,
"grad_norm": 5.407674312591553,
"learning_rate": 7.9e-06,
"loss": 0.1035,
"step": 1450
},
{
"epoch": 4.403889304412864,
"grad_norm": 5.864582538604736,
"learning_rate": 7.844444444444446e-06,
"loss": 0.1219,
"step": 1475
},
{
"epoch": 4.478683620044877,
"grad_norm": 12.176095008850098,
"learning_rate": 7.788888888888889e-06,
"loss": 0.1455,
"step": 1500
},
{
"epoch": 4.553477935676889,
"grad_norm": 6.329302787780762,
"learning_rate": 7.733333333333334e-06,
"loss": 0.078,
"step": 1525
},
{
"epoch": 4.628272251308901,
"grad_norm": 8.11805534362793,
"learning_rate": 7.677777777777778e-06,
"loss": 0.1018,
"step": 1550
},
{
"epoch": 4.703066566940913,
"grad_norm": 2.373284339904785,
"learning_rate": 7.622222222222223e-06,
"loss": 0.1088,
"step": 1575
},
{
"epoch": 4.7778608825729245,
"grad_norm": 4.515956878662109,
"learning_rate": 7.566666666666667e-06,
"loss": 0.1245,
"step": 1600
},
{
"epoch": 4.7778608825729245,
"eval_loss": 0.8029963374137878,
"eval_runtime": 285.5197,
"eval_samples_per_second": 1.664,
"eval_steps_per_second": 0.834,
"eval_wer": 0.4976163450624291,
"step": 1600
},
{
"epoch": 4.852655198204936,
"grad_norm": 4.4174885749816895,
"learning_rate": 7.511111111111111e-06,
"loss": 0.1565,
"step": 1625
},
{
"epoch": 4.927449513836948,
"grad_norm": 6.87215518951416,
"learning_rate": 7.455555555555556e-06,
"loss": 0.1347,
"step": 1650
},
{
"epoch": 5.0,
"grad_norm": 7.197127342224121,
"learning_rate": 7.4e-06,
"loss": 0.088,
"step": 1675
},
{
"epoch": 5.074794315632012,
"grad_norm": 7.660472393035889,
"learning_rate": 7.344444444444445e-06,
"loss": 0.0532,
"step": 1700
},
{
"epoch": 5.149588631264024,
"grad_norm": 3.8243794441223145,
"learning_rate": 7.28888888888889e-06,
"loss": 0.055,
"step": 1725
},
{
"epoch": 5.224382946896036,
"grad_norm": 3.761762857437134,
"learning_rate": 7.233333333333334e-06,
"loss": 0.0561,
"step": 1750
},
{
"epoch": 5.2991772625280475,
"grad_norm": 3.755816698074341,
"learning_rate": 7.177777777777778e-06,
"loss": 0.0408,
"step": 1775
},
{
"epoch": 5.373971578160059,
"grad_norm": 1.7919914722442627,
"learning_rate": 7.122222222222222e-06,
"loss": 0.0529,
"step": 1800
},
{
"epoch": 5.373971578160059,
"eval_loss": 0.8765420317649841,
"eval_runtime": 284.5844,
"eval_samples_per_second": 1.669,
"eval_steps_per_second": 0.836,
"eval_wer": 0.4962542565266742,
"step": 1800
},
{
"epoch": 5.373971578160059,
"step": 1800,
"total_flos": 1.46609894965248e+19,
"train_loss": 0.551261609726482,
"train_runtime": 6179.7196,
"train_samples_per_second": 6.473,
"train_steps_per_second": 0.809
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 200,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 4
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.46609894965248e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}