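The block below is a hedged sketch, not the author's training script: it reconstructs `Seq2SeqTrainingArguments` values that are consistent with the fields recorded in the trainer state that follows (logging_steps=25, eval_steps and save_steps of 200, max_steps=5000, per-device batch size 2, and an `EarlyStoppingCallback` with patience 3). The learning rate and warmup are inferred from the logged schedule, and the monitored metric is assumed to be eval_loss because `best_metric` matches the step-800 `eval_loss`; treat every inferred value as an assumption.

```python
from transformers import Seq2SeqTrainingArguments, EarlyStoppingCallback

# Hypothetical reconstruction: explicit fields are copied from the trainer
# state below; learning_rate, warmup_steps and the metric choice are inferred
# from the logged schedule and are assumptions, not the author's script.
training_args = Seq2SeqTrainingArguments(
    output_dir="/scratch/skscla001/speech/results/whisper-medium-swagen-male-model-test",
    per_device_train_batch_size=2,      # "train_batch_size": 2
    learning_rate=1e-5,                 # logged LR peaks near 1e-5 around step 500 (inferred)
    warmup_steps=500,                   # LR rises linearly over the first ~500 steps (inferred)
    max_steps=5000,                     # "max_steps": 5000
    eval_strategy="steps",              # "evaluation_strategy" on older transformers versions
    eval_steps=200,                     # "eval_steps": 200
    save_steps=200,                     # "save_steps": 200
    logging_steps=25,                   # "logging_steps": 25
    load_best_model_at_end=True,        # required for EarlyStoppingCallback
    metric_for_best_model="eval_loss",  # assumed: best_metric equals the step-800 eval_loss
    greater_is_better=False,
)

# Args recorded under "stateful_callbacks" -> "EarlyStoppingCallback"
callbacks = [EarlyStoppingCallback(early_stopping_patience=3,
                                   early_stopping_threshold=0.0)]
```

One detail this file does not record: the throughput of roughly 8 train samples per step (train_samples_per_second divided by train_steps_per_second) suggests gradient accumulation or multiple devices on top of the per-device batch size of 2.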
{
"best_metric": 0.5614759922027588,
"best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-swagen-male-model-test/checkpoint-800",
"epoch": 3.382335148215366,
"eval_steps": 200,
"global_step": 1400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.060496067755595885,
"grad_norm": 131.1879425048828,
"learning_rate": 4.0000000000000003e-07,
"loss": 9.6896,
"step": 25
},
{
"epoch": 0.12099213551119177,
"grad_norm": 97.95094299316406,
"learning_rate": 9.000000000000001e-07,
"loss": 7.0833,
"step": 50
},
{
"epoch": 0.18148820326678766,
"grad_norm": 64.53318786621094,
"learning_rate": 1.4000000000000001e-06,
"loss": 4.4645,
"step": 75
},
{
"epoch": 0.24198427102238354,
"grad_norm": 73.28852844238281,
"learning_rate": 1.9000000000000002e-06,
"loss": 3.8214,
"step": 100
},
{
"epoch": 0.3024803387779794,
"grad_norm": 57.7812614440918,
"learning_rate": 2.4000000000000003e-06,
"loss": 3.318,
"step": 125
},
{
"epoch": 0.3629764065335753,
"grad_norm": 49.410987854003906,
"learning_rate": 2.9e-06,
"loss": 3.1163,
"step": 150
},
{
"epoch": 0.42347247428917123,
"grad_norm": 69.99105072021484,
"learning_rate": 3.4000000000000005e-06,
"loss": 2.5831,
"step": 175
},
{
"epoch": 0.4839685420447671,
"grad_norm": 69.5449447631836,
"learning_rate": 3.900000000000001e-06,
"loss": 2.7031,
"step": 200
},
{
"epoch": 0.4839685420447671,
"eval_loss": 0.7854222655296326,
"eval_runtime": 292.3968,
"eval_samples_per_second": 1.905,
"eval_steps_per_second": 0.954,
"eval_wer": 0.5021432945499081,
"step": 200
},
{
"epoch": 0.5444646098003629,
"grad_norm": 60.96687316894531,
"learning_rate": 4.4e-06,
"loss": 2.4497,
"step": 225
},
{
"epoch": 0.6049606775559588,
"grad_norm": 52.162254333496094,
"learning_rate": 4.9000000000000005e-06,
"loss": 2.2668,
"step": 250
},
{
"epoch": 0.6654567453115547,
"grad_norm": 52.47731399536133,
"learning_rate": 5.400000000000001e-06,
"loss": 2.0714,
"step": 275
},
{
"epoch": 0.7259528130671506,
"grad_norm": 59.63617706298828,
"learning_rate": 5.9e-06,
"loss": 2.0596,
"step": 300
},
{
"epoch": 0.7864488808227466,
"grad_norm": 56.29938507080078,
"learning_rate": 6.4000000000000006e-06,
"loss": 2.4715,
"step": 325
},
{
"epoch": 0.8469449485783425,
"grad_norm": 59.82426834106445,
"learning_rate": 6.9e-06,
"loss": 1.9759,
"step": 350
},
{
"epoch": 0.9074410163339383,
"grad_norm": 56.96477127075195,
"learning_rate": 7.4e-06,
"loss": 1.9659,
"step": 375
},
{
"epoch": 0.9679370840895342,
"grad_norm": 64.53138732910156,
"learning_rate": 7.9e-06,
"loss": 2.0469,
"step": 400
},
{
"epoch": 0.9679370840895342,
"eval_loss": 0.6388072967529297,
"eval_runtime": 290.0546,
"eval_samples_per_second": 1.92,
"eval_steps_per_second": 0.962,
"eval_wer": 0.3988569095733823,
"step": 400
},
{
"epoch": 1.0266182698124622,
"grad_norm": 40.781829833984375,
"learning_rate": 8.400000000000001e-06,
"loss": 1.6219,
"step": 425
},
{
"epoch": 1.087114337568058,
"grad_norm": 68.04630279541016,
"learning_rate": 8.900000000000001e-06,
"loss": 1.2881,
"step": 450
},
{
"epoch": 1.147610405323654,
"grad_norm": 33.47824478149414,
"learning_rate": 9.4e-06,
"loss": 1.3103,
"step": 475
},
{
"epoch": 1.20810647307925,
"grad_norm": 49.198524475097656,
"learning_rate": 9.9e-06,
"loss": 1.2683,
"step": 500
},
{
"epoch": 1.2686025408348458,
"grad_norm": 42.11520767211914,
"learning_rate": 9.955555555555556e-06,
"loss": 1.2566,
"step": 525
},
{
"epoch": 1.3290986085904417,
"grad_norm": 41.1612548828125,
"learning_rate": 9.9e-06,
"loss": 1.1413,
"step": 550
},
{
"epoch": 1.3895946763460376,
"grad_norm": 31.75577163696289,
"learning_rate": 9.844444444444446e-06,
"loss": 1.1443,
"step": 575
},
{
"epoch": 1.4500907441016335,
"grad_norm": 52.19874572753906,
"learning_rate": 9.78888888888889e-06,
"loss": 1.0941,
"step": 600
},
{
"epoch": 1.4500907441016335,
"eval_loss": 0.5956152081489563,
"eval_runtime": 284.4786,
"eval_samples_per_second": 1.958,
"eval_steps_per_second": 0.981,
"eval_wer": 0.3357828128189426,
"step": 600
},
{
"epoch": 1.5105868118572294,
"grad_norm": 59.771671295166016,
"learning_rate": 9.733333333333334e-06,
"loss": 1.252,
"step": 625
},
{
"epoch": 1.5710828796128251,
"grad_norm": 48.12847900390625,
"learning_rate": 9.677777777777778e-06,
"loss": 1.3705,
"step": 650
},
{
"epoch": 1.631578947368421,
"grad_norm": 37.03394317626953,
"learning_rate": 9.622222222222222e-06,
"loss": 1.2273,
"step": 675
},
{
"epoch": 1.692075015124017,
"grad_norm": 75.09231567382812,
"learning_rate": 9.566666666666668e-06,
"loss": 1.427,
"step": 700
},
{
"epoch": 1.7525710828796128,
"grad_norm": 41.142433166503906,
"learning_rate": 9.511111111111112e-06,
"loss": 1.1998,
"step": 725
},
{
"epoch": 1.8130671506352087,
"grad_norm": 45.4554328918457,
"learning_rate": 9.455555555555557e-06,
"loss": 1.1879,
"step": 750
},
{
"epoch": 1.8735632183908046,
"grad_norm": 44.370784759521484,
"learning_rate": 9.4e-06,
"loss": 1.1157,
"step": 775
},
{
"epoch": 1.9340592861464003,
"grad_norm": 32.7186279296875,
"learning_rate": 9.344444444444446e-06,
"loss": 1.0543,
"step": 800
},
{
"epoch": 1.9340592861464003,
"eval_loss": 0.5614759922027588,
"eval_runtime": 285.1894,
"eval_samples_per_second": 1.953,
"eval_steps_per_second": 0.978,
"eval_wer": 0.3229230455194938,
"step": 800
},
{
"epoch": 1.9945553539019962,
"grad_norm": 24.692054748535156,
"learning_rate": 9.28888888888889e-06,
"loss": 1.0389,
"step": 825
},
{
"epoch": 2.0532365396249244,
"grad_norm": 13.663407325744629,
"learning_rate": 9.233333333333334e-06,
"loss": 0.4904,
"step": 850
},
{
"epoch": 2.1137326073805203,
"grad_norm": 27.10588264465332,
"learning_rate": 9.17777777777778e-06,
"loss": 0.4243,
"step": 875
},
{
"epoch": 2.174228675136116,
"grad_norm": 19.4141788482666,
"learning_rate": 9.122222222222223e-06,
"loss": 0.4291,
"step": 900
},
{
"epoch": 2.234724742891712,
"grad_norm": 16.04352569580078,
"learning_rate": 9.066666666666667e-06,
"loss": 0.5348,
"step": 925
},
{
"epoch": 2.295220810647308,
"grad_norm": 27.763322830200195,
"learning_rate": 9.011111111111111e-06,
"loss": 0.4595,
"step": 950
},
{
"epoch": 2.355716878402904,
"grad_norm": 25.71826171875,
"learning_rate": 8.955555555555555e-06,
"loss": 0.4618,
"step": 975
},
{
"epoch": 2.4162129461585,
"grad_norm": 25.92030143737793,
"learning_rate": 8.900000000000001e-06,
"loss": 0.4611,
"step": 1000
},
{
"epoch": 2.4162129461585,
"eval_loss": 0.5802616477012634,
"eval_runtime": 281.3023,
"eval_samples_per_second": 1.98,
"eval_steps_per_second": 0.992,
"eval_wer": 0.3070014288630333,
"step": 1000
},
{
"epoch": 2.4767090139140957,
"grad_norm": 30.20458984375,
"learning_rate": 8.844444444444445e-06,
"loss": 0.5032,
"step": 1025
},
{
"epoch": 2.5372050816696916,
"grad_norm": 44.72235870361328,
"learning_rate": 8.788888888888891e-06,
"loss": 0.4851,
"step": 1050
},
{
"epoch": 2.5977011494252875,
"grad_norm": 17.111309051513672,
"learning_rate": 8.733333333333333e-06,
"loss": 0.4644,
"step": 1075
},
{
"epoch": 2.6581972171808834,
"grad_norm": 29.341838836669922,
"learning_rate": 8.677777777777779e-06,
"loss": 0.5034,
"step": 1100
},
{
"epoch": 2.718693284936479,
"grad_norm": 6.225165367126465,
"learning_rate": 8.622222222222223e-06,
"loss": 0.4506,
"step": 1125
},
{
"epoch": 2.7791893526920752,
"grad_norm": 23.223388671875,
"learning_rate": 8.566666666666667e-06,
"loss": 0.4935,
"step": 1150
},
{
"epoch": 2.8396854204476707,
"grad_norm": 38.89765167236328,
"learning_rate": 8.511111111111113e-06,
"loss": 0.4469,
"step": 1175
},
{
"epoch": 2.900181488203267,
"grad_norm": 24.92243003845215,
"learning_rate": 8.455555555555555e-06,
"loss": 0.5304,
"step": 1200
},
{
"epoch": 2.900181488203267,
"eval_loss": 0.5714038014411926,
"eval_runtime": 298.3112,
"eval_samples_per_second": 1.867,
"eval_steps_per_second": 0.935,
"eval_wer": 0.34598897734231476,
"step": 1200
},
{
"epoch": 2.9606775559588625,
"grad_norm": 12.515642166137695,
"learning_rate": 8.400000000000001e-06,
"loss": 0.4318,
"step": 1225
},
{
"epoch": 3.0193587416817906,
"grad_norm": 8.178666114807129,
"learning_rate": 8.344444444444445e-06,
"loss": 0.4301,
"step": 1250
},
{
"epoch": 3.0798548094373865,
"grad_norm": 18.96198844909668,
"learning_rate": 8.288888888888889e-06,
"loss": 0.2354,
"step": 1275
},
{
"epoch": 3.1403508771929824,
"grad_norm": 9.2815580368042,
"learning_rate": 8.233333333333335e-06,
"loss": 0.1771,
"step": 1300
},
{
"epoch": 3.2008469449485784,
"grad_norm": 5.816292762756348,
"learning_rate": 8.177777777777779e-06,
"loss": 0.1722,
"step": 1325
},
{
"epoch": 3.2613430127041743,
"grad_norm": 28.862037658691406,
"learning_rate": 8.122222222222223e-06,
"loss": 0.2157,
"step": 1350
},
{
"epoch": 3.32183908045977,
"grad_norm": 4.854808807373047,
"learning_rate": 8.066666666666667e-06,
"loss": 0.2194,
"step": 1375
},
{
"epoch": 3.382335148215366,
"grad_norm": 6.491705894470215,
"learning_rate": 8.011111111111113e-06,
"loss": 0.2124,
"step": 1400
},
{
"epoch": 3.382335148215366,
"eval_loss": 0.5999584197998047,
"eval_runtime": 280.4784,
"eval_samples_per_second": 1.986,
"eval_steps_per_second": 0.995,
"eval_wer": 0.2872014696876914,
"step": 1400
},
{
"epoch": 3.382335148215366,
"step": 1400,
"total_flos": 1.141240407588864e+19,
"train_loss": 1.5056792647497994,
"train_runtime": 4859.6451,
"train_samples_per_second": 8.231,
"train_steps_per_second": 1.029
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 200,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 3
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.141240407588864e+19,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
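A minimal sketch for reading this state back, assuming it is saved locally as `trainer_state.json` (the path is illustrative). It prints the best checkpoint and the eval_loss/eval_WER trajectory, and reproduces the early-stopping bookkeeping: the best eval_loss (0.5615) occurs at step 800, and the three later evaluations at steps 1000, 1200 and 1400 show no improvement on that metric, which with `early_stopping_patience=3` stops training well before max_steps=5000 even though eval_wer is still improving.

```python
import json

# Load the trainer state (path is an assumption for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

print("best checkpoint:", state["best_model_checkpoint"])
print("best eval_loss: ", state["best_metric"])

# Evaluation entries are the log_history items that carry eval_loss;
# they occur every 200 steps per "eval_steps".
evals = [e for e in state["log_history"] if "eval_loss" in e]
for e in evals:
    print(f'step {e["step"]:>5}: eval_loss={e["eval_loss"]:.4f}  eval_wer={e["eval_wer"]:.4f}')

# Early-stopping count: evaluations after the best eval_loss with no
# improvement. Here this reaches 3 (steps 1000, 1200, 1400), matching
# early_stopping_patience_counter and why should_training_stop is true.
best = min(e["eval_loss"] for e in evals)
best_step = next(e["step"] for e in evals if e["eval_loss"] == best)
no_improve = sum(1 for e in evals if e["step"] > best_step)
print("evaluations without improvement:", no_improve)
```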