{
"best_metric": 0.8883299798792756,
"best_model_checkpoint": "PhoWhisper-small-vispeech-classifier-v4/checkpoint-490",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 490,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01020408163265306,
"grad_norm": 51640.66015625,
"learning_rate": 5.1020408163265303e-08,
"loss": 2.0778,
"step": 5
},
{
"epoch": 0.02040816326530612,
"grad_norm": 46773.35546875,
"learning_rate": 1.0204081632653061e-07,
"loss": 2.0809,
"step": 10
},
{
"epoch": 0.030612244897959183,
"grad_norm": 45343.12109375,
"learning_rate": 1.5306122448979592e-07,
"loss": 2.0771,
"step": 15
},
{
"epoch": 0.04081632653061224,
"grad_norm": 49285.91796875,
"learning_rate": 2.0408163265306121e-07,
"loss": 2.077,
"step": 20
},
{
"epoch": 0.05102040816326531,
"grad_norm": 63271.82421875,
"learning_rate": 2.5510204081632656e-07,
"loss": 2.0755,
"step": 25
},
{
"epoch": 0.061224489795918366,
"grad_norm": 51649.79296875,
"learning_rate": 3.0612244897959183e-07,
"loss": 2.0764,
"step": 30
},
{
"epoch": 0.07142857142857142,
"grad_norm": 51633.98828125,
"learning_rate": 3.5714285714285716e-07,
"loss": 2.0763,
"step": 35
},
{
"epoch": 0.08163265306122448,
"grad_norm": 61777.23828125,
"learning_rate": 4.0816326530612243e-07,
"loss": 2.0772,
"step": 40
},
{
"epoch": 0.09183673469387756,
"grad_norm": 58467.15625,
"learning_rate": 4.591836734693878e-07,
"loss": 2.0743,
"step": 45
},
{
"epoch": 0.10204081632653061,
"grad_norm": 48974.37890625,
"learning_rate": 5.102040816326531e-07,
"loss": 2.072,
"step": 50
},
{
"epoch": 0.11224489795918367,
"grad_norm": 71007.515625,
"learning_rate": 5.612244897959184e-07,
"loss": 2.0751,
"step": 55
},
{
"epoch": 0.12244897959183673,
"grad_norm": 58237.77734375,
"learning_rate": 6.122448979591837e-07,
"loss": 2.0702,
"step": 60
},
{
"epoch": 0.1326530612244898,
"grad_norm": 58785.83203125,
"learning_rate": 6.632653061224491e-07,
"loss": 2.0713,
"step": 65
},
{
"epoch": 0.14285714285714285,
"grad_norm": 52771.265625,
"learning_rate": 7.142857142857143e-07,
"loss": 2.069,
"step": 70
},
{
"epoch": 0.15306122448979592,
"grad_norm": 69647.078125,
"learning_rate": 7.653061224489796e-07,
"loss": 2.0684,
"step": 75
},
{
"epoch": 0.16326530612244897,
"grad_norm": 59148.47265625,
"learning_rate": 8.163265306122449e-07,
"loss": 2.0634,
"step": 80
},
{
"epoch": 0.17346938775510204,
"grad_norm": 66723.921875,
"learning_rate": 8.673469387755103e-07,
"loss": 2.064,
"step": 85
},
{
"epoch": 0.1836734693877551,
"grad_norm": 72545.203125,
"learning_rate": 9.183673469387756e-07,
"loss": 2.0593,
"step": 90
},
{
"epoch": 0.19387755102040816,
"grad_norm": 59112.55859375,
"learning_rate": 9.69387755102041e-07,
"loss": 2.0553,
"step": 95
},
{
"epoch": 0.20408163265306123,
"grad_norm": 68369.265625,
"learning_rate": 1.0204081632653063e-06,
"loss": 2.0566,
"step": 100
},
{
"epoch": 0.21428571428571427,
"grad_norm": 51080.62890625,
"learning_rate": 1.0714285714285714e-06,
"loss": 2.0505,
"step": 105
},
{
"epoch": 0.22448979591836735,
"grad_norm": 78760.0859375,
"learning_rate": 1.122448979591837e-06,
"loss": 2.0496,
"step": 110
},
{
"epoch": 0.23469387755102042,
"grad_norm": 57198.7890625,
"learning_rate": 1.1734693877551022e-06,
"loss": 2.0458,
"step": 115
},
{
"epoch": 0.24489795918367346,
"grad_norm": 98369.4609375,
"learning_rate": 1.2244897959183673e-06,
"loss": 2.0336,
"step": 120
},
{
"epoch": 0.25510204081632654,
"grad_norm": 80801.5703125,
"learning_rate": 1.2755102040816329e-06,
"loss": 2.0354,
"step": 125
},
{
"epoch": 0.2653061224489796,
"grad_norm": 82179.9765625,
"learning_rate": 1.3265306122448982e-06,
"loss": 2.0263,
"step": 130
},
{
"epoch": 0.2755102040816326,
"grad_norm": 83225.078125,
"learning_rate": 1.3775510204081633e-06,
"loss": 2.0168,
"step": 135
},
{
"epoch": 0.2857142857142857,
"grad_norm": 62451.64453125,
"learning_rate": 1.4285714285714286e-06,
"loss": 2.023,
"step": 140
},
{
"epoch": 0.29591836734693877,
"grad_norm": 75567.9453125,
"learning_rate": 1.479591836734694e-06,
"loss": 2.0206,
"step": 145
},
{
"epoch": 0.30612244897959184,
"grad_norm": 98461.484375,
"learning_rate": 1.5306122448979593e-06,
"loss": 1.9984,
"step": 150
},
{
"epoch": 0.3163265306122449,
"grad_norm": 75078.1484375,
"learning_rate": 1.5816326530612248e-06,
"loss": 1.9899,
"step": 155
},
{
"epoch": 0.32653061224489793,
"grad_norm": 97895.671875,
"learning_rate": 1.6326530612244897e-06,
"loss": 1.9758,
"step": 160
},
{
"epoch": 0.336734693877551,
"grad_norm": 65880.2421875,
"learning_rate": 1.6836734693877552e-06,
"loss": 1.9918,
"step": 165
},
{
"epoch": 0.3469387755102041,
"grad_norm": 70675.578125,
"learning_rate": 1.7346938775510206e-06,
"loss": 1.9715,
"step": 170
},
{
"epoch": 0.35714285714285715,
"grad_norm": 64327.19921875,
"learning_rate": 1.7857142857142859e-06,
"loss": 1.9597,
"step": 175
},
{
"epoch": 0.3673469387755102,
"grad_norm": 80610.6171875,
"learning_rate": 1.8367346938775512e-06,
"loss": 1.9632,
"step": 180
},
{
"epoch": 0.37755102040816324,
"grad_norm": 110899.375,
"learning_rate": 1.8877551020408163e-06,
"loss": 1.9476,
"step": 185
},
{
"epoch": 0.3877551020408163,
"grad_norm": 111132.5703125,
"learning_rate": 1.938775510204082e-06,
"loss": 1.9369,
"step": 190
},
{
"epoch": 0.3979591836734694,
"grad_norm": 88722.625,
"learning_rate": 1.989795918367347e-06,
"loss": 1.9452,
"step": 195
},
{
"epoch": 0.40816326530612246,
"grad_norm": 105874.296875,
"learning_rate": 2.0408163265306125e-06,
"loss": 1.9459,
"step": 200
},
{
"epoch": 0.41836734693877553,
"grad_norm": 97286.0078125,
"learning_rate": 2.0918367346938776e-06,
"loss": 1.918,
"step": 205
},
{
"epoch": 0.42857142857142855,
"grad_norm": 82848.0625,
"learning_rate": 2.1428571428571427e-06,
"loss": 1.9135,
"step": 210
},
{
"epoch": 0.4387755102040816,
"grad_norm": 114854.4609375,
"learning_rate": 2.1938775510204083e-06,
"loss": 1.8869,
"step": 215
},
{
"epoch": 0.4489795918367347,
"grad_norm": 86661.25,
"learning_rate": 2.244897959183674e-06,
"loss": 1.8861,
"step": 220
},
{
"epoch": 0.45918367346938777,
"grad_norm": 127591.3984375,
"learning_rate": 2.295918367346939e-06,
"loss": 1.8396,
"step": 225
},
{
"epoch": 0.46938775510204084,
"grad_norm": 71038.8671875,
"learning_rate": 2.3469387755102044e-06,
"loss": 1.8995,
"step": 230
},
{
"epoch": 0.47959183673469385,
"grad_norm": 101402.421875,
"learning_rate": 2.3979591836734696e-06,
"loss": 1.8699,
"step": 235
},
{
"epoch": 0.4897959183673469,
"grad_norm": 113157.3828125,
"learning_rate": 2.4489795918367347e-06,
"loss": 1.8599,
"step": 240
},
{
"epoch": 0.5,
"grad_norm": 115485.0546875,
"learning_rate": 2.5e-06,
"loss": 1.8214,
"step": 245
},
{
"epoch": 0.5102040816326531,
"grad_norm": 139369.8125,
"learning_rate": 2.5510204081632657e-06,
"loss": 1.8539,
"step": 250
},
{
"epoch": 0.5204081632653061,
"grad_norm": 141401.46875,
"learning_rate": 2.602040816326531e-06,
"loss": 1.7507,
"step": 255
},
{
"epoch": 0.5306122448979592,
"grad_norm": 125767.3984375,
"learning_rate": 2.6530612244897964e-06,
"loss": 1.7948,
"step": 260
},
{
"epoch": 0.5408163265306123,
"grad_norm": 148848.5625,
"learning_rate": 2.7040816326530615e-06,
"loss": 1.7871,
"step": 265
},
{
"epoch": 0.5510204081632653,
"grad_norm": 152509.4375,
"learning_rate": 2.7551020408163266e-06,
"loss": 1.8054,
"step": 270
},
{
"epoch": 0.5612244897959183,
"grad_norm": 114681.078125,
"learning_rate": 2.8061224489795917e-06,
"loss": 1.7711,
"step": 275
},
{
"epoch": 0.5714285714285714,
"grad_norm": 119104.984375,
"learning_rate": 2.8571428571428573e-06,
"loss": 1.7139,
"step": 280
},
{
"epoch": 0.5816326530612245,
"grad_norm": 123160.5,
"learning_rate": 2.908163265306123e-06,
"loss": 1.703,
"step": 285
},
{
"epoch": 0.5918367346938775,
"grad_norm": 157218.203125,
"learning_rate": 2.959183673469388e-06,
"loss": 1.7685,
"step": 290
},
{
"epoch": 0.6020408163265306,
"grad_norm": 134804.828125,
"learning_rate": 3.0102040816326534e-06,
"loss": 1.7053,
"step": 295
},
{
"epoch": 0.6122448979591837,
"grad_norm": 153977.625,
"learning_rate": 3.0612244897959185e-06,
"loss": 1.6711,
"step": 300
},
{
"epoch": 0.6224489795918368,
"grad_norm": 204690.125,
"learning_rate": 3.112244897959184e-06,
"loss": 1.5873,
"step": 305
},
{
"epoch": 0.6326530612244898,
"grad_norm": 158604.1875,
"learning_rate": 3.1632653061224496e-06,
"loss": 1.6642,
"step": 310
},
{
"epoch": 0.6428571428571429,
"grad_norm": 181877.484375,
"learning_rate": 3.2142857142857147e-06,
"loss": 1.6204,
"step": 315
},
{
"epoch": 0.6530612244897959,
"grad_norm": 157735.734375,
"learning_rate": 3.2653061224489794e-06,
"loss": 1.6152,
"step": 320
},
{
"epoch": 0.6632653061224489,
"grad_norm": 121935.3828125,
"learning_rate": 3.316326530612245e-06,
"loss": 1.6357,
"step": 325
},
{
"epoch": 0.673469387755102,
"grad_norm": 133579.859375,
"learning_rate": 3.3673469387755105e-06,
"loss": 1.6085,
"step": 330
},
{
"epoch": 0.6836734693877551,
"grad_norm": 154886.953125,
"learning_rate": 3.4183673469387756e-06,
"loss": 1.5609,
"step": 335
},
{
"epoch": 0.6938775510204082,
"grad_norm": 151655.0625,
"learning_rate": 3.469387755102041e-06,
"loss": 1.5905,
"step": 340
},
{
"epoch": 0.7040816326530612,
"grad_norm": 198471.609375,
"learning_rate": 3.5204081632653062e-06,
"loss": 1.5052,
"step": 345
},
{
"epoch": 0.7142857142857143,
"grad_norm": 191516.453125,
"learning_rate": 3.5714285714285718e-06,
"loss": 1.5798,
"step": 350
},
{
"epoch": 0.7244897959183674,
"grad_norm": 207511.296875,
"learning_rate": 3.6224489795918373e-06,
"loss": 1.5382,
"step": 355
},
{
"epoch": 0.7346938775510204,
"grad_norm": 170820.609375,
"learning_rate": 3.6734693877551024e-06,
"loss": 1.485,
"step": 360
},
{
"epoch": 0.7448979591836735,
"grad_norm": 183925.796875,
"learning_rate": 3.724489795918368e-06,
"loss": 1.427,
"step": 365
},
{
"epoch": 0.7551020408163265,
"grad_norm": 302829.75,
"learning_rate": 3.7755102040816327e-06,
"loss": 1.4242,
"step": 370
},
{
"epoch": 0.7653061224489796,
"grad_norm": 292990.5625,
"learning_rate": 3.826530612244898e-06,
"loss": 1.484,
"step": 375
},
{
"epoch": 0.7755102040816326,
"grad_norm": 189517.421875,
"learning_rate": 3.877551020408164e-06,
"loss": 1.5039,
"step": 380
},
{
"epoch": 0.7857142857142857,
"grad_norm": 239271.890625,
"learning_rate": 3.928571428571429e-06,
"loss": 1.3332,
"step": 385
},
{
"epoch": 0.7959183673469388,
"grad_norm": 167386.28125,
"learning_rate": 3.979591836734694e-06,
"loss": 1.429,
"step": 390
},
{
"epoch": 0.8061224489795918,
"grad_norm": 165701.9375,
"learning_rate": 4.03061224489796e-06,
"loss": 1.3167,
"step": 395
},
{
"epoch": 0.8163265306122449,
"grad_norm": 284793.46875,
"learning_rate": 4.081632653061225e-06,
"loss": 1.3867,
"step": 400
},
{
"epoch": 0.826530612244898,
"grad_norm": 159807.671875,
"learning_rate": 4.13265306122449e-06,
"loss": 1.404,
"step": 405
},
{
"epoch": 0.8367346938775511,
"grad_norm": 225698.859375,
"learning_rate": 4.183673469387755e-06,
"loss": 1.3842,
"step": 410
},
{
"epoch": 0.8469387755102041,
"grad_norm": 148945.90625,
"learning_rate": 4.234693877551021e-06,
"loss": 1.4605,
"step": 415
},
{
"epoch": 0.8571428571428571,
"grad_norm": 301350.15625,
"learning_rate": 4.2857142857142855e-06,
"loss": 1.4279,
"step": 420
},
{
"epoch": 0.8673469387755102,
"grad_norm": 216519.640625,
"learning_rate": 4.336734693877551e-06,
"loss": 1.3122,
"step": 425
},
{
"epoch": 0.8775510204081632,
"grad_norm": 243937.1875,
"learning_rate": 4.3877551020408165e-06,
"loss": 1.2084,
"step": 430
},
{
"epoch": 0.8877551020408163,
"grad_norm": 195260.390625,
"learning_rate": 4.438775510204082e-06,
"loss": 1.3024,
"step": 435
},
{
"epoch": 0.8979591836734694,
"grad_norm": 218123.65625,
"learning_rate": 4.489795918367348e-06,
"loss": 1.2303,
"step": 440
},
{
"epoch": 0.9081632653061225,
"grad_norm": 343990.03125,
"learning_rate": 4.540816326530613e-06,
"loss": 1.2012,
"step": 445
},
{
"epoch": 0.9183673469387755,
"grad_norm": 157196.390625,
"learning_rate": 4.591836734693878e-06,
"loss": 1.2632,
"step": 450
},
{
"epoch": 0.9285714285714286,
"grad_norm": 238103.46875,
"learning_rate": 4.642857142857144e-06,
"loss": 1.3131,
"step": 455
},
{
"epoch": 0.9387755102040817,
"grad_norm": 158442.625,
"learning_rate": 4.693877551020409e-06,
"loss": 1.2849,
"step": 460
},
{
"epoch": 0.9489795918367347,
"grad_norm": 178316.640625,
"learning_rate": 4.744897959183674e-06,
"loss": 1.0987,
"step": 465
},
{
"epoch": 0.9591836734693877,
"grad_norm": 240848.03125,
"learning_rate": 4.795918367346939e-06,
"loss": 1.154,
"step": 470
},
{
"epoch": 0.9693877551020408,
"grad_norm": 677730.8125,
"learning_rate": 4.846938775510204e-06,
"loss": 1.1682,
"step": 475
},
{
"epoch": 0.9795918367346939,
"grad_norm": 300622.875,
"learning_rate": 4.897959183673469e-06,
"loss": 1.1212,
"step": 480
},
{
"epoch": 0.9897959183673469,
"grad_norm": 234745.03125,
"learning_rate": 4.948979591836735e-06,
"loss": 1.1859,
"step": 485
},
{
"epoch": 1.0,
"grad_norm": 254966.984375,
"learning_rate": 5e-06,
"loss": 1.1763,
"step": 490
},
{
"epoch": 1.0,
"eval_accuracy": 0.8883299798792756,
"eval_loss": 0.968666136264801,
"eval_runtime": 208.5533,
"eval_samples_per_second": 14.299,
"eval_steps_per_second": 0.599,
"step": 490
}
],
"logging_steps": 5,
"max_steps": 4900,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.49679069336576e+18,
"train_batch_size": 24,
"trial_name": null,
"trial_params": null
}