{
"best_global_step": 5000,
"best_metric": 78.11993940343729,
"best_model_checkpoint": "./working_area/output_model/checkpoint-5000",
"epoch": 0.30233401862377557,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015116700931188777,
"grad_norm": 10.39171028137207,
"learning_rate": 4.800000000000001e-07,
"loss": 0.2719,
"step": 25
},
{
"epoch": 0.0030233401862377555,
"grad_norm": 0.3984987735748291,
"learning_rate": 9.800000000000001e-07,
"loss": 0.0431,
"step": 50
},
{
"epoch": 0.004535010279356633,
"grad_norm": 0.2931273877620697,
"learning_rate": 1.48e-06,
"loss": 0.0297,
"step": 75
},
{
"epoch": 0.006046680372475511,
"grad_norm": 0.24546289443969727,
"learning_rate": 1.98e-06,
"loss": 0.027,
"step": 100
},
{
"epoch": 0.007558350465594389,
"grad_norm": 0.2551668584346771,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.0264,
"step": 125
},
{
"epoch": 0.009070020558713266,
"grad_norm": 0.22325266897678375,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.0258,
"step": 150
},
{
"epoch": 0.010581690651832143,
"grad_norm": 0.2603906989097595,
"learning_rate": 3.48e-06,
"loss": 0.0249,
"step": 175
},
{
"epoch": 0.012093360744951022,
"grad_norm": 0.24611017107963562,
"learning_rate": 3.980000000000001e-06,
"loss": 0.0246,
"step": 200
},
{
"epoch": 0.013605030838069899,
"grad_norm": 0.26433423161506653,
"learning_rate": 4.48e-06,
"loss": 0.0262,
"step": 225
},
{
"epoch": 0.015116700931188777,
"grad_norm": 0.2509951889514923,
"learning_rate": 4.980000000000001e-06,
"loss": 0.0263,
"step": 250
},
{
"epoch": 0.016628371024307656,
"grad_norm": 0.268083393573761,
"learning_rate": 5.480000000000001e-06,
"loss": 0.0255,
"step": 275
},
{
"epoch": 0.018140041117426533,
"grad_norm": 0.2258174866437912,
"learning_rate": 5.98e-06,
"loss": 0.0253,
"step": 300
},
{
"epoch": 0.01965171121054541,
"grad_norm": 0.24694132804870605,
"learning_rate": 6.480000000000001e-06,
"loss": 0.024,
"step": 325
},
{
"epoch": 0.021163381303664287,
"grad_norm": 0.24790944159030914,
"learning_rate": 6.98e-06,
"loss": 0.0256,
"step": 350
},
{
"epoch": 0.022675051396783167,
"grad_norm": 0.25042369961738586,
"learning_rate": 7.48e-06,
"loss": 0.0251,
"step": 375
},
{
"epoch": 0.024186721489902044,
"grad_norm": 0.2964789569377899,
"learning_rate": 7.980000000000002e-06,
"loss": 0.026,
"step": 400
},
{
"epoch": 0.02569839158302092,
"grad_norm": 0.2886582016944885,
"learning_rate": 8.48e-06,
"loss": 0.0261,
"step": 425
},
{
"epoch": 0.027210061676139798,
"grad_norm": 0.24788400530815125,
"learning_rate": 8.98e-06,
"loss": 0.0244,
"step": 450
},
{
"epoch": 0.028721731769258678,
"grad_norm": 0.25503554940223694,
"learning_rate": 9.48e-06,
"loss": 0.0237,
"step": 475
},
{
"epoch": 0.030233401862377555,
"grad_norm": 0.24687393009662628,
"learning_rate": 9.980000000000001e-06,
"loss": 0.0246,
"step": 500
},
{
"epoch": 0.03174507195549643,
"grad_norm": 0.2741446793079376,
"learning_rate": 9.946666666666667e-06,
"loss": 0.0254,
"step": 525
},
{
"epoch": 0.03325674204861531,
"grad_norm": 0.25302210450172424,
"learning_rate": 9.891111111111113e-06,
"loss": 0.0241,
"step": 550
},
{
"epoch": 0.034768412141734185,
"grad_norm": 0.24115385115146637,
"learning_rate": 9.835555555555556e-06,
"loss": 0.025,
"step": 575
},
{
"epoch": 0.036280082234853066,
"grad_norm": 0.24566683173179626,
"learning_rate": 9.780000000000001e-06,
"loss": 0.0241,
"step": 600
},
{
"epoch": 0.037791752327971946,
"grad_norm": 0.21123534440994263,
"learning_rate": 9.724444444444445e-06,
"loss": 0.0238,
"step": 625
},
{
"epoch": 0.03930342242109082,
"grad_norm": 0.23343868553638458,
"learning_rate": 9.66888888888889e-06,
"loss": 0.0234,
"step": 650
},
{
"epoch": 0.0408150925142097,
"grad_norm": 0.28975582122802734,
"learning_rate": 9.613333333333335e-06,
"loss": 0.0229,
"step": 675
},
{
"epoch": 0.04232676260732857,
"grad_norm": 0.24031095206737518,
"learning_rate": 9.557777777777777e-06,
"loss": 0.0236,
"step": 700
},
{
"epoch": 0.043838432700447454,
"grad_norm": 0.29754847288131714,
"learning_rate": 9.502222222222223e-06,
"loss": 0.0235,
"step": 725
},
{
"epoch": 0.045350102793566334,
"grad_norm": 0.25962579250335693,
"learning_rate": 9.446666666666667e-06,
"loss": 0.0242,
"step": 750
},
{
"epoch": 0.04686177288668521,
"grad_norm": 0.2572610378265381,
"learning_rate": 9.391111111111111e-06,
"loss": 0.0233,
"step": 775
},
{
"epoch": 0.04837344297980409,
"grad_norm": 0.24092882871627808,
"learning_rate": 9.335555555555557e-06,
"loss": 0.0227,
"step": 800
},
{
"epoch": 0.04988511307292297,
"grad_norm": 0.23399066925048828,
"learning_rate": 9.280000000000001e-06,
"loss": 0.0239,
"step": 825
},
{
"epoch": 0.05139678316604184,
"grad_norm": 0.26382818818092346,
"learning_rate": 9.224444444444445e-06,
"loss": 0.0233,
"step": 850
},
{
"epoch": 0.05290845325916072,
"grad_norm": 0.25903263688087463,
"learning_rate": 9.168888888888889e-06,
"loss": 0.0237,
"step": 875
},
{
"epoch": 0.054420123352279595,
"grad_norm": 0.21435964107513428,
"learning_rate": 9.113333333333335e-06,
"loss": 0.0224,
"step": 900
},
{
"epoch": 0.055931793445398476,
"grad_norm": 0.23641858994960785,
"learning_rate": 9.057777777777779e-06,
"loss": 0.0217,
"step": 925
},
{
"epoch": 0.057443463538517356,
"grad_norm": 0.24842330813407898,
"learning_rate": 9.002222222222223e-06,
"loss": 0.0234,
"step": 950
},
{
"epoch": 0.05895513363163623,
"grad_norm": 0.2286524474620819,
"learning_rate": 8.946666666666669e-06,
"loss": 0.0228,
"step": 975
},
{
"epoch": 0.06046680372475511,
"grad_norm": 0.24313467741012573,
"learning_rate": 8.891111111111111e-06,
"loss": 0.0219,
"step": 1000
},
{
"epoch": 0.06046680372475511,
"eval_loss": 0.02475658990442753,
"eval_runtime": 12970.4314,
"eval_samples_per_second": 8.844,
"eval_steps_per_second": 0.553,
"eval_wer": 81.0296191819464,
"step": 1000
},
{
"epoch": 0.06197847381787399,
"grad_norm": 0.21169662475585938,
"learning_rate": 8.835555555555557e-06,
"loss": 0.023,
"step": 1025
},
{
"epoch": 0.06349014391099286,
"grad_norm": 0.2406071424484253,
"learning_rate": 8.78e-06,
"loss": 0.0235,
"step": 1050
},
{
"epoch": 0.06500181400411174,
"grad_norm": 0.23690740764141083,
"learning_rate": 8.724444444444445e-06,
"loss": 0.0223,
"step": 1075
},
{
"epoch": 0.06651348409723062,
"grad_norm": 0.2665939927101135,
"learning_rate": 8.66888888888889e-06,
"loss": 0.0223,
"step": 1100
},
{
"epoch": 0.0680251541903495,
"grad_norm": 0.26822060346603394,
"learning_rate": 8.613333333333333e-06,
"loss": 0.0229,
"step": 1125
},
{
"epoch": 0.06953682428346837,
"grad_norm": 0.23397238552570343,
"learning_rate": 8.557777777777778e-06,
"loss": 0.0217,
"step": 1150
},
{
"epoch": 0.07104849437658725,
"grad_norm": 0.24292343854904175,
"learning_rate": 8.502222222222223e-06,
"loss": 0.0237,
"step": 1175
},
{
"epoch": 0.07256016446970613,
"grad_norm": 0.21283379197120667,
"learning_rate": 8.446666666666668e-06,
"loss": 0.0229,
"step": 1200
},
{
"epoch": 0.07407183456282501,
"grad_norm": 0.28053444623947144,
"learning_rate": 8.391111111111112e-06,
"loss": 0.0245,
"step": 1225
},
{
"epoch": 0.07558350465594389,
"grad_norm": 0.24319562315940857,
"learning_rate": 8.335555555555556e-06,
"loss": 0.0222,
"step": 1250
},
{
"epoch": 0.07709517474906276,
"grad_norm": 0.23033973574638367,
"learning_rate": 8.28e-06,
"loss": 0.0216,
"step": 1275
},
{
"epoch": 0.07860684484218164,
"grad_norm": 0.2720530927181244,
"learning_rate": 8.224444444444444e-06,
"loss": 0.023,
"step": 1300
},
{
"epoch": 0.08011851493530052,
"grad_norm": 0.27409815788269043,
"learning_rate": 8.16888888888889e-06,
"loss": 0.0228,
"step": 1325
},
{
"epoch": 0.0816301850284194,
"grad_norm": 0.28669819235801697,
"learning_rate": 8.113333333333334e-06,
"loss": 0.0228,
"step": 1350
},
{
"epoch": 0.08314185512153828,
"grad_norm": 0.24168507754802704,
"learning_rate": 8.057777777777778e-06,
"loss": 0.0233,
"step": 1375
},
{
"epoch": 0.08465352521465715,
"grad_norm": 0.2719112038612366,
"learning_rate": 8.002222222222222e-06,
"loss": 0.0211,
"step": 1400
},
{
"epoch": 0.08616519530777603,
"grad_norm": 0.2333834171295166,
"learning_rate": 7.946666666666666e-06,
"loss": 0.0222,
"step": 1425
},
{
"epoch": 0.08767686540089491,
"grad_norm": 0.2352941334247589,
"learning_rate": 7.891111111111112e-06,
"loss": 0.0228,
"step": 1450
},
{
"epoch": 0.08918853549401379,
"grad_norm": 0.258932888507843,
"learning_rate": 7.835555555555556e-06,
"loss": 0.0229,
"step": 1475
},
{
"epoch": 0.09070020558713267,
"grad_norm": 0.23196369409561157,
"learning_rate": 7.78e-06,
"loss": 0.0218,
"step": 1500
},
{
"epoch": 0.09221187568025155,
"grad_norm": 0.24886886775493622,
"learning_rate": 7.724444444444446e-06,
"loss": 0.0224,
"step": 1525
},
{
"epoch": 0.09372354577337041,
"grad_norm": 0.2534898817539215,
"learning_rate": 7.66888888888889e-06,
"loss": 0.0213,
"step": 1550
},
{
"epoch": 0.0952352158664893,
"grad_norm": 0.24856404960155487,
"learning_rate": 7.613333333333334e-06,
"loss": 0.0217,
"step": 1575
},
{
"epoch": 0.09674688595960818,
"grad_norm": 0.2615777552127838,
"learning_rate": 7.557777777777779e-06,
"loss": 0.0209,
"step": 1600
},
{
"epoch": 0.09825855605272706,
"grad_norm": 0.293625146150589,
"learning_rate": 7.502222222222223e-06,
"loss": 0.0232,
"step": 1625
},
{
"epoch": 0.09977022614584594,
"grad_norm": 0.21186749637126923,
"learning_rate": 7.446666666666668e-06,
"loss": 0.0214,
"step": 1650
},
{
"epoch": 0.1012818962389648,
"grad_norm": 0.2184201329946518,
"learning_rate": 7.3911111111111125e-06,
"loss": 0.0226,
"step": 1675
},
{
"epoch": 0.10279356633208368,
"grad_norm": 0.23249119520187378,
"learning_rate": 7.335555555555556e-06,
"loss": 0.0229,
"step": 1700
},
{
"epoch": 0.10430523642520256,
"grad_norm": 0.23204003274440765,
"learning_rate": 7.280000000000001e-06,
"loss": 0.022,
"step": 1725
},
{
"epoch": 0.10581690651832144,
"grad_norm": 0.23885048925876617,
"learning_rate": 7.224444444444445e-06,
"loss": 0.0223,
"step": 1750
},
{
"epoch": 0.10732857661144032,
"grad_norm": 0.2814381718635559,
"learning_rate": 7.1688888888888895e-06,
"loss": 0.0213,
"step": 1775
},
{
"epoch": 0.10884024670455919,
"grad_norm": 0.21481429040431976,
"learning_rate": 7.113333333333334e-06,
"loss": 0.0209,
"step": 1800
},
{
"epoch": 0.11035191679767807,
"grad_norm": 0.3092924654483795,
"learning_rate": 7.057777777777778e-06,
"loss": 0.0237,
"step": 1825
},
{
"epoch": 0.11186358689079695,
"grad_norm": 0.25033241510391235,
"learning_rate": 7.0022222222222225e-06,
"loss": 0.0212,
"step": 1850
},
{
"epoch": 0.11337525698391583,
"grad_norm": 0.20690016448497772,
"learning_rate": 6.946666666666667e-06,
"loss": 0.0226,
"step": 1875
},
{
"epoch": 0.11488692707703471,
"grad_norm": 0.216666117310524,
"learning_rate": 6.891111111111111e-06,
"loss": 0.0225,
"step": 1900
},
{
"epoch": 0.11639859717015359,
"grad_norm": 0.2532588243484497,
"learning_rate": 6.835555555555556e-06,
"loss": 0.0219,
"step": 1925
},
{
"epoch": 0.11791026726327246,
"grad_norm": 0.23438499867916107,
"learning_rate": 6.780000000000001e-06,
"loss": 0.0215,
"step": 1950
},
{
"epoch": 0.11942193735639134,
"grad_norm": 0.2752824127674103,
"learning_rate": 6.724444444444444e-06,
"loss": 0.0216,
"step": 1975
},
{
"epoch": 0.12093360744951022,
"grad_norm": 0.2573551535606384,
"learning_rate": 6.668888888888889e-06,
"loss": 0.0217,
"step": 2000
},
{
"epoch": 0.12093360744951022,
"eval_loss": 0.023528048768639565,
"eval_runtime": 13684.2124,
"eval_samples_per_second": 8.383,
"eval_steps_per_second": 0.524,
"eval_wer": 79.85773738006931,
"step": 2000
},
{
"epoch": 0.1224452775426291,
"grad_norm": 0.19551323354244232,
"learning_rate": 6.613333333333334e-06,
"loss": 0.0209,
"step": 2025
},
{
"epoch": 0.12395694763574798,
"grad_norm": 0.24706204235553741,
"learning_rate": 6.557777777777778e-06,
"loss": 0.0215,
"step": 2050
},
{
"epoch": 0.12546861772886686,
"grad_norm": 0.23044320940971375,
"learning_rate": 6.502222222222223e-06,
"loss": 0.0225,
"step": 2075
},
{
"epoch": 0.12698028782198573,
"grad_norm": 0.23613490164279938,
"learning_rate": 6.446666666666668e-06,
"loss": 0.021,
"step": 2100
},
{
"epoch": 0.12849195791510462,
"grad_norm": 0.21719467639923096,
"learning_rate": 6.391111111111111e-06,
"loss": 0.0221,
"step": 2125
},
{
"epoch": 0.1300036280082235,
"grad_norm": 0.22605890035629272,
"learning_rate": 6.335555555555556e-06,
"loss": 0.0212,
"step": 2150
},
{
"epoch": 0.13151529810134235,
"grad_norm": 0.27460452914237976,
"learning_rate": 6.280000000000001e-06,
"loss": 0.0241,
"step": 2175
},
{
"epoch": 0.13302696819446125,
"grad_norm": 0.26805680990219116,
"learning_rate": 6.224444444444445e-06,
"loss": 0.0215,
"step": 2200
},
{
"epoch": 0.13453863828758011,
"grad_norm": 0.2031622678041458,
"learning_rate": 6.16888888888889e-06,
"loss": 0.0211,
"step": 2225
},
{
"epoch": 0.136050308380699,
"grad_norm": 0.33984532952308655,
"learning_rate": 6.113333333333333e-06,
"loss": 0.0217,
"step": 2250
},
{
"epoch": 0.13756197847381788,
"grad_norm": 0.2332555651664734,
"learning_rate": 6.057777777777778e-06,
"loss": 0.0213,
"step": 2275
},
{
"epoch": 0.13907364856693674,
"grad_norm": 0.2189512699842453,
"learning_rate": 6.002222222222223e-06,
"loss": 0.0212,
"step": 2300
},
{
"epoch": 0.14058531866005564,
"grad_norm": 0.21263238787651062,
"learning_rate": 5.946666666666668e-06,
"loss": 0.0224,
"step": 2325
},
{
"epoch": 0.1420969887531745,
"grad_norm": 0.23778782784938812,
"learning_rate": 5.891111111111112e-06,
"loss": 0.0233,
"step": 2350
},
{
"epoch": 0.1436086588462934,
"grad_norm": 0.2161298543214798,
"learning_rate": 5.8355555555555565e-06,
"loss": 0.0222,
"step": 2375
},
{
"epoch": 0.14512032893941226,
"grad_norm": 0.22459618747234344,
"learning_rate": 5.78e-06,
"loss": 0.0216,
"step": 2400
},
{
"epoch": 0.14663199903253113,
"grad_norm": 0.24907240271568298,
"learning_rate": 5.724444444444445e-06,
"loss": 0.0217,
"step": 2425
},
{
"epoch": 0.14814366912565002,
"grad_norm": 0.2513597011566162,
"learning_rate": 5.6688888888888895e-06,
"loss": 0.0218,
"step": 2450
},
{
"epoch": 0.1496553392187689,
"grad_norm": 0.2687082588672638,
"learning_rate": 5.613333333333334e-06,
"loss": 0.0227,
"step": 2475
},
{
"epoch": 0.15116700931188778,
"grad_norm": 0.22070540487766266,
"learning_rate": 5.557777777777778e-06,
"loss": 0.0223,
"step": 2500
},
{
"epoch": 0.15267867940500665,
"grad_norm": 0.2508254945278168,
"learning_rate": 5.5022222222222224e-06,
"loss": 0.0205,
"step": 2525
},
{
"epoch": 0.15419034949812552,
"grad_norm": 0.22595292329788208,
"learning_rate": 5.4466666666666665e-06,
"loss": 0.0217,
"step": 2550
},
{
"epoch": 0.1557020195912444,
"grad_norm": 0.2362944632768631,
"learning_rate": 5.391111111111111e-06,
"loss": 0.0216,
"step": 2575
},
{
"epoch": 0.15721368968436328,
"grad_norm": 0.2035423070192337,
"learning_rate": 5.335555555555556e-06,
"loss": 0.0206,
"step": 2600
},
{
"epoch": 0.15872535977748217,
"grad_norm": 0.22915472090244293,
"learning_rate": 5.28e-06,
"loss": 0.0208,
"step": 2625
},
{
"epoch": 0.16023702987060104,
"grad_norm": 0.20232254266738892,
"learning_rate": 5.224444444444445e-06,
"loss": 0.0214,
"step": 2650
},
{
"epoch": 0.1617486999637199,
"grad_norm": 0.21948222815990448,
"learning_rate": 5.168888888888889e-06,
"loss": 0.0208,
"step": 2675
},
{
"epoch": 0.1632603700568388,
"grad_norm": 0.2563857138156891,
"learning_rate": 5.113333333333333e-06,
"loss": 0.0207,
"step": 2700
},
{
"epoch": 0.16477204014995767,
"grad_norm": 0.2101213037967682,
"learning_rate": 5.057777777777778e-06,
"loss": 0.0201,
"step": 2725
},
{
"epoch": 0.16628371024307656,
"grad_norm": 0.2433857023715973,
"learning_rate": 5.002222222222223e-06,
"loss": 0.021,
"step": 2750
},
{
"epoch": 0.16779538033619543,
"grad_norm": 0.21939025819301605,
"learning_rate": 4.946666666666667e-06,
"loss": 0.0228,
"step": 2775
},
{
"epoch": 0.1693070504293143,
"grad_norm": 0.21120359003543854,
"learning_rate": 4.891111111111111e-06,
"loss": 0.0225,
"step": 2800
},
{
"epoch": 0.1708187205224332,
"grad_norm": 0.19768129289150238,
"learning_rate": 4.835555555555556e-06,
"loss": 0.0206,
"step": 2825
},
{
"epoch": 0.17233039061555205,
"grad_norm": 0.2555268704891205,
"learning_rate": 4.78e-06,
"loss": 0.0217,
"step": 2850
},
{
"epoch": 0.17384206070867095,
"grad_norm": 0.2393975704908371,
"learning_rate": 4.724444444444445e-06,
"loss": 0.0204,
"step": 2875
},
{
"epoch": 0.17535373080178981,
"grad_norm": 0.23027943074703217,
"learning_rate": 4.66888888888889e-06,
"loss": 0.0214,
"step": 2900
},
{
"epoch": 0.1768654008949087,
"grad_norm": 0.2525699734687805,
"learning_rate": 4.613333333333334e-06,
"loss": 0.0206,
"step": 2925
},
{
"epoch": 0.17837707098802758,
"grad_norm": 0.2413048893213272,
"learning_rate": 4.557777777777778e-06,
"loss": 0.0189,
"step": 2950
},
{
"epoch": 0.17988874108114644,
"grad_norm": 0.2660181224346161,
"learning_rate": 4.502222222222223e-06,
"loss": 0.0214,
"step": 2975
},
{
"epoch": 0.18140041117426534,
"grad_norm": 0.21868810057640076,
"learning_rate": 4.446666666666667e-06,
"loss": 0.0221,
"step": 3000
},
{
"epoch": 0.18140041117426534,
"eval_loss": 0.02285671979188919,
"eval_runtime": 14051.4519,
"eval_samples_per_second": 8.164,
"eval_steps_per_second": 0.51,
"eval_wer": 79.02540528304515,
"step": 3000
},
{
"epoch": 0.1829120812673842,
"grad_norm": 0.23031719028949738,
"learning_rate": 4.391111111111112e-06,
"loss": 0.0206,
"step": 3025
},
{
"epoch": 0.1844237513605031,
"grad_norm": 0.26470455527305603,
"learning_rate": 4.3355555555555565e-06,
"loss": 0.021,
"step": 3050
},
{
"epoch": 0.18593542145362196,
"grad_norm": 0.2614658772945404,
"learning_rate": 4.2800000000000005e-06,
"loss": 0.0213,
"step": 3075
},
{
"epoch": 0.18744709154674083,
"grad_norm": 0.2713329792022705,
"learning_rate": 4.2244444444444446e-06,
"loss": 0.0207,
"step": 3100
},
{
"epoch": 0.18895876163985972,
"grad_norm": 0.20872850716114044,
"learning_rate": 4.168888888888889e-06,
"loss": 0.0216,
"step": 3125
},
{
"epoch": 0.1904704317329786,
"grad_norm": 0.22996503114700317,
"learning_rate": 4.1133333333333335e-06,
"loss": 0.0219,
"step": 3150
},
{
"epoch": 0.19198210182609748,
"grad_norm": 0.23216116428375244,
"learning_rate": 4.057777777777778e-06,
"loss": 0.0209,
"step": 3175
},
{
"epoch": 0.19349377191921635,
"grad_norm": 0.22368553280830383,
"learning_rate": 4.002222222222222e-06,
"loss": 0.0195,
"step": 3200
},
{
"epoch": 0.19500544201233522,
"grad_norm": 0.2579832971096039,
"learning_rate": 3.946666666666667e-06,
"loss": 0.0205,
"step": 3225
},
{
"epoch": 0.1965171121054541,
"grad_norm": 0.21672658622264862,
"learning_rate": 3.891111111111111e-06,
"loss": 0.0214,
"step": 3250
},
{
"epoch": 0.19802878219857298,
"grad_norm": 0.2356463521718979,
"learning_rate": 3.835555555555555e-06,
"loss": 0.0199,
"step": 3275
},
{
"epoch": 0.19954045229169187,
"grad_norm": 0.240219384431839,
"learning_rate": 3.7800000000000002e-06,
"loss": 0.0203,
"step": 3300
},
{
"epoch": 0.20105212238481074,
"grad_norm": 0.20598304271697998,
"learning_rate": 3.724444444444445e-06,
"loss": 0.0218,
"step": 3325
},
{
"epoch": 0.2025637924779296,
"grad_norm": 0.21276068687438965,
"learning_rate": 3.668888888888889e-06,
"loss": 0.0202,
"step": 3350
},
{
"epoch": 0.2040754625710485,
"grad_norm": 0.234477698802948,
"learning_rate": 3.6133333333333336e-06,
"loss": 0.0213,
"step": 3375
},
{
"epoch": 0.20558713266416737,
"grad_norm": 0.22812841832637787,
"learning_rate": 3.5577777777777785e-06,
"loss": 0.0193,
"step": 3400
},
{
"epoch": 0.20709880275728626,
"grad_norm": 0.2191910743713379,
"learning_rate": 3.5022222222222225e-06,
"loss": 0.0218,
"step": 3425
},
{
"epoch": 0.20861047285040513,
"grad_norm": 0.21586079895496368,
"learning_rate": 3.446666666666667e-06,
"loss": 0.022,
"step": 3450
},
{
"epoch": 0.210122142943524,
"grad_norm": 0.29229211807250977,
"learning_rate": 3.391111111111111e-06,
"loss": 0.0221,
"step": 3475
},
{
"epoch": 0.2116338130366429,
"grad_norm": 0.2535618245601654,
"learning_rate": 3.335555555555556e-06,
"loss": 0.0209,
"step": 3500
},
{
"epoch": 0.21314548312976175,
"grad_norm": 0.23379455506801605,
"learning_rate": 3.2800000000000004e-06,
"loss": 0.0215,
"step": 3525
},
{
"epoch": 0.21465715322288065,
"grad_norm": 0.20163771510124207,
"learning_rate": 3.2244444444444444e-06,
"loss": 0.0197,
"step": 3550
},
{
"epoch": 0.21616882331599951,
"grad_norm": 0.22687122225761414,
"learning_rate": 3.1688888888888893e-06,
"loss": 0.0202,
"step": 3575
},
{
"epoch": 0.21768049340911838,
"grad_norm": 0.2215571254491806,
"learning_rate": 3.1133333333333337e-06,
"loss": 0.0213,
"step": 3600
},
{
"epoch": 0.21919216350223727,
"grad_norm": 0.25743547081947327,
"learning_rate": 3.0577777777777778e-06,
"loss": 0.0194,
"step": 3625
},
{
"epoch": 0.22070383359535614,
"grad_norm": 0.23957869410514832,
"learning_rate": 3.0022222222222227e-06,
"loss": 0.02,
"step": 3650
},
{
"epoch": 0.22221550368847504,
"grad_norm": 0.24700717628002167,
"learning_rate": 2.946666666666667e-06,
"loss": 0.0212,
"step": 3675
},
{
"epoch": 0.2237271737815939,
"grad_norm": 0.2590341567993164,
"learning_rate": 2.891111111111111e-06,
"loss": 0.02,
"step": 3700
},
{
"epoch": 0.2252388438747128,
"grad_norm": 0.2064068615436554,
"learning_rate": 2.835555555555556e-06,
"loss": 0.0212,
"step": 3725
},
{
"epoch": 0.22675051396783166,
"grad_norm": 0.25424307584762573,
"learning_rate": 2.7800000000000005e-06,
"loss": 0.022,
"step": 3750
},
{
"epoch": 0.22826218406095053,
"grad_norm": 0.25873327255249023,
"learning_rate": 2.7244444444444445e-06,
"loss": 0.0211,
"step": 3775
},
{
"epoch": 0.22977385415406942,
"grad_norm": 0.2244931161403656,
"learning_rate": 2.6688888888888894e-06,
"loss": 0.021,
"step": 3800
},
{
"epoch": 0.2312855242471883,
"grad_norm": 0.23963423073291779,
"learning_rate": 2.6133333333333334e-06,
"loss": 0.0207,
"step": 3825
},
{
"epoch": 0.23279719434030718,
"grad_norm": 0.22690649330615997,
"learning_rate": 2.557777777777778e-06,
"loss": 0.0207,
"step": 3850
},
{
"epoch": 0.23430886443342605,
"grad_norm": 0.19450053572654724,
"learning_rate": 2.5022222222222224e-06,
"loss": 0.0191,
"step": 3875
},
{
"epoch": 0.23582053452654492,
"grad_norm": 0.23574170470237732,
"learning_rate": 2.446666666666667e-06,
"loss": 0.0206,
"step": 3900
},
{
"epoch": 0.2373322046196638,
"grad_norm": 0.21273107826709747,
"learning_rate": 2.3911111111111113e-06,
"loss": 0.0198,
"step": 3925
},
{
"epoch": 0.23884387471278268,
"grad_norm": 0.20970465242862701,
"learning_rate": 2.3355555555555557e-06,
"loss": 0.0191,
"step": 3950
},
{
"epoch": 0.24035554480590157,
"grad_norm": 0.2582346796989441,
"learning_rate": 2.28e-06,
"loss": 0.0223,
"step": 3975
},
{
"epoch": 0.24186721489902044,
"grad_norm": 0.25113871693611145,
"learning_rate": 2.2244444444444447e-06,
"loss": 0.0213,
"step": 4000
},
{
"epoch": 0.24186721489902044,
"eval_loss": 0.022349417209625244,
"eval_runtime": 12773.0468,
"eval_samples_per_second": 8.981,
"eval_steps_per_second": 0.561,
"eval_wer": 78.43598182103119,
"step": 4000
},
{
"epoch": 0.2433788849921393,
"grad_norm": 0.21822433173656464,
"learning_rate": 2.168888888888889e-06,
"loss": 0.0204,
"step": 4025
},
{
"epoch": 0.2448905550852582,
"grad_norm": 0.2285871058702469,
"learning_rate": 2.1133333333333336e-06,
"loss": 0.0203,
"step": 4050
},
{
"epoch": 0.24640222517837707,
"grad_norm": 0.28536123037338257,
"learning_rate": 2.057777777777778e-06,
"loss": 0.0217,
"step": 4075
},
{
"epoch": 0.24791389527149596,
"grad_norm": 0.24871906638145447,
"learning_rate": 2.0022222222222225e-06,
"loss": 0.0212,
"step": 4100
},
{
"epoch": 0.24942556536461483,
"grad_norm": 0.23168586194515228,
"learning_rate": 1.9466666666666665e-06,
"loss": 0.0194,
"step": 4125
},
{
"epoch": 0.2509372354577337,
"grad_norm": 0.26161977648735046,
"learning_rate": 1.8911111111111114e-06,
"loss": 0.0209,
"step": 4150
},
{
"epoch": 0.2524489055508526,
"grad_norm": 0.23529359698295593,
"learning_rate": 1.8355555555555557e-06,
"loss": 0.02,
"step": 4175
},
{
"epoch": 0.25396057564397145,
"grad_norm": 0.2294083535671234,
"learning_rate": 1.7800000000000001e-06,
"loss": 0.0196,
"step": 4200
},
{
"epoch": 0.2554722457370903,
"grad_norm": 0.237132266163826,
"learning_rate": 1.7244444444444448e-06,
"loss": 0.0203,
"step": 4225
},
{
"epoch": 0.25698391583020924,
"grad_norm": 0.2331571727991104,
"learning_rate": 1.668888888888889e-06,
"loss": 0.0203,
"step": 4250
},
{
"epoch": 0.2584955859233281,
"grad_norm": 0.22561855614185333,
"learning_rate": 1.6133333333333335e-06,
"loss": 0.0192,
"step": 4275
},
{
"epoch": 0.260007256016447,
"grad_norm": 0.22706526517868042,
"learning_rate": 1.5577777777777777e-06,
"loss": 0.0209,
"step": 4300
},
{
"epoch": 0.26151892610956584,
"grad_norm": 0.21990585327148438,
"learning_rate": 1.5022222222222224e-06,
"loss": 0.0203,
"step": 4325
},
{
"epoch": 0.2630305962026847,
"grad_norm": 0.21273785829544067,
"learning_rate": 1.4466666666666669e-06,
"loss": 0.0196,
"step": 4350
},
{
"epoch": 0.26454226629580363,
"grad_norm": 0.232136532664299,
"learning_rate": 1.3911111111111111e-06,
"loss": 0.0198,
"step": 4375
},
{
"epoch": 0.2660539363889225,
"grad_norm": 0.21890687942504883,
"learning_rate": 1.3355555555555558e-06,
"loss": 0.0211,
"step": 4400
},
{
"epoch": 0.26756560648204136,
"grad_norm": 0.2208709567785263,
"learning_rate": 1.28e-06,
"loss": 0.0213,
"step": 4425
},
{
"epoch": 0.26907727657516023,
"grad_norm": 0.20006310939788818,
"learning_rate": 1.2244444444444445e-06,
"loss": 0.0197,
"step": 4450
},
{
"epoch": 0.2705889466682791,
"grad_norm": 0.20703347027301788,
"learning_rate": 1.168888888888889e-06,
"loss": 0.02,
"step": 4475
},
{
"epoch": 0.272100616761398,
"grad_norm": 0.24182920157909393,
"learning_rate": 1.1133333333333334e-06,
"loss": 0.0215,
"step": 4500
},
{
"epoch": 0.2736122868545169,
"grad_norm": 0.2219599187374115,
"learning_rate": 1.0577777777777779e-06,
"loss": 0.0224,
"step": 4525
},
{
"epoch": 0.27512395694763575,
"grad_norm": 0.22244104743003845,
"learning_rate": 1.0022222222222223e-06,
"loss": 0.0198,
"step": 4550
},
{
"epoch": 0.2766356270407546,
"grad_norm": 0.23590952157974243,
"learning_rate": 9.466666666666667e-07,
"loss": 0.0211,
"step": 4575
},
{
"epoch": 0.2781472971338735,
"grad_norm": 0.19948144257068634,
"learning_rate": 8.911111111111112e-07,
"loss": 0.0209,
"step": 4600
},
{
"epoch": 0.2796589672269924,
"grad_norm": 0.2427368015050888,
"learning_rate": 8.355555555555556e-07,
"loss": 0.0197,
"step": 4625
},
{
"epoch": 0.28117063732011127,
"grad_norm": 0.20901180803775787,
"learning_rate": 7.8e-07,
"loss": 0.0209,
"step": 4650
},
{
"epoch": 0.28268230741323014,
"grad_norm": 0.2242818921804428,
"learning_rate": 7.244444444444446e-07,
"loss": 0.0193,
"step": 4675
},
{
"epoch": 0.284193977506349,
"grad_norm": 0.2646438181400299,
"learning_rate": 6.68888888888889e-07,
"loss": 0.0201,
"step": 4700
},
{
"epoch": 0.28570564759946787,
"grad_norm": 0.22899940609931946,
"learning_rate": 6.133333333333333e-07,
"loss": 0.0205,
"step": 4725
},
{
"epoch": 0.2872173176925868,
"grad_norm": 0.23318634927272797,
"learning_rate": 5.577777777777779e-07,
"loss": 0.0204,
"step": 4750
},
{
"epoch": 0.28872898778570566,
"grad_norm": 0.25688520073890686,
"learning_rate": 5.022222222222222e-07,
"loss": 0.0202,
"step": 4775
},
{
"epoch": 0.2902406578788245,
"grad_norm": 0.2177484780550003,
"learning_rate": 4.466666666666667e-07,
"loss": 0.0209,
"step": 4800
},
{
"epoch": 0.2917523279719434,
"grad_norm": 0.22160756587982178,
"learning_rate": 3.9111111111111115e-07,
"loss": 0.0205,
"step": 4825
},
{
"epoch": 0.29326399806506226,
"grad_norm": 0.18207120895385742,
"learning_rate": 3.3555555555555556e-07,
"loss": 0.0191,
"step": 4850
},
{
"epoch": 0.2947756681581812,
"grad_norm": 0.23712973296642303,
"learning_rate": 2.8e-07,
"loss": 0.0201,
"step": 4875
},
{
"epoch": 0.29628733825130005,
"grad_norm": 0.2736773192882538,
"learning_rate": 2.2444444444444445e-07,
"loss": 0.0208,
"step": 4900
},
{
"epoch": 0.2977990083444189,
"grad_norm": 0.22754475474357605,
"learning_rate": 1.6888888888888888e-07,
"loss": 0.0194,
"step": 4925
},
{
"epoch": 0.2993106784375378,
"grad_norm": 0.21516871452331543,
"learning_rate": 1.1333333333333336e-07,
"loss": 0.0205,
"step": 4950
},
{
"epoch": 0.30082234853065665,
"grad_norm": 0.21580274403095245,
"learning_rate": 5.777777777777778e-08,
"loss": 0.0204,
"step": 4975
},
{
"epoch": 0.30233401862377557,
"grad_norm": 0.2437376081943512,
"learning_rate": 2.2222222222222225e-09,
"loss": 0.0211,
"step": 5000
},
{
"epoch": 0.30233401862377557,
"eval_loss": 0.02213170751929283,
"eval_runtime": 12800.0788,
"eval_samples_per_second": 8.962,
"eval_steps_per_second": 0.56,
"eval_wer": 78.11993940343729,
"step": 5000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.61736640512e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}