{
"best_global_step": 3000,
"best_metric": 79.02540528304515,
"best_model_checkpoint": "./working_area/output_model/checkpoint-3000",
"epoch": 0.18140041117426534,
"eval_steps": 1000,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0015116700931188777,
"grad_norm": 10.39171028137207,
"learning_rate": 4.800000000000001e-07,
"loss": 0.2719,
"step": 25
},
{
"epoch": 0.0030233401862377555,
"grad_norm": 0.3984987735748291,
"learning_rate": 9.800000000000001e-07,
"loss": 0.0431,
"step": 50
},
{
"epoch": 0.004535010279356633,
"grad_norm": 0.2931273877620697,
"learning_rate": 1.48e-06,
"loss": 0.0297,
"step": 75
},
{
"epoch": 0.006046680372475511,
"grad_norm": 0.24546289443969727,
"learning_rate": 1.98e-06,
"loss": 0.027,
"step": 100
},
{
"epoch": 0.007558350465594389,
"grad_norm": 0.2551668584346771,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.0264,
"step": 125
},
{
"epoch": 0.009070020558713266,
"grad_norm": 0.22325266897678375,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.0258,
"step": 150
},
{
"epoch": 0.010581690651832143,
"grad_norm": 0.2603906989097595,
"learning_rate": 3.48e-06,
"loss": 0.0249,
"step": 175
},
{
"epoch": 0.012093360744951022,
"grad_norm": 0.24611017107963562,
"learning_rate": 3.980000000000001e-06,
"loss": 0.0246,
"step": 200
},
{
"epoch": 0.013605030838069899,
"grad_norm": 0.26433423161506653,
"learning_rate": 4.48e-06,
"loss": 0.0262,
"step": 225
},
{
"epoch": 0.015116700931188777,
"grad_norm": 0.2509951889514923,
"learning_rate": 4.980000000000001e-06,
"loss": 0.0263,
"step": 250
},
{
"epoch": 0.016628371024307656,
"grad_norm": 0.268083393573761,
"learning_rate": 5.480000000000001e-06,
"loss": 0.0255,
"step": 275
},
{
"epoch": 0.018140041117426533,
"grad_norm": 0.2258174866437912,
"learning_rate": 5.98e-06,
"loss": 0.0253,
"step": 300
},
{
"epoch": 0.01965171121054541,
"grad_norm": 0.24694132804870605,
"learning_rate": 6.480000000000001e-06,
"loss": 0.024,
"step": 325
},
{
"epoch": 0.021163381303664287,
"grad_norm": 0.24790944159030914,
"learning_rate": 6.98e-06,
"loss": 0.0256,
"step": 350
},
{
"epoch": 0.022675051396783167,
"grad_norm": 0.25042369961738586,
"learning_rate": 7.48e-06,
"loss": 0.0251,
"step": 375
},
{
"epoch": 0.024186721489902044,
"grad_norm": 0.2964789569377899,
"learning_rate": 7.980000000000002e-06,
"loss": 0.026,
"step": 400
},
{
"epoch": 0.02569839158302092,
"grad_norm": 0.2886582016944885,
"learning_rate": 8.48e-06,
"loss": 0.0261,
"step": 425
},
{
"epoch": 0.027210061676139798,
"grad_norm": 0.24788400530815125,
"learning_rate": 8.98e-06,
"loss": 0.0244,
"step": 450
},
{
"epoch": 0.028721731769258678,
"grad_norm": 0.25503554940223694,
"learning_rate": 9.48e-06,
"loss": 0.0237,
"step": 475
},
{
"epoch": 0.030233401862377555,
"grad_norm": 0.24687393009662628,
"learning_rate": 9.980000000000001e-06,
"loss": 0.0246,
"step": 500
},
{
"epoch": 0.03174507195549643,
"grad_norm": 0.2741446793079376,
"learning_rate": 9.946666666666667e-06,
"loss": 0.0254,
"step": 525
},
{
"epoch": 0.03325674204861531,
"grad_norm": 0.25302210450172424,
"learning_rate": 9.891111111111113e-06,
"loss": 0.0241,
"step": 550
},
{
"epoch": 0.034768412141734185,
"grad_norm": 0.24115385115146637,
"learning_rate": 9.835555555555556e-06,
"loss": 0.025,
"step": 575
},
{
"epoch": 0.036280082234853066,
"grad_norm": 0.24566683173179626,
"learning_rate": 9.780000000000001e-06,
"loss": 0.0241,
"step": 600
},
{
"epoch": 0.037791752327971946,
"grad_norm": 0.21123534440994263,
"learning_rate": 9.724444444444445e-06,
"loss": 0.0238,
"step": 625
},
{
"epoch": 0.03930342242109082,
"grad_norm": 0.23343868553638458,
"learning_rate": 9.66888888888889e-06,
"loss": 0.0234,
"step": 650
},
{
"epoch": 0.0408150925142097,
"grad_norm": 0.28975582122802734,
"learning_rate": 9.613333333333335e-06,
"loss": 0.0229,
"step": 675
},
{
"epoch": 0.04232676260732857,
"grad_norm": 0.24031095206737518,
"learning_rate": 9.557777777777777e-06,
"loss": 0.0236,
"step": 700
},
{
"epoch": 0.043838432700447454,
"grad_norm": 0.29754847288131714,
"learning_rate": 9.502222222222223e-06,
"loss": 0.0235,
"step": 725
},
{
"epoch": 0.045350102793566334,
"grad_norm": 0.25962579250335693,
"learning_rate": 9.446666666666667e-06,
"loss": 0.0242,
"step": 750
},
{
"epoch": 0.04686177288668521,
"grad_norm": 0.2572610378265381,
"learning_rate": 9.391111111111111e-06,
"loss": 0.0233,
"step": 775
},
{
"epoch": 0.04837344297980409,
"grad_norm": 0.24092882871627808,
"learning_rate": 9.335555555555557e-06,
"loss": 0.0227,
"step": 800
},
{
"epoch": 0.04988511307292297,
"grad_norm": 0.23399066925048828,
"learning_rate": 9.280000000000001e-06,
"loss": 0.0239,
"step": 825
},
{
"epoch": 0.05139678316604184,
"grad_norm": 0.26382818818092346,
"learning_rate": 9.224444444444445e-06,
"loss": 0.0233,
"step": 850
},
{
"epoch": 0.05290845325916072,
"grad_norm": 0.25903263688087463,
"learning_rate": 9.168888888888889e-06,
"loss": 0.0237,
"step": 875
},
{
"epoch": 0.054420123352279595,
"grad_norm": 0.21435964107513428,
"learning_rate": 9.113333333333335e-06,
"loss": 0.0224,
"step": 900
},
{
"epoch": 0.055931793445398476,
"grad_norm": 0.23641858994960785,
"learning_rate": 9.057777777777779e-06,
"loss": 0.0217,
"step": 925
},
{
"epoch": 0.057443463538517356,
"grad_norm": 0.24842330813407898,
"learning_rate": 9.002222222222223e-06,
"loss": 0.0234,
"step": 950
},
{
"epoch": 0.05895513363163623,
"grad_norm": 0.2286524474620819,
"learning_rate": 8.946666666666669e-06,
"loss": 0.0228,
"step": 975
},
{
"epoch": 0.06046680372475511,
"grad_norm": 0.24313467741012573,
"learning_rate": 8.891111111111111e-06,
"loss": 0.0219,
"step": 1000
},
{
"epoch": 0.06046680372475511,
"eval_loss": 0.02475658990442753,
"eval_runtime": 12970.4314,
"eval_samples_per_second": 8.844,
"eval_steps_per_second": 0.553,
"eval_wer": 81.0296191819464,
"step": 1000
},
{
"epoch": 0.06197847381787399,
"grad_norm": 0.21169662475585938,
"learning_rate": 8.835555555555557e-06,
"loss": 0.023,
"step": 1025
},
{
"epoch": 0.06349014391099286,
"grad_norm": 0.2406071424484253,
"learning_rate": 8.78e-06,
"loss": 0.0235,
"step": 1050
},
{
"epoch": 0.06500181400411174,
"grad_norm": 0.23690740764141083,
"learning_rate": 8.724444444444445e-06,
"loss": 0.0223,
"step": 1075
},
{
"epoch": 0.06651348409723062,
"grad_norm": 0.2665939927101135,
"learning_rate": 8.66888888888889e-06,
"loss": 0.0223,
"step": 1100
},
{
"epoch": 0.0680251541903495,
"grad_norm": 0.26822060346603394,
"learning_rate": 8.613333333333333e-06,
"loss": 0.0229,
"step": 1125
},
{
"epoch": 0.06953682428346837,
"grad_norm": 0.23397238552570343,
"learning_rate": 8.557777777777778e-06,
"loss": 0.0217,
"step": 1150
},
{
"epoch": 0.07104849437658725,
"grad_norm": 0.24292343854904175,
"learning_rate": 8.502222222222223e-06,
"loss": 0.0237,
"step": 1175
},
{
"epoch": 0.07256016446970613,
"grad_norm": 0.21283379197120667,
"learning_rate": 8.446666666666668e-06,
"loss": 0.0229,
"step": 1200
},
{
"epoch": 0.07407183456282501,
"grad_norm": 0.28053444623947144,
"learning_rate": 8.391111111111112e-06,
"loss": 0.0245,
"step": 1225
},
{
"epoch": 0.07558350465594389,
"grad_norm": 0.24319562315940857,
"learning_rate": 8.335555555555556e-06,
"loss": 0.0222,
"step": 1250
},
{
"epoch": 0.07709517474906276,
"grad_norm": 0.23033973574638367,
"learning_rate": 8.28e-06,
"loss": 0.0216,
"step": 1275
},
{
"epoch": 0.07860684484218164,
"grad_norm": 0.2720530927181244,
"learning_rate": 8.224444444444444e-06,
"loss": 0.023,
"step": 1300
},
{
"epoch": 0.08011851493530052,
"grad_norm": 0.27409815788269043,
"learning_rate": 8.16888888888889e-06,
"loss": 0.0228,
"step": 1325
},
{
"epoch": 0.0816301850284194,
"grad_norm": 0.28669819235801697,
"learning_rate": 8.113333333333334e-06,
"loss": 0.0228,
"step": 1350
},
{
"epoch": 0.08314185512153828,
"grad_norm": 0.24168507754802704,
"learning_rate": 8.057777777777778e-06,
"loss": 0.0233,
"step": 1375
},
{
"epoch": 0.08465352521465715,
"grad_norm": 0.2719112038612366,
"learning_rate": 8.002222222222222e-06,
"loss": 0.0211,
"step": 1400
},
{
"epoch": 0.08616519530777603,
"grad_norm": 0.2333834171295166,
"learning_rate": 7.946666666666666e-06,
"loss": 0.0222,
"step": 1425
},
{
"epoch": 0.08767686540089491,
"grad_norm": 0.2352941334247589,
"learning_rate": 7.891111111111112e-06,
"loss": 0.0228,
"step": 1450
},
{
"epoch": 0.08918853549401379,
"grad_norm": 0.258932888507843,
"learning_rate": 7.835555555555556e-06,
"loss": 0.0229,
"step": 1475
},
{
"epoch": 0.09070020558713267,
"grad_norm": 0.23196369409561157,
"learning_rate": 7.78e-06,
"loss": 0.0218,
"step": 1500
},
{
"epoch": 0.09221187568025155,
"grad_norm": 0.24886886775493622,
"learning_rate": 7.724444444444446e-06,
"loss": 0.0224,
"step": 1525
},
{
"epoch": 0.09372354577337041,
"grad_norm": 0.2534898817539215,
"learning_rate": 7.66888888888889e-06,
"loss": 0.0213,
"step": 1550
},
{
"epoch": 0.0952352158664893,
"grad_norm": 0.24856404960155487,
"learning_rate": 7.613333333333334e-06,
"loss": 0.0217,
"step": 1575
},
{
"epoch": 0.09674688595960818,
"grad_norm": 0.2615777552127838,
"learning_rate": 7.557777777777779e-06,
"loss": 0.0209,
"step": 1600
},
{
"epoch": 0.09825855605272706,
"grad_norm": 0.293625146150589,
"learning_rate": 7.502222222222223e-06,
"loss": 0.0232,
"step": 1625
},
{
"epoch": 0.09977022614584594,
"grad_norm": 0.21186749637126923,
"learning_rate": 7.446666666666668e-06,
"loss": 0.0214,
"step": 1650
},
{
"epoch": 0.1012818962389648,
"grad_norm": 0.2184201329946518,
"learning_rate": 7.3911111111111125e-06,
"loss": 0.0226,
"step": 1675
},
{
"epoch": 0.10279356633208368,
"grad_norm": 0.23249119520187378,
"learning_rate": 7.335555555555556e-06,
"loss": 0.0229,
"step": 1700
},
{
"epoch": 0.10430523642520256,
"grad_norm": 0.23204003274440765,
"learning_rate": 7.280000000000001e-06,
"loss": 0.022,
"step": 1725
},
{
"epoch": 0.10581690651832144,
"grad_norm": 0.23885048925876617,
"learning_rate": 7.224444444444445e-06,
"loss": 0.0223,
"step": 1750
},
{
"epoch": 0.10732857661144032,
"grad_norm": 0.2814381718635559,
"learning_rate": 7.1688888888888895e-06,
"loss": 0.0213,
"step": 1775
},
{
"epoch": 0.10884024670455919,
"grad_norm": 0.21481429040431976,
"learning_rate": 7.113333333333334e-06,
"loss": 0.0209,
"step": 1800
},
{
"epoch": 0.11035191679767807,
"grad_norm": 0.3092924654483795,
"learning_rate": 7.057777777777778e-06,
"loss": 0.0237,
"step": 1825
},
{
"epoch": 0.11186358689079695,
"grad_norm": 0.25033241510391235,
"learning_rate": 7.0022222222222225e-06,
"loss": 0.0212,
"step": 1850
},
{
"epoch": 0.11337525698391583,
"grad_norm": 0.20690016448497772,
"learning_rate": 6.946666666666667e-06,
"loss": 0.0226,
"step": 1875
},
{
"epoch": 0.11488692707703471,
"grad_norm": 0.216666117310524,
"learning_rate": 6.891111111111111e-06,
"loss": 0.0225,
"step": 1900
},
{
"epoch": 0.11639859717015359,
"grad_norm": 0.2532588243484497,
"learning_rate": 6.835555555555556e-06,
"loss": 0.0219,
"step": 1925
},
{
"epoch": 0.11791026726327246,
"grad_norm": 0.23438499867916107,
"learning_rate": 6.780000000000001e-06,
"loss": 0.0215,
"step": 1950
},
{
"epoch": 0.11942193735639134,
"grad_norm": 0.2752824127674103,
"learning_rate": 6.724444444444444e-06,
"loss": 0.0216,
"step": 1975
},
{
"epoch": 0.12093360744951022,
"grad_norm": 0.2573551535606384,
"learning_rate": 6.668888888888889e-06,
"loss": 0.0217,
"step": 2000
},
{
"epoch": 0.12093360744951022,
"eval_loss": 0.023528048768639565,
"eval_runtime": 13684.2124,
"eval_samples_per_second": 8.383,
"eval_steps_per_second": 0.524,
"eval_wer": 79.85773738006931,
"step": 2000
},
{
"epoch": 0.1224452775426291,
"grad_norm": 0.19551323354244232,
"learning_rate": 6.613333333333334e-06,
"loss": 0.0209,
"step": 2025
},
{
"epoch": 0.12395694763574798,
"grad_norm": 0.24706204235553741,
"learning_rate": 6.557777777777778e-06,
"loss": 0.0215,
"step": 2050
},
{
"epoch": 0.12546861772886686,
"grad_norm": 0.23044320940971375,
"learning_rate": 6.502222222222223e-06,
"loss": 0.0225,
"step": 2075
},
{
"epoch": 0.12698028782198573,
"grad_norm": 0.23613490164279938,
"learning_rate": 6.446666666666668e-06,
"loss": 0.021,
"step": 2100
},
{
"epoch": 0.12849195791510462,
"grad_norm": 0.21719467639923096,
"learning_rate": 6.391111111111111e-06,
"loss": 0.0221,
"step": 2125
},
{
"epoch": 0.1300036280082235,
"grad_norm": 0.22605890035629272,
"learning_rate": 6.335555555555556e-06,
"loss": 0.0212,
"step": 2150
},
{
"epoch": 0.13151529810134235,
"grad_norm": 0.27460452914237976,
"learning_rate": 6.280000000000001e-06,
"loss": 0.0241,
"step": 2175
},
{
"epoch": 0.13302696819446125,
"grad_norm": 0.26805680990219116,
"learning_rate": 6.224444444444445e-06,
"loss": 0.0215,
"step": 2200
},
{
"epoch": 0.13453863828758011,
"grad_norm": 0.2031622678041458,
"learning_rate": 6.16888888888889e-06,
"loss": 0.0211,
"step": 2225
},
{
"epoch": 0.136050308380699,
"grad_norm": 0.33984532952308655,
"learning_rate": 6.113333333333333e-06,
"loss": 0.0217,
"step": 2250
},
{
"epoch": 0.13756197847381788,
"grad_norm": 0.2332555651664734,
"learning_rate": 6.057777777777778e-06,
"loss": 0.0213,
"step": 2275
},
{
"epoch": 0.13907364856693674,
"grad_norm": 0.2189512699842453,
"learning_rate": 6.002222222222223e-06,
"loss": 0.0212,
"step": 2300
},
{
"epoch": 0.14058531866005564,
"grad_norm": 0.21263238787651062,
"learning_rate": 5.946666666666668e-06,
"loss": 0.0224,
"step": 2325
},
{
"epoch": 0.1420969887531745,
"grad_norm": 0.23778782784938812,
"learning_rate": 5.891111111111112e-06,
"loss": 0.0233,
"step": 2350
},
{
"epoch": 0.1436086588462934,
"grad_norm": 0.2161298543214798,
"learning_rate": 5.8355555555555565e-06,
"loss": 0.0222,
"step": 2375
},
{
"epoch": 0.14512032893941226,
"grad_norm": 0.22459618747234344,
"learning_rate": 5.78e-06,
"loss": 0.0216,
"step": 2400
},
{
"epoch": 0.14663199903253113,
"grad_norm": 0.24907240271568298,
"learning_rate": 5.724444444444445e-06,
"loss": 0.0217,
"step": 2425
},
{
"epoch": 0.14814366912565002,
"grad_norm": 0.2513597011566162,
"learning_rate": 5.6688888888888895e-06,
"loss": 0.0218,
"step": 2450
},
{
"epoch": 0.1496553392187689,
"grad_norm": 0.2687082588672638,
"learning_rate": 5.613333333333334e-06,
"loss": 0.0227,
"step": 2475
},
{
"epoch": 0.15116700931188778,
"grad_norm": 0.22070540487766266,
"learning_rate": 5.557777777777778e-06,
"loss": 0.0223,
"step": 2500
},
{
"epoch": 0.15267867940500665,
"grad_norm": 0.2508254945278168,
"learning_rate": 5.5022222222222224e-06,
"loss": 0.0205,
"step": 2525
},
{
"epoch": 0.15419034949812552,
"grad_norm": 0.22595292329788208,
"learning_rate": 5.4466666666666665e-06,
"loss": 0.0217,
"step": 2550
},
{
"epoch": 0.1557020195912444,
"grad_norm": 0.2362944632768631,
"learning_rate": 5.391111111111111e-06,
"loss": 0.0216,
"step": 2575
},
{
"epoch": 0.15721368968436328,
"grad_norm": 0.2035423070192337,
"learning_rate": 5.335555555555556e-06,
"loss": 0.0206,
"step": 2600
},
{
"epoch": 0.15872535977748217,
"grad_norm": 0.22915472090244293,
"learning_rate": 5.28e-06,
"loss": 0.0208,
"step": 2625
},
{
"epoch": 0.16023702987060104,
"grad_norm": 0.20232254266738892,
"learning_rate": 5.224444444444445e-06,
"loss": 0.0214,
"step": 2650
},
{
"epoch": 0.1617486999637199,
"grad_norm": 0.21948222815990448,
"learning_rate": 5.168888888888889e-06,
"loss": 0.0208,
"step": 2675
},
{
"epoch": 0.1632603700568388,
"grad_norm": 0.2563857138156891,
"learning_rate": 5.113333333333333e-06,
"loss": 0.0207,
"step": 2700
},
{
"epoch": 0.16477204014995767,
"grad_norm": 0.2101213037967682,
"learning_rate": 5.057777777777778e-06,
"loss": 0.0201,
"step": 2725
},
{
"epoch": 0.16628371024307656,
"grad_norm": 0.2433857023715973,
"learning_rate": 5.002222222222223e-06,
"loss": 0.021,
"step": 2750
},
{
"epoch": 0.16779538033619543,
"grad_norm": 0.21939025819301605,
"learning_rate": 4.946666666666667e-06,
"loss": 0.0228,
"step": 2775
},
{
"epoch": 0.1693070504293143,
"grad_norm": 0.21120359003543854,
"learning_rate": 4.891111111111111e-06,
"loss": 0.0225,
"step": 2800
},
{
"epoch": 0.1708187205224332,
"grad_norm": 0.19768129289150238,
"learning_rate": 4.835555555555556e-06,
"loss": 0.0206,
"step": 2825
},
{
"epoch": 0.17233039061555205,
"grad_norm": 0.2555268704891205,
"learning_rate": 4.78e-06,
"loss": 0.0217,
"step": 2850
},
{
"epoch": 0.17384206070867095,
"grad_norm": 0.2393975704908371,
"learning_rate": 4.724444444444445e-06,
"loss": 0.0204,
"step": 2875
},
{
"epoch": 0.17535373080178981,
"grad_norm": 0.23027943074703217,
"learning_rate": 4.66888888888889e-06,
"loss": 0.0214,
"step": 2900
},
{
"epoch": 0.1768654008949087,
"grad_norm": 0.2525699734687805,
"learning_rate": 4.613333333333334e-06,
"loss": 0.0206,
"step": 2925
},
{
"epoch": 0.17837707098802758,
"grad_norm": 0.2413048893213272,
"learning_rate": 4.557777777777778e-06,
"loss": 0.0189,
"step": 2950
},
{
"epoch": 0.17988874108114644,
"grad_norm": 0.2660181224346161,
"learning_rate": 4.502222222222223e-06,
"loss": 0.0214,
"step": 2975
},
{
"epoch": 0.18140041117426534,
"grad_norm": 0.21868810057640076,
"learning_rate": 4.446666666666667e-06,
"loss": 0.0221,
"step": 3000
},
{
"epoch": 0.18140041117426534,
"eval_loss": 0.02285671979188919,
"eval_runtime": 14051.4519,
"eval_samples_per_second": 8.164,
"eval_steps_per_second": 0.51,
"eval_wer": 79.02540528304515,
"step": 3000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.770419843072e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}