{
"best_global_step": 1000,
"best_metric": 83.2001826067108,
"best_model_checkpoint": "./working_area/output_model/checkpoint-1000",
"epoch": 5.424954792043399,
"eval_steps": 1000,
"global_step": 3000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.045207956600361664,
"grad_norm": 11.010286331176758,
"learning_rate": 4.800000000000001e-07,
"loss": 0.2809,
"step": 25
},
{
"epoch": 0.09041591320072333,
"grad_norm": 0.42908957600593567,
"learning_rate": 9.800000000000001e-07,
"loss": 0.0471,
"step": 50
},
{
"epoch": 0.13562386980108498,
"grad_norm": 0.30389493703842163,
"learning_rate": 1.48e-06,
"loss": 0.0313,
"step": 75
},
{
"epoch": 0.18083182640144665,
"grad_norm": 0.24248044192790985,
"learning_rate": 1.98e-06,
"loss": 0.027,
"step": 100
},
{
"epoch": 0.22603978300180833,
"grad_norm": 0.32717078924179077,
"learning_rate": 2.4800000000000004e-06,
"loss": 0.0282,
"step": 125
},
{
"epoch": 0.27124773960216997,
"grad_norm": 0.2853568494319916,
"learning_rate": 2.9800000000000003e-06,
"loss": 0.0271,
"step": 150
},
{
"epoch": 0.31645569620253167,
"grad_norm": 0.2582108974456787,
"learning_rate": 3.48e-06,
"loss": 0.0278,
"step": 175
},
{
"epoch": 0.3616636528028933,
"grad_norm": 0.28241080045700073,
"learning_rate": 3.980000000000001e-06,
"loss": 0.0267,
"step": 200
},
{
"epoch": 0.40687160940325495,
"grad_norm": 0.2449728548526764,
"learning_rate": 4.48e-06,
"loss": 0.0256,
"step": 225
},
{
"epoch": 0.45207956600361665,
"grad_norm": 0.29675620794296265,
"learning_rate": 4.980000000000001e-06,
"loss": 0.0266,
"step": 250
},
{
"epoch": 0.4972875226039783,
"grad_norm": 0.23451566696166992,
"learning_rate": 5.480000000000001e-06,
"loss": 0.0251,
"step": 275
},
{
"epoch": 0.5424954792043399,
"grad_norm": 0.2808288633823395,
"learning_rate": 5.98e-06,
"loss": 0.0253,
"step": 300
},
{
"epoch": 0.5877034358047016,
"grad_norm": 0.25567078590393066,
"learning_rate": 6.480000000000001e-06,
"loss": 0.0253,
"step": 325
},
{
"epoch": 0.6329113924050633,
"grad_norm": 0.31116265058517456,
"learning_rate": 6.98e-06,
"loss": 0.0243,
"step": 350
},
{
"epoch": 0.6781193490054249,
"grad_norm": 0.30322587490081787,
"learning_rate": 7.48e-06,
"loss": 0.0255,
"step": 375
},
{
"epoch": 0.7233273056057866,
"grad_norm": 0.22659313678741455,
"learning_rate": 7.980000000000002e-06,
"loss": 0.0238,
"step": 400
},
{
"epoch": 0.7685352622061483,
"grad_norm": 0.2599071264266968,
"learning_rate": 8.48e-06,
"loss": 0.0242,
"step": 425
},
{
"epoch": 0.8137432188065099,
"grad_norm": 0.2490476816892624,
"learning_rate": 8.98e-06,
"loss": 0.0239,
"step": 450
},
{
"epoch": 0.8589511754068716,
"grad_norm": 0.23308442533016205,
"learning_rate": 9.48e-06,
"loss": 0.0245,
"step": 475
},
{
"epoch": 0.9041591320072333,
"grad_norm": 0.27857279777526855,
"learning_rate": 9.980000000000001e-06,
"loss": 0.0228,
"step": 500
},
{
"epoch": 0.9493670886075949,
"grad_norm": 0.246293306350708,
"learning_rate": 9.946666666666667e-06,
"loss": 0.0238,
"step": 525
},
{
"epoch": 0.9945750452079566,
"grad_norm": 0.2526630163192749,
"learning_rate": 9.891111111111113e-06,
"loss": 0.0232,
"step": 550
},
{
"epoch": 1.0397830018083183,
"grad_norm": 0.24326889216899872,
"learning_rate": 9.835555555555556e-06,
"loss": 0.019,
"step": 575
},
{
"epoch": 1.0849909584086799,
"grad_norm": 0.29082971811294556,
"learning_rate": 9.780000000000001e-06,
"loss": 0.0202,
"step": 600
},
{
"epoch": 1.1301989150090417,
"grad_norm": 0.24304933845996857,
"learning_rate": 9.724444444444445e-06,
"loss": 0.0186,
"step": 625
},
{
"epoch": 1.1754068716094033,
"grad_norm": 0.235320582985878,
"learning_rate": 9.66888888888889e-06,
"loss": 0.0202,
"step": 650
},
{
"epoch": 1.2206148282097649,
"grad_norm": 0.30403777956962585,
"learning_rate": 9.613333333333335e-06,
"loss": 0.0188,
"step": 675
},
{
"epoch": 1.2658227848101267,
"grad_norm": 0.25596335530281067,
"learning_rate": 9.557777777777777e-06,
"loss": 0.0194,
"step": 700
},
{
"epoch": 1.3110307414104883,
"grad_norm": 0.2653414011001587,
"learning_rate": 9.502222222222223e-06,
"loss": 0.0199,
"step": 725
},
{
"epoch": 1.3562386980108498,
"grad_norm": 0.21654127538204193,
"learning_rate": 9.446666666666667e-06,
"loss": 0.0194,
"step": 750
},
{
"epoch": 1.4014466546112117,
"grad_norm": 0.2140333354473114,
"learning_rate": 9.391111111111111e-06,
"loss": 0.0204,
"step": 775
},
{
"epoch": 1.4466546112115732,
"grad_norm": 0.22869764268398285,
"learning_rate": 9.335555555555557e-06,
"loss": 0.0196,
"step": 800
},
{
"epoch": 1.4918625678119348,
"grad_norm": 0.20432953536510468,
"learning_rate": 9.280000000000001e-06,
"loss": 0.0187,
"step": 825
},
{
"epoch": 1.5370705244122966,
"grad_norm": 0.24801786243915558,
"learning_rate": 9.224444444444445e-06,
"loss": 0.0188,
"step": 850
},
{
"epoch": 1.5822784810126582,
"grad_norm": 0.21581323444843292,
"learning_rate": 9.168888888888889e-06,
"loss": 0.0202,
"step": 875
},
{
"epoch": 1.6274864376130198,
"grad_norm": 0.2703355848789215,
"learning_rate": 9.113333333333335e-06,
"loss": 0.02,
"step": 900
},
{
"epoch": 1.6726943942133814,
"grad_norm": 0.25052380561828613,
"learning_rate": 9.057777777777779e-06,
"loss": 0.0209,
"step": 925
},
{
"epoch": 1.7179023508137432,
"grad_norm": 0.2611878216266632,
"learning_rate": 9.002222222222223e-06,
"loss": 0.0191,
"step": 950
},
{
"epoch": 1.763110307414105,
"grad_norm": 0.22690066695213318,
"learning_rate": 8.946666666666669e-06,
"loss": 0.0181,
"step": 975
},
{
"epoch": 1.8083182640144666,
"grad_norm": 0.2346459925174713,
"learning_rate": 8.891111111111111e-06,
"loss": 0.0208,
"step": 1000
},
{
"epoch": 1.8083182640144666,
"eval_loss": 0.026504401117563248,
"eval_runtime": 585.8134,
"eval_samples_per_second": 7.467,
"eval_steps_per_second": 0.468,
"eval_wer": 83.2001826067108,
"step": 1000
},
{
"epoch": 1.8535262206148282,
"grad_norm": 0.20547257363796234,
"learning_rate": 8.835555555555557e-06,
"loss": 0.0198,
"step": 1025
},
{
"epoch": 1.8987341772151898,
"grad_norm": 0.2572881281375885,
"learning_rate": 8.78e-06,
"loss": 0.0198,
"step": 1050
},
{
"epoch": 1.9439421338155516,
"grad_norm": 0.2234957218170166,
"learning_rate": 8.724444444444445e-06,
"loss": 0.0192,
"step": 1075
},
{
"epoch": 1.9891500904159132,
"grad_norm": 0.23254449665546417,
"learning_rate": 8.66888888888889e-06,
"loss": 0.0184,
"step": 1100
},
{
"epoch": 2.034358047016275,
"grad_norm": 0.17144112288951874,
"learning_rate": 8.613333333333333e-06,
"loss": 0.015,
"step": 1125
},
{
"epoch": 2.0795660036166366,
"grad_norm": 0.2235105186700821,
"learning_rate": 8.557777777777778e-06,
"loss": 0.0149,
"step": 1150
},
{
"epoch": 2.124773960216998,
"grad_norm": 0.17870232462882996,
"learning_rate": 8.502222222222223e-06,
"loss": 0.0138,
"step": 1175
},
{
"epoch": 2.1699819168173597,
"grad_norm": 0.17442700266838074,
"learning_rate": 8.446666666666668e-06,
"loss": 0.0136,
"step": 1200
},
{
"epoch": 2.2151898734177213,
"grad_norm": 0.18573014438152313,
"learning_rate": 8.391111111111112e-06,
"loss": 0.0143,
"step": 1225
},
{
"epoch": 2.2603978300180834,
"grad_norm": 0.21831056475639343,
"learning_rate": 8.335555555555556e-06,
"loss": 0.0145,
"step": 1250
},
{
"epoch": 2.305605786618445,
"grad_norm": 0.19887284934520721,
"learning_rate": 8.28e-06,
"loss": 0.0133,
"step": 1275
},
{
"epoch": 2.3508137432188065,
"grad_norm": 0.18920965492725372,
"learning_rate": 8.224444444444444e-06,
"loss": 0.0142,
"step": 1300
},
{
"epoch": 2.396021699819168,
"grad_norm": 0.20874938368797302,
"learning_rate": 8.16888888888889e-06,
"loss": 0.0137,
"step": 1325
},
{
"epoch": 2.4412296564195297,
"grad_norm": 0.24395205080509186,
"learning_rate": 8.113333333333334e-06,
"loss": 0.0132,
"step": 1350
},
{
"epoch": 2.4864376130198913,
"grad_norm": 0.1873113512992859,
"learning_rate": 8.057777777777778e-06,
"loss": 0.0132,
"step": 1375
},
{
"epoch": 2.5316455696202533,
"grad_norm": 0.20954665541648865,
"learning_rate": 8.002222222222222e-06,
"loss": 0.0149,
"step": 1400
},
{
"epoch": 2.576853526220615,
"grad_norm": 0.2077728658914566,
"learning_rate": 7.946666666666666e-06,
"loss": 0.0134,
"step": 1425
},
{
"epoch": 2.6220614828209765,
"grad_norm": 0.1829787939786911,
"learning_rate": 7.891111111111112e-06,
"loss": 0.0137,
"step": 1450
},
{
"epoch": 2.667269439421338,
"grad_norm": 0.19568945467472076,
"learning_rate": 7.835555555555556e-06,
"loss": 0.0137,
"step": 1475
},
{
"epoch": 2.7124773960216997,
"grad_norm": 0.22679755091667175,
"learning_rate": 7.78e-06,
"loss": 0.0144,
"step": 1500
},
{
"epoch": 2.7576853526220617,
"grad_norm": 0.20547282695770264,
"learning_rate": 7.724444444444446e-06,
"loss": 0.0145,
"step": 1525
},
{
"epoch": 2.8028933092224233,
"grad_norm": 0.19972319900989532,
"learning_rate": 7.66888888888889e-06,
"loss": 0.0133,
"step": 1550
},
{
"epoch": 2.848101265822785,
"grad_norm": 0.19863390922546387,
"learning_rate": 7.613333333333334e-06,
"loss": 0.0151,
"step": 1575
},
{
"epoch": 2.8933092224231465,
"grad_norm": 0.20162835717201233,
"learning_rate": 7.557777777777779e-06,
"loss": 0.0141,
"step": 1600
},
{
"epoch": 2.938517179023508,
"grad_norm": 0.1872899830341339,
"learning_rate": 7.502222222222223e-06,
"loss": 0.014,
"step": 1625
},
{
"epoch": 2.9837251356238697,
"grad_norm": 0.2120290845632553,
"learning_rate": 7.446666666666668e-06,
"loss": 0.014,
"step": 1650
},
{
"epoch": 3.0289330922242317,
"grad_norm": 0.16408328711986542,
"learning_rate": 7.3911111111111125e-06,
"loss": 0.0114,
"step": 1675
},
{
"epoch": 3.0741410488245933,
"grad_norm": 0.18837198615074158,
"learning_rate": 7.335555555555556e-06,
"loss": 0.0093,
"step": 1700
},
{
"epoch": 3.119349005424955,
"grad_norm": 0.18760152161121368,
"learning_rate": 7.280000000000001e-06,
"loss": 0.0094,
"step": 1725
},
{
"epoch": 3.1645569620253164,
"grad_norm": 0.1616961508989334,
"learning_rate": 7.224444444444445e-06,
"loss": 0.0101,
"step": 1750
},
{
"epoch": 3.209764918625678,
"grad_norm": 0.18492265045642853,
"learning_rate": 7.1688888888888895e-06,
"loss": 0.0104,
"step": 1775
},
{
"epoch": 3.2549728752260396,
"grad_norm": 0.17340563237667084,
"learning_rate": 7.113333333333334e-06,
"loss": 0.0098,
"step": 1800
},
{
"epoch": 3.3001808318264017,
"grad_norm": 0.1744614690542221,
"learning_rate": 7.057777777777778e-06,
"loss": 0.0094,
"step": 1825
},
{
"epoch": 3.3453887884267632,
"grad_norm": 0.1791224479675293,
"learning_rate": 7.0022222222222225e-06,
"loss": 0.0099,
"step": 1850
},
{
"epoch": 3.390596745027125,
"grad_norm": 0.1386963427066803,
"learning_rate": 6.946666666666667e-06,
"loss": 0.0099,
"step": 1875
},
{
"epoch": 3.4358047016274864,
"grad_norm": 0.18395636975765228,
"learning_rate": 6.891111111111111e-06,
"loss": 0.0097,
"step": 1900
},
{
"epoch": 3.481012658227848,
"grad_norm": 0.19020919501781464,
"learning_rate": 6.835555555555556e-06,
"loss": 0.0102,
"step": 1925
},
{
"epoch": 3.52622061482821,
"grad_norm": 0.17320391535758972,
"learning_rate": 6.780000000000001e-06,
"loss": 0.0097,
"step": 1950
},
{
"epoch": 3.571428571428571,
"grad_norm": 0.179366797208786,
"learning_rate": 6.724444444444444e-06,
"loss": 0.0101,
"step": 1975
},
{
"epoch": 3.616636528028933,
"grad_norm": 0.21282289922237396,
"learning_rate": 6.668888888888889e-06,
"loss": 0.0098,
"step": 2000
},
{
"epoch": 3.616636528028933,
"eval_loss": 0.02841034159064293,
"eval_runtime": 558.8068,
"eval_samples_per_second": 7.827,
"eval_steps_per_second": 0.49,
"eval_wer": 83.83930609449898,
"step": 2000
},
{
"epoch": 3.661844484629295,
"grad_norm": 0.19607700407505035,
"learning_rate": 6.613333333333334e-06,
"loss": 0.0094,
"step": 2025
},
{
"epoch": 3.7070524412296564,
"grad_norm": 0.2073734551668167,
"learning_rate": 6.557777777777778e-06,
"loss": 0.0103,
"step": 2050
},
{
"epoch": 3.752260397830018,
"grad_norm": 0.16625015437602997,
"learning_rate": 6.502222222222223e-06,
"loss": 0.0094,
"step": 2075
},
{
"epoch": 3.7974683544303796,
"grad_norm": 0.17772376537322998,
"learning_rate": 6.446666666666668e-06,
"loss": 0.0096,
"step": 2100
},
{
"epoch": 3.8426763110307416,
"grad_norm": 0.19553086161613464,
"learning_rate": 6.391111111111111e-06,
"loss": 0.0094,
"step": 2125
},
{
"epoch": 3.887884267631103,
"grad_norm": 0.1660359501838684,
"learning_rate": 6.335555555555556e-06,
"loss": 0.0097,
"step": 2150
},
{
"epoch": 3.9330922242314648,
"grad_norm": 0.1923891305923462,
"learning_rate": 6.280000000000001e-06,
"loss": 0.0093,
"step": 2175
},
{
"epoch": 3.9783001808318263,
"grad_norm": 0.22166165709495544,
"learning_rate": 6.224444444444445e-06,
"loss": 0.01,
"step": 2200
},
{
"epoch": 4.023508137432188,
"grad_norm": 0.13340774178504944,
"learning_rate": 6.16888888888889e-06,
"loss": 0.0082,
"step": 2225
},
{
"epoch": 4.06871609403255,
"grad_norm": 0.15644919872283936,
"learning_rate": 6.113333333333333e-06,
"loss": 0.0063,
"step": 2250
},
{
"epoch": 4.113924050632911,
"grad_norm": 0.1358548253774643,
"learning_rate": 6.057777777777778e-06,
"loss": 0.0064,
"step": 2275
},
{
"epoch": 4.159132007233273,
"grad_norm": 0.1385568082332611,
"learning_rate": 6.002222222222223e-06,
"loss": 0.0063,
"step": 2300
},
{
"epoch": 4.204339963833634,
"grad_norm": 0.12219341844320297,
"learning_rate": 5.946666666666668e-06,
"loss": 0.0059,
"step": 2325
},
{
"epoch": 4.249547920433996,
"grad_norm": 0.18871645629405975,
"learning_rate": 5.891111111111112e-06,
"loss": 0.0068,
"step": 2350
},
{
"epoch": 4.294755877034358,
"grad_norm": 0.1760212779045105,
"learning_rate": 5.8355555555555565e-06,
"loss": 0.0065,
"step": 2375
},
{
"epoch": 4.3399638336347195,
"grad_norm": 0.1436540186405182,
"learning_rate": 5.78e-06,
"loss": 0.0061,
"step": 2400
},
{
"epoch": 4.3851717902350815,
"grad_norm": 0.18262667953968048,
"learning_rate": 5.724444444444445e-06,
"loss": 0.0067,
"step": 2425
},
{
"epoch": 4.430379746835443,
"grad_norm": 0.156539648771286,
"learning_rate": 5.6688888888888895e-06,
"loss": 0.0069,
"step": 2450
},
{
"epoch": 4.475587703435805,
"grad_norm": 0.16270945966243744,
"learning_rate": 5.613333333333334e-06,
"loss": 0.0061,
"step": 2475
},
{
"epoch": 4.520795660036167,
"grad_norm": 0.13660362362861633,
"learning_rate": 5.557777777777778e-06,
"loss": 0.0065,
"step": 2500
},
{
"epoch": 4.566003616636528,
"grad_norm": 0.17038831114768982,
"learning_rate": 5.5022222222222224e-06,
"loss": 0.0063,
"step": 2525
},
{
"epoch": 4.61121157323689,
"grad_norm": 0.18963129818439484,
"learning_rate": 5.4466666666666665e-06,
"loss": 0.0064,
"step": 2550
},
{
"epoch": 4.656419529837251,
"grad_norm": 0.17544642090797424,
"learning_rate": 5.391111111111111e-06,
"loss": 0.007,
"step": 2575
},
{
"epoch": 4.701627486437613,
"grad_norm": 0.14972664415836334,
"learning_rate": 5.335555555555556e-06,
"loss": 0.0061,
"step": 2600
},
{
"epoch": 4.746835443037975,
"grad_norm": 0.15808548033237457,
"learning_rate": 5.28e-06,
"loss": 0.0064,
"step": 2625
},
{
"epoch": 4.792043399638336,
"grad_norm": 0.21300840377807617,
"learning_rate": 5.224444444444445e-06,
"loss": 0.0063,
"step": 2650
},
{
"epoch": 4.837251356238698,
"grad_norm": 0.14417383074760437,
"learning_rate": 5.168888888888889e-06,
"loss": 0.0064,
"step": 2675
},
{
"epoch": 4.882459312839059,
"grad_norm": 0.14862936735153198,
"learning_rate": 5.113333333333333e-06,
"loss": 0.0061,
"step": 2700
},
{
"epoch": 4.9276672694394215,
"grad_norm": 0.17642611265182495,
"learning_rate": 5.057777777777778e-06,
"loss": 0.0062,
"step": 2725
},
{
"epoch": 4.972875226039783,
"grad_norm": 0.1384003609418869,
"learning_rate": 5.002222222222223e-06,
"loss": 0.0069,
"step": 2750
},
{
"epoch": 5.018083182640145,
"grad_norm": 0.08383563905954361,
"learning_rate": 4.946666666666667e-06,
"loss": 0.0051,
"step": 2775
},
{
"epoch": 5.063291139240507,
"grad_norm": 0.11989054828882217,
"learning_rate": 4.891111111111111e-06,
"loss": 0.0036,
"step": 2800
},
{
"epoch": 5.108499095840868,
"grad_norm": 0.12534335255622864,
"learning_rate": 4.835555555555556e-06,
"loss": 0.0038,
"step": 2825
},
{
"epoch": 5.15370705244123,
"grad_norm": 0.15589576959609985,
"learning_rate": 4.78e-06,
"loss": 0.0045,
"step": 2850
},
{
"epoch": 5.198915009041591,
"grad_norm": 0.1306639164686203,
"learning_rate": 4.724444444444445e-06,
"loss": 0.004,
"step": 2875
},
{
"epoch": 5.244122965641953,
"grad_norm": 0.16587673127651215,
"learning_rate": 4.66888888888889e-06,
"loss": 0.0037,
"step": 2900
},
{
"epoch": 5.289330922242315,
"grad_norm": 0.12906399369239807,
"learning_rate": 4.613333333333334e-06,
"loss": 0.0039,
"step": 2925
},
{
"epoch": 5.334538878842676,
"grad_norm": 0.1345156729221344,
"learning_rate": 4.557777777777778e-06,
"loss": 0.0035,
"step": 2950
},
{
"epoch": 5.379746835443038,
"grad_norm": 0.138445183634758,
"learning_rate": 4.502222222222223e-06,
"loss": 0.0036,
"step": 2975
},
{
"epoch": 5.424954792043399,
"grad_norm": 0.1678357869386673,
"learning_rate": 4.446666666666667e-06,
"loss": 0.0039,
"step": 3000
},
{
"epoch": 5.424954792043399,
"eval_loss": 0.031730033457279205,
"eval_runtime": 545.7298,
"eval_samples_per_second": 8.015,
"eval_steps_per_second": 0.502,
"eval_wer": 85.09472723122575,
"step": 3000
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.770419843072e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}