{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.999474513925381,
"eval_steps": 500,
"global_step": 951,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0010509721492380452,
"grad_norm": 67.45640910506796,
"learning_rate": 3.125e-08,
"loss": 2.5156,
"step": 1
},
{
"epoch": 0.005254860746190226,
"grad_norm": 75.55100712691016,
"learning_rate": 1.5625e-07,
"loss": 2.5354,
"step": 5
},
{
"epoch": 0.010509721492380452,
"grad_norm": 66.60203909118349,
"learning_rate": 3.125e-07,
"loss": 2.5407,
"step": 10
},
{
"epoch": 0.015764582238570676,
"grad_norm": 36.16907230722729,
"learning_rate": 4.6875e-07,
"loss": 2.1943,
"step": 15
},
{
"epoch": 0.021019442984760904,
"grad_norm": 16.934219590193187,
"learning_rate": 6.25e-07,
"loss": 1.7895,
"step": 20
},
{
"epoch": 0.02627430373095113,
"grad_norm": 6.67698023522737,
"learning_rate": 7.8125e-07,
"loss": 1.4917,
"step": 25
},
{
"epoch": 0.03152916447714135,
"grad_norm": 3.725999126673678,
"learning_rate": 9.375e-07,
"loss": 1.3425,
"step": 30
},
{
"epoch": 0.03678402522333158,
"grad_norm": 2.150496113400007,
"learning_rate": 1.09375e-06,
"loss": 1.2571,
"step": 35
},
{
"epoch": 0.04203888596952181,
"grad_norm": 1.409974548672782,
"learning_rate": 1.25e-06,
"loss": 1.208,
"step": 40
},
{
"epoch": 0.047293746715712036,
"grad_norm": 1.1758035521958887,
"learning_rate": 1.40625e-06,
"loss": 1.1701,
"step": 45
},
{
"epoch": 0.05254860746190226,
"grad_norm": 1.0288132651204651,
"learning_rate": 1.5625e-06,
"loss": 1.1422,
"step": 50
},
{
"epoch": 0.057803468208092484,
"grad_norm": 1.0269817770392966,
"learning_rate": 1.7187499999999998e-06,
"loss": 1.1347,
"step": 55
},
{
"epoch": 0.0630583289542827,
"grad_norm": 1.1632025269193185,
"learning_rate": 1.875e-06,
"loss": 1.1252,
"step": 60
},
{
"epoch": 0.06831318970047294,
"grad_norm": 0.973286786188442,
"learning_rate": 2.0312500000000002e-06,
"loss": 1.1206,
"step": 65
},
{
"epoch": 0.07356805044666316,
"grad_norm": 1.1374146499942284,
"learning_rate": 2.1875e-06,
"loss": 1.123,
"step": 70
},
{
"epoch": 0.0788229111928534,
"grad_norm": 0.9559677259256227,
"learning_rate": 2.3437500000000002e-06,
"loss": 1.105,
"step": 75
},
{
"epoch": 0.08407777193904362,
"grad_norm": 1.0626129431033882,
"learning_rate": 2.5e-06,
"loss": 1.0969,
"step": 80
},
{
"epoch": 0.08933263268523384,
"grad_norm": 0.9767375057324753,
"learning_rate": 2.6562499999999998e-06,
"loss": 1.1099,
"step": 85
},
{
"epoch": 0.09458749343142407,
"grad_norm": 0.9240797677725251,
"learning_rate": 2.8125e-06,
"loss": 1.1119,
"step": 90
},
{
"epoch": 0.09984235417761429,
"grad_norm": 0.9401234680151226,
"learning_rate": 2.96875e-06,
"loss": 1.0926,
"step": 95
},
{
"epoch": 0.10509721492380451,
"grad_norm": 0.9462854605403194,
"learning_rate": 2.9998379903275155e-06,
"loss": 1.1108,
"step": 100
},
{
"epoch": 0.11035207566999475,
"grad_norm": 0.9678742768022235,
"learning_rate": 2.9991798860113893e-06,
"loss": 1.1162,
"step": 105
},
{
"epoch": 0.11560693641618497,
"grad_norm": 0.9200494964131395,
"learning_rate": 2.998015783397426e-06,
"loss": 1.1091,
"step": 110
},
{
"epoch": 0.1208617971623752,
"grad_norm": 0.9581044551729833,
"learning_rate": 2.9963460753897363e-06,
"loss": 1.0963,
"step": 115
},
{
"epoch": 0.1261166579085654,
"grad_norm": 0.911911983689549,
"learning_rate": 2.994171325542714e-06,
"loss": 1.0911,
"step": 120
},
{
"epoch": 0.13137151865475566,
"grad_norm": 0.8931755968055785,
"learning_rate": 2.991492267870822e-06,
"loss": 1.0917,
"step": 125
},
{
"epoch": 0.13662637940094588,
"grad_norm": 0.882022590090018,
"learning_rate": 2.9883098066008556e-06,
"loss": 1.088,
"step": 130
},
{
"epoch": 0.1418812401471361,
"grad_norm": 0.9953724063836157,
"learning_rate": 2.984625015866745e-06,
"loss": 1.0859,
"step": 135
},
{
"epoch": 0.14713610089332632,
"grad_norm": 1.878615978166485,
"learning_rate": 2.9804391393470235e-06,
"loss": 1.1006,
"step": 140
},
{
"epoch": 0.15239096163951654,
"grad_norm": 0.970035528231665,
"learning_rate": 2.975753589845059e-06,
"loss": 1.1022,
"step": 145
},
{
"epoch": 0.1576458223857068,
"grad_norm": 0.9147530382000614,
"learning_rate": 2.970569948812214e-06,
"loss": 1.0691,
"step": 150
},
{
"epoch": 0.162900683131897,
"grad_norm": 0.9374168562620145,
"learning_rate": 2.9648899658140767e-06,
"loss": 1.0861,
"step": 155
},
{
"epoch": 0.16815554387808723,
"grad_norm": 0.8832219863831399,
"learning_rate": 2.9587155579399543e-06,
"loss": 1.0823,
"step": 160
},
{
"epoch": 0.17341040462427745,
"grad_norm": 0.9316518026448157,
"learning_rate": 2.9520488091558225e-06,
"loss": 1.0772,
"step": 165
},
{
"epoch": 0.17866526537046767,
"grad_norm": 0.9096920364451422,
"learning_rate": 2.944891969600953e-06,
"loss": 1.0875,
"step": 170
},
{
"epoch": 0.1839201261166579,
"grad_norm": 0.9007871965996922,
"learning_rate": 2.9372474548284537e-06,
"loss": 1.0949,
"step": 175
},
{
"epoch": 0.18917498686284814,
"grad_norm": 0.876698627967954,
"learning_rate": 2.9291178449899786e-06,
"loss": 1.0934,
"step": 180
},
{
"epoch": 0.19442984760903836,
"grad_norm": 0.9059798194790952,
"learning_rate": 2.920505883964884e-06,
"loss": 1.0918,
"step": 185
},
{
"epoch": 0.19968470835522859,
"grad_norm": 0.8939533236216217,
"learning_rate": 2.9114144784341226e-06,
"loss": 1.0854,
"step": 190
},
{
"epoch": 0.2049395691014188,
"grad_norm": 0.9262446830111234,
"learning_rate": 2.9018466968991914e-06,
"loss": 1.0749,
"step": 195
},
{
"epoch": 0.21019442984760903,
"grad_norm": 0.8549091150362632,
"learning_rate": 2.8918057686464587e-06,
"loss": 1.0825,
"step": 200
},
{
"epoch": 0.21544929059379928,
"grad_norm": 0.8724281083548375,
"learning_rate": 2.881295082657229e-06,
"loss": 1.0769,
"step": 205
},
{
"epoch": 0.2207041513399895,
"grad_norm": 0.8895456534687597,
"learning_rate": 2.8703181864639013e-06,
"loss": 1.0926,
"step": 210
},
{
"epoch": 0.22595901208617972,
"grad_norm": 0.9539096387018893,
"learning_rate": 2.8588787849526228e-06,
"loss": 1.0733,
"step": 215
},
{
"epoch": 0.23121387283236994,
"grad_norm": 0.8703158811213173,
"learning_rate": 2.846980739112822e-06,
"loss": 1.0749,
"step": 220
},
{
"epoch": 0.23646873357856016,
"grad_norm": 0.8785853904575431,
"learning_rate": 2.834628064734065e-06,
"loss": 1.0698,
"step": 225
},
{
"epoch": 0.2417235943247504,
"grad_norm": 0.889877361216805,
"learning_rate": 2.821824931050655e-06,
"loss": 1.0751,
"step": 230
},
{
"epoch": 0.24697845507094063,
"grad_norm": 0.8959992372183687,
"learning_rate": 2.8085756593344505e-06,
"loss": 1.07,
"step": 235
},
{
"epoch": 0.2522333158171308,
"grad_norm": 0.8657778595116195,
"learning_rate": 2.794884721436361e-06,
"loss": 1.0653,
"step": 240
},
{
"epoch": 0.25748817656332107,
"grad_norm": 0.8631791797614975,
"learning_rate": 2.780756738277021e-06,
"loss": 1.0979,
"step": 245
},
{
"epoch": 0.2627430373095113,
"grad_norm": 0.9028283942047837,
"learning_rate": 2.766196478287156e-06,
"loss": 1.0907,
"step": 250
},
{
"epoch": 0.2679978980557015,
"grad_norm": 1.105737597863472,
"learning_rate": 2.751208855798155e-06,
"loss": 1.0801,
"step": 255
},
{
"epoch": 0.27325275880189176,
"grad_norm": 0.9224613563982136,
"learning_rate": 2.7357989293834005e-06,
"loss": 1.0821,
"step": 260
},
{
"epoch": 0.27850761954808195,
"grad_norm": 0.8624741728874925,
"learning_rate": 2.7199719001509175e-06,
"loss": 1.075,
"step": 265
},
{
"epoch": 0.2837624802942722,
"grad_norm": 0.8715022403371375,
"learning_rate": 2.7037331099879117e-06,
"loss": 1.088,
"step": 270
},
{
"epoch": 0.28901734104046245,
"grad_norm": 0.8409580987018042,
"learning_rate": 2.687088039757792e-06,
"loss": 1.0797,
"step": 275
},
{
"epoch": 0.29427220178665264,
"grad_norm": 0.9670308072371985,
"learning_rate": 2.6700423074502888e-06,
"loss": 1.0717,
"step": 280
},
{
"epoch": 0.2995270625328429,
"grad_norm": 0.8449151190647378,
"learning_rate": 2.652601666285289e-06,
"loss": 1.1141,
"step": 285
},
{
"epoch": 0.3047819232790331,
"grad_norm": 0.911604937769195,
"learning_rate": 2.6347720027710253e-06,
"loss": 1.067,
"step": 290
},
{
"epoch": 0.31003678402522333,
"grad_norm": 0.8655301197268408,
"learning_rate": 2.6165593347172837e-06,
"loss": 1.0731,
"step": 295
},
{
"epoch": 0.3152916447714136,
"grad_norm": 0.880039459937268,
"learning_rate": 2.5979698092042925e-06,
"loss": 1.0877,
"step": 300
},
{
"epoch": 0.3205465055176038,
"grad_norm": 0.9360576412818693,
"learning_rate": 2.5790097005079765e-06,
"loss": 1.0899,
"step": 305
},
{
"epoch": 0.325801366263794,
"grad_norm": 0.8489030162852722,
"learning_rate": 2.559685407982288e-06,
"loss": 1.0667,
"step": 310
},
{
"epoch": 0.3310562270099842,
"grad_norm": 0.8638592230026647,
"learning_rate": 2.5400034538993135e-06,
"loss": 1.0867,
"step": 315
},
{
"epoch": 0.33631108775617446,
"grad_norm": 0.9101865357911341,
"learning_rate": 2.519970481247901e-06,
"loss": 1.0521,
"step": 320
},
{
"epoch": 0.3415659485023647,
"grad_norm": 0.8626549506195685,
"learning_rate": 2.4995932514915404e-06,
"loss": 1.0684,
"step": 325
},
{
"epoch": 0.3468208092485549,
"grad_norm": 0.8534791671434315,
"learning_rate": 2.478878642286253e-06,
"loss": 1.0644,
"step": 330
},
{
"epoch": 0.35207566999474516,
"grad_norm": 0.8353900580922416,
"learning_rate": 2.4578336451592705e-06,
"loss": 1.0813,
"step": 335
},
{
"epoch": 0.35733053074093535,
"grad_norm": 0.8518466429904108,
"learning_rate": 2.4364653631492774e-06,
"loss": 1.0626,
"step": 340
},
{
"epoch": 0.3625853914871256,
"grad_norm": 0.8341915866164423,
"learning_rate": 2.414781008409014e-06,
"loss": 1.0737,
"step": 345
},
{
"epoch": 0.3678402522333158,
"grad_norm": 0.8723412570499911,
"learning_rate": 2.3927878997710575e-06,
"loss": 1.0981,
"step": 350
},
{
"epoch": 0.37309511297950604,
"grad_norm": 0.841906976458512,
"learning_rate": 2.3704934602775926e-06,
"loss": 1.0827,
"step": 355
},
{
"epoch": 0.3783499737256963,
"grad_norm": 0.879119551017937,
"learning_rate": 2.347905214675008e-06,
"loss": 1.0713,
"step": 360
},
{
"epoch": 0.3836048344718865,
"grad_norm": 0.8718882110600734,
"learning_rate": 2.3250307868741717e-06,
"loss": 1.0707,
"step": 365
},
{
"epoch": 0.38885969521807673,
"grad_norm": 0.8002367155481277,
"learning_rate": 2.3018778973772334e-06,
"loss": 1.0573,
"step": 370
},
{
"epoch": 0.3941145559642669,
"grad_norm": 0.8221977480285654,
"learning_rate": 2.278454360671823e-06,
"loss": 1.0867,
"step": 375
},
{
"epoch": 0.39936941671045717,
"grad_norm": 0.8870576322337221,
"learning_rate": 2.2547680825935325e-06,
"loss": 1.0851,
"step": 380
},
{
"epoch": 0.4046242774566474,
"grad_norm": 0.8153258721073422,
"learning_rate": 2.2308270576575657e-06,
"loss": 1.0683,
"step": 385
},
{
"epoch": 0.4098791382028376,
"grad_norm": 0.8299248390745442,
"learning_rate": 2.206639366360451e-06,
"loss": 1.0883,
"step": 390
},
{
"epoch": 0.41513399894902786,
"grad_norm": 0.8891580311632679,
"learning_rate": 2.1822131724527425e-06,
"loss": 1.0587,
"step": 395
},
{
"epoch": 0.42038885969521805,
"grad_norm": 0.830348041290943,
"learning_rate": 2.157556720183616e-06,
"loss": 1.0542,
"step": 400
},
{
"epoch": 0.4256437204414083,
"grad_norm": 0.8254218651707308,
"learning_rate": 2.1326783315182984e-06,
"loss": 1.0666,
"step": 405
},
{
"epoch": 0.43089858118759855,
"grad_norm": 0.8211111371978261,
"learning_rate": 2.1075864033292623e-06,
"loss": 1.0723,
"step": 410
},
{
"epoch": 0.43615344193378874,
"grad_norm": 0.8308235610863787,
"learning_rate": 2.082289404562144e-06,
"loss": 1.0767,
"step": 415
},
{
"epoch": 0.441408302679979,
"grad_norm": 0.8127153392787219,
"learning_rate": 2.0567958733773313e-06,
"loss": 1.0614,
"step": 420
},
{
"epoch": 0.4466631634261692,
"grad_norm": 0.8123707093337108,
"learning_rate": 2.0311144142681904e-06,
"loss": 1.064,
"step": 425
},
{
"epoch": 0.45191802417235943,
"grad_norm": 0.829943736431802,
"learning_rate": 2.005253695156909e-06,
"loss": 1.0472,
"step": 430
},
{
"epoch": 0.4571728849185497,
"grad_norm": 0.831220074995737,
"learning_rate": 1.9792224444689222e-06,
"loss": 1.0615,
"step": 435
},
{
"epoch": 0.4624277456647399,
"grad_norm": 0.8745913548912436,
"learning_rate": 1.9530294481869286e-06,
"loss": 1.0802,
"step": 440
},
{
"epoch": 0.4676826064109301,
"grad_norm": 0.8449818880595231,
"learning_rate": 1.926683546885469e-06,
"loss": 1.0588,
"step": 445
},
{
"epoch": 0.4729374671571203,
"grad_norm": 0.8068996721164886,
"learning_rate": 1.9001936327470894e-06,
"loss": 1.0708,
"step": 450
},
{
"epoch": 0.47819232790331057,
"grad_norm": 0.824592533003207,
"learning_rate": 1.873568646561075e-06,
"loss": 1.0672,
"step": 455
},
{
"epoch": 0.4834471886495008,
"grad_norm": 0.83022160250907,
"learning_rate": 1.8468175747057898e-06,
"loss": 1.0748,
"step": 460
},
{
"epoch": 0.488702049395691,
"grad_norm": 0.8333740262644744,
"learning_rate": 1.8199494461156204e-06,
"loss": 1.0532,
"step": 465
},
{
"epoch": 0.49395691014188126,
"grad_norm": 0.8922005131672666,
"learning_rate": 1.7929733292335591e-06,
"loss": 1.0733,
"step": 470
},
{
"epoch": 0.49921177088807145,
"grad_norm": 0.9005753696617086,
"learning_rate": 1.765898328950455e-06,
"loss": 1.0647,
"step": 475
},
{
"epoch": 0.5044666316342616,
"grad_norm": 0.8247436750347265,
"learning_rate": 1.738733583531959e-06,
"loss": 1.0802,
"step": 480
},
{
"epoch": 0.509721492380452,
"grad_norm": 0.841670784750436,
"learning_rate": 1.7114882615342073e-06,
"loss": 1.056,
"step": 485
},
{
"epoch": 0.5149763531266421,
"grad_norm": 0.8182688706522747,
"learning_rate": 1.6841715587092798e-06,
"loss": 1.0783,
"step": 490
},
{
"epoch": 0.5202312138728323,
"grad_norm": 1.239386954886359,
"learning_rate": 1.6567926949014804e-06,
"loss": 1.0745,
"step": 495
},
{
"epoch": 0.5254860746190226,
"grad_norm": 0.8030922261487655,
"learning_rate": 1.6293609109354836e-06,
"loss": 1.0612,
"step": 500
},
{
"epoch": 0.5307409353652128,
"grad_norm": 0.8189314991097896,
"learning_rate": 1.601885465497404e-06,
"loss": 1.065,
"step": 505
},
{
"epoch": 0.535995796111403,
"grad_norm": 0.9050946114254789,
"learning_rate": 1.5743756320098334e-06,
"loss": 1.0643,
"step": 510
},
{
"epoch": 0.5412506568575933,
"grad_norm": 0.8807760866261877,
"learning_rate": 1.5468406955019059e-06,
"loss": 1.0569,
"step": 515
},
{
"epoch": 0.5465055176037835,
"grad_norm": 0.8569862964159628,
"learning_rate": 1.5192899494754443e-06,
"loss": 1.0731,
"step": 520
},
{
"epoch": 0.5517603783499737,
"grad_norm": 0.8091790353711866,
"learning_rate": 1.4917326927682494e-06,
"loss": 1.0703,
"step": 525
},
{
"epoch": 0.5570152390961639,
"grad_norm": 0.822047948500721,
"learning_rate": 1.4641782264155852e-06,
"loss": 1.0737,
"step": 530
},
{
"epoch": 0.5622700998423542,
"grad_norm": 0.8252210935515614,
"learning_rate": 1.4366358505109237e-06,
"loss": 1.0857,
"step": 535
},
{
"epoch": 0.5675249605885444,
"grad_norm": 0.8152512103821862,
"learning_rate": 1.4091148610670098e-06,
"loss": 1.0697,
"step": 540
},
{
"epoch": 0.5727798213347346,
"grad_norm": 0.8348760975293736,
"learning_rate": 1.3816245468782988e-06,
"loss": 1.0598,
"step": 545
},
{
"epoch": 0.5780346820809249,
"grad_norm": 0.8008142952039858,
"learning_rate": 1.3541741863858352e-06,
"loss": 1.0532,
"step": 550
},
{
"epoch": 0.5832895428271151,
"grad_norm": 0.8525665050865836,
"learning_rate": 1.326773044545621e-06,
"loss": 1.0672,
"step": 555
},
{
"epoch": 0.5885444035733053,
"grad_norm": 0.828489906999064,
"learning_rate": 1.299430369701541e-06,
"loss": 1.0578,
"step": 560
},
{
"epoch": 0.5937992643194955,
"grad_norm": 0.8071915455528251,
"learning_rate": 1.272155390463889e-06,
"loss": 1.084,
"step": 565
},
{
"epoch": 0.5990541250656858,
"grad_norm": 0.8213168186088048,
"learning_rate": 1.2449573125945607e-06,
"loss": 1.0678,
"step": 570
},
{
"epoch": 0.604308985811876,
"grad_norm": 0.808856584314896,
"learning_rate": 1.2178453158999509e-06,
"loss": 1.0705,
"step": 575
},
{
"epoch": 0.6095638465580662,
"grad_norm": 0.8097966068652359,
"learning_rate": 1.1908285511326195e-06,
"loss": 1.0561,
"step": 580
},
{
"epoch": 0.6148187073042565,
"grad_norm": 0.8103079561814972,
"learning_rate": 1.1639161369027564e-06,
"loss": 1.0543,
"step": 585
},
{
"epoch": 0.6200735680504467,
"grad_norm": 0.7934957921328578,
"learning_rate": 1.1371171566004986e-06,
"loss": 1.0507,
"step": 590
},
{
"epoch": 0.6253284287966369,
"grad_norm": 0.7898748331831235,
"learning_rate": 1.1104406553301357e-06,
"loss": 1.0637,
"step": 595
},
{
"epoch": 0.6305832895428272,
"grad_norm": 0.8261021360086062,
"learning_rate": 1.0838956368572335e-06,
"loss": 1.0526,
"step": 600
},
{
"epoch": 0.6358381502890174,
"grad_norm": 0.8086241727940497,
"learning_rate": 1.0574910605697135e-06,
"loss": 1.0715,
"step": 605
},
{
"epoch": 0.6410930110352075,
"grad_norm": 0.9088888967600997,
"learning_rate": 1.03123583845391e-06,
"loss": 1.0692,
"step": 610
},
{
"epoch": 0.6463478717813977,
"grad_norm": 0.8079809849272349,
"learning_rate": 1.0051388320866258e-06,
"loss": 1.0583,
"step": 615
},
{
"epoch": 0.651602732527588,
"grad_norm": 0.8422109756331945,
"learning_rate": 9.792088496441992e-07,
"loss": 1.0819,
"step": 620
},
{
"epoch": 0.6568575932737782,
"grad_norm": 0.8172134359132237,
"learning_rate": 9.53454642929601e-07,
"loss": 1.0772,
"step": 625
},
{
"epoch": 0.6621124540199684,
"grad_norm": 0.9233197165497478,
"learning_rate": 9.278849044185509e-07,
"loss": 1.0637,
"step": 630
},
{
"epoch": 0.6673673147661587,
"grad_norm": 0.9171808214559961,
"learning_rate": 9.025082643256647e-07,
"loss": 1.043,
"step": 635
},
{
"epoch": 0.6726221755123489,
"grad_norm": 0.8043456622976703,
"learning_rate": 8.77333287691609e-07,
"loss": 1.0584,
"step": 640
},
{
"epoch": 0.6778770362585391,
"grad_norm": 0.8108655439985347,
"learning_rate": 8.523684714922608e-07,
"loss": 1.0742,
"step": 645
},
{
"epoch": 0.6831318970047294,
"grad_norm": 0.7968848199287993,
"learning_rate": 8.276222417708309e-07,
"loss": 1.0557,
"step": 650
},
{
"epoch": 0.6883867577509196,
"grad_norm": 0.8129711567557044,
"learning_rate": 8.031029507939401e-07,
"loss": 1.0548,
"step": 655
},
{
"epoch": 0.6936416184971098,
"grad_norm": 1.004235271804759,
"learning_rate": 7.788188742325803e-07,
"loss": 1.0612,
"step": 660
},
{
"epoch": 0.6988964792433,
"grad_norm": 0.8208103251521766,
"learning_rate": 7.547782083689479e-07,
"loss": 1.0643,
"step": 665
},
{
"epoch": 0.7041513399894903,
"grad_norm": 0.8005037088376794,
"learning_rate": 7.309890673300506e-07,
"loss": 1.045,
"step": 670
},
{
"epoch": 0.7094062007356805,
"grad_norm": 1.3197940343995904,
"learning_rate": 7.074594803490618e-07,
"loss": 1.0518,
"step": 675
},
{
"epoch": 0.7146610614818707,
"grad_norm": 0.8555830044827688,
"learning_rate": 6.841973890553168e-07,
"loss": 1.0611,
"step": 680
},
{
"epoch": 0.719915922228061,
"grad_norm": 0.8099691174064823,
"learning_rate": 6.6121064479388e-07,
"loss": 1.0365,
"step": 685
},
{
"epoch": 0.7251707829742512,
"grad_norm": 0.8053501843842861,
"learning_rate": 6.385070059755846e-07,
"loss": 1.074,
"step": 690
},
{
"epoch": 0.7304256437204414,
"grad_norm": 0.8346149088986957,
"learning_rate": 6.160941354584404e-07,
"loss": 1.0609,
"step": 695
},
{
"epoch": 0.7356805044666316,
"grad_norm": 0.8155402643621729,
"learning_rate": 5.93979597961289e-07,
"loss": 1.0505,
"step": 700
},
{
"epoch": 0.7409353652128219,
"grad_norm": 0.7796064768407052,
"learning_rate": 5.721708575105861e-07,
"loss": 1.0589,
"step": 705
},
{
"epoch": 0.7461902259590121,
"grad_norm": 0.8275975218466692,
"learning_rate": 5.506752749211673e-07,
"loss": 1.0546,
"step": 710
},
{
"epoch": 0.7514450867052023,
"grad_norm": 0.8163230049703886,
"learning_rate": 5.295001053118499e-07,
"loss": 1.0562,
"step": 715
},
{
"epoch": 0.7566999474513926,
"grad_norm": 0.985195116035074,
"learning_rate": 5.086524956567084e-07,
"loss": 1.0713,
"step": 720
},
{
"epoch": 0.7619548081975828,
"grad_norm": 0.96356977644872,
"learning_rate": 4.88139482372852e-07,
"loss": 1.0375,
"step": 725
},
{
"epoch": 0.767209668943773,
"grad_norm": 0.8163211083671094,
"learning_rate": 4.679679889455153e-07,
"loss": 1.0525,
"step": 730
},
{
"epoch": 0.7724645296899633,
"grad_norm": 0.7987340798976842,
"learning_rate": 4.4814482359126713e-07,
"loss": 1.082,
"step": 735
},
{
"epoch": 0.7777193904361535,
"grad_norm": 0.8117501301968848,
"learning_rate": 4.2867667696012255e-07,
"loss": 1.0626,
"step": 740
},
{
"epoch": 0.7829742511823437,
"grad_norm": 0.8929835779245712,
"learning_rate": 4.0957011987733655e-07,
"loss": 1.076,
"step": 745
},
{
"epoch": 0.7882291119285338,
"grad_norm": 0.8088648176966625,
"learning_rate": 3.908316011256419e-07,
"loss": 1.0744,
"step": 750
},
{
"epoch": 0.7934839726747241,
"grad_norm": 0.7980716806439362,
"learning_rate": 3.7246744526867525e-07,
"loss": 1.0566,
"step": 755
},
{
"epoch": 0.7987388334209143,
"grad_norm": 0.8089436395697871,
"learning_rate": 3.5448385051633225e-07,
"loss": 1.0465,
"step": 760
},
{
"epoch": 0.8039936941671045,
"grad_norm": 0.8015612514297874,
"learning_rate": 3.368868866327678e-07,
"loss": 1.0611,
"step": 765
},
{
"epoch": 0.8092485549132948,
"grad_norm": 0.7877310543664608,
"learning_rate": 3.1968249288774887e-07,
"loss": 1.0604,
"step": 770
},
{
"epoch": 0.814503415659485,
"grad_norm": 0.8023241081523638,
"learning_rate": 3.0287647605205155e-07,
"loss": 1.0723,
"step": 775
},
{
"epoch": 0.8197582764056752,
"grad_norm": 0.8051580485406397,
"learning_rate": 2.86474508437579e-07,
"loss": 1.0579,
"step": 780
},
{
"epoch": 0.8250131371518655,
"grad_norm": 0.8081087810817446,
"learning_rate": 2.704821259828608e-07,
"loss": 1.0684,
"step": 785
},
{
"epoch": 0.8302679978980557,
"grad_norm": 0.8036521909829868,
"learning_rate": 2.5490472638458195e-07,
"loss": 1.0484,
"step": 790
},
{
"epoch": 0.8355228586442459,
"grad_norm": 0.8069169918745295,
"learning_rate": 2.3974756727576886e-07,
"loss": 1.0698,
"step": 795
},
{
"epoch": 0.8407777193904361,
"grad_norm": 0.8010632009065842,
"learning_rate": 2.2501576445125077e-07,
"loss": 1.0592,
"step": 800
},
{
"epoch": 0.8460325801366264,
"grad_norm": 0.8112748895796706,
"learning_rate": 2.1071429014099365e-07,
"loss": 1.063,
"step": 805
},
{
"epoch": 0.8512874408828166,
"grad_norm": 0.7987887307844111,
"learning_rate": 1.9684797133188865e-07,
"loss": 1.0396,
"step": 810
},
{
"epoch": 0.8565423016290068,
"grad_norm": 0.7974743093384434,
"learning_rate": 1.8342148813856414e-07,
"loss": 1.0497,
"step": 815
},
{
"epoch": 0.8617971623751971,
"grad_norm": 0.9606632973010945,
"learning_rate": 1.7043937222376766e-07,
"loss": 1.0484,
"step": 820
},
{
"epoch": 0.8670520231213873,
"grad_norm": 0.8027436257960178,
"learning_rate": 1.579060052688548e-07,
"loss": 1.0674,
"step": 825
},
{
"epoch": 0.8723068838675775,
"grad_norm": 0.8077058467523247,
"learning_rate": 1.4582561749489847e-07,
"loss": 1.0658,
"step": 830
},
{
"epoch": 0.8775617446137677,
"grad_norm": 0.796090164625106,
"learning_rate": 1.3420228623491742e-07,
"loss": 1.0339,
"step": 835
},
{
"epoch": 0.882816605359958,
"grad_norm": 0.7956190292731135,
"learning_rate": 1.2303993455770946e-07,
"loss": 1.0678,
"step": 840
},
{
"epoch": 0.8880714661061482,
"grad_norm": 0.8060093191984734,
"learning_rate": 1.1234232994374916e-07,
"loss": 1.0562,
"step": 845
},
{
"epoch": 0.8933263268523384,
"grad_norm": 0.8052954491456666,
"learning_rate": 1.0211308301360039e-07,
"loss": 1.0635,
"step": 850
},
{
"epoch": 0.8985811875985287,
"grad_norm": 0.789706910907941,
"learning_rate": 9.235564630927196e-08,
"loss": 1.0497,
"step": 855
},
{
"epoch": 0.9038360483447189,
"grad_norm": 0.9407705619653095,
"learning_rate": 8.307331312892601e-08,
"loss": 1.0573,
"step": 860
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.7849160686891212,
"learning_rate": 7.426921641533562e-08,
"loss": 1.0535,
"step": 865
},
{
"epoch": 0.9143457698370994,
"grad_norm": 0.7819582818532375,
"learning_rate": 6.594632769846354e-08,
"loss": 1.0605,
"step": 870
},
{
"epoch": 0.9196006305832896,
"grad_norm": 0.8235274235744744,
"learning_rate": 5.810745609252166e-08,
"loss": 1.0481,
"step": 875
},
{
"epoch": 0.9248554913294798,
"grad_norm": 0.8326201665614436,
"learning_rate": 5.0755247347847814e-08,
"loss": 1.05,
"step": 880
},
{
"epoch": 0.9301103520756699,
"grad_norm": 0.8084764417080952,
"learning_rate": 4.389218295792002e-08,
"loss": 1.0649,
"step": 885
},
{
"epoch": 0.9353652128218602,
"grad_norm": 0.8589755183334531,
"learning_rate": 3.7520579321812186e-08,
"loss": 1.073,
"step": 890
},
{
"epoch": 0.9406200735680504,
"grad_norm": 0.8084706845759793,
"learning_rate": 3.1642586962369765e-08,
"loss": 1.0363,
"step": 895
},
{
"epoch": 0.9458749343142406,
"grad_norm": 0.7879594862220682,
"learning_rate": 2.6260189800372757e-08,
"loss": 1.0517,
"step": 900
},
{
"epoch": 0.9511297950604309,
"grad_norm": 0.7751896271813346,
"learning_rate": 2.13752044849288e-08,
"loss": 1.0625,
"step": 905
},
{
"epoch": 0.9563846558066211,
"grad_norm": 0.8037723865057425,
"learning_rate": 1.698927978032383e-08,
"loss": 1.0486,
"step": 910
},
{
"epoch": 0.9616395165528113,
"grad_norm": 0.7949419082977591,
"learning_rate": 1.3103896009537208e-08,
"loss": 1.0476,
"step": 915
},
{
"epoch": 0.9668943772990016,
"grad_norm": 0.7711064252783653,
"learning_rate": 9.720364554606898e-09,
"loss": 1.0553,
"step": 920
},
{
"epoch": 0.9721492380451918,
"grad_norm": 0.9190212136355437,
"learning_rate": 6.839827414016675e-09,
"loss": 1.0636,
"step": 925
},
{
"epoch": 0.977404098791382,
"grad_norm": 0.7895845548954091,
"learning_rate": 4.463256817252792e-09,
"loss": 1.0626,
"step": 930
},
{
"epoch": 0.9826589595375722,
"grad_norm": 0.7799515201158175,
"learning_rate": 2.5914548966596285e-09,
"loss": 1.0584,
"step": 935
},
{
"epoch": 0.9879138202837625,
"grad_norm": 0.7896292102395543,
"learning_rate": 1.2250534167067561e-09,
"loss": 1.0562,
"step": 940
},
{
"epoch": 0.9931686810299527,
"grad_norm": 0.8122355011762691,
"learning_rate": 3.6451356075817287e-10,
"loss": 1.051,
"step": 945
},
{
"epoch": 0.9984235417761429,
"grad_norm": 0.7870290469964676,
"learning_rate": 1.0125775414981941e-11,
"loss": 1.096,
"step": 950
},
{
"epoch": 0.999474513925381,
"eval_loss": 1.0638214349746704,
"eval_runtime": 593.8184,
"eval_samples_per_second": 22.692,
"eval_steps_per_second": 0.711,
"step": 951
},
{
"epoch": 0.999474513925381,
"step": 951,
"total_flos": 905758069751808.0,
"train_loss": 1.1023603343061092,
"train_runtime": 21224.38,
"train_samples_per_second": 5.738,
"train_steps_per_second": 0.045
}
],
"logging_steps": 5,
"max_steps": 951,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 905758069751808.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}