{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 1210,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08264462809917356,
"grad_norm": 9.173369407653809,
"learning_rate": 1.6393442622950818e-05,
"loss": 1.2749,
"step": 10
},
{
"epoch": 0.1652892561983471,
"grad_norm": 3.0318551063537598,
"learning_rate": 3.2786885245901635e-05,
"loss": 0.7164,
"step": 20
},
{
"epoch": 0.24793388429752067,
"grad_norm": 1.8877290487289429,
"learning_rate": 4.918032786885246e-05,
"loss": 0.4175,
"step": 30
},
{
"epoch": 0.3305785123966942,
"grad_norm": 2.459096670150757,
"learning_rate": 6.557377049180327e-05,
"loss": 0.3476,
"step": 40
},
{
"epoch": 0.4132231404958678,
"grad_norm": 1.8838213682174683,
"learning_rate": 8.19672131147541e-05,
"loss": 0.2797,
"step": 50
},
{
"epoch": 0.49586776859504134,
"grad_norm": 1.053584337234497,
"learning_rate": 9.836065573770493e-05,
"loss": 0.2348,
"step": 60
},
{
"epoch": 0.5785123966942148,
"grad_norm": 2.7973430156707764,
"learning_rate": 9.998486219870769e-05,
"loss": 0.224,
"step": 70
},
{
"epoch": 0.6611570247933884,
"grad_norm": 1.6049909591674805,
"learning_rate": 9.993254576451651e-05,
"loss": 0.2017,
"step": 80
},
{
"epoch": 0.743801652892562,
"grad_norm": 0.7833113074302673,
"learning_rate": 9.984290291014105e-05,
"loss": 0.2074,
"step": 90
},
{
"epoch": 0.8264462809917356,
"grad_norm": 3.117323160171509,
"learning_rate": 9.971600064692222e-05,
"loss": 0.1826,
"step": 100
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.858580470085144,
"learning_rate": 9.955193383898376e-05,
"loss": 0.1674,
"step": 110
},
{
"epoch": 0.9917355371900827,
"grad_norm": 0.7289248108863831,
"learning_rate": 9.935082513231775e-05,
"loss": 0.1584,
"step": 120
},
{
"epoch": 1.0743801652892562,
"grad_norm": 2.072802782058716,
"learning_rate": 9.911282486310213e-05,
"loss": 0.154,
"step": 130
},
{
"epoch": 1.1570247933884297,
"grad_norm": 1.2961859703063965,
"learning_rate": 9.883811094531906e-05,
"loss": 0.1567,
"step": 140
},
{
"epoch": 1.2396694214876034,
"grad_norm": 0.9249287843704224,
"learning_rate": 9.85268887377574e-05,
"loss": 0.1537,
"step": 150
},
{
"epoch": 1.322314049586777,
"grad_norm": 1.7591155767440796,
"learning_rate": 9.817939089049964e-05,
"loss": 0.1435,
"step": 160
},
{
"epoch": 1.4049586776859504,
"grad_norm": 3.4378185272216797,
"learning_rate": 9.779587717100729e-05,
"loss": 0.1329,
"step": 170
},
{
"epoch": 1.487603305785124,
"grad_norm": 0.6799660921096802,
"learning_rate": 9.737663426993513e-05,
"loss": 0.1388,
"step": 180
},
{
"epoch": 1.5702479338842976,
"grad_norm": 0.6710700988769531,
"learning_rate": 9.69219755868194e-05,
"loss": 0.147,
"step": 190
},
{
"epoch": 1.6528925619834711,
"grad_norm": 1.6649482250213623,
"learning_rate": 9.643224099579998e-05,
"loss": 0.1219,
"step": 200
},
{
"epoch": 1.7355371900826446,
"grad_norm": 1.4101303815841675,
"learning_rate": 9.59077965915521e-05,
"loss": 0.1132,
"step": 210
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.7230204939842224,
"learning_rate": 9.534903441561693e-05,
"loss": 0.1196,
"step": 220
},
{
"epoch": 1.9008264462809916,
"grad_norm": 2.3801050186157227,
"learning_rate": 9.47563721633361e-05,
"loss": 0.106,
"step": 230
},
{
"epoch": 1.9834710743801653,
"grad_norm": 1.3875033855438232,
"learning_rate": 9.413025287160905e-05,
"loss": 0.1181,
"step": 240
},
{
"epoch": 2.0661157024793386,
"grad_norm": 0.49982619285583496,
"learning_rate": 9.347114458770657e-05,
"loss": 0.0937,
"step": 250
},
{
"epoch": 2.1487603305785123,
"grad_norm": 1.5207865238189697,
"learning_rate": 9.277954001938818e-05,
"loss": 0.1005,
"step": 260
},
{
"epoch": 2.231404958677686,
"grad_norm": 0.5808604955673218,
"learning_rate": 9.205595616658495e-05,
"loss": 0.1046,
"step": 270
},
{
"epoch": 2.3140495867768593,
"grad_norm": 0.5397279262542725,
"learning_rate": 9.1300933934923e-05,
"loss": 0.095,
"step": 280
},
{
"epoch": 2.396694214876033,
"grad_norm": 2.023026704788208,
"learning_rate": 9.051503773137646e-05,
"loss": 0.1054,
"step": 290
},
{
"epoch": 2.479338842975207,
"grad_norm": 0.8514553904533386,
"learning_rate": 8.969885504235256e-05,
"loss": 0.1058,
"step": 300
},
{
"epoch": 2.56198347107438,
"grad_norm": 0.7182636260986328,
"learning_rate": 8.885299599452382e-05,
"loss": 0.1155,
"step": 310
},
{
"epoch": 2.644628099173554,
"grad_norm": 1.0667051076889038,
"learning_rate": 8.797809289873587e-05,
"loss": 0.1009,
"step": 320
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.7455232739448547,
"learning_rate": 8.70747997773317e-05,
"loss": 0.0969,
"step": 330
},
{
"epoch": 2.809917355371901,
"grad_norm": 0.5841661691665649,
"learning_rate": 8.614379187524592e-05,
"loss": 0.0885,
"step": 340
},
{
"epoch": 2.8925619834710745,
"grad_norm": 0.5531947016716003,
"learning_rate": 8.518576515523424e-05,
"loss": 0.0909,
"step": 350
},
{
"epoch": 2.975206611570248,
"grad_norm": 0.7314962148666382,
"learning_rate": 8.42014357776155e-05,
"loss": 0.0821,
"step": 360
},
{
"epoch": 3.0578512396694215,
"grad_norm": 1.354279637336731,
"learning_rate": 8.319153956491568e-05,
"loss": 0.0908,
"step": 370
},
{
"epoch": 3.1404958677685952,
"grad_norm": 0.621191143989563,
"learning_rate": 8.215683145181312e-05,
"loss": 0.0822,
"step": 380
},
{
"epoch": 3.2231404958677685,
"grad_norm": 0.6993005871772766,
"learning_rate": 8.109808492079718e-05,
"loss": 0.0791,
"step": 390
},
{
"epoch": 3.3057851239669422,
"grad_norm": 1.1259503364562988,
"learning_rate": 8.001609142396149e-05,
"loss": 0.089,
"step": 400
},
{
"epoch": 3.3884297520661155,
"grad_norm": 1.8521549701690674,
"learning_rate": 7.891165979136429e-05,
"loss": 0.0741,
"step": 410
},
{
"epoch": 3.4710743801652892,
"grad_norm": 0.6957982778549194,
"learning_rate": 7.778561562639818e-05,
"loss": 0.076,
"step": 420
},
{
"epoch": 3.553719008264463,
"grad_norm": 0.4278518259525299,
"learning_rate": 7.663880068862106e-05,
"loss": 0.0752,
"step": 430
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.9472731351852417,
"learning_rate": 7.547207226450979e-05,
"loss": 0.0899,
"step": 440
},
{
"epoch": 3.71900826446281,
"grad_norm": 0.472150593996048,
"learning_rate": 7.428630252660704e-05,
"loss": 0.0746,
"step": 450
},
{
"epoch": 3.8016528925619832,
"grad_norm": 0.614167332649231,
"learning_rate": 7.308237788154003e-05,
"loss": 0.0785,
"step": 460
},
{
"epoch": 3.884297520661157,
"grad_norm": 0.5245425701141357,
"learning_rate": 7.186119830739883e-05,
"loss": 0.0756,
"step": 470
},
{
"epoch": 3.9669421487603307,
"grad_norm": 0.4369754195213318,
"learning_rate": 7.062367668096967e-05,
"loss": 0.0673,
"step": 480
},
{
"epoch": 4.049586776859504,
"grad_norm": 0.7638271450996399,
"learning_rate": 6.93707380953258e-05,
"loss": 0.0582,
"step": 490
},
{
"epoch": 4.132231404958677,
"grad_norm": 0.7099161744117737,
"learning_rate": 6.810331916828622e-05,
"loss": 0.065,
"step": 500
},
{
"epoch": 4.214876033057851,
"grad_norm": 0.9365837574005127,
"learning_rate": 6.682236734225944e-05,
"loss": 0.0736,
"step": 510
},
{
"epoch": 4.297520661157025,
"grad_norm": 0.5708351135253906,
"learning_rate": 6.552884017599517e-05,
"loss": 0.0582,
"step": 520
},
{
"epoch": 4.380165289256198,
"grad_norm": 2.0133728981018066,
"learning_rate": 6.422370462877396e-05,
"loss": 0.0737,
"step": 530
},
{
"epoch": 4.462809917355372,
"grad_norm": 0.615628719329834,
"learning_rate": 6.29079363375694e-05,
"loss": 0.0619,
"step": 540
},
{
"epoch": 4.545454545454545,
"grad_norm": 1.595961570739746,
"learning_rate": 6.15825188877235e-05,
"loss": 0.0646,
"step": 550
},
{
"epoch": 4.628099173553719,
"grad_norm": 0.5787972807884216,
"learning_rate": 6.0248443077680316e-05,
"loss": 0.0727,
"step": 560
},
{
"epoch": 4.710743801652892,
"grad_norm": 0.37428799271583557,
"learning_rate": 5.890670617832764e-05,
"loss": 0.0694,
"step": 570
},
{
"epoch": 4.793388429752066,
"grad_norm": 0.35877710580825806,
"learning_rate": 5.755831118750016e-05,
"loss": 0.0615,
"step": 580
},
{
"epoch": 4.87603305785124,
"grad_norm": 0.7337440252304077,
"learning_rate": 5.620426608020156e-05,
"loss": 0.0693,
"step": 590
},
{
"epoch": 4.958677685950414,
"grad_norm": 0.4097314476966858,
"learning_rate": 5.484558305510609e-05,
"loss": 0.0584,
"step": 600
},
{
"epoch": 5.041322314049586,
"grad_norm": 0.9892732501029968,
"learning_rate": 5.348327777790262e-05,
"loss": 0.0654,
"step": 610
},
{
"epoch": 5.12396694214876,
"grad_norm": 0.9509891867637634,
"learning_rate": 5.211836862204715e-05,
"loss": 0.0594,
"step": 620
},
{
"epoch": 5.206611570247934,
"grad_norm": 0.6350495219230652,
"learning_rate": 5.075187590749101e-05,
"loss": 0.0568,
"step": 630
},
{
"epoch": 5.289256198347108,
"grad_norm": 0.4029739201068878,
"learning_rate": 4.93848211379541e-05,
"loss": 0.0572,
"step": 640
},
{
"epoch": 5.371900826446281,
"grad_norm": 0.8811182379722595,
"learning_rate": 4.8018226237313165e-05,
"loss": 0.0602,
"step": 650
},
{
"epoch": 5.454545454545454,
"grad_norm": 0.9591042995452881,
"learning_rate": 4.665311278567593e-05,
"loss": 0.0571,
"step": 660
},
{
"epoch": 5.537190082644628,
"grad_norm": 0.4754871726036072,
"learning_rate": 4.5290501255712415e-05,
"loss": 0.0489,
"step": 670
},
{
"epoch": 5.619834710743802,
"grad_norm": 1.2373918294906616,
"learning_rate": 4.3931410249813806e-05,
"loss": 0.0559,
"step": 680
},
{
"epoch": 5.702479338842975,
"grad_norm": 0.8961721062660217,
"learning_rate": 4.2576855738649714e-05,
"loss": 0.0532,
"step": 690
},
{
"epoch": 5.785123966942149,
"grad_norm": 0.4459110498428345,
"learning_rate": 4.122785030169256e-05,
"loss": 0.0655,
"step": 700
},
{
"epoch": 5.867768595041323,
"grad_norm": 0.38759222626686096,
"learning_rate": 3.988540237027702e-05,
"loss": 0.0542,
"step": 710
},
{
"epoch": 5.950413223140496,
"grad_norm": 1.9169152975082397,
"learning_rate": 3.8550515473760514e-05,
"loss": 0.0507,
"step": 720
},
{
"epoch": 6.033057851239669,
"grad_norm": 2.071638345718384,
"learning_rate": 3.722418748934785e-05,
"loss": 0.0559,
"step": 730
},
{
"epoch": 6.115702479338843,
"grad_norm": 0.7945961952209473,
"learning_rate": 3.590740989614131e-05,
"loss": 0.0542,
"step": 740
},
{
"epoch": 6.198347107438017,
"grad_norm": 0.488382488489151,
"learning_rate": 3.460116703397336e-05,
"loss": 0.0599,
"step": 750
},
{
"epoch": 6.2809917355371905,
"grad_norm": 0.5963483452796936,
"learning_rate": 3.330643536757638e-05,
"loss": 0.051,
"step": 760
},
{
"epoch": 6.363636363636363,
"grad_norm": 0.6972360610961914,
"learning_rate": 3.2024182756639185e-05,
"loss": 0.0574,
"step": 770
},
{
"epoch": 6.446280991735537,
"grad_norm": 0.6945924758911133,
"learning_rate": 3.075536773229624e-05,
"loss": 0.0533,
"step": 780
},
{
"epoch": 6.528925619834711,
"grad_norm": 0.3093964755535126,
"learning_rate": 2.9500938780590275e-05,
"loss": 0.0567,
"step": 790
},
{
"epoch": 6.6115702479338845,
"grad_norm": 1.643970251083374,
"learning_rate": 2.826183363344391e-05,
"loss": 0.0527,
"step": 800
},
{
"epoch": 6.694214876033058,
"grad_norm": 0.8777986168861389,
"learning_rate": 2.7038978567670558e-05,
"loss": 0.0543,
"step": 810
},
{
"epoch": 6.776859504132231,
"grad_norm": 0.5457648038864136,
"learning_rate": 2.5833287712548198e-05,
"loss": 0.0533,
"step": 820
},
{
"epoch": 6.859504132231405,
"grad_norm": 0.49185365438461304,
"learning_rate": 2.4645662366474188e-05,
"loss": 0.0551,
"step": 830
},
{
"epoch": 6.9421487603305785,
"grad_norm": 0.3884984254837036,
"learning_rate": 2.3476990323211267e-05,
"loss": 0.0513,
"step": 840
},
{
"epoch": 7.024793388429752,
"grad_norm": 0.5958797335624695,
"learning_rate": 2.2328145208229095e-05,
"loss": 0.0631,
"step": 850
},
{
"epoch": 7.107438016528926,
"grad_norm": 1.0943728685379028,
"learning_rate": 2.119998582563692e-05,
"loss": 0.0531,
"step": 860
},
{
"epoch": 7.190082644628099,
"grad_norm": 0.691371500492096,
"learning_rate": 2.0093355516195888e-05,
"loss": 0.0554,
"step": 870
},
{
"epoch": 7.2727272727272725,
"grad_norm": 5.228278636932373,
"learning_rate": 1.900908152689062e-05,
"loss": 0.0509,
"step": 880
},
{
"epoch": 7.355371900826446,
"grad_norm": 0.3314078748226166,
"learning_rate": 1.7947974392531612e-05,
"loss": 0.0419,
"step": 890
},
{
"epoch": 7.43801652892562,
"grad_norm": 0.6459057927131653,
"learning_rate": 1.6910827329850616e-05,
"loss": 0.0443,
"step": 900
},
{
"epoch": 7.520661157024794,
"grad_norm": 0.29878008365631104,
"learning_rate": 1.589841564454176e-05,
"loss": 0.0424,
"step": 910
},
{
"epoch": 7.6033057851239665,
"grad_norm": 0.4006846249103546,
"learning_rate": 1.4911496151692012e-05,
"loss": 0.0479,
"step": 920
},
{
"epoch": 7.68595041322314,
"grad_norm": 0.5304248332977295,
"learning_rate": 1.3950806610033957e-05,
"loss": 0.0436,
"step": 930
},
{
"epoch": 7.768595041322314,
"grad_norm": 0.7876132726669312,
"learning_rate": 1.3017065170443948e-05,
"loss": 0.0448,
"step": 940
},
{
"epoch": 7.851239669421488,
"grad_norm": 0.9319537281990051,
"learning_rate": 1.2110969839097797e-05,
"loss": 0.045,
"step": 950
},
{
"epoch": 7.933884297520661,
"grad_norm": 1.810137391090393,
"learning_rate": 1.1233197955685409e-05,
"loss": 0.0432,
"step": 960
},
{
"epoch": 8.016528925619834,
"grad_norm": 1.6881096363067627,
"learning_rate": 1.03844056870744e-05,
"loss": 0.0492,
"step": 970
},
{
"epoch": 8.099173553719009,
"grad_norm": 1.5425840616226196,
"learning_rate": 9.565227536801135e-06,
"loss": 0.0454,
"step": 980
},
{
"epoch": 8.181818181818182,
"grad_norm": 1.0203477144241333,
"learning_rate": 8.776275870755924e-06,
"loss": 0.0551,
"step": 990
},
{
"epoch": 8.264462809917354,
"grad_norm": 1.097283959388733,
"learning_rate": 8.018140459416962e-06,
"loss": 0.039,
"step": 1000
},
{
"epoch": 8.347107438016529,
"grad_norm": 0.6623409390449524,
"learning_rate": 7.291388036975072e-06,
"loss": 0.0438,
"step": 1010
},
{
"epoch": 8.429752066115702,
"grad_norm": 0.38722532987594604,
"learning_rate": 6.596561877679036e-06,
"loss": 0.0541,
"step": 1020
},
{
"epoch": 8.512396694214877,
"grad_norm": 0.1958349198102951,
"learning_rate": 5.93418138971803e-06,
"loss": 0.042,
"step": 1030
},
{
"epoch": 8.59504132231405,
"grad_norm": 0.44341278076171875,
"learning_rate": 5.304741726944873e-06,
"loss": 0.0434,
"step": 1040
},
{
"epoch": 8.677685950413224,
"grad_norm": 2.145524024963379,
"learning_rate": 4.70871341873021e-06,
"loss": 0.0357,
"step": 1050
},
{
"epoch": 8.760330578512397,
"grad_norm": 1.4434798955917358,
"learning_rate": 4.146542018224447e-06,
"loss": 0.0405,
"step": 1060
},
{
"epoch": 8.84297520661157,
"grad_norm": 0.6765860915184021,
"learning_rate": 3.6186477692903954e-06,
"loss": 0.0426,
"step": 1070
},
{
"epoch": 8.925619834710744,
"grad_norm": 0.3515833020210266,
"learning_rate": 3.1254252923553994e-06,
"loss": 0.0464,
"step": 1080
},
{
"epoch": 9.008264462809917,
"grad_norm": 0.4808438718318939,
"learning_rate": 2.667243289418059e-06,
"loss": 0.045,
"step": 1090
},
{
"epoch": 9.090909090909092,
"grad_norm": 0.4166981875896454,
"learning_rate": 2.244444268429857e-06,
"loss": 0.0477,
"step": 1100
},
{
"epoch": 9.173553719008265,
"grad_norm": 0.5427719950675964,
"learning_rate": 1.8573442872578616e-06,
"loss": 0.0494,
"step": 1110
},
{
"epoch": 9.256198347107437,
"grad_norm": 0.3531731963157654,
"learning_rate": 1.5062327174197644e-06,
"loss": 0.0431,
"step": 1120
},
{
"epoch": 9.338842975206612,
"grad_norm": 1.0387060642242432,
"learning_rate": 1.191372027768034e-06,
"loss": 0.0478,
"step": 1130
},
{
"epoch": 9.421487603305785,
"grad_norm": 0.3243672251701355,
"learning_rate": 9.129975882847364e-07,
"loss": 0.0393,
"step": 1140
},
{
"epoch": 9.50413223140496,
"grad_norm": 0.33455690741539,
"learning_rate": 6.713174941338162e-07,
"loss": 0.0419,
"step": 1150
},
{
"epoch": 9.586776859504132,
"grad_norm": 0.4129684567451477,
"learning_rate": 4.6651241010226e-07,
"loss": 0.0463,
"step": 1160
},
{
"epoch": 9.669421487603305,
"grad_norm": 2.450713872909546,
"learning_rate": 2.9873543554652106e-07,
"loss": 0.0448,
"step": 1170
},
{
"epoch": 9.75206611570248,
"grad_norm": 0.22442331910133362,
"learning_rate": 1.681119899450856e-07,
"loss": 0.0462,
"step": 1180
},
{
"epoch": 9.834710743801653,
"grad_norm": 0.9303910732269287,
"learning_rate": 7.473971914280787e-08,
"loss": 0.0449,
"step": 1190
},
{
"epoch": 9.917355371900827,
"grad_norm": 1.7050765752792358,
"learning_rate": 1.8688422357004966e-08,
"loss": 0.0365,
"step": 1200
},
{
"epoch": 10.0,
"grad_norm": 0.60956209897995,
"learning_rate": 0.0,
"loss": 0.0377,
"step": 1210
},
{
"epoch": 10.0,
"step": 1210,
"total_flos": 1.3059661832211168e+17,
"train_loss": 0.09883986462738888,
"train_runtime": 1198.7826,
"train_samples_per_second": 49.142,
"train_steps_per_second": 1.009
}
],
"logging_steps": 10,
"max_steps": 1210,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.3059661832211168e+17,
"train_batch_size": 49,
"trial_name": null,
"trial_params": null
}