{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 0,
"global_step": 246,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0040650406504065045,
"grad_norm": 1.2443748712539673,
"learning_rate": 1e-05,
"loss": 2.1704,
"step": 1
},
{
"epoch": 0.008130081300813009,
"grad_norm": 1.198485255241394,
"learning_rate": 9.959349593495936e-06,
"loss": 2.1382,
"step": 2
},
{
"epoch": 0.012195121951219513,
"grad_norm": 1.1655025482177734,
"learning_rate": 9.91869918699187e-06,
"loss": 2.1257,
"step": 3
},
{
"epoch": 0.016260162601626018,
"grad_norm": 1.1087367534637451,
"learning_rate": 9.878048780487805e-06,
"loss": 2.1008,
"step": 4
},
{
"epoch": 0.02032520325203252,
"grad_norm": 1.0223109722137451,
"learning_rate": 9.837398373983741e-06,
"loss": 2.0118,
"step": 5
},
{
"epoch": 0.024390243902439025,
"grad_norm": 1.0011451244354248,
"learning_rate": 9.796747967479675e-06,
"loss": 2.0316,
"step": 6
},
{
"epoch": 0.028455284552845527,
"grad_norm": 0.968566358089447,
"learning_rate": 9.756097560975611e-06,
"loss": 2.0904,
"step": 7
},
{
"epoch": 0.032520325203252036,
"grad_norm": 0.9195019602775574,
"learning_rate": 9.715447154471546e-06,
"loss": 2.0712,
"step": 8
},
{
"epoch": 0.036585365853658534,
"grad_norm": 0.8058457970619202,
"learning_rate": 9.67479674796748e-06,
"loss": 1.8876,
"step": 9
},
{
"epoch": 0.04065040650406504,
"grad_norm": 0.7549053430557251,
"learning_rate": 9.634146341463415e-06,
"loss": 1.8909,
"step": 10
},
{
"epoch": 0.044715447154471545,
"grad_norm": 0.7223576307296753,
"learning_rate": 9.59349593495935e-06,
"loss": 2.0057,
"step": 11
},
{
"epoch": 0.04878048780487805,
"grad_norm": 0.6774242520332336,
"learning_rate": 9.552845528455286e-06,
"loss": 1.9098,
"step": 12
},
{
"epoch": 0.052845528455284556,
"grad_norm": 0.6018033027648926,
"learning_rate": 9.51219512195122e-06,
"loss": 1.8596,
"step": 13
},
{
"epoch": 0.056910569105691054,
"grad_norm": 0.5510868430137634,
"learning_rate": 9.471544715447156e-06,
"loss": 1.8775,
"step": 14
},
{
"epoch": 0.06097560975609756,
"grad_norm": 0.5308274030685425,
"learning_rate": 9.43089430894309e-06,
"loss": 1.853,
"step": 15
},
{
"epoch": 0.06504065040650407,
"grad_norm": 0.47880470752716064,
"learning_rate": 9.390243902439025e-06,
"loss": 1.8277,
"step": 16
},
{
"epoch": 0.06910569105691057,
"grad_norm": 0.49379265308380127,
"learning_rate": 9.34959349593496e-06,
"loss": 1.9113,
"step": 17
},
{
"epoch": 0.07317073170731707,
"grad_norm": 0.462348073720932,
"learning_rate": 9.308943089430895e-06,
"loss": 1.7872,
"step": 18
},
{
"epoch": 0.07723577235772358,
"grad_norm": 0.45618218183517456,
"learning_rate": 9.268292682926831e-06,
"loss": 1.8817,
"step": 19
},
{
"epoch": 0.08130081300813008,
"grad_norm": 0.4135752022266388,
"learning_rate": 9.227642276422764e-06,
"loss": 1.7765,
"step": 20
},
{
"epoch": 0.08536585365853659,
"grad_norm": 0.4315033555030823,
"learning_rate": 9.1869918699187e-06,
"loss": 1.7997,
"step": 21
},
{
"epoch": 0.08943089430894309,
"grad_norm": 0.41308435797691345,
"learning_rate": 9.146341463414635e-06,
"loss": 1.7839,
"step": 22
},
{
"epoch": 0.09349593495934959,
"grad_norm": 0.38933464884757996,
"learning_rate": 9.10569105691057e-06,
"loss": 1.7688,
"step": 23
},
{
"epoch": 0.0975609756097561,
"grad_norm": 0.37282490730285645,
"learning_rate": 9.065040650406505e-06,
"loss": 1.7134,
"step": 24
},
{
"epoch": 0.1016260162601626,
"grad_norm": 0.37599578499794006,
"learning_rate": 9.02439024390244e-06,
"loss": 1.7569,
"step": 25
},
{
"epoch": 0.10569105691056911,
"grad_norm": 0.36915072798728943,
"learning_rate": 8.983739837398374e-06,
"loss": 1.7513,
"step": 26
},
{
"epoch": 0.10975609756097561,
"grad_norm": 0.37054121494293213,
"learning_rate": 8.94308943089431e-06,
"loss": 1.7617,
"step": 27
},
{
"epoch": 0.11382113821138211,
"grad_norm": 0.3536517918109894,
"learning_rate": 8.902439024390244e-06,
"loss": 1.7369,
"step": 28
},
{
"epoch": 0.11788617886178862,
"grad_norm": 0.34083041548728943,
"learning_rate": 8.86178861788618e-06,
"loss": 1.6592,
"step": 29
},
{
"epoch": 0.12195121951219512,
"grad_norm": 0.358023464679718,
"learning_rate": 8.821138211382113e-06,
"loss": 1.7287,
"step": 30
},
{
"epoch": 0.12601626016260162,
"grad_norm": 0.3317272961139679,
"learning_rate": 8.78048780487805e-06,
"loss": 1.6652,
"step": 31
},
{
"epoch": 0.13008130081300814,
"grad_norm": 0.3286559581756592,
"learning_rate": 8.739837398373985e-06,
"loss": 1.6648,
"step": 32
},
{
"epoch": 0.13414634146341464,
"grad_norm": 0.3171916902065277,
"learning_rate": 8.69918699186992e-06,
"loss": 1.6781,
"step": 33
},
{
"epoch": 0.13821138211382114,
"grad_norm": 0.3212682008743286,
"learning_rate": 8.658536585365854e-06,
"loss": 1.706,
"step": 34
},
{
"epoch": 0.14227642276422764,
"grad_norm": 0.321341335773468,
"learning_rate": 8.617886178861789e-06,
"loss": 1.6633,
"step": 35
},
{
"epoch": 0.14634146341463414,
"grad_norm": 0.3067283034324646,
"learning_rate": 8.577235772357724e-06,
"loss": 1.6671,
"step": 36
},
{
"epoch": 0.15040650406504066,
"grad_norm": 0.2952353358268738,
"learning_rate": 8.536585365853658e-06,
"loss": 1.6429,
"step": 37
},
{
"epoch": 0.15447154471544716,
"grad_norm": 0.28897207975387573,
"learning_rate": 8.495934959349595e-06,
"loss": 1.5994,
"step": 38
},
{
"epoch": 0.15853658536585366,
"grad_norm": 0.3044757544994354,
"learning_rate": 8.45528455284553e-06,
"loss": 1.6484,
"step": 39
},
{
"epoch": 0.16260162601626016,
"grad_norm": 0.2715882658958435,
"learning_rate": 8.414634146341464e-06,
"loss": 1.5489,
"step": 40
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.262287437915802,
"learning_rate": 8.373983739837399e-06,
"loss": 1.5362,
"step": 41
},
{
"epoch": 0.17073170731707318,
"grad_norm": 0.2884230315685272,
"learning_rate": 8.333333333333334e-06,
"loss": 1.6417,
"step": 42
},
{
"epoch": 0.17479674796747968,
"grad_norm": 0.2722843289375305,
"learning_rate": 8.292682926829268e-06,
"loss": 1.5869,
"step": 43
},
{
"epoch": 0.17886178861788618,
"grad_norm": 0.25501325726509094,
"learning_rate": 8.252032520325203e-06,
"loss": 1.4989,
"step": 44
},
{
"epoch": 0.18292682926829268,
"grad_norm": 0.2949991822242737,
"learning_rate": 8.21138211382114e-06,
"loss": 1.6111,
"step": 45
},
{
"epoch": 0.18699186991869918,
"grad_norm": 0.26580971479415894,
"learning_rate": 8.170731707317073e-06,
"loss": 1.573,
"step": 46
},
{
"epoch": 0.1910569105691057,
"grad_norm": 0.28367504477500916,
"learning_rate": 8.130081300813009e-06,
"loss": 1.6192,
"step": 47
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.2633240222930908,
"learning_rate": 8.089430894308944e-06,
"loss": 1.5603,
"step": 48
},
{
"epoch": 0.1991869918699187,
"grad_norm": 0.24888679385185242,
"learning_rate": 8.048780487804879e-06,
"loss": 1.5013,
"step": 49
},
{
"epoch": 0.2032520325203252,
"grad_norm": 0.25869449973106384,
"learning_rate": 8.008130081300813e-06,
"loss": 1.5718,
"step": 50
},
{
"epoch": 0.2073170731707317,
"grad_norm": 0.26135310530662537,
"learning_rate": 7.967479674796748e-06,
"loss": 1.5476,
"step": 51
},
{
"epoch": 0.21138211382113822,
"grad_norm": 0.2613206207752228,
"learning_rate": 7.926829268292685e-06,
"loss": 1.5685,
"step": 52
},
{
"epoch": 0.21544715447154472,
"grad_norm": 0.24914291501045227,
"learning_rate": 7.886178861788618e-06,
"loss": 1.522,
"step": 53
},
{
"epoch": 0.21951219512195122,
"grad_norm": 0.2416287362575531,
"learning_rate": 7.845528455284554e-06,
"loss": 1.5258,
"step": 54
},
{
"epoch": 0.22357723577235772,
"grad_norm": 0.23537378013134003,
"learning_rate": 7.804878048780489e-06,
"loss": 1.5002,
"step": 55
},
{
"epoch": 0.22764227642276422,
"grad_norm": 0.24182282388210297,
"learning_rate": 7.764227642276424e-06,
"loss": 1.5264,
"step": 56
},
{
"epoch": 0.23170731707317074,
"grad_norm": 0.24550394713878632,
"learning_rate": 7.723577235772358e-06,
"loss": 1.5361,
"step": 57
},
{
"epoch": 0.23577235772357724,
"grad_norm": 0.22000114619731903,
"learning_rate": 7.682926829268293e-06,
"loss": 1.4482,
"step": 58
},
{
"epoch": 0.23983739837398374,
"grad_norm": 0.21993432939052582,
"learning_rate": 7.64227642276423e-06,
"loss": 1.452,
"step": 59
},
{
"epoch": 0.24390243902439024,
"grad_norm": 0.23607592284679413,
"learning_rate": 7.601626016260163e-06,
"loss": 1.4749,
"step": 60
},
{
"epoch": 0.24796747967479674,
"grad_norm": 0.22953565418720245,
"learning_rate": 7.560975609756098e-06,
"loss": 1.4828,
"step": 61
},
{
"epoch": 0.25203252032520324,
"grad_norm": 0.21641074120998383,
"learning_rate": 7.520325203252034e-06,
"loss": 1.4558,
"step": 62
},
{
"epoch": 0.25609756097560976,
"grad_norm": 0.23295767605304718,
"learning_rate": 7.4796747967479676e-06,
"loss": 1.5053,
"step": 63
},
{
"epoch": 0.2601626016260163,
"grad_norm": 0.21931757032871246,
"learning_rate": 7.439024390243903e-06,
"loss": 1.4599,
"step": 64
},
{
"epoch": 0.26422764227642276,
"grad_norm": 0.21457414329051971,
"learning_rate": 7.398373983739838e-06,
"loss": 1.4686,
"step": 65
},
{
"epoch": 0.2682926829268293,
"grad_norm": 0.2142302244901657,
"learning_rate": 7.357723577235773e-06,
"loss": 1.4362,
"step": 66
},
{
"epoch": 0.27235772357723576,
"grad_norm": 0.21409818530082703,
"learning_rate": 7.317073170731707e-06,
"loss": 1.4607,
"step": 67
},
{
"epoch": 0.2764227642276423,
"grad_norm": 0.211210697889328,
"learning_rate": 7.276422764227643e-06,
"loss": 1.4587,
"step": 68
},
{
"epoch": 0.2804878048780488,
"grad_norm": 0.21644359827041626,
"learning_rate": 7.2357723577235786e-06,
"loss": 1.4481,
"step": 69
},
{
"epoch": 0.2845528455284553,
"grad_norm": 0.2180495411157608,
"learning_rate": 7.1951219512195125e-06,
"loss": 1.416,
"step": 70
},
{
"epoch": 0.2886178861788618,
"grad_norm": 0.20745021104812622,
"learning_rate": 7.154471544715448e-06,
"loss": 1.4157,
"step": 71
},
{
"epoch": 0.2926829268292683,
"grad_norm": 0.20739568769931793,
"learning_rate": 7.113821138211383e-06,
"loss": 1.4276,
"step": 72
},
{
"epoch": 0.2967479674796748,
"grad_norm": 0.20237509906291962,
"learning_rate": 7.0731707317073175e-06,
"loss": 1.4011,
"step": 73
},
{
"epoch": 0.3008130081300813,
"grad_norm": 0.19157394766807556,
"learning_rate": 7.032520325203252e-06,
"loss": 1.3681,
"step": 74
},
{
"epoch": 0.3048780487804878,
"grad_norm": 0.2131585031747818,
"learning_rate": 6.991869918699188e-06,
"loss": 1.4443,
"step": 75
},
{
"epoch": 0.3089430894308943,
"grad_norm": 0.208163782954216,
"learning_rate": 6.951219512195122e-06,
"loss": 1.4089,
"step": 76
},
{
"epoch": 0.3130081300813008,
"grad_norm": 0.1992938071489334,
"learning_rate": 6.910569105691057e-06,
"loss": 1.3887,
"step": 77
},
{
"epoch": 0.3170731707317073,
"grad_norm": 0.20457559823989868,
"learning_rate": 6.869918699186993e-06,
"loss": 1.3853,
"step": 78
},
{
"epoch": 0.32113821138211385,
"grad_norm": 0.20216362178325653,
"learning_rate": 6.829268292682928e-06,
"loss": 1.3893,
"step": 79
},
{
"epoch": 0.3252032520325203,
"grad_norm": 0.19666500389575958,
"learning_rate": 6.788617886178862e-06,
"loss": 1.4147,
"step": 80
},
{
"epoch": 0.32926829268292684,
"grad_norm": 0.2000611275434494,
"learning_rate": 6.747967479674797e-06,
"loss": 1.3686,
"step": 81
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.21209818124771118,
"learning_rate": 6.707317073170733e-06,
"loss": 1.3857,
"step": 82
},
{
"epoch": 0.33739837398373984,
"grad_norm": 0.21749112010002136,
"learning_rate": 6.666666666666667e-06,
"loss": 1.415,
"step": 83
},
{
"epoch": 0.34146341463414637,
"grad_norm": 0.2626613676548004,
"learning_rate": 6.626016260162602e-06,
"loss": 1.3677,
"step": 84
},
{
"epoch": 0.34552845528455284,
"grad_norm": 0.19924424588680267,
"learning_rate": 6.585365853658538e-06,
"loss": 1.3835,
"step": 85
},
{
"epoch": 0.34959349593495936,
"grad_norm": 0.19534048438072205,
"learning_rate": 6.544715447154472e-06,
"loss": 1.3563,
"step": 86
},
{
"epoch": 0.35365853658536583,
"grad_norm": 0.2014574259519577,
"learning_rate": 6.504065040650407e-06,
"loss": 1.3704,
"step": 87
},
{
"epoch": 0.35772357723577236,
"grad_norm": 0.2033248394727707,
"learning_rate": 6.463414634146342e-06,
"loss": 1.4016,
"step": 88
},
{
"epoch": 0.3617886178861789,
"grad_norm": 0.2129010260105133,
"learning_rate": 6.422764227642278e-06,
"loss": 1.4318,
"step": 89
},
{
"epoch": 0.36585365853658536,
"grad_norm": 0.19626440107822418,
"learning_rate": 6.3821138211382115e-06,
"loss": 1.3912,
"step": 90
},
{
"epoch": 0.3699186991869919,
"grad_norm": 0.205119788646698,
"learning_rate": 6.341463414634147e-06,
"loss": 1.4114,
"step": 91
},
{
"epoch": 0.37398373983739835,
"grad_norm": 0.20082072913646698,
"learning_rate": 6.300813008130082e-06,
"loss": 1.3829,
"step": 92
},
{
"epoch": 0.3780487804878049,
"grad_norm": 0.20235207676887512,
"learning_rate": 6.260162601626017e-06,
"loss": 1.3832,
"step": 93
},
{
"epoch": 0.3821138211382114,
"grad_norm": 0.2006131112575531,
"learning_rate": 6.219512195121951e-06,
"loss": 1.376,
"step": 94
},
{
"epoch": 0.3861788617886179,
"grad_norm": 0.19920463860034943,
"learning_rate": 6.178861788617887e-06,
"loss": 1.3868,
"step": 95
},
{
"epoch": 0.3902439024390244,
"grad_norm": 0.19277149438858032,
"learning_rate": 6.138211382113821e-06,
"loss": 1.3526,
"step": 96
},
{
"epoch": 0.3943089430894309,
"grad_norm": 0.1967436820268631,
"learning_rate": 6.0975609756097564e-06,
"loss": 1.3311,
"step": 97
},
{
"epoch": 0.3983739837398374,
"grad_norm": 0.20269839465618134,
"learning_rate": 6.056910569105692e-06,
"loss": 1.3427,
"step": 98
},
{
"epoch": 0.4024390243902439,
"grad_norm": 0.2014787793159485,
"learning_rate": 6.016260162601627e-06,
"loss": 1.3508,
"step": 99
},
{
"epoch": 0.4065040650406504,
"grad_norm": 0.20438070595264435,
"learning_rate": 5.9756097560975615e-06,
"loss": 1.3559,
"step": 100
},
{
"epoch": 0.4105691056910569,
"grad_norm": 0.19471648335456848,
"learning_rate": 5.934959349593496e-06,
"loss": 1.3263,
"step": 101
},
{
"epoch": 0.4146341463414634,
"grad_norm": 0.1897473782300949,
"learning_rate": 5.894308943089432e-06,
"loss": 1.3599,
"step": 102
},
{
"epoch": 0.4186991869918699,
"grad_norm": 0.19831465184688568,
"learning_rate": 5.853658536585366e-06,
"loss": 1.356,
"step": 103
},
{
"epoch": 0.42276422764227645,
"grad_norm": 0.19808410108089447,
"learning_rate": 5.813008130081301e-06,
"loss": 1.3559,
"step": 104
},
{
"epoch": 0.4268292682926829,
"grad_norm": 0.1782761812210083,
"learning_rate": 5.772357723577237e-06,
"loss": 1.31,
"step": 105
},
{
"epoch": 0.43089430894308944,
"grad_norm": 0.2128615379333496,
"learning_rate": 5.731707317073171e-06,
"loss": 1.402,
"step": 106
},
{
"epoch": 0.4349593495934959,
"grad_norm": 0.2091735452413559,
"learning_rate": 5.691056910569106e-06,
"loss": 1.3964,
"step": 107
},
{
"epoch": 0.43902439024390244,
"grad_norm": 0.19047264754772186,
"learning_rate": 5.650406504065041e-06,
"loss": 1.3781,
"step": 108
},
{
"epoch": 0.44308943089430897,
"grad_norm": 0.201967254281044,
"learning_rate": 5.609756097560977e-06,
"loss": 1.3574,
"step": 109
},
{
"epoch": 0.44715447154471544,
"grad_norm": 0.17995291948318481,
"learning_rate": 5.569105691056911e-06,
"loss": 1.3217,
"step": 110
},
{
"epoch": 0.45121951219512196,
"grad_norm": 0.2009151726961136,
"learning_rate": 5.528455284552846e-06,
"loss": 1.372,
"step": 111
},
{
"epoch": 0.45528455284552843,
"grad_norm": 0.18182797729969025,
"learning_rate": 5.487804878048781e-06,
"loss": 1.2898,
"step": 112
},
{
"epoch": 0.45934959349593496,
"grad_norm": 0.18322426080703735,
"learning_rate": 5.447154471544716e-06,
"loss": 1.3016,
"step": 113
},
{
"epoch": 0.4634146341463415,
"grad_norm": 0.18239295482635498,
"learning_rate": 5.4065040650406504e-06,
"loss": 1.3583,
"step": 114
},
{
"epoch": 0.46747967479674796,
"grad_norm": 0.19691058993339539,
"learning_rate": 5.365853658536586e-06,
"loss": 1.313,
"step": 115
},
{
"epoch": 0.4715447154471545,
"grad_norm": 0.18001240491867065,
"learning_rate": 5.32520325203252e-06,
"loss": 1.3059,
"step": 116
},
{
"epoch": 0.47560975609756095,
"grad_norm": 0.1999204009771347,
"learning_rate": 5.2845528455284555e-06,
"loss": 1.324,
"step": 117
},
{
"epoch": 0.4796747967479675,
"grad_norm": 0.19896095991134644,
"learning_rate": 5.243902439024391e-06,
"loss": 1.3648,
"step": 118
},
{
"epoch": 0.483739837398374,
"grad_norm": 0.18776021897792816,
"learning_rate": 5.203252032520326e-06,
"loss": 1.2972,
"step": 119
},
{
"epoch": 0.4878048780487805,
"grad_norm": 0.19416946172714233,
"learning_rate": 5.162601626016261e-06,
"loss": 1.3373,
"step": 120
},
{
"epoch": 0.491869918699187,
"grad_norm": 0.19433578848838806,
"learning_rate": 5.121951219512195e-06,
"loss": 1.3076,
"step": 121
},
{
"epoch": 0.4959349593495935,
"grad_norm": 0.18256975710391998,
"learning_rate": 5.081300813008131e-06,
"loss": 1.2832,
"step": 122
},
{
"epoch": 0.5,
"grad_norm": 0.18513993918895721,
"learning_rate": 5.040650406504065e-06,
"loss": 1.3172,
"step": 123
},
{
"epoch": 0.5040650406504065,
"grad_norm": 0.19009090960025787,
"learning_rate": 5e-06,
"loss": 1.3169,
"step": 124
},
{
"epoch": 0.508130081300813,
"grad_norm": 0.1833588182926178,
"learning_rate": 4.959349593495935e-06,
"loss": 1.2932,
"step": 125
},
{
"epoch": 0.5121951219512195,
"grad_norm": 0.17829759418964386,
"learning_rate": 4.918699186991871e-06,
"loss": 1.3326,
"step": 126
},
{
"epoch": 0.516260162601626,
"grad_norm": 0.18041731417179108,
"learning_rate": 4.8780487804878055e-06,
"loss": 1.2987,
"step": 127
},
{
"epoch": 0.5203252032520326,
"grad_norm": 0.19885776937007904,
"learning_rate": 4.83739837398374e-06,
"loss": 1.3069,
"step": 128
},
{
"epoch": 0.524390243902439,
"grad_norm": 0.17805083096027374,
"learning_rate": 4.796747967479675e-06,
"loss": 1.3226,
"step": 129
},
{
"epoch": 0.5284552845528455,
"grad_norm": 0.176824152469635,
"learning_rate": 4.75609756097561e-06,
"loss": 1.2887,
"step": 130
},
{
"epoch": 0.532520325203252,
"grad_norm": 0.19132983684539795,
"learning_rate": 4.715447154471545e-06,
"loss": 1.3114,
"step": 131
},
{
"epoch": 0.5365853658536586,
"grad_norm": 0.18644371628761292,
"learning_rate": 4.67479674796748e-06,
"loss": 1.3038,
"step": 132
},
{
"epoch": 0.540650406504065,
"grad_norm": 0.19085392355918884,
"learning_rate": 4.634146341463416e-06,
"loss": 1.3141,
"step": 133
},
{
"epoch": 0.5447154471544715,
"grad_norm": 0.17294111847877502,
"learning_rate": 4.59349593495935e-06,
"loss": 1.2709,
"step": 134
},
{
"epoch": 0.5487804878048781,
"grad_norm": 0.21765175461769104,
"learning_rate": 4.552845528455285e-06,
"loss": 1.3192,
"step": 135
},
{
"epoch": 0.5528455284552846,
"grad_norm": 0.1797131896018982,
"learning_rate": 4.51219512195122e-06,
"loss": 1.2781,
"step": 136
},
{
"epoch": 0.556910569105691,
"grad_norm": 0.1938735544681549,
"learning_rate": 4.471544715447155e-06,
"loss": 1.33,
"step": 137
},
{
"epoch": 0.5609756097560976,
"grad_norm": 0.17687402665615082,
"learning_rate": 4.43089430894309e-06,
"loss": 1.2757,
"step": 138
},
{
"epoch": 0.5650406504065041,
"grad_norm": 0.181070938706398,
"learning_rate": 4.390243902439025e-06,
"loss": 1.2786,
"step": 139
},
{
"epoch": 0.5691056910569106,
"grad_norm": 0.22047542035579681,
"learning_rate": 4.34959349593496e-06,
"loss": 1.337,
"step": 140
},
{
"epoch": 0.573170731707317,
"grad_norm": 0.1789373755455017,
"learning_rate": 4.308943089430894e-06,
"loss": 1.2926,
"step": 141
},
{
"epoch": 0.5772357723577236,
"grad_norm": 0.192376047372818,
"learning_rate": 4.268292682926829e-06,
"loss": 1.3014,
"step": 142
},
{
"epoch": 0.5813008130081301,
"grad_norm": 0.18353502452373505,
"learning_rate": 4.227642276422765e-06,
"loss": 1.3375,
"step": 143
},
{
"epoch": 0.5853658536585366,
"grad_norm": 0.18353086709976196,
"learning_rate": 4.1869918699186995e-06,
"loss": 1.2781,
"step": 144
},
{
"epoch": 0.5894308943089431,
"grad_norm": 0.19154737889766693,
"learning_rate": 4.146341463414634e-06,
"loss": 1.2515,
"step": 145
},
{
"epoch": 0.5934959349593496,
"grad_norm": 0.19711637496948242,
"learning_rate": 4.10569105691057e-06,
"loss": 1.325,
"step": 146
},
{
"epoch": 0.5975609756097561,
"grad_norm": 0.19635708630084991,
"learning_rate": 4.0650406504065046e-06,
"loss": 1.3318,
"step": 147
},
{
"epoch": 0.6016260162601627,
"grad_norm": 0.19847285747528076,
"learning_rate": 4.024390243902439e-06,
"loss": 1.3464,
"step": 148
},
{
"epoch": 0.6056910569105691,
"grad_norm": 0.20473280549049377,
"learning_rate": 3.983739837398374e-06,
"loss": 1.3094,
"step": 149
},
{
"epoch": 0.6097560975609756,
"grad_norm": 0.1827707588672638,
"learning_rate": 3.943089430894309e-06,
"loss": 1.3238,
"step": 150
},
{
"epoch": 0.6138211382113821,
"grad_norm": 0.18579614162445068,
"learning_rate": 3.902439024390244e-06,
"loss": 1.3175,
"step": 151
},
{
"epoch": 0.6178861788617886,
"grad_norm": 0.17664667963981628,
"learning_rate": 3.861788617886179e-06,
"loss": 1.2808,
"step": 152
},
{
"epoch": 0.6219512195121951,
"grad_norm": 0.18311426043510437,
"learning_rate": 3.821138211382115e-06,
"loss": 1.2676,
"step": 153
},
{
"epoch": 0.6260162601626016,
"grad_norm": 0.18462157249450684,
"learning_rate": 3.780487804878049e-06,
"loss": 1.3112,
"step": 154
},
{
"epoch": 0.6300813008130082,
"grad_norm": 0.1831246167421341,
"learning_rate": 3.7398373983739838e-06,
"loss": 1.2657,
"step": 155
},
{
"epoch": 0.6341463414634146,
"grad_norm": 0.17907500267028809,
"learning_rate": 3.699186991869919e-06,
"loss": 1.2794,
"step": 156
},
{
"epoch": 0.6382113821138211,
"grad_norm": 0.18612568080425262,
"learning_rate": 3.6585365853658537e-06,
"loss": 1.3098,
"step": 157
},
{
"epoch": 0.6422764227642277,
"grad_norm": 0.19356365501880646,
"learning_rate": 3.6178861788617893e-06,
"loss": 1.2861,
"step": 158
},
{
"epoch": 0.6463414634146342,
"grad_norm": 0.18286974728107452,
"learning_rate": 3.577235772357724e-06,
"loss": 1.2853,
"step": 159
},
{
"epoch": 0.6504065040650406,
"grad_norm": 0.19886158406734467,
"learning_rate": 3.5365853658536588e-06,
"loss": 1.2977,
"step": 160
},
{
"epoch": 0.6544715447154471,
"grad_norm": 0.17624559998512268,
"learning_rate": 3.495934959349594e-06,
"loss": 1.2371,
"step": 161
},
{
"epoch": 0.6585365853658537,
"grad_norm": 0.18079955875873566,
"learning_rate": 3.4552845528455287e-06,
"loss": 1.2884,
"step": 162
},
{
"epoch": 0.6626016260162602,
"grad_norm": 0.18184345960617065,
"learning_rate": 3.414634146341464e-06,
"loss": 1.2872,
"step": 163
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.18814067542552948,
"learning_rate": 3.3739837398373986e-06,
"loss": 1.3077,
"step": 164
},
{
"epoch": 0.6707317073170732,
"grad_norm": 0.18188297748565674,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.252,
"step": 165
},
{
"epoch": 0.6747967479674797,
"grad_norm": 0.18605956435203552,
"learning_rate": 3.292682926829269e-06,
"loss": 1.2462,
"step": 166
},
{
"epoch": 0.6788617886178862,
"grad_norm": 0.18796762824058533,
"learning_rate": 3.2520325203252037e-06,
"loss": 1.207,
"step": 167
},
{
"epoch": 0.6829268292682927,
"grad_norm": 0.17487967014312744,
"learning_rate": 3.211382113821139e-06,
"loss": 1.2696,
"step": 168
},
{
"epoch": 0.6869918699186992,
"grad_norm": 0.1884680986404419,
"learning_rate": 3.1707317073170736e-06,
"loss": 1.2916,
"step": 169
},
{
"epoch": 0.6910569105691057,
"grad_norm": 0.18868300318717957,
"learning_rate": 3.1300813008130083e-06,
"loss": 1.259,
"step": 170
},
{
"epoch": 0.6951219512195121,
"grad_norm": 0.1876087635755539,
"learning_rate": 3.0894308943089435e-06,
"loss": 1.287,
"step": 171
},
{
"epoch": 0.6991869918699187,
"grad_norm": 0.19446611404418945,
"learning_rate": 3.0487804878048782e-06,
"loss": 1.2429,
"step": 172
},
{
"epoch": 0.7032520325203252,
"grad_norm": 0.1855759173631668,
"learning_rate": 3.0081300813008134e-06,
"loss": 1.2698,
"step": 173
},
{
"epoch": 0.7073170731707317,
"grad_norm": 0.18855759501457214,
"learning_rate": 2.967479674796748e-06,
"loss": 1.2977,
"step": 174
},
{
"epoch": 0.7113821138211383,
"grad_norm": 0.20293879508972168,
"learning_rate": 2.926829268292683e-06,
"loss": 1.2701,
"step": 175
},
{
"epoch": 0.7154471544715447,
"grad_norm": 0.1948600560426712,
"learning_rate": 2.8861788617886185e-06,
"loss": 1.3133,
"step": 176
},
{
"epoch": 0.7195121951219512,
"grad_norm": 0.1887495517730713,
"learning_rate": 2.845528455284553e-06,
"loss": 1.26,
"step": 177
},
{
"epoch": 0.7235772357723578,
"grad_norm": 0.19558636844158173,
"learning_rate": 2.8048780487804884e-06,
"loss": 1.2934,
"step": 178
},
{
"epoch": 0.7276422764227642,
"grad_norm": 0.1895134001970291,
"learning_rate": 2.764227642276423e-06,
"loss": 1.3002,
"step": 179
},
{
"epoch": 0.7317073170731707,
"grad_norm": 0.18442754447460175,
"learning_rate": 2.723577235772358e-06,
"loss": 1.2515,
"step": 180
},
{
"epoch": 0.7357723577235772,
"grad_norm": 0.17319710552692413,
"learning_rate": 2.682926829268293e-06,
"loss": 1.2455,
"step": 181
},
{
"epoch": 0.7398373983739838,
"grad_norm": 0.17397530376911163,
"learning_rate": 2.6422764227642278e-06,
"loss": 1.2526,
"step": 182
},
{
"epoch": 0.7439024390243902,
"grad_norm": 0.18584556877613068,
"learning_rate": 2.601626016260163e-06,
"loss": 1.2593,
"step": 183
},
{
"epoch": 0.7479674796747967,
"grad_norm": 0.1924012452363968,
"learning_rate": 2.5609756097560977e-06,
"loss": 1.2845,
"step": 184
},
{
"epoch": 0.7520325203252033,
"grad_norm": 0.18047532439231873,
"learning_rate": 2.5203252032520324e-06,
"loss": 1.3022,
"step": 185
},
{
"epoch": 0.7560975609756098,
"grad_norm": 0.19239181280136108,
"learning_rate": 2.4796747967479676e-06,
"loss": 1.3515,
"step": 186
},
{
"epoch": 0.7601626016260162,
"grad_norm": 0.18099461495876312,
"learning_rate": 2.4390243902439027e-06,
"loss": 1.2365,
"step": 187
},
{
"epoch": 0.7642276422764228,
"grad_norm": 0.19709230959415436,
"learning_rate": 2.3983739837398375e-06,
"loss": 1.2553,
"step": 188
},
{
"epoch": 0.7682926829268293,
"grad_norm": 0.18170712888240814,
"learning_rate": 2.3577235772357727e-06,
"loss": 1.2782,
"step": 189
},
{
"epoch": 0.7723577235772358,
"grad_norm": 0.18511487543582916,
"learning_rate": 2.317073170731708e-06,
"loss": 1.289,
"step": 190
},
{
"epoch": 0.7764227642276422,
"grad_norm": 0.1844373643398285,
"learning_rate": 2.2764227642276426e-06,
"loss": 1.2878,
"step": 191
},
{
"epoch": 0.7804878048780488,
"grad_norm": 0.175090953707695,
"learning_rate": 2.2357723577235773e-06,
"loss": 1.2684,
"step": 192
},
{
"epoch": 0.7845528455284553,
"grad_norm": 0.1956668645143509,
"learning_rate": 2.1951219512195125e-06,
"loss": 1.2691,
"step": 193
},
{
"epoch": 0.7886178861788617,
"grad_norm": 0.17475838959217072,
"learning_rate": 2.154471544715447e-06,
"loss": 1.2333,
"step": 194
},
{
"epoch": 0.7926829268292683,
"grad_norm": 0.1879858672618866,
"learning_rate": 2.1138211382113824e-06,
"loss": 1.2953,
"step": 195
},
{
"epoch": 0.7967479674796748,
"grad_norm": 0.1937568187713623,
"learning_rate": 2.073170731707317e-06,
"loss": 1.2671,
"step": 196
},
{
"epoch": 0.8008130081300813,
"grad_norm": 0.191900834441185,
"learning_rate": 2.0325203252032523e-06,
"loss": 1.2859,
"step": 197
},
{
"epoch": 0.8048780487804879,
"grad_norm": 0.17896786332130432,
"learning_rate": 1.991869918699187e-06,
"loss": 1.2723,
"step": 198
},
{
"epoch": 0.8089430894308943,
"grad_norm": 0.21629469096660614,
"learning_rate": 1.951219512195122e-06,
"loss": 1.2974,
"step": 199
},
{
"epoch": 0.8130081300813008,
"grad_norm": 0.1890254020690918,
"learning_rate": 1.9105691056910574e-06,
"loss": 1.2321,
"step": 200
},
{
"epoch": 0.8170731707317073,
"grad_norm": 0.19379396736621857,
"learning_rate": 1.8699186991869919e-06,
"loss": 1.2474,
"step": 201
},
{
"epoch": 0.8211382113821138,
"grad_norm": 0.17745374143123627,
"learning_rate": 1.8292682926829268e-06,
"loss": 1.2523,
"step": 202
},
{
"epoch": 0.8252032520325203,
"grad_norm": 0.19853124022483826,
"learning_rate": 1.788617886178862e-06,
"loss": 1.2544,
"step": 203
},
{
"epoch": 0.8292682926829268,
"grad_norm": 0.19184249639511108,
"learning_rate": 1.747967479674797e-06,
"loss": 1.2272,
"step": 204
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.18143822252750397,
"learning_rate": 1.707317073170732e-06,
"loss": 1.237,
"step": 205
},
{
"epoch": 0.8373983739837398,
"grad_norm": 0.18249163031578064,
"learning_rate": 1.6666666666666667e-06,
"loss": 1.2222,
"step": 206
},
{
"epoch": 0.8414634146341463,
"grad_norm": 0.1959405392408371,
"learning_rate": 1.6260162601626018e-06,
"loss": 1.2525,
"step": 207
},
{
"epoch": 0.8455284552845529,
"grad_norm": 0.25541865825653076,
"learning_rate": 1.5853658536585368e-06,
"loss": 1.3476,
"step": 208
},
{
"epoch": 0.8495934959349594,
"grad_norm": 0.18992851674556732,
"learning_rate": 1.5447154471544717e-06,
"loss": 1.277,
"step": 209
},
{
"epoch": 0.8536585365853658,
"grad_norm": 0.21258093416690826,
"learning_rate": 1.5040650406504067e-06,
"loss": 1.2651,
"step": 210
},
{
"epoch": 0.8577235772357723,
"grad_norm": 0.19474169611930847,
"learning_rate": 1.4634146341463414e-06,
"loss": 1.2357,
"step": 211
},
{
"epoch": 0.8617886178861789,
"grad_norm": 0.1917145848274231,
"learning_rate": 1.4227642276422766e-06,
"loss": 1.2572,
"step": 212
},
{
"epoch": 0.8658536585365854,
"grad_norm": 0.19641472399234772,
"learning_rate": 1.3821138211382116e-06,
"loss": 1.2907,
"step": 213
},
{
"epoch": 0.8699186991869918,
"grad_norm": 0.19125299155712128,
"learning_rate": 1.3414634146341465e-06,
"loss": 1.2274,
"step": 214
},
{
"epoch": 0.8739837398373984,
"grad_norm": 0.18693210184574127,
"learning_rate": 1.3008130081300815e-06,
"loss": 1.2848,
"step": 215
},
{
"epoch": 0.8780487804878049,
"grad_norm": 0.1859700083732605,
"learning_rate": 1.2601626016260162e-06,
"loss": 1.2359,
"step": 216
},
{
"epoch": 0.8821138211382114,
"grad_norm": 0.20786939561367035,
"learning_rate": 1.2195121951219514e-06,
"loss": 1.2777,
"step": 217
},
{
"epoch": 0.8861788617886179,
"grad_norm": 0.18669180572032928,
"learning_rate": 1.1788617886178863e-06,
"loss": 1.2823,
"step": 218
},
{
"epoch": 0.8902439024390244,
"grad_norm": 0.2070901244878769,
"learning_rate": 1.1382113821138213e-06,
"loss": 1.3024,
"step": 219
},
{
"epoch": 0.8943089430894309,
"grad_norm": 0.19596239924430847,
"learning_rate": 1.0975609756097562e-06,
"loss": 1.2324,
"step": 220
},
{
"epoch": 0.8983739837398373,
"grad_norm": 0.19302377104759216,
"learning_rate": 1.0569105691056912e-06,
"loss": 1.2666,
"step": 221
},
{
"epoch": 0.9024390243902439,
"grad_norm": 0.19474874436855316,
"learning_rate": 1.0162601626016261e-06,
"loss": 1.2566,
"step": 222
},
{
"epoch": 0.9065040650406504,
"grad_norm": 0.18354295194149017,
"learning_rate": 9.75609756097561e-07,
"loss": 1.2615,
"step": 223
},
{
"epoch": 0.9105691056910569,
"grad_norm": 0.19459068775177002,
"learning_rate": 9.349593495934959e-07,
"loss": 1.2776,
"step": 224
},
{
"epoch": 0.9146341463414634,
"grad_norm": 0.20152311027050018,
"learning_rate": 8.94308943089431e-07,
"loss": 1.2719,
"step": 225
},
{
"epoch": 0.9186991869918699,
"grad_norm": 0.18795745074748993,
"learning_rate": 8.53658536585366e-07,
"loss": 1.2685,
"step": 226
},
{
"epoch": 0.9227642276422764,
"grad_norm": 0.1888560950756073,
"learning_rate": 8.130081300813009e-07,
"loss": 1.2476,
"step": 227
},
{
"epoch": 0.926829268292683,
"grad_norm": 0.189235657453537,
"learning_rate": 7.723577235772359e-07,
"loss": 1.2692,
"step": 228
},
{
"epoch": 0.9308943089430894,
"grad_norm": 0.17088742554187775,
"learning_rate": 7.317073170731707e-07,
"loss": 1.2531,
"step": 229
},
{
"epoch": 0.9349593495934959,
"grad_norm": 0.18722310662269592,
"learning_rate": 6.910569105691058e-07,
"loss": 1.2428,
"step": 230
},
{
"epoch": 0.9390243902439024,
"grad_norm": 0.17706981301307678,
"learning_rate": 6.504065040650407e-07,
"loss": 1.2566,
"step": 231
},
{
"epoch": 0.943089430894309,
"grad_norm": 0.20467019081115723,
"learning_rate": 6.097560975609757e-07,
"loss": 1.2928,
"step": 232
},
{
"epoch": 0.9471544715447154,
"grad_norm": 0.17970900237560272,
"learning_rate": 5.691056910569106e-07,
"loss": 1.2513,
"step": 233
},
{
"epoch": 0.9512195121951219,
"grad_norm": 0.17920483648777008,
"learning_rate": 5.284552845528456e-07,
"loss": 1.2004,
"step": 234
},
{
"epoch": 0.9552845528455285,
"grad_norm": 0.18399786949157715,
"learning_rate": 4.878048780487805e-07,
"loss": 1.2692,
"step": 235
},
{
"epoch": 0.959349593495935,
"grad_norm": 0.17873434722423553,
"learning_rate": 4.471544715447155e-07,
"loss": 1.2468,
"step": 236
},
{
"epoch": 0.9634146341463414,
"grad_norm": 0.18509528040885925,
"learning_rate": 4.0650406504065046e-07,
"loss": 1.271,
"step": 237
},
{
"epoch": 0.967479674796748,
"grad_norm": 0.19402359426021576,
"learning_rate": 3.6585365853658536e-07,
"loss": 1.2472,
"step": 238
},
{
"epoch": 0.9715447154471545,
"grad_norm": 0.19081974029541016,
"learning_rate": 3.2520325203252037e-07,
"loss": 1.2416,
"step": 239
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.17355622351169586,
"learning_rate": 2.845528455284553e-07,
"loss": 1.2045,
"step": 240
},
{
"epoch": 0.9796747967479674,
"grad_norm": 0.20138268172740936,
"learning_rate": 2.439024390243903e-07,
"loss": 1.2959,
"step": 241
},
{
"epoch": 0.983739837398374,
"grad_norm": 0.18235686421394348,
"learning_rate": 2.0325203252032523e-07,
"loss": 1.2448,
"step": 242
},
{
"epoch": 0.9878048780487805,
"grad_norm": 0.17484009265899658,
"learning_rate": 1.6260162601626018e-07,
"loss": 1.2441,
"step": 243
},
{
"epoch": 0.991869918699187,
"grad_norm": 0.18030861020088196,
"learning_rate": 1.2195121951219514e-07,
"loss": 1.2433,
"step": 244
},
{
"epoch": 0.9959349593495935,
"grad_norm": 0.1873464286327362,
"learning_rate": 8.130081300813009e-08,
"loss": 1.2672,
"step": 245
},
{
"epoch": 1.0,
"grad_norm": 0.17657622694969177,
"learning_rate": 4.0650406504065046e-08,
"loss": 1.2245,
"step": 246
}
],
"logging_steps": 1.0,
"max_steps": 246,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.599595463265485e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}