4Brp / checkpoint-574 / trainer_state.json
hardlyworking: Training in progress, step 574, checkpoint (41284af, verified)
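The JSON below is the trainer_state.json that the Hugging Face Trainer saves alongside each checkpoint: log_history holds one entry per optimizer step (loss, learning_rate, grad_norm) plus an evaluation entry every eval_steps (72 here) with eval_loss and throughput figures. As a minimal sketch, the state can be loaded and summarized as follows, assuming the checkpoint has been downloaded to a local checkpoint-574/ directory (the path is an assumption, not part of the checkpoint itself):

```python
import json

# Load the checkpoint's trainer state (local path is an assumption).
with open("checkpoint-574/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes per-step training entries ("loss") with periodic
# evaluation entries ("eval_loss", logged every eval_steps = 72 steps).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.4f}")
for e in eval_logs:
    print(f"step {e['step']:>4}  eval_loss={e['eval_loss']:.4f}")
```

The raw file contents follow.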
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.999129677980853,
"eval_steps": 72,
"global_step": 574,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_loss": 1.136976718902588,
"eval_runtime": 64.1168,
"eval_samples_per_second": 73.117,
"eval_steps_per_second": 9.14,
"step": 0
},
{
"epoch": 0.0017406440382941688,
"grad_norm": 1.859375,
"learning_rate": 0.0,
"loss": 1.1426,
"step": 1
},
{
"epoch": 0.0034812880765883376,
"grad_norm": 1.9765625,
"learning_rate": 1.7543859649122808e-07,
"loss": 1.1274,
"step": 2
},
{
"epoch": 0.005221932114882507,
"grad_norm": 1.828125,
"learning_rate": 3.5087719298245616e-07,
"loss": 1.1205,
"step": 3
},
{
"epoch": 0.006962576153176675,
"grad_norm": 1.8515625,
"learning_rate": 5.263157894736843e-07,
"loss": 1.1383,
"step": 4
},
{
"epoch": 0.008703220191470844,
"grad_norm": 1.796875,
"learning_rate": 7.017543859649123e-07,
"loss": 1.14,
"step": 5
},
{
"epoch": 0.010443864229765013,
"grad_norm": 1.734375,
"learning_rate": 8.771929824561404e-07,
"loss": 1.1031,
"step": 6
},
{
"epoch": 0.012184508268059183,
"grad_norm": 2.015625,
"learning_rate": 1.0526315789473685e-06,
"loss": 1.1794,
"step": 7
},
{
"epoch": 0.01392515230635335,
"grad_norm": 1.765625,
"learning_rate": 1.2280701754385965e-06,
"loss": 1.1766,
"step": 8
},
{
"epoch": 0.015665796344647518,
"grad_norm": 1.8046875,
"learning_rate": 1.4035087719298246e-06,
"loss": 1.1251,
"step": 9
},
{
"epoch": 0.017406440382941687,
"grad_norm": 1.6796875,
"learning_rate": 1.5789473684210526e-06,
"loss": 1.1622,
"step": 10
},
{
"epoch": 0.019147084421235857,
"grad_norm": 1.8671875,
"learning_rate": 1.7543859649122807e-06,
"loss": 1.0846,
"step": 11
},
{
"epoch": 0.020887728459530026,
"grad_norm": 1.765625,
"learning_rate": 1.929824561403509e-06,
"loss": 1.1644,
"step": 12
},
{
"epoch": 0.022628372497824196,
"grad_norm": 1.7265625,
"learning_rate": 2.105263157894737e-06,
"loss": 1.1361,
"step": 13
},
{
"epoch": 0.024369016536118365,
"grad_norm": 1.8671875,
"learning_rate": 2.280701754385965e-06,
"loss": 1.1783,
"step": 14
},
{
"epoch": 0.02610966057441253,
"grad_norm": 1.828125,
"learning_rate": 2.456140350877193e-06,
"loss": 1.127,
"step": 15
},
{
"epoch": 0.0278503046127067,
"grad_norm": 1.640625,
"learning_rate": 2.631578947368421e-06,
"loss": 1.1587,
"step": 16
},
{
"epoch": 0.02959094865100087,
"grad_norm": 1.671875,
"learning_rate": 2.8070175438596493e-06,
"loss": 1.1088,
"step": 17
},
{
"epoch": 0.031331592689295036,
"grad_norm": 1.59375,
"learning_rate": 2.9824561403508774e-06,
"loss": 1.174,
"step": 18
},
{
"epoch": 0.03307223672758921,
"grad_norm": 1.71875,
"learning_rate": 3.157894736842105e-06,
"loss": 1.1453,
"step": 19
},
{
"epoch": 0.034812880765883375,
"grad_norm": 1.8515625,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.1922,
"step": 20
},
{
"epoch": 0.03655352480417755,
"grad_norm": 1.5390625,
"learning_rate": 3.5087719298245615e-06,
"loss": 1.1541,
"step": 21
},
{
"epoch": 0.038294168842471714,
"grad_norm": 1.5390625,
"learning_rate": 3.6842105263157896e-06,
"loss": 1.0909,
"step": 22
},
{
"epoch": 0.04003481288076589,
"grad_norm": 1.515625,
"learning_rate": 3.859649122807018e-06,
"loss": 1.1498,
"step": 23
},
{
"epoch": 0.04177545691906005,
"grad_norm": 1.5703125,
"learning_rate": 4.035087719298246e-06,
"loss": 1.1096,
"step": 24
},
{
"epoch": 0.04351610095735422,
"grad_norm": 1.453125,
"learning_rate": 4.210526315789474e-06,
"loss": 1.1152,
"step": 25
},
{
"epoch": 0.04525674499564839,
"grad_norm": 1.390625,
"learning_rate": 4.385964912280702e-06,
"loss": 1.1111,
"step": 26
},
{
"epoch": 0.04699738903394256,
"grad_norm": 1.296875,
"learning_rate": 4.56140350877193e-06,
"loss": 1.161,
"step": 27
},
{
"epoch": 0.04873803307223673,
"grad_norm": 1.390625,
"learning_rate": 4.736842105263158e-06,
"loss": 1.1557,
"step": 28
},
{
"epoch": 0.050478677110530897,
"grad_norm": 1.3125,
"learning_rate": 4.912280701754386e-06,
"loss": 1.1065,
"step": 29
},
{
"epoch": 0.05221932114882506,
"grad_norm": 1.28125,
"learning_rate": 5.087719298245615e-06,
"loss": 1.0493,
"step": 30
},
{
"epoch": 0.053959965187119235,
"grad_norm": 1.1953125,
"learning_rate": 5.263157894736842e-06,
"loss": 1.0652,
"step": 31
},
{
"epoch": 0.0557006092254134,
"grad_norm": 1.1484375,
"learning_rate": 5.438596491228071e-06,
"loss": 1.0389,
"step": 32
},
{
"epoch": 0.057441253263707574,
"grad_norm": 1.1015625,
"learning_rate": 5.6140350877192985e-06,
"loss": 1.0349,
"step": 33
},
{
"epoch": 0.05918189730200174,
"grad_norm": 1.0390625,
"learning_rate": 5.789473684210527e-06,
"loss": 1.1109,
"step": 34
},
{
"epoch": 0.060922541340295906,
"grad_norm": 1.03125,
"learning_rate": 5.964912280701755e-06,
"loss": 1.0541,
"step": 35
},
{
"epoch": 0.06266318537859007,
"grad_norm": 0.9453125,
"learning_rate": 6.140350877192983e-06,
"loss": 1.1176,
"step": 36
},
{
"epoch": 0.06440382941688425,
"grad_norm": 0.9140625,
"learning_rate": 6.31578947368421e-06,
"loss": 1.0774,
"step": 37
},
{
"epoch": 0.06614447345517842,
"grad_norm": 0.9453125,
"learning_rate": 6.491228070175439e-06,
"loss": 1.031,
"step": 38
},
{
"epoch": 0.06788511749347259,
"grad_norm": 0.85546875,
"learning_rate": 6.666666666666667e-06,
"loss": 1.0977,
"step": 39
},
{
"epoch": 0.06962576153176675,
"grad_norm": 0.8203125,
"learning_rate": 6.842105263157896e-06,
"loss": 1.0871,
"step": 40
},
{
"epoch": 0.07136640557006092,
"grad_norm": 0.83984375,
"learning_rate": 7.017543859649123e-06,
"loss": 1.0856,
"step": 41
},
{
"epoch": 0.0731070496083551,
"grad_norm": 0.82421875,
"learning_rate": 7.192982456140352e-06,
"loss": 1.0189,
"step": 42
},
{
"epoch": 0.07484769364664925,
"grad_norm": 0.76953125,
"learning_rate": 7.368421052631579e-06,
"loss": 1.0633,
"step": 43
},
{
"epoch": 0.07658833768494343,
"grad_norm": 0.7578125,
"learning_rate": 7.5438596491228074e-06,
"loss": 1.0582,
"step": 44
},
{
"epoch": 0.0783289817232376,
"grad_norm": 0.78125,
"learning_rate": 7.719298245614036e-06,
"loss": 1.0261,
"step": 45
},
{
"epoch": 0.08006962576153177,
"grad_norm": 0.66796875,
"learning_rate": 7.894736842105265e-06,
"loss": 0.9962,
"step": 46
},
{
"epoch": 0.08181026979982593,
"grad_norm": 0.6953125,
"learning_rate": 8.070175438596492e-06,
"loss": 1.0041,
"step": 47
},
{
"epoch": 0.0835509138381201,
"grad_norm": 0.6640625,
"learning_rate": 8.24561403508772e-06,
"loss": 1.0953,
"step": 48
},
{
"epoch": 0.08529155787641428,
"grad_norm": 0.671875,
"learning_rate": 8.421052631578948e-06,
"loss": 1.0397,
"step": 49
},
{
"epoch": 0.08703220191470844,
"grad_norm": 0.66015625,
"learning_rate": 8.596491228070176e-06,
"loss": 1.0366,
"step": 50
},
{
"epoch": 0.08877284595300261,
"grad_norm": 0.65625,
"learning_rate": 8.771929824561405e-06,
"loss": 1.005,
"step": 51
},
{
"epoch": 0.09051348999129678,
"grad_norm": 0.62109375,
"learning_rate": 8.947368421052632e-06,
"loss": 1.1106,
"step": 52
},
{
"epoch": 0.09225413402959094,
"grad_norm": 0.59375,
"learning_rate": 9.12280701754386e-06,
"loss": 1.002,
"step": 53
},
{
"epoch": 0.09399477806788512,
"grad_norm": 0.5859375,
"learning_rate": 9.298245614035088e-06,
"loss": 1.0599,
"step": 54
},
{
"epoch": 0.09573542210617929,
"grad_norm": 0.59765625,
"learning_rate": 9.473684210526315e-06,
"loss": 1.0736,
"step": 55
},
{
"epoch": 0.09747606614447346,
"grad_norm": 0.56640625,
"learning_rate": 9.649122807017545e-06,
"loss": 1.0179,
"step": 56
},
{
"epoch": 0.09921671018276762,
"grad_norm": 0.58203125,
"learning_rate": 9.824561403508772e-06,
"loss": 1.0308,
"step": 57
},
{
"epoch": 0.10095735422106179,
"grad_norm": 0.5625,
"learning_rate": 1e-05,
"loss": 1.0135,
"step": 58
},
{
"epoch": 0.10269799825935597,
"grad_norm": 0.5859375,
"learning_rate": 9.999979270446263e-06,
"loss": 0.9598,
"step": 59
},
{
"epoch": 0.10443864229765012,
"grad_norm": 0.5390625,
"learning_rate": 9.999917081956933e-06,
"loss": 1.0041,
"step": 60
},
{
"epoch": 0.1061792863359443,
"grad_norm": 0.53125,
"learning_rate": 9.999813435047668e-06,
"loss": 1.0726,
"step": 61
},
{
"epoch": 0.10791993037423847,
"grad_norm": 0.55078125,
"learning_rate": 9.99966833057789e-06,
"loss": 0.9915,
"step": 62
},
{
"epoch": 0.10966057441253264,
"grad_norm": 0.55078125,
"learning_rate": 9.999481769750779e-06,
"loss": 1.0266,
"step": 63
},
{
"epoch": 0.1114012184508268,
"grad_norm": 0.5234375,
"learning_rate": 9.999253754113263e-06,
"loss": 1.0524,
"step": 64
},
{
"epoch": 0.11314186248912098,
"grad_norm": 0.53125,
"learning_rate": 9.998984285556008e-06,
"loss": 1.0254,
"step": 65
},
{
"epoch": 0.11488250652741515,
"grad_norm": 0.53515625,
"learning_rate": 9.998673366313399e-06,
"loss": 1.0132,
"step": 66
},
{
"epoch": 0.11662315056570931,
"grad_norm": 0.5234375,
"learning_rate": 9.998320998963523e-06,
"loss": 0.9604,
"step": 67
},
{
"epoch": 0.11836379460400348,
"grad_norm": 0.54296875,
"learning_rate": 9.997927186428145e-06,
"loss": 1.0147,
"step": 68
},
{
"epoch": 0.12010443864229765,
"grad_norm": 0.52734375,
"learning_rate": 9.997491931972694e-06,
"loss": 0.998,
"step": 69
},
{
"epoch": 0.12184508268059181,
"grad_norm": 0.51953125,
"learning_rate": 9.997015239206216e-06,
"loss": 1.034,
"step": 70
},
{
"epoch": 0.12358572671888599,
"grad_norm": 0.53125,
"learning_rate": 9.996497112081365e-06,
"loss": 1.0103,
"step": 71
},
{
"epoch": 0.12532637075718014,
"grad_norm": 0.5078125,
"learning_rate": 9.99593755489436e-06,
"loss": 1.0053,
"step": 72
},
{
"epoch": 0.12532637075718014,
"eval_loss": 0.9893413186073303,
"eval_runtime": 60.3364,
"eval_samples_per_second": 77.698,
"eval_steps_per_second": 9.712,
"step": 72
},
{
"epoch": 0.12706701479547433,
"grad_norm": 0.50390625,
"learning_rate": 9.995336572284945e-06,
"loss": 0.9841,
"step": 73
},
{
"epoch": 0.1288076588337685,
"grad_norm": 0.53125,
"learning_rate": 9.994694169236366e-06,
"loss": 0.9298,
"step": 74
},
{
"epoch": 0.13054830287206268,
"grad_norm": 0.51171875,
"learning_rate": 9.99401035107531e-06,
"loss": 1.0449,
"step": 75
},
{
"epoch": 0.13228894691035684,
"grad_norm": 0.515625,
"learning_rate": 9.993285123471878e-06,
"loss": 1.0139,
"step": 76
},
{
"epoch": 0.134029590948651,
"grad_norm": 0.5078125,
"learning_rate": 9.992518492439526e-06,
"loss": 1.0129,
"step": 77
},
{
"epoch": 0.13577023498694518,
"grad_norm": 0.54296875,
"learning_rate": 9.991710464335022e-06,
"loss": 0.9696,
"step": 78
},
{
"epoch": 0.13751087902523934,
"grad_norm": 0.53125,
"learning_rate": 9.990861045858392e-06,
"loss": 1.0114,
"step": 79
},
{
"epoch": 0.1392515230635335,
"grad_norm": 0.515625,
"learning_rate": 9.989970244052861e-06,
"loss": 0.957,
"step": 80
},
{
"epoch": 0.1409921671018277,
"grad_norm": 0.5078125,
"learning_rate": 9.9890380663048e-06,
"loss": 0.9603,
"step": 81
},
{
"epoch": 0.14273281114012185,
"grad_norm": 0.52734375,
"learning_rate": 9.98806452034366e-06,
"loss": 1.0523,
"step": 82
},
{
"epoch": 0.144473455178416,
"grad_norm": 0.5078125,
"learning_rate": 9.987049614241907e-06,
"loss": 0.9804,
"step": 83
},
{
"epoch": 0.1462140992167102,
"grad_norm": 0.54296875,
"learning_rate": 9.985993356414965e-06,
"loss": 1.0307,
"step": 84
},
{
"epoch": 0.14795474325500435,
"grad_norm": 0.515625,
"learning_rate": 9.984895755621136e-06,
"loss": 0.9802,
"step": 85
},
{
"epoch": 0.1496953872932985,
"grad_norm": 0.494140625,
"learning_rate": 9.983756820961528e-06,
"loss": 0.9547,
"step": 86
},
{
"epoch": 0.1514360313315927,
"grad_norm": 0.5,
"learning_rate": 9.982576561879984e-06,
"loss": 1.014,
"step": 87
},
{
"epoch": 0.15317667536988686,
"grad_norm": 0.494140625,
"learning_rate": 9.981354988163002e-06,
"loss": 0.9708,
"step": 88
},
{
"epoch": 0.15491731940818101,
"grad_norm": 0.51171875,
"learning_rate": 9.980092109939651e-06,
"loss": 0.9717,
"step": 89
},
{
"epoch": 0.1566579634464752,
"grad_norm": 0.484375,
"learning_rate": 9.978787937681496e-06,
"loss": 0.9862,
"step": 90
},
{
"epoch": 0.15839860748476936,
"grad_norm": 0.498046875,
"learning_rate": 9.977442482202498e-06,
"loss": 0.9813,
"step": 91
},
{
"epoch": 0.16013925152306355,
"grad_norm": 0.494140625,
"learning_rate": 9.976055754658935e-06,
"loss": 0.9551,
"step": 92
},
{
"epoch": 0.1618798955613577,
"grad_norm": 0.51171875,
"learning_rate": 9.974627766549301e-06,
"loss": 0.9777,
"step": 93
},
{
"epoch": 0.16362053959965187,
"grad_norm": 0.50390625,
"learning_rate": 9.973158529714224e-06,
"loss": 0.9648,
"step": 94
},
{
"epoch": 0.16536118363794605,
"grad_norm": 0.48828125,
"learning_rate": 9.971648056336349e-06,
"loss": 0.964,
"step": 95
},
{
"epoch": 0.1671018276762402,
"grad_norm": 0.5078125,
"learning_rate": 9.97009635894025e-06,
"loss": 0.9635,
"step": 96
},
{
"epoch": 0.16884247171453437,
"grad_norm": 0.515625,
"learning_rate": 9.968503450392332e-06,
"loss": 1.0115,
"step": 97
},
{
"epoch": 0.17058311575282856,
"grad_norm": 0.51171875,
"learning_rate": 9.966869343900702e-06,
"loss": 0.9841,
"step": 98
},
{
"epoch": 0.17232375979112272,
"grad_norm": 0.484375,
"learning_rate": 9.965194053015083e-06,
"loss": 1.0241,
"step": 99
},
{
"epoch": 0.17406440382941687,
"grad_norm": 0.50390625,
"learning_rate": 9.963477591626687e-06,
"loss": 0.968,
"step": 100
},
{
"epoch": 0.17580504786771106,
"grad_norm": 0.50390625,
"learning_rate": 9.961719973968102e-06,
"loss": 0.946,
"step": 101
},
{
"epoch": 0.17754569190600522,
"grad_norm": 0.5234375,
"learning_rate": 9.959921214613187e-06,
"loss": 0.9284,
"step": 102
},
{
"epoch": 0.17928633594429938,
"grad_norm": 0.494140625,
"learning_rate": 9.958081328476926e-06,
"loss": 0.958,
"step": 103
},
{
"epoch": 0.18102697998259357,
"grad_norm": 0.486328125,
"learning_rate": 9.956200330815329e-06,
"loss": 0.9361,
"step": 104
},
{
"epoch": 0.18276762402088773,
"grad_norm": 0.5078125,
"learning_rate": 9.954278237225296e-06,
"loss": 1.0116,
"step": 105
},
{
"epoch": 0.18450826805918188,
"grad_norm": 0.5078125,
"learning_rate": 9.952315063644479e-06,
"loss": 1.0041,
"step": 106
},
{
"epoch": 0.18624891209747607,
"grad_norm": 0.5,
"learning_rate": 9.950310826351168e-06,
"loss": 0.9725,
"step": 107
},
{
"epoch": 0.18798955613577023,
"grad_norm": 0.498046875,
"learning_rate": 9.948265541964136e-06,
"loss": 0.9763,
"step": 108
},
{
"epoch": 0.18973020017406442,
"grad_norm": 0.5,
"learning_rate": 9.946179227442521e-06,
"loss": 0.9814,
"step": 109
},
{
"epoch": 0.19147084421235858,
"grad_norm": 0.50390625,
"learning_rate": 9.944051900085668e-06,
"loss": 1.0092,
"step": 110
},
{
"epoch": 0.19321148825065274,
"grad_norm": 0.515625,
"learning_rate": 9.941883577532993e-06,
"loss": 0.9836,
"step": 111
},
{
"epoch": 0.19495213228894692,
"grad_norm": 0.49609375,
"learning_rate": 9.939674277763845e-06,
"loss": 0.9598,
"step": 112
},
{
"epoch": 0.19669277632724108,
"grad_norm": 0.48828125,
"learning_rate": 9.937424019097337e-06,
"loss": 0.988,
"step": 113
},
{
"epoch": 0.19843342036553524,
"grad_norm": 0.498046875,
"learning_rate": 9.935132820192218e-06,
"loss": 0.9512,
"step": 114
},
{
"epoch": 0.20017406440382943,
"grad_norm": 0.498046875,
"learning_rate": 9.932800700046697e-06,
"loss": 0.9916,
"step": 115
},
{
"epoch": 0.20191470844212359,
"grad_norm": 0.51171875,
"learning_rate": 9.9304276779983e-06,
"loss": 0.9397,
"step": 116
},
{
"epoch": 0.20365535248041775,
"grad_norm": 0.48828125,
"learning_rate": 9.9280137737237e-06,
"loss": 0.9791,
"step": 117
},
{
"epoch": 0.20539599651871193,
"grad_norm": 0.515625,
"learning_rate": 9.925559007238564e-06,
"loss": 0.9231,
"step": 118
},
{
"epoch": 0.2071366405570061,
"grad_norm": 0.5,
"learning_rate": 9.923063398897372e-06,
"loss": 0.9854,
"step": 119
},
{
"epoch": 0.20887728459530025,
"grad_norm": 0.50390625,
"learning_rate": 9.920526969393267e-06,
"loss": 1.0411,
"step": 120
},
{
"epoch": 0.21061792863359444,
"grad_norm": 0.5078125,
"learning_rate": 9.917949739757869e-06,
"loss": 1.0254,
"step": 121
},
{
"epoch": 0.2123585726718886,
"grad_norm": 0.5,
"learning_rate": 9.915331731361104e-06,
"loss": 0.9208,
"step": 122
},
{
"epoch": 0.21409921671018275,
"grad_norm": 0.5078125,
"learning_rate": 9.912672965911034e-06,
"loss": 0.9195,
"step": 123
},
{
"epoch": 0.21583986074847694,
"grad_norm": 0.48046875,
"learning_rate": 9.909973465453666e-06,
"loss": 0.9938,
"step": 124
},
{
"epoch": 0.2175805047867711,
"grad_norm": 0.5078125,
"learning_rate": 9.907233252372775e-06,
"loss": 0.904,
"step": 125
},
{
"epoch": 0.2193211488250653,
"grad_norm": 0.48828125,
"learning_rate": 9.904452349389717e-06,
"loss": 0.9882,
"step": 126
},
{
"epoch": 0.22106179286335945,
"grad_norm": 0.50390625,
"learning_rate": 9.901630779563247e-06,
"loss": 0.9688,
"step": 127
},
{
"epoch": 0.2228024369016536,
"grad_norm": 0.51953125,
"learning_rate": 9.898768566289316e-06,
"loss": 1.0522,
"step": 128
},
{
"epoch": 0.2245430809399478,
"grad_norm": 0.48828125,
"learning_rate": 9.895865733300887e-06,
"loss": 0.9551,
"step": 129
},
{
"epoch": 0.22628372497824195,
"grad_norm": 0.498046875,
"learning_rate": 9.89292230466773e-06,
"loss": 0.9623,
"step": 130
},
{
"epoch": 0.2280243690165361,
"grad_norm": 0.515625,
"learning_rate": 9.889938304796236e-06,
"loss": 0.9702,
"step": 131
},
{
"epoch": 0.2297650130548303,
"grad_norm": 0.5078125,
"learning_rate": 9.886913758429194e-06,
"loss": 0.9679,
"step": 132
},
{
"epoch": 0.23150565709312446,
"grad_norm": 0.466796875,
"learning_rate": 9.883848690645601e-06,
"loss": 0.9719,
"step": 133
},
{
"epoch": 0.23324630113141862,
"grad_norm": 0.4921875,
"learning_rate": 9.880743126860458e-06,
"loss": 0.9717,
"step": 134
},
{
"epoch": 0.2349869451697128,
"grad_norm": 0.4921875,
"learning_rate": 9.87759709282454e-06,
"loss": 0.9365,
"step": 135
},
{
"epoch": 0.23672758920800696,
"grad_norm": 0.478515625,
"learning_rate": 9.874410614624202e-06,
"loss": 0.9254,
"step": 136
},
{
"epoch": 0.23846823324630112,
"grad_norm": 0.498046875,
"learning_rate": 9.871183718681153e-06,
"loss": 1.0045,
"step": 137
},
{
"epoch": 0.2402088772845953,
"grad_norm": 0.51171875,
"learning_rate": 9.867916431752237e-06,
"loss": 0.9693,
"step": 138
},
{
"epoch": 0.24194952132288947,
"grad_norm": 0.474609375,
"learning_rate": 9.864608780929218e-06,
"loss": 0.9981,
"step": 139
},
{
"epoch": 0.24369016536118362,
"grad_norm": 0.4921875,
"learning_rate": 9.861260793638539e-06,
"loss": 0.9569,
"step": 140
},
{
"epoch": 0.2454308093994778,
"grad_norm": 0.490234375,
"learning_rate": 9.857872497641117e-06,
"loss": 1.0422,
"step": 141
},
{
"epoch": 0.24717145343777197,
"grad_norm": 0.49609375,
"learning_rate": 9.854443921032098e-06,
"loss": 1.0408,
"step": 142
},
{
"epoch": 0.24891209747606616,
"grad_norm": 0.484375,
"learning_rate": 9.850975092240625e-06,
"loss": 0.8893,
"step": 143
},
{
"epoch": 0.2506527415143603,
"grad_norm": 0.5078125,
"learning_rate": 9.84746604002961e-06,
"loss": 0.9679,
"step": 144
},
{
"epoch": 0.2506527415143603,
"eval_loss": 0.9576423168182373,
"eval_runtime": 59.1215,
"eval_samples_per_second": 79.294,
"eval_steps_per_second": 9.912,
"step": 144
},
{
"epoch": 0.2523933855526545,
"grad_norm": 0.5,
"learning_rate": 9.843916793495487e-06,
"loss": 0.9071,
"step": 145
},
{
"epoch": 0.25413402959094866,
"grad_norm": 0.498046875,
"learning_rate": 9.840327382067972e-06,
"loss": 0.9496,
"step": 146
},
{
"epoch": 0.2558746736292428,
"grad_norm": 0.5078125,
"learning_rate": 9.836697835509827e-06,
"loss": 0.9864,
"step": 147
},
{
"epoch": 0.257615317667537,
"grad_norm": 0.5,
"learning_rate": 9.833028183916601e-06,
"loss": 1.0082,
"step": 148
},
{
"epoch": 0.25935596170583114,
"grad_norm": 0.494140625,
"learning_rate": 9.829318457716395e-06,
"loss": 0.9591,
"step": 149
},
{
"epoch": 0.26109660574412535,
"grad_norm": 0.515625,
"learning_rate": 9.82556868766959e-06,
"loss": 1.0204,
"step": 150
},
{
"epoch": 0.2628372497824195,
"grad_norm": 0.4921875,
"learning_rate": 9.821778904868616e-06,
"loss": 0.9805,
"step": 151
},
{
"epoch": 0.26457789382071367,
"grad_norm": 0.5078125,
"learning_rate": 9.817949140737672e-06,
"loss": 0.9761,
"step": 152
},
{
"epoch": 0.26631853785900783,
"grad_norm": 0.50390625,
"learning_rate": 9.81407942703248e-06,
"loss": 0.9789,
"step": 153
},
{
"epoch": 0.268059181897302,
"grad_norm": 0.49609375,
"learning_rate": 9.810169795840012e-06,
"loss": 0.952,
"step": 154
},
{
"epoch": 0.26979982593559615,
"grad_norm": 0.484375,
"learning_rate": 9.806220279578236e-06,
"loss": 0.9431,
"step": 155
},
{
"epoch": 0.27154046997389036,
"grad_norm": 0.5078125,
"learning_rate": 9.802230910995833e-06,
"loss": 1.0015,
"step": 156
},
{
"epoch": 0.2732811140121845,
"grad_norm": 0.49609375,
"learning_rate": 9.798201723171938e-06,
"loss": 0.9513,
"step": 157
},
{
"epoch": 0.2750217580504787,
"grad_norm": 0.48828125,
"learning_rate": 9.794132749515854e-06,
"loss": 0.9454,
"step": 158
},
{
"epoch": 0.27676240208877284,
"grad_norm": 0.494140625,
"learning_rate": 9.790024023766789e-06,
"loss": 0.9581,
"step": 159
},
{
"epoch": 0.278503046127067,
"grad_norm": 0.482421875,
"learning_rate": 9.785875579993558e-06,
"loss": 0.9874,
"step": 160
},
{
"epoch": 0.28024369016536116,
"grad_norm": 0.49609375,
"learning_rate": 9.781687452594318e-06,
"loss": 0.9417,
"step": 161
},
{
"epoch": 0.2819843342036554,
"grad_norm": 0.478515625,
"learning_rate": 9.777459676296276e-06,
"loss": 0.9589,
"step": 162
},
{
"epoch": 0.28372497824194953,
"grad_norm": 0.515625,
"learning_rate": 9.773192286155395e-06,
"loss": 0.9851,
"step": 163
},
{
"epoch": 0.2854656222802437,
"grad_norm": 0.49609375,
"learning_rate": 9.768885317556116e-06,
"loss": 0.98,
"step": 164
},
{
"epoch": 0.28720626631853785,
"grad_norm": 0.5078125,
"learning_rate": 9.764538806211052e-06,
"loss": 0.9651,
"step": 165
},
{
"epoch": 0.288946910356832,
"grad_norm": 0.50390625,
"learning_rate": 9.760152788160697e-06,
"loss": 0.9407,
"step": 166
},
{
"epoch": 0.2906875543951262,
"grad_norm": 0.46875,
"learning_rate": 9.755727299773135e-06,
"loss": 0.9553,
"step": 167
},
{
"epoch": 0.2924281984334204,
"grad_norm": 0.470703125,
"learning_rate": 9.75126237774372e-06,
"loss": 0.9823,
"step": 168
},
{
"epoch": 0.29416884247171454,
"grad_norm": 0.5,
"learning_rate": 9.746758059094791e-06,
"loss": 0.9832,
"step": 169
},
{
"epoch": 0.2959094865100087,
"grad_norm": 0.50390625,
"learning_rate": 9.742214381175355e-06,
"loss": 0.8976,
"step": 170
},
{
"epoch": 0.29765013054830286,
"grad_norm": 0.48828125,
"learning_rate": 9.737631381660777e-06,
"loss": 0.9331,
"step": 171
},
{
"epoch": 0.299390774586597,
"grad_norm": 0.50390625,
"learning_rate": 9.733009098552473e-06,
"loss": 0.9666,
"step": 172
},
{
"epoch": 0.30113141862489123,
"grad_norm": 0.490234375,
"learning_rate": 9.728347570177587e-06,
"loss": 0.9781,
"step": 173
},
{
"epoch": 0.3028720626631854,
"grad_norm": 0.484375,
"learning_rate": 9.723646835188681e-06,
"loss": 0.969,
"step": 174
},
{
"epoch": 0.30461270670147955,
"grad_norm": 0.51171875,
"learning_rate": 9.71890693256341e-06,
"loss": 0.9481,
"step": 175
},
{
"epoch": 0.3063533507397737,
"grad_norm": 0.49609375,
"learning_rate": 9.7141279016042e-06,
"loss": 1.0301,
"step": 176
},
{
"epoch": 0.30809399477806787,
"grad_norm": 0.48828125,
"learning_rate": 9.709309781937925e-06,
"loss": 1.023,
"step": 177
},
{
"epoch": 0.30983463881636203,
"grad_norm": 0.490234375,
"learning_rate": 9.704452613515571e-06,
"loss": 0.9598,
"step": 178
},
{
"epoch": 0.31157528285465624,
"grad_norm": 0.482421875,
"learning_rate": 9.699556436611912e-06,
"loss": 0.9699,
"step": 179
},
{
"epoch": 0.3133159268929504,
"grad_norm": 0.498046875,
"learning_rate": 9.694621291825174e-06,
"loss": 0.903,
"step": 180
},
{
"epoch": 0.31505657093124456,
"grad_norm": 0.515625,
"learning_rate": 9.689647220076696e-06,
"loss": 0.9581,
"step": 181
},
{
"epoch": 0.3167972149695387,
"grad_norm": 0.490234375,
"learning_rate": 9.684634262610593e-06,
"loss": 0.939,
"step": 182
},
{
"epoch": 0.3185378590078329,
"grad_norm": 0.498046875,
"learning_rate": 9.679582460993413e-06,
"loss": 0.9363,
"step": 183
},
{
"epoch": 0.3202785030461271,
"grad_norm": 0.482421875,
"learning_rate": 9.674491857113792e-06,
"loss": 1.0215,
"step": 184
},
{
"epoch": 0.32201914708442125,
"grad_norm": 0.48828125,
"learning_rate": 9.669362493182112e-06,
"loss": 0.9464,
"step": 185
},
{
"epoch": 0.3237597911227154,
"grad_norm": 0.482421875,
"learning_rate": 9.66419441173014e-06,
"loss": 0.8955,
"step": 186
},
{
"epoch": 0.32550043516100957,
"grad_norm": 0.494140625,
"learning_rate": 9.658987655610687e-06,
"loss": 0.9503,
"step": 187
},
{
"epoch": 0.32724107919930373,
"grad_norm": 0.474609375,
"learning_rate": 9.653742267997245e-06,
"loss": 0.9808,
"step": 188
},
{
"epoch": 0.3289817232375979,
"grad_norm": 0.48046875,
"learning_rate": 9.648458292383631e-06,
"loss": 1.0155,
"step": 189
},
{
"epoch": 0.3307223672758921,
"grad_norm": 0.5078125,
"learning_rate": 9.643135772583627e-06,
"loss": 0.929,
"step": 190
},
{
"epoch": 0.33246301131418626,
"grad_norm": 0.5,
"learning_rate": 9.63777475273062e-06,
"loss": 0.9214,
"step": 191
},
{
"epoch": 0.3342036553524804,
"grad_norm": 0.47265625,
"learning_rate": 9.632375277277226e-06,
"loss": 0.9762,
"step": 192
},
{
"epoch": 0.3359442993907746,
"grad_norm": 0.474609375,
"learning_rate": 9.626937390994932e-06,
"loss": 0.9734,
"step": 193
},
{
"epoch": 0.33768494342906874,
"grad_norm": 0.498046875,
"learning_rate": 9.621461138973725e-06,
"loss": 0.9599,
"step": 194
},
{
"epoch": 0.3394255874673629,
"grad_norm": 0.50390625,
"learning_rate": 9.615946566621704e-06,
"loss": 0.9448,
"step": 195
},
{
"epoch": 0.3411662315056571,
"grad_norm": 0.494140625,
"learning_rate": 9.61039371966472e-06,
"loss": 0.9721,
"step": 196
},
{
"epoch": 0.3429068755439513,
"grad_norm": 0.48828125,
"learning_rate": 9.60480264414599e-06,
"loss": 0.97,
"step": 197
},
{
"epoch": 0.34464751958224543,
"grad_norm": 0.49609375,
"learning_rate": 9.599173386425711e-06,
"loss": 0.9964,
"step": 198
},
{
"epoch": 0.3463881636205396,
"grad_norm": 0.486328125,
"learning_rate": 9.593505993180687e-06,
"loss": 0.9715,
"step": 199
},
{
"epoch": 0.34812880765883375,
"grad_norm": 0.4921875,
"learning_rate": 9.587800511403931e-06,
"loss": 0.9655,
"step": 200
},
{
"epoch": 0.34986945169712796,
"grad_norm": 0.5078125,
"learning_rate": 9.582056988404276e-06,
"loss": 0.9606,
"step": 201
},
{
"epoch": 0.3516100957354221,
"grad_norm": 0.50390625,
"learning_rate": 9.576275471805993e-06,
"loss": 0.9166,
"step": 202
},
{
"epoch": 0.3533507397737163,
"grad_norm": 0.494140625,
"learning_rate": 9.570456009548383e-06,
"loss": 0.9063,
"step": 203
},
{
"epoch": 0.35509138381201044,
"grad_norm": 0.484375,
"learning_rate": 9.564598649885391e-06,
"loss": 0.9295,
"step": 204
},
{
"epoch": 0.3568320278503046,
"grad_norm": 0.48046875,
"learning_rate": 9.558703441385195e-06,
"loss": 0.9933,
"step": 205
},
{
"epoch": 0.35857267188859876,
"grad_norm": 0.48046875,
"learning_rate": 9.552770432929812e-06,
"loss": 0.9572,
"step": 206
},
{
"epoch": 0.360313315926893,
"grad_norm": 0.482421875,
"learning_rate": 9.54679967371469e-06,
"loss": 0.9484,
"step": 207
},
{
"epoch": 0.36205395996518713,
"grad_norm": 0.47265625,
"learning_rate": 9.540791213248299e-06,
"loss": 0.9266,
"step": 208
},
{
"epoch": 0.3637946040034813,
"grad_norm": 0.5,
"learning_rate": 9.534745101351719e-06,
"loss": 0.9526,
"step": 209
},
{
"epoch": 0.36553524804177545,
"grad_norm": 0.5,
"learning_rate": 9.528661388158234e-06,
"loss": 1.0046,
"step": 210
},
{
"epoch": 0.3672758920800696,
"grad_norm": 0.51171875,
"learning_rate": 9.522540124112902e-06,
"loss": 0.9757,
"step": 211
},
{
"epoch": 0.36901653611836377,
"grad_norm": 0.48046875,
"learning_rate": 9.516381359972157e-06,
"loss": 1.0195,
"step": 212
},
{
"epoch": 0.370757180156658,
"grad_norm": 0.490234375,
"learning_rate": 9.51018514680337e-06,
"loss": 0.971,
"step": 213
},
{
"epoch": 0.37249782419495214,
"grad_norm": 0.5,
"learning_rate": 9.503951535984434e-06,
"loss": 0.9336,
"step": 214
},
{
"epoch": 0.3742384682332463,
"grad_norm": 0.494140625,
"learning_rate": 9.49768057920334e-06,
"loss": 0.9887,
"step": 215
},
{
"epoch": 0.37597911227154046,
"grad_norm": 0.486328125,
"learning_rate": 9.491372328457737e-06,
"loss": 0.966,
"step": 216
},
{
"epoch": 0.37597911227154046,
"eval_loss": 0.9439952373504639,
"eval_runtime": 59.4647,
"eval_samples_per_second": 78.837,
"eval_steps_per_second": 9.855,
"step": 216
},
{
"epoch": 0.3777197563098346,
"grad_norm": 0.494140625,
"learning_rate": 9.485026836054519e-06,
"loss": 0.9771,
"step": 217
},
{
"epoch": 0.37946040034812883,
"grad_norm": 0.498046875,
"learning_rate": 9.478644154609372e-06,
"loss": 0.9988,
"step": 218
},
{
"epoch": 0.381201044386423,
"grad_norm": 0.470703125,
"learning_rate": 9.472224337046357e-06,
"loss": 0.9875,
"step": 219
},
{
"epoch": 0.38294168842471715,
"grad_norm": 0.482421875,
"learning_rate": 9.46576743659745e-06,
"loss": 0.9664,
"step": 220
},
{
"epoch": 0.3846823324630113,
"grad_norm": 0.478515625,
"learning_rate": 9.45927350680212e-06,
"loss": 0.9693,
"step": 221
},
{
"epoch": 0.38642297650130547,
"grad_norm": 0.48828125,
"learning_rate": 9.452742601506873e-06,
"loss": 0.9137,
"step": 222
},
{
"epoch": 0.38816362053959963,
"grad_norm": 0.5078125,
"learning_rate": 9.446174774864808e-06,
"loss": 0.9047,
"step": 223
},
{
"epoch": 0.38990426457789384,
"grad_norm": 0.486328125,
"learning_rate": 9.439570081335173e-06,
"loss": 0.9926,
"step": 224
},
{
"epoch": 0.391644908616188,
"grad_norm": 0.478515625,
"learning_rate": 9.432928575682908e-06,
"loss": 0.9693,
"step": 225
},
{
"epoch": 0.39338555265448216,
"grad_norm": 0.50390625,
"learning_rate": 9.426250312978191e-06,
"loss": 0.9106,
"step": 226
},
{
"epoch": 0.3951261966927763,
"grad_norm": 0.5,
"learning_rate": 9.419535348595985e-06,
"loss": 0.9639,
"step": 227
},
{
"epoch": 0.3968668407310705,
"grad_norm": 0.49609375,
"learning_rate": 9.412783738215576e-06,
"loss": 0.9338,
"step": 228
},
{
"epoch": 0.39860748476936464,
"grad_norm": 0.48828125,
"learning_rate": 9.405995537820111e-06,
"loss": 1.0216,
"step": 229
},
{
"epoch": 0.40034812880765885,
"grad_norm": 0.498046875,
"learning_rate": 9.399170803696139e-06,
"loss": 0.942,
"step": 230
},
{
"epoch": 0.402088772845953,
"grad_norm": 0.486328125,
"learning_rate": 9.392309592433134e-06,
"loss": 0.9184,
"step": 231
},
{
"epoch": 0.40382941688424717,
"grad_norm": 0.5234375,
"learning_rate": 9.385411960923036e-06,
"loss": 0.9178,
"step": 232
},
{
"epoch": 0.40557006092254133,
"grad_norm": 0.4921875,
"learning_rate": 9.378477966359773e-06,
"loss": 0.9303,
"step": 233
},
{
"epoch": 0.4073107049608355,
"grad_norm": 0.5,
"learning_rate": 9.371507666238793e-06,
"loss": 0.9563,
"step": 234
},
{
"epoch": 0.4090513489991297,
"grad_norm": 0.47265625,
"learning_rate": 9.364501118356579e-06,
"loss": 0.92,
"step": 235
},
{
"epoch": 0.41079199303742386,
"grad_norm": 0.4765625,
"learning_rate": 9.357458380810175e-06,
"loss": 0.9532,
"step": 236
},
{
"epoch": 0.412532637075718,
"grad_norm": 0.482421875,
"learning_rate": 9.350379511996706e-06,
"loss": 0.9604,
"step": 237
},
{
"epoch": 0.4142732811140122,
"grad_norm": 0.48828125,
"learning_rate": 9.343264570612883e-06,
"loss": 0.9415,
"step": 238
},
{
"epoch": 0.41601392515230634,
"grad_norm": 0.48828125,
"learning_rate": 9.336113615654535e-06,
"loss": 0.9752,
"step": 239
},
{
"epoch": 0.4177545691906005,
"grad_norm": 0.484375,
"learning_rate": 9.328926706416102e-06,
"loss": 0.9517,
"step": 240
},
{
"epoch": 0.4194952132288947,
"grad_norm": 0.484375,
"learning_rate": 9.321703902490152e-06,
"loss": 0.9245,
"step": 241
},
{
"epoch": 0.4212358572671889,
"grad_norm": 0.50390625,
"learning_rate": 9.314445263766888e-06,
"loss": 0.9341,
"step": 242
},
{
"epoch": 0.42297650130548303,
"grad_norm": 0.47265625,
"learning_rate": 9.307150850433643e-06,
"loss": 0.9399,
"step": 243
},
{
"epoch": 0.4247171453437772,
"grad_norm": 0.48828125,
"learning_rate": 9.299820722974396e-06,
"loss": 0.9865,
"step": 244
},
{
"epoch": 0.42645778938207135,
"grad_norm": 0.5078125,
"learning_rate": 9.29245494216925e-06,
"loss": 0.9538,
"step": 245
},
{
"epoch": 0.4281984334203655,
"grad_norm": 0.5,
"learning_rate": 9.285053569093948e-06,
"loss": 1.0095,
"step": 246
},
{
"epoch": 0.4299390774586597,
"grad_norm": 0.494140625,
"learning_rate": 9.277616665119352e-06,
"loss": 0.9691,
"step": 247
},
{
"epoch": 0.4316797214969539,
"grad_norm": 0.4921875,
"learning_rate": 9.27014429191094e-06,
"loss": 0.9854,
"step": 248
},
{
"epoch": 0.43342036553524804,
"grad_norm": 0.498046875,
"learning_rate": 9.262636511428304e-06,
"loss": 0.9179,
"step": 249
},
{
"epoch": 0.4351610095735422,
"grad_norm": 0.490234375,
"learning_rate": 9.255093385924616e-06,
"loss": 0.9388,
"step": 250
},
{
"epoch": 0.43690165361183636,
"grad_norm": 0.51953125,
"learning_rate": 9.247514977946124e-06,
"loss": 0.9788,
"step": 251
},
{
"epoch": 0.4386422976501306,
"grad_norm": 0.515625,
"learning_rate": 9.239901350331635e-06,
"loss": 0.9301,
"step": 252
},
{
"epoch": 0.44038294168842473,
"grad_norm": 0.474609375,
"learning_rate": 9.232252566211993e-06,
"loss": 0.9656,
"step": 253
},
{
"epoch": 0.4421235857267189,
"grad_norm": 0.4921875,
"learning_rate": 9.224568689009548e-06,
"loss": 1.0119,
"step": 254
},
{
"epoch": 0.44386422976501305,
"grad_norm": 0.49609375,
"learning_rate": 9.216849782437637e-06,
"loss": 0.874,
"step": 255
},
{
"epoch": 0.4456048738033072,
"grad_norm": 0.5,
"learning_rate": 9.20909591050006e-06,
"loss": 0.9191,
"step": 256
},
{
"epoch": 0.44734551784160137,
"grad_norm": 0.50390625,
"learning_rate": 9.201307137490536e-06,
"loss": 0.9017,
"step": 257
},
{
"epoch": 0.4490861618798956,
"grad_norm": 0.4921875,
"learning_rate": 9.19348352799218e-06,
"loss": 0.9363,
"step": 258
},
{
"epoch": 0.45082680591818974,
"grad_norm": 0.494140625,
"learning_rate": 9.185625146876966e-06,
"loss": 0.9921,
"step": 259
},
{
"epoch": 0.4525674499564839,
"grad_norm": 0.482421875,
"learning_rate": 9.177732059305187e-06,
"loss": 0.9358,
"step": 260
},
{
"epoch": 0.45430809399477806,
"grad_norm": 0.494140625,
"learning_rate": 9.169804330724916e-06,
"loss": 0.9257,
"step": 261
},
{
"epoch": 0.4560487380330722,
"grad_norm": 0.49609375,
"learning_rate": 9.161842026871465e-06,
"loss": 0.9201,
"step": 262
},
{
"epoch": 0.4577893820713664,
"grad_norm": 0.498046875,
"learning_rate": 9.153845213766837e-06,
"loss": 0.9212,
"step": 263
},
{
"epoch": 0.4595300261096606,
"grad_norm": 0.5,
"learning_rate": 9.145813957719174e-06,
"loss": 0.9735,
"step": 264
},
{
"epoch": 0.46127067014795475,
"grad_norm": 0.5078125,
"learning_rate": 9.137748325322223e-06,
"loss": 0.9585,
"step": 265
},
{
"epoch": 0.4630113141862489,
"grad_norm": 0.50390625,
"learning_rate": 9.129648383454764e-06,
"loss": 0.9781,
"step": 266
},
{
"epoch": 0.46475195822454307,
"grad_norm": 0.486328125,
"learning_rate": 9.121514199280072e-06,
"loss": 0.9759,
"step": 267
},
{
"epoch": 0.46649260226283723,
"grad_norm": 0.5078125,
"learning_rate": 9.113345840245348e-06,
"loss": 0.9688,
"step": 268
},
{
"epoch": 0.46823324630113144,
"grad_norm": 0.49609375,
"learning_rate": 9.105143374081167e-06,
"loss": 0.9092,
"step": 269
},
{
"epoch": 0.4699738903394256,
"grad_norm": 0.48046875,
"learning_rate": 9.096906868800917e-06,
"loss": 0.9357,
"step": 270
},
{
"epoch": 0.47171453437771976,
"grad_norm": 0.4921875,
"learning_rate": 9.088636392700227e-06,
"loss": 1.0134,
"step": 271
},
{
"epoch": 0.4734551784160139,
"grad_norm": 0.49609375,
"learning_rate": 9.08033201435641e-06,
"loss": 0.9494,
"step": 272
},
{
"epoch": 0.4751958224543081,
"grad_norm": 0.48046875,
"learning_rate": 9.071993802627887e-06,
"loss": 0.9446,
"step": 273
},
{
"epoch": 0.47693646649260224,
"grad_norm": 0.494140625,
"learning_rate": 9.063621826653624e-06,
"loss": 0.8926,
"step": 274
},
{
"epoch": 0.47867711053089645,
"grad_norm": 0.484375,
"learning_rate": 9.055216155852548e-06,
"loss": 0.9216,
"step": 275
},
{
"epoch": 0.4804177545691906,
"grad_norm": 0.470703125,
"learning_rate": 9.046776859922983e-06,
"loss": 0.9442,
"step": 276
},
{
"epoch": 0.4821583986074848,
"grad_norm": 0.484375,
"learning_rate": 9.038304008842064e-06,
"loss": 0.9456,
"step": 277
},
{
"epoch": 0.48389904264577893,
"grad_norm": 0.482421875,
"learning_rate": 9.02979767286516e-06,
"loss": 0.909,
"step": 278
},
{
"epoch": 0.4856396866840731,
"grad_norm": 0.50390625,
"learning_rate": 9.021257922525289e-06,
"loss": 0.9597,
"step": 279
},
{
"epoch": 0.48738033072236725,
"grad_norm": 0.5,
"learning_rate": 9.012684828632538e-06,
"loss": 0.9646,
"step": 280
},
{
"epoch": 0.48912097476066146,
"grad_norm": 0.50390625,
"learning_rate": 9.004078462273471e-06,
"loss": 0.9679,
"step": 281
},
{
"epoch": 0.4908616187989556,
"grad_norm": 0.5078125,
"learning_rate": 8.995438894810541e-06,
"loss": 0.952,
"step": 282
},
{
"epoch": 0.4926022628372498,
"grad_norm": 0.484375,
"learning_rate": 8.9867661978815e-06,
"loss": 0.9792,
"step": 283
},
{
"epoch": 0.49434290687554394,
"grad_norm": 0.5,
"learning_rate": 8.978060443398802e-06,
"loss": 0.939,
"step": 284
},
{
"epoch": 0.4960835509138381,
"grad_norm": 0.486328125,
"learning_rate": 8.96932170354901e-06,
"loss": 0.9974,
"step": 285
},
{
"epoch": 0.4978241949521323,
"grad_norm": 0.48828125,
"learning_rate": 8.960550050792194e-06,
"loss": 0.9947,
"step": 286
},
{
"epoch": 0.4995648389904265,
"grad_norm": 0.50390625,
"learning_rate": 8.951745557861333e-06,
"loss": 0.9139,
"step": 287
},
{
"epoch": 0.5013054830287206,
"grad_norm": 0.49609375,
"learning_rate": 8.942908297761712e-06,
"loss": 0.9397,
"step": 288
},
{
"epoch": 0.5013054830287206,
"eval_loss": 0.9357889294624329,
"eval_runtime": 60.8471,
"eval_samples_per_second": 77.046,
"eval_steps_per_second": 9.631,
"step": 288
},
{
"epoch": 0.5030461270670148,
"grad_norm": 0.482421875,
"learning_rate": 8.934038343770312e-06,
"loss": 0.9656,
"step": 289
},
{
"epoch": 0.504786771105309,
"grad_norm": 0.5078125,
"learning_rate": 8.925135769435211e-06,
"loss": 0.9896,
"step": 290
},
{
"epoch": 0.5065274151436031,
"grad_norm": 0.5,
"learning_rate": 8.916200648574964e-06,
"loss": 0.905,
"step": 291
},
{
"epoch": 0.5082680591818973,
"grad_norm": 0.490234375,
"learning_rate": 8.907233055277999e-06,
"loss": 0.9309,
"step": 292
},
{
"epoch": 0.5100087032201914,
"grad_norm": 0.48046875,
"learning_rate": 8.898233063902e-06,
"loss": 0.9796,
"step": 293
},
{
"epoch": 0.5117493472584856,
"grad_norm": 0.5234375,
"learning_rate": 8.889200749073285e-06,
"loss": 0.9335,
"step": 294
},
{
"epoch": 0.5134899912967799,
"grad_norm": 0.498046875,
"learning_rate": 8.880136185686202e-06,
"loss": 0.9292,
"step": 295
},
{
"epoch": 0.515230635335074,
"grad_norm": 0.478515625,
"learning_rate": 8.871039448902488e-06,
"loss": 0.9116,
"step": 296
},
{
"epoch": 0.5169712793733682,
"grad_norm": 0.47265625,
"learning_rate": 8.861910614150662e-06,
"loss": 0.9315,
"step": 297
},
{
"epoch": 0.5187119234116623,
"grad_norm": 0.478515625,
"learning_rate": 8.852749757125392e-06,
"loss": 0.9283,
"step": 298
},
{
"epoch": 0.5204525674499565,
"grad_norm": 0.5078125,
"learning_rate": 8.843556953786872e-06,
"loss": 0.952,
"step": 299
},
{
"epoch": 0.5221932114882507,
"grad_norm": 0.48828125,
"learning_rate": 8.834332280360181e-06,
"loss": 0.9999,
"step": 300
},
{
"epoch": 0.5239338555265448,
"grad_norm": 0.498046875,
"learning_rate": 8.82507581333467e-06,
"loss": 0.9453,
"step": 301
},
{
"epoch": 0.525674499564839,
"grad_norm": 0.482421875,
"learning_rate": 8.815787629463306e-06,
"loss": 0.8678,
"step": 302
},
{
"epoch": 0.5274151436031331,
"grad_norm": 0.5,
"learning_rate": 8.806467805762056e-06,
"loss": 0.9878,
"step": 303
},
{
"epoch": 0.5291557876414273,
"grad_norm": 0.49609375,
"learning_rate": 8.797116419509232e-06,
"loss": 0.8964,
"step": 304
},
{
"epoch": 0.5308964316797214,
"grad_norm": 0.474609375,
"learning_rate": 8.78773354824486e-06,
"loss": 0.9584,
"step": 305
},
{
"epoch": 0.5326370757180157,
"grad_norm": 0.482421875,
"learning_rate": 8.778319269770033e-06,
"loss": 0.9715,
"step": 306
},
{
"epoch": 0.5343777197563099,
"grad_norm": 0.484375,
"learning_rate": 8.768873662146271e-06,
"loss": 0.9034,
"step": 307
},
{
"epoch": 0.536118363794604,
"grad_norm": 0.494140625,
"learning_rate": 8.759396803694863e-06,
"loss": 0.9189,
"step": 308
},
{
"epoch": 0.5378590078328982,
"grad_norm": 0.484375,
"learning_rate": 8.749888772996226e-06,
"loss": 1.0066,
"step": 309
},
{
"epoch": 0.5395996518711923,
"grad_norm": 0.47265625,
"learning_rate": 8.74034964888926e-06,
"loss": 1.0147,
"step": 310
},
{
"epoch": 0.5413402959094865,
"grad_norm": 0.484375,
"learning_rate": 8.730779510470672e-06,
"loss": 0.9504,
"step": 311
},
{
"epoch": 0.5430809399477807,
"grad_norm": 0.482421875,
"learning_rate": 8.721178437094346e-06,
"loss": 0.9239,
"step": 312
},
{
"epoch": 0.5448215839860748,
"grad_norm": 0.5078125,
"learning_rate": 8.711546508370666e-06,
"loss": 0.9145,
"step": 313
},
{
"epoch": 0.546562228024369,
"grad_norm": 0.498046875,
"learning_rate": 8.701883804165867e-06,
"loss": 0.897,
"step": 314
},
{
"epoch": 0.5483028720626631,
"grad_norm": 0.490234375,
"learning_rate": 8.692190404601368e-06,
"loss": 0.8796,
"step": 315
},
{
"epoch": 0.5500435161009574,
"grad_norm": 0.53515625,
"learning_rate": 8.682466390053106e-06,
"loss": 1.03,
"step": 316
},
{
"epoch": 0.5517841601392516,
"grad_norm": 0.482421875,
"learning_rate": 8.672711841150877e-06,
"loss": 0.9676,
"step": 317
},
{
"epoch": 0.5535248041775457,
"grad_norm": 0.490234375,
"learning_rate": 8.662926838777657e-06,
"loss": 1.0098,
"step": 318
},
{
"epoch": 0.5552654482158399,
"grad_norm": 0.5078125,
"learning_rate": 8.653111464068937e-06,
"loss": 0.9344,
"step": 319
},
{
"epoch": 0.557006092254134,
"grad_norm": 0.51171875,
"learning_rate": 8.643265798412057e-06,
"loss": 0.9555,
"step": 320
},
{
"epoch": 0.5587467362924282,
"grad_norm": 0.5,
"learning_rate": 8.633389923445515e-06,
"loss": 0.8845,
"step": 321
},
{
"epoch": 0.5604873803307223,
"grad_norm": 0.48046875,
"learning_rate": 8.623483921058304e-06,
"loss": 0.9403,
"step": 322
},
{
"epoch": 0.5622280243690165,
"grad_norm": 0.482421875,
"learning_rate": 8.613547873389228e-06,
"loss": 0.9654,
"step": 323
},
{
"epoch": 0.5639686684073107,
"grad_norm": 0.490234375,
"learning_rate": 8.603581862826222e-06,
"loss": 1.0108,
"step": 324
},
{
"epoch": 0.5657093124456049,
"grad_norm": 0.4921875,
"learning_rate": 8.593585972005665e-06,
"loss": 0.9708,
"step": 325
},
{
"epoch": 0.5674499564838991,
"grad_norm": 0.484375,
"learning_rate": 8.5835602838117e-06,
"loss": 0.9333,
"step": 326
},
{
"epoch": 0.5691906005221932,
"grad_norm": 0.478515625,
"learning_rate": 8.573504881375543e-06,
"loss": 0.9067,
"step": 327
},
{
"epoch": 0.5709312445604874,
"grad_norm": 0.486328125,
"learning_rate": 8.563419848074798e-06,
"loss": 0.9388,
"step": 328
},
{
"epoch": 0.5726718885987816,
"grad_norm": 0.4921875,
"learning_rate": 8.55330526753276e-06,
"loss": 0.9246,
"step": 329
},
{
"epoch": 0.5744125326370757,
"grad_norm": 0.498046875,
"learning_rate": 8.543161223617724e-06,
"loss": 0.9222,
"step": 330
},
{
"epoch": 0.5761531766753699,
"grad_norm": 0.490234375,
"learning_rate": 8.532987800442292e-06,
"loss": 1.0001,
"step": 331
},
{
"epoch": 0.577893820713664,
"grad_norm": 0.498046875,
"learning_rate": 8.522785082362675e-06,
"loss": 1.0003,
"step": 332
},
{
"epoch": 0.5796344647519582,
"grad_norm": 0.498046875,
"learning_rate": 8.512553153977988e-06,
"loss": 0.9103,
"step": 333
},
{
"epoch": 0.5813751087902524,
"grad_norm": 0.498046875,
"learning_rate": 8.502292100129553e-06,
"loss": 0.9895,
"step": 334
},
{
"epoch": 0.5831157528285466,
"grad_norm": 0.4921875,
"learning_rate": 8.492002005900201e-06,
"loss": 0.9981,
"step": 335
},
{
"epoch": 0.5848563968668408,
"grad_norm": 0.4765625,
"learning_rate": 8.481682956613555e-06,
"loss": 0.8758,
"step": 336
},
{
"epoch": 0.5865970409051349,
"grad_norm": 0.49609375,
"learning_rate": 8.471335037833328e-06,
"loss": 0.9845,
"step": 337
},
{
"epoch": 0.5883376849434291,
"grad_norm": 0.474609375,
"learning_rate": 8.460958335362617e-06,
"loss": 0.9974,
"step": 338
},
{
"epoch": 0.5900783289817232,
"grad_norm": 0.4921875,
"learning_rate": 8.450552935243186e-06,
"loss": 0.9725,
"step": 339
},
{
"epoch": 0.5918189730200174,
"grad_norm": 0.48046875,
"learning_rate": 8.440118923754757e-06,
"loss": 0.9362,
"step": 340
},
{
"epoch": 0.5935596170583116,
"grad_norm": 0.47265625,
"learning_rate": 8.429656387414289e-06,
"loss": 0.9659,
"step": 341
},
{
"epoch": 0.5953002610966057,
"grad_norm": 0.5078125,
"learning_rate": 8.419165412975265e-06,
"loss": 0.9346,
"step": 342
},
{
"epoch": 0.5970409051348999,
"grad_norm": 0.48828125,
"learning_rate": 8.408646087426975e-06,
"loss": 0.8759,
"step": 343
},
{
"epoch": 0.598781549173194,
"grad_norm": 0.5,
"learning_rate": 8.398098497993785e-06,
"loss": 0.9359,
"step": 344
},
{
"epoch": 0.6005221932114883,
"grad_norm": 0.5,
"learning_rate": 8.387522732134428e-06,
"loss": 0.9171,
"step": 345
},
{
"epoch": 0.6022628372497825,
"grad_norm": 0.49609375,
"learning_rate": 8.376918877541263e-06,
"loss": 0.919,
"step": 346
},
{
"epoch": 0.6040034812880766,
"grad_norm": 0.5234375,
"learning_rate": 8.36628702213956e-06,
"loss": 0.9211,
"step": 347
},
{
"epoch": 0.6057441253263708,
"grad_norm": 0.494140625,
"learning_rate": 8.355627254086771e-06,
"loss": 0.9191,
"step": 348
},
{
"epoch": 0.6074847693646649,
"grad_norm": 0.49609375,
"learning_rate": 8.344939661771784e-06,
"loss": 0.926,
"step": 349
},
{
"epoch": 0.6092254134029591,
"grad_norm": 0.5,
"learning_rate": 8.334224333814209e-06,
"loss": 0.9088,
"step": 350
},
{
"epoch": 0.6109660574412533,
"grad_norm": 0.50390625,
"learning_rate": 8.323481359063631e-06,
"loss": 0.9455,
"step": 351
},
{
"epoch": 0.6127067014795474,
"grad_norm": 0.50390625,
"learning_rate": 8.312710826598884e-06,
"loss": 0.8873,
"step": 352
},
{
"epoch": 0.6144473455178416,
"grad_norm": 0.49609375,
"learning_rate": 8.301912825727294e-06,
"loss": 0.9562,
"step": 353
},
{
"epoch": 0.6161879895561357,
"grad_norm": 0.48828125,
"learning_rate": 8.29108744598396e-06,
"loss": 0.9324,
"step": 354
},
{
"epoch": 0.61792863359443,
"grad_norm": 0.51171875,
"learning_rate": 8.280234777131e-06,
"loss": 0.9037,
"step": 355
},
{
"epoch": 0.6196692776327241,
"grad_norm": 0.5078125,
"learning_rate": 8.269354909156803e-06,
"loss": 0.9127,
"step": 356
},
{
"epoch": 0.6214099216710183,
"grad_norm": 0.494140625,
"learning_rate": 8.258447932275296e-06,
"loss": 0.9003,
"step": 357
},
{
"epoch": 0.6231505657093125,
"grad_norm": 0.494140625,
"learning_rate": 8.247513936925182e-06,
"loss": 0.9871,
"step": 358
},
{
"epoch": 0.6248912097476066,
"grad_norm": 0.50390625,
"learning_rate": 8.236553013769198e-06,
"loss": 0.8732,
"step": 359
},
{
"epoch": 0.6266318537859008,
"grad_norm": 0.486328125,
"learning_rate": 8.225565253693365e-06,
"loss": 0.9563,
"step": 360
},
{
"epoch": 0.6266318537859008,
"eval_loss": 0.9300395846366882,
"eval_runtime": 59.3628,
"eval_samples_per_second": 78.972,
"eval_steps_per_second": 9.871,
"step": 360
},
{
"epoch": 0.6283724978241949,
"grad_norm": 0.5078125,
"learning_rate": 8.214550747806227e-06,
"loss": 0.9173,
"step": 361
},
{
"epoch": 0.6301131418624891,
"grad_norm": 0.494140625,
"learning_rate": 8.2035095874381e-06,
"loss": 0.9297,
"step": 362
},
{
"epoch": 0.6318537859007833,
"grad_norm": 0.50390625,
"learning_rate": 8.192441864140314e-06,
"loss": 0.9582,
"step": 363
},
{
"epoch": 0.6335944299390774,
"grad_norm": 0.50390625,
"learning_rate": 8.181347669684456e-06,
"loss": 0.9176,
"step": 364
},
{
"epoch": 0.6353350739773717,
"grad_norm": 0.494140625,
"learning_rate": 8.170227096061607e-06,
"loss": 0.9181,
"step": 365
},
{
"epoch": 0.6370757180156658,
"grad_norm": 0.486328125,
"learning_rate": 8.15908023548158e-06,
"loss": 0.9708,
"step": 366
},
{
"epoch": 0.63881636205396,
"grad_norm": 0.48046875,
"learning_rate": 8.147907180372147e-06,
"loss": 0.9683,
"step": 367
},
{
"epoch": 0.6405570060922542,
"grad_norm": 0.4765625,
"learning_rate": 8.136708023378292e-06,
"loss": 0.9728,
"step": 368
},
{
"epoch": 0.6422976501305483,
"grad_norm": 0.498046875,
"learning_rate": 8.125482857361426e-06,
"loss": 0.9696,
"step": 369
},
{
"epoch": 0.6440382941688425,
"grad_norm": 0.49609375,
"learning_rate": 8.114231775398618e-06,
"loss": 0.9189,
"step": 370
},
{
"epoch": 0.6457789382071366,
"grad_norm": 0.486328125,
"learning_rate": 8.102954870781831e-06,
"loss": 0.9361,
"step": 371
},
{
"epoch": 0.6475195822454308,
"grad_norm": 0.490234375,
"learning_rate": 8.091652237017152e-06,
"loss": 0.9612,
"step": 372
},
{
"epoch": 0.6492602262837249,
"grad_norm": 0.498046875,
"learning_rate": 8.080323967823993e-06,
"loss": 0.9442,
"step": 373
},
{
"epoch": 0.6510008703220191,
"grad_norm": 0.48828125,
"learning_rate": 8.068970157134349e-06,
"loss": 0.9346,
"step": 374
},
{
"epoch": 0.6527415143603134,
"grad_norm": 0.486328125,
"learning_rate": 8.057590899091985e-06,
"loss": 0.8999,
"step": 375
},
{
"epoch": 0.6544821583986075,
"grad_norm": 0.498046875,
"learning_rate": 8.046186288051681e-06,
"loss": 0.9674,
"step": 376
},
{
"epoch": 0.6562228024369017,
"grad_norm": 0.48046875,
"learning_rate": 8.034756418578434e-06,
"loss": 0.9364,
"step": 377
},
{
"epoch": 0.6579634464751958,
"grad_norm": 0.484375,
"learning_rate": 8.023301385446682e-06,
"loss": 0.9341,
"step": 378
},
{
"epoch": 0.65970409051349,
"grad_norm": 0.4921875,
"learning_rate": 8.011821283639515e-06,
"loss": 0.8986,
"step": 379
},
{
"epoch": 0.6614447345517842,
"grad_norm": 0.515625,
"learning_rate": 8.000316208347891e-06,
"loss": 0.9266,
"step": 380
},
{
"epoch": 0.6631853785900783,
"grad_norm": 0.5078125,
"learning_rate": 7.988786254969837e-06,
"loss": 0.8911,
"step": 381
},
{
"epoch": 0.6649260226283725,
"grad_norm": 0.5078125,
"learning_rate": 7.977231519109665e-06,
"loss": 0.9221,
"step": 382
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.466796875,
"learning_rate": 7.965652096577188e-06,
"loss": 0.9635,
"step": 383
},
{
"epoch": 0.6684073107049608,
"grad_norm": 0.48828125,
"learning_rate": 7.954048083386909e-06,
"loss": 0.9023,
"step": 384
},
{
"epoch": 0.6701479547432551,
"grad_norm": 0.48828125,
"learning_rate": 7.942419575757235e-06,
"loss": 0.9277,
"step": 385
},
{
"epoch": 0.6718885987815492,
"grad_norm": 0.486328125,
"learning_rate": 7.930766670109675e-06,
"loss": 0.9826,
"step": 386
},
{
"epoch": 0.6736292428198434,
"grad_norm": 0.490234375,
"learning_rate": 7.919089463068038e-06,
"loss": 0.9223,
"step": 387
},
{
"epoch": 0.6753698868581375,
"grad_norm": 0.51953125,
"learning_rate": 7.907388051457647e-06,
"loss": 1.0468,
"step": 388
},
{
"epoch": 0.6771105308964317,
"grad_norm": 0.4921875,
"learning_rate": 7.895662532304516e-06,
"loss": 0.9233,
"step": 389
},
{
"epoch": 0.6788511749347258,
"grad_norm": 0.478515625,
"learning_rate": 7.883913002834555e-06,
"loss": 0.9756,
"step": 390
},
{
"epoch": 0.68059181897302,
"grad_norm": 0.494140625,
"learning_rate": 7.872139560472767e-06,
"loss": 0.9,
"step": 391
},
{
"epoch": 0.6823324630113142,
"grad_norm": 0.51953125,
"learning_rate": 7.860342302842434e-06,
"loss": 0.894,
"step": 392
},
{
"epoch": 0.6840731070496083,
"grad_norm": 0.5,
"learning_rate": 7.848521327764309e-06,
"loss": 0.9369,
"step": 393
},
{
"epoch": 0.6858137510879025,
"grad_norm": 0.5,
"learning_rate": 7.836676733255809e-06,
"loss": 0.9416,
"step": 394
},
{
"epoch": 0.6875543951261966,
"grad_norm": 0.494140625,
"learning_rate": 7.824808617530197e-06,
"loss": 0.8992,
"step": 395
},
{
"epoch": 0.6892950391644909,
"grad_norm": 0.486328125,
"learning_rate": 7.812917078995769e-06,
"loss": 0.9457,
"step": 396
},
{
"epoch": 0.6910356832027851,
"grad_norm": 0.4765625,
"learning_rate": 7.801002216255042e-06,
"loss": 0.9484,
"step": 397
},
{
"epoch": 0.6927763272410792,
"grad_norm": 0.50390625,
"learning_rate": 7.78906412810393e-06,
"loss": 0.9297,
"step": 398
},
{
"epoch": 0.6945169712793734,
"grad_norm": 0.498046875,
"learning_rate": 7.777102913530927e-06,
"loss": 0.951,
"step": 399
},
{
"epoch": 0.6962576153176675,
"grad_norm": 0.4921875,
"learning_rate": 7.76511867171629e-06,
"loss": 0.9091,
"step": 400
},
{
"epoch": 0.6979982593559617,
"grad_norm": 0.49609375,
"learning_rate": 7.753111502031214e-06,
"loss": 0.9041,
"step": 401
},
{
"epoch": 0.6997389033942559,
"grad_norm": 0.50390625,
"learning_rate": 7.741081504037009e-06,
"loss": 0.9463,
"step": 402
},
{
"epoch": 0.70147954743255,
"grad_norm": 0.486328125,
"learning_rate": 7.729028777484266e-06,
"loss": 0.9494,
"step": 403
},
{
"epoch": 0.7032201914708442,
"grad_norm": 0.482421875,
"learning_rate": 7.716953422312044e-06,
"loss": 0.9266,
"step": 404
},
{
"epoch": 0.7049608355091384,
"grad_norm": 0.5078125,
"learning_rate": 7.704855538647033e-06,
"loss": 0.9731,
"step": 405
},
{
"epoch": 0.7067014795474326,
"grad_norm": 0.490234375,
"learning_rate": 7.692735226802729e-06,
"loss": 0.961,
"step": 406
},
{
"epoch": 0.7084421235857267,
"grad_norm": 0.482421875,
"learning_rate": 7.680592587278585e-06,
"loss": 0.9506,
"step": 407
},
{
"epoch": 0.7101827676240209,
"grad_norm": 0.5,
"learning_rate": 7.668427720759207e-06,
"loss": 0.9084,
"step": 408
},
{
"epoch": 0.7119234116623151,
"grad_norm": 0.5,
"learning_rate": 7.656240728113493e-06,
"loss": 0.9147,
"step": 409
},
{
"epoch": 0.7136640557006092,
"grad_norm": 0.4921875,
"learning_rate": 7.644031710393815e-06,
"loss": 0.9498,
"step": 410
},
{
"epoch": 0.7154046997389034,
"grad_norm": 0.490234375,
"learning_rate": 7.631800768835167e-06,
"loss": 0.9145,
"step": 411
},
{
"epoch": 0.7171453437771975,
"grad_norm": 0.498046875,
"learning_rate": 7.619548004854332e-06,
"loss": 0.8884,
"step": 412
},
{
"epoch": 0.7188859878154917,
"grad_norm": 0.48828125,
"learning_rate": 7.607273520049041e-06,
"loss": 0.9777,
"step": 413
},
{
"epoch": 0.720626631853786,
"grad_norm": 0.478515625,
"learning_rate": 7.594977416197134e-06,
"loss": 0.9802,
"step": 414
},
{
"epoch": 0.72236727589208,
"grad_norm": 0.49609375,
"learning_rate": 7.582659795255707e-06,
"loss": 0.9484,
"step": 415
},
{
"epoch": 0.7241079199303743,
"grad_norm": 0.5078125,
"learning_rate": 7.570320759360273e-06,
"loss": 0.9231,
"step": 416
},
{
"epoch": 0.7258485639686684,
"grad_norm": 0.498046875,
"learning_rate": 7.557960410823917e-06,
"loss": 0.9416,
"step": 417
},
{
"epoch": 0.7275892080069626,
"grad_norm": 0.4921875,
"learning_rate": 7.545578852136443e-06,
"loss": 1.0058,
"step": 418
},
{
"epoch": 0.7293298520452568,
"grad_norm": 0.5,
"learning_rate": 7.533176185963523e-06,
"loss": 0.9426,
"step": 419
},
{
"epoch": 0.7310704960835509,
"grad_norm": 0.498046875,
"learning_rate": 7.520752515145855e-06,
"loss": 0.949,
"step": 420
},
{
"epoch": 0.7328111401218451,
"grad_norm": 0.484375,
"learning_rate": 7.508307942698296e-06,
"loss": 0.9727,
"step": 421
},
{
"epoch": 0.7345517841601392,
"grad_norm": 0.515625,
"learning_rate": 7.495842571809021e-06,
"loss": 0.8763,
"step": 422
},
{
"epoch": 0.7362924281984334,
"grad_norm": 0.5,
"learning_rate": 7.4833565058386595e-06,
"loss": 0.8914,
"step": 423
},
{
"epoch": 0.7380330722367275,
"grad_norm": 0.49609375,
"learning_rate": 7.470849848319443e-06,
"loss": 0.9157,
"step": 424
},
{
"epoch": 0.7397737162750218,
"grad_norm": 0.490234375,
"learning_rate": 7.458322702954342e-06,
"loss": 0.8703,
"step": 425
},
{
"epoch": 0.741514360313316,
"grad_norm": 0.48828125,
"learning_rate": 7.44577517361621e-06,
"loss": 0.9106,
"step": 426
},
{
"epoch": 0.7432550043516101,
"grad_norm": 0.5078125,
"learning_rate": 7.4332073643469196e-06,
"loss": 0.8928,
"step": 427
},
{
"epoch": 0.7449956483899043,
"grad_norm": 0.5234375,
"learning_rate": 7.420619379356504e-06,
"loss": 0.9402,
"step": 428
},
{
"epoch": 0.7467362924281984,
"grad_norm": 0.490234375,
"learning_rate": 7.408011323022286e-06,
"loss": 0.8997,
"step": 429
},
{
"epoch": 0.7484769364664926,
"grad_norm": 0.48828125,
"learning_rate": 7.395383299888019e-06,
"loss": 0.9831,
"step": 430
},
{
"epoch": 0.7502175805047868,
"grad_norm": 0.494140625,
"learning_rate": 7.382735414663017e-06,
"loss": 1.04,
"step": 431
},
{
"epoch": 0.7519582245430809,
"grad_norm": 0.49609375,
"learning_rate": 7.370067772221285e-06,
"loss": 0.9034,
"step": 432
},
{
"epoch": 0.7519582245430809,
"eval_loss": 0.9259106516838074,
"eval_runtime": 59.5681,
"eval_samples_per_second": 78.7,
"eval_steps_per_second": 9.837,
"step": 432
},
{
"epoch": 0.7536988685813751,
"grad_norm": 0.4921875,
"learning_rate": 7.357380477600654e-06,
"loss": 0.9201,
"step": 433
},
{
"epoch": 0.7554395126196692,
"grad_norm": 0.5078125,
"learning_rate": 7.3446736360019065e-06,
"loss": 0.9291,
"step": 434
},
{
"epoch": 0.7571801566579635,
"grad_norm": 0.47265625,
"learning_rate": 7.331947352787905e-06,
"loss": 0.951,
"step": 435
},
{
"epoch": 0.7589208006962577,
"grad_norm": 0.49609375,
"learning_rate": 7.319201733482715e-06,
"loss": 0.9208,
"step": 436
},
{
"epoch": 0.7606614447345518,
"grad_norm": 0.490234375,
"learning_rate": 7.3064368837707425e-06,
"loss": 0.9116,
"step": 437
},
{
"epoch": 0.762402088772846,
"grad_norm": 0.50390625,
"learning_rate": 7.2936529094958365e-06,
"loss": 0.9165,
"step": 438
},
{
"epoch": 0.7641427328111401,
"grad_norm": 0.494140625,
"learning_rate": 7.280849916660434e-06,
"loss": 0.9513,
"step": 439
},
{
"epoch": 0.7658833768494343,
"grad_norm": 0.48828125,
"learning_rate": 7.268028011424664e-06,
"loss": 0.9725,
"step": 440
},
{
"epoch": 0.7676240208877284,
"grad_norm": 0.494140625,
"learning_rate": 7.255187300105477e-06,
"loss": 0.9328,
"step": 441
},
{
"epoch": 0.7693646649260226,
"grad_norm": 0.50390625,
"learning_rate": 7.24232788917576e-06,
"loss": 0.8776,
"step": 442
},
{
"epoch": 0.7711053089643168,
"grad_norm": 0.5078125,
"learning_rate": 7.229449885263451e-06,
"loss": 0.9432,
"step": 443
},
{
"epoch": 0.7728459530026109,
"grad_norm": 0.478515625,
"learning_rate": 7.21655339515066e-06,
"loss": 0.8951,
"step": 444
},
{
"epoch": 0.7745865970409052,
"grad_norm": 0.4921875,
"learning_rate": 7.203638525772783e-06,
"loss": 0.961,
"step": 445
},
{
"epoch": 0.7763272410791993,
"grad_norm": 0.48828125,
"learning_rate": 7.1907053842176075e-06,
"loss": 0.9498,
"step": 446
},
{
"epoch": 0.7780678851174935,
"grad_norm": 0.498046875,
"learning_rate": 7.17775407772444e-06,
"loss": 0.9638,
"step": 447
},
{
"epoch": 0.7798085291557877,
"grad_norm": 0.490234375,
"learning_rate": 7.164784713683197e-06,
"loss": 0.9516,
"step": 448
},
{
"epoch": 0.7815491731940818,
"grad_norm": 0.466796875,
"learning_rate": 7.1517973996335335e-06,
"loss": 0.9108,
"step": 449
},
{
"epoch": 0.783289817232376,
"grad_norm": 0.50390625,
"learning_rate": 7.138792243263936e-06,
"loss": 0.9089,
"step": 450
},
{
"epoch": 0.7850304612706701,
"grad_norm": 0.50390625,
"learning_rate": 7.125769352410845e-06,
"loss": 0.9667,
"step": 451
},
{
"epoch": 0.7867711053089643,
"grad_norm": 0.484375,
"learning_rate": 7.112728835057742e-06,
"loss": 0.9458,
"step": 452
},
{
"epoch": 0.7885117493472585,
"grad_norm": 0.478515625,
"learning_rate": 7.099670799334269e-06,
"loss": 0.899,
"step": 453
},
{
"epoch": 0.7902523933855526,
"grad_norm": 0.498046875,
"learning_rate": 7.08659535351533e-06,
"loss": 0.9031,
"step": 454
},
{
"epoch": 0.7919930374238469,
"grad_norm": 0.49609375,
"learning_rate": 7.073502606020187e-06,
"loss": 0.9771,
"step": 455
},
{
"epoch": 0.793733681462141,
"grad_norm": 0.52734375,
"learning_rate": 7.060392665411564e-06,
"loss": 0.9113,
"step": 456
},
{
"epoch": 0.7954743255004352,
"grad_norm": 0.498046875,
"learning_rate": 7.0472656403947505e-06,
"loss": 1.0172,
"step": 457
},
{
"epoch": 0.7972149695387293,
"grad_norm": 0.4765625,
"learning_rate": 7.034121639816691e-06,
"loss": 0.9282,
"step": 458
},
{
"epoch": 0.7989556135770235,
"grad_norm": 0.50390625,
"learning_rate": 7.020960772665096e-06,
"loss": 0.896,
"step": 459
},
{
"epoch": 0.8006962576153177,
"grad_norm": 0.490234375,
"learning_rate": 7.007783148067524e-06,
"loss": 0.881,
"step": 460
},
{
"epoch": 0.8024369016536118,
"grad_norm": 0.5078125,
"learning_rate": 6.994588875290488e-06,
"loss": 0.9155,
"step": 461
},
{
"epoch": 0.804177545691906,
"grad_norm": 0.5078125,
"learning_rate": 6.9813780637385385e-06,
"loss": 0.9012,
"step": 462
},
{
"epoch": 0.8059181897302001,
"grad_norm": 0.50390625,
"learning_rate": 6.968150822953372e-06,
"loss": 0.9085,
"step": 463
},
{
"epoch": 0.8076588337684943,
"grad_norm": 0.498046875,
"learning_rate": 6.954907262612906e-06,
"loss": 0.9818,
"step": 464
},
{
"epoch": 0.8093994778067886,
"grad_norm": 0.484375,
"learning_rate": 6.941647492530378e-06,
"loss": 0.9717,
"step": 465
},
{
"epoch": 0.8111401218450827,
"grad_norm": 0.5,
"learning_rate": 6.928371622653434e-06,
"loss": 0.9369,
"step": 466
},
{
"epoch": 0.8128807658833769,
"grad_norm": 0.486328125,
"learning_rate": 6.91507976306322e-06,
"loss": 0.8943,
"step": 467
},
{
"epoch": 0.814621409921671,
"grad_norm": 0.498046875,
"learning_rate": 6.901772023973459e-06,
"loss": 0.9113,
"step": 468
},
{
"epoch": 0.8163620539599652,
"grad_norm": 0.49609375,
"learning_rate": 6.888448515729552e-06,
"loss": 0.8995,
"step": 469
},
{
"epoch": 0.8181026979982594,
"grad_norm": 0.486328125,
"learning_rate": 6.8751093488076485e-06,
"loss": 0.9159,
"step": 470
},
{
"epoch": 0.8198433420365535,
"grad_norm": 0.498046875,
"learning_rate": 6.86175463381374e-06,
"loss": 0.9388,
"step": 471
},
{
"epoch": 0.8215839860748477,
"grad_norm": 0.49609375,
"learning_rate": 6.8483844814827405e-06,
"loss": 0.9524,
"step": 472
},
{
"epoch": 0.8233246301131418,
"grad_norm": 0.5078125,
"learning_rate": 6.8349990026775656e-06,
"loss": 0.9458,
"step": 473
},
{
"epoch": 0.825065274151436,
"grad_norm": 0.5,
"learning_rate": 6.821598308388217e-06,
"loss": 0.9258,
"step": 474
},
{
"epoch": 0.8268059181897301,
"grad_norm": 0.482421875,
"learning_rate": 6.8081825097308584e-06,
"loss": 0.9159,
"step": 475
},
{
"epoch": 0.8285465622280244,
"grad_norm": 0.5,
"learning_rate": 6.794751717946897e-06,
"loss": 0.9029,
"step": 476
},
{
"epoch": 0.8302872062663186,
"grad_norm": 0.484375,
"learning_rate": 6.781306044402064e-06,
"loss": 0.8852,
"step": 477
},
{
"epoch": 0.8320278503046127,
"grad_norm": 0.482421875,
"learning_rate": 6.767845600585479e-06,
"loss": 0.945,
"step": 478
},
{
"epoch": 0.8337684943429069,
"grad_norm": 0.474609375,
"learning_rate": 6.754370498108747e-06,
"loss": 0.9776,
"step": 479
},
{
"epoch": 0.835509138381201,
"grad_norm": 0.490234375,
"learning_rate": 6.740880848705005e-06,
"loss": 0.9229,
"step": 480
},
{
"epoch": 0.8372497824194952,
"grad_norm": 0.50390625,
"learning_rate": 6.72737676422802e-06,
"loss": 0.9686,
"step": 481
},
{
"epoch": 0.8389904264577894,
"grad_norm": 0.484375,
"learning_rate": 6.713858356651253e-06,
"loss": 0.9459,
"step": 482
},
{
"epoch": 0.8407310704960835,
"grad_norm": 0.486328125,
"learning_rate": 6.700325738066923e-06,
"loss": 0.9472,
"step": 483
},
{
"epoch": 0.8424717145343777,
"grad_norm": 0.5,
"learning_rate": 6.686779020685089e-06,
"loss": 0.9417,
"step": 484
},
{
"epoch": 0.8442123585726719,
"grad_norm": 0.490234375,
"learning_rate": 6.6732183168327146e-06,
"loss": 0.9666,
"step": 485
},
{
"epoch": 0.8459530026109661,
"grad_norm": 0.490234375,
"learning_rate": 6.659643738952732e-06,
"loss": 0.9368,
"step": 486
},
{
"epoch": 0.8476936466492603,
"grad_norm": 0.484375,
"learning_rate": 6.646055399603122e-06,
"loss": 0.9466,
"step": 487
},
{
"epoch": 0.8494342906875544,
"grad_norm": 0.5,
"learning_rate": 6.6324534114559656e-06,
"loss": 0.9159,
"step": 488
},
{
"epoch": 0.8511749347258486,
"grad_norm": 0.5078125,
"learning_rate": 6.618837887296523e-06,
"loss": 0.9067,
"step": 489
},
{
"epoch": 0.8529155787641427,
"grad_norm": 0.48046875,
"learning_rate": 6.605208940022289e-06,
"loss": 0.9608,
"step": 490
},
{
"epoch": 0.8546562228024369,
"grad_norm": 0.498046875,
"learning_rate": 6.591566682642061e-06,
"loss": 0.9663,
"step": 491
},
{
"epoch": 0.856396866840731,
"grad_norm": 0.5234375,
"learning_rate": 6.5779112282750035e-06,
"loss": 0.8931,
"step": 492
},
{
"epoch": 0.8581375108790252,
"grad_norm": 0.498046875,
"learning_rate": 6.564242690149705e-06,
"loss": 0.9383,
"step": 493
},
{
"epoch": 0.8598781549173194,
"grad_norm": 0.484375,
"learning_rate": 6.550561181603244e-06,
"loss": 0.9343,
"step": 494
},
{
"epoch": 0.8616187989556136,
"grad_norm": 0.486328125,
"learning_rate": 6.536866816080247e-06,
"loss": 0.9999,
"step": 495
},
{
"epoch": 0.8633594429939078,
"grad_norm": 0.4765625,
"learning_rate": 6.523159707131951e-06,
"loss": 0.9955,
"step": 496
},
{
"epoch": 0.8651000870322019,
"grad_norm": 0.50390625,
"learning_rate": 6.509439968415252e-06,
"loss": 0.8897,
"step": 497
},
{
"epoch": 0.8668407310704961,
"grad_norm": 0.498046875,
"learning_rate": 6.4957077136917776e-06,
"loss": 0.9065,
"step": 498
},
{
"epoch": 0.8685813751087903,
"grad_norm": 0.4921875,
"learning_rate": 6.481963056826932e-06,
"loss": 0.9224,
"step": 499
},
{
"epoch": 0.8703220191470844,
"grad_norm": 0.50390625,
"learning_rate": 6.468206111788957e-06,
"loss": 0.9638,
"step": 500
},
{
"epoch": 0.8720626631853786,
"grad_norm": 0.5,
"learning_rate": 6.454436992647984e-06,
"loss": 0.9232,
"step": 501
},
{
"epoch": 0.8738033072236727,
"grad_norm": 0.51171875,
"learning_rate": 6.440655813575093e-06,
"loss": 0.9412,
"step": 502
},
{
"epoch": 0.8755439512619669,
"grad_norm": 0.5078125,
"learning_rate": 6.426862688841359e-06,
"loss": 0.9051,
"step": 503
},
{
"epoch": 0.8772845953002611,
"grad_norm": 0.5078125,
"learning_rate": 6.413057732816911e-06,
"loss": 0.9214,
"step": 504
},
{
"epoch": 0.8772845953002611,
"eval_loss": 0.9230473637580872,
"eval_runtime": 59.2532,
"eval_samples_per_second": 79.118,
"eval_steps_per_second": 9.89,
"step": 504
},
{
"epoch": 0.8790252393385553,
"grad_norm": 0.49609375,
"learning_rate": 6.3992410599699786e-06,
"loss": 0.908,
"step": 505
},
{
"epoch": 0.8807658833768495,
"grad_norm": 0.48828125,
"learning_rate": 6.385412784865948e-06,
"loss": 0.9613,
"step": 506
},
{
"epoch": 0.8825065274151436,
"grad_norm": 0.498046875,
"learning_rate": 6.371573022166409e-06,
"loss": 0.9061,
"step": 507
},
{
"epoch": 0.8842471714534378,
"grad_norm": 0.5,
"learning_rate": 6.357721886628201e-06,
"loss": 0.9117,
"step": 508
},
{
"epoch": 0.8859878154917319,
"grad_norm": 0.48046875,
"learning_rate": 6.34385949310247e-06,
"loss": 0.9219,
"step": 509
},
{
"epoch": 0.8877284595300261,
"grad_norm": 0.5078125,
"learning_rate": 6.329985956533708e-06,
"loss": 0.976,
"step": 510
},
{
"epoch": 0.8894691035683203,
"grad_norm": 0.515625,
"learning_rate": 6.3161013919588e-06,
"loss": 0.9131,
"step": 511
},
{
"epoch": 0.8912097476066144,
"grad_norm": 0.5,
"learning_rate": 6.302205914506083e-06,
"loss": 0.908,
"step": 512
},
{
"epoch": 0.8929503916449086,
"grad_norm": 0.4921875,
"learning_rate": 6.2882996393943706e-06,
"loss": 0.9786,
"step": 513
},
{
"epoch": 0.8946910356832027,
"grad_norm": 0.48828125,
"learning_rate": 6.274382681932019e-06,
"loss": 0.8728,
"step": 514
},
{
"epoch": 0.896431679721497,
"grad_norm": 0.482421875,
"learning_rate": 6.2604551575159476e-06,
"loss": 0.9491,
"step": 515
},
{
"epoch": 0.8981723237597912,
"grad_norm": 0.49609375,
"learning_rate": 6.24651718163071e-06,
"loss": 0.995,
"step": 516
},
{
"epoch": 0.8999129677980853,
"grad_norm": 0.482421875,
"learning_rate": 6.2325688698475106e-06,
"loss": 0.9574,
"step": 517
},
{
"epoch": 0.9016536118363795,
"grad_norm": 0.486328125,
"learning_rate": 6.218610337823262e-06,
"loss": 0.9004,
"step": 518
},
{
"epoch": 0.9033942558746736,
"grad_norm": 0.48046875,
"learning_rate": 6.2046417012996195e-06,
"loss": 0.9165,
"step": 519
},
{
"epoch": 0.9051348999129678,
"grad_norm": 0.498046875,
"learning_rate": 6.1906630761020245e-06,
"loss": 0.9534,
"step": 520
},
{
"epoch": 0.906875543951262,
"grad_norm": 0.5,
"learning_rate": 6.17667457813874e-06,
"loss": 0.8883,
"step": 521
},
{
"epoch": 0.9086161879895561,
"grad_norm": 0.51171875,
"learning_rate": 6.162676323399898e-06,
"loss": 0.919,
"step": 522
},
{
"epoch": 0.9103568320278503,
"grad_norm": 0.4921875,
"learning_rate": 6.148668427956523e-06,
"loss": 0.9187,
"step": 523
},
{
"epoch": 0.9120974760661444,
"grad_norm": 0.494140625,
"learning_rate": 6.134651007959586e-06,
"loss": 0.8984,
"step": 524
},
{
"epoch": 0.9138381201044387,
"grad_norm": 0.4921875,
"learning_rate": 6.120624179639032e-06,
"loss": 0.9585,
"step": 525
},
{
"epoch": 0.9155787641427328,
"grad_norm": 0.49609375,
"learning_rate": 6.106588059302818e-06,
"loss": 0.9848,
"step": 526
},
{
"epoch": 0.917319408181027,
"grad_norm": 0.4921875,
"learning_rate": 6.092542763335947e-06,
"loss": 0.9542,
"step": 527
},
{
"epoch": 0.9190600522193212,
"grad_norm": 0.5,
"learning_rate": 6.0784884081995065e-06,
"loss": 0.9476,
"step": 528
},
{
"epoch": 0.9208006962576153,
"grad_norm": 0.498046875,
"learning_rate": 6.0644251104296995e-06,
"loss": 0.9425,
"step": 529
},
{
"epoch": 0.9225413402959095,
"grad_norm": 0.5,
"learning_rate": 6.0503529866368824e-06,
"loss": 0.9532,
"step": 530
},
{
"epoch": 0.9242819843342036,
"grad_norm": 0.49609375,
"learning_rate": 6.036272153504592e-06,
"loss": 0.9243,
"step": 531
},
{
"epoch": 0.9260226283724978,
"grad_norm": 0.490234375,
"learning_rate": 6.022182727788586e-06,
"loss": 0.9452,
"step": 532
},
{
"epoch": 0.927763272410792,
"grad_norm": 0.5234375,
"learning_rate": 6.008084826315863e-06,
"loss": 0.9243,
"step": 533
},
{
"epoch": 0.9295039164490861,
"grad_norm": 0.498046875,
"learning_rate": 5.993978565983709e-06,
"loss": 0.8917,
"step": 534
},
{
"epoch": 0.9312445604873804,
"grad_norm": 0.4921875,
"learning_rate": 5.979864063758717e-06,
"loss": 0.9562,
"step": 535
},
{
"epoch": 0.9329852045256745,
"grad_norm": 0.478515625,
"learning_rate": 5.965741436675816e-06,
"loss": 0.9904,
"step": 536
},
{
"epoch": 0.9347258485639687,
"grad_norm": 0.5,
"learning_rate": 5.9516108018373145e-06,
"loss": 0.8953,
"step": 537
},
{
"epoch": 0.9364664926022629,
"grad_norm": 0.494140625,
"learning_rate": 5.937472276411909e-06,
"loss": 0.8674,
"step": 538
},
{
"epoch": 0.938207136640557,
"grad_norm": 0.498046875,
"learning_rate": 5.923325977633732e-06,
"loss": 1.0281,
"step": 539
},
{
"epoch": 0.9399477806788512,
"grad_norm": 0.494140625,
"learning_rate": 5.909172022801364e-06,
"loss": 0.9278,
"step": 540
},
{
"epoch": 0.9416884247171453,
"grad_norm": 0.486328125,
"learning_rate": 5.8950105292768754e-06,
"loss": 0.8704,
"step": 541
},
{
"epoch": 0.9434290687554395,
"grad_norm": 0.515625,
"learning_rate": 5.880841614484841e-06,
"loss": 0.9561,
"step": 542
},
{
"epoch": 0.9451697127937336,
"grad_norm": 0.47265625,
"learning_rate": 5.866665395911375e-06,
"loss": 0.9407,
"step": 543
},
{
"epoch": 0.9469103568320278,
"grad_norm": 0.484375,
"learning_rate": 5.852481991103149e-06,
"loss": 0.8833,
"step": 544
},
{
"epoch": 0.9486510008703221,
"grad_norm": 0.5,
"learning_rate": 5.838291517666427e-06,
"loss": 0.993,
"step": 545
},
{
"epoch": 0.9503916449086162,
"grad_norm": 0.494140625,
"learning_rate": 5.824094093266077e-06,
"loss": 0.9231,
"step": 546
},
{
"epoch": 0.9521322889469104,
"grad_norm": 0.490234375,
"learning_rate": 5.809889835624611e-06,
"loss": 0.9512,
"step": 547
},
{
"epoch": 0.9538729329852045,
"grad_norm": 0.494140625,
"learning_rate": 5.795678862521197e-06,
"loss": 0.9175,
"step": 548
},
{
"epoch": 0.9556135770234987,
"grad_norm": 0.486328125,
"learning_rate": 5.781461291790687e-06,
"loss": 0.8922,
"step": 549
},
{
"epoch": 0.9573542210617929,
"grad_norm": 0.494140625,
"learning_rate": 5.767237241322641e-06,
"loss": 0.9701,
"step": 550
},
{
"epoch": 0.959094865100087,
"grad_norm": 0.49609375,
"learning_rate": 5.753006829060343e-06,
"loss": 0.9464,
"step": 551
},
{
"epoch": 0.9608355091383812,
"grad_norm": 0.5,
"learning_rate": 5.738770172999835e-06,
"loss": 0.9335,
"step": 552
},
{
"epoch": 0.9625761531766753,
"grad_norm": 0.458984375,
"learning_rate": 5.724527391188927e-06,
"loss": 0.9689,
"step": 553
},
{
"epoch": 0.9643167972149695,
"grad_norm": 0.50390625,
"learning_rate": 5.710278601726222e-06,
"loss": 0.8756,
"step": 554
},
{
"epoch": 0.9660574412532638,
"grad_norm": 0.484375,
"learning_rate": 5.696023922760141e-06,
"loss": 0.9459,
"step": 555
},
{
"epoch": 0.9677980852915579,
"grad_norm": 0.50390625,
"learning_rate": 5.681763472487933e-06,
"loss": 0.9305,
"step": 556
},
{
"epoch": 0.9695387293298521,
"grad_norm": 0.50390625,
"learning_rate": 5.667497369154712e-06,
"loss": 0.8707,
"step": 557
},
{
"epoch": 0.9712793733681462,
"grad_norm": 0.47265625,
"learning_rate": 5.6532257310524565e-06,
"loss": 0.8977,
"step": 558
},
{
"epoch": 0.9730200174064404,
"grad_norm": 0.494140625,
"learning_rate": 5.638948676519043e-06,
"loss": 0.8735,
"step": 559
},
{
"epoch": 0.9747606614447345,
"grad_norm": 0.50390625,
"learning_rate": 5.624666323937257e-06,
"loss": 0.9788,
"step": 560
},
{
"epoch": 0.9765013054830287,
"grad_norm": 0.49609375,
"learning_rate": 5.610378791733821e-06,
"loss": 0.9348,
"step": 561
},
{
"epoch": 0.9782419495213229,
"grad_norm": 0.48046875,
"learning_rate": 5.596086198378399e-06,
"loss": 0.9258,
"step": 562
},
{
"epoch": 0.979982593559617,
"grad_norm": 0.490234375,
"learning_rate": 5.5817886623826245e-06,
"loss": 0.9184,
"step": 563
},
{
"epoch": 0.9817232375979112,
"grad_norm": 0.490234375,
"learning_rate": 5.567486302299112e-06,
"loss": 0.9439,
"step": 564
},
{
"epoch": 0.9834638816362054,
"grad_norm": 0.474609375,
"learning_rate": 5.553179236720482e-06,
"loss": 0.9887,
"step": 565
},
{
"epoch": 0.9852045256744996,
"grad_norm": 0.48828125,
"learning_rate": 5.5388675842783644e-06,
"loss": 0.9439,
"step": 566
},
{
"epoch": 0.9869451697127938,
"grad_norm": 0.490234375,
"learning_rate": 5.524551463642429e-06,
"loss": 0.971,
"step": 567
},
{
"epoch": 0.9886858137510879,
"grad_norm": 0.486328125,
"learning_rate": 5.510230993519391e-06,
"loss": 0.9583,
"step": 568
},
{
"epoch": 0.9904264577893821,
"grad_norm": 0.50390625,
"learning_rate": 5.495906292652035e-06,
"loss": 0.9788,
"step": 569
},
{
"epoch": 0.9921671018276762,
"grad_norm": 0.4921875,
"learning_rate": 5.48157747981822e-06,
"loss": 0.9634,
"step": 570
},
{
"epoch": 0.9939077458659704,
"grad_norm": 0.5078125,
"learning_rate": 5.467244673829908e-06,
"loss": 0.8834,
"step": 571
},
{
"epoch": 0.9956483899042646,
"grad_norm": 0.49609375,
"learning_rate": 5.452907993532164e-06,
"loss": 0.9113,
"step": 572
},
{
"epoch": 0.9973890339425587,
"grad_norm": 0.50390625,
"learning_rate": 5.438567557802186e-06,
"loss": 0.8828,
"step": 573
},
{
"epoch": 0.999129677980853,
"grad_norm": 0.48828125,
"learning_rate": 5.424223485548303e-06,
"loss": 0.9363,
"step": 574
}
],
"logging_steps": 1,
"max_steps": 1148,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 574,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.613017336006574e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}