{
"best_metric": 0.28952550888061523,
"best_model_checkpoint": "miner_id_24/checkpoint-100",
"epoch": 2.9953488372093022,
"eval_steps": 50,
"global_step": 161,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.018604651162790697,
"grad_norm": 1.01740562915802,
"learning_rate": 7e-06,
"loss": 1.7104,
"step": 1
},
{
"epoch": 0.018604651162790697,
"eval_loss": 0.6080694794654846,
"eval_runtime": 10.5925,
"eval_samples_per_second": 8.591,
"eval_steps_per_second": 2.171,
"step": 1
},
{
"epoch": 0.037209302325581395,
"grad_norm": 1.2670235633850098,
"learning_rate": 1.4e-05,
"loss": 1.8752,
"step": 2
},
{
"epoch": 0.05581395348837209,
"grad_norm": 1.3142772912979126,
"learning_rate": 2.1e-05,
"loss": 1.7304,
"step": 3
},
{
"epoch": 0.07441860465116279,
"grad_norm": 1.2250651121139526,
"learning_rate": 2.8e-05,
"loss": 1.9433,
"step": 4
},
{
"epoch": 0.09302325581395349,
"grad_norm": 1.5665745735168457,
"learning_rate": 3.5e-05,
"loss": 2.1169,
"step": 5
},
{
"epoch": 0.11162790697674418,
"grad_norm": 1.5763330459594727,
"learning_rate": 4.2e-05,
"loss": 2.4289,
"step": 6
},
{
"epoch": 0.13023255813953488,
"grad_norm": 1.7122726440429688,
"learning_rate": 4.899999999999999e-05,
"loss": 2.4843,
"step": 7
},
{
"epoch": 0.14883720930232558,
"grad_norm": 1.7638782262802124,
"learning_rate": 5.6e-05,
"loss": 1.914,
"step": 8
},
{
"epoch": 0.16744186046511628,
"grad_norm": 2.0044069290161133,
"learning_rate": 6.3e-05,
"loss": 2.2168,
"step": 9
},
{
"epoch": 0.18604651162790697,
"grad_norm": 2.2012875080108643,
"learning_rate": 7e-05,
"loss": 2.3124,
"step": 10
},
{
"epoch": 0.20465116279069767,
"grad_norm": 2.3812570571899414,
"learning_rate": 6.99924252512213e-05,
"loss": 2.1676,
"step": 11
},
{
"epoch": 0.22325581395348837,
"grad_norm": 1.740301251411438,
"learning_rate": 6.996970428356058e-05,
"loss": 1.9273,
"step": 12
},
{
"epoch": 0.24186046511627907,
"grad_norm": 4.242422580718994,
"learning_rate": 6.993184693162481e-05,
"loss": 3.0044,
"step": 13
},
{
"epoch": 0.26046511627906976,
"grad_norm": 1.112087368965149,
"learning_rate": 6.987886958169575e-05,
"loss": 1.5393,
"step": 14
},
{
"epoch": 0.27906976744186046,
"grad_norm": 0.9684474468231201,
"learning_rate": 6.981079516463718e-05,
"loss": 1.1536,
"step": 15
},
{
"epoch": 0.29767441860465116,
"grad_norm": 0.8984664082527161,
"learning_rate": 6.972765314596951e-05,
"loss": 1.1399,
"step": 16
},
{
"epoch": 0.31627906976744186,
"grad_norm": 0.9321298599243164,
"learning_rate": 6.962947951311591e-05,
"loss": 1.4814,
"step": 17
},
{
"epoch": 0.33488372093023255,
"grad_norm": 0.8456665277481079,
"learning_rate": 6.951631675982521e-05,
"loss": 1.3894,
"step": 18
},
{
"epoch": 0.35348837209302325,
"grad_norm": 0.7932624220848083,
"learning_rate": 6.938821386777899e-05,
"loss": 1.4062,
"step": 19
},
{
"epoch": 0.37209302325581395,
"grad_norm": 1.1389163732528687,
"learning_rate": 6.924522628539012e-05,
"loss": 1.4328,
"step": 20
},
{
"epoch": 0.39069767441860465,
"grad_norm": 1.0435296297073364,
"learning_rate": 6.908741590380231e-05,
"loss": 1.496,
"step": 21
},
{
"epoch": 0.40930232558139534,
"grad_norm": 1.1448780298233032,
"learning_rate": 6.891485103010098e-05,
"loss": 1.2627,
"step": 22
},
{
"epoch": 0.42790697674418604,
"grad_norm": 1.52295982837677,
"learning_rate": 6.87276063577471e-05,
"loss": 1.4529,
"step": 23
},
{
"epoch": 0.44651162790697674,
"grad_norm": 1.8864622116088867,
"learning_rate": 6.852576293424654e-05,
"loss": 2.0397,
"step": 24
},
{
"epoch": 0.46511627906976744,
"grad_norm": 1.491450548171997,
"learning_rate": 6.830940812606936e-05,
"loss": 1.5753,
"step": 25
},
{
"epoch": 0.48372093023255813,
"grad_norm": 2.425402879714966,
"learning_rate": 6.807863558083377e-05,
"loss": 1.2114,
"step": 26
},
{
"epoch": 0.5023255813953489,
"grad_norm": 0.5629173517227173,
"learning_rate": 6.78335451867715e-05,
"loss": 1.6225,
"step": 27
},
{
"epoch": 0.5209302325581395,
"grad_norm": 0.7677317261695862,
"learning_rate": 6.757424302949186e-05,
"loss": 1.6867,
"step": 28
},
{
"epoch": 0.5395348837209303,
"grad_norm": 0.9299004673957825,
"learning_rate": 6.73008413460634e-05,
"loss": 1.288,
"step": 29
},
{
"epoch": 0.5581395348837209,
"grad_norm": 0.8549377918243408,
"learning_rate": 6.701345847643282e-05,
"loss": 1.1299,
"step": 30
},
{
"epoch": 0.5767441860465117,
"grad_norm": 1.0633025169372559,
"learning_rate": 6.671221881220246e-05,
"loss": 1.5311,
"step": 31
},
{
"epoch": 0.5953488372093023,
"grad_norm": 1.01902174949646,
"learning_rate": 6.639725274278824e-05,
"loss": 1.3447,
"step": 32
},
{
"epoch": 0.6139534883720931,
"grad_norm": 1.1506015062332153,
"learning_rate": 6.606869659898156e-05,
"loss": 1.3272,
"step": 33
},
{
"epoch": 0.6325581395348837,
"grad_norm": 1.0536060333251953,
"learning_rate": 6.572669259393953e-05,
"loss": 1.317,
"step": 34
},
{
"epoch": 0.6511627906976745,
"grad_norm": 1.214861512184143,
"learning_rate": 6.537138876162896e-05,
"loss": 1.2711,
"step": 35
},
{
"epoch": 0.6697674418604651,
"grad_norm": 1.3296098709106445,
"learning_rate": 6.5002938892751e-05,
"loss": 1.471,
"step": 36
},
{
"epoch": 0.6883720930232559,
"grad_norm": 1.5142545700073242,
"learning_rate": 6.462150246817388e-05,
"loss": 1.5411,
"step": 37
},
{
"epoch": 0.7069767441860465,
"grad_norm": 1.7185128927230835,
"learning_rate": 6.422724458990284e-05,
"loss": 1.1718,
"step": 38
},
{
"epoch": 0.7255813953488373,
"grad_norm": 2.9415876865386963,
"learning_rate": 6.38203359096168e-05,
"loss": 1.1685,
"step": 39
},
{
"epoch": 0.7441860465116279,
"grad_norm": 0.6370990872383118,
"learning_rate": 6.340095255480317e-05,
"loss": 1.2429,
"step": 40
},
{
"epoch": 0.7627906976744186,
"grad_norm": 0.7854994535446167,
"learning_rate": 6.296927605252219e-05,
"loss": 1.1277,
"step": 41
},
{
"epoch": 0.7813953488372093,
"grad_norm": 0.6397520899772644,
"learning_rate": 6.252549325083437e-05,
"loss": 0.9854,
"step": 42
},
{
"epoch": 0.8,
"grad_norm": 0.8113309741020203,
"learning_rate": 6.206979623792457e-05,
"loss": 1.5336,
"step": 43
},
{
"epoch": 0.8186046511627907,
"grad_norm": 0.9286227226257324,
"learning_rate": 6.160238225895803e-05,
"loss": 1.32,
"step": 44
},
{
"epoch": 0.8372093023255814,
"grad_norm": 0.9448747634887695,
"learning_rate": 6.112345363070428e-05,
"loss": 1.2975,
"step": 45
},
{
"epoch": 0.8558139534883721,
"grad_norm": 0.92212975025177,
"learning_rate": 6.063321765396568e-05,
"loss": 1.1939,
"step": 46
},
{
"epoch": 0.8744186046511628,
"grad_norm": 1.1105095148086548,
"learning_rate": 6.013188652384889e-05,
"loss": 1.0333,
"step": 47
},
{
"epoch": 0.8930232558139535,
"grad_norm": 1.1572589874267578,
"learning_rate": 5.9619677237917655e-05,
"loss": 1.2029,
"step": 48
},
{
"epoch": 0.9116279069767442,
"grad_norm": 1.4601818323135376,
"learning_rate": 5.9096811502266993e-05,
"loss": 1.3201,
"step": 49
},
{
"epoch": 0.9302325581395349,
"grad_norm": 1.729429006576538,
"learning_rate": 5.8563515635559344e-05,
"loss": 1.3054,
"step": 50
},
{
"epoch": 0.9302325581395349,
"eval_loss": 0.3099779188632965,
"eval_runtime": 10.8877,
"eval_samples_per_second": 8.358,
"eval_steps_per_second": 2.112,
"step": 50
},
{
"epoch": 0.9488372093023256,
"grad_norm": 1.666062593460083,
"learning_rate": 5.8020020471064145e-05,
"loss": 1.1947,
"step": 51
},
{
"epoch": 0.9674418604651163,
"grad_norm": 2.129988431930542,
"learning_rate": 5.746656125674329e-05,
"loss": 1.003,
"step": 52
},
{
"epoch": 0.986046511627907,
"grad_norm": 0.9616814255714417,
"learning_rate": 5.690337755342581e-05,
"loss": 1.3023,
"step": 53
},
{
"epoch": 1.0046511627906978,
"grad_norm": 1.7374155521392822,
"learning_rate": 5.633071313111564e-05,
"loss": 1.3017,
"step": 54
},
{
"epoch": 1.0232558139534884,
"grad_norm": 0.5342385768890381,
"learning_rate": 5.574881586347755e-05,
"loss": 0.8393,
"step": 55
},
{
"epoch": 1.041860465116279,
"grad_norm": 0.7542828917503357,
"learning_rate": 5.5157937620546804e-05,
"loss": 0.9745,
"step": 56
},
{
"epoch": 1.0604651162790697,
"grad_norm": 0.7869417071342468,
"learning_rate": 5.4558334159709074e-05,
"loss": 1.1747,
"step": 57
},
{
"epoch": 1.0790697674418606,
"grad_norm": 0.773287296295166,
"learning_rate": 5.3950265014997655e-05,
"loss": 0.8018,
"step": 58
},
{
"epoch": 1.0976744186046512,
"grad_norm": 1.2457112073898315,
"learning_rate": 5.3333993384756045e-05,
"loss": 0.9032,
"step": 59
},
{
"epoch": 1.1162790697674418,
"grad_norm": 1.0725675821304321,
"learning_rate": 5.270978601771444e-05,
"loss": 0.9193,
"step": 60
},
{
"epoch": 1.1348837209302325,
"grad_norm": 0.763644814491272,
"learning_rate": 5.207791309752948e-05,
"loss": 0.7066,
"step": 61
},
{
"epoch": 1.1534883720930234,
"grad_norm": 0.8121412396430969,
"learning_rate": 5.1438648125837204e-05,
"loss": 0.7076,
"step": 62
},
{
"epoch": 1.172093023255814,
"grad_norm": 1.0602554082870483,
"learning_rate": 5.07922678038698e-05,
"loss": 0.7972,
"step": 63
},
{
"epoch": 1.1906976744186046,
"grad_norm": 1.2864611148834229,
"learning_rate": 5.013905191268754e-05,
"loss": 0.9987,
"step": 64
},
{
"epoch": 1.2093023255813953,
"grad_norm": 1.507265567779541,
"learning_rate": 4.9479283192077487e-05,
"loss": 0.9133,
"step": 65
},
{
"epoch": 1.2279069767441861,
"grad_norm": 1.0964022874832153,
"learning_rate": 4.881324721817167e-05,
"loss": 0.6193,
"step": 66
},
{
"epoch": 1.2465116279069768,
"grad_norm": 1.7399965524673462,
"learning_rate": 4.8141232279837554e-05,
"loss": 0.8116,
"step": 67
},
{
"epoch": 1.2651162790697674,
"grad_norm": 0.8102202415466309,
"learning_rate": 4.746352925389414e-05,
"loss": 1.1357,
"step": 68
},
{
"epoch": 1.283720930232558,
"grad_norm": 0.9682488441467285,
"learning_rate": 4.678043147920822e-05,
"loss": 1.0843,
"step": 69
},
{
"epoch": 1.302325581395349,
"grad_norm": 0.8420053720474243,
"learning_rate": 4.60922346297246e-05,
"loss": 1.052,
"step": 70
},
{
"epoch": 1.3209302325581396,
"grad_norm": 0.9388038516044617,
"learning_rate": 4.539923658648585e-05,
"loss": 0.8371,
"step": 71
},
{
"epoch": 1.3395348837209302,
"grad_norm": 0.8845089077949524,
"learning_rate": 4.4701737308696665e-05,
"loss": 0.7301,
"step": 72
},
{
"epoch": 1.3581395348837209,
"grad_norm": 1.6632778644561768,
"learning_rate": 4.4000038703888604e-05,
"loss": 1.0172,
"step": 73
},
{
"epoch": 1.3767441860465115,
"grad_norm": 1.184276819229126,
"learning_rate": 4.3294444497241656e-05,
"loss": 0.8308,
"step": 74
},
{
"epoch": 1.3953488372093024,
"grad_norm": 1.2161909341812134,
"learning_rate": 4.2585260100118973e-05,
"loss": 0.6478,
"step": 75
},
{
"epoch": 1.413953488372093,
"grad_norm": 1.1866018772125244,
"learning_rate": 4.187279247787175e-05,
"loss": 0.6407,
"step": 76
},
{
"epoch": 1.4325581395348836,
"grad_norm": 1.264217495918274,
"learning_rate": 4.115735001697149e-05,
"loss": 0.8542,
"step": 77
},
{
"epoch": 1.4511627906976745,
"grad_norm": 1.6465506553649902,
"learning_rate": 4.0439242391527176e-05,
"loss": 0.7179,
"step": 78
},
{
"epoch": 1.4697674418604652,
"grad_norm": 1.624822735786438,
"learning_rate": 3.971878042924501e-05,
"loss": 0.6409,
"step": 79
},
{
"epoch": 1.4883720930232558,
"grad_norm": 1.7168909311294556,
"learning_rate": 3.899627597688895e-05,
"loss": 0.7917,
"step": 80
},
{
"epoch": 1.5069767441860464,
"grad_norm": 0.8661062121391296,
"learning_rate": 3.8272041765299995e-05,
"loss": 0.9741,
"step": 81
},
{
"epoch": 1.525581395348837,
"grad_norm": 0.9207056164741516,
"learning_rate": 3.754639127403304e-05,
"loss": 1.2139,
"step": 82
},
{
"epoch": 1.544186046511628,
"grad_norm": 1.0560969114303589,
"learning_rate": 3.6819638595669306e-05,
"loss": 0.8169,
"step": 83
},
{
"epoch": 1.5627906976744186,
"grad_norm": 1.0724343061447144,
"learning_rate": 3.609209829986387e-05,
"loss": 0.9448,
"step": 84
},
{
"epoch": 1.5813953488372094,
"grad_norm": 1.2073761224746704,
"learning_rate": 3.536408529718625e-05,
"loss": 0.9814,
"step": 85
},
{
"epoch": 1.6,
"grad_norm": 1.2773185968399048,
"learning_rate": 3.463591470281375e-05,
"loss": 0.7854,
"step": 86
},
{
"epoch": 1.6186046511627907,
"grad_norm": 1.2945756912231445,
"learning_rate": 3.390790170013612e-05,
"loss": 0.8327,
"step": 87
},
{
"epoch": 1.6372093023255814,
"grad_norm": 1.2657415866851807,
"learning_rate": 3.318036140433069e-05,
"loss": 0.7456,
"step": 88
},
{
"epoch": 1.655813953488372,
"grad_norm": 1.308043360710144,
"learning_rate": 3.245360872596697e-05,
"loss": 0.5729,
"step": 89
},
{
"epoch": 1.6744186046511627,
"grad_norm": 1.3613492250442505,
"learning_rate": 3.172795823469999e-05,
"loss": 0.6906,
"step": 90
},
{
"epoch": 1.6930232558139535,
"grad_norm": 1.2736989259719849,
"learning_rate": 3.100372402311106e-05,
"loss": 0.3997,
"step": 91
},
{
"epoch": 1.7116279069767442,
"grad_norm": 1.814155101776123,
"learning_rate": 3.0281219570754987e-05,
"loss": 0.5502,
"step": 92
},
{
"epoch": 1.730232558139535,
"grad_norm": 1.9878065586090088,
"learning_rate": 2.956075760847283e-05,
"loss": 1.1875,
"step": 93
},
{
"epoch": 1.7488372093023257,
"grad_norm": 0.9894658923149109,
"learning_rate": 2.8842649983028517e-05,
"loss": 1.081,
"step": 94
},
{
"epoch": 1.7674418604651163,
"grad_norm": 0.9422222375869751,
"learning_rate": 2.8127207522128252e-05,
"loss": 0.6992,
"step": 95
},
{
"epoch": 1.786046511627907,
"grad_norm": 1.0437465906143188,
"learning_rate": 2.7414739899881024e-05,
"loss": 0.7245,
"step": 96
},
{
"epoch": 1.8046511627906976,
"grad_norm": 1.2193009853363037,
"learning_rate": 2.670555550275834e-05,
"loss": 0.7591,
"step": 97
},
{
"epoch": 1.8232558139534882,
"grad_norm": 1.1166754961013794,
"learning_rate": 2.5999961296111394e-05,
"loss": 0.9399,
"step": 98
},
{
"epoch": 1.841860465116279,
"grad_norm": 1.4289640188217163,
"learning_rate": 2.5298262691303332e-05,
"loss": 0.9413,
"step": 99
},
{
"epoch": 1.8604651162790697,
"grad_norm": 1.2337830066680908,
"learning_rate": 2.4600763413514138e-05,
"loss": 0.7486,
"step": 100
},
{
"epoch": 1.8604651162790697,
"eval_loss": 0.28952550888061523,
"eval_runtime": 10.8771,
"eval_samples_per_second": 8.366,
"eval_steps_per_second": 2.115,
"step": 100
},
{
"epoch": 1.8790697674418606,
"grad_norm": 1.4255279302597046,
"learning_rate": 2.3907765370275404e-05,
"loss": 0.765,
"step": 101
},
{
"epoch": 1.8976744186046512,
"grad_norm": 1.7940795421600342,
"learning_rate": 2.3219568520791783e-05,
"loss": 0.6342,
"step": 102
},
{
"epoch": 1.916279069767442,
"grad_norm": 1.6805592775344849,
"learning_rate": 2.2536470746105853e-05,
"loss": 0.7094,
"step": 103
},
{
"epoch": 1.9348837209302325,
"grad_norm": 2.0769453048706055,
"learning_rate": 2.1858767720162456e-05,
"loss": 0.8721,
"step": 104
},
{
"epoch": 1.9534883720930232,
"grad_norm": 1.9850454330444336,
"learning_rate": 2.1186752781828328e-05,
"loss": 0.5382,
"step": 105
},
{
"epoch": 1.9720930232558138,
"grad_norm": 1.6767321825027466,
"learning_rate": 2.0520716807922514e-05,
"loss": 0.5035,
"step": 106
},
{
"epoch": 1.9906976744186047,
"grad_norm": 1.6948860883712769,
"learning_rate": 1.986094808731247e-05,
"loss": 0.9199,
"step": 107
},
{
"epoch": 2.0093023255813955,
"grad_norm": 1.2903045415878296,
"learning_rate": 1.92077321961302e-05,
"loss": 0.6539,
"step": 108
},
{
"epoch": 2.027906976744186,
"grad_norm": 0.6793351173400879,
"learning_rate": 1.8561351874162783e-05,
"loss": 0.6779,
"step": 109
},
{
"epoch": 2.046511627906977,
"grad_norm": 1.0761351585388184,
"learning_rate": 1.7922086902470502e-05,
"loss": 0.6817,
"step": 110
},
{
"epoch": 2.0651162790697675,
"grad_norm": 0.825526773929596,
"learning_rate": 1.7290213982285564e-05,
"loss": 0.5877,
"step": 111
},
{
"epoch": 2.083720930232558,
"grad_norm": 0.9566294550895691,
"learning_rate": 1.666600661524396e-05,
"loss": 0.5537,
"step": 112
},
{
"epoch": 2.1023255813953488,
"grad_norm": 0.9278555512428284,
"learning_rate": 1.604973498500235e-05,
"loss": 0.6015,
"step": 113
},
{
"epoch": 2.1209302325581394,
"grad_norm": 0.9701972007751465,
"learning_rate": 1.5441665840290927e-05,
"loss": 0.547,
"step": 114
},
{
"epoch": 2.13953488372093,
"grad_norm": 1.187429666519165,
"learning_rate": 1.4842062379453195e-05,
"loss": 0.5522,
"step": 115
},
{
"epoch": 2.158139534883721,
"grad_norm": 0.8903448581695557,
"learning_rate": 1.4251184136522453e-05,
"loss": 0.403,
"step": 116
},
{
"epoch": 2.1767441860465118,
"grad_norm": 0.9742254614830017,
"learning_rate": 1.3669286868884356e-05,
"loss": 0.279,
"step": 117
},
{
"epoch": 2.1953488372093024,
"grad_norm": 0.9774483442306519,
"learning_rate": 1.3096622446574184e-05,
"loss": 0.3753,
"step": 118
},
{
"epoch": 2.213953488372093,
"grad_norm": 1.1328277587890625,
"learning_rate": 1.2533438743256703e-05,
"loss": 0.2929,
"step": 119
},
{
"epoch": 2.2325581395348837,
"grad_norm": 1.241883397102356,
"learning_rate": 1.1979979528935865e-05,
"loss": 0.3254,
"step": 120
},
{
"epoch": 2.2511627906976743,
"grad_norm": 0.8701224327087402,
"learning_rate": 1.1436484364440646e-05,
"loss": 0.5831,
"step": 121
},
{
"epoch": 2.269767441860465,
"grad_norm": 0.9288952350616455,
"learning_rate": 1.0903188497732999e-05,
"loss": 0.8483,
"step": 122
},
{
"epoch": 2.2883720930232556,
"grad_norm": 0.7865915298461914,
"learning_rate": 1.038032276208234e-05,
"loss": 0.5125,
"step": 123
},
{
"epoch": 2.3069767441860467,
"grad_norm": 1.1094341278076172,
"learning_rate": 9.86811347615111e-06,
"loss": 0.453,
"step": 124
},
{
"epoch": 2.3255813953488373,
"grad_norm": 1.1128486394882202,
"learning_rate": 9.36678234603432e-06,
"loss": 0.4629,
"step": 125
},
{
"epoch": 2.344186046511628,
"grad_norm": 1.2837432622909546,
"learning_rate": 8.876546369295717e-06,
"loss": 0.6856,
"step": 126
},
{
"epoch": 2.3627906976744186,
"grad_norm": 1.148620367050171,
"learning_rate": 8.397617741041962e-06,
"loss": 0.5077,
"step": 127
},
{
"epoch": 2.3813953488372093,
"grad_norm": 1.2100828886032104,
"learning_rate": 7.930203762075426e-06,
"loss": 0.4271,
"step": 128
},
{
"epoch": 2.4,
"grad_norm": 1.403386116027832,
"learning_rate": 7.47450674916562e-06,
"loss": 0.3401,
"step": 129
},
{
"epoch": 2.4186046511627906,
"grad_norm": 1.1033246517181396,
"learning_rate": 7.030723947477803e-06,
"loss": 0.2728,
"step": 130
},
{
"epoch": 2.4372093023255816,
"grad_norm": 1.4235916137695312,
"learning_rate": 6.599047445196836e-06,
"loss": 0.3717,
"step": 131
},
{
"epoch": 2.4558139534883723,
"grad_norm": 1.2089860439300537,
"learning_rate": 6.1796640903832005e-06,
"loss": 0.3064,
"step": 132
},
{
"epoch": 2.474418604651163,
"grad_norm": 1.1210060119628906,
"learning_rate": 5.772755410097166e-06,
"loss": 0.2141,
"step": 133
},
{
"epoch": 2.4930232558139536,
"grad_norm": 1.133145809173584,
"learning_rate": 5.378497531826109e-06,
"loss": 0.5761,
"step": 134
},
{
"epoch": 2.511627906976744,
"grad_norm": 1.5591737031936646,
"learning_rate": 4.997061107248992e-06,
"loss": 0.8019,
"step": 135
},
{
"epoch": 2.530232558139535,
"grad_norm": 1.521730899810791,
"learning_rate": 4.6286112383710245e-06,
"loss": 0.58,
"step": 136
},
{
"epoch": 2.5488372093023255,
"grad_norm": 1.5007507801055908,
"learning_rate": 4.273307406060465e-06,
"loss": 0.6187,
"step": 137
},
{
"epoch": 2.567441860465116,
"grad_norm": 1.2669973373413086,
"learning_rate": 3.931303401018427e-06,
"loss": 0.5699,
"step": 138
},
{
"epoch": 2.5860465116279068,
"grad_norm": 1.4318617582321167,
"learning_rate": 3.602747257211758e-06,
"loss": 0.5587,
"step": 139
},
{
"epoch": 2.604651162790698,
"grad_norm": 1.6378320455551147,
"learning_rate": 3.2877811877975422e-06,
"loss": 0.4079,
"step": 140
},
{
"epoch": 2.6232558139534885,
"grad_norm": 1.452219009399414,
"learning_rate": 2.9865415235671784e-06,
"loss": 0.4239,
"step": 141
},
{
"epoch": 2.641860465116279,
"grad_norm": 1.3756489753723145,
"learning_rate": 2.6991586539366018e-06,
"loss": 0.4746,
"step": 142
},
{
"epoch": 2.66046511627907,
"grad_norm": 1.2296236753463745,
"learning_rate": 2.425756970508135e-06,
"loss": 0.3361,
"step": 143
},
{
"epoch": 2.6790697674418604,
"grad_norm": 1.558907389640808,
"learning_rate": 2.166454813228505e-06,
"loss": 0.4134,
"step": 144
},
{
"epoch": 2.697674418604651,
"grad_norm": 1.4058880805969238,
"learning_rate": 1.9213644191662354e-06,
"loss": 0.2047,
"step": 145
},
{
"epoch": 2.7162790697674417,
"grad_norm": 1.1016334295272827,
"learning_rate": 1.6905918739306423e-06,
"loss": 0.149,
"step": 146
},
{
"epoch": 2.734883720930233,
"grad_norm": 1.425627589225769,
"learning_rate": 1.4742370657534552e-06,
"loss": 0.9796,
"step": 147
},
{
"epoch": 2.753488372093023,
"grad_norm": 1.2110799551010132,
"learning_rate": 1.2723936422528996e-06,
"loss": 0.6505,
"step": 148
},
{
"epoch": 2.772093023255814,
"grad_norm": 1.069684386253357,
"learning_rate": 1.0851489698990146e-06,
"loss": 0.6269,
"step": 149
},
{
"epoch": 2.7906976744186047,
"grad_norm": 1.4680191278457642,
"learning_rate": 9.125840961976982e-07,
"loss": 0.8263,
"step": 150
},
{
"epoch": 2.7906976744186047,
"eval_loss": 0.3216976821422577,
"eval_runtime": 10.869,
"eval_samples_per_second": 8.372,
"eval_steps_per_second": 2.116,
"step": 150
},
{
"epoch": 2.8093023255813954,
"grad_norm": 1.4344302415847778,
"learning_rate": 7.547737146098809e-07,
"loss": 0.768,
"step": 151
},
{
"epoch": 2.827906976744186,
"grad_norm": 1.6189731359481812,
"learning_rate": 6.117861322210028e-07,
"loss": 0.5337,
"step": 152
},
{
"epoch": 2.8465116279069766,
"grad_norm": 1.4234650135040283,
"learning_rate": 4.836832401747892e-07,
"loss": 0.5149,
"step": 153
},
{
"epoch": 2.8651162790697673,
"grad_norm": 1.1778002977371216,
"learning_rate": 3.705204868840894e-07,
"loss": 0.4272,
"step": 154
},
{
"epoch": 2.883720930232558,
"grad_norm": 1.35899019241333,
"learning_rate": 2.7234685403047243e-07,
"loss": 0.3179,
"step": 155
},
{
"epoch": 2.902325581395349,
"grad_norm": 1.4125027656555176,
"learning_rate": 1.892048353628217e-07,
"loss": 0.365,
"step": 156
},
{
"epoch": 2.9209302325581397,
"grad_norm": 1.287215232849121,
"learning_rate": 1.211304183042494e-07,
"loss": 0.3343,
"step": 157
},
{
"epoch": 2.9395348837209303,
"grad_norm": 1.398272156715393,
"learning_rate": 6.81530683751752e-08,
"loss": 0.3152,
"step": 158
},
{
"epoch": 2.958139534883721,
"grad_norm": 1.168246865272522,
"learning_rate": 3.029571643941353e-08,
"loss": 0.1939,
"step": 159
},
{
"epoch": 2.9767441860465116,
"grad_norm": 1.5252883434295654,
"learning_rate": 7.574748778697015e-09,
"loss": 0.4645,
"step": 160
},
{
"epoch": 2.9953488372093022,
"grad_norm": 1.418809413909912,
"learning_rate": 0.0,
"loss": 0.3772,
"step": 161
}
],
"logging_steps": 1,
"max_steps": 161,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 4,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 1
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.532890163478528e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
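
A minimal sketch of how one might inspect a trainer state file like the one above, assuming it has been saved locally as "trainer_state.json" (the file name and path are assumptions; the field names — best_metric, best_model_checkpoint, log_history, eval_loss — are those visible in the checkpoint shown here). It separates the per-step training records from the evaluation records and prints the eval-loss trajectory; it is an illustrative reader, not part of the checkpoint itself.

```python
# Sketch: read a Hugging Face Trainer state file and summarize it.
# Assumes the JSON above was saved as "trainer_state.json" in the
# current directory; adjust the path for a real checkpoint layout
# (e.g. checkpoint-100/trainer_state.json).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print("best metric (eval_loss):", state["best_metric"])
print("best checkpoint:", state["best_model_checkpoint"])

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [entry for entry in state["log_history"] if "loss" in entry]
eval_logs = [entry for entry in state["log_history"] if "eval_loss" in entry]

print(f"{len(train_logs)} training log entries, {len(eval_logs)} eval entries")
for entry in eval_logs:
    print(f"step {entry['step']:>3}: eval_loss = {entry['eval_loss']:.4f}")
```

Run against this checkpoint, the eval entries at steps 1, 50, 100, and 150 would show the loss falling from roughly 0.608 to 0.290 at step 100 (the best checkpoint) before rising to about 0.322 at step 150, which is consistent with the early-stopping patience counter of 1 recorded in the callback state.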