{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9961880559085133,
"eval_steps": 49,
"global_step": 196,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005082592121982211,
"grad_norm": 2.417446798898839,
"learning_rate": 6.666666666666667e-07,
"loss": 1.3227,
"step": 1
},
{
"epoch": 0.005082592121982211,
"eval_loss": null,
"eval_runtime": 462.2137,
"eval_samples_per_second": 3.014,
"eval_steps_per_second": 0.379,
"step": 1
},
{
"epoch": 0.010165184243964422,
"grad_norm": 2.928692071056059,
"learning_rate": 1.3333333333333334e-06,
"loss": 1.3358,
"step": 2
},
{
"epoch": 0.015247776365946633,
"grad_norm": 2.295922498777242,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.3668,
"step": 3
},
{
"epoch": 0.020330368487928845,
"grad_norm": 2.165639635427681,
"learning_rate": 2.666666666666667e-06,
"loss": 1.2876,
"step": 4
},
{
"epoch": 0.025412960609911054,
"grad_norm": 1.4335801076375077,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.2307,
"step": 5
},
{
"epoch": 0.030495552731893267,
"grad_norm": 1.3021209574622363,
"learning_rate": 4.000000000000001e-06,
"loss": 1.2824,
"step": 6
},
{
"epoch": 0.035578144853875476,
"grad_norm": 1.233144475780061,
"learning_rate": 4.666666666666667e-06,
"loss": 1.2609,
"step": 7
},
{
"epoch": 0.04066073697585769,
"grad_norm": 1.8649443938694577,
"learning_rate": 5.333333333333334e-06,
"loss": 1.2592,
"step": 8
},
{
"epoch": 0.045743329097839895,
"grad_norm": 2.344947953156482,
"learning_rate": 6e-06,
"loss": 1.1416,
"step": 9
},
{
"epoch": 0.05082592121982211,
"grad_norm": 4.601477812008778,
"learning_rate": 6.666666666666667e-06,
"loss": 1.1554,
"step": 10
},
{
"epoch": 0.05590851334180432,
"grad_norm": 0.9781052406224989,
"learning_rate": 7.333333333333334e-06,
"loss": 1.1799,
"step": 11
},
{
"epoch": 0.060991105463786534,
"grad_norm": 1.5150887808544182,
"learning_rate": 8.000000000000001e-06,
"loss": 1.1938,
"step": 12
},
{
"epoch": 0.06607369758576874,
"grad_norm": 1.195420841394076,
"learning_rate": 8.666666666666668e-06,
"loss": 1.2875,
"step": 13
},
{
"epoch": 0.07115628970775095,
"grad_norm": 1.446747842249035,
"learning_rate": 9.333333333333334e-06,
"loss": 1.2113,
"step": 14
},
{
"epoch": 0.07623888182973317,
"grad_norm": 1.0206440434231885,
"learning_rate": 1e-05,
"loss": 1.1023,
"step": 15
},
{
"epoch": 0.08132147395171538,
"grad_norm": 0.7766830838305955,
"learning_rate": 1.0666666666666667e-05,
"loss": 1.1893,
"step": 16
},
{
"epoch": 0.08640406607369759,
"grad_norm": 0.8562502894880828,
"learning_rate": 1.1333333333333334e-05,
"loss": 1.099,
"step": 17
},
{
"epoch": 0.09148665819567979,
"grad_norm": 1.1790195599158821,
"learning_rate": 1.2e-05,
"loss": 1.1266,
"step": 18
},
{
"epoch": 0.096569250317662,
"grad_norm": 1.1536312340591073,
"learning_rate": 1.2666666666666667e-05,
"loss": 1.1868,
"step": 19
},
{
"epoch": 0.10165184243964422,
"grad_norm": 0.7170693721812672,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.1192,
"step": 20
},
{
"epoch": 0.10673443456162643,
"grad_norm": 0.960585656847019,
"learning_rate": 1.4e-05,
"loss": 1.1264,
"step": 21
},
{
"epoch": 0.11181702668360864,
"grad_norm": 0.8933902995648031,
"learning_rate": 1.4666666666666668e-05,
"loss": 1.0475,
"step": 22
},
{
"epoch": 0.11689961880559085,
"grad_norm": 0.9526763881562114,
"learning_rate": 1.5333333333333334e-05,
"loss": 1.0869,
"step": 23
},
{
"epoch": 0.12198221092757307,
"grad_norm": 0.8261066261177119,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.1273,
"step": 24
},
{
"epoch": 0.12706480304955528,
"grad_norm": 1.0138316404290697,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.0167,
"step": 25
},
{
"epoch": 0.13214739517153748,
"grad_norm": 0.6244135166489783,
"learning_rate": 1.7333333333333336e-05,
"loss": 1.0642,
"step": 26
},
{
"epoch": 0.1372299872935197,
"grad_norm": 0.6860413953391165,
"learning_rate": 1.8e-05,
"loss": 1.0598,
"step": 27
},
{
"epoch": 0.1423125794155019,
"grad_norm": 0.7123933488497386,
"learning_rate": 1.866666666666667e-05,
"loss": 1.1359,
"step": 28
},
{
"epoch": 0.1473951715374841,
"grad_norm": 0.8194224963685599,
"learning_rate": 1.9333333333333333e-05,
"loss": 1.0469,
"step": 29
},
{
"epoch": 0.15247776365946633,
"grad_norm": 0.7428981466577943,
"learning_rate": 2e-05,
"loss": 1.0621,
"step": 30
},
{
"epoch": 0.15756035578144853,
"grad_norm": 0.5978655906350417,
"learning_rate": 1.9994461368042097e-05,
"loss": 1.0809,
"step": 31
},
{
"epoch": 0.16264294790343076,
"grad_norm": 0.5523263409594675,
"learning_rate": 1.9988895058300946e-05,
"loss": 1.1207,
"step": 32
},
{
"epoch": 0.16772554002541296,
"grad_norm": 0.6386088808060907,
"learning_rate": 1.998330086278876e-05,
"loss": 1.1099,
"step": 33
},
{
"epoch": 0.17280813214739518,
"grad_norm": 0.5889449105044371,
"learning_rate": 1.9977678571428572e-05,
"loss": 1.1882,
"step": 34
},
{
"epoch": 0.17789072426937738,
"grad_norm": 0.6171122595004415,
"learning_rate": 1.9972027972027975e-05,
"loss": 1.1434,
"step": 35
},
{
"epoch": 0.18297331639135958,
"grad_norm": 0.660722632169665,
"learning_rate": 1.9966348850252384e-05,
"loss": 1.1197,
"step": 36
},
{
"epoch": 0.1880559085133418,
"grad_norm": 0.8384340606450145,
"learning_rate": 1.9960640989597977e-05,
"loss": 1.0796,
"step": 37
},
{
"epoch": 0.193138500635324,
"grad_norm": 0.6854031551884068,
"learning_rate": 1.995490417136415e-05,
"loss": 1.081,
"step": 38
},
{
"epoch": 0.19822109275730623,
"grad_norm": 0.6791992732368597,
"learning_rate": 1.9949138174625602e-05,
"loss": 1.1199,
"step": 39
},
{
"epoch": 0.20330368487928843,
"grad_norm": 0.5653035313976759,
"learning_rate": 1.9943342776203965e-05,
"loss": 1.1191,
"step": 40
},
{
"epoch": 0.20838627700127066,
"grad_norm": 0.7529491506419821,
"learning_rate": 1.9937517750639022e-05,
"loss": 1.1309,
"step": 41
},
{
"epoch": 0.21346886912325286,
"grad_norm": 2.739538155317824,
"learning_rate": 1.9931662870159458e-05,
"loss": 1.1405,
"step": 42
},
{
"epoch": 0.21855146124523506,
"grad_norm": 0.8235383414154238,
"learning_rate": 1.9925777904653155e-05,
"loss": 1.0874,
"step": 43
},
{
"epoch": 0.22363405336721728,
"grad_norm": 0.636431907641577,
"learning_rate": 1.9919862621637095e-05,
"loss": 1.1171,
"step": 44
},
{
"epoch": 0.22871664548919948,
"grad_norm": 0.5849746433127464,
"learning_rate": 1.9913916786226688e-05,
"loss": 1.0112,
"step": 45
},
{
"epoch": 0.2337992376111817,
"grad_norm": 0.771258264938101,
"learning_rate": 1.990794016110472e-05,
"loss": 1.0638,
"step": 46
},
{
"epoch": 0.2388818297331639,
"grad_norm": 0.7496683574824505,
"learning_rate": 1.9901932506489762e-05,
"loss": 0.9786,
"step": 47
},
{
"epoch": 0.24396442185514614,
"grad_norm": 0.8193989912254472,
"learning_rate": 1.989589358010411e-05,
"loss": 1.1322,
"step": 48
},
{
"epoch": 0.24904701397712833,
"grad_norm": 0.5316839323339139,
"learning_rate": 1.98898231371412e-05,
"loss": 1.0685,
"step": 49
},
{
"epoch": 0.24904701397712833,
"eval_loss": null,
"eval_runtime": 454.8058,
"eval_samples_per_second": 3.063,
"eval_steps_per_second": 0.385,
"step": 49
},
{
"epoch": 0.25412960609911056,
"grad_norm": 0.5814548864442379,
"learning_rate": 1.988372093023256e-05,
"loss": 0.9883,
"step": 50
},
{
"epoch": 0.25921219822109276,
"grad_norm": 0.6816224250940385,
"learning_rate": 1.9877586709414165e-05,
"loss": 1.0678,
"step": 51
},
{
"epoch": 0.26429479034307496,
"grad_norm": 0.6659302304663588,
"learning_rate": 1.9871420222092346e-05,
"loss": 1.1103,
"step": 52
},
{
"epoch": 0.26937738246505716,
"grad_norm": 0.6557010282469508,
"learning_rate": 1.9865221213009086e-05,
"loss": 1.1027,
"step": 53
},
{
"epoch": 0.2744599745870394,
"grad_norm": 0.5102660591956714,
"learning_rate": 1.9858989424206816e-05,
"loss": 1.0463,
"step": 54
},
{
"epoch": 0.2795425667090216,
"grad_norm": 0.9092403386769214,
"learning_rate": 1.985272459499264e-05,
"loss": 1.1451,
"step": 55
},
{
"epoch": 0.2846251588310038,
"grad_norm": 0.6067477876422573,
"learning_rate": 1.984642646190195e-05,
"loss": 1.0269,
"step": 56
},
{
"epoch": 0.289707750952986,
"grad_norm": 0.9539627940086233,
"learning_rate": 1.9840094758661536e-05,
"loss": 1.035,
"step": 57
},
{
"epoch": 0.2947903430749682,
"grad_norm": 0.5662189783414849,
"learning_rate": 1.983372921615202e-05,
"loss": 1.0151,
"step": 58
},
{
"epoch": 0.29987293519695046,
"grad_norm": 0.657165599335774,
"learning_rate": 1.9827329562369756e-05,
"loss": 1.0643,
"step": 59
},
{
"epoch": 0.30495552731893266,
"grad_norm": 0.6751154617452281,
"learning_rate": 1.9820895522388063e-05,
"loss": 1.054,
"step": 60
},
{
"epoch": 0.31003811944091486,
"grad_norm": 0.6619429396351022,
"learning_rate": 1.9814426818317872e-05,
"loss": 1.078,
"step": 61
},
{
"epoch": 0.31512071156289706,
"grad_norm": 0.5939366111849416,
"learning_rate": 1.980792316926771e-05,
"loss": 1.0312,
"step": 62
},
{
"epoch": 0.3202033036848793,
"grad_norm": 0.6050698536675223,
"learning_rate": 1.980138429130304e-05,
"loss": 0.9909,
"step": 63
},
{
"epoch": 0.3252858958068615,
"grad_norm": 0.709448530717843,
"learning_rate": 1.9794809897404953e-05,
"loss": 1.0087,
"step": 64
},
{
"epoch": 0.3303684879288437,
"grad_norm": 0.669736059058813,
"learning_rate": 1.978819969742814e-05,
"loss": 1.0107,
"step": 65
},
{
"epoch": 0.3354510800508259,
"grad_norm": 0.6986784735462407,
"learning_rate": 1.9781553398058257e-05,
"loss": 0.9947,
"step": 66
},
{
"epoch": 0.3405336721728081,
"grad_norm": 1.1264664572165972,
"learning_rate": 1.977487070276848e-05,
"loss": 0.9921,
"step": 67
},
{
"epoch": 0.34561626429479037,
"grad_norm": 0.6700835793434858,
"learning_rate": 1.9768151311775475e-05,
"loss": 1.111,
"step": 68
},
{
"epoch": 0.35069885641677256,
"grad_norm": 0.6822724145331737,
"learning_rate": 1.9761394921994493e-05,
"loss": 1.0596,
"step": 69
},
{
"epoch": 0.35578144853875476,
"grad_norm": 0.6040759384639588,
"learning_rate": 1.9754601226993868e-05,
"loss": 1.1039,
"step": 70
},
{
"epoch": 0.36086404066073696,
"grad_norm": 0.6449973639426745,
"learning_rate": 1.9747769916948632e-05,
"loss": 1.0643,
"step": 71
},
{
"epoch": 0.36594663278271916,
"grad_norm": 1.3707597910260259,
"learning_rate": 1.9740900678593462e-05,
"loss": 1.0051,
"step": 72
},
{
"epoch": 0.3710292249047014,
"grad_norm": 0.8317766567975909,
"learning_rate": 1.973399319517476e-05,
"loss": 1.0358,
"step": 73
},
{
"epoch": 0.3761118170266836,
"grad_norm": 0.6836349542770239,
"learning_rate": 1.972704714640199e-05,
"loss": 0.9989,
"step": 74
},
{
"epoch": 0.3811944091486658,
"grad_norm": 0.672285267907023,
"learning_rate": 1.9720062208398136e-05,
"loss": 1.1016,
"step": 75
},
{
"epoch": 0.386277001270648,
"grad_norm": 0.6008642636107718,
"learning_rate": 1.971303805364941e-05,
"loss": 1.0148,
"step": 76
},
{
"epoch": 0.39135959339263027,
"grad_norm": 0.6137126435530088,
"learning_rate": 1.9705974350954024e-05,
"loss": 0.946,
"step": 77
},
{
"epoch": 0.39644218551461247,
"grad_norm": 0.6511125540817548,
"learning_rate": 1.9698870765370138e-05,
"loss": 1.0868,
"step": 78
},
{
"epoch": 0.40152477763659467,
"grad_norm": 0.65753027445619,
"learning_rate": 1.9691726958162946e-05,
"loss": 1.0311,
"step": 79
},
{
"epoch": 0.40660736975857686,
"grad_norm": 0.5461817721258532,
"learning_rate": 1.968454258675079e-05,
"loss": 0.9735,
"step": 80
},
{
"epoch": 0.41168996188055906,
"grad_norm": 4.0148449609783015,
"learning_rate": 1.967731730465043e-05,
"loss": 1.087,
"step": 81
},
{
"epoch": 0.4167725540025413,
"grad_norm": 0.5687951661793367,
"learning_rate": 1.967005076142132e-05,
"loss": 1.0163,
"step": 82
},
{
"epoch": 0.4218551461245235,
"grad_norm": 0.5981525816475669,
"learning_rate": 1.9662742602608975e-05,
"loss": 0.953,
"step": 83
},
{
"epoch": 0.4269377382465057,
"grad_norm": 0.6527915836371785,
"learning_rate": 1.96553924696873e-05,
"loss": 1.0516,
"step": 84
},
{
"epoch": 0.4320203303684879,
"grad_norm": 0.5963214365816606,
"learning_rate": 1.9648000000000002e-05,
"loss": 1.1112,
"step": 85
},
{
"epoch": 0.4371029224904701,
"grad_norm": 0.8494180495407153,
"learning_rate": 1.96405648267009e-05,
"loss": 1.0262,
"step": 86
},
{
"epoch": 0.44218551461245237,
"grad_norm": 0.5619218395689203,
"learning_rate": 1.9633086578693274e-05,
"loss": 0.9716,
"step": 87
},
{
"epoch": 0.44726810673443457,
"grad_norm": 0.5453585209280232,
"learning_rate": 1.9625564880568112e-05,
"loss": 1.0342,
"step": 88
},
{
"epoch": 0.45235069885641677,
"grad_norm": 0.8362630225169261,
"learning_rate": 1.9617999352541277e-05,
"loss": 0.9356,
"step": 89
},
{
"epoch": 0.45743329097839897,
"grad_norm": 0.5377879009778299,
"learning_rate": 1.9610389610389612e-05,
"loss": 0.9989,
"step": 90
},
{
"epoch": 0.4625158831003812,
"grad_norm": 0.6137834508091341,
"learning_rate": 1.9602735265385868e-05,
"loss": 0.9171,
"step": 91
},
{
"epoch": 0.4675984752223634,
"grad_norm": 1.769683080350329,
"learning_rate": 1.959503592423253e-05,
"loss": 1.0589,
"step": 92
},
{
"epoch": 0.4726810673443456,
"grad_norm": 0.6001041891666766,
"learning_rate": 1.9587291188994433e-05,
"loss": 1.0287,
"step": 93
},
{
"epoch": 0.4777636594663278,
"grad_norm": 0.6289491210658303,
"learning_rate": 1.9579500657030224e-05,
"loss": 1.0286,
"step": 94
},
{
"epoch": 0.48284625158831,
"grad_norm": 0.7114332846322945,
"learning_rate": 1.957166392092257e-05,
"loss": 0.9367,
"step": 95
},
{
"epoch": 0.48792884371029227,
"grad_norm": 0.5391940496295085,
"learning_rate": 1.956378056840714e-05,
"loss": 0.9247,
"step": 96
},
{
"epoch": 0.49301143583227447,
"grad_norm": 0.589674085065515,
"learning_rate": 1.9555850182300298e-05,
"loss": 0.9389,
"step": 97
},
{
"epoch": 0.49809402795425667,
"grad_norm": 2.5768068703243765,
"learning_rate": 1.9547872340425533e-05,
"loss": 1.0362,
"step": 98
},
{
"epoch": 0.49809402795425667,
"eval_loss": null,
"eval_runtime": 454.7897,
"eval_samples_per_second": 3.063,
"eval_steps_per_second": 0.385,
"step": 98
},
{
"epoch": 0.5031766200762389,
"grad_norm": 0.5898947886720844,
"learning_rate": 1.9539846615538515e-05,
"loss": 1.0852,
"step": 99
},
{
"epoch": 0.5082592121982211,
"grad_norm": 1.170247791808104,
"learning_rate": 1.9531772575250837e-05,
"loss": 0.9584,
"step": 100
},
{
"epoch": 0.5133418043202033,
"grad_norm": 0.5296707610138294,
"learning_rate": 1.9523649781952368e-05,
"loss": 0.9995,
"step": 101
},
{
"epoch": 0.5184243964421855,
"grad_norm": 1.005560288522664,
"learning_rate": 1.9515477792732166e-05,
"loss": 0.9777,
"step": 102
},
{
"epoch": 0.5235069885641678,
"grad_norm": 1.0428938939017154,
"learning_rate": 1.9507256159298012e-05,
"loss": 1.0735,
"step": 103
},
{
"epoch": 0.5285895806861499,
"grad_norm": 0.7851181913743503,
"learning_rate": 1.949898442789438e-05,
"loss": 0.991,
"step": 104
},
{
"epoch": 0.5336721728081322,
"grad_norm": 1.2550922763496395,
"learning_rate": 1.9490662139219015e-05,
"loss": 0.983,
"step": 105
},
{
"epoch": 0.5387547649301143,
"grad_norm": 0.622273113232527,
"learning_rate": 1.9482288828337875e-05,
"loss": 0.9351,
"step": 106
},
{
"epoch": 0.5438373570520966,
"grad_norm": 1.3022101837507225,
"learning_rate": 1.9473864024598567e-05,
"loss": 0.9618,
"step": 107
},
{
"epoch": 0.5489199491740788,
"grad_norm": 0.5578554807566163,
"learning_rate": 1.946538725154215e-05,
"loss": 1.0337,
"step": 108
},
{
"epoch": 0.554002541296061,
"grad_norm": 0.7324460383858523,
"learning_rate": 1.9456858026813338e-05,
"loss": 1.0602,
"step": 109
},
{
"epoch": 0.5590851334180432,
"grad_norm": 0.5524354794155449,
"learning_rate": 1.9448275862068968e-05,
"loss": 0.9662,
"step": 110
},
{
"epoch": 0.5641677255400254,
"grad_norm": 0.668823878032025,
"learning_rate": 1.9439640262884816e-05,
"loss": 1.0083,
"step": 111
},
{
"epoch": 0.5692503176620076,
"grad_norm": 0.6100277575552286,
"learning_rate": 1.9430950728660656e-05,
"loss": 1.0001,
"step": 112
},
{
"epoch": 0.5743329097839899,
"grad_norm": 0.5908486995002686,
"learning_rate": 1.9422206752523496e-05,
"loss": 1.094,
"step": 113
},
{
"epoch": 0.579415501905972,
"grad_norm": 0.5679655400443153,
"learning_rate": 1.9413407821229053e-05,
"loss": 0.9961,
"step": 114
},
{
"epoch": 0.5844980940279543,
"grad_norm": 0.6224828274554025,
"learning_rate": 1.9404553415061298e-05,
"loss": 1.0172,
"step": 115
},
{
"epoch": 0.5895806861499364,
"grad_norm": 0.5913419268978121,
"learning_rate": 1.939564300773015e-05,
"loss": 1.0175,
"step": 116
},
{
"epoch": 0.5946632782719187,
"grad_norm": 0.6162736480008056,
"learning_rate": 1.9386676066267185e-05,
"loss": 1.0338,
"step": 117
},
{
"epoch": 0.5997458703939009,
"grad_norm": 0.6128921125188392,
"learning_rate": 1.937765205091938e-05,
"loss": 1.0118,
"step": 118
},
{
"epoch": 0.6048284625158831,
"grad_norm": 0.5935201407467202,
"learning_rate": 1.9368570415040795e-05,
"loss": 1.0772,
"step": 119
},
{
"epoch": 0.6099110546378653,
"grad_norm": 2.2955297203218574,
"learning_rate": 1.935943060498221e-05,
"loss": 1.0561,
"step": 120
},
{
"epoch": 0.6149936467598475,
"grad_norm": 0.6021547556099237,
"learning_rate": 1.9350232059978583e-05,
"loss": 1.0433,
"step": 121
},
{
"epoch": 0.6200762388818297,
"grad_norm": 15.458265619359883,
"learning_rate": 1.9340974212034385e-05,
"loss": 0.9981,
"step": 122
},
{
"epoch": 0.625158831003812,
"grad_norm": 1.2140568290030576,
"learning_rate": 1.9331656485806687e-05,
"loss": 1.0703,
"step": 123
},
{
"epoch": 0.6302414231257941,
"grad_norm": 1.072369391194644,
"learning_rate": 1.932227829848594e-05,
"loss": 1.0085,
"step": 124
},
{
"epoch": 0.6353240152477764,
"grad_norm": 0.5560086533767725,
"learning_rate": 1.9312839059674507e-05,
"loss": 0.948,
"step": 125
},
{
"epoch": 0.6404066073697586,
"grad_norm": 0.6835673411297744,
"learning_rate": 1.93033381712627e-05,
"loss": 1.0326,
"step": 126
},
{
"epoch": 0.6454891994917408,
"grad_norm": 0.5632555836113124,
"learning_rate": 1.9293775027302514e-05,
"loss": 1.0066,
"step": 127
},
{
"epoch": 0.650571791613723,
"grad_norm": 0.756797864885407,
"learning_rate": 1.9284149013878745e-05,
"loss": 1.0946,
"step": 128
},
{
"epoch": 0.6556543837357052,
"grad_norm": 0.5284862562943021,
"learning_rate": 1.927445950897765e-05,
"loss": 1.006,
"step": 129
},
{
"epoch": 0.6607369758576874,
"grad_norm": 0.6743386059861107,
"learning_rate": 1.926470588235294e-05,
"loss": 1.0845,
"step": 130
},
{
"epoch": 0.6658195679796697,
"grad_norm": 0.600326744146643,
"learning_rate": 1.9254887495389156e-05,
"loss": 1.0703,
"step": 131
},
{
"epoch": 0.6709021601016518,
"grad_norm": 0.6781151858674082,
"learning_rate": 1.9245003700962252e-05,
"loss": 1.039,
"step": 132
},
{
"epoch": 0.6759847522236341,
"grad_norm": 0.6330220485375354,
"learning_rate": 1.923505384329744e-05,
"loss": 1.0711,
"step": 133
},
{
"epoch": 0.6810673443456162,
"grad_norm": 0.7355514488741505,
"learning_rate": 1.9225037257824145e-05,
"loss": 1.0303,
"step": 134
},
{
"epoch": 0.6861499364675985,
"grad_norm": 1.2520345572978264,
"learning_rate": 1.921495327102804e-05,
"loss": 1.1175,
"step": 135
},
{
"epoch": 0.6912325285895807,
"grad_norm": 0.6090687761757717,
"learning_rate": 1.920480120030008e-05,
"loss": 0.9646,
"step": 136
},
{
"epoch": 0.6963151207115629,
"grad_norm": 0.5292705285452848,
"learning_rate": 1.9194580353782463e-05,
"loss": 0.9466,
"step": 137
},
{
"epoch": 0.7013977128335451,
"grad_norm": 0.631666235944858,
"learning_rate": 1.9184290030211485e-05,
"loss": 1.0214,
"step": 138
},
{
"epoch": 0.7064803049555273,
"grad_norm": 0.5330412095712574,
"learning_rate": 1.9173929518757108e-05,
"loss": 1.0399,
"step": 139
},
{
"epoch": 0.7115628970775095,
"grad_norm": 0.5857060018855021,
"learning_rate": 1.9163498098859318e-05,
"loss": 0.8816,
"step": 140
},
{
"epoch": 0.7166454891994918,
"grad_norm": 0.5525598551714797,
"learning_rate": 1.9152995040061047e-05,
"loss": 1.059,
"step": 141
},
{
"epoch": 0.7217280813214739,
"grad_norm": 0.5680224766891685,
"learning_rate": 1.9142419601837675e-05,
"loss": 1.0108,
"step": 142
},
{
"epoch": 0.7268106734434562,
"grad_norm": 0.8442456969016195,
"learning_rate": 1.9131771033422974e-05,
"loss": 1.1537,
"step": 143
},
{
"epoch": 0.7318932655654383,
"grad_norm": 0.6777941101499753,
"learning_rate": 1.912104857363146e-05,
"loss": 0.9485,
"step": 144
},
{
"epoch": 0.7369758576874206,
"grad_norm": 0.6047611797963537,
"learning_rate": 1.9110251450676987e-05,
"loss": 1.1167,
"step": 145
},
{
"epoch": 0.7420584498094028,
"grad_norm": 0.7302224761524364,
"learning_rate": 1.9099378881987576e-05,
"loss": 1.0066,
"step": 146
},
{
"epoch": 0.747141041931385,
"grad_norm": 0.679593185673484,
"learning_rate": 1.9088430074016365e-05,
"loss": 1.0814,
"step": 147
},
{
"epoch": 0.747141041931385,
"eval_loss": null,
"eval_runtime": 454.9184,
"eval_samples_per_second": 3.062,
"eval_steps_per_second": 0.385,
"step": 147
},
{
"epoch": 0.7522236340533672,
"grad_norm": 0.5396369338052232,
"learning_rate": 1.9077404222048476e-05,
"loss": 1.0294,
"step": 148
},
{
"epoch": 0.7573062261753494,
"grad_norm": 0.569056375341392,
"learning_rate": 1.9066300510003925e-05,
"loss": 1.0177,
"step": 149
},
{
"epoch": 0.7623888182973316,
"grad_norm": 0.6924084063596142,
"learning_rate": 1.905511811023622e-05,
"loss": 0.9484,
"step": 150
},
{
"epoch": 0.7674714104193139,
"grad_norm": 0.776918131135741,
"learning_rate": 1.904385618332675e-05,
"loss": 1.0091,
"step": 151
},
{
"epoch": 0.772554002541296,
"grad_norm": 0.5631403333949265,
"learning_rate": 1.9032513877874704e-05,
"loss": 1.0455,
"step": 152
},
{
"epoch": 0.7776365946632783,
"grad_norm": 0.5234633902025975,
"learning_rate": 1.9021090330282533e-05,
"loss": 0.8902,
"step": 153
},
{
"epoch": 0.7827191867852605,
"grad_norm": 0.8954844619126904,
"learning_rate": 1.9009584664536742e-05,
"loss": 1.1005,
"step": 154
},
{
"epoch": 0.7878017789072427,
"grad_norm": 0.9593051257588633,
"learning_rate": 1.899799599198397e-05,
"loss": 1.0136,
"step": 155
},
{
"epoch": 0.7928843710292249,
"grad_norm": 0.7462701977681836,
"learning_rate": 1.8986323411102173e-05,
"loss": 0.9644,
"step": 156
},
{
"epoch": 0.7979669631512071,
"grad_norm": 0.5185601200545743,
"learning_rate": 1.8974566007266858e-05,
"loss": 1.0351,
"step": 157
},
{
"epoch": 0.8030495552731893,
"grad_norm": 0.5198417937751084,
"learning_rate": 1.896272285251216e-05,
"loss": 1.0483,
"step": 158
},
{
"epoch": 0.8081321473951716,
"grad_norm": 0.6547184013524777,
"learning_rate": 1.8950793005286704e-05,
"loss": 1.064,
"step": 159
},
{
"epoch": 0.8132147395171537,
"grad_norm": 0.6353570340115701,
"learning_rate": 1.8938775510204083e-05,
"loss": 0.9942,
"step": 160
},
{
"epoch": 0.818297331639136,
"grad_norm": 0.6593647760506935,
"learning_rate": 1.8926669397787795e-05,
"loss": 0.9364,
"step": 161
},
{
"epoch": 0.8233799237611181,
"grad_norm": 0.7127754736535036,
"learning_rate": 1.8914473684210527e-05,
"loss": 1.0383,
"step": 162
},
{
"epoch": 0.8284625158831004,
"grad_norm": 0.5387503654944155,
"learning_rate": 1.8902187371027652e-05,
"loss": 1.013,
"step": 163
},
{
"epoch": 0.8335451080050826,
"grad_norm": 0.5486326058897425,
"learning_rate": 1.8889809444904724e-05,
"loss": 1.0064,
"step": 164
},
{
"epoch": 0.8386277001270648,
"grad_norm": 0.5469444493707462,
"learning_rate": 1.887733887733888e-05,
"loss": 0.9686,
"step": 165
},
{
"epoch": 0.843710292249047,
"grad_norm": 1.2362111579657413,
"learning_rate": 1.8864774624373957e-05,
"loss": 0.9969,
"step": 166
},
{
"epoch": 0.8487928843710292,
"grad_norm": 0.6714649440871081,
"learning_rate": 1.8852115626309178e-05,
"loss": 0.9914,
"step": 167
},
{
"epoch": 0.8538754764930114,
"grad_norm": 0.5790818230708058,
"learning_rate": 1.883936080740118e-05,
"loss": 0.9756,
"step": 168
},
{
"epoch": 0.8589580686149937,
"grad_norm": 0.7837618766260191,
"learning_rate": 1.882650907555931e-05,
"loss": 1.0127,
"step": 169
},
{
"epoch": 0.8640406607369758,
"grad_norm": 0.7086746606620099,
"learning_rate": 1.88135593220339e-05,
"loss": 1.1598,
"step": 170
},
{
"epoch": 0.8691232528589581,
"grad_norm": 0.6038564000981422,
"learning_rate": 1.880051042109741e-05,
"loss": 1.0328,
"step": 171
},
{
"epoch": 0.8742058449809402,
"grad_norm": 0.5480641081333995,
"learning_rate": 1.878736122971819e-05,
"loss": 1.0436,
"step": 172
},
{
"epoch": 0.8792884371029225,
"grad_norm": 0.5760210427645431,
"learning_rate": 1.8774110587226747e-05,
"loss": 0.9944,
"step": 173
},
{
"epoch": 0.8843710292249047,
"grad_norm": 0.5973246266208786,
"learning_rate": 1.8760757314974183e-05,
"loss": 0.9776,
"step": 174
},
{
"epoch": 0.8894536213468869,
"grad_norm": 0.5919506841693293,
"learning_rate": 1.8747300215982725e-05,
"loss": 0.9878,
"step": 175
},
{
"epoch": 0.8945362134688691,
"grad_norm": 1.0854485554789506,
"learning_rate": 1.873373807458803e-05,
"loss": 1.106,
"step": 176
},
{
"epoch": 0.8996188055908514,
"grad_norm": 0.5983944451554725,
"learning_rate": 1.872006965607314e-05,
"loss": 1.0879,
"step": 177
},
{
"epoch": 0.9047013977128335,
"grad_norm": 0.6634154303973875,
"learning_rate": 1.870629370629371e-05,
"loss": 0.9286,
"step": 178
},
{
"epoch": 0.9097839898348158,
"grad_norm": 0.5854692268990905,
"learning_rate": 1.869240895129443e-05,
"loss": 0.9738,
"step": 179
},
{
"epoch": 0.9148665819567979,
"grad_norm": 0.6129124494505737,
"learning_rate": 1.8678414096916303e-05,
"loss": 1.1037,
"step": 180
},
{
"epoch": 0.9199491740787802,
"grad_norm": 1.0109462911647256,
"learning_rate": 1.8664307828394515e-05,
"loss": 0.9844,
"step": 181
},
{
"epoch": 0.9250317662007624,
"grad_norm": 0.5765605814856808,
"learning_rate": 1.8650088809946714e-05,
"loss": 1.0584,
"step": 182
},
{
"epoch": 0.9301143583227446,
"grad_norm": 0.5399969782623486,
"learning_rate": 1.8635755684351314e-05,
"loss": 1.0444,
"step": 183
},
{
"epoch": 0.9351969504447268,
"grad_norm": 0.6015410734318279,
"learning_rate": 1.862130707251567e-05,
"loss": 1.0178,
"step": 184
},
{
"epoch": 0.940279542566709,
"grad_norm": 0.5028759554734284,
"learning_rate": 1.8606741573033708e-05,
"loss": 0.9699,
"step": 185
},
{
"epoch": 0.9453621346886912,
"grad_norm": 0.5458432185038177,
"learning_rate": 1.8592057761732854e-05,
"loss": 0.9319,
"step": 186
},
{
"epoch": 0.9504447268106735,
"grad_norm": 0.5646703109862106,
"learning_rate": 1.8577254191209787e-05,
"loss": 0.9879,
"step": 187
},
{
"epoch": 0.9555273189326556,
"grad_norm": 0.5213455747955001,
"learning_rate": 1.856232939035487e-05,
"loss": 0.93,
"step": 188
},
{
"epoch": 0.9606099110546379,
"grad_norm": 0.6160630051745865,
"learning_rate": 1.854728186386478e-05,
"loss": 0.9984,
"step": 189
},
{
"epoch": 0.96569250317662,
"grad_norm": 0.5717108146075167,
"learning_rate": 1.853211009174312e-05,
"loss": 0.9986,
"step": 190
},
{
"epoch": 0.9707750952986023,
"grad_norm": 1.1933909225187864,
"learning_rate": 1.8516812528788576e-05,
"loss": 0.9603,
"step": 191
},
{
"epoch": 0.9758576874205845,
"grad_norm": 0.5476464515272882,
"learning_rate": 1.8501387604070308e-05,
"loss": 1.0174,
"step": 192
},
{
"epoch": 0.9809402795425667,
"grad_norm": 0.6026655085443507,
"learning_rate": 1.8485833720390153e-05,
"loss": 0.9648,
"step": 193
},
{
"epoch": 0.9860228716645489,
"grad_norm": 0.5597982416459779,
"learning_rate": 1.8470149253731344e-05,
"loss": 1.064,
"step": 194
},
{
"epoch": 0.9911054637865311,
"grad_norm": 0.5256542216530053,
"learning_rate": 1.845433255269321e-05,
"loss": 0.9485,
"step": 195
},
{
"epoch": 0.9961880559085133,
"grad_norm": 0.5698011465117779,
"learning_rate": 1.843838193791157e-05,
"loss": 1.0465,
"step": 196
},
{
"epoch": 0.9961880559085133,
"eval_loss": null,
"eval_runtime": 454.5687,
"eval_samples_per_second": 3.064,
"eval_steps_per_second": 0.385,
"step": 196
}
],
"logging_steps": 1,
"max_steps": 392,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 196,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 7.09553504082985e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}