{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.02515278350920631,
"eval_steps": 160,
"global_step": 160,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00015720489693253945,
"grad_norm": 1.3751904964447021,
"learning_rate": 0.0,
"loss": 3.5741,
"step": 1
},
{
"epoch": 0.00015720489693253945,
"eval_loss": 3.4173049926757812,
"eval_runtime": 2315.7248,
"eval_samples_per_second": 3.998,
"eval_steps_per_second": 1.999,
"step": 1
},
{
"epoch": 0.0003144097938650789,
"grad_norm": 1.231239676475525,
"learning_rate": 5e-06,
"loss": 3.3021,
"step": 2
},
{
"epoch": 0.00047161469079761836,
"grad_norm": 1.3657807111740112,
"learning_rate": 1e-05,
"loss": 3.6333,
"step": 3
},
{
"epoch": 0.0006288195877301578,
"grad_norm": 1.3117496967315674,
"learning_rate": 1.5e-05,
"loss": 3.3731,
"step": 4
},
{
"epoch": 0.0007860244846626972,
"grad_norm": 1.4118576049804688,
"learning_rate": 2e-05,
"loss": 3.612,
"step": 5
},
{
"epoch": 0.0009432293815952367,
"grad_norm": 1.3155895471572876,
"learning_rate": 2.5e-05,
"loss": 3.3296,
"step": 6
},
{
"epoch": 0.001100434278527776,
"grad_norm": 1.2847192287445068,
"learning_rate": 3e-05,
"loss": 3.2168,
"step": 7
},
{
"epoch": 0.0012576391754603156,
"grad_norm": 1.1421078443527222,
"learning_rate": 3.5e-05,
"loss": 3.085,
"step": 8
},
{
"epoch": 0.0014148440723928551,
"grad_norm": 0.9923035502433777,
"learning_rate": 4e-05,
"loss": 3.0472,
"step": 9
},
{
"epoch": 0.0015720489693253944,
"grad_norm": 0.795043408870697,
"learning_rate": 4.5e-05,
"loss": 2.6666,
"step": 10
},
{
"epoch": 0.001729253866257934,
"grad_norm": 0.5987974405288696,
"learning_rate": 5e-05,
"loss": 2.473,
"step": 11
},
{
"epoch": 0.0018864587631904734,
"grad_norm": 0.4488905668258667,
"learning_rate": 4.9999999236547564e-05,
"loss": 2.3731,
"step": 12
},
{
"epoch": 0.002043663660123013,
"grad_norm": 0.3517301380634308,
"learning_rate": 4.999999694619029e-05,
"loss": 2.2158,
"step": 13
},
{
"epoch": 0.002200868557055552,
"grad_norm": 0.3045121431350708,
"learning_rate": 4.999999312892831e-05,
"loss": 2.3351,
"step": 14
},
{
"epoch": 0.002358073453988092,
"grad_norm": 0.24488244950771332,
"learning_rate": 4.9999987784761884e-05,
"loss": 2.2693,
"step": 15
},
{
"epoch": 0.0025152783509206312,
"grad_norm": 0.22892728447914124,
"learning_rate": 4.999998091369132e-05,
"loss": 2.1006,
"step": 16
},
{
"epoch": 0.0026724832478531705,
"grad_norm": 0.23219206929206848,
"learning_rate": 4.999997251571704e-05,
"loss": 2.215,
"step": 17
},
{
"epoch": 0.0028296881447857102,
"grad_norm": 0.24427154660224915,
"learning_rate": 4.999996259083956e-05,
"loss": 2.1708,
"step": 18
},
{
"epoch": 0.0029868930417182495,
"grad_norm": 0.2640205919742584,
"learning_rate": 4.999995113905947e-05,
"loss": 2.1709,
"step": 19
},
{
"epoch": 0.003144097938650789,
"grad_norm": 0.26644033193588257,
"learning_rate": 4.999993816037749e-05,
"loss": 2.1733,
"step": 20
},
{
"epoch": 0.0033013028355833285,
"grad_norm": 0.2621535062789917,
"learning_rate": 4.9999923654794414e-05,
"loss": 2.0059,
"step": 21
},
{
"epoch": 0.003458507732515868,
"grad_norm": 0.2586187422275543,
"learning_rate": 4.999990762231111e-05,
"loss": 2.0336,
"step": 22
},
{
"epoch": 0.003615712629448407,
"grad_norm": 0.26732271909713745,
"learning_rate": 4.9999890062928566e-05,
"loss": 2.0566,
"step": 23
},
{
"epoch": 0.003772917526380947,
"grad_norm": 0.2357867807149887,
"learning_rate": 4.999987097664787e-05,
"loss": 1.9529,
"step": 24
},
{
"epoch": 0.003930122423313486,
"grad_norm": 0.2297009825706482,
"learning_rate": 4.999985036347016e-05,
"loss": 2.0369,
"step": 25
},
{
"epoch": 0.004087327320246026,
"grad_norm": 0.20529747009277344,
"learning_rate": 4.9999828223396705e-05,
"loss": 1.9781,
"step": 26
},
{
"epoch": 0.004244532217178565,
"grad_norm": 0.18342873454093933,
"learning_rate": 4.999980455642887e-05,
"loss": 1.9986,
"step": 27
},
{
"epoch": 0.004401737114111104,
"grad_norm": 0.16487397253513336,
"learning_rate": 4.999977936256809e-05,
"loss": 1.9063,
"step": 28
},
{
"epoch": 0.004558942011043644,
"grad_norm": 0.1762266606092453,
"learning_rate": 4.99997526418159e-05,
"loss": 1.9517,
"step": 29
},
{
"epoch": 0.004716146907976184,
"grad_norm": 0.16371938586235046,
"learning_rate": 4.999972439417394e-05,
"loss": 1.7734,
"step": 30
},
{
"epoch": 0.004873351804908723,
"grad_norm": 0.17309769988059998,
"learning_rate": 4.999969461964392e-05,
"loss": 1.8732,
"step": 31
},
{
"epoch": 0.0050305567018412625,
"grad_norm": 0.15772338211536407,
"learning_rate": 4.9999663318227683e-05,
"loss": 1.7537,
"step": 32
},
{
"epoch": 0.005187761598773802,
"grad_norm": 0.17521986365318298,
"learning_rate": 4.9999630489927126e-05,
"loss": 2.0077,
"step": 33
},
{
"epoch": 0.005344966495706341,
"grad_norm": 0.15462292730808258,
"learning_rate": 4.999959613474425e-05,
"loss": 1.8576,
"step": 34
},
{
"epoch": 0.005502171392638881,
"grad_norm": 0.15280336141586304,
"learning_rate": 4.999956025268117e-05,
"loss": 1.862,
"step": 35
},
{
"epoch": 0.0056593762895714205,
"grad_norm": 0.14518432319164276,
"learning_rate": 4.999952284374006e-05,
"loss": 1.8893,
"step": 36
},
{
"epoch": 0.005816581186503959,
"grad_norm": 0.16087624430656433,
"learning_rate": 4.999948390792321e-05,
"loss": 1.8658,
"step": 37
},
{
"epoch": 0.005973786083436499,
"grad_norm": 0.17504698038101196,
"learning_rate": 4.999944344523301e-05,
"loss": 1.7647,
"step": 38
},
{
"epoch": 0.006130990980369039,
"grad_norm": 0.17786233127117157,
"learning_rate": 4.999940145567191e-05,
"loss": 1.8133,
"step": 39
},
{
"epoch": 0.006288195877301578,
"grad_norm": 0.1628972887992859,
"learning_rate": 4.999935793924249e-05,
"loss": 1.7731,
"step": 40
},
{
"epoch": 0.006445400774234117,
"grad_norm": 0.13461466133594513,
"learning_rate": 4.9999312895947406e-05,
"loss": 1.7558,
"step": 41
},
{
"epoch": 0.006602605671166657,
"grad_norm": 0.12960125505924225,
"learning_rate": 4.99992663257894e-05,
"loss": 1.7639,
"step": 42
},
{
"epoch": 0.006759810568099196,
"grad_norm": 0.10991287231445312,
"learning_rate": 4.9999218228771324e-05,
"loss": 1.7538,
"step": 43
},
{
"epoch": 0.006917015465031736,
"grad_norm": 0.11583230644464493,
"learning_rate": 4.999916860489612e-05,
"loss": 1.715,
"step": 44
},
{
"epoch": 0.007074220361964275,
"grad_norm": 0.10344280302524567,
"learning_rate": 4.999911745416681e-05,
"loss": 1.6907,
"step": 45
},
{
"epoch": 0.007231425258896814,
"grad_norm": 0.10546118766069412,
"learning_rate": 4.999906477658651e-05,
"loss": 1.7294,
"step": 46
},
{
"epoch": 0.007388630155829354,
"grad_norm": 0.11775675415992737,
"learning_rate": 4.9999010572158465e-05,
"loss": 1.7146,
"step": 47
},
{
"epoch": 0.007545835052761894,
"grad_norm": 0.11109112203121185,
"learning_rate": 4.999895484088596e-05,
"loss": 1.6939,
"step": 48
},
{
"epoch": 0.007703039949694433,
"grad_norm": 0.1116517186164856,
"learning_rate": 4.999889758277242e-05,
"loss": 1.7271,
"step": 49
},
{
"epoch": 0.007860244846626972,
"grad_norm": 0.11245547980070114,
"learning_rate": 4.999883879782132e-05,
"loss": 1.7333,
"step": 50
},
{
"epoch": 0.008017449743559512,
"grad_norm": 0.1150551363825798,
"learning_rate": 4.999877848603626e-05,
"loss": 1.7036,
"step": 51
},
{
"epoch": 0.008174654640492052,
"grad_norm": 0.10856381803750992,
"learning_rate": 4.999871664742093e-05,
"loss": 1.7493,
"step": 52
},
{
"epoch": 0.008331859537424591,
"grad_norm": 0.10760089010000229,
"learning_rate": 4.9998653281979095e-05,
"loss": 1.6292,
"step": 53
},
{
"epoch": 0.00848906443435713,
"grad_norm": 0.0932115837931633,
"learning_rate": 4.9998588389714634e-05,
"loss": 1.6608,
"step": 54
},
{
"epoch": 0.00864626933128967,
"grad_norm": 0.09837482124567032,
"learning_rate": 4.9998521970631504e-05,
"loss": 1.7834,
"step": 55
},
{
"epoch": 0.008803474228222209,
"grad_norm": 0.08872833847999573,
"learning_rate": 4.9998454024733775e-05,
"loss": 1.6484,
"step": 56
},
{
"epoch": 0.008960679125154749,
"grad_norm": 0.08829163759946823,
"learning_rate": 4.9998384552025577e-05,
"loss": 1.5913,
"step": 57
},
{
"epoch": 0.009117884022087288,
"grad_norm": 0.09087682515382767,
"learning_rate": 4.999831355251117e-05,
"loss": 1.6809,
"step": 58
},
{
"epoch": 0.009275088919019828,
"grad_norm": 0.08675853163003922,
"learning_rate": 4.9998241026194884e-05,
"loss": 1.6519,
"step": 59
},
{
"epoch": 0.009432293815952368,
"grad_norm": 0.08463481813669205,
"learning_rate": 4.999816697308114e-05,
"loss": 1.6234,
"step": 60
},
{
"epoch": 0.009589498712884906,
"grad_norm": 0.08403950184583664,
"learning_rate": 4.999809139317448e-05,
"loss": 1.6533,
"step": 61
},
{
"epoch": 0.009746703609817445,
"grad_norm": 0.08155622333288193,
"learning_rate": 4.99980142864795e-05,
"loss": 1.6726,
"step": 62
},
{
"epoch": 0.009903908506749985,
"grad_norm": 0.08056480437517166,
"learning_rate": 4.999793565300093e-05,
"loss": 1.5881,
"step": 63
},
{
"epoch": 0.010061113403682525,
"grad_norm": 0.07879023998975754,
"learning_rate": 4.999785549274355e-05,
"loss": 1.5568,
"step": 64
},
{
"epoch": 0.010218318300615065,
"grad_norm": 0.07828455418348312,
"learning_rate": 4.9997773805712265e-05,
"loss": 1.6464,
"step": 65
},
{
"epoch": 0.010375523197547604,
"grad_norm": 0.08054805546998978,
"learning_rate": 4.9997690591912075e-05,
"loss": 1.6213,
"step": 66
},
{
"epoch": 0.010532728094480142,
"grad_norm": 0.07610727101564407,
"learning_rate": 4.999760585134805e-05,
"loss": 1.5729,
"step": 67
},
{
"epoch": 0.010689932991412682,
"grad_norm": 0.07693428546190262,
"learning_rate": 4.999751958402537e-05,
"loss": 1.5444,
"step": 68
},
{
"epoch": 0.010847137888345222,
"grad_norm": 0.0810319185256958,
"learning_rate": 4.99974317899493e-05,
"loss": 1.7045,
"step": 69
},
{
"epoch": 0.011004342785277762,
"grad_norm": 0.07729896157979965,
"learning_rate": 4.9997342469125205e-05,
"loss": 1.6268,
"step": 70
},
{
"epoch": 0.011161547682210301,
"grad_norm": 0.07730107754468918,
"learning_rate": 4.999725162155855e-05,
"loss": 1.658,
"step": 71
},
{
"epoch": 0.011318752579142841,
"grad_norm": 0.08072328567504883,
"learning_rate": 4.9997159247254864e-05,
"loss": 1.5045,
"step": 72
},
{
"epoch": 0.011475957476075379,
"grad_norm": 0.08120577782392502,
"learning_rate": 4.9997065346219805e-05,
"loss": 1.568,
"step": 73
},
{
"epoch": 0.011633162373007919,
"grad_norm": 0.08131498098373413,
"learning_rate": 4.99969699184591e-05,
"loss": 1.6035,
"step": 74
},
{
"epoch": 0.011790367269940458,
"grad_norm": 0.08395873010158539,
"learning_rate": 4.9996872963978584e-05,
"loss": 1.5844,
"step": 75
},
{
"epoch": 0.011947572166872998,
"grad_norm": 0.08502068370580673,
"learning_rate": 4.999677448278417e-05,
"loss": 1.6661,
"step": 76
},
{
"epoch": 0.012104777063805538,
"grad_norm": 0.08467952907085419,
"learning_rate": 4.999667447488188e-05,
"loss": 1.5537,
"step": 77
},
{
"epoch": 0.012261981960738078,
"grad_norm": 0.19682182371616364,
"learning_rate": 4.999657294027782e-05,
"loss": 1.5051,
"step": 78
},
{
"epoch": 0.012419186857670617,
"grad_norm": 0.08586428314447403,
"learning_rate": 4.999646987897818e-05,
"loss": 1.565,
"step": 79
},
{
"epoch": 0.012576391754603155,
"grad_norm": 0.08156823366880417,
"learning_rate": 4.999636529098928e-05,
"loss": 1.6627,
"step": 80
},
{
"epoch": 0.012733596651535695,
"grad_norm": 0.08715341240167618,
"learning_rate": 4.9996259176317486e-05,
"loss": 1.5862,
"step": 81
},
{
"epoch": 0.012890801548468235,
"grad_norm": 0.09664586186408997,
"learning_rate": 4.999615153496928e-05,
"loss": 1.5741,
"step": 82
},
{
"epoch": 0.013048006445400774,
"grad_norm": 0.08438891172409058,
"learning_rate": 4.999604236695125e-05,
"loss": 1.5933,
"step": 83
},
{
"epoch": 0.013205211342333314,
"grad_norm": 0.08333732932806015,
"learning_rate": 4.999593167227006e-05,
"loss": 1.5904,
"step": 84
},
{
"epoch": 0.013362416239265854,
"grad_norm": 0.07945791631937027,
"learning_rate": 4.9995819450932455e-05,
"loss": 1.5763,
"step": 85
},
{
"epoch": 0.013519621136198392,
"grad_norm": 0.07682961225509644,
"learning_rate": 4.9995705702945304e-05,
"loss": 1.5197,
"step": 86
},
{
"epoch": 0.013676826033130932,
"grad_norm": 0.07547677308320999,
"learning_rate": 4.999559042831555e-05,
"loss": 1.6825,
"step": 87
},
{
"epoch": 0.013834030930063471,
"grad_norm": 0.07293456047773361,
"learning_rate": 4.999547362705025e-05,
"loss": 1.5466,
"step": 88
},
{
"epoch": 0.013991235826996011,
"grad_norm": 0.07730914652347565,
"learning_rate": 4.999535529915651e-05,
"loss": 1.5775,
"step": 89
},
{
"epoch": 0.01414844072392855,
"grad_norm": 0.07689664512872696,
"learning_rate": 4.9995235444641565e-05,
"loss": 1.5881,
"step": 90
},
{
"epoch": 0.01430564562086109,
"grad_norm": 0.07754997909069061,
"learning_rate": 4.999511406351275e-05,
"loss": 1.5037,
"step": 91
},
{
"epoch": 0.014462850517793628,
"grad_norm": 0.07229866087436676,
"learning_rate": 4.999499115577746e-05,
"loss": 1.5077,
"step": 92
},
{
"epoch": 0.014620055414726168,
"grad_norm": 0.07491567730903625,
"learning_rate": 4.9994866721443215e-05,
"loss": 1.5461,
"step": 93
},
{
"epoch": 0.014777260311658708,
"grad_norm": 0.07258685678243637,
"learning_rate": 4.9994740760517605e-05,
"loss": 1.5516,
"step": 94
},
{
"epoch": 0.014934465208591248,
"grad_norm": 0.07643327116966248,
"learning_rate": 4.9994613273008334e-05,
"loss": 1.6223,
"step": 95
},
{
"epoch": 0.015091670105523787,
"grad_norm": 0.0740588903427124,
"learning_rate": 4.999448425892318e-05,
"loss": 1.5322,
"step": 96
},
{
"epoch": 0.015248875002456327,
"grad_norm": 0.44172239303588867,
"learning_rate": 4.999435371827003e-05,
"loss": 1.5498,
"step": 97
},
{
"epoch": 0.015406079899388867,
"grad_norm": 0.0756363570690155,
"learning_rate": 4.999422165105684e-05,
"loss": 1.559,
"step": 98
},
{
"epoch": 0.015563284796321405,
"grad_norm": 0.07251248508691788,
"learning_rate": 4.99940880572917e-05,
"loss": 1.5903,
"step": 99
},
{
"epoch": 0.015720489693253945,
"grad_norm": 0.06931837648153305,
"learning_rate": 4.999395293698275e-05,
"loss": 1.4849,
"step": 100
},
{
"epoch": 0.015877694590186484,
"grad_norm": 0.07403590530157089,
"learning_rate": 4.9993816290138254e-05,
"loss": 1.5191,
"step": 101
},
{
"epoch": 0.016034899487119024,
"grad_norm": 0.07027724385261536,
"learning_rate": 4.999367811676655e-05,
"loss": 1.5655,
"step": 102
},
{
"epoch": 0.016192104384051564,
"grad_norm": 0.07320379465818405,
"learning_rate": 4.9993538416876093e-05,
"loss": 1.4869,
"step": 103
},
{
"epoch": 0.016349309280984103,
"grad_norm": 0.0726180374622345,
"learning_rate": 4.9993397190475396e-05,
"loss": 1.4629,
"step": 104
},
{
"epoch": 0.016506514177916643,
"grad_norm": 0.07542011886835098,
"learning_rate": 4.999325443757309e-05,
"loss": 1.5976,
"step": 105
},
{
"epoch": 0.016663719074849183,
"grad_norm": 0.07440067082643509,
"learning_rate": 4.9993110158177895e-05,
"loss": 1.5469,
"step": 106
},
{
"epoch": 0.016820923971781723,
"grad_norm": 0.07547372579574585,
"learning_rate": 4.999296435229863e-05,
"loss": 1.5328,
"step": 107
},
{
"epoch": 0.01697812886871426,
"grad_norm": 0.07532137632369995,
"learning_rate": 4.999281701994419e-05,
"loss": 1.6742,
"step": 108
},
{
"epoch": 0.0171353337656468,
"grad_norm": 0.07249438762664795,
"learning_rate": 4.999266816112358e-05,
"loss": 1.4799,
"step": 109
},
{
"epoch": 0.01729253866257934,
"grad_norm": 0.07399806380271912,
"learning_rate": 4.999251777584589e-05,
"loss": 1.5438,
"step": 110
},
{
"epoch": 0.017449743559511878,
"grad_norm": 0.08135057240724564,
"learning_rate": 4.99923658641203e-05,
"loss": 1.5608,
"step": 111
},
{
"epoch": 0.017606948456444418,
"grad_norm": 0.07508935779333115,
"learning_rate": 4.99922124259561e-05,
"loss": 1.5894,
"step": 112
},
{
"epoch": 0.017764153353376957,
"grad_norm": 0.07432372123003006,
"learning_rate": 4.999205746136265e-05,
"loss": 1.4818,
"step": 113
},
{
"epoch": 0.017921358250309497,
"grad_norm": 0.07694194465875626,
"learning_rate": 4.999190097034942e-05,
"loss": 1.5629,
"step": 114
},
{
"epoch": 0.018078563147242037,
"grad_norm": 0.07384433597326279,
"learning_rate": 4.999174295292597e-05,
"loss": 1.4829,
"step": 115
},
{
"epoch": 0.018235768044174577,
"grad_norm": 0.07152919471263885,
"learning_rate": 4.999158340910195e-05,
"loss": 1.4748,
"step": 116
},
{
"epoch": 0.018392972941107116,
"grad_norm": 0.07719701528549194,
"learning_rate": 4.999142233888709e-05,
"loss": 1.5524,
"step": 117
},
{
"epoch": 0.018550177838039656,
"grad_norm": 0.07540587335824966,
"learning_rate": 4.999125974229125e-05,
"loss": 1.4661,
"step": 118
},
{
"epoch": 0.018707382734972196,
"grad_norm": 0.0787581130862236,
"learning_rate": 4.9991095619324344e-05,
"loss": 1.6455,
"step": 119
},
{
"epoch": 0.018864587631904736,
"grad_norm": 0.07454577833414078,
"learning_rate": 4.999092996999641e-05,
"loss": 1.5083,
"step": 120
},
{
"epoch": 0.019021792528837272,
"grad_norm": 0.0751076266169548,
"learning_rate": 4.9990762794317545e-05,
"loss": 1.4874,
"step": 121
},
{
"epoch": 0.01917899742576981,
"grad_norm": 0.07733119279146194,
"learning_rate": 4.999059409229798e-05,
"loss": 1.6308,
"step": 122
},
{
"epoch": 0.01933620232270235,
"grad_norm": 0.07897089421749115,
"learning_rate": 4.999042386394802e-05,
"loss": 1.5906,
"step": 123
},
{
"epoch": 0.01949340721963489,
"grad_norm": 0.07758141309022903,
"learning_rate": 4.999025210927804e-05,
"loss": 1.5604,
"step": 124
},
{
"epoch": 0.01965061211656743,
"grad_norm": 0.07845707982778549,
"learning_rate": 4.9990078828298544e-05,
"loss": 1.5901,
"step": 125
},
{
"epoch": 0.01980781701349997,
"grad_norm": 0.0772818773984909,
"learning_rate": 4.998990402102012e-05,
"loss": 1.4516,
"step": 126
},
{
"epoch": 0.01996502191043251,
"grad_norm": 0.07795504480600357,
"learning_rate": 4.998972768745344e-05,
"loss": 1.4642,
"step": 127
},
{
"epoch": 0.02012222680736505,
"grad_norm": 0.0784008800983429,
"learning_rate": 4.998954982760926e-05,
"loss": 1.5936,
"step": 128
},
{
"epoch": 0.02027943170429759,
"grad_norm": 0.07791212201118469,
"learning_rate": 4.9989370441498465e-05,
"loss": 1.4705,
"step": 129
},
{
"epoch": 0.02043663660123013,
"grad_norm": 0.07785367220640182,
"learning_rate": 4.9989189529132004e-05,
"loss": 1.5085,
"step": 130
},
{
"epoch": 0.02059384149816267,
"grad_norm": 0.07916689664125443,
"learning_rate": 4.9989007090520925e-05,
"loss": 1.5365,
"step": 131
},
{
"epoch": 0.02075104639509521,
"grad_norm": 0.0775083601474762,
"learning_rate": 4.9988823125676367e-05,
"loss": 1.5286,
"step": 132
},
{
"epoch": 0.020908251292027745,
"grad_norm": 0.08110442757606506,
"learning_rate": 4.998863763460956e-05,
"loss": 1.5779,
"step": 133
},
{
"epoch": 0.021065456188960285,
"grad_norm": 0.0814640000462532,
"learning_rate": 4.998845061733185e-05,
"loss": 1.4778,
"step": 134
},
{
"epoch": 0.021222661085892824,
"grad_norm": 0.08069492131471634,
"learning_rate": 4.998826207385465e-05,
"loss": 1.5317,
"step": 135
},
{
"epoch": 0.021379865982825364,
"grad_norm": 0.07377774268388748,
"learning_rate": 4.998807200418948e-05,
"loss": 1.5258,
"step": 136
},
{
"epoch": 0.021537070879757904,
"grad_norm": 0.0787922590970993,
"learning_rate": 4.9987880408347945e-05,
"loss": 1.5185,
"step": 137
},
{
"epoch": 0.021694275776690444,
"grad_norm": 0.07662995159626007,
"learning_rate": 4.9987687286341745e-05,
"loss": 1.4637,
"step": 138
},
{
"epoch": 0.021851480673622983,
"grad_norm": 0.08528955280780792,
"learning_rate": 4.9987492638182676e-05,
"loss": 1.4776,
"step": 139
},
{
"epoch": 0.022008685570555523,
"grad_norm": 0.08089053630828857,
"learning_rate": 4.9987296463882626e-05,
"loss": 1.5885,
"step": 140
},
{
"epoch": 0.022165890467488063,
"grad_norm": 0.08029694855213165,
"learning_rate": 4.998709876345358e-05,
"loss": 1.4557,
"step": 141
},
{
"epoch": 0.022323095364420602,
"grad_norm": 0.07918502390384674,
"learning_rate": 4.9986899536907614e-05,
"loss": 1.4285,
"step": 142
},
{
"epoch": 0.022480300261353142,
"grad_norm": 0.0813126415014267,
"learning_rate": 4.998669878425689e-05,
"loss": 1.5958,
"step": 143
},
{
"epoch": 0.022637505158285682,
"grad_norm": 0.07935188710689545,
"learning_rate": 4.998649650551368e-05,
"loss": 1.5249,
"step": 144
},
{
"epoch": 0.02279471005521822,
"grad_norm": 0.08163304626941681,
"learning_rate": 4.9986292700690324e-05,
"loss": 1.483,
"step": 145
},
{
"epoch": 0.022951914952150758,
"grad_norm": 0.08277447521686554,
"learning_rate": 4.998608736979928e-05,
"loss": 1.6212,
"step": 146
},
{
"epoch": 0.023109119849083298,
"grad_norm": 0.08285827934741974,
"learning_rate": 4.9985880512853076e-05,
"loss": 1.4495,
"step": 147
},
{
"epoch": 0.023266324746015837,
"grad_norm": 0.082750603556633,
"learning_rate": 4.998567212986437e-05,
"loss": 1.4335,
"step": 148
},
{
"epoch": 0.023423529642948377,
"grad_norm": 0.07986058294773102,
"learning_rate": 4.998546222084587e-05,
"loss": 1.4704,
"step": 149
},
{
"epoch": 0.023580734539880917,
"grad_norm": 0.08105576783418655,
"learning_rate": 4.9985250785810396e-05,
"loss": 1.5183,
"step": 150
},
{
"epoch": 0.023737939436813457,
"grad_norm": 0.08202917128801346,
"learning_rate": 4.9985037824770866e-05,
"loss": 1.5423,
"step": 151
},
{
"epoch": 0.023895144333745996,
"grad_norm": 0.08937894552946091,
"learning_rate": 4.998482333774029e-05,
"loss": 1.5731,
"step": 152
},
{
"epoch": 0.024052349230678536,
"grad_norm": 0.08333728462457657,
"learning_rate": 4.9984607324731766e-05,
"loss": 1.5133,
"step": 153
},
{
"epoch": 0.024209554127611076,
"grad_norm": 0.08529175072908401,
"learning_rate": 4.998438978575849e-05,
"loss": 1.516,
"step": 154
},
{
"epoch": 0.024366759024543615,
"grad_norm": 0.08508963882923126,
"learning_rate": 4.998417072083374e-05,
"loss": 1.5646,
"step": 155
},
{
"epoch": 0.024523963921476155,
"grad_norm": 0.08971578627824783,
"learning_rate": 4.99839501299709e-05,
"loss": 1.4714,
"step": 156
},
{
"epoch": 0.024681168818408695,
"grad_norm": 0.08380109816789627,
"learning_rate": 4.998372801318345e-05,
"loss": 1.4476,
"step": 157
},
{
"epoch": 0.024838373715341235,
"grad_norm": 0.08533143252134323,
"learning_rate": 4.9983504370484945e-05,
"loss": 1.4866,
"step": 158
},
{
"epoch": 0.02499557861227377,
"grad_norm": 0.08318709582090378,
"learning_rate": 4.998327920188905e-05,
"loss": 1.5274,
"step": 159
},
{
"epoch": 0.02515278350920631,
"grad_norm": 0.08486370742321014,
"learning_rate": 4.9983052507409525e-05,
"loss": 1.4713,
"step": 160
},
{
"epoch": 0.02515278350920631,
"eval_loss": 1.5136528015136719,
"eval_runtime": 2318.8971,
"eval_samples_per_second": 3.992,
"eval_steps_per_second": 1.996,
"step": 160
}
],
"logging_steps": 1,
"max_steps": 12722,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 160,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.195809495062938e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}