{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 476,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004201680672268907,
"grad_norm": 6.843970053848994,
"learning_rate": 4.1666666666666667e-07,
"loss": 1.4834,
"step": 1
},
{
"epoch": 0.02100840336134454,
"grad_norm": 5.888026413386213,
"learning_rate": 2.0833333333333334e-06,
"loss": 1.4466,
"step": 5
},
{
"epoch": 0.04201680672268908,
"grad_norm": 1.672754748671725,
"learning_rate": 4.166666666666667e-06,
"loss": 1.29,
"step": 10
},
{
"epoch": 0.06302521008403361,
"grad_norm": 1.2549304333706048,
"learning_rate": 6.25e-06,
"loss": 1.265,
"step": 15
},
{
"epoch": 0.08403361344537816,
"grad_norm": 1.0804048514467623,
"learning_rate": 8.333333333333334e-06,
"loss": 1.218,
"step": 20
},
{
"epoch": 0.10504201680672269,
"grad_norm": 1.0012856727023651,
"learning_rate": 1.0416666666666668e-05,
"loss": 1.1874,
"step": 25
},
{
"epoch": 0.12605042016806722,
"grad_norm": 1.0428770541540702,
"learning_rate": 1.25e-05,
"loss": 1.1738,
"step": 30
},
{
"epoch": 0.14705882352941177,
"grad_norm": 0.8971861131120643,
"learning_rate": 1.4583333333333333e-05,
"loss": 1.1823,
"step": 35
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.8761555093987056,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.1509,
"step": 40
},
{
"epoch": 0.18907563025210083,
"grad_norm": 0.9022691166860466,
"learning_rate": 1.8750000000000002e-05,
"loss": 1.1738,
"step": 45
},
{
"epoch": 0.21008403361344538,
"grad_norm": 0.8595698667749491,
"learning_rate": 1.9998922457512608e-05,
"loss": 1.1827,
"step": 50
},
{
"epoch": 0.23109243697478993,
"grad_norm": 0.8131261419160233,
"learning_rate": 1.9986802771267902e-05,
"loss": 1.1595,
"step": 55
},
{
"epoch": 0.25210084033613445,
"grad_norm": 0.9352189654627542,
"learning_rate": 1.996123284790336e-05,
"loss": 1.1751,
"step": 60
},
{
"epoch": 0.27310924369747897,
"grad_norm": 0.8183133797776339,
"learning_rate": 1.9922247125020307e-05,
"loss": 1.1368,
"step": 65
},
{
"epoch": 0.29411764705882354,
"grad_norm": 0.7919702228536482,
"learning_rate": 1.9869898108633834e-05,
"loss": 1.1576,
"step": 70
},
{
"epoch": 0.31512605042016806,
"grad_norm": 0.809220998201082,
"learning_rate": 1.9804256302457653e-05,
"loss": 1.1214,
"step": 75
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.7934942224774806,
"learning_rate": 1.972541011294959e-05,
"loss": 1.1665,
"step": 80
},
{
"epoch": 0.35714285714285715,
"grad_norm": 0.8016460068505575,
"learning_rate": 1.963346573024568e-05,
"loss": 1.14,
"step": 85
},
{
"epoch": 0.37815126050420167,
"grad_norm": 0.7959323509300636,
"learning_rate": 1.952854698514318e-05,
"loss": 1.1427,
"step": 90
},
{
"epoch": 0.39915966386554624,
"grad_norm": 0.834564941958375,
"learning_rate": 1.9410795182325113e-05,
"loss": 1.1582,
"step": 95
},
{
"epoch": 0.42016806722689076,
"grad_norm": 0.8021887426628376,
"learning_rate": 1.9280368910050943e-05,
"loss": 1.1425,
"step": 100
},
{
"epoch": 0.4411764705882353,
"grad_norm": 0.8535114934490843,
"learning_rate": 1.9137443826569758e-05,
"loss": 1.1507,
"step": 105
},
{
"epoch": 0.46218487394957986,
"grad_norm": 0.8097650803240342,
"learning_rate": 1.898221242354353e-05,
"loss": 1.1376,
"step": 110
},
{
"epoch": 0.4831932773109244,
"grad_norm": 0.854806698328117,
"learning_rate": 1.881488376679912e-05,
"loss": 1.1538,
"step": 115
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.7411750487736666,
"learning_rate": 1.8635683214758213e-05,
"loss": 1.1548,
"step": 120
},
{
"epoch": 0.5252100840336135,
"grad_norm": 0.7470899750611266,
"learning_rate": 1.8444852114924325e-05,
"loss": 1.1285,
"step": 125
},
{
"epoch": 0.5462184873949579,
"grad_norm": 0.7855863245637623,
"learning_rate": 1.8242647478835717e-05,
"loss": 1.1428,
"step": 130
},
{
"epoch": 0.5672268907563025,
"grad_norm": 0.74562538616598,
"learning_rate": 1.8029341635921985e-05,
"loss": 1.1303,
"step": 135
},
{
"epoch": 0.5882352941176471,
"grad_norm": 0.751956755942676,
"learning_rate": 1.780522186673046e-05,
"loss": 1.157,
"step": 140
},
{
"epoch": 0.6092436974789915,
"grad_norm": 0.7490360146750048,
"learning_rate": 1.7570590016016455e-05,
"loss": 1.1245,
"step": 145
},
{
"epoch": 0.6302521008403361,
"grad_norm": 0.7962243135552697,
"learning_rate": 1.7325762086218415e-05,
"loss": 1.1577,
"step": 150
},
{
"epoch": 0.6512605042016807,
"grad_norm": 0.7647261906536346,
"learning_rate": 1.7071067811865477e-05,
"loss": 1.1378,
"step": 155
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.7569274861307836,
"learning_rate": 1.680685021549063e-05,
"loss": 1.1249,
"step": 160
},
{
"epoch": 0.6932773109243697,
"grad_norm": 0.7606095560211281,
"learning_rate": 1.6533465145647598e-05,
"loss": 1.1405,
"step": 165
},
{
"epoch": 0.7142857142857143,
"grad_norm": 0.7797117802105004,
"learning_rate": 1.6251280797653606e-05,
"loss": 1.1432,
"step": 170
},
{
"epoch": 0.7352941176470589,
"grad_norm": 0.7411898433127342,
"learning_rate": 1.5960677217703512e-05,
"loss": 1.1341,
"step": 175
},
{
"epoch": 0.7563025210084033,
"grad_norm": 0.7937630835311652,
"learning_rate": 1.566204579102317e-05,
"loss": 1.1771,
"step": 180
},
{
"epoch": 0.7773109243697479,
"grad_norm": 0.7831403718494921,
"learning_rate": 1.5355788714751378e-05,
"loss": 1.1346,
"step": 185
},
{
"epoch": 0.7983193277310925,
"grad_norm": 0.7670962454375009,
"learning_rate": 1.5042318456260305e-05,
"loss": 1.1411,
"step": 190
},
{
"epoch": 0.819327731092437,
"grad_norm": 0.8187182133488576,
"learning_rate": 1.4722057197643986e-05,
"loss": 1.1287,
"step": 195
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.7707658033677821,
"learning_rate": 1.4395436267123017e-05,
"loss": 1.1686,
"step": 200
},
{
"epoch": 0.8613445378151261,
"grad_norm": 0.7929027157964723,
"learning_rate": 1.40628955581312e-05,
"loss": 1.1095,
"step": 205
},
{
"epoch": 0.8823529411764706,
"grad_norm": 0.7671917924588447,
"learning_rate": 1.3724882936866596e-05,
"loss": 1.1532,
"step": 210
},
{
"epoch": 0.9033613445378151,
"grad_norm": 0.743350743534685,
"learning_rate": 1.3381853639104817e-05,
"loss": 1.1177,
"step": 215
},
{
"epoch": 0.9243697478991597,
"grad_norm": 0.7994542205340992,
"learning_rate": 1.3034269657086993e-05,
"loss": 1.0984,
"step": 220
},
{
"epoch": 0.9453781512605042,
"grad_norm": 0.8028923468414233,
"learning_rate": 1.2682599117308083e-05,
"loss": 1.1225,
"step": 225
},
{
"epoch": 0.9663865546218487,
"grad_norm": 0.7701315329198188,
"learning_rate": 1.2327315650043605e-05,
"loss": 1.1225,
"step": 230
},
{
"epoch": 0.9873949579831933,
"grad_norm": 0.749625186589024,
"learning_rate": 1.1968897751463841e-05,
"loss": 1.1205,
"step": 235
},
{
"epoch": 1.0,
"eval_loss": 1.122010350227356,
"eval_runtime": 4.1604,
"eval_samples_per_second": 35.574,
"eval_steps_per_second": 0.721,
"step": 238
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.8685596492224139,
"learning_rate": 1.1607828139194683e-05,
"loss": 1.0392,
"step": 240
},
{
"epoch": 1.0294117647058822,
"grad_norm": 0.8195681790245358,
"learning_rate": 1.1244593102192961e-05,
"loss": 0.8804,
"step": 245
},
{
"epoch": 1.050420168067227,
"grad_norm": 0.7862106935161252,
"learning_rate": 1.0879681845811964e-05,
"loss": 0.8759,
"step": 250
},
{
"epoch": 1.0714285714285714,
"grad_norm": 0.7695442905422751,
"learning_rate": 1.0513585832939104e-05,
"loss": 0.8921,
"step": 255
},
{
"epoch": 1.092436974789916,
"grad_norm": 0.7197984706671763,
"learning_rate": 1.0146798122093167e-05,
"loss": 0.8465,
"step": 260
},
{
"epoch": 1.1134453781512605,
"grad_norm": 0.7436730962206078,
"learning_rate": 9.779812703372538e-06,
"loss": 0.8983,
"step": 265
},
{
"epoch": 1.134453781512605,
"grad_norm": 0.7419938040761785,
"learning_rate": 9.41312383314878e-06,
"loss": 0.86,
"step": 270
},
{
"epoch": 1.1554621848739495,
"grad_norm": 0.7727780995520752,
"learning_rate": 9.047225368401622e-06,
"loss": 0.8751,
"step": 275
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.7184055924425042,
"learning_rate": 8.682610101591813e-06,
"loss": 0.8633,
"step": 280
},
{
"epoch": 1.1974789915966386,
"grad_norm": 0.7424505911824795,
"learning_rate": 8.319769096967681e-06,
"loss": 0.881,
"step": 285
},
{
"epoch": 1.2184873949579833,
"grad_norm": 0.7831013281249656,
"learning_rate": 7.95919102919926e-06,
"loss": 0.8758,
"step": 290
},
{
"epoch": 1.2394957983193278,
"grad_norm": 0.7218415137209162,
"learning_rate": 7.601361525230713e-06,
"loss": 0.848,
"step": 295
},
{
"epoch": 1.2605042016806722,
"grad_norm": 0.727646718952851,
"learning_rate": 7.246762510237404e-06,
"loss": 0.8928,
"step": 300
},
{
"epoch": 1.2815126050420167,
"grad_norm": 0.7318861363154572,
"learning_rate": 6.89587155856853e-06,
"loss": 0.8861,
"step": 305
},
{
"epoch": 1.3025210084033614,
"grad_norm": 0.7850969062028121,
"learning_rate": 6.549161250549474e-06,
"loss": 0.8785,
"step": 310
},
{
"epoch": 1.3235294117647058,
"grad_norm": 0.7424275388998128,
"learning_rate": 6.207098536010083e-06,
"loss": 0.8732,
"step": 315
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.7785063907714577,
"learning_rate": 5.8701441053961185e-06,
"loss": 0.8923,
"step": 320
},
{
"epoch": 1.365546218487395,
"grad_norm": 0.7101355435294052,
"learning_rate": 5.538751769310842e-06,
"loss": 0.8756,
"step": 325
},
{
"epoch": 1.3865546218487395,
"grad_norm": 0.7360750873465238,
"learning_rate": 5.213367847322408e-06,
"loss": 0.8657,
"step": 330
},
{
"epoch": 1.407563025210084,
"grad_norm": 0.7493101168945231,
"learning_rate": 4.894430566860144e-06,
"loss": 0.8565,
"step": 335
},
{
"epoch": 1.4285714285714286,
"grad_norm": 0.7246837540339062,
"learning_rate": 4.58236947300939e-06,
"loss": 0.867,
"step": 340
},
{
"epoch": 1.449579831932773,
"grad_norm": 0.7052644676750524,
"learning_rate": 4.277604849999666e-06,
"loss": 0.8594,
"step": 345
},
{
"epoch": 1.4705882352941178,
"grad_norm": 0.7349952399246719,
"learning_rate": 3.980547155165429e-06,
"loss": 0.8533,
"step": 350
},
{
"epoch": 1.4915966386554622,
"grad_norm": 0.7284230275061265,
"learning_rate": 3.691596466141666e-06,
"loss": 0.9056,
"step": 355
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.7193222649134673,
"learning_rate": 3.4111419420388904e-06,
"loss": 0.8662,
"step": 360
},
{
"epoch": 1.5336134453781511,
"grad_norm": 0.731566479431413,
"learning_rate": 3.139561299323206e-06,
"loss": 0.8587,
"step": 365
},
{
"epoch": 1.5546218487394958,
"grad_norm": 0.7391733181416821,
"learning_rate": 2.877220303107373e-06,
"loss": 0.8602,
"step": 370
},
{
"epoch": 1.5756302521008403,
"grad_norm": 0.7390429200298151,
"learning_rate": 2.624472274537925e-06,
"loss": 0.8701,
"step": 375
},
{
"epoch": 1.596638655462185,
"grad_norm": 0.741782430807988,
"learning_rate": 2.381657614941858e-06,
"loss": 0.8588,
"step": 380
},
{
"epoch": 1.6176470588235294,
"grad_norm": 0.7082521628110617,
"learning_rate": 2.149103347373753e-06,
"loss": 0.8738,
"step": 385
},
{
"epoch": 1.638655462184874,
"grad_norm": 0.7382222604301619,
"learning_rate": 1.927122676180756e-06,
"loss": 0.8501,
"step": 390
},
{
"epoch": 1.6596638655462184,
"grad_norm": 0.7170057238227614,
"learning_rate": 1.7160145651786098e-06,
"loss": 0.8628,
"step": 395
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.7735455354416457,
"learning_rate": 1.516063335006851e-06,
"loss": 0.8799,
"step": 400
},
{
"epoch": 1.7016806722689075,
"grad_norm": 0.7544008999017865,
"learning_rate": 1.3275382802054704e-06,
"loss": 0.8584,
"step": 405
},
{
"epoch": 1.7226890756302522,
"grad_norm": 0.7234290509257019,
"learning_rate": 1.1506933065287062e-06,
"loss": 0.8878,
"step": 410
},
{
"epoch": 1.7436974789915967,
"grad_norm": 0.7179219619602794,
"learning_rate": 9.85766588984508e-07,
"loss": 0.8701,
"step": 415
},
{
"epoch": 1.7647058823529411,
"grad_norm": 0.710095564820816,
"learning_rate": 8.329802510601559e-07,
"loss": 0.8732,
"step": 420
},
{
"epoch": 1.7857142857142856,
"grad_norm": 0.7465304276755028,
"learning_rate": 6.925400655661118e-07,
"loss": 0.897,
"step": 425
},
{
"epoch": 1.8067226890756303,
"grad_norm": 0.7039905037617595,
"learning_rate": 5.646351775009617e-07,
"loss": 0.8484,
"step": 430
},
{
"epoch": 1.8277310924369747,
"grad_norm": 0.7170317726401236,
"learning_rate": 4.494378493107232e-07,
"loss": 0.8518,
"step": 435
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.7399232574218224,
"learning_rate": 3.471032288855869e-07,
"loss": 0.8629,
"step": 440
},
{
"epoch": 1.8697478991596639,
"grad_norm": 0.7309518709786924,
"learning_rate": 2.577691406065708e-07,
"loss": 0.8723,
"step": 445
},
{
"epoch": 1.8907563025210083,
"grad_norm": 0.7524923842002688,
"learning_rate": 1.8155589972348453e-07,
"loss": 0.8655,
"step": 450
},
{
"epoch": 1.9117647058823528,
"grad_norm": 0.7470763175141618,
"learning_rate": 1.1856615031422214e-07,
"loss": 0.8656,
"step": 455
},
{
"epoch": 1.9327731092436975,
"grad_norm": 0.7413561376156461,
"learning_rate": 6.888472704359661e-08,
"loss": 0.8555,
"step": 460
},
{
"epoch": 1.9537815126050422,
"grad_norm": 0.725024144075873,
"learning_rate": 3.2578540907926e-08,
"loss": 0.8683,
"step": 465
},
{
"epoch": 1.9747899159663866,
"grad_norm": 0.7137402433851012,
"learning_rate": 9.696489119221942e-09,
"loss": 0.8736,
"step": 470
},
{
"epoch": 1.995798319327731,
"grad_norm": 0.7264722107888033,
"learning_rate": 2.6938925037689467e-10,
"loss": 0.8776,
"step": 475
},
{
"epoch": 2.0,
"eval_loss": 1.1365480422973633,
"eval_runtime": 5.9679,
"eval_samples_per_second": 24.799,
"eval_steps_per_second": 0.503,
"step": 476
},
{
"epoch": 2.0,
"step": 476,
"total_flos": 124772558045184.0,
"train_loss": 1.015126781929441,
"train_runtime": 3305.096,
"train_samples_per_second": 9.214,
"train_steps_per_second": 0.144
}
],
"logging_steps": 5,
"max_steps": 476,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 124772558045184.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}