Training in progress, step 200, checkpoint
d42c447 verified
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11273957158962795,
"eval_steps": 17,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005636978579481398,
"eval_loss": 6.74771785736084,
"eval_runtime": 83.4995,
"eval_samples_per_second": 35.785,
"eval_steps_per_second": 4.479,
"step": 1
},
{
"epoch": 0.0016910935738444193,
"grad_norm": 26.02037811279297,
"learning_rate": 3e-05,
"loss": 6.6508,
"step": 3
},
{
"epoch": 0.0033821871476888386,
"grad_norm": 18.14887809753418,
"learning_rate": 6e-05,
"loss": 4.3309,
"step": 6
},
{
"epoch": 0.005073280721533258,
"grad_norm": 3.2000560760498047,
"learning_rate": 9e-05,
"loss": 1.1379,
"step": 9
},
{
"epoch": 0.006764374295377677,
"grad_norm": 7.714017391204834,
"learning_rate": 9.997266286704631e-05,
"loss": 0.787,
"step": 12
},
{
"epoch": 0.008455467869222097,
"grad_norm": 1.9846014976501465,
"learning_rate": 9.98292246503335e-05,
"loss": 0.705,
"step": 15
},
{
"epoch": 0.009582863585118376,
"eval_loss": 0.700009822845459,
"eval_runtime": 83.9952,
"eval_samples_per_second": 35.573,
"eval_steps_per_second": 4.453,
"step": 17
},
{
"epoch": 0.010146561443066516,
"grad_norm": 1.9030941724777222,
"learning_rate": 9.956320346634876e-05,
"loss": 0.7221,
"step": 18
},
{
"epoch": 0.011837655016910935,
"grad_norm": 2.7882025241851807,
"learning_rate": 9.917525374361912e-05,
"loss": 0.7034,
"step": 21
},
{
"epoch": 0.013528748590755355,
"grad_norm": 1.3202898502349854,
"learning_rate": 9.86663298624003e-05,
"loss": 0.7214,
"step": 24
},
{
"epoch": 0.015219842164599774,
"grad_norm": 0.6887256503105164,
"learning_rate": 9.803768380684242e-05,
"loss": 0.6916,
"step": 27
},
{
"epoch": 0.016910935738444193,
"grad_norm": 2.1117467880249023,
"learning_rate": 9.729086208503174e-05,
"loss": 0.7063,
"step": 30
},
{
"epoch": 0.018602029312288614,
"grad_norm": 0.500767707824707,
"learning_rate": 9.642770192448536e-05,
"loss": 0.7011,
"step": 33
},
{
"epoch": 0.019165727170236752,
"eval_loss": 0.6982219219207764,
"eval_runtime": 84.042,
"eval_samples_per_second": 35.554,
"eval_steps_per_second": 4.45,
"step": 34
},
{
"epoch": 0.020293122886133032,
"grad_norm": 0.8041173815727234,
"learning_rate": 9.545032675245813e-05,
"loss": 0.6928,
"step": 36
},
{
"epoch": 0.021984216459977453,
"grad_norm": 0.7337102293968201,
"learning_rate": 9.43611409721806e-05,
"loss": 0.7076,
"step": 39
},
{
"epoch": 0.02367531003382187,
"grad_norm": 0.6326396465301514,
"learning_rate": 9.316282404787871e-05,
"loss": 0.7006,
"step": 42
},
{
"epoch": 0.02536640360766629,
"grad_norm": 0.4257495403289795,
"learning_rate": 9.185832391312644e-05,
"loss": 0.7013,
"step": 45
},
{
"epoch": 0.02705749718151071,
"grad_norm": 0.2765394151210785,
"learning_rate": 9.045084971874738e-05,
"loss": 0.6911,
"step": 48
},
{
"epoch": 0.02874859075535513,
"grad_norm": 0.4698239862918854,
"learning_rate": 8.894386393810563e-05,
"loss": 0.6933,
"step": 51
},
{
"epoch": 0.02874859075535513,
"eval_loss": 0.6953115463256836,
"eval_runtime": 84.0384,
"eval_samples_per_second": 35.555,
"eval_steps_per_second": 4.45,
"step": 51
},
{
"epoch": 0.030439684329199548,
"grad_norm": 0.7247682213783264,
"learning_rate": 8.73410738492077e-05,
"loss": 0.7067,
"step": 54
},
{
"epoch": 0.032130777903043965,
"grad_norm": 0.6278282999992371,
"learning_rate": 8.564642241456986e-05,
"loss": 0.697,
"step": 57
},
{
"epoch": 0.033821871476888386,
"grad_norm": 0.45123350620269775,
"learning_rate": 8.386407858128706e-05,
"loss": 0.6992,
"step": 60
},
{
"epoch": 0.03551296505073281,
"grad_norm": 0.5516684651374817,
"learning_rate": 8.199842702516583e-05,
"loss": 0.683,
"step": 63
},
{
"epoch": 0.03720405862457723,
"grad_norm": 1.170713186264038,
"learning_rate": 8.005405736415126e-05,
"loss": 0.7026,
"step": 66
},
{
"epoch": 0.038331454340473504,
"eval_loss": 0.696951150894165,
"eval_runtime": 84.0534,
"eval_samples_per_second": 35.549,
"eval_steps_per_second": 4.45,
"step": 68
},
{
"epoch": 0.03889515219842165,
"grad_norm": 1.1506222486495972,
"learning_rate": 7.803575286758364e-05,
"loss": 0.7467,
"step": 69
},
{
"epoch": 0.040586245772266064,
"grad_norm": 0.87104731798172,
"learning_rate": 7.594847868906076e-05,
"loss": 0.6972,
"step": 72
},
{
"epoch": 0.042277339346110485,
"grad_norm": 0.43808355927467346,
"learning_rate": 7.379736965185368e-05,
"loss": 0.6875,
"step": 75
},
{
"epoch": 0.043968432919954906,
"grad_norm": 0.8352026343345642,
"learning_rate": 7.158771761692464e-05,
"loss": 0.6814,
"step": 78
},
{
"epoch": 0.04565952649379933,
"grad_norm": 1.1601524353027344,
"learning_rate": 6.932495846462261e-05,
"loss": 0.7048,
"step": 81
},
{
"epoch": 0.04735062006764374,
"grad_norm": 1.253566861152649,
"learning_rate": 6.701465872208216e-05,
"loss": 0.6852,
"step": 84
},
{
"epoch": 0.047914317925591886,
"eval_loss": 0.6991522312164307,
"eval_runtime": 84.042,
"eval_samples_per_second": 35.554,
"eval_steps_per_second": 4.45,
"step": 85
},
{
"epoch": 0.04904171364148816,
"grad_norm": 1.2970808744430542,
"learning_rate": 6.466250186922325e-05,
"loss": 0.703,
"step": 87
},
{
"epoch": 0.05073280721533258,
"grad_norm": 0.752733588218689,
"learning_rate": 6.227427435703997e-05,
"loss": 0.6913,
"step": 90
},
{
"epoch": 0.052423900789177004,
"grad_norm": 0.5592336654663086,
"learning_rate": 5.985585137257401e-05,
"loss": 0.684,
"step": 93
},
{
"epoch": 0.05411499436302142,
"grad_norm": 0.7148630023002625,
"learning_rate": 5.74131823855921e-05,
"loss": 0.7043,
"step": 96
},
{
"epoch": 0.05580608793686584,
"grad_norm": 0.5819842219352722,
"learning_rate": 5.495227651252315e-05,
"loss": 0.6832,
"step": 99
},
{
"epoch": 0.05749718151071026,
"grad_norm": 0.6519930958747864,
"learning_rate": 5.247918773366112e-05,
"loss": 0.6876,
"step": 102
},
{
"epoch": 0.05749718151071026,
"eval_loss": 0.6744697690010071,
"eval_runtime": 84.0459,
"eval_samples_per_second": 35.552,
"eval_steps_per_second": 4.45,
"step": 102
},
{
"epoch": 0.05918827508455468,
"grad_norm": 1.0141414403915405,
"learning_rate": 5e-05,
"loss": 0.6754,
"step": 105
},
{
"epoch": 0.060879368658399095,
"grad_norm": 2.331167697906494,
"learning_rate": 4.7520812266338885e-05,
"loss": 0.6436,
"step": 108
},
{
"epoch": 0.06257046223224352,
"grad_norm": 3.2144436836242676,
"learning_rate": 4.504772348747687e-05,
"loss": 0.6441,
"step": 111
},
{
"epoch": 0.06426155580608793,
"grad_norm": 3.9336791038513184,
"learning_rate": 4.2586817614407895e-05,
"loss": 0.5887,
"step": 114
},
{
"epoch": 0.06595264937993235,
"grad_norm": 2.9044606685638428,
"learning_rate": 4.0144148627425993e-05,
"loss": 0.6684,
"step": 117
},
{
"epoch": 0.06708004509582864,
"eval_loss": 0.6285394430160522,
"eval_runtime": 84.0476,
"eval_samples_per_second": 35.551,
"eval_steps_per_second": 4.45,
"step": 119
},
{
"epoch": 0.06764374295377677,
"grad_norm": 3.0893630981445312,
"learning_rate": 3.772572564296005e-05,
"loss": 0.6821,
"step": 120
},
{
"epoch": 0.0693348365276212,
"grad_norm": 2.5429139137268066,
"learning_rate": 3.533749813077677e-05,
"loss": 0.5834,
"step": 123
},
{
"epoch": 0.07102593010146561,
"grad_norm": 3.898667812347412,
"learning_rate": 3.298534127791785e-05,
"loss": 0.6205,
"step": 126
},
{
"epoch": 0.07271702367531004,
"grad_norm": 3.492971897125244,
"learning_rate": 3.0675041535377405e-05,
"loss": 0.551,
"step": 129
},
{
"epoch": 0.07440811724915446,
"grad_norm": 3.596611261367798,
"learning_rate": 2.8412282383075363e-05,
"loss": 0.6237,
"step": 132
},
{
"epoch": 0.07609921082299888,
"grad_norm": 5.098214626312256,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.6905,
"step": 135
},
{
"epoch": 0.07666290868094701,
"eval_loss": 0.6011126637458801,
"eval_runtime": 84.0372,
"eval_samples_per_second": 35.556,
"eval_steps_per_second": 4.45,
"step": 136
},
{
"epoch": 0.0777903043968433,
"grad_norm": 3.057682514190674,
"learning_rate": 2.405152131093926e-05,
"loss": 0.614,
"step": 138
},
{
"epoch": 0.0794813979706877,
"grad_norm": 2.849776268005371,
"learning_rate": 2.196424713241637e-05,
"loss": 0.55,
"step": 141
},
{
"epoch": 0.08117249154453213,
"grad_norm": 4.259140491485596,
"learning_rate": 1.9945942635848748e-05,
"loss": 0.5797,
"step": 144
},
{
"epoch": 0.08286358511837655,
"grad_norm": 3.7241506576538086,
"learning_rate": 1.800157297483417e-05,
"loss": 0.6275,
"step": 147
},
{
"epoch": 0.08455467869222097,
"grad_norm": 2.4338207244873047,
"learning_rate": 1.6135921418712956e-05,
"loss": 0.577,
"step": 150
},
{
"epoch": 0.08624577226606539,
"grad_norm": 2.8285350799560547,
"learning_rate": 1.435357758543015e-05,
"loss": 0.6133,
"step": 153
},
{
"epoch": 0.08624577226606539,
"eval_loss": 0.5718837380409241,
"eval_runtime": 84.0552,
"eval_samples_per_second": 35.548,
"eval_steps_per_second": 4.449,
"step": 153
},
{
"epoch": 0.08793686583990981,
"grad_norm": 3.2385404109954834,
"learning_rate": 1.2658926150792322e-05,
"loss": 0.607,
"step": 156
},
{
"epoch": 0.08962795941375423,
"grad_norm": 3.1674704551696777,
"learning_rate": 1.1056136061894384e-05,
"loss": 0.5506,
"step": 159
},
{
"epoch": 0.09131905298759865,
"grad_norm": 3.280355930328369,
"learning_rate": 9.549150281252633e-06,
"loss": 0.5688,
"step": 162
},
{
"epoch": 0.09301014656144306,
"grad_norm": 3.4832043647766113,
"learning_rate": 8.141676086873572e-06,
"loss": 0.6335,
"step": 165
},
{
"epoch": 0.09470124013528748,
"grad_norm": 4.496728420257568,
"learning_rate": 6.837175952121306e-06,
"loss": 0.5274,
"step": 168
},
{
"epoch": 0.09582863585118377,
"eval_loss": 0.5605720281600952,
"eval_runtime": 84.0529,
"eval_samples_per_second": 35.549,
"eval_steps_per_second": 4.45,
"step": 170
},
{
"epoch": 0.0963923337091319,
"grad_norm": 2.9714503288269043,
"learning_rate": 5.6388590278194096e-06,
"loss": 0.5568,
"step": 171
},
{
"epoch": 0.09808342728297632,
"grad_norm": 2.8310670852661133,
"learning_rate": 4.549673247541875e-06,
"loss": 0.6169,
"step": 174
},
{
"epoch": 0.09977452085682074,
"grad_norm": 3.3684370517730713,
"learning_rate": 3.5722980755146517e-06,
"loss": 0.4749,
"step": 177
},
{
"epoch": 0.10146561443066517,
"grad_norm": 3.3378119468688965,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.5579,
"step": 180
},
{
"epoch": 0.10315670800450959,
"grad_norm": 3.996790885925293,
"learning_rate": 1.962316193157593e-06,
"loss": 0.5408,
"step": 183
},
{
"epoch": 0.10484780157835401,
"grad_norm": 3.091064453125,
"learning_rate": 1.333670137599713e-06,
"loss": 0.5679,
"step": 186
},
{
"epoch": 0.10541149943630214,
"eval_loss": 0.5559670925140381,
"eval_runtime": 84.033,
"eval_samples_per_second": 35.557,
"eval_steps_per_second": 4.451,
"step": 187
},
{
"epoch": 0.10653889515219842,
"grad_norm": 3.5897178649902344,
"learning_rate": 8.247462563808817e-07,
"loss": 0.5888,
"step": 189
},
{
"epoch": 0.10822998872604284,
"grad_norm": 4.172910690307617,
"learning_rate": 4.367965336512403e-07,
"loss": 0.654,
"step": 192
},
{
"epoch": 0.10992108229988726,
"grad_norm": 7.166314125061035,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.6831,
"step": 195
},
{
"epoch": 0.11161217587373168,
"grad_norm": 3.00948429107666,
"learning_rate": 2.7337132953697554e-08,
"loss": 0.5601,
"step": 198
}
],
"logging_steps": 3,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 17,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.4428946137088e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
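
For reference, a minimal sketch of how this trainer_state.json could be inspected offline to separate the training-loss entries from the evaluation entries in log_history. The checkpoint path (checkpoint-200/) is an assumption about where the file was saved; only the Python standard library is used.

import json

# Hypothetical path: adjust to wherever the checkpoint directory lives.
STATE_PATH = "checkpoint-200/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Training-loss entries are logged every `logging_steps` (3) steps,
# evaluation entries every `eval_steps` (17) steps.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}  epoch: {state['epoch']:.4f}")
print(f"train loss: first={train_logs[0]['loss']:.4f}  last={train_logs[-1]['loss']:.4f}")
print(f"eval loss:  first={eval_logs[0]['eval_loss']:.4f}  last={eval_logs[-1]['eval_loss']:.4f}")

Run against this checkpoint, the script would report the drop in eval_loss from roughly 6.75 at step 1 to about 0.556 at step 187, matching the log_history above.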