qwen3-sft-scenario-to-code / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.9521472392638035,
"eval_steps": 51,
"global_step": 505,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05889570552147239,
"grad_norm": 0.17814360558986664,
"learning_rate": 9.803921568627451e-06,
"loss": 0.1429,
"step": 6
},
{
"epoch": 0.11779141104294479,
"grad_norm": 0.19341157376766205,
"learning_rate": 2.1568627450980395e-05,
"loss": 0.1421,
"step": 12
},
{
"epoch": 0.17668711656441718,
"grad_norm": 0.21557286381721497,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.1137,
"step": 18
},
{
"epoch": 0.23558282208588957,
"grad_norm": 0.0923461839556694,
"learning_rate": 4.5098039215686275e-05,
"loss": 0.0903,
"step": 24
},
{
"epoch": 0.294478527607362,
"grad_norm": 0.0747547447681427,
"learning_rate": 5.6862745098039215e-05,
"loss": 0.0541,
"step": 30
},
{
"epoch": 0.35337423312883437,
"grad_norm": 0.06449007987976074,
"learning_rate": 6.862745098039216e-05,
"loss": 0.056,
"step": 36
},
{
"epoch": 0.41226993865030676,
"grad_norm": 0.06695199757814407,
"learning_rate": 8.039215686274511e-05,
"loss": 0.0378,
"step": 42
},
{
"epoch": 0.47116564417177914,
"grad_norm": 0.057652052491903305,
"learning_rate": 9.215686274509804e-05,
"loss": 0.0333,
"step": 48
},
{
"epoch": 0.5006134969325153,
"eval_loss": 0.04454955831170082,
"eval_runtime": 99.1976,
"eval_samples_per_second": 0.917,
"eval_steps_per_second": 0.917,
"step": 51
},
{
"epoch": 0.5300613496932516,
"grad_norm": 0.07416237890720367,
"learning_rate": 9.99952117026961e-05,
"loss": 0.0317,
"step": 54
},
{
"epoch": 0.588957055214724,
"grad_norm": 0.0759715884923935,
"learning_rate": 9.992340558396519e-05,
"loss": 0.0333,
"step": 60
},
{
"epoch": 0.6478527607361964,
"grad_norm": 0.06889687478542328,
"learning_rate": 9.976555313435489e-05,
"loss": 0.0265,
"step": 66
},
{
"epoch": 0.7067484662576687,
"grad_norm": 0.06610561907291412,
"learning_rate": 9.952192642312712e-05,
"loss": 0.0267,
"step": 72
},
{
"epoch": 0.7656441717791411,
"grad_norm": 0.05757423862814903,
"learning_rate": 9.919294535722452e-05,
"loss": 0.0276,
"step": 78
},
{
"epoch": 0.8245398773006135,
"grad_norm": 0.05512451380491257,
"learning_rate": 9.877917695753274e-05,
"loss": 0.0224,
"step": 84
},
{
"epoch": 0.8834355828220859,
"grad_norm": 0.06681577116250992,
"learning_rate": 9.828133438158205e-05,
"loss": 0.02,
"step": 90
},
{
"epoch": 0.9423312883435583,
"grad_norm": 0.07138796895742416,
"learning_rate": 9.770027569437253e-05,
"loss": 0.0263,
"step": 96
},
{
"epoch": 1.0,
"grad_norm": 0.06135529652237892,
"learning_rate": 9.703700238944158e-05,
"loss": 0.0189,
"step": 102
},
{
"epoch": 1.0,
"eval_loss": 0.027291039004921913,
"eval_runtime": 90.02,
"eval_samples_per_second": 1.011,
"eval_steps_per_second": 1.011,
"step": 102
},
{
"epoch": 1.0588957055214725,
"grad_norm": 0.09823279082775116,
"learning_rate": 9.629265766272292e-05,
"loss": 0.0158,
"step": 108
},
{
"epoch": 1.1177914110429448,
"grad_norm": 0.05707687512040138,
"learning_rate": 9.54685244421718e-05,
"loss": 0.017,
"step": 114
},
{
"epoch": 1.1766871165644173,
"grad_norm": 0.06987980008125305,
"learning_rate": 9.456602317655275e-05,
"loss": 0.0169,
"step": 120
},
{
"epoch": 1.2355828220858895,
"grad_norm": 0.06296130269765854,
"learning_rate": 9.358670938720113e-05,
"loss": 0.0162,
"step": 126
},
{
"epoch": 1.294478527607362,
"grad_norm": 0.059489328414201736,
"learning_rate": 9.253227098697803e-05,
"loss": 0.0166,
"step": 132
},
{
"epoch": 1.3533742331288343,
"grad_norm": 0.06373760849237442,
"learning_rate": 9.140452537103942e-05,
"loss": 0.0188,
"step": 138
},
{
"epoch": 1.4122699386503068,
"grad_norm": 0.06238365173339844,
"learning_rate": 9.020541628443395e-05,
"loss": 0.0161,
"step": 144
},
{
"epoch": 1.471165644171779,
"grad_norm": 0.071172334253788,
"learning_rate": 8.893701047192833e-05,
"loss": 0.0196,
"step": 150
},
{
"epoch": 1.5006134969325153,
"eval_loss": 0.024013910442590714,
"eval_runtime": 90.4486,
"eval_samples_per_second": 1.006,
"eval_steps_per_second": 1.006,
"step": 153
},
{
"epoch": 1.5300613496932516,
"grad_norm": 0.06220275163650513,
"learning_rate": 8.760149411583437e-05,
"loss": 0.0185,
"step": 156
},
{
"epoch": 1.588957055214724,
"grad_norm": 0.06537278741598129,
"learning_rate": 8.620116906797739e-05,
"loss": 0.0227,
"step": 162
},
{
"epoch": 1.6478527607361964,
"grad_norm": 0.04823810234665871,
"learning_rate": 8.473844888230065e-05,
"loss": 0.019,
"step": 168
},
{
"epoch": 1.7067484662576686,
"grad_norm": 0.049262188374996185,
"learning_rate": 8.321585465494349e-05,
"loss": 0.0158,
"step": 174
},
{
"epoch": 1.7656441717791411,
"grad_norm": 0.07977139949798584,
"learning_rate": 8.163601067896343e-05,
"loss": 0.0154,
"step": 180
},
{
"epoch": 1.8245398773006136,
"grad_norm": 0.07933256775140762,
"learning_rate": 8.000163992119146e-05,
"loss": 0.0203,
"step": 186
},
{
"epoch": 1.883435582822086,
"grad_norm": 0.047850318253040314,
"learning_rate": 7.831555932901642e-05,
"loss": 0.0146,
"step": 192
},
{
"epoch": 1.9423312883435582,
"grad_norm": 0.07077929377555847,
"learning_rate": 7.658067497518772e-05,
"loss": 0.0164,
"step": 198
},
{
"epoch": 2.0,
"grad_norm": 0.08206511288881302,
"learning_rate": 7.479997704900437e-05,
"loss": 0.0164,
"step": 204
},
{
"epoch": 2.0,
"eval_loss": 0.022676732391119003,
"eval_runtime": 102.7598,
"eval_samples_per_second": 0.886,
"eval_steps_per_second": 0.886,
"step": 204
},
{
"epoch": 2.0588957055214725,
"grad_norm": 0.04856789857149124,
"learning_rate": 7.297653470252358e-05,
"loss": 0.0124,
"step": 210
},
{
"epoch": 2.117791411042945,
"grad_norm": 0.05966215953230858,
"learning_rate": 7.111349076067187e-05,
"loss": 0.013,
"step": 216
},
{
"epoch": 2.176687116564417,
"grad_norm": 0.08179495483636856,
"learning_rate": 6.921405630437584e-05,
"loss": 0.0104,
"step": 222
},
{
"epoch": 2.2355828220858895,
"grad_norm": 0.06349746882915497,
"learning_rate": 6.728150513604941e-05,
"loss": 0.0139,
"step": 228
},
{
"epoch": 2.294478527607362,
"grad_norm": 0.07469939440488815,
"learning_rate": 6.531916813697615e-05,
"loss": 0.0149,
"step": 234
},
{
"epoch": 2.3533742331288345,
"grad_norm": 0.06438795477151871,
"learning_rate": 6.333042752631243e-05,
"loss": 0.0121,
"step": 240
},
{
"epoch": 2.4122699386503066,
"grad_norm": 0.05141801759600639,
"learning_rate": 6.131871103160643e-05,
"loss": 0.0136,
"step": 246
},
{
"epoch": 2.471165644171779,
"grad_norm": 0.0645705834031105,
"learning_rate": 5.928748598088024e-05,
"loss": 0.0139,
"step": 252
},
{
"epoch": 2.500613496932515,
"eval_loss": 0.022207412868738174,
"eval_runtime": 88.2288,
"eval_samples_per_second": 1.031,
"eval_steps_per_second": 1.031,
"step": 255
},
{
"epoch": 2.5300613496932516,
"grad_norm": 0.04924144223332405,
"learning_rate": 5.7240253326457936e-05,
"loss": 0.0152,
"step": 258
},
{
"epoch": 2.588957055214724,
"grad_norm": 0.061027299612760544,
"learning_rate": 5.518054161083994e-05,
"loss": 0.0114,
"step": 264
},
{
"epoch": 2.647852760736196,
"grad_norm": 0.0684848427772522,
"learning_rate": 5.31119008850239e-05,
"loss": 0.0128,
"step": 270
},
{
"epoch": 2.7067484662576686,
"grad_norm": 0.07513795047998428,
"learning_rate": 5.1037896589754134e-05,
"loss": 0.0169,
"step": 276
},
{
"epoch": 2.765644171779141,
"grad_norm": 0.049461428076028824,
"learning_rate": 4.896210341024587e-05,
"loss": 0.0139,
"step": 282
},
{
"epoch": 2.8245398773006136,
"grad_norm": 0.06591672450304031,
"learning_rate": 4.6888099114976096e-05,
"loss": 0.0136,
"step": 288
},
{
"epoch": 2.883435582822086,
"grad_norm": 0.0519632063806057,
"learning_rate": 4.481945838916006e-05,
"loss": 0.0185,
"step": 294
},
{
"epoch": 2.942331288343558,
"grad_norm": 0.0653291717171669,
"learning_rate": 4.2759746673542076e-05,
"loss": 0.0124,
"step": 300
},
{
"epoch": 3.0,
"grad_norm": 0.04251919314265251,
"learning_rate": 4.0712514019119775e-05,
"loss": 0.012,
"step": 306
},
{
"epoch": 3.0,
"eval_loss": 0.0213442575186491,
"eval_runtime": 89.4474,
"eval_samples_per_second": 1.017,
"eval_steps_per_second": 1.017,
"step": 306
},
{
"epoch": 3.0588957055214725,
"grad_norm": 0.05854284018278122,
"learning_rate": 3.868128896839357e-05,
"loss": 0.0135,
"step": 312
},
{
"epoch": 3.117791411042945,
"grad_norm": 0.056230898946523666,
"learning_rate": 3.666957247368757e-05,
"loss": 0.0082,
"step": 318
},
{
"epoch": 3.176687116564417,
"grad_norm": 0.05258898064494133,
"learning_rate": 3.4680831863023865e-05,
"loss": 0.0137,
"step": 324
},
{
"epoch": 3.2355828220858895,
"grad_norm": 0.06439245492219925,
"learning_rate": 3.2718494863950586e-05,
"loss": 0.0106,
"step": 330
},
{
"epoch": 3.294478527607362,
"grad_norm": 0.07195457816123962,
"learning_rate": 3.078594369562417e-05,
"loss": 0.0116,
"step": 336
},
{
"epoch": 3.3533742331288345,
"grad_norm": 0.04599051550030708,
"learning_rate": 2.8886509239328146e-05,
"loss": 0.0096,
"step": 342
},
{
"epoch": 3.4122699386503066,
"grad_norm": 0.08732595294713974,
"learning_rate": 2.7023465297476423e-05,
"loss": 0.0146,
"step": 348
},
{
"epoch": 3.471165644171779,
"grad_norm": 0.06491376459598541,
"learning_rate": 2.520002295099564e-05,
"loss": 0.0091,
"step": 354
},
{
"epoch": 3.500613496932515,
"eval_loss": 0.02049410715699196,
"eval_runtime": 98.1898,
"eval_samples_per_second": 0.927,
"eval_steps_per_second": 0.927,
"step": 357
},
{
"epoch": 3.5300613496932516,
"grad_norm": 0.048662226647138596,
"learning_rate": 2.341932502481226e-05,
"loss": 0.0133,
"step": 360
},
{
"epoch": 3.588957055214724,
"grad_norm": 0.05924804136157036,
"learning_rate": 2.1684440670983568e-05,
"loss": 0.0092,
"step": 366
},
{
"epoch": 3.647852760736196,
"grad_norm": 0.06925608217716217,
"learning_rate": 1.9998360078808547e-05,
"loss": 0.0106,
"step": 372
},
{
"epoch": 3.7067484662576686,
"grad_norm": 0.07263399660587311,
"learning_rate": 1.836398932103658e-05,
"loss": 0.0109,
"step": 378
},
{
"epoch": 3.765644171779141,
"grad_norm": 0.04859050735831261,
"learning_rate": 1.6784145345056517e-05,
"loss": 0.0135,
"step": 384
},
{
"epoch": 3.8245398773006136,
"grad_norm": 0.06092414632439613,
"learning_rate": 1.5261551117699357e-05,
"loss": 0.0099,
"step": 390
},
{
"epoch": 3.883435582822086,
"grad_norm": 0.06852217763662338,
"learning_rate": 1.3798830932022617e-05,
"loss": 0.0088,
"step": 396
},
{
"epoch": 3.942331288343558,
"grad_norm": 0.07443215698003769,
"learning_rate": 1.239850588416565e-05,
"loss": 0.0089,
"step": 402
},
{
"epoch": 4.0,
"grad_norm": 0.08083483576774597,
"learning_rate": 1.1062989528071682e-05,
"loss": 0.0103,
"step": 408
},
{
"epoch": 4.0,
"eval_loss": 0.0203021802008152,
"eval_runtime": 94.7737,
"eval_samples_per_second": 0.96,
"eval_steps_per_second": 0.96,
"step": 408
},
{
"epoch": 4.058895705521472,
"grad_norm": 0.07016477733850479,
"learning_rate": 9.79458371556607e-06,
"loss": 0.0087,
"step": 414
},
{
"epoch": 4.117791411042945,
"grad_norm": 0.06698858737945557,
"learning_rate": 8.595474628960598e-06,
"loss": 0.0084,
"step": 420
},
{
"epoch": 4.176687116564417,
"grad_norm": 0.08916748315095901,
"learning_rate": 7.4677290130219794e-06,
"loss": 0.0114,
"step": 426
},
{
"epoch": 4.23558282208589,
"grad_norm": 0.0681818425655365,
"learning_rate": 6.413290612798884e-06,
"loss": 0.0108,
"step": 432
},
{
"epoch": 4.294478527607362,
"grad_norm": 0.07073990255594254,
"learning_rate": 5.4339768234472625e-06,
"loss": 0.0087,
"step": 438
},
{
"epoch": 4.353374233128834,
"grad_norm": 0.06345146149396896,
"learning_rate": 4.531475557828202e-06,
"loss": 0.0068,
"step": 444
},
{
"epoch": 4.412269938650307,
"grad_norm": 0.06121087819337845,
"learning_rate": 3.7073423372770753e-06,
"loss": 0.0096,
"step": 450
},
{
"epoch": 4.471165644171779,
"grad_norm": 0.063334159553051,
"learning_rate": 2.9629976105584266e-06,
"loss": 0.0113,
"step": 456
},
{
"epoch": 4.500613496932515,
"eval_loss": 0.02043687365949154,
"eval_runtime": 89.8099,
"eval_samples_per_second": 1.013,
"eval_steps_per_second": 1.013,
"step": 459
},
{
"epoch": 4.530061349693252,
"grad_norm": 0.05264800414443016,
"learning_rate": 2.299724305627482e-06,
"loss": 0.0132,
"step": 462
},
{
"epoch": 4.588957055214724,
"grad_norm": 0.0718545988202095,
"learning_rate": 1.7186656184179472e-06,
"loss": 0.0089,
"step": 468
},
{
"epoch": 4.647852760736196,
"grad_norm": 0.05546210706233978,
"learning_rate": 1.2208230424672562e-06,
"loss": 0.0085,
"step": 474
},
{
"epoch": 4.706748466257669,
"grad_norm": 0.08636888116598129,
"learning_rate": 8.0705464277549e-07,
"loss": 0.0098,
"step": 480
},
{
"epoch": 4.765644171779141,
"grad_norm": 0.06814199686050415,
"learning_rate": 4.780735768728895e-07,
"loss": 0.0094,
"step": 486
},
{
"epoch": 4.824539877300613,
"grad_norm": 0.08393464237451553,
"learning_rate": 2.3444686564511043e-07,
"loss": 0.0076,
"step": 492
},
{
"epoch": 4.883435582822086,
"grad_norm": 0.05681199952960014,
"learning_rate": 7.659441603481421e-08,
"loss": 0.0075,
"step": 498
},
{
"epoch": 4.942331288343558,
"grad_norm": 0.055789653211832047,
"learning_rate": 4.7882973039037326e-09,
"loss": 0.0077,
"step": 504
},
{
"epoch": 4.9521472392638035,
"step": 505,
"total_flos": 9.445599206692454e+17,
"train_loss": 0.020985854302745053,
"train_runtime": 14796.948,
"train_samples_per_second": 0.275,
"train_steps_per_second": 0.034
}
],
"logging_steps": 6,
"max_steps": 505,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 51,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.445599206692454e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
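
The log_history above interleaves training entries (one every 6 optimizer steps, each with loss, learning_rate, and grad_norm) with evaluation entries (one every 51 steps, each with eval_loss and eval_runtime), followed by a final summary entry at step 505. A minimal parsing sketch is shown below; it assumes a local copy of this file named trainer_state.json and that matplotlib is installed, neither of which is part of this repository.

    import json

    import matplotlib.pyplot as plt  # assumption: matplotlib is available

    # Load a local copy of this file (hypothetical path).
    with open("trainer_state.json") as f:
        state = json.load(f)

    train_steps, train_losses = [], []
    eval_steps, eval_losses = [], []
    for entry in state["log_history"]:
        if "loss" in entry:          # training log, written every logging_steps (6)
            train_steps.append(entry["step"])
            train_losses.append(entry["loss"])
        elif "eval_loss" in entry:   # evaluation log, written every eval_steps (51)
            eval_steps.append(entry["step"])
            eval_losses.append(entry["eval_loss"])

    plt.plot(train_steps, train_losses, label="train loss")
    plt.plot(eval_steps, eval_losses, marker="o", label="eval loss")
    plt.xlabel("global step")
    plt.ylabel("loss")
    plt.legend()
    plt.show()

The final summary entry reports train_loss rather than loss, so it is skipped by the loop above; only the periodic training and evaluation curves are plotted.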