utopia-32b / trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.8,
"eval_steps": 500,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"completion_length": 253.0375,
"epoch": 0.08,
"grad_norm": 0.34916529059410095,
"kl": 0.00048295065280399283,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0,
"reward": 0.6274130865931511,
"reward_std": 0.03938147491426207,
"rewards/<lambda>": 0.6274130865931511,
"step": 10
},
{
"completion_length": 253.225,
"epoch": 0.16,
"grad_norm": 0.41210684180259705,
"kl": 0.000624238243472064,
"learning_rate": 7.600000000000001e-06,
"loss": 0.0,
"reward": 0.6234804779291153,
"reward_std": 0.039024536509532484,
"rewards/<lambda>": 0.6234804779291153,
"step": 20
},
{
"completion_length": 254.025,
"epoch": 0.24,
"grad_norm": 0.24894775450229645,
"kl": 0.000612898166582454,
"learning_rate": 1.16e-05,
"loss": 0.0,
"reward": 0.6120496317744255,
"reward_std": 0.03366693327843677,
"rewards/<lambda>": 0.6120496317744255,
"step": 30
},
{
"completion_length": 252.25,
"epoch": 0.32,
"grad_norm": 0.38096368312835693,
"kl": 0.0006592169374926016,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.0,
"reward": 0.6164205402135849,
"reward_std": 0.042428855784237385,
"rewards/<lambda>": 0.6164205402135849,
"step": 40
},
{
"completion_length": 251.275,
"epoch": 0.4,
"grad_norm": 0.33936625719070435,
"kl": 0.0006831091290223412,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.0,
"reward": 0.6328483298420906,
"reward_std": 0.035810240707360205,
"rewards/<lambda>": 0.6328483298420906,
"step": 50
},
{
"completion_length": 254.4125,
"epoch": 0.48,
"grad_norm": 0.4410642385482788,
"kl": 0.0009464225804549642,
"learning_rate": 1.9600000000000002e-05,
"loss": 0.0,
"reward": 0.6187735944986343,
"reward_std": 0.03256268355762586,
"rewards/<lambda>": 0.6187735944986343,
"step": 60
},
{
"completion_length": 255.4625,
"epoch": 0.56,
"grad_norm": 0.45320507884025574,
"kl": 0.0017867269460111856,
"learning_rate": 1.9155555555555558e-05,
"loss": 0.0001,
"reward": 0.619798681139946,
"reward_std": 0.02807733681984246,
"rewards/<lambda>": 0.619798681139946,
"step": 70
},
{
"completion_length": 253.925,
"epoch": 0.64,
"grad_norm": 0.43050986528396606,
"kl": 0.0034164128388511016,
"learning_rate": 1.8711111111111113e-05,
"loss": 0.0001,
"reward": 0.6333043664693833,
"reward_std": 0.03672210559016094,
"rewards/<lambda>": 0.6333043664693833,
"step": 80
},
{
"completion_length": 251.0625,
"epoch": 0.72,
"grad_norm": 0.39146214723587036,
"kl": 0.0044442670623539016,
"learning_rate": 1.826666666666667e-05,
"loss": 0.0002,
"reward": 0.6258660212159157,
"reward_std": 0.03840005891397595,
"rewards/<lambda>": 0.6258660212159157,
"step": 90
},
{
"completion_length": 245.4875,
"epoch": 0.8,
"grad_norm": 0.5947660207748413,
"kl": 0.009713826864026488,
"learning_rate": 1.782222222222222e-05,
"loss": 0.0004,
"reward": 0.647130835056305,
"reward_std": 0.033473254850832745,
"rewards/<lambda>": 0.647130835056305,
"step": 100
},
{
"completion_length": 253.6625,
"epoch": 0.88,
"grad_norm": 0.487610399723053,
"kl": 0.015179099002853035,
"learning_rate": 1.737777777777778e-05,
"loss": 0.0006,
"reward": 0.6606008350849152,
"reward_std": 0.043368036416359244,
"rewards/<lambda>": 0.6606008350849152,
"step": 110
},
{
"completion_length": 247.3875,
"epoch": 0.96,
"grad_norm": 0.4823659062385559,
"kl": 0.023328896961174905,
"learning_rate": 1.6933333333333336e-05,
"loss": 0.0009,
"reward": 0.6569811046123505,
"reward_std": 0.03937327671446837,
"rewards/<lambda>": 0.6569811046123505,
"step": 120
},
{
"completion_length": 240.6125,
"epoch": 1.04,
"grad_norm": 0.6331102848052979,
"kl": 0.04820071374997496,
"learning_rate": 1.648888888888889e-05,
"loss": 0.0019,
"reward": 0.678092373907566,
"reward_std": 0.025905024044914172,
"rewards/<lambda>": 0.678092373907566,
"step": 130
},
{
"completion_length": 243.7125,
"epoch": 1.12,
"grad_norm": 0.8691033720970154,
"kl": 0.06938900966197252,
"learning_rate": 1.6044444444444444e-05,
"loss": 0.0028,
"reward": 0.6855655789375306,
"reward_std": 0.021212380210636185,
"rewards/<lambda>": 0.6855655789375306,
"step": 140
},
{
"completion_length": 254.3125,
"epoch": 1.2,
"grad_norm": 0.4045356214046478,
"kl": 0.06224800143390894,
"learning_rate": 1.5600000000000003e-05,
"loss": 0.0025,
"reward": 0.7006005674600602,
"reward_std": 0.013145922217518091,
"rewards/<lambda>": 0.7006005674600602,
"step": 150
},
{
"completion_length": 253.6125,
"epoch": 1.28,
"grad_norm": 0.38577979803085327,
"kl": 0.06106488471850753,
"learning_rate": 1.5155555555555557e-05,
"loss": 0.0024,
"reward": 0.6971443966031075,
"reward_std": 0.016035666200332345,
"rewards/<lambda>": 0.6971443966031075,
"step": 160
},
{
"completion_length": 253.3125,
"epoch": 1.3599999999999999,
"grad_norm": 0.47867605090141296,
"kl": 0.060870288498699666,
"learning_rate": 1.4711111111111111e-05,
"loss": 0.0024,
"reward": 0.7054681569337845,
"reward_std": 0.013500441558426246,
"rewards/<lambda>": 0.7054681569337845,
"step": 170
},
{
"completion_length": 256.0,
"epoch": 1.44,
"grad_norm": 0.5765718817710876,
"kl": 0.06626366656273604,
"learning_rate": 1.4266666666666668e-05,
"loss": 0.0027,
"reward": 0.6981858894228935,
"reward_std": 0.013308836764190346,
"rewards/<lambda>": 0.6981858894228935,
"step": 180
},
{
"completion_length": 256.0,
"epoch": 1.52,
"grad_norm": 0.721457302570343,
"kl": 0.056253846967592835,
"learning_rate": 1.3822222222222224e-05,
"loss": 0.0022,
"reward": 0.6894399926066399,
"reward_std": 0.030957136023789644,
"rewards/<lambda>": 0.6894399926066399,
"step": 190
},
{
"completion_length": 256.0,
"epoch": 1.6,
"grad_norm": 0.46415644884109497,
"kl": 0.05959339723922312,
"learning_rate": 1.3377777777777778e-05,
"loss": 0.0024,
"reward": 0.7021724998950958,
"reward_std": 0.022326894686557353,
"rewards/<lambda>": 0.7021724998950958,
"step": 200
},
{
"completion_length": 255.4,
"epoch": 1.6800000000000002,
"grad_norm": 0.5963376760482788,
"kl": 0.058718588296324016,
"learning_rate": 1.2933333333333334e-05,
"loss": 0.0023,
"reward": 0.6913566589355469,
"reward_std": 0.020402386551722884,
"rewards/<lambda>": 0.6913566589355469,
"step": 210
},
{
"completion_length": 256.0,
"epoch": 1.76,
"grad_norm": 0.5936807990074158,
"kl": 0.05593314995057881,
"learning_rate": 1.2488888888888891e-05,
"loss": 0.0022,
"reward": 0.6903435245156289,
"reward_std": 0.02344267221633345,
"rewards/<lambda>": 0.6903435245156289,
"step": 220
},
{
"completion_length": 255.0625,
"epoch": 1.8399999999999999,
"grad_norm": 0.3360937237739563,
"kl": 0.06351863332092762,
"learning_rate": 1.2044444444444445e-05,
"loss": 0.0025,
"reward": 0.6909130409359932,
"reward_std": 0.021237177739385514,
"rewards/<lambda>": 0.6909130409359932,
"step": 230
},
{
"completion_length": 255.725,
"epoch": 1.92,
"grad_norm": 0.6370587348937988,
"kl": 0.057419772166758774,
"learning_rate": 1.16e-05,
"loss": 0.0023,
"reward": 0.6980561569333077,
"reward_std": 0.021599416993558405,
"rewards/<lambda>": 0.6980561569333077,
"step": 240
},
{
"completion_length": 255.025,
"epoch": 2.0,
"grad_norm": 0.494765967130661,
"kl": 0.05669800685718655,
"learning_rate": 1.1155555555555556e-05,
"loss": 0.0023,
"reward": 0.7012724861502647,
"reward_std": 0.020417724532308057,
"rewards/<lambda>": 0.7012724861502647,
"step": 250
},
{
"completion_length": 254.475,
"epoch": 2.08,
"grad_norm": 0.13030649721622467,
"kl": 0.05255108489654958,
"learning_rate": 1.0711111111111112e-05,
"loss": 0.0021,
"reward": 0.6929124936461448,
"reward_std": 0.02159857752267271,
"rewards/<lambda>": 0.6929124936461448,
"step": 260
},
{
"completion_length": 252.7125,
"epoch": 2.16,
"grad_norm": 0.46067020297050476,
"kl": 0.05210385215468705,
"learning_rate": 1.0266666666666668e-05,
"loss": 0.0021,
"reward": 0.7017899960279464,
"reward_std": 0.019325232884148134,
"rewards/<lambda>": 0.7017899960279464,
"step": 270
},
{
"completion_length": 250.9625,
"epoch": 2.24,
"grad_norm": 0.5625087022781372,
"kl": 0.04940431362483651,
"learning_rate": 9.822222222222223e-06,
"loss": 0.002,
"reward": 0.6993249982595444,
"reward_std": 0.02508107841713354,
"rewards/<lambda>": 0.6993249982595444,
"step": 280
},
{
"completion_length": 251.0875,
"epoch": 2.32,
"grad_norm": 0.610172688961029,
"kl": 0.06100269835442305,
"learning_rate": 9.377777777777779e-06,
"loss": 0.0024,
"reward": 0.7006610661745072,
"reward_std": 0.02047629818553105,
"rewards/<lambda>": 0.7006610661745072,
"step": 290
},
{
"completion_length": 254.3125,
"epoch": 2.4,
"grad_norm": 0.5110147595405579,
"kl": 0.07290698895230889,
"learning_rate": 8.933333333333333e-06,
"loss": 0.0029,
"reward": 0.7114591643214225,
"reward_std": 0.016045435919659214,
"rewards/<lambda>": 0.7114591643214225,
"step": 300
},
{
"completion_length": 255.35,
"epoch": 2.48,
"grad_norm": 0.5776111483573914,
"kl": 0.07195442449301481,
"learning_rate": 8.48888888888889e-06,
"loss": 0.0029,
"reward": 0.7157138809561729,
"reward_std": 0.01624344631563872,
"rewards/<lambda>": 0.7157138809561729,
"step": 310
},
{
"completion_length": 254.95,
"epoch": 2.56,
"grad_norm": 0.5838586688041687,
"kl": 0.06572490665130318,
"learning_rate": 8.044444444444444e-06,
"loss": 0.0026,
"reward": 0.7057199940085411,
"reward_std": 0.016100824170280248,
"rewards/<lambda>": 0.7057199940085411,
"step": 320
},
{
"completion_length": 253.5125,
"epoch": 2.64,
"grad_norm": 0.5740565657615662,
"kl": 0.05695616342127323,
"learning_rate": 7.600000000000001e-06,
"loss": 0.0023,
"reward": 0.7053502678871155,
"reward_std": 0.017047954676672816,
"rewards/<lambda>": 0.7053502678871155,
"step": 330
},
{
"completion_length": 252.8375,
"epoch": 2.7199999999999998,
"grad_norm": 0.6114740371704102,
"kl": 0.051503782346844676,
"learning_rate": 7.155555555555556e-06,
"loss": 0.0021,
"reward": 0.6978502199053764,
"reward_std": 0.027463712746975944,
"rewards/<lambda>": 0.6978502199053764,
"step": 340
},
{
"completion_length": 252.775,
"epoch": 2.8,
"grad_norm": 0.6278762221336365,
"kl": 0.0527677777223289,
"learning_rate": 6.711111111111111e-06,
"loss": 0.0021,
"reward": 0.7142949908971786,
"reward_std": 0.01839892326388508,
"rewards/<lambda>": 0.7142949908971786,
"step": 350
}
],
"logging_steps": 10,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
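
The JSON above is the standard trainer_state.json that the Hugging Face transformers Trainer writes at each checkpoint; every entry in log_history records the values logged at one interval (step, epoch, loss, grad_norm, learning_rate, plus the reward, reward_std, kl, and completion_length columns typical of a TRL-style RL fine-tuning run). Below is a minimal sketch for inspecting that history, assuming the file has been downloaded locally as trainer_state.json and that matplotlib is installed; the file name and the plotting dependency are assumptions for illustration, not part of the upload.

import json

import matplotlib.pyplot as plt

# Load the checkpoint state exactly as saved above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Pull the per-interval metrics out of log_history.
steps = [entry["step"] for entry in state["log_history"]]
rewards = [entry["reward"] for entry in state["log_history"]]
kls = [entry["kl"] for entry in state["log_history"]]

# Plot mean reward and the logged kl term against training step.
fig, (ax_reward, ax_kl) = plt.subplots(1, 2, figsize=(10, 4))
ax_reward.plot(steps, rewards)
ax_reward.set_xlabel("step")
ax_reward.set_ylabel("reward")
ax_kl.plot(steps, kls)
ax_kl.set_xlabel("step")
ax_kl.set_ylabel("kl")
fig.tight_layout()
plt.show()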