{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9893390191897654,
  "eval_steps": 100,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 608.9531555175781,
      "epoch": 0.017057569296375266,
      "grad_norm": 2.6430623531341553,
      "kl": 0.0,
      "learning_rate": 5e-07,
      "loss": 0.0,
      "reward": 0.647321455180645,
      "reward_std": 0.32679445296525955,
      "rewards/accuracy_reward": 0.6462053880095482,
      "rewards/format_reward": 0.0011160714784637094,
      "step": 1
    },
    {
      "completion_length": 617.5315542221069,
      "epoch": 0.08528784648187633,
      "grad_norm": 2.203550338745117,
      "kl": 0.00031819939613342285,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.6328125279396772,
      "reward_std": 0.3299180152826011,
      "rewards/accuracy_reward": 0.6325335092842579,
      "rewards/format_reward": 0.00027901786961592734,
      "step": 5
    },
    {
      "completion_length": 614.0942283630371,
      "epoch": 0.17057569296375266,
      "grad_norm": 23014.91015625,
      "kl": 1.7154921531677245,
      "learning_rate": 2.956412726139078e-06,
      "loss": 0.0684,
      "reward": 0.7095982447266579,
      "reward_std": 0.2823256587609649,
      "rewards/accuracy_reward": 0.7089286014437676,
      "rewards/format_reward": 0.0006696428870782256,
      "step": 10
    },
    {
      "completion_length": 627.2895385742188,
      "epoch": 0.255863539445629,
      "grad_norm": 17940.5390625,
      "kl": 2.3679866790771484,
      "learning_rate": 2.7836719084521715e-06,
      "loss": 0.0948,
      "reward": 0.7515625342726707,
      "reward_std": 0.24004750959575177,
      "rewards/accuracy_reward": 0.7515625342726707,
      "rewards/format_reward": 0.0,
      "step": 15
    },
    {
      "completion_length": 611.387979888916,
      "epoch": 0.3411513859275053,
      "grad_norm": 10021.2578125,
      "kl": 3.927571487426758,
      "learning_rate": 2.4946839873611927e-06,
      "loss": 0.1568,
      "reward": 0.7631696775555611,
      "reward_std": 0.2211073562502861,
      "rewards/accuracy_reward": 0.7631696775555611,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 619.4598495483399,
      "epoch": 0.42643923240938164,
      "grad_norm": 0.10919574648141861,
      "kl": 0.0038677215576171874,
      "learning_rate": 2.1156192081791355e-06,
      "loss": 0.0002,
      "reward": 0.7560268223285675,
      "reward_std": 0.20058796666562556,
      "rewards/accuracy_reward": 0.7560268223285675,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 620.3272598266601,
      "epoch": 0.511727078891258,
      "grad_norm": 0.1221357136964798,
      "kl": 0.0031772613525390624,
      "learning_rate": 1.6808050203829845e-06,
      "loss": 0.0001,
      "reward": 0.7466518178582191,
      "reward_std": 0.2014338149689138,
      "rewards/accuracy_reward": 0.7466518178582191,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 605.8370811462403,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.12291086465120316,
      "kl": 0.0034273147583007814,
      "learning_rate": 1.2296174432791415e-06,
      "loss": 0.0001,
      "reward": 0.7455357447266578,
      "reward_std": 0.1848256574012339,
      "rewards/accuracy_reward": 0.7455357447266578,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "completion_length": 599.0658744812011,
      "epoch": 0.6823027718550106,
      "grad_norm": 0.41419172286987305,
      "kl": 0.003479766845703125,
      "learning_rate": 8.029152419343472e-07,
      "loss": 0.0001,
      "reward": 0.758482177555561,
      "reward_std": 0.19037142880260943,
      "rewards/accuracy_reward": 0.758482177555561,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 612.0359664916992,
      "epoch": 0.767590618336887,
      "grad_norm": 0.51832515001297,
      "kl": 0.003022003173828125,
      "learning_rate": 4.3933982822017883e-07,
      "loss": 0.0001,
      "reward": 0.7497768223285675,
      "reward_std": 0.1870642237365246,
      "rewards/accuracy_reward": 0.7497768223285675,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 614.3917701721191,
      "epoch": 0.8528784648187633,
      "grad_norm": 0.08213239908218384,
      "kl": 0.03495864868164063,
      "learning_rate": 1.718159615201853e-07,
      "loss": 0.0014,
      "reward": 0.7412946790456771,
      "reward_std": 0.19202891541644931,
      "rewards/accuracy_reward": 0.7412946790456771,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 604.3663208007813,
      "epoch": 0.9381663113006397,
      "grad_norm": 0.5027450323104858,
      "kl": 0.0035785675048828126,
      "learning_rate": 2.4570139579284723e-08,
      "loss": 0.0001,
      "reward": 0.772098246216774,
      "reward_std": 0.18952333889901637,
      "rewards/accuracy_reward": 0.772098246216774,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 594.7775586446127,
      "epoch": 0.9893390191897654,
      "kl": 0.0032895406087239585,
      "reward": 0.772693489988645,
      "reward_std": 0.17002321127802134,
      "rewards/accuracy_reward": 0.772693489988645,
      "rewards/format_reward": 0.0,
      "step": 58,
      "total_flos": 0.0,
      "train_loss": 0.027603623356527666,
      "train_runtime": 13992.0401,
      "train_samples_per_second": 0.536,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 5,
  "max_steps": 58,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}