|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9893390191897654,
  "eval_steps": 100,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 616.8067230224609,
      "epoch": 0.08528784648187633,
      "grad_norm": 3.238640308380127,
      "kl": 0.00016660690307617186,
      "learning_rate": 2.5e-06,
      "loss": 0.0,
      "reward": 0.6430803842842578,
      "reward_std": 0.3440178133547306,
      "rewards/accuracy_reward": 0.6424107439815998,
      "rewards/format_reward": 0.0006696428870782256,
      "step": 5
    },
    {
      "completion_length": 612.4875190734863,
      "epoch": 0.17057569296375266,
      "grad_norm": 0.19049902260303497,
      "kl": 0.0018734931945800781,
      "learning_rate": 2.956412726139078e-06,
      "loss": 0.0001,
      "reward": 0.7142857521772384,
      "reward_std": 0.2628630593419075,
      "rewards/accuracy_reward": 0.7142857521772384,
      "rewards/format_reward": 0.0,
      "step": 10
    },
    {
      "completion_length": 621.501594543457,
      "epoch": 0.255863539445629,
      "grad_norm": 4.2737298011779785,
      "kl": 1.1404457092285156,
      "learning_rate": 2.7836719084521715e-06,
      "loss": 0.0456,
      "reward": 0.7477678894996643,
      "reward_std": 0.23944801036268473,
      "rewards/accuracy_reward": 0.7477678894996643,
      "rewards/format_reward": 0.0,
      "step": 15
    },
    {
      "completion_length": 609.3560501098633,
      "epoch": 0.3411513859275053,
      "grad_norm": 0.15062430500984192,
      "kl": 0.007084274291992187,
      "learning_rate": 2.4946839873611927e-06,
      "loss": 0.0003,
      "reward": 0.7607143253087998,
      "reward_std": 0.22700632140040397,
      "rewards/accuracy_reward": 0.7607143253087998,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 614.4294906616211,
      "epoch": 0.42643923240938164,
      "grad_norm": 1.7865530252456665,
      "kl": 0.005679512023925781,
      "learning_rate": 2.1156192081791355e-06,
      "loss": 0.0002,
      "reward": 0.756473246216774,
      "reward_std": 0.1937894519418478,
      "rewards/accuracy_reward": 0.756473246216774,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 614.8489112854004,
      "epoch": 0.511727078891258,
      "grad_norm": 0.26249098777770996,
      "kl": 0.0034336090087890626,
      "learning_rate": 1.6808050203829845e-06,
      "loss": 0.0001,
      "reward": 0.7555803924798965,
      "reward_std": 0.1806625969707966,
      "rewards/accuracy_reward": 0.7555803924798965,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 604.0652038574219,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.15777133405208588,
      "kl": 0.0031137466430664062,
      "learning_rate": 1.2296174432791415e-06,
      "loss": 0.0001,
      "reward": 0.7430803917348385,
      "reward_std": 0.1779588583856821,
      "rewards/accuracy_reward": 0.7430803917348385,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "completion_length": 589.3364120483399,
      "epoch": 0.6823027718550106,
      "grad_norm": 0.14344827830791473,
      "kl": 0.003224945068359375,
      "learning_rate": 8.029152419343472e-07,
      "loss": 0.0001,
      "reward": 0.7691964641213417,
      "reward_std": 0.16417927835136653,
      "rewards/accuracy_reward": 0.7691964641213417,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 607.6384132385253,
      "epoch": 0.767590618336887,
      "grad_norm": 0.0754118412733078,
      "kl": 0.0033246994018554686,
      "learning_rate": 4.3933982822017883e-07,
      "loss": 0.0001,
      "reward": 0.7488839641213417,
      "reward_std": 0.18731108466163277,
      "rewards/accuracy_reward": 0.7488839641213417,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 608.0522613525391,
      "epoch": 0.8528784648187633,
      "grad_norm": 0.07660669833421707,
      "kl": 0.003369140625,
      "learning_rate": 1.718159615201853e-07,
      "loss": 0.0001,
      "reward": 0.7475446820259094,
      "reward_std": 0.18219188433140515,
      "rewards/accuracy_reward": 0.7475446820259094,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 604.2169937133789,
      "epoch": 0.9381663113006397,
      "grad_norm": 0.19257374107837677,
      "kl": 0.003157806396484375,
      "learning_rate": 2.4570139579284723e-08,
      "loss": 0.0001,
      "reward": 0.7812500342726707,
      "reward_std": 0.18055678810924292,
      "rewards/accuracy_reward": 0.7812500342726707,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 595.3329900105795,
      "epoch": 0.9893390191897654,
      "kl": 0.0034383138020833335,
      "reward": 0.7645089626312256,
      "reward_std": 0.16559721545005837,
      "rewards/accuracy_reward": 0.7645089626312256,
      "rewards/format_reward": 0.0,
      "step": 58,
      "total_flos": 0.0,
      "train_loss": 0.004416576032875535,
      "train_runtime": 7299.7333,
      "train_samples_per_second": 1.027,
      "train_steps_per_second": 0.008
    }
  ],
  "logging_steps": 5,
  "max_steps": 58,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
|
|