{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 100,
  "global_step": 201,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 601.5053852081298,
      "epoch": 0.14925373134328357,
      "grad_norm": 6.400096893310547,
      "kl": 0.000240480899810791,
      "learning_rate": 1.4285714285714286e-06,
      "loss": 0.0,
      "reward": 0.671540210954845,
      "reward_std": 0.3266597702167928,
      "rewards/accuracy_reward": 0.671540210954845,
      "rewards/format_reward": 0.0,
      "step": 10
    },
    {
      "completion_length": 627.042663192749,
      "epoch": 0.29850746268656714,
      "grad_norm": 0.10202192515134811,
      "kl": 0.0013910531997680664,
      "learning_rate": 2.8571428571428573e-06,
      "loss": 0.0001,
      "reward": 0.7212053906172514,
      "reward_std": 0.26295756818726657,
      "rewards/accuracy_reward": 0.7212053906172514,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 598.0592895507813,
      "epoch": 0.44776119402985076,
      "grad_norm": 0.07559198141098022,
      "kl": 0.003163862228393555,
      "learning_rate": 2.981532510892707e-06,
      "loss": 0.0001,
      "reward": 0.7546875355765224,
      "reward_std": 0.18810537890531123,
      "rewards/accuracy_reward": 0.7546875355765224,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 594.8722389221191,
      "epoch": 0.5970149253731343,
      "grad_norm": 0.09930674731731415,
      "kl": 0.0029309749603271484,
      "learning_rate": 2.9182778633989753e-06,
      "loss": 0.0001,
      "reward": 0.7710937844589353,
      "reward_std": 0.17225130782462655,
      "rewards/accuracy_reward": 0.7710937844589353,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 580.8643112182617,
      "epoch": 0.746268656716418,
      "grad_norm": 0.07715580612421036,
      "kl": 0.0032186985015869142,
      "learning_rate": 2.811929560709094e-06,
      "loss": 0.0001,
      "reward": 0.7782366409897804,
      "reward_std": 0.168875193875283,
      "rewards/accuracy_reward": 0.7782366409897804,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 567.9614114761353,
      "epoch": 0.8955223880597015,
      "grad_norm": 0.1152808889746666,
      "kl": 0.0035730838775634766,
      "learning_rate": 2.6657189421854562e-06,
      "loss": 0.0001,
      "reward": 0.7762277115136385,
      "reward_std": 0.16049637314863502,
      "rewards/accuracy_reward": 0.7762277115136385,
      "rewards/format_reward": 0.0,
      "step": 60
    },
    {
      "completion_length": 550.2290409088134,
      "epoch": 1.044776119402985,
      "grad_norm": 0.09865733981132507,
      "kl": 0.0038765430450439452,
      "learning_rate": 2.484088543485761e-06,
      "loss": 0.0002,
      "reward": 0.7911458652466535,
      "reward_std": 0.150591628998518,
      "rewards/accuracy_reward": 0.7911458652466535,
      "rewards/format_reward": 0.0,
      "step": 70
    },
    {
      "completion_length": 537.502813911438,
      "epoch": 1.1940298507462686,
      "grad_norm": 0.09184063225984573,
      "kl": 0.004058647155761719,
      "learning_rate": 2.2725571123650813e-06,
      "loss": 0.0002,
      "reward": 0.8042411081492901,
      "reward_std": 0.1556034866720438,
      "rewards/accuracy_reward": 0.8042411081492901,
      "rewards/format_reward": 0.0,
      "step": 80
    },
    {
      "completion_length": 540.724467086792,
      "epoch": 1.3432835820895521,
      "grad_norm": 0.13207751512527466,
      "kl": 0.0038210868835449217,
      "learning_rate": 2.03755192431795e-06,
      "loss": 0.0002,
      "reward": 0.7966518238186836,
      "reward_std": 0.16528417109511792,
      "rewards/accuracy_reward": 0.7966518238186836,
      "rewards/format_reward": 0.0,
      "step": 90
    },
    {
      "completion_length": 541.9515859603882,
      "epoch": 1.4925373134328357,
      "grad_norm": 0.07470321655273438,
      "kl": 0.003845405578613281,
      "learning_rate": 1.7862134930648174e-06,
      "loss": 0.0002,
      "reward": 0.796093787625432,
      "reward_std": 0.14775474979542197,
      "rewards/accuracy_reward": 0.796093787625432,
      "rewards/format_reward": 0.0,
      "step": 100
    },
    {
      "epoch": 1.4925373134328357,
      "eval_completion_length": 540.39465481418,
      "eval_kl": 0.004954966298350088,
      "eval_loss": 0.00019825901836156845,
      "eval_reward": 0.6999750570400611,
      "eval_reward_std": 0.19887771770045473,
      "eval_rewards/accuracy_reward": 0.6999750570400611,
      "eval_rewards/format_reward": 0.0,
      "eval_runtime": 7024.4867,
      "eval_samples_per_second": 0.712,
      "eval_steps_per_second": 0.102,
      "step": 100
    },
    {
      "completion_length": 561.2457847595215,
      "epoch": 1.6417910447761193,
      "grad_norm": 0.08826518058776855,
      "kl": 0.003845024108886719,
      "learning_rate": 1.5261786096559255e-06,
      "loss": 0.0002,
      "reward": 0.7954241424798966,
      "reward_std": 0.17211202825419605,
      "rewards/accuracy_reward": 0.7954241424798966,
      "rewards/format_reward": 0.0,
      "step": 110
    },
    {
      "completion_length": 552.2967874526978,
      "epoch": 1.7910447761194028,
      "grad_norm": 0.22947834432125092,
      "kl": 0.003882312774658203,
      "learning_rate": 1.2653483024396534e-06,
      "loss": 0.0002,
      "reward": 0.7954241406172514,
      "reward_std": 0.1543394286185503,
      "rewards/accuracy_reward": 0.7954241406172514,
      "rewards/format_reward": 0.0,
      "step": 120
    },
    {
      "completion_length": 534.2573900222778,
      "epoch": 1.9402985074626866,
      "grad_norm": 0.14124220609664917,
      "kl": 0.004029464721679687,
      "learning_rate": 1.0116477683142654e-06,
      "loss": 0.0002,
      "reward": 0.8051339661702513,
      "reward_std": 0.15696909232065082,
      "rewards/accuracy_reward": 0.8051339661702513,
      "rewards/format_reward": 0.0,
      "step": 130
    },
    {
      "completion_length": 544.0703746795655,
      "epoch": 2.08955223880597,
      "grad_norm": 0.10850396007299423,
      "kl": 0.003820991516113281,
      "learning_rate": 7.727855696304945e-07,
      "loss": 0.0002,
      "reward": 0.8154018180444836,
      "reward_std": 0.15202449564822018,
      "rewards/accuracy_reward": 0.8154018180444836,
      "rewards/format_reward": 0.0,
      "step": 140
    },
    {
      "completion_length": 549.2021450042724,
      "epoch": 2.2388059701492535,
      "grad_norm": 0.11342689394950867,
      "kl": 0.0039272308349609375,
      "learning_rate": 5.560194134252441e-07,
      "loss": 0.0002,
      "reward": 0.795312536507845,
      "reward_std": 0.158915360737592,
      "rewards/accuracy_reward": 0.795312536507845,
      "rewards/format_reward": 0.0,
      "step": 150
    },
    {
      "completion_length": 532.3224586486816,
      "epoch": 2.388059701492537,
      "grad_norm": 0.14040400087833405,
      "kl": 0.004098033905029297,
      "learning_rate": 3.67935629665842e-07,
      "loss": 0.0002,
      "reward": 0.816071467846632,
      "reward_std": 0.1492480550892651,
      "rewards/accuracy_reward": 0.816071467846632,
      "rewards/format_reward": 0.0,
      "step": 160
    },
    {
      "completion_length": 539.504601097107,
      "epoch": 2.5373134328358207,
      "grad_norm": 0.09679315984249115,
      "kl": 0.0059491157531738285,
      "learning_rate": 2.1424904894683168e-07,
      "loss": 0.0002,
      "reward": 0.8157366402447224,
      "reward_std": 0.1421221026685089,
      "rewards/accuracy_reward": 0.8157366402447224,
      "rewards/format_reward": 0.0,
      "step": 170
    },
    {
      "completion_length": 547.4501371383667,
      "epoch": 2.6865671641791042,
      "grad_norm": 0.10368100553750992,
      "kl": 0.003998279571533203,
      "learning_rate": 9.962936025419756e-08,
      "loss": 0.0002,
      "reward": 0.8022321790456772,
      "reward_std": 0.1627280049957335,
      "rewards/accuracy_reward": 0.8022321790456772,
      "rewards/format_reward": 0.0,
      "step": 180
    },
    {
      "completion_length": 533.6452249526977,
      "epoch": 2.835820895522388,
      "grad_norm": 0.09484420716762543,
      "kl": 0.0039805412292480465,
      "learning_rate": 2.7559224828504036e-08,
      "loss": 0.0002,
      "reward": 0.8222098549827933,
      "reward_std": 0.14822139525786043,
      "rewards/accuracy_reward": 0.8222098549827933,
      "rewards/format_reward": 0.0,
      "step": 190
    },
    {
      "completion_length": 546.433731842041,
      "epoch": 2.9850746268656714,
      "grad_norm": 0.11888264119625092,
      "kl": 0.003950977325439453,
      "learning_rate": 2.2845726541309565e-10,
      "loss": 0.0002,
      "reward": 0.8064732495695353,
      "reward_std": 0.15783933801576494,
      "rewards/accuracy_reward": 0.8064732495695353,
      "rewards/format_reward": 0.0,
      "step": 200
    },
    {
      "epoch": 2.9850746268656714,
      "eval_completion_length": 538.2288205660307,
      "eval_kl": 0.004983488496366914,
      "eval_loss": 0.00019947961845900863,
      "eval_reward": 0.7005245058478176,
      "eval_reward_std": 0.19627383342989674,
      "eval_rewards/accuracy_reward": 0.7005245058478176,
      "eval_rewards/format_reward": 0.0,
      "eval_runtime": 7022.2087,
      "eval_samples_per_second": 0.712,
      "eval_steps_per_second": 0.102,
      "step": 200
    },
    {
      "completion_length": 547.8125114440918,
      "epoch": 3.0,
      "kl": 0.005582332611083984,
      "reward": 0.8750000018626451,
      "reward_std": 0.0765409953892231,
      "rewards/accuracy_reward": 0.8750000018626451,
      "rewards/format_reward": 0.0,
      "step": 201,
      "total_flos": 0.0,
      "train_loss": 0.0001433196965044953,
      "train_runtime": 49513.7568,
      "train_samples_per_second": 0.454,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 10,
  "max_steps": 201,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}