|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.9965010496850945, |
|
"eval_steps": 100, |
|
"global_step": 178, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"completion_length": 621.5997104644775, |
|
"epoch": 0.005598320503848845, |
|
"grad_norm": 0.018829375505447388, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.26314485468901694, |
|
"reward_std": 0.09563918481580913, |
|
"rewards/semantic_entropy_math_reward": 0.26314485468901694, |
|
"step": 1 |
|
}, |
|
{ |
|
"completion_length": 600.1235256195068, |
|
"epoch": 0.01119664100769769, |
|
"grad_norm": 0.021261176094412804, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2620287789031863, |
|
"reward_std": 0.09228823287412524, |
|
"rewards/semantic_entropy_math_reward": 0.2620287789031863, |
|
"step": 2 |
|
}, |
|
{ |
|
"completion_length": 640.598970413208, |
|
"epoch": 0.016794961511546535, |
|
"grad_norm": 0.0211239755153656, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2310267963912338, |
|
"reward_std": 0.08434095047414303, |
|
"rewards/semantic_entropy_math_reward": 0.2310267963912338, |
|
"step": 3 |
|
}, |
|
{ |
|
"completion_length": 643.0141410827637, |
|
"epoch": 0.02239328201539538, |
|
"grad_norm": 0.021212320774793625, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.22259425604715943, |
|
"reward_std": 0.09978899103589356, |
|
"rewards/semantic_entropy_math_reward": 0.22259425604715943, |
|
"step": 4 |
|
}, |
|
{ |
|
"completion_length": 623.704626083374, |
|
"epoch": 0.02799160251924423, |
|
"grad_norm": 0.01992746628820896, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3144841371104121, |
|
"reward_std": 0.09517159080132842, |
|
"rewards/semantic_entropy_math_reward": 0.3144841371104121, |
|
"step": 5 |
|
}, |
|
{ |
|
"completion_length": 602.6532821655273, |
|
"epoch": 0.03358992302309307, |
|
"grad_norm": 0.027432788163423538, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3053075410425663, |
|
"reward_std": 0.11108083021827042, |
|
"rewards/semantic_entropy_math_reward": 0.3053075410425663, |
|
"step": 6 |
|
}, |
|
{ |
|
"completion_length": 632.2589473724365, |
|
"epoch": 0.03918824352694192, |
|
"grad_norm": 0.024943526834249496, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2508680587634444, |
|
"reward_std": 0.06983336608391255, |
|
"rewards/semantic_entropy_math_reward": 0.2508680587634444, |
|
"step": 7 |
|
}, |
|
{ |
|
"completion_length": 578.0238265991211, |
|
"epoch": 0.04478656403079076, |
|
"grad_norm": 0.030510740354657173, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3060516007244587, |
|
"reward_std": 0.09902600082568824, |
|
"rewards/semantic_entropy_math_reward": 0.3060516007244587, |
|
"step": 8 |
|
}, |
|
{ |
|
"completion_length": 656.7507629394531, |
|
"epoch": 0.05038488453463961, |
|
"grad_norm": 0.03022916242480278, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2726934589445591, |
|
"reward_std": 0.08185443677939475, |
|
"rewards/semantic_entropy_math_reward": 0.2726934589445591, |
|
"step": 9 |
|
}, |
|
{ |
|
"completion_length": 687.5104351043701, |
|
"epoch": 0.05598320503848846, |
|
"grad_norm": 0.03694581612944603, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.24987599975429475, |
|
"reward_std": 0.08242391713429242, |
|
"rewards/semantic_entropy_math_reward": 0.24987599975429475, |
|
"step": 10 |
|
}, |
|
{ |
|
"completion_length": 726.5305213928223, |
|
"epoch": 0.0615815255423373, |
|
"grad_norm": 0.033508434891700745, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.21453373739495873, |
|
"reward_std": 0.08386916620656848, |
|
"rewards/semantic_entropy_math_reward": 0.21453373739495873, |
|
"step": 11 |
|
}, |
|
{ |
|
"completion_length": 744.8936157226562, |
|
"epoch": 0.06717984604618614, |
|
"grad_norm": 0.030294176191091537, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.196924609830603, |
|
"reward_std": 0.08265715674497187, |
|
"rewards/semantic_entropy_math_reward": 0.196924609830603, |
|
"step": 12 |
|
}, |
|
{ |
|
"completion_length": 744.174861907959, |
|
"epoch": 0.072778166550035, |
|
"grad_norm": 0.03118356317281723, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.19890873844269663, |
|
"reward_std": 0.08003655297216028, |
|
"rewards/semantic_entropy_math_reward": 0.19890873844269663, |
|
"step": 13 |
|
}, |
|
{ |
|
"completion_length": 765.6265029907227, |
|
"epoch": 0.07837648705388384, |
|
"grad_norm": 0.02831243723630905, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.18092759023420513, |
|
"reward_std": 0.06603722460567951, |
|
"rewards/semantic_entropy_math_reward": 0.18092759023420513, |
|
"step": 14 |
|
}, |
|
{ |
|
"completion_length": 758.721736907959, |
|
"epoch": 0.08397480755773268, |
|
"grad_norm": 0.03191084787249565, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.18340774439275265, |
|
"reward_std": 0.07208729872945696, |
|
"rewards/semantic_entropy_math_reward": 0.18340774439275265, |
|
"step": 15 |
|
}, |
|
{ |
|
"completion_length": 662.6317081451416, |
|
"epoch": 0.08957312806158152, |
|
"grad_norm": 0.03559156879782677, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.26698909467086196, |
|
"reward_std": 0.10032190917991102, |
|
"rewards/semantic_entropy_math_reward": 0.26698909467086196, |
|
"step": 16 |
|
}, |
|
{ |
|
"completion_length": 690.2068557739258, |
|
"epoch": 0.09517144856543037, |
|
"grad_norm": 0.040907151997089386, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.22792659618426114, |
|
"reward_std": 0.07990038132993504, |
|
"rewards/semantic_entropy_math_reward": 0.22792659618426114, |
|
"step": 17 |
|
}, |
|
{ |
|
"completion_length": 714.0669784545898, |
|
"epoch": 0.10076976906927922, |
|
"grad_norm": 0.07471004873514175, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.21217758627608418, |
|
"reward_std": 0.09499240922741592, |
|
"rewards/semantic_entropy_math_reward": 0.21217758627608418, |
|
"step": 18 |
|
}, |
|
{ |
|
"completion_length": 671.5535850524902, |
|
"epoch": 0.10636808957312806, |
|
"grad_norm": 0.07692024111747742, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.21106151002459228, |
|
"reward_std": 0.09371245314832777, |
|
"rewards/semantic_entropy_math_reward": 0.21106151002459228, |
|
"step": 19 |
|
}, |
|
{ |
|
"completion_length": 599.3058185577393, |
|
"epoch": 0.11196641007697691, |
|
"grad_norm": 0.09223277866840363, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2771577457897365, |
|
"reward_std": 0.10236490913666785, |
|
"rewards/semantic_entropy_math_reward": 0.2771577457897365, |
|
"step": 20 |
|
}, |
|
{ |
|
"completion_length": 512.9620628356934, |
|
"epoch": 0.11756473058082575, |
|
"grad_norm": 0.13926267623901367, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.34102183766663074, |
|
"reward_std": 0.1208265540190041, |
|
"rewards/semantic_entropy_math_reward": 0.34102183766663074, |
|
"step": 21 |
|
}, |
|
{ |
|
"completion_length": 479.3913764953613, |
|
"epoch": 0.1231630510846746, |
|
"grad_norm": 0.3304852247238159, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2891865149140358, |
|
"reward_std": 0.11506026284769177, |
|
"rewards/semantic_entropy_math_reward": 0.2891865149140358, |
|
"step": 22 |
|
}, |
|
{ |
|
"completion_length": 389.78051376342773, |
|
"epoch": 0.12876137158852344, |
|
"grad_norm": 0.6791955232620239, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3836805671453476, |
|
"reward_std": 0.12086579436436296, |
|
"rewards/semantic_entropy_math_reward": 0.3836805671453476, |
|
"step": 23 |
|
}, |
|
{ |
|
"completion_length": 364.2626552581787, |
|
"epoch": 0.13435969209237228, |
|
"grad_norm": 1.3652504682540894, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.32800100184977055, |
|
"reward_std": 0.11237839260138571, |
|
"rewards/semantic_entropy_math_reward": 0.32800100184977055, |
|
"step": 24 |
|
}, |
|
{ |
|
"completion_length": 343.3311061859131, |
|
"epoch": 0.13995801259622112, |
|
"grad_norm": 1.9864494800567627, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.27318949438631535, |
|
"reward_std": 0.10484480275772512, |
|
"rewards/semantic_entropy_math_reward": 0.27318949438631535, |
|
"step": 25 |
|
}, |
|
{ |
|
"completion_length": 363.51934814453125, |
|
"epoch": 0.14555633310007, |
|
"grad_norm": 5.861116886138916, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.18601191136986017, |
|
"reward_std": 0.07814609340857714, |
|
"rewards/semantic_entropy_math_reward": 0.18601191136986017, |
|
"step": 26 |
|
}, |
|
{ |
|
"completion_length": 637.8519458770752, |
|
"epoch": 0.15115465360391883, |
|
"grad_norm": 7.510809898376465, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.07031250384170562, |
|
"reward_std": 0.03756278438959271, |
|
"rewards/semantic_entropy_math_reward": 0.07031250384170562, |
|
"step": 27 |
|
}, |
|
{ |
|
"completion_length": 925.8988342285156, |
|
"epoch": 0.15675297410776767, |
|
"grad_norm": 3.905956745147705, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.0363343273056671, |
|
"reward_std": 0.018063054973026738, |
|
"rewards/semantic_entropy_math_reward": 0.0363343273056671, |
|
"step": 28 |
|
}, |
|
{ |
|
"completion_length": 894.2656402587891, |
|
"epoch": 0.16235129461161651, |
|
"grad_norm": 3.104583501815796, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.08692956482991576, |
|
"reward_std": 0.043345644196961075, |
|
"rewards/semantic_entropy_math_reward": 0.08692956482991576, |
|
"step": 29 |
|
}, |
|
{ |
|
"completion_length": 774.4866256713867, |
|
"epoch": 0.16794961511546536, |
|
"grad_norm": 17.407346725463867, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.13442460726946592, |
|
"reward_std": 0.07532014499884099, |
|
"rewards/semantic_entropy_math_reward": 0.13442460726946592, |
|
"step": 30 |
|
}, |
|
{ |
|
"completion_length": 434.0379524230957, |
|
"epoch": 0.1735479356193142, |
|
"grad_norm": 7.19148588180542, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.2876984179019928, |
|
"reward_std": 0.108892708318308, |
|
"rewards/semantic_entropy_math_reward": 0.2876984179019928, |
|
"step": 31 |
|
}, |
|
{ |
|
"completion_length": 405.40477180480957, |
|
"epoch": 0.17914625612316304, |
|
"grad_norm": 1.139600157737732, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.34771826211363077, |
|
"reward_std": 0.13636958738788962, |
|
"rewards/semantic_entropy_math_reward": 0.34771826211363077, |
|
"step": 32 |
|
}, |
|
{ |
|
"completion_length": 463.8638458251953, |
|
"epoch": 0.1847445766270119, |
|
"grad_norm": 1.6658934354782104, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.35949901584535837, |
|
"reward_std": 0.12283765664324164, |
|
"rewards/semantic_entropy_math_reward": 0.35949901584535837, |
|
"step": 33 |
|
}, |
|
{ |
|
"completion_length": 486.4196491241455, |
|
"epoch": 0.19034289713086075, |
|
"grad_norm": 1.7289444208145142, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3540426651015878, |
|
"reward_std": 0.10729875811375678, |
|
"rewards/semantic_entropy_math_reward": 0.3540426651015878, |
|
"step": 34 |
|
}, |
|
{ |
|
"completion_length": 529.6004600524902, |
|
"epoch": 0.1959412176347096, |
|
"grad_norm": 2.8688220977783203, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3628472303971648, |
|
"reward_std": 0.11101956386119127, |
|
"rewards/semantic_entropy_math_reward": 0.3628472303971648, |
|
"step": 35 |
|
}, |
|
{ |
|
"completion_length": 517.907751083374, |
|
"epoch": 0.20153953813855843, |
|
"grad_norm": 2.089806318283081, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3950892863795161, |
|
"reward_std": 0.11218473897315562, |
|
"rewards/semantic_entropy_math_reward": 0.3950892863795161, |
|
"step": 36 |
|
}, |
|
{ |
|
"completion_length": 558.5573024749756, |
|
"epoch": 0.20713785864240727, |
|
"grad_norm": 11.361101150512695, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3583829468116164, |
|
"reward_std": 0.11303460272029042, |
|
"rewards/semantic_entropy_math_reward": 0.3583829468116164, |
|
"step": 37 |
|
}, |
|
{ |
|
"completion_length": 640.2663822174072, |
|
"epoch": 0.21273617914625612, |
|
"grad_norm": 9.671446800231934, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.30580358393490314, |
|
"reward_std": 0.09917579509783536, |
|
"rewards/semantic_entropy_math_reward": 0.30580358393490314, |
|
"step": 38 |
|
}, |
|
{ |
|
"completion_length": 656.6250057220459, |
|
"epoch": 0.21833449965010496, |
|
"grad_norm": 4.101109981536865, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.25942460889928043, |
|
"reward_std": 0.08745632297359407, |
|
"rewards/semantic_entropy_math_reward": 0.25942460889928043, |
|
"step": 39 |
|
}, |
|
{ |
|
"completion_length": 694.5468864440918, |
|
"epoch": 0.22393282015395383, |
|
"grad_norm": 1.9280970096588135, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.21515377657487988, |
|
"reward_std": 0.10014541284181178, |
|
"rewards/semantic_entropy_math_reward": 0.21515377657487988, |
|
"step": 40 |
|
}, |
|
{ |
|
"completion_length": 682.7931613922119, |
|
"epoch": 0.22953114065780267, |
|
"grad_norm": 1.6009750366210938, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.24652778403833508, |
|
"reward_std": 0.09196118649560958, |
|
"rewards/semantic_entropy_math_reward": 0.24652778403833508, |
|
"step": 41 |
|
}, |
|
{ |
|
"completion_length": 691.9077568054199, |
|
"epoch": 0.2351294611616515, |
|
"grad_norm": 0.7951090335845947, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.24776786682195961, |
|
"reward_std": 0.09380459249950945, |
|
"rewards/semantic_entropy_math_reward": 0.24776786682195961, |
|
"step": 42 |
|
}, |
|
{ |
|
"completion_length": 655.6860256195068, |
|
"epoch": 0.24072778166550035, |
|
"grad_norm": 0.602069079875946, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.25818453216925263, |
|
"reward_std": 0.08527852385304868, |
|
"rewards/semantic_entropy_math_reward": 0.25818453216925263, |
|
"step": 43 |
|
}, |
|
{ |
|
"completion_length": 615.9233703613281, |
|
"epoch": 0.2463261021693492, |
|
"grad_norm": 4.384374618530273, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3162202490493655, |
|
"reward_std": 0.09647805755957961, |
|
"rewards/semantic_entropy_math_reward": 0.3162202490493655, |
|
"step": 44 |
|
}, |
|
{ |
|
"completion_length": 572.6949520111084, |
|
"epoch": 0.25192442267319803, |
|
"grad_norm": 3.9872536659240723, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3339533871039748, |
|
"reward_std": 0.10575204784981906, |
|
"rewards/semantic_entropy_math_reward": 0.3339533871039748, |
|
"step": 45 |
|
}, |
|
{ |
|
"completion_length": 504.4799213409424, |
|
"epoch": 0.2575227431770469, |
|
"grad_norm": 0.722204327583313, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3819444477558136, |
|
"reward_std": 0.12700667465105653, |
|
"rewards/semantic_entropy_math_reward": 0.3819444477558136, |
|
"step": 46 |
|
}, |
|
{ |
|
"completion_length": 444.5267906188965, |
|
"epoch": 0.2631210636808957, |
|
"grad_norm": 1.130468487739563, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.37996032927185297, |
|
"reward_std": 0.11253955401480198, |
|
"rewards/semantic_entropy_math_reward": 0.37996032927185297, |
|
"step": 47 |
|
}, |
|
{ |
|
"completion_length": 446.88840103149414, |
|
"epoch": 0.26871938418474456, |
|
"grad_norm": 0.16186973452568054, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.38616071827709675, |
|
"reward_std": 0.12775122001767159, |
|
"rewards/semantic_entropy_math_reward": 0.38616071827709675, |
|
"step": 48 |
|
}, |
|
{ |
|
"completion_length": 465.04167556762695, |
|
"epoch": 0.2743177046885934, |
|
"grad_norm": 0.16776132583618164, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.33407739456743, |
|
"reward_std": 0.08897499740123749, |
|
"rewards/semantic_entropy_math_reward": 0.33407739456743, |
|
"step": 49 |
|
}, |
|
{ |
|
"completion_length": 447.424861907959, |
|
"epoch": 0.27991602519244224, |
|
"grad_norm": 0.3498786687850952, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3691716268658638, |
|
"reward_std": 0.11830427148379385, |
|
"rewards/semantic_entropy_math_reward": 0.3691716268658638, |
|
"step": 50 |
|
}, |
|
{ |
|
"completion_length": 418.9003047943115, |
|
"epoch": 0.28551434569629114, |
|
"grad_norm": 0.4594447612762451, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.36669147573411465, |
|
"reward_std": 0.11925438744947314, |
|
"rewards/semantic_entropy_math_reward": 0.36669147573411465, |
|
"step": 51 |
|
}, |
|
{ |
|
"completion_length": 423.0595302581787, |
|
"epoch": 0.29111266620014, |
|
"grad_norm": 1.4399406909942627, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.36941965389996767, |
|
"reward_std": 0.11310782423242927, |
|
"rewards/semantic_entropy_math_reward": 0.36941965389996767, |
|
"step": 52 |
|
}, |
|
{ |
|
"completion_length": 548.479923248291, |
|
"epoch": 0.2967109867039888, |
|
"grad_norm": 46.641822814941406, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.36706350184977055, |
|
"reward_std": 0.10590148437768221, |
|
"rewards/semantic_entropy_math_reward": 0.36706350184977055, |
|
"step": 53 |
|
}, |
|
{ |
|
"completion_length": 583.7537307739258, |
|
"epoch": 0.30230930720783766, |
|
"grad_norm": 3.891123056411743, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3219246044754982, |
|
"reward_std": 0.11164773069322109, |
|
"rewards/semantic_entropy_math_reward": 0.3219246044754982, |
|
"step": 54 |
|
}, |
|
{ |
|
"completion_length": 569.5848350524902, |
|
"epoch": 0.3079076277116865, |
|
"grad_norm": 5.51741886138916, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3412698497995734, |
|
"reward_std": 0.11707511683925986, |
|
"rewards/semantic_entropy_math_reward": 0.3412698497995734, |
|
"step": 55 |
|
}, |
|
{ |
|
"completion_length": 589.2455463409424, |
|
"epoch": 0.31350594821553535, |
|
"grad_norm": 2.6841273307800293, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.33382937777787447, |
|
"reward_std": 0.109538255026564, |
|
"rewards/semantic_entropy_math_reward": 0.33382937777787447, |
|
"step": 56 |
|
}, |
|
{ |
|
"completion_length": 536.4256038665771, |
|
"epoch": 0.3191042687193842, |
|
"grad_norm": 17.737791061401367, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4088541707023978, |
|
"reward_std": 0.12808961933478713, |
|
"rewards/semantic_entropy_math_reward": 0.4088541707023978, |
|
"step": 57 |
|
}, |
|
{ |
|
"completion_length": 424.6919708251953, |
|
"epoch": 0.32470258922323303, |
|
"grad_norm": 1.9888161420822144, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3665674636140466, |
|
"reward_std": 0.12649336433969438, |
|
"rewards/semantic_entropy_math_reward": 0.3665674636140466, |
|
"step": 58 |
|
}, |
|
{ |
|
"completion_length": 412.2708396911621, |
|
"epoch": 0.33030090972708187, |
|
"grad_norm": 0.3387698531150818, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3368055559694767, |
|
"reward_std": 0.1261643674224615, |
|
"rewards/semantic_entropy_math_reward": 0.3368055559694767, |
|
"step": 59 |
|
}, |
|
{ |
|
"completion_length": 390.2321529388428, |
|
"epoch": 0.3358992302309307, |
|
"grad_norm": 0.5467884540557861, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4055059552192688, |
|
"reward_std": 0.13236648589372635, |
|
"rewards/semantic_entropy_math_reward": 0.4055059552192688, |
|
"step": 60 |
|
}, |
|
{ |
|
"completion_length": 407.4642925262451, |
|
"epoch": 0.34149755073477955, |
|
"grad_norm": 0.5977727770805359, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.385416685603559, |
|
"reward_std": 0.12455174163915217, |
|
"rewards/semantic_entropy_math_reward": 0.385416685603559, |
|
"step": 61 |
|
}, |
|
{ |
|
"completion_length": 368.8139953613281, |
|
"epoch": 0.3470958712386284, |
|
"grad_norm": 0.7032583355903625, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3903769990429282, |
|
"reward_std": 0.14396332437172532, |
|
"rewards/semantic_entropy_math_reward": 0.3903769990429282, |
|
"step": 62 |
|
}, |
|
{ |
|
"completion_length": 399.3407802581787, |
|
"epoch": 0.35269419174247724, |
|
"grad_norm": 1.6613659858703613, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3821924654766917, |
|
"reward_std": 0.13627444114536047, |
|
"rewards/semantic_entropy_math_reward": 0.3821924654766917, |
|
"step": 63 |
|
}, |
|
{ |
|
"completion_length": 386.2135524749756, |
|
"epoch": 0.3582925122463261, |
|
"grad_norm": 0.297186940908432, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4055059542879462, |
|
"reward_std": 0.1174642383120954, |
|
"rewards/semantic_entropy_math_reward": 0.4055059542879462, |
|
"step": 64 |
|
}, |
|
{ |
|
"completion_length": 414.6153335571289, |
|
"epoch": 0.363890832750175, |
|
"grad_norm": 0.2781192660331726, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.37723215017467737, |
|
"reward_std": 0.1255739361513406, |
|
"rewards/semantic_entropy_math_reward": 0.37723215017467737, |
|
"step": 65 |
|
}, |
|
{ |
|
"completion_length": 402.4501533508301, |
|
"epoch": 0.3694891532540238, |
|
"grad_norm": 0.2593112587928772, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3885168796405196, |
|
"reward_std": 0.12460076110437512, |
|
"rewards/semantic_entropy_math_reward": 0.3885168796405196, |
|
"step": 66 |
|
}, |
|
{ |
|
"completion_length": 417.7031364440918, |
|
"epoch": 0.37508747375787266, |
|
"grad_norm": 0.24701371788978577, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.37400794960558414, |
|
"reward_std": 0.1167642290238291, |
|
"rewards/semantic_entropy_math_reward": 0.37400794960558414, |
|
"step": 67 |
|
}, |
|
{ |
|
"completion_length": 397.74777603149414, |
|
"epoch": 0.3806857942617215, |
|
"grad_norm": 0.2930716872215271, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4553571604192257, |
|
"reward_std": 0.11352779855951667, |
|
"rewards/semantic_entropy_math_reward": 0.4553571604192257, |
|
"step": 68 |
|
}, |
|
{ |
|
"completion_length": 415.2507495880127, |
|
"epoch": 0.38628411476557034, |
|
"grad_norm": 0.28370365500450134, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3591269953176379, |
|
"reward_std": 0.1410878817550838, |
|
"rewards/semantic_entropy_math_reward": 0.3591269953176379, |
|
"step": 69 |
|
}, |
|
{ |
|
"completion_length": 415.7284297943115, |
|
"epoch": 0.3918824352694192, |
|
"grad_norm": 0.25602635741233826, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.36582342255860567, |
|
"reward_std": 0.12171732308343053, |
|
"rewards/semantic_entropy_math_reward": 0.36582342255860567, |
|
"step": 70 |
|
}, |
|
{ |
|
"completion_length": 399.8206958770752, |
|
"epoch": 0.397480755773268, |
|
"grad_norm": 0.2549217641353607, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.462301604449749, |
|
"reward_std": 0.13242140784859657, |
|
"rewards/semantic_entropy_math_reward": 0.462301604449749, |
|
"step": 71 |
|
}, |
|
{ |
|
"completion_length": 405.83185386657715, |
|
"epoch": 0.40307907627711687, |
|
"grad_norm": 0.26034626364707947, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.38876488897949457, |
|
"reward_std": 0.12143889861181378, |
|
"rewards/semantic_entropy_math_reward": 0.38876488897949457, |
|
"step": 72 |
|
}, |
|
{ |
|
"completion_length": 389.92634773254395, |
|
"epoch": 0.4086773967809657, |
|
"grad_norm": 0.3175303041934967, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3635912863537669, |
|
"reward_std": 0.1250197091139853, |
|
"rewards/semantic_entropy_math_reward": 0.3635912863537669, |
|
"step": 73 |
|
}, |
|
{ |
|
"completion_length": 405.5067024230957, |
|
"epoch": 0.41427571728481455, |
|
"grad_norm": 0.3240727484226227, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3521825484931469, |
|
"reward_std": 0.1221736806910485, |
|
"rewards/semantic_entropy_math_reward": 0.3521825484931469, |
|
"step": 74 |
|
}, |
|
{ |
|
"completion_length": 395.6436080932617, |
|
"epoch": 0.4198740377886634, |
|
"grad_norm": 0.6251773834228516, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.39942957274615765, |
|
"reward_std": 0.13623498589731753, |
|
"rewards/semantic_entropy_math_reward": 0.39942957274615765, |
|
"step": 75 |
|
}, |
|
{ |
|
"completion_length": 390.2894401550293, |
|
"epoch": 0.42547235829251223, |
|
"grad_norm": 0.8764235377311707, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.428075410425663, |
|
"reward_std": 0.12979253917001188, |
|
"rewards/semantic_entropy_math_reward": 0.428075410425663, |
|
"step": 76 |
|
}, |
|
{ |
|
"completion_length": 400.01861000061035, |
|
"epoch": 0.4310706787963611, |
|
"grad_norm": 0.8677442669868469, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.37165179289877415, |
|
"reward_std": 0.12289492227137089, |
|
"rewards/semantic_entropy_math_reward": 0.37165179289877415, |
|
"step": 77 |
|
}, |
|
{ |
|
"completion_length": 377.8936080932617, |
|
"epoch": 0.4366689993002099, |
|
"grad_norm": 0.9437581896781921, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.373139888048172, |
|
"reward_std": 0.1251775654964149, |
|
"rewards/semantic_entropy_math_reward": 0.373139888048172, |
|
"step": 78 |
|
}, |
|
{ |
|
"completion_length": 340.2700939178467, |
|
"epoch": 0.44226731980405876, |
|
"grad_norm": 2.5648627281188965, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3627232192084193, |
|
"reward_std": 0.15555713046342134, |
|
"rewards/semantic_entropy_math_reward": 0.3627232192084193, |
|
"step": 79 |
|
}, |
|
{ |
|
"completion_length": 268.25521659851074, |
|
"epoch": 0.44786564030790765, |
|
"grad_norm": 4.964110851287842, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.32589286658912897, |
|
"reward_std": 0.1293578795157373, |
|
"rewards/semantic_entropy_math_reward": 0.32589286658912897, |
|
"step": 80 |
|
}, |
|
{ |
|
"completion_length": 245.03795051574707, |
|
"epoch": 0.4534639608117565, |
|
"grad_norm": 2.0234344005584717, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.34858631901443005, |
|
"reward_std": 0.1360030840151012, |
|
"rewards/semantic_entropy_math_reward": 0.34858631901443005, |
|
"step": 81 |
|
}, |
|
{ |
|
"completion_length": 201.16071891784668, |
|
"epoch": 0.45906228131560534, |
|
"grad_norm": 10.089159965515137, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.34126985911279917, |
|
"reward_std": 0.14876779448240995, |
|
"rewards/semantic_entropy_math_reward": 0.34126985911279917, |
|
"step": 82 |
|
}, |
|
{ |
|
"completion_length": 163.12500381469727, |
|
"epoch": 0.4646606018194542, |
|
"grad_norm": 7.054829120635986, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.33816965017467737, |
|
"reward_std": 0.14539710199460387, |
|
"rewards/semantic_entropy_math_reward": 0.33816965017467737, |
|
"step": 83 |
|
}, |
|
{ |
|
"completion_length": 172.52455711364746, |
|
"epoch": 0.470258922323303, |
|
"grad_norm": 3.74849796295166, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3324652798473835, |
|
"reward_std": 0.14292793814092875, |
|
"rewards/semantic_entropy_math_reward": 0.3324652798473835, |
|
"step": 84 |
|
}, |
|
{ |
|
"completion_length": 190.39881229400635, |
|
"epoch": 0.47585724282715186, |
|
"grad_norm": 3.7803595066070557, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.36879961006343365, |
|
"reward_std": 0.14196715434081852, |
|
"rewards/semantic_entropy_math_reward": 0.36879961006343365, |
|
"step": 85 |
|
}, |
|
{ |
|
"completion_length": 202.71949863433838, |
|
"epoch": 0.4814555633310007, |
|
"grad_norm": 7.561800479888916, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3539186557754874, |
|
"reward_std": 0.13645589677616954, |
|
"rewards/semantic_entropy_math_reward": 0.3539186557754874, |
|
"step": 86 |
|
}, |
|
{ |
|
"completion_length": 192.92336750030518, |
|
"epoch": 0.48705388383484954, |
|
"grad_norm": 6.106121063232422, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.41753472574055195, |
|
"reward_std": 0.14698259718716145, |
|
"rewards/semantic_entropy_math_reward": 0.41753472574055195, |
|
"step": 87 |
|
}, |
|
{ |
|
"completion_length": 192.75223636627197, |
|
"epoch": 0.4926522043386984, |
|
"grad_norm": 7.5285186767578125, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.3844246091321111, |
|
"reward_std": 0.154494424816221, |
|
"rewards/semantic_entropy_math_reward": 0.3844246091321111, |
|
"step": 88 |
|
}, |
|
{ |
|
"completion_length": 196.7128028869629, |
|
"epoch": 0.4982505248425472, |
|
"grad_norm": 5.367832183837891, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.40811011102050543, |
|
"reward_std": 0.14661366492509842, |
|
"rewards/semantic_entropy_math_reward": 0.40811011102050543, |
|
"step": 89 |
|
}, |
|
{ |
|
"completion_length": 200.94940757751465, |
|
"epoch": 0.5038488453463961, |
|
"grad_norm": 18.42447853088379, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4272073404863477, |
|
"reward_std": 0.14780932199209929, |
|
"rewards/semantic_entropy_math_reward": 0.4272073404863477, |
|
"step": 90 |
|
}, |
|
{ |
|
"completion_length": 204.03497314453125, |
|
"epoch": 0.509447165850245, |
|
"grad_norm": 7.817193984985352, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.418278768658638, |
|
"reward_std": 0.13225925154983997, |
|
"rewards/semantic_entropy_math_reward": 0.418278768658638, |
|
"step": 91 |
|
}, |
|
{ |
|
"completion_length": 192.34896183013916, |
|
"epoch": 0.5150454863540938, |
|
"grad_norm": 6.789738655090332, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.47457837499678135, |
|
"reward_std": 0.14893107814714313, |
|
"rewards/semantic_entropy_math_reward": 0.47457837499678135, |
|
"step": 92 |
|
}, |
|
{ |
|
"completion_length": 210.90179061889648, |
|
"epoch": 0.5206438068579426, |
|
"grad_norm": 8.528867721557617, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.41455854661762714, |
|
"reward_std": 0.1263797995634377, |
|
"rewards/semantic_entropy_math_reward": 0.41455854661762714, |
|
"step": 93 |
|
}, |
|
{ |
|
"completion_length": 222.63169956207275, |
|
"epoch": 0.5262421273617914, |
|
"grad_norm": 10.166954040527344, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.41530259046703577, |
|
"reward_std": 0.1387968505732715, |
|
"rewards/semantic_entropy_math_reward": 0.41530259046703577, |
|
"step": 94 |
|
}, |
|
{ |
|
"completion_length": 208.9508981704712, |
|
"epoch": 0.5318404478656403, |
|
"grad_norm": 8.634490966796875, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.45324901584535837, |
|
"reward_std": 0.1519172815605998, |
|
"rewards/semantic_entropy_math_reward": 0.45324901584535837, |
|
"step": 95 |
|
}, |
|
{ |
|
"completion_length": 240.5915231704712, |
|
"epoch": 0.5374387683694891, |
|
"grad_norm": 15.801514625549316, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4201389057561755, |
|
"reward_std": 0.15532761020585895, |
|
"rewards/semantic_entropy_math_reward": 0.4201389057561755, |
|
"step": 96 |
|
}, |
|
{ |
|
"completion_length": 288.0282793045044, |
|
"epoch": 0.543037088873338, |
|
"grad_norm": 7.69626522064209, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.49342758767306805, |
|
"reward_std": 0.1336272112093866, |
|
"rewards/semantic_entropy_math_reward": 0.49342758767306805, |
|
"step": 97 |
|
}, |
|
{ |
|
"completion_length": 570.5766410827637, |
|
"epoch": 0.5486354093771868, |
|
"grad_norm": 9.325577735900879, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4603174738585949, |
|
"reward_std": 0.14188312110491097, |
|
"rewards/semantic_entropy_math_reward": 0.4603174738585949, |
|
"step": 98 |
|
}, |
|
{ |
|
"completion_length": 924.8459968566895, |
|
"epoch": 0.5542337298810357, |
|
"grad_norm": 4.683976650238037, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4341518022119999, |
|
"reward_std": 0.15863031102344394, |
|
"rewards/semantic_entropy_math_reward": 0.4341518022119999, |
|
"step": 99 |
|
}, |
|
{ |
|
"completion_length": 975.7284393310547, |
|
"epoch": 0.5598320503848845, |
|
"grad_norm": 6.508670806884766, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.43588790111243725, |
|
"reward_std": 0.1634892332367599, |
|
"rewards/semantic_entropy_math_reward": 0.43588790111243725, |
|
"step": 100 |
|
}, |
|
{ |
|
"completion_length": 979.6651954650879, |
|
"epoch": 0.5654303708887334, |
|
"grad_norm": 6.556097507476807, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4521329505369067, |
|
"reward_std": 0.16528919152915478, |
|
"rewards/semantic_entropy_math_reward": 0.4521329505369067, |
|
"step": 101 |
|
}, |
|
{ |
|
"completion_length": 966.6339378356934, |
|
"epoch": 0.5710286913925823, |
|
"grad_norm": 1.9696649312973022, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.45213295705616474, |
|
"reward_std": 0.1434005326591432, |
|
"rewards/semantic_entropy_math_reward": 0.45213295705616474, |
|
"step": 102 |
|
}, |
|
{ |
|
"completion_length": 933.5625114440918, |
|
"epoch": 0.5766270118964311, |
|
"grad_norm": 2.5271127223968506, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4157986231148243, |
|
"reward_std": 0.15396162681281567, |
|
"rewards/semantic_entropy_math_reward": 0.4157986231148243, |
|
"step": 103 |
|
}, |
|
{ |
|
"completion_length": 609.2589454650879, |
|
"epoch": 0.58222533240028, |
|
"grad_norm": 2.110628366470337, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5197172742336988, |
|
"reward_std": 0.1446234486065805, |
|
"rewards/semantic_entropy_math_reward": 0.5197172742336988, |
|
"step": 104 |
|
}, |
|
{ |
|
"completion_length": 299.1971788406372, |
|
"epoch": 0.5878236529041287, |
|
"grad_norm": 1.4388288259506226, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.49330358020961285, |
|
"reward_std": 0.15013255970552564, |
|
"rewards/semantic_entropy_math_reward": 0.49330358020961285, |
|
"step": 105 |
|
}, |
|
{ |
|
"completion_length": 281.92931747436523, |
|
"epoch": 0.5934219734079776, |
|
"grad_norm": 1.1156909465789795, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5328621100634336, |
|
"reward_std": 0.16049590334296227, |
|
"rewards/semantic_entropy_math_reward": 0.5328621100634336, |
|
"step": 106 |
|
}, |
|
{ |
|
"completion_length": 277.87053871154785, |
|
"epoch": 0.5990202939118264, |
|
"grad_norm": 1.5050427913665771, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4991319552063942, |
|
"reward_std": 0.13887666864320636, |
|
"rewards/semantic_entropy_math_reward": 0.4991319552063942, |
|
"step": 107 |
|
}, |
|
{ |
|
"completion_length": 244.03050994873047, |
|
"epoch": 0.6046186144156753, |
|
"grad_norm": 2.5947532653808594, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5357143003493547, |
|
"reward_std": 0.15269030537456274, |
|
"rewards/semantic_entropy_math_reward": 0.5357143003493547, |
|
"step": 108 |
|
}, |
|
{ |
|
"completion_length": 137.79092693328857, |
|
"epoch": 0.6102169349195241, |
|
"grad_norm": 4.043277263641357, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5419146958738565, |
|
"reward_std": 0.1557740089483559, |
|
"rewards/semantic_entropy_math_reward": 0.5419146958738565, |
|
"step": 109 |
|
}, |
|
{ |
|
"completion_length": 107.52009153366089, |
|
"epoch": 0.615815255423373, |
|
"grad_norm": 1.8183155059814453, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5765129253268242, |
|
"reward_std": 0.12702399701811373, |
|
"rewards/semantic_entropy_math_reward": 0.5765129253268242, |
|
"step": 110 |
|
}, |
|
{ |
|
"completion_length": 96.88541841506958, |
|
"epoch": 0.6214135759272218, |
|
"grad_norm": 2.091888904571533, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6276041902601719, |
|
"reward_std": 0.13468103110790253, |
|
"rewards/semantic_entropy_math_reward": 0.6276041902601719, |
|
"step": 111 |
|
}, |
|
{ |
|
"completion_length": 138.0691990852356, |
|
"epoch": 0.6270118964310707, |
|
"grad_norm": 1.2132967710494995, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5658482331782579, |
|
"reward_std": 0.14125833846628666, |
|
"rewards/semantic_entropy_math_reward": 0.5658482331782579, |
|
"step": 112 |
|
}, |
|
{ |
|
"completion_length": 168.62128067016602, |
|
"epoch": 0.6326102169349195, |
|
"grad_norm": 1.3657702207565308, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5846974309533834, |
|
"reward_std": 0.12009491049684584, |
|
"rewards/semantic_entropy_math_reward": 0.5846974309533834, |
|
"step": 113 |
|
}, |
|
{ |
|
"completion_length": 172.63616180419922, |
|
"epoch": 0.6382085374387684, |
|
"grad_norm": 4.615123748779297, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6726190745830536, |
|
"reward_std": 0.14088664995506406, |
|
"rewards/semantic_entropy_math_reward": 0.6726190745830536, |
|
"step": 114 |
|
}, |
|
{ |
|
"completion_length": 184.50372314453125, |
|
"epoch": 0.6438068579426172, |
|
"grad_norm": 7.566853046417236, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.623387910425663, |
|
"reward_std": 0.1348249651491642, |
|
"rewards/semantic_entropy_math_reward": 0.623387910425663, |
|
"step": 115 |
|
}, |
|
{ |
|
"completion_length": 268.1763458251953, |
|
"epoch": 0.6494051784464661, |
|
"grad_norm": 3.787036657333374, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5989583469927311, |
|
"reward_std": 0.14310514647513628, |
|
"rewards/semantic_entropy_math_reward": 0.5989583469927311, |
|
"step": 116 |
|
}, |
|
{ |
|
"completion_length": 424.9270887374878, |
|
"epoch": 0.655003498950315, |
|
"grad_norm": 1.769083857536316, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6443452537059784, |
|
"reward_std": 0.1348385107703507, |
|
"rewards/semantic_entropy_math_reward": 0.6443452537059784, |
|
"step": 117 |
|
}, |
|
{ |
|
"completion_length": 627.7061138153076, |
|
"epoch": 0.6606018194541637, |
|
"grad_norm": 7.572482109069824, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6216518022119999, |
|
"reward_std": 0.12173205520957708, |
|
"rewards/semantic_entropy_math_reward": 0.6216518022119999, |
|
"step": 118 |
|
}, |
|
{ |
|
"completion_length": 597.2857189178467, |
|
"epoch": 0.6662001399580126, |
|
"grad_norm": 4.5395965576171875, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6326885130256414, |
|
"reward_std": 0.11796635785140097, |
|
"rewards/semantic_entropy_math_reward": 0.6326885130256414, |
|
"step": 119 |
|
}, |
|
{ |
|
"completion_length": 499.34674072265625, |
|
"epoch": 0.6717984604618614, |
|
"grad_norm": 9.828434944152832, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6607142873108387, |
|
"reward_std": 0.12943885754793882, |
|
"rewards/semantic_entropy_math_reward": 0.6607142873108387, |
|
"step": 120 |
|
}, |
|
{ |
|
"completion_length": 416.2641429901123, |
|
"epoch": 0.6773967809657103, |
|
"grad_norm": 4.24314022064209, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7173859365284443, |
|
"reward_std": 0.10959133435972035, |
|
"rewards/semantic_entropy_math_reward": 0.7173859365284443, |
|
"step": 121 |
|
}, |
|
{ |
|
"completion_length": 133.405508518219, |
|
"epoch": 0.6829951014695591, |
|
"grad_norm": 5.226346969604492, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6529017835855484, |
|
"reward_std": 0.13599514495581388, |
|
"rewards/semantic_entropy_math_reward": 0.6529017835855484, |
|
"step": 122 |
|
}, |
|
{ |
|
"completion_length": 108.45610427856445, |
|
"epoch": 0.688593421973408, |
|
"grad_norm": 4.747099876403809, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6951884962618351, |
|
"reward_std": 0.1218845120165497, |
|
"rewards/semantic_entropy_math_reward": 0.6951884962618351, |
|
"step": 123 |
|
}, |
|
{ |
|
"completion_length": 116.35714483261108, |
|
"epoch": 0.6941917424772568, |
|
"grad_norm": 5.495078086853027, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7322668917477131, |
|
"reward_std": 0.10345165804028511, |
|
"rewards/semantic_entropy_math_reward": 0.7322668917477131, |
|
"step": 124 |
|
}, |
|
{ |
|
"completion_length": 190.98735427856445, |
|
"epoch": 0.6997900629811057, |
|
"grad_norm": 4.758305072784424, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6990327574312687, |
|
"reward_std": 0.1072727048303932, |
|
"rewards/semantic_entropy_math_reward": 0.6990327574312687, |
|
"step": 125 |
|
}, |
|
{ |
|
"completion_length": 154.2269356250763, |
|
"epoch": 0.7053883834849545, |
|
"grad_norm": 4.895815372467041, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7566964458674192, |
|
"reward_std": 0.10861217533238232, |
|
"rewards/semantic_entropy_math_reward": 0.7566964458674192, |
|
"step": 126 |
|
}, |
|
{ |
|
"completion_length": 219.0520896911621, |
|
"epoch": 0.7109867039888034, |
|
"grad_norm": 5.073260307312012, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7007688824087381, |
|
"reward_std": 0.10281824320554733, |
|
"rewards/semantic_entropy_math_reward": 0.7007688824087381, |
|
"step": 127 |
|
}, |
|
{ |
|
"completion_length": 150.2425627708435, |
|
"epoch": 0.7165850244926522, |
|
"grad_norm": 6.930428981781006, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7514881230890751, |
|
"reward_std": 0.11211708444170654, |
|
"rewards/semantic_entropy_math_reward": 0.7514881230890751, |
|
"step": 128 |
|
}, |
|
{ |
|
"completion_length": 150.25967574119568, |
|
"epoch": 0.722183344996501, |
|
"grad_norm": 4.981149196624756, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7196180820465088, |
|
"reward_std": 0.1129911975003779, |
|
"rewards/semantic_entropy_math_reward": 0.7196180820465088, |
|
"step": 129 |
|
}, |
|
{ |
|
"completion_length": 129.3750023841858, |
|
"epoch": 0.72778166550035, |
|
"grad_norm": 5.079310417175293, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7373512163758278, |
|
"reward_std": 0.10283095168415457, |
|
"rewards/semantic_entropy_math_reward": 0.7373512163758278, |
|
"step": 130 |
|
}, |
|
{ |
|
"completion_length": 134.2485146522522, |
|
"epoch": 0.7333799860041987, |
|
"grad_norm": 5.917436599731445, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7232142984867096, |
|
"reward_std": 0.1203189252410084, |
|
"rewards/semantic_entropy_math_reward": 0.7232142984867096, |
|
"step": 131 |
|
}, |
|
{ |
|
"completion_length": 107.0967288017273, |
|
"epoch": 0.7389783065080476, |
|
"grad_norm": 8.360037803649902, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7357390988618135, |
|
"reward_std": 0.10952205723151565, |
|
"rewards/semantic_entropy_math_reward": 0.7357390988618135, |
|
"step": 132 |
|
}, |
|
{ |
|
"completion_length": 128.16592502593994, |
|
"epoch": 0.7445766270118964, |
|
"grad_norm": 7.823781490325928, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7218502275645733, |
|
"reward_std": 0.11286869831383228, |
|
"rewards/semantic_entropy_math_reward": 0.7218502275645733, |
|
"step": 133 |
|
}, |
|
{ |
|
"completion_length": 106.69270944595337, |
|
"epoch": 0.7501749475157453, |
|
"grad_norm": 7.141363143920898, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7478918954730034, |
|
"reward_std": 0.10708213225007057, |
|
"rewards/semantic_entropy_math_reward": 0.7478918954730034, |
|
"step": 134 |
|
}, |
|
{ |
|
"completion_length": 93.68006038665771, |
|
"epoch": 0.7557732680195941, |
|
"grad_norm": 5.968332767486572, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7960069701075554, |
|
"reward_std": 0.08689452323596925, |
|
"rewards/semantic_entropy_math_reward": 0.7960069701075554, |
|
"step": 135 |
|
}, |
|
{ |
|
"completion_length": 90.8816967010498, |
|
"epoch": 0.761371588523443, |
|
"grad_norm": 4.925920486450195, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7625248245894909, |
|
"reward_std": 0.10826884070411325, |
|
"rewards/semantic_entropy_math_reward": 0.7625248245894909, |
|
"step": 136 |
|
}, |
|
{ |
|
"completion_length": 106.96056652069092, |
|
"epoch": 0.7669699090272918, |
|
"grad_norm": 6.21066427230835, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7124256156384945, |
|
"reward_std": 0.12395848939195275, |
|
"rewards/semantic_entropy_math_reward": 0.7124256156384945, |
|
"step": 137 |
|
}, |
|
{ |
|
"completion_length": 113.4546160697937, |
|
"epoch": 0.7725682295311407, |
|
"grad_norm": 4.43303108215332, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7115575522184372, |
|
"reward_std": 0.12078050547279418, |
|
"rewards/semantic_entropy_math_reward": 0.7115575522184372, |
|
"step": 138 |
|
}, |
|
{ |
|
"completion_length": 140.69047927856445, |
|
"epoch": 0.7781665500349895, |
|
"grad_norm": 3.2451171875, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.694320447742939, |
|
"reward_std": 0.1306492048315704, |
|
"rewards/semantic_entropy_math_reward": 0.694320447742939, |
|
"step": 139 |
|
}, |
|
{ |
|
"completion_length": 142.95833444595337, |
|
"epoch": 0.7837648705388384, |
|
"grad_norm": 6.136082649230957, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7182539887726307, |
|
"reward_std": 0.15217532915994525, |
|
"rewards/semantic_entropy_math_reward": 0.7182539887726307, |
|
"step": 140 |
|
}, |
|
{ |
|
"completion_length": 142.6287226676941, |
|
"epoch": 0.7893631910426872, |
|
"grad_norm": 2.892977476119995, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.7118055783212185, |
|
"reward_std": 0.13839181000366807, |
|
"rewards/semantic_entropy_math_reward": 0.7118055783212185, |
|
"step": 141 |
|
}, |
|
{ |
|
"completion_length": 157.30431842803955, |
|
"epoch": 0.794961511546536, |
|
"grad_norm": 4.3441619873046875, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6943204514682293, |
|
"reward_std": 0.14929893938824534, |
|
"rewards/semantic_entropy_math_reward": 0.6943204514682293, |
|
"step": 142 |
|
}, |
|
{ |
|
"completion_length": 168.61533069610596, |
|
"epoch": 0.8005598320503848, |
|
"grad_norm": 1.7150957584381104, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.686507947742939, |
|
"reward_std": 0.1514957039617002, |
|
"rewards/semantic_entropy_math_reward": 0.686507947742939, |
|
"step": 143 |
|
}, |
|
{ |
|
"completion_length": 162.0543179512024, |
|
"epoch": 0.8061581525542337, |
|
"grad_norm": 1.2334315776824951, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6346726343035698, |
|
"reward_std": 0.14308730140328407, |
|
"rewards/semantic_entropy_math_reward": 0.6346726343035698, |
|
"step": 144 |
|
}, |
|
{ |
|
"completion_length": 265.3921184539795, |
|
"epoch": 0.8117564730580826, |
|
"grad_norm": 2.8437411785125732, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5422867126762867, |
|
"reward_std": 0.17818555515259504, |
|
"rewards/semantic_entropy_math_reward": 0.5422867126762867, |
|
"step": 145 |
|
}, |
|
{ |
|
"completion_length": 358.7314052581787, |
|
"epoch": 0.8173547935619314, |
|
"grad_norm": 1.9524171352386475, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.4378720261156559, |
|
"reward_std": 0.194162187166512, |
|
"rewards/semantic_entropy_math_reward": 0.4378720261156559, |
|
"step": 146 |
|
}, |
|
{ |
|
"completion_length": 268.26190662384033, |
|
"epoch": 0.8229531140657803, |
|
"grad_norm": 0.9712325930595398, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5739087481051683, |
|
"reward_std": 0.19231022708117962, |
|
"rewards/semantic_entropy_math_reward": 0.5739087481051683, |
|
"step": 147 |
|
}, |
|
{ |
|
"completion_length": 247.8772373199463, |
|
"epoch": 0.8285514345696291, |
|
"grad_norm": 0.29546335339546204, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5668402947485447, |
|
"reward_std": 0.14728267351165414, |
|
"rewards/semantic_entropy_math_reward": 0.5668402947485447, |
|
"step": 148 |
|
}, |
|
{ |
|
"completion_length": 286.6562547683716, |
|
"epoch": 0.834149755073478, |
|
"grad_norm": 0.19167956709861755, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5652281939983368, |
|
"reward_std": 0.16519550560042262, |
|
"rewards/semantic_entropy_math_reward": 0.5652281939983368, |
|
"step": 149 |
|
}, |
|
{ |
|
"completion_length": 299.7745580673218, |
|
"epoch": 0.8397480755773268, |
|
"grad_norm": 0.16220510005950928, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.563988110050559, |
|
"reward_std": 0.16206924617290497, |
|
"rewards/semantic_entropy_math_reward": 0.563988110050559, |
|
"step": 150 |
|
}, |
|
{ |
|
"completion_length": 323.96875381469727, |
|
"epoch": 0.8453463960811757, |
|
"grad_norm": 0.11583292484283447, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5203373022377491, |
|
"reward_std": 0.17140463134273887, |
|
"rewards/semantic_entropy_math_reward": 0.5203373022377491, |
|
"step": 151 |
|
}, |
|
{ |
|
"completion_length": 333.31994819641113, |
|
"epoch": 0.8509447165850245, |
|
"grad_norm": 0.09664779901504517, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.503100199624896, |
|
"reward_std": 0.17030652752146125, |
|
"rewards/semantic_entropy_math_reward": 0.503100199624896, |
|
"step": 152 |
|
}, |
|
{ |
|
"completion_length": 319.05804443359375, |
|
"epoch": 0.8565430370888734, |
|
"grad_norm": 0.08762703090906143, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5343502033501863, |
|
"reward_std": 0.1655228640884161, |
|
"rewards/semantic_entropy_math_reward": 0.5343502033501863, |
|
"step": 153 |
|
}, |
|
{ |
|
"completion_length": 346.2261962890625, |
|
"epoch": 0.8621413575927221, |
|
"grad_norm": 0.07872086763381958, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5121527835726738, |
|
"reward_std": 0.15845369640737772, |
|
"rewards/semantic_entropy_math_reward": 0.5121527835726738, |
|
"step": 154 |
|
}, |
|
{ |
|
"completion_length": 357.5372085571289, |
|
"epoch": 0.867739678096571, |
|
"grad_norm": 0.0757875069975853, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5648561716079712, |
|
"reward_std": 0.1613363642245531, |
|
"rewards/semantic_entropy_math_reward": 0.5648561716079712, |
|
"step": 155 |
|
}, |
|
{ |
|
"completion_length": 353.3415222167969, |
|
"epoch": 0.8733379986004198, |
|
"grad_norm": 0.09012839198112488, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.49231152422726154, |
|
"reward_std": 0.1589321019127965, |
|
"rewards/semantic_entropy_math_reward": 0.49231152422726154, |
|
"step": 156 |
|
}, |
|
{ |
|
"completion_length": 354.72173500061035, |
|
"epoch": 0.8789363191042687, |
|
"grad_norm": 0.24965420365333557, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5529513861984015, |
|
"reward_std": 0.16521730786189437, |
|
"rewards/semantic_entropy_math_reward": 0.5529513861984015, |
|
"step": 157 |
|
}, |
|
{ |
|
"completion_length": 348.0320014953613, |
|
"epoch": 0.8845346396081175, |
|
"grad_norm": 0.06840746104717255, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5613839365541935, |
|
"reward_std": 0.1481709172949195, |
|
"rewards/semantic_entropy_math_reward": 0.5613839365541935, |
|
"step": 158 |
|
}, |
|
{ |
|
"completion_length": 350.35715103149414, |
|
"epoch": 0.8901329601119664, |
|
"grad_norm": 0.13703550398349762, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5834573470056057, |
|
"reward_std": 0.164536252617836, |
|
"rewards/semantic_entropy_math_reward": 0.5834573470056057, |
|
"step": 159 |
|
}, |
|
{ |
|
"completion_length": 358.2440528869629, |
|
"epoch": 0.8957312806158153, |
|
"grad_norm": 0.09526026993989944, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5683283749967813, |
|
"reward_std": 0.14862215495668352, |
|
"rewards/semantic_entropy_math_reward": 0.5683283749967813, |
|
"step": 160 |
|
}, |
|
{ |
|
"completion_length": 374.41518211364746, |
|
"epoch": 0.9013296011196641, |
|
"grad_norm": 0.09629662334918976, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5731646921485662, |
|
"reward_std": 0.15710427099838853, |
|
"rewards/semantic_entropy_math_reward": 0.5731646921485662, |
|
"step": 161 |
|
}, |
|
{ |
|
"completion_length": 359.19643211364746, |
|
"epoch": 0.906927921623513, |
|
"grad_norm": 0.10354594886302948, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.5668402947485447, |
|
"reward_std": 0.1442330675199628, |
|
"rewards/semantic_entropy_math_reward": 0.5668402947485447, |
|
"step": 162 |
|
}, |
|
{ |
|
"completion_length": 363.5342330932617, |
|
"epoch": 0.9125262421273618, |
|
"grad_norm": 0.09924127161502838, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6071428768336773, |
|
"reward_std": 0.15955356042832136, |
|
"rewards/semantic_entropy_math_reward": 0.6071428768336773, |
|
"step": 163 |
|
}, |
|
{ |
|
"completion_length": 344.59301376342773, |
|
"epoch": 0.9181245626312107, |
|
"grad_norm": 0.10809105634689331, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.6684027947485447, |
|
"reward_std": 0.14918948477134109, |
|
"rewards/semantic_entropy_math_reward": 0.6684027947485447, |
|
"step": 164 |
|
}, |
|
{ |
|
"completion_length": 361.6019401550293, |
|
"epoch": 0.9237228831350595, |
|
"grad_norm": 0.08925337344408035, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.713541679084301, |
|
"reward_std": 0.14462757343426347, |
|
"rewards/semantic_entropy_math_reward": 0.713541679084301, |
|
"step": 165 |
|
}, |
|
{ |
|
"completion_length": 358.32738876342773, |
|
"epoch": 0.9293212036389084, |
|
"grad_norm": 0.10002636164426804, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.8019593358039856, |
|
"reward_std": 0.101166230160743, |
|
"rewards/semantic_entropy_math_reward": 0.8019593358039856, |
|
"step": 166 |
|
}, |
|
{ |
|
"completion_length": 349.5200939178467, |
|
"epoch": 0.9349195241427571, |
|
"grad_norm": 0.0706295296549797, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.8885168805718422, |
|
"reward_std": 0.0957920125219971, |
|
"rewards/semantic_entropy_math_reward": 0.8885168805718422, |
|
"step": 167 |
|
}, |
|
{ |
|
"completion_length": 357.4538764953613, |
|
"epoch": 0.940517844646606, |
|
"grad_norm": 0.04306298866868019, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.9391121119260788, |
|
"reward_std": 0.04490397102199495, |
|
"rewards/semantic_entropy_math_reward": 0.9391121119260788, |
|
"step": 168 |
|
}, |
|
{ |
|
"completion_length": 344.33408546447754, |
|
"epoch": 0.9461161651504548, |
|
"grad_norm": 0.05653183534741402, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.9515129216015339, |
|
"reward_std": 0.03180735826026648, |
|
"rewards/semantic_entropy_math_reward": 0.9515129216015339, |
|
"step": 169 |
|
}, |
|
{ |
|
"completion_length": 344.59078216552734, |
|
"epoch": 0.9517144856543037, |
|
"grad_norm": 0.033216774463653564, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.9871031977236271, |
|
"reward_std": 0.008495657471939921, |
|
"rewards/semantic_entropy_math_reward": 0.9871031977236271, |
|
"step": 170 |
|
}, |
|
{ |
|
"completion_length": 346.92560386657715, |
|
"epoch": 0.9573128061581525, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 1.0, |
|
"reward_std": 0.0, |
|
"rewards/semantic_entropy_math_reward": 1.0, |
|
"step": 171 |
|
}, |
|
{ |
|
"completion_length": 357.2983684539795, |
|
"epoch": 0.9629111266620014, |
|
"grad_norm": 0.001862065983004868, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.9986359179019928, |
|
"reward_std": 0.0021478806156665087, |
|
"rewards/semantic_entropy_math_reward": 0.9986359179019928, |
|
"step": 172 |
|
}, |
|
{ |
|
"completion_length": 338.4322986602783, |
|
"epoch": 0.9685094471658502, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 1.0, |
|
"reward_std": 0.0, |
|
"rewards/semantic_entropy_math_reward": 1.0, |
|
"step": 173 |
|
}, |
|
{ |
|
"completion_length": 340.3586368560791, |
|
"epoch": 0.9741077676696991, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 1.0, |
|
"reward_std": 0.0, |
|
"rewards/semantic_entropy_math_reward": 1.0, |
|
"step": 174 |
|
}, |
|
{ |
|
"completion_length": 351.07218170166016, |
|
"epoch": 0.979706088173548, |
|
"grad_norm": 0.005613424815237522, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.9986359179019928, |
|
"reward_std": 0.0021478806156665087, |
|
"rewards/semantic_entropy_math_reward": 0.9986359179019928, |
|
"step": 175 |
|
}, |
|
{ |
|
"completion_length": 346.41890716552734, |
|
"epoch": 0.9853044086773968, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 1.0, |
|
"reward_std": 0.0, |
|
"rewards/semantic_entropy_math_reward": 1.0, |
|
"step": 176 |
|
}, |
|
{ |
|
"completion_length": 352.03422927856445, |
|
"epoch": 0.9909027291812457, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 1.0, |
|
"reward_std": 0.0, |
|
"rewards/semantic_entropy_math_reward": 1.0, |
|
"step": 177 |
|
}, |
|
{ |
|
"completion_length": 361.8452491760254, |
|
"epoch": 0.9965010496850945, |
|
"grad_norm": 0.008886351250112057, |
|
"learning_rate": 1e-06, |
|
"loss": 0.0, |
|
"reward": 0.9972718358039856, |
|
"reward_std": 0.004295761231333017, |
|
"rewards/semantic_entropy_math_reward": 0.9972718358039856, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.9965010496850945, |
|
"step": 178, |
|
"total_flos": 0.0, |
|
"train_loss": 0.0, |
|
"train_runtime": 5.6459, |
|
"train_samples_per_second": 3542.367, |
|
"train_steps_per_second": 31.527 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 178, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 10, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |