{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9973333333333333,
  "eval_steps": 500,
  "global_step": 187,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 598.9556373596191,
      "epoch": 0.02666666666666667,
      "grad_norm": 9.657825469970703,
      "kl": 0.00011155605316162109,
      "learning_rate": 7.894736842105263e-07,
      "loss": 0.0,
      "reward": 0.6343750096857548,
      "reward_std": 0.32928847298026087,
      "rewards/accuracy_reward": 0.6337500102818012,
      "rewards/format_reward": 0.0006250000093132258,
      "step": 5
    },
    {
      "completion_length": 605.9112594604492,
      "epoch": 0.05333333333333334,
      "grad_norm": 1.3849772214889526,
      "kl": 0.00035059452056884766,
      "learning_rate": 1.5789473684210526e-06,
      "loss": 0.0,
      "reward": 0.6206250086426734,
      "reward_std": 0.30763257276266814,
      "rewards/accuracy_reward": 0.6200000092387199,
      "rewards/format_reward": 0.0006250000093132258,
      "step": 10
    },
    {
      "completion_length": 640.480638885498,
      "epoch": 0.08,
      "grad_norm": 0.2819180488586426,
      "kl": 0.0017528533935546875,
      "learning_rate": 2.368421052631579e-06,
      "loss": 0.0001,
      "reward": 0.6893750086426735,
      "reward_std": 0.28186101242899897,
      "rewards/accuracy_reward": 0.6893750086426735,
      "rewards/format_reward": 0.0,
      "step": 15
    },
    {
      "completion_length": 641.2750106811524,
      "epoch": 0.10666666666666667,
      "grad_norm": 0.2619169354438782,
      "kl": 0.003242778778076172,
      "learning_rate": 2.99973774136912e-06,
      "loss": 0.0001,
      "reward": 0.6918750137090683,
      "reward_std": 0.280092066898942,
      "rewards/accuracy_reward": 0.6918750137090683,
      "rewards/format_reward": 0.0,
      "step": 20
    },
    {
      "completion_length": 651.3437652587891,
      "epoch": 0.13333333333333333,
      "grad_norm": 0.7606603503227234,
      "kl": 0.0034598350524902345,
      "learning_rate": 2.9905683148398643e-06,
      "loss": 0.0001,
      "reward": 0.6881250128149986,
      "reward_std": 0.2426654167473316,
      "rewards/accuracy_reward": 0.6881250128149986,
      "rewards/format_reward": 0.0,
      "step": 25
    },
    {
      "completion_length": 618.8025131225586,
      "epoch": 0.16,
      "grad_norm": 18.173175811767578,
      "kl": 1.1331443786621094,
      "learning_rate": 2.968377518846473e-06,
      "loss": 0.0453,
      "reward": 0.7312500067055225,
      "reward_std": 0.21250597070902585,
      "rewards/accuracy_reward": 0.7312500067055225,
      "rewards/format_reward": 0.0,
      "step": 30
    },
    {
      "completion_length": 594.2168823242188,
      "epoch": 0.18666666666666668,
      "grad_norm": 50.74555587768555,
      "kl": 0.35082550048828126,
      "learning_rate": 2.933359208679211e-06,
      "loss": 0.014,
      "reward": 0.7662500098347664,
      "reward_std": 0.19725151248276235,
      "rewards/accuracy_reward": 0.7662500098347664,
      "rewards/format_reward": 0.0,
      "step": 35
    },
    {
      "completion_length": 605.8262619018554,
      "epoch": 0.21333333333333335,
      "grad_norm": 0.8362184762954712,
      "kl": 0.11929092407226563,
      "learning_rate": 2.88581929876693e-06,
      "loss": 0.0048,
      "reward": 0.7368750087916851,
      "reward_std": 0.19084826521575451,
      "rewards/accuracy_reward": 0.7368750087916851,
      "rewards/format_reward": 0.0,
      "step": 40
    },
    {
      "completion_length": 587.2862632751464,
      "epoch": 0.24,
      "grad_norm": 17.15748405456543,
      "kl": 0.3153179168701172,
      "learning_rate": 2.8261730902569146e-06,
      "loss": 0.0126,
      "reward": 0.771250007301569,
      "reward_std": 0.1733900425955653,
      "rewards/accuracy_reward": 0.771250007301569,
      "rewards/format_reward": 0.0,
      "step": 45
    },
    {
      "completion_length": 587.2462539672852,
      "epoch": 0.26666666666666666,
      "grad_norm": 1.1150826215744019,
      "kl": 0.0309173583984375,
      "learning_rate": 2.7549416430096295e-06,
      "loss": 0.0012,
      "reward": 0.7643750108778476,
      "reward_std": 0.20076387487351893,
      "rewards/accuracy_reward": 0.7643750108778476,
      "rewards/format_reward": 0.0,
      "step": 50
    },
    {
      "completion_length": 596.9281379699707,
      "epoch": 0.29333333333333333,
      "grad_norm": 0.9277231693267822,
      "kl": 0.026005935668945313,
      "learning_rate": 2.6727472237020448e-06,
      "loss": 0.001,
      "reward": 0.7562500096857547,
      "reward_std": 0.20265843532979488,
      "rewards/accuracy_reward": 0.7562500096857547,
      "rewards/format_reward": 0.0,
      "step": 55
    },
    {
      "completion_length": 557.7656349182129,
      "epoch": 0.32,
      "grad_norm": 78.5399398803711,
      "kl": 0.11486587524414063,
      "learning_rate": 2.58030786980419e-06,
      "loss": 0.0046,
      "reward": 0.7831250101327896,
      "reward_std": 0.16795916706323624,
      "rewards/accuracy_reward": 0.7831250101327896,
      "rewards/format_reward": 0.0,
      "step": 60
    },
    {
      "completion_length": 540.9593856811523,
      "epoch": 0.3466666666666667,
      "grad_norm": 2.0994198322296143,
      "kl": 0.02927398681640625,
      "learning_rate": 2.4784311169171817e-06,
      "loss": 0.0012,
      "reward": 0.8068750128149986,
      "reward_std": 0.1683789264410734,
      "rewards/accuracy_reward": 0.8068750128149986,
      "rewards/format_reward": 0.0,
      "step": 65
    },
    {
      "completion_length": 608.050633239746,
      "epoch": 0.37333333333333335,
      "grad_norm": 0.15975548326969147,
      "kl": 11.121460723876954,
      "learning_rate": 2.368006944269709e-06,
      "loss": 0.4441,
      "reward": 0.756250013038516,
      "reward_std": 0.19869754556566477,
      "rewards/accuracy_reward": 0.756250013038516,
      "rewards/format_reward": 0.0,
      "step": 70
    },
    {
      "completion_length": 556.0112609863281,
      "epoch": 0.4,
      "grad_norm": 0.257069855928421,
      "kl": 0.0086212158203125,
      "learning_rate": 2.25e-06,
      "loss": 0.0003,
      "reward": 0.8018750116229058,
      "reward_std": 0.16034170743077994,
      "rewards/accuracy_reward": 0.8018750116229058,
      "rewards/format_reward": 0.0,
      "step": 75
    },
    {
      "completion_length": 582.3343910217285,
      "epoch": 0.4266666666666667,
      "grad_norm": 0.13077034056186676,
      "kl": 0.006656646728515625,
      "learning_rate": 2.1254411741419925e-06,
      "loss": 0.0003,
      "reward": 0.7481250077486038,
      "reward_std": 0.1724422551691532,
      "rewards/accuracy_reward": 0.7481250077486038,
      "rewards/format_reward": 0.0,
      "step": 80
    },
    {
      "completion_length": 565.938136291504,
      "epoch": 0.4533333333333333,
      "grad_norm": 0.8749185800552368,
      "kl": 1.2233882904052735,
      "learning_rate": 1.9954185929327507e-06,
      "loss": 0.0489,
      "reward": 0.7856250107288361,
      "reward_std": 0.17018861770629884,
      "rewards/accuracy_reward": 0.7856250107288361,
      "rewards/format_reward": 0.0,
      "step": 85
    },
    {
      "completion_length": 597.7000122070312,
      "epoch": 0.48,
      "grad_norm": 0.44738760590553284,
      "kl": 0.14383926391601562,
      "learning_rate": 1.8610681131134598e-06,
      "loss": 0.0058,
      "reward": 0.7462500132620334,
      "reward_std": 0.18847194407135248,
      "rewards/accuracy_reward": 0.7462500132620334,
      "rewards/format_reward": 0.0,
      "step": 90
    },
    {
      "completion_length": 594.4781372070313,
      "epoch": 0.5066666666666667,
      "grad_norm": 0.20932912826538086,
      "kl": 0.019523239135742186,
      "learning_rate": 1.7235633992642616e-06,
      "loss": 0.0008,
      "reward": 0.7800000101327896,
      "reward_std": 0.18672875873744488,
      "rewards/accuracy_reward": 0.7800000101327896,
      "rewards/format_reward": 0.0,
      "step": 95
    },
    {
      "completion_length": 567.590631866455,
      "epoch": 0.5333333333333333,
      "grad_norm": 0.26683634519577026,
      "kl": 0.00815887451171875,
      "learning_rate": 1.5841056708557877e-06,
      "loss": 0.0003,
      "reward": 0.8012500151991844,
      "reward_std": 0.15466174986213446,
      "rewards/accuracy_reward": 0.8012500151991844,
      "rewards/format_reward": 0.0,
      "step": 100
    },
    {
      "completion_length": 602.0687614440918,
      "epoch": 0.56,
      "grad_norm": 0.24808332324028015,
      "kl": 0.046283721923828125,
      "learning_rate": 1.4439132085855116e-06,
      "loss": 0.0018,
      "reward": 0.7306250102818013,
      "reward_std": 0.2066970258951187,
      "rewards/accuracy_reward": 0.7306250102818013,
      "rewards/format_reward": 0.0,
      "step": 105
    },
    {
      "completion_length": 561.5981369018555,
      "epoch": 0.5866666666666667,
      "grad_norm": 0.13664592802524567,
      "kl": 0.00771636962890625,
      "learning_rate": 1.304210711669923e-06,
      "loss": 0.0003,
      "reward": 0.7881250090897083,
      "reward_std": 0.16074271276593208,
      "rewards/accuracy_reward": 0.7881250090897083,
      "rewards/format_reward": 0.0,
      "step": 110
    },
    {
      "completion_length": 572.8368896484375,
      "epoch": 0.6133333333333333,
      "grad_norm": 61.19581985473633,
      "kl": 0.08488540649414063,
      "learning_rate": 1.1662185990655286e-06,
      "loss": 0.0034,
      "reward": 0.7856250144541264,
      "reward_std": 0.15731600020080805,
      "rewards/accuracy_reward": 0.7856250144541264,
      "rewards/format_reward": 0.0,
      "step": 115
    },
    {
      "completion_length": 566.652507019043,
      "epoch": 0.64,
      "grad_norm": 0.6947761178016663,
      "kl": 0.00809326171875,
      "learning_rate": 1.0311423480815335e-06,
      "loss": 0.0003,
      "reward": 0.7475000098347664,
      "reward_std": 0.17467193938791753,
      "rewards/accuracy_reward": 0.7475000098347664,
      "rewards/format_reward": 0.0,
      "step": 120
    },
    {
      "completion_length": 578.0793846130371,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.2171238362789154,
      "kl": 0.4027862548828125,
      "learning_rate": 9.001619635203888e-07,
      "loss": 0.0162,
      "reward": 0.7456250101327896,
      "reward_std": 0.15293692518025637,
      "rewards/accuracy_reward": 0.7456250101327896,
      "rewards/format_reward": 0.0,
      "step": 125
    },
    {
      "completion_length": 565.3787605285645,
      "epoch": 0.6933333333333334,
      "grad_norm": 0.3446357548236847,
      "kl": 0.038055801391601564,
      "learning_rate": 7.744216693421403e-07,
      "loss": 0.0015,
      "reward": 0.7831250131130219,
      "reward_std": 0.1700244778767228,
      "rewards/accuracy_reward": 0.7831250131130219,
      "rewards/format_reward": 0.0,
      "step": 130
    },
    {
      "completion_length": 582.8950141906738,
      "epoch": 0.72,
      "grad_norm": 0.15175624191761017,
      "kl": 0.008423233032226562,
      "learning_rate": 6.550199129045669e-07,
      "loss": 0.0003,
      "reward": 0.7750000104308128,
      "reward_std": 0.16627665907144545,
      "rewards/accuracy_reward": 0.7750000104308128,
      "rewards/format_reward": 0.0,
      "step": 135
    },
    {
      "completion_length": 574.6862632751465,
      "epoch": 0.7466666666666667,
      "grad_norm": 0.24874858558177948,
      "kl": 0.018523406982421876,
      "learning_rate": 5.429997691004874e-07,
      "loss": 0.0007,
      "reward": 0.7637500140815974,
      "reward_std": 0.17574753649532796,
      "rewards/accuracy_reward": 0.7637500140815974,
      "rewards/format_reward": 0.0,
      "step": 140
    },
    {
      "completion_length": 582.8781341552734,
      "epoch": 0.7733333333333333,
      "grad_norm": 0.22961439192295074,
      "kl": 0.00735931396484375,
      "learning_rate": 4.3933982822017883e-07,
      "loss": 0.0003,
      "reward": 0.7487500086426735,
      "reward_std": 0.16670059841126203,
      "rewards/accuracy_reward": 0.7487500086426735,
      "rewards/format_reward": 0.0,
      "step": 145
    },
    {
      "completion_length": 591.0300117492676,
      "epoch": 0.8,
      "grad_norm": 0.1450783908367157,
      "kl": 0.0070438385009765625,
      "learning_rate": 3.449456471411058e-07,
      "loss": 0.0003,
      "reward": 0.7575000070035458,
      "reward_std": 0.17982883434742689,
      "rewards/accuracy_reward": 0.7575000070035458,
      "rewards/format_reward": 0.0,
      "step": 150
    },
    {
      "completion_length": 595.1437629699707,
      "epoch": 0.8266666666666667,
      "grad_norm": 0.1316715031862259,
      "kl": 0.012836456298828125,
      "learning_rate": 2.6064183852600797e-07,
      "loss": 0.0005,
      "reward": 0.7687500096857548,
      "reward_std": 0.1724598340690136,
      "rewards/accuracy_reward": 0.7687500096857548,
      "rewards/format_reward": 0.0,
      "step": 155
    },
    {
      "completion_length": 572.5243888854981,
      "epoch": 0.8533333333333334,
      "grad_norm": 0.17138485610485077,
      "kl": 0.007579803466796875,
      "learning_rate": 1.8716486713686948e-07,
      "loss": 0.0003,
      "reward": 0.7725000090897083,
      "reward_std": 0.18208168745040892,
      "rewards/accuracy_reward": 0.7725000090897083,
      "rewards/format_reward": 0.0,
      "step": 160
    },
    {
      "completion_length": 587.6368865966797,
      "epoch": 0.88,
      "grad_norm": 0.37863653898239136,
      "kl": 0.00740203857421875,
      "learning_rate": 1.251566161950357e-07,
      "loss": 0.0003,
      "reward": 0.7600000105798245,
      "reward_std": 0.19197216629981995,
      "rewards/accuracy_reward": 0.7600000105798245,
      "rewards/format_reward": 0.0,
      "step": 165
    },
    {
      "completion_length": 590.2731330871582,
      "epoch": 0.9066666666666666,
      "grad_norm": 0.17426247894763947,
      "kl": 0.008208465576171876,
      "learning_rate": 7.515877999073101e-08,
      "loss": 0.0003,
      "reward": 0.7662500113248825,
      "reward_std": 0.19529659319669007,
      "rewards/accuracy_reward": 0.7662500113248825,
      "rewards/format_reward": 0.0,
      "step": 170
    },
    {
      "completion_length": 580.357511138916,
      "epoch": 0.9333333333333333,
      "grad_norm": 3.413297176361084,
      "kl": 0.05416526794433594,
      "learning_rate": 3.7608131727264573e-08,
      "loss": 0.0022,
      "reward": 0.7906250134110451,
      "reward_std": 0.17527884822338818,
      "rewards/accuracy_reward": 0.7906250134110451,
      "rewards/format_reward": 0.0,
      "step": 175
    },
    {
      "completion_length": 597.2575073242188,
      "epoch": 0.96,
      "grad_norm": 0.16546441614627838,
      "kl": 0.00909271240234375,
      "learning_rate": 1.2832707939284426e-08,
      "loss": 0.0004,
      "reward": 0.7381250090897084,
      "reward_std": 0.1613092703744769,
      "rewards/accuracy_reward": 0.7381250090897084,
      "rewards/format_reward": 0.0,
      "step": 180
    },
    {
      "completion_length": 548.148137664795,
      "epoch": 0.9866666666666667,
      "grad_norm": 0.48810043931007385,
      "kl": 0.007534408569335937,
      "learning_rate": 1.0489428174020875e-09,
      "loss": 0.0003,
      "reward": 0.7881250143051147,
      "reward_std": 0.1571642367169261,
      "rewards/accuracy_reward": 0.7881250143051147,
      "rewards/format_reward": 0.0,
      "step": 185
    },
    {
      "completion_length": 565.9296970367432,
      "epoch": 0.9973333333333333,
      "kl": 0.0072765350341796875,
      "reward": 0.8171875178813934,
      "reward_std": 0.1507129198871553,
      "rewards/accuracy_reward": 0.8171875178813934,
      "rewards/format_reward": 0.0,
      "step": 187,
      "total_flos": 0.0,
      "train_loss": 0.01644946206223318,
      "train_runtime": 16174.6118,
      "train_samples_per_second": 0.464,
      "train_steps_per_second": 0.012
    }
  ],
  "logging_steps": 5,
  "max_steps": 187,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}