{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 712,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0014044943820224719,
      "grad_norm": 0.9453608393669128,
      "learning_rate": 6.9444444444444435e-09,
      "logits/chosen": -3.205078125,
      "logits/rejected": -3.185546875,
      "logps/chosen": -43.59375,
      "logps/rejected": -42.640625,
      "loss": 0.6914,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.014044943820224719,
      "grad_norm": 1.151463508605957,
      "learning_rate": 6.944444444444444e-08,
      "logits/chosen": -3.24609375,
      "logits/rejected": -3.196831703186035,
      "logps/chosen": -42.70138931274414,
      "logps/rejected": -41.57638931274414,
      "loss": 0.6923,
      "rewards/accuracies": 0.2465277761220932,
      "rewards/chosen": 9.75396906142123e-05,
      "rewards/margins": -0.00013128916907589883,
      "rewards/rejected": 0.00022856394934933633,
      "step": 10
    },
    {
      "epoch": 0.028089887640449437,
      "grad_norm": 0.5042410492897034,
      "learning_rate": 1.3888888888888888e-07,
      "logits/chosen": -3.255859375,
      "logits/rejected": -3.21484375,
      "logps/chosen": -42.06718826293945,
      "logps/rejected": -40.62968826293945,
      "loss": 0.6911,
      "rewards/accuracies": 0.30937498807907104,
      "rewards/chosen": 0.006805038545280695,
      "rewards/margins": 0.0023165703751146793,
      "rewards/rejected": 0.004488563630729914,
      "step": 20
    },
    {
      "epoch": 0.042134831460674156,
      "grad_norm": 0.6343653202056885,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": -3.262500047683716,
      "logits/rejected": -3.232617139816284,
      "logps/chosen": -41.40625,
      "logps/rejected": -40.610939025878906,
      "loss": 0.6869,
      "rewards/accuracies": 0.4468750059604645,
      "rewards/chosen": 0.027604103088378906,
      "rewards/margins": 0.012437248602509499,
      "rewards/rejected": 0.015170956030488014,
      "step": 30
    },
    {
      "epoch": 0.056179775280898875,
      "grad_norm": 0.7174843549728394,
      "learning_rate": 2.7777777777777776e-07,
      "logits/chosen": -3.279296875,
      "logits/rejected": -3.2447266578674316,
      "logps/chosen": -40.29218673706055,
      "logps/rejected": -39.842185974121094,
      "loss": 0.68,
      "rewards/accuracies": 0.5078125,
      "rewards/chosen": 0.03814506530761719,
      "rewards/margins": 0.027390670031309128,
      "rewards/rejected": 0.010743332095444202,
      "step": 40
    },
    {
      "epoch": 0.0702247191011236,
      "grad_norm": 0.5838291645050049,
      "learning_rate": 3.472222222222222e-07,
      "logits/chosen": -3.2763671875,
      "logits/rejected": -3.230664014816284,
      "logps/chosen": -40.84687423706055,
      "logps/rejected": -40.31562423706055,
      "loss": 0.6619,
      "rewards/accuracies": 0.609375,
      "rewards/chosen": 0.041875459253787994,
      "rewards/margins": 0.06587791442871094,
      "rewards/rejected": -0.023966407403349876,
      "step": 50
    },
    {
      "epoch": 0.08426966292134831,
      "grad_norm": 0.8917225003242493,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": -3.2505860328674316,
      "logits/rejected": -3.1898436546325684,
      "logps/chosen": -41.443748474121094,
      "logps/rejected": -43.939064025878906,
      "loss": 0.6155,
      "rewards/accuracies": 0.6890624761581421,
      "rewards/chosen": 0.011277198791503906,
      "rewards/margins": 0.17209243774414062,
      "rewards/rejected": -0.1607826203107834,
      "step": 60
    },
    {
      "epoch": 0.09831460674157304,
      "grad_norm": 1.416306734085083,
      "learning_rate": 4.861111111111111e-07,
      "logits/chosen": -3.2109375,
      "logits/rejected": -3.1263670921325684,
      "logps/chosen": -46.9375,
      "logps/rejected": -53.76874923706055,
      "loss": 0.5336,
      "rewards/accuracies": 0.7015625238418579,
      "rewards/chosen": -0.21380920708179474,
      "rewards/margins": 0.407858282327652,
      "rewards/rejected": -0.6218963861465454,
      "step": 70
    },
    {
      "epoch": 0.11235955056179775,
      "grad_norm": 0.6724388599395752,
      "learning_rate": 4.998072590601808e-07,
      "logits/chosen": -3.2593750953674316,
      "logits/rejected": -3.081835985183716,
      "logps/chosen": -44.157814025878906,
      "logps/rejected": -63.279685974121094,
      "loss": 0.405,
      "rewards/accuracies": 0.7203124761581421,
      "rewards/chosen": -0.117925263941288,
      "rewards/margins": 1.0123169422149658,
      "rewards/rejected": -1.1299316883087158,
      "step": 80
    },
    {
      "epoch": 0.12640449438202248,
      "grad_norm": 0.8954005837440491,
      "learning_rate": 4.990247583129217e-07,
      "logits/chosen": -3.230273485183716,
      "logits/rejected": -2.9839844703674316,
      "logps/chosen": -45.54218673706055,
      "logps/rejected": -78.46562194824219,
      "loss": 0.3109,
      "rewards/accuracies": 0.7515624761581421,
      "rewards/chosen": -0.15528163313865662,
      "rewards/margins": 1.7075684070587158,
      "rewards/rejected": -1.863037109375,
      "step": 90
    },
    {
      "epoch": 0.1404494382022472,
      "grad_norm": 0.5155877470970154,
      "learning_rate": 4.976423351108942e-07,
      "logits/chosen": -3.2529296875,
      "logits/rejected": NaN,
      "logps/chosen": -48.826560974121094,
      "logps/rejected": -96.4312515258789,
      "loss": 0.2579,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.3163391053676605,
      "rewards/margins": 2.467578172683716,
      "rewards/rejected": -2.783496141433716,
      "step": 100
    },
    {
      "epoch": 0.1544943820224719,
      "grad_norm": 0.9068161249160767,
      "learning_rate": 4.95663319832678e-07,
      "logits/chosen": -3.236328125,
      "logits/rejected": -2.843554735183716,
      "logps/chosen": -45.657814025878906,
      "logps/rejected": -107.41874694824219,
      "loss": 0.2278,
      "rewards/accuracies": 0.753125011920929,
      "rewards/chosen": -0.1908990889787674,
      "rewards/margins": 3.155956983566284,
      "rewards/rejected": -3.34814453125,
      "step": 110
    },
    {
      "epoch": 0.16853932584269662,
      "grad_norm": 0.5081749558448792,
      "learning_rate": 4.930924800994191e-07,
      "logits/chosen": -3.252734422683716,
      "logits/rejected": -2.847460985183716,
      "logps/chosen": -46.87812423706055,
      "logps/rejected": -118.3499984741211,
      "loss": 0.2113,
      "rewards/accuracies": 0.776562511920929,
      "rewards/chosen": -0.22010573744773865,
      "rewards/margins": 3.623046875,
      "rewards/rejected": -3.8433594703674316,
      "step": 120
    },
    {
      "epoch": 0.18258426966292135,
      "grad_norm": 0.6637352108955383,
      "learning_rate": 4.899360092892144e-07,
      "logits/chosen": -3.2525391578674316,
      "logits/rejected": -2.763671875,
      "logps/chosen": -47.01093673706055,
      "logps/rejected": -124.7437515258789,
      "loss": 0.2106,
      "rewards/accuracies": 0.7671874761581421,
      "rewards/chosen": -0.23588410019874573,
      "rewards/margins": 3.9537110328674316,
      "rewards/rejected": -4.188086032867432,
      "step": 130
    },
    {
      "epoch": 0.19662921348314608,
      "grad_norm": 0.3586008548736572,
      "learning_rate": 4.862015116167195e-07,
      "logits/chosen": -3.271484375,
      "logits/rejected": -2.750781297683716,
      "logps/chosen": -42.41093826293945,
      "logps/rejected": -132.1125030517578,
      "loss": 0.182,
      "rewards/accuracies": 0.785937488079071,
      "rewards/chosen": 0.008197021670639515,
      "rewards/margins": 4.537890434265137,
      "rewards/rejected": -4.529687404632568,
      "step": 140
    },
    {
      "epoch": 0.21067415730337077,
      "grad_norm": 0.7338384985923767,
      "learning_rate": 4.81897983813931e-07,
      "logits/chosen": -3.2681641578674316,
      "logits/rejected": NaN,
      "logps/chosen": -39.69062423706055,
      "logps/rejected": -129.3562469482422,
      "loss": 0.1964,
      "rewards/accuracies": 0.7593749761581421,
      "rewards/chosen": 0.10731048882007599,
      "rewards/margins": 4.583203315734863,
      "rewards/rejected": -4.477246284484863,
      "step": 150
    },
    {
      "epoch": 0.2247191011235955,
      "grad_norm": 0.7118776440620422,
      "learning_rate": 4.770357934562704e-07,
      "logits/chosen": -3.279296875,
      "logits/rejected": -2.6996092796325684,
      "logps/chosen": -44.157814025878906,
      "logps/rejected": -138.63125610351562,
      "loss": 0.1941,
      "rewards/accuracies": 0.7828124761581421,
      "rewards/chosen": -0.10118408501148224,
      "rewards/margins": 4.762890815734863,
      "rewards/rejected": -4.8642578125,
      "step": 160
    },
    {
      "epoch": 0.23876404494382023,
      "grad_norm": 0.20937258005142212,
      "learning_rate": 4.716266539861866e-07,
      "logits/chosen": -3.1851563453674316,
      "logits/rejected": -2.635546922683716,
      "logps/chosen": -51.939064025878906,
      "logps/rejected": -140.15625,
      "loss": 0.1944,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.5132009387016296,
      "rewards/margins": 4.476171970367432,
      "rewards/rejected": -4.986914157867432,
      "step": 170
    },
    {
      "epoch": 0.25280898876404495,
      "grad_norm": 1.1107814311981201,
      "learning_rate": 4.6568359649444796e-07,
      "logits/chosen": -3.2699217796325684,
      "logits/rejected": -2.663281202316284,
      "logps/chosen": -37.44843673706055,
      "logps/rejected": -139.5437469482422,
      "loss": 0.1877,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.19300994277000427,
      "rewards/margins": 5.172265529632568,
      "rewards/rejected": -4.980273246765137,
      "step": 180
    },
    {
      "epoch": 0.26685393258426965,
      "grad_norm": 0.1760079711675644,
      "learning_rate": 4.592209383271023e-07,
      "logits/chosen": -3.15234375,
      "logits/rejected": NaN,
      "logps/chosen": -50.7109375,
      "logps/rejected": -142.33749389648438,
      "loss": 0.2011,
      "rewards/accuracies": 0.753125011920929,
      "rewards/chosen": -0.4693801999092102,
      "rewards/margins": 4.672265529632568,
      "rewards/rejected": -5.140820503234863,
      "step": 190
    },
    {
      "epoch": 0.2808988764044944,
      "grad_norm": 0.3540495038032532,
      "learning_rate": 4.5225424859373684e-07,
      "logits/chosen": -3.2144532203674316,
      "logits/rejected": -2.646484375,
      "logps/chosen": -44.23749923706055,
      "logps/rejected": -146.5,
      "loss": 0.1823,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.061879731714725494,
      "rewards/margins": 5.176171779632568,
      "rewards/rejected": -5.240038871765137,
      "step": 200
    },
    {
      "epoch": 0.2949438202247191,
      "grad_norm": 0.4451713263988495,
      "learning_rate": 4.448003106601291e-07,
      "logits/chosen": -3.2412109375,
      "logits/rejected": -2.6431641578674316,
      "logps/chosen": -39.69843673706055,
      "logps/rejected": -141.1062469482422,
      "loss": 0.2101,
      "rewards/accuracies": 0.7671874761581421,
      "rewards/chosen": 0.11627502739429474,
      "rewards/margins": 5.123827934265137,
      "rewards/rejected": -5.008593559265137,
      "step": 210
    },
    {
      "epoch": 0.3089887640449438,
      "grad_norm": 1.5930163860321045,
      "learning_rate": 4.3687708171564917e-07,
      "logits/chosen": -3.2291016578674316,
      "logits/rejected": -2.604296922683716,
      "logps/chosen": -42.740623474121094,
      "logps/rejected": -145.375,
      "loss": 0.2042,
      "rewards/accuracies": 0.7578125,
      "rewards/chosen": -0.070429228246212,
      "rewards/margins": 5.188672065734863,
      "rewards/rejected": -5.260156154632568,
      "step": 220
    },
    {
      "epoch": 0.32303370786516855,
      "grad_norm": 0.27790066599845886,
      "learning_rate": 4.2850364951281705e-07,
      "logits/chosen": -3.2845702171325684,
      "logits/rejected": -2.6654295921325684,
      "logps/chosen": -39.85625076293945,
      "logps/rejected": -149.9250030517578,
      "loss": 0.1742,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": 0.12183837592601776,
      "rewards/margins": 5.572656154632568,
      "rewards/rejected": -5.451562404632568,
      "step": 230
    },
    {
      "epoch": 0.33707865168539325,
      "grad_norm": 0.32155928015708923,
      "learning_rate": 4.1970018638323547e-07,
      "logits/chosen": -3.26953125,
      "logits/rejected": -2.6566405296325684,
      "logps/chosen": -41.8671875,
      "logps/rejected": -152.64999389648438,
      "loss": 0.174,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.051790811121463776,
      "rewards/margins": 5.608593940734863,
      "rewards/rejected": -5.559179782867432,
      "step": 240
    },
    {
      "epoch": 0.351123595505618,
      "grad_norm": 0.2504253685474396,
      "learning_rate": 4.1048790064067573e-07,
      "logits/chosen": -3.254687547683716,
      "logits/rejected": -2.630078077316284,
      "logps/chosen": -40.02812576293945,
      "logps/rejected": -154.9250030517578,
      "loss": 0.1722,
      "rewards/accuracies": 0.785937488079071,
      "rewards/chosen": 0.126708984375,
      "rewards/margins": 5.812109470367432,
      "rewards/rejected": -5.681640625,
      "step": 250
    },
    {
      "epoch": 0.3651685393258427,
      "grad_norm": 1.5456242561340332,
      "learning_rate": 4.0088898548839285e-07,
      "logits/chosen": -3.2398438453674316,
      "logits/rejected": -2.6322264671325684,
      "logps/chosen": -42.142189025878906,
      "logps/rejected": -149.5749969482422,
      "loss": 0.1996,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.08585663139820099,
      "rewards/margins": 5.431640625,
      "rewards/rejected": -5.516211032867432,
      "step": 260
    },
    {
      "epoch": 0.3792134831460674,
      "grad_norm": 0.1563502699136734,
      "learning_rate": 3.9092656555375414e-07,
      "logits/chosen": -3.268359422683716,
      "logits/rejected": -2.6371092796325684,
      "logps/chosen": -41.34375,
      "logps/rejected": -160.625,
      "loss": 0.159,
      "rewards/accuracies": 0.796875,
      "rewards/chosen": 0.07463989406824112,
      "rewards/margins": 6.014843940734863,
      "rewards/rejected": -5.940234184265137,
      "step": 270
    },
    {
      "epoch": 0.39325842696629215,
      "grad_norm": 0.414302796125412,
      "learning_rate": 3.806246411789872e-07,
      "logits/chosen": -3.282421827316284,
      "logits/rejected": NaN,
      "logps/chosen": -39.970314025878906,
      "logps/rejected": -151.84375,
      "loss": 0.1905,
      "rewards/accuracies": 0.7640625238418579,
      "rewards/chosen": 0.1959686279296875,
      "rewards/margins": 5.774609565734863,
      "rewards/rejected": -5.579492092132568,
      "step": 280
    },
    {
      "epoch": 0.40730337078651685,
      "grad_norm": 0.11994462460279465,
      "learning_rate": 3.700080306022528e-07,
      "logits/chosen": -3.199023485183716,
      "logits/rejected": -2.582226514816284,
      "logps/chosen": -51.41093826293945,
      "logps/rejected": -164.3125,
      "loss": 0.1673,
      "rewards/accuracies": 0.7890625,
      "rewards/chosen": -0.46795958280563354,
      "rewards/margins": 5.684179782867432,
      "rewards/rejected": -6.149609565734863,
      "step": 290
    },
    {
      "epoch": 0.42134831460674155,
      "grad_norm": 0.3142966330051422,
      "learning_rate": 3.5910231016833546e-07,
      "logits/chosen": -3.1810545921325684,
      "logits/rejected": -2.5703125,
      "logps/chosen": -50.90625,
      "logps/rejected": -161.7375030517578,
      "loss": 0.1775,
      "rewards/accuracies": 0.7796875238418579,
      "rewards/chosen": -0.4352920651435852,
      "rewards/margins": 5.612890720367432,
      "rewards/rejected": -6.050000190734863,
      "step": 300
    },
    {
      "epoch": 0.4353932584269663,
      "grad_norm": 0.5336220264434814,
      "learning_rate": 3.4793375271298895e-07,
      "logits/chosen": -3.252148389816284,
      "logits/rejected": NaN,
      "logps/chosen": -41.959373474121094,
      "logps/rejected": -157.24374389648438,
      "loss": 0.1855,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.02679443359375,
      "rewards/margins": 5.8720703125,
      "rewards/rejected": -5.84375,
      "step": 310
    },
    {
      "epoch": 0.449438202247191,
      "grad_norm": 0.11426942050457001,
      "learning_rate": 3.3652926426937325e-07,
      "logits/chosen": -3.280078172683716,
      "logits/rejected": -2.6470704078674316,
      "logps/chosen": -40.0546875,
      "logps/rejected": -156.4812469482422,
      "loss": 0.1744,
      "rewards/accuracies": 0.770312488079071,
      "rewards/chosen": 0.06229095533490181,
      "rewards/margins": 5.884179592132568,
      "rewards/rejected": -5.8212890625,
      "step": 320
    },
    {
      "epoch": 0.46348314606741575,
      "grad_norm": 0.12852512300014496,
      "learning_rate": 3.249163192490642e-07,
      "logits/chosen": -3.245898485183716,
      "logits/rejected": -2.5863280296325684,
      "logps/chosen": -43.52031326293945,
      "logps/rejected": -163.125,
      "loss": 0.1654,
      "rewards/accuracies": 0.785937488079071,
      "rewards/chosen": -0.09057464450597763,
      "rewards/margins": 6.042578220367432,
      "rewards/rejected": -6.135937690734863,
      "step": 330
    },
    {
      "epoch": 0.47752808988764045,
      "grad_norm": 1.0368801355361938,
      "learning_rate": 3.1312289425378944e-07,
      "logits/chosen": -3.237109422683716,
      "logits/rejected": -2.5757813453674316,
      "logps/chosen": -45.65156173706055,
      "logps/rejected": -166.43124389648438,
      "loss": 0.1851,
      "rewards/accuracies": 0.7718750238418579,
      "rewards/chosen": -0.196772962808609,
      "rewards/margins": 6.102734565734863,
      "rewards/rejected": -6.302343845367432,
      "step": 340
    },
    {
      "epoch": 0.49157303370786515,
      "grad_norm": 0.2768089473247528,
      "learning_rate": 3.011774006773449e-07,
      "logits/chosen": -3.1851563453674316,
      "logits/rejected": -2.553515672683716,
      "logps/chosen": -50.12968826293945,
      "logps/rejected": -172.3125,
      "loss": 0.1616,
      "rewards/accuracies": 0.801562488079071,
      "rewards/chosen": -0.33214110136032104,
      "rewards/margins": 6.178515434265137,
      "rewards/rejected": -6.510156154632568,
      "step": 350
    },
    {
      "epoch": 0.5056179775280899,
      "grad_norm": 0.5607307553291321,
      "learning_rate": 2.8910861626005773e-07,
      "logits/chosen": -3.2269530296325684,
      "logits/rejected": -2.5611329078674316,
      "logps/chosen": -45.09375,
      "logps/rejected": -164.16250610351562,
      "loss": 0.1745,
      "rewards/accuracies": 0.7718750238418579,
      "rewards/chosen": -0.21467895805835724,
      "rewards/margins": 6.003515720367432,
      "rewards/rejected": -6.216406345367432,
      "step": 360
    },
    {
      "epoch": 0.5196629213483146,
      "grad_norm": 0.45973560214042664,
      "learning_rate": 2.7694561576068983e-07,
      "logits/chosen": -3.203906297683716,
      "logits/rejected": -2.5244140625,
      "logps/chosen": -46.931251525878906,
      "logps/rejected": -171.2375030517578,
      "loss": 0.1627,
      "rewards/accuracies": 0.796875,
      "rewards/chosen": -0.20502586662769318,
      "rewards/margins": 6.284570217132568,
      "rewards/rejected": -6.489062309265137,
      "step": 370
    },
    {
      "epoch": 0.5337078651685393,
      "grad_norm": 0.33313286304473877,
      "learning_rate": 2.647177009127972e-07,
      "logits/chosen": -3.1996092796325684,
      "logits/rejected": -2.5083985328674316,
      "logps/chosen": -46.064064025878906,
      "logps/rejected": -171.14999389648438,
      "loss": 0.1645,
      "rewards/accuracies": 0.7984374761581421,
      "rewards/chosen": -0.1926528960466385,
      "rewards/margins": 6.312890529632568,
      "rewards/rejected": -6.504101753234863,
      "step": 380
    },
    {
      "epoch": 0.547752808988764,
      "grad_norm": 0.3406722843647003,
      "learning_rate": 2.524543298342874e-07,
      "logits/chosen": -3.216992139816284,
      "logits/rejected": -2.5326170921325684,
      "logps/chosen": -43.662498474121094,
      "logps/rejected": -168.7937469482422,
      "loss": 0.1589,
      "rewards/accuracies": 0.792187511920929,
      "rewards/chosen": -0.07064209133386612,
      "rewards/margins": 6.324023246765137,
      "rewards/rejected": -6.3935546875,
      "step": 390
    },
    {
      "epoch": 0.5617977528089888,
      "grad_norm": 0.2776348292827606,
      "learning_rate": 2.401850460602329e-07,
      "logits/chosen": -3.2457032203674316,
      "logits/rejected": -2.5503907203674316,
      "logps/chosen": -41.400001525878906,
      "logps/rejected": -166.88125610351562,
      "loss": 0.1747,
      "rewards/accuracies": 0.7828124761581421,
      "rewards/chosen": 0.02814788743853569,
      "rewards/margins": 6.317187309265137,
      "rewards/rejected": -6.288281440734863,
      "step": 400
    },
    {
      "epoch": 0.5758426966292135,
      "grad_norm": 1.0503939390182495,
      "learning_rate": 2.2793940736990766e-07,
      "logits/chosen": -3.241992235183716,
      "logits/rejected": -2.546093702316284,
      "logps/chosen": -40.43437576293945,
      "logps/rejected": -163.3874969482422,
      "loss": 0.1881,
      "rewards/accuracies": 0.7718750238418579,
      "rewards/chosen": 0.005574035458266735,
      "rewards/margins": 6.197851657867432,
      "rewards/rejected": -6.190625190734863,
      "step": 410
    },
    {
      "epoch": 0.5898876404494382,
      "grad_norm": 1.4323946237564087,
      "learning_rate": 2.1574691457950803e-07,
      "logits/chosen": -3.208203077316284,
      "logits/rejected": NaN,
      "logps/chosen": -43.95781326293945,
      "logps/rejected": -171.10000610351562,
      "loss": 0.1664,
      "rewards/accuracies": 0.7890625,
      "rewards/chosen": -0.07436218112707138,
      "rewards/margins": 6.463086128234863,
      "rewards/rejected": -6.539453029632568,
      "step": 420
    },
    {
      "epoch": 0.6039325842696629,
      "grad_norm": 0.678728461265564,
      "learning_rate": 2.036369404721023e-07,
      "logits/chosen": -3.1888670921325684,
      "logits/rejected": NaN,
      "logps/chosen": -47.20624923706055,
      "logps/rejected": -169.1062469482422,
      "loss": 0.166,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -0.290365606546402,
      "rewards/margins": 6.1806640625,
      "rewards/rejected": -6.469531059265137,
      "step": 430
    },
    {
      "epoch": 0.6179775280898876,
      "grad_norm": 0.20119936764240265,
      "learning_rate": 1.9163865903602372e-07,
      "logits/chosen": -3.2255859375,
      "logits/rejected": -2.520703077316284,
      "logps/chosen": -45.240623474121094,
      "logps/rejected": -167.2624969482422,
      "loss": 0.1816,
      "rewards/accuracies": 0.7671874761581421,
      "rewards/chosen": -0.21324768662452698,
      "rewards/margins": 6.177538871765137,
      "rewards/rejected": -6.390625,
      "step": 440
    },
    {
      "epoch": 0.6320224719101124,
      "grad_norm": 0.7942313551902771,
      "learning_rate": 1.7978097518217702e-07,
      "logits/chosen": -3.1763672828674316,
      "logits/rejected": -2.5126953125,
      "logps/chosen": -49.80937576293945,
      "logps/rejected": -172.6374969482422,
      "loss": 0.1685,
      "rewards/accuracies": 0.796875,
      "rewards/chosen": -0.3480590879917145,
      "rewards/margins": 6.218359470367432,
      "rewards/rejected": -6.568554878234863,
      "step": 450
    },
    {
      "epoch": 0.6460674157303371,
      "grad_norm": 0.9307264685630798,
      "learning_rate": 1.6809245510957666e-07,
      "logits/chosen": -3.2232422828674316,
      "logits/rejected": -2.4925780296325684,
      "logps/chosen": -44.875,
      "logps/rejected": -171.28750610351562,
      "loss": 0.1721,
      "rewards/accuracies": 0.7796875238418579,
      "rewards/chosen": -0.14088821411132812,
      "rewards/margins": 6.381640434265137,
      "rewards/rejected": -6.520312309265137,
      "step": 460
    },
    {
      "epoch": 0.6601123595505618,
      "grad_norm": 1.6537760496139526,
      "learning_rate": 1.5660125748687093e-07,
      "logits/chosen": -3.2333984375,
      "logits/rejected": -2.518749952316284,
      "logps/chosen": -43.334373474121094,
      "logps/rejected": -167.8125,
      "loss": 0.1794,
      "rewards/accuracies": 0.770312488079071,
      "rewards/chosen": -0.06363830715417862,
      "rewards/margins": 6.300976753234863,
      "rewards/rejected": -6.363671779632568,
      "step": 470
    },
    {
      "epoch": 0.6741573033707865,
      "grad_norm": 0.6378122568130493,
      "learning_rate": 1.4533506561564305e-07,
      "logits/chosen": -3.2416014671325684,
      "logits/rejected": -2.546093702316284,
      "logps/chosen": -39.32500076293945,
      "logps/rejected": -166.10000610351562,
      "loss": 0.1712,
      "rewards/accuracies": 0.7890625,
      "rewards/chosen": 0.16349944472312927,
      "rewards/margins": 6.383008003234863,
      "rewards/rejected": -6.220312595367432,
      "step": 480
    },
    {
      "epoch": 0.6882022471910112,
      "grad_norm": 0.21734459698200226,
      "learning_rate": 1.343210207389125e-07,
      "logits/chosen": -3.216015577316284,
      "logits/rejected": NaN,
      "logps/chosen": -39.485939025878906,
      "logps/rejected": -162.99374389648438,
      "loss": 0.1864,
      "rewards/accuracies": 0.7640625238418579,
      "rewards/chosen": 0.08720092475414276,
      "rewards/margins": 6.254492282867432,
      "rewards/rejected": -6.165625095367432,
      "step": 490
    },
    {
      "epoch": 0.702247191011236,
      "grad_norm": 0.3136639893054962,
      "learning_rate": 1.2358565665550387e-07,
      "logits/chosen": -3.216015577316284,
      "logits/rejected": NaN,
      "logps/chosen": -42.5078125,
      "logps/rejected": -166.36874389648438,
      "loss": 0.1841,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.017351532354950905,
      "rewards/margins": 6.283398628234863,
      "rewards/rejected": -6.266992092132568,
      "step": 500
    },
    {
      "epoch": 0.7162921348314607,
      "grad_norm": 0.07860163599252701,
      "learning_rate": 1.1315483579780094e-07,
      "logits/chosen": -3.210742235183716,
      "logits/rejected": -2.499218702316284,
      "logps/chosen": -41.615623474121094,
      "logps/rejected": -166.30624389648438,
      "loss": 0.1831,
      "rewards/accuracies": 0.7828124761581421,
      "rewards/chosen": -0.01008453406393528,
      "rewards/margins": 6.287304878234863,
      "rewards/rejected": -6.297070503234863,
      "step": 510
    },
    {
      "epoch": 0.7303370786516854,
      "grad_norm": 0.173310786485672,
      "learning_rate": 1.0305368692688174e-07,
      "logits/chosen": -3.195507764816284,
      "logits/rejected": -2.504101514816284,
      "logps/chosen": -45.17656326293945,
      "logps/rejected": -168.6062469482422,
      "loss": 0.1724,
      "rewards/accuracies": 0.7828124761581421,
      "rewards/chosen": -0.14428405463695526,
      "rewards/margins": 6.204297065734863,
      "rewards/rejected": -6.347070217132568,
      "step": 520
    },
    {
      "epoch": 0.7443820224719101,
      "grad_norm": 0.3090410828590393,
      "learning_rate": 9.330654459513266e-08,
      "logits/chosen": -3.169726610183716,
      "logits/rejected": -2.4736328125,
      "logps/chosen": -46.984375,
      "logps/rejected": -162.36874389648438,
      "loss": 0.2013,
      "rewards/accuracies": 0.7515624761581421,
      "rewards/chosen": -0.3089355528354645,
      "rewards/margins": 5.836718559265137,
      "rewards/rejected": -6.145312309265137,
      "step": 530
    },
    {
      "epoch": 0.7584269662921348,
      "grad_norm": 0.249381884932518,
      "learning_rate": 8.393689052217964e-08,
      "logits/chosen": -3.1357421875,
      "logits/rejected": -2.484179735183716,
      "logps/chosen": -49.49687576293945,
      "logps/rejected": -167.35000610351562,
      "loss": 0.1759,
      "rewards/accuracies": 0.770312488079071,
      "rewards/chosen": -0.393869012594223,
      "rewards/margins": 5.953320503234863,
      "rewards/rejected": -6.347460746765137,
      "step": 540
    },
    {
      "epoch": 0.7724719101123596,
      "grad_norm": 0.2429146021604538,
      "learning_rate": 7.49672970253691e-08,
      "logits/chosen": -3.1318359375,
      "logits/rejected": -2.4886717796325684,
      "logps/chosen": -52.857810974121094,
      "logps/rejected": -171.19375610351562,
      "loss": 0.1722,
      "rewards/accuracies": 0.7953125238418579,
      "rewards/chosen": -0.4801391661167145,
      "rewards/margins": 5.990429878234863,
      "rewards/rejected": -6.468359470367432,
      "step": 550
    },
    {
      "epoch": 0.7865168539325843,
      "grad_norm": 0.5875958204269409,
      "learning_rate": 6.641937264107867e-08,
      "logits/chosen": -3.171093702316284,
      "logits/rejected": -2.4808592796325684,
      "logps/chosen": -47.681251525878906,
      "logps/rejected": -174.71875,
      "loss": 0.1605,
      "rewards/accuracies": 0.7984374761581421,
      "rewards/chosen": -0.26392096281051636,
      "rewards/margins": 6.382421970367432,
      "rewards/rejected": -6.64453125,
      "step": 560
    },
    {
      "epoch": 0.800561797752809,
      "grad_norm": 0.37553030252456665,
      "learning_rate": 5.831371006785962e-08,
      "logits/chosen": -3.1826171875,
      "logits/rejected": -2.4847655296325684,
      "logps/chosen": -48.498435974121094,
      "logps/rejected": -169.1125030517578,
      "loss": 0.1773,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.3075141906738281,
      "rewards/margins": 6.108984470367432,
      "rewards/rejected": -6.417578220367432,
      "step": 570
    },
    {
      "epoch": 0.8146067415730337,
      "grad_norm": 0.31017985939979553,
      "learning_rate": 5.066983655682325e-08,
      "logits/chosen": -3.141796827316284,
      "logits/rejected": -2.505859375,
      "logps/chosen": -50.5703125,
      "logps/rejected": -166.83749389648438,
      "loss": 0.1899,
      "rewards/accuracies": 0.7671874761581421,
      "rewards/chosen": -0.4396209716796875,
      "rewards/margins": 5.888671875,
      "rewards/rejected": -6.325585842132568,
      "step": 580
    },
    {
      "epoch": 0.8286516853932584,
      "grad_norm": 0.8791071772575378,
      "learning_rate": 4.3506166868781755e-08,
      "logits/chosen": -3.1845703125,
      "logits/rejected": -2.4937500953674316,
      "logps/chosen": -48.103126525878906,
      "logps/rejected": -170.8625030517578,
      "loss": 0.1698,
      "rewards/accuracies": 0.785937488079071,
      "rewards/chosen": -0.27438658475875854,
      "rewards/margins": 6.2060546875,
      "rewards/rejected": -6.482421875,
      "step": 590
    },
    {
      "epoch": 0.8426966292134831,
      "grad_norm": 0.6104283928871155,
      "learning_rate": 3.683995891147695e-08,
      "logits/chosen": -3.176953077316284,
      "logits/rejected": NaN,
      "logps/chosen": -45.896873474121094,
      "logps/rejected": -168.60000610351562,
      "loss": 0.1721,
      "rewards/accuracies": 0.784375011920929,
      "rewards/chosen": -0.21481475234031677,
      "rewards/margins": 6.201952934265137,
      "rewards/rejected": -6.416015625,
      "step": 600
    },
    {
      "epoch": 0.8567415730337079,
      "grad_norm": 0.31073251366615295,
      "learning_rate": 3.0687272163768986e-08,
      "logits/chosen": -3.1650390625,
      "logits/rejected": -2.513671875,
      "logps/chosen": -46.66093826293945,
      "logps/rejected": -170.8625030517578,
      "loss": 0.1649,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.24439087510108948,
      "rewards/margins": 6.258593559265137,
      "rewards/rejected": -6.502148628234863,
      "step": 610
    },
    {
      "epoch": 0.8707865168539326,
      "grad_norm": 0.2421959638595581,
      "learning_rate": 2.5062928986944676e-08,
      "logits/chosen": -3.170117139816284,
      "logits/rejected": -2.5238280296325684,
      "logps/chosen": -46.318748474121094,
      "logps/rejected": -171.50625610351562,
      "loss": 0.1595,
      "rewards/accuracies": 0.7953125238418579,
      "rewards/chosen": -0.15370789170265198,
      "rewards/margins": 6.348242282867432,
      "rewards/rejected": -6.501953125,
      "step": 620
    },
    {
      "epoch": 0.8848314606741573,
      "grad_norm": 0.292059987783432,
      "learning_rate": 1.9980478916351296e-08,
      "logits/chosen": -3.185742139816284,
      "logits/rejected": -2.491992235183716,
      "logps/chosen": -46.803123474121094,
      "logps/rejected": -171.0437469482422,
      "loss": 0.1893,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -0.1725509613752365,
      "rewards/margins": 6.320703029632568,
      "rewards/rejected": -6.491796970367432,
      "step": 630
    },
    {
      "epoch": 0.898876404494382,
      "grad_norm": 0.6732786297798157,
      "learning_rate": 1.5452166019378987e-08,
      "logits/chosen": -3.2027344703674316,
      "logits/rejected": -2.5123047828674316,
      "logps/chosen": -46.25,
      "logps/rejected": -172.4250030517578,
      "loss": 0.1712,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.16961669921875,
      "rewards/margins": 6.3662109375,
      "rewards/rejected": -6.537890434265137,
      "step": 640
    },
    {
      "epoch": 0.9129213483146067,
      "grad_norm": 0.25237613916397095,
      "learning_rate": 1.1488899398429896e-08,
      "logits/chosen": -3.2007813453674316,
      "logits/rejected": -2.5160155296325684,
      "logps/chosen": -43.58906173706055,
      "logps/rejected": -170.09375,
      "loss": 0.1787,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.10042724758386612,
      "rewards/margins": 6.383008003234863,
      "rewards/rejected": -6.482421875,
      "step": 650
    },
    {
      "epoch": 0.9269662921348315,
      "grad_norm": 0.09695342183113098,
      "learning_rate": 8.100226909935059e-09,
      "logits/chosen": -3.2056641578674316,
      "logits/rejected": -2.5228514671325684,
      "logps/chosen": -43.27656173706055,
      "logps/rejected": -166.16250610351562,
      "loss": 0.1876,
      "rewards/accuracies": 0.7718750238418579,
      "rewards/chosen": -0.07465209811925888,
      "rewards/margins": 6.197656154632568,
      "rewards/rejected": -6.272265434265137,
      "step": 660
    },
    {
      "epoch": 0.9410112359550562,
      "grad_norm": 0.15045014023780823,
      "learning_rate": 5.2943121627319346e-09,
      "logits/chosen": -3.207812547683716,
      "logits/rejected": -2.5152344703674316,
      "logps/chosen": -43.06562423706055,
      "logps/rejected": -166.84375,
      "loss": 0.1818,
      "rewards/accuracies": 0.770312488079071,
      "rewards/chosen": -0.07712707668542862,
      "rewards/margins": 6.268164157867432,
      "rewards/rejected": -6.342382907867432,
      "step": 670
    },
    {
      "epoch": 0.9550561797752809,
      "grad_norm": 0.31066492199897766,
      "learning_rate": 3.077914851215585e-09,
      "logits/chosen": -3.2230467796325684,
      "logits/rejected": -2.5365233421325684,
      "logps/chosen": -44.46562576293945,
      "logps/rejected": -170.8625030517578,
      "loss": 0.1733,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.07046356052160263,
      "rewards/margins": 6.385156154632568,
      "rewards/rejected": -6.457812309265137,
      "step": 680
    },
    {
      "epoch": 0.9691011235955056,
      "grad_norm": 0.11322323232889175,
      "learning_rate": 1.4563744706429514e-09,
      "logits/chosen": -3.216015577316284,
      "logits/rejected": -2.50390625,
      "logps/chosen": -41.93281173706055,
      "logps/rejected": -168.16250610351562,
      "loss": 0.1693,
      "rewards/accuracies": 0.778124988079071,
      "rewards/chosen": -0.01323547400534153,
      "rewards/margins": 6.383593559265137,
      "rewards/rejected": -6.3974609375,
      "step": 690
    },
    {
      "epoch": 0.9831460674157303,
      "grad_norm": 0.28659555315971375,
      "learning_rate": 4.3359745382104405e-10,
      "logits/chosen": -3.21484375,
      "logits/rejected": NaN,
      "logps/chosen": -42.96406173706055,
      "logps/rejected": -169.0,
      "loss": 0.1708,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.045589447021484375,
      "rewards/margins": 6.397265434265137,
      "rewards/rejected": -6.444140434265137,
      "step": 700
    },
    {
      "epoch": 0.9971910112359551,
      "grad_norm": 0.5906934142112732,
      "learning_rate": 1.2047760167999133e-11,
      "logits/chosen": -3.2037110328674316,
      "logits/rejected": -2.529101610183716,
      "logps/chosen": -44.228126525878906,
      "logps/rejected": -170.83749389648438,
      "loss": 0.1657,
      "rewards/accuracies": 0.7984374761581421,
      "rewards/chosen": -0.09162139892578125,
      "rewards/margins": 6.3896484375,
      "rewards/rejected": -6.482421875,
      "step": 710
    },
    {
      "epoch": 1.0,
      "step": 712,
      "total_flos": 0.0,
      "train_loss": 0.23233672156092827,
      "train_runtime": 2773.067,
      "train_samples_per_second": 16.43,
      "train_steps_per_second": 0.257
    }
  ],
  "logging_steps": 10,
  "max_steps": 712,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}