DS-7B-Qwen-distil-KTO-keep-alt / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9993753903810119,
"eval_steps": 200,
"global_step": 800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006246096189881324,
"grad_norm": 116.66677856445312,
"kl": 0.4583333432674408,
"learning_rate": 1.5625e-07,
"logits/chosen": 763024214.0595745,
"logits/rejected": 257919736.68571427,
"logps/chosen": -2094.8425531914895,
"logps/rejected": -1494.726530612245,
"loss": 0.5016,
"rewards/chosen": 1.0933048329454786,
"rewards/margins": -85873591.85363396,
"rewards/rejected": 85873592.94693878,
"step": 5
},
{
"epoch": 0.012492192379762648,
"grad_norm": 110.63726806640625,
"kl": 0.38749998807907104,
"learning_rate": 3.125e-07,
"logits/chosen": 921293980.2033899,
"logits/rejected": 220243934.4262295,
"logps/chosen": -2251.3898305084745,
"logps/rejected": -1449.1803278688524,
"loss": 0.4946,
"rewards/chosen": 1.1281716298248807,
"rewards/margins": -41765219.59313984,
"rewards/rejected": 41765220.72131147,
"step": 10
},
{
"epoch": 0.018738288569643973,
"grad_norm": 121.6174087524414,
"kl": 1.6833332777023315,
"learning_rate": 4.6875000000000006e-07,
"logits/chosen": 587306067.5021459,
"logits/rejected": 338677312.2591093,
"logps/chosen": -2082.74678111588,
"logps/rejected": -1634.7206477732793,
"loss": 0.4687,
"rewards/chosen": 2.346424020922747,
"rewards/margins": -31779756.811470717,
"rewards/rejected": 31779759.157894738,
"step": 15
},
{
"epoch": 0.024984384759525295,
"grad_norm": 68.7260971069336,
"kl": 4.829166889190674,
"learning_rate": 6.25e-07,
"logits/chosen": 647625703.424,
"logits/rejected": 289995090.3652174,
"logps/chosen": -1992.704,
"logps/rejected": -1608.904347826087,
"loss": 0.4112,
"rewards/chosen": 2.184742919921875,
"rewards/margins": -24643590.719604906,
"rewards/rejected": 24643592.904347826,
"step": 20
},
{
"epoch": 0.03123048094940662,
"grad_norm": 51.2025260925293,
"kl": 9.495833396911621,
"learning_rate": 7.8125e-07,
"logits/chosen": 728003276.3408072,
"logits/rejected": 248500271.81322956,
"logps/chosen": -2006.2421524663678,
"logps/rejected": -1371.8910505836575,
"loss": 0.2966,
"rewards/chosen": 3.2302421261911434,
"rewards/margins": 6841244.132965862,
"rewards/rejected": -6841240.902723735,
"step": 25
},
{
"epoch": 0.037476577139287946,
"grad_norm": 27.76913070678711,
"kl": 22.079166412353516,
"learning_rate": 9.375000000000001e-07,
"logits/chosen": 918946912.2735043,
"logits/rejected": 535911848.58536583,
"logps/chosen": -2157.948717948718,
"logps/rejected": -1666.081300813008,
"loss": 0.1937,
"rewards/chosen": 5.411676912226229,
"rewards/margins": -80127611.6289735,
"rewards/rejected": 80127617.04065041,
"step": 30
},
{
"epoch": 0.04372267332916927,
"grad_norm": 33.331947326660156,
"kl": 38.80833435058594,
"learning_rate": 1.0937500000000001e-06,
"logits/chosen": 992420333.4939759,
"logits/rejected": 541292180.5021645,
"logps/chosen": -2068.9477911646586,
"logps/rejected": -1451.3593073593074,
"loss": 0.1621,
"rewards/chosen": 7.359968193084839,
"rewards/margins": -22775727.497174665,
"rewards/rejected": 22775734.85714286,
"step": 35
},
{
"epoch": 0.04996876951905059,
"grad_norm": 19.21103286743164,
"kl": 66.71666717529297,
"learning_rate": 1.25e-06,
"logits/chosen": 1329413437.2392156,
"logits/rejected": 860745746.2044444,
"logps/chosen": -2012.3607843137254,
"logps/rejected": -1481.9555555555555,
"loss": 0.1239,
"rewards/chosen": 10.802983302696079,
"rewards/margins": -30159119.365905587,
"rewards/rejected": 30159130.16888889,
"step": 40
},
{
"epoch": 0.056214865708931916,
"grad_norm": 10.822747230529785,
"kl": 82.54166412353516,
"learning_rate": 1.40625e-06,
"logits/chosen": 1361087192.9491525,
"logits/rejected": 947972868.1967213,
"logps/chosen": -1899.3898305084747,
"logps/rejected": -1508.983606557377,
"loss": 0.1131,
"rewards/chosen": 13.423981230137711,
"rewards/margins": -74581901.85470729,
"rewards/rejected": 74581915.27868852,
"step": 45
},
{
"epoch": 0.06246096189881324,
"grad_norm": 7.292627811431885,
"kl": 118.73332977294922,
"learning_rate": 1.5625e-06,
"logits/chosen": 1479708155.697479,
"logits/rejected": 1002230673.983471,
"logps/chosen": -1897.546218487395,
"logps/rejected": -1337.9173553719008,
"loss": 0.11,
"rewards/chosen": 17.611668690913866,
"rewards/margins": -83405586.35527346,
"rewards/rejected": 83405603.96694215,
"step": 50
},
{
"epoch": 0.06870705808869457,
"grad_norm": 7.088067054748535,
"kl": 150.61666870117188,
"learning_rate": 1.71875e-06,
"logits/chosen": 1500882604.119149,
"logits/rejected": 1047514584.2938776,
"logps/chosen": -1893.1744680851064,
"logps/rejected": -1392.3265306122448,
"loss": 0.1315,
"rewards/chosen": 22.309776013962765,
"rewards/margins": -67494124.82491787,
"rewards/rejected": 67494147.13469388,
"step": 55
},
{
"epoch": 0.07495315427857589,
"grad_norm": 10.061450958251953,
"kl": 160.98333740234375,
"learning_rate": 1.8750000000000003e-06,
"logits/chosen": 1491436649.9310346,
"logits/rejected": 1136318133.6774194,
"logps/chosen": -1901.5172413793102,
"logps/rejected": -1438.0645161290322,
"loss": 0.1269,
"rewards/chosen": 22.11644245016164,
"rewards/margins": -68507179.94807367,
"rewards/rejected": 68507202.06451613,
"step": 60
},
{
"epoch": 0.08119925046845722,
"grad_norm": 5.119268417358398,
"kl": 169.3000030517578,
"learning_rate": 2.0312500000000002e-06,
"logits/chosen": 1529824284.1834862,
"logits/rejected": 1174020908.946565,
"logps/chosen": -1950.0183486238532,
"logps/rejected": -1506.6870229007634,
"loss": 0.1184,
"rewards/chosen": 21.517562446244266,
"rewards/margins": -64286292.00915511,
"rewards/rejected": 64286313.52671756,
"step": 65
},
{
"epoch": 0.08744534665833854,
"grad_norm": 7.102128028869629,
"kl": 189.86666870117188,
"learning_rate": 2.1875000000000002e-06,
"logits/chosen": 1418054483.2263374,
"logits/rejected": 1115578679.0886075,
"logps/chosen": -1872.625514403292,
"logps/rejected": -1406.649789029536,
"loss": 0.1345,
"rewards/chosen": 25.48732076260288,
"rewards/margins": -91184910.47892396,
"rewards/rejected": 91184935.96624473,
"step": 70
},
{
"epoch": 0.09369144284821987,
"grad_norm": 6.652032852172852,
"kl": 205.64999389648438,
"learning_rate": 2.3437500000000002e-06,
"logits/chosen": 1280121794.313253,
"logits/rejected": 1075884368.9004328,
"logps/chosen": -1890.8273092369477,
"logps/rejected": -1434.874458874459,
"loss": 0.1308,
"rewards/chosen": 25.247278175200805,
"rewards/margins": -58559015.0297781,
"rewards/rejected": 58559040.27705628,
"step": 75
},
{
"epoch": 0.09993753903810118,
"grad_norm": 7.803961277008057,
"kl": 215.3000030517578,
"learning_rate": 2.5e-06,
"logits/chosen": 1125117697.0622406,
"logits/rejected": 1087781335.2970712,
"logps/chosen": -1695.8672199170123,
"logps/rejected": -1510.2928870292887,
"loss": 0.1215,
"rewards/chosen": 27.956099260892117,
"rewards/margins": -64467297.36607647,
"rewards/rejected": 64467325.322175734,
"step": 80
},
{
"epoch": 0.1061836352279825,
"grad_norm": 2.8079729080200195,
"kl": 220.39999389648438,
"learning_rate": 2.65625e-06,
"logits/chosen": 1052407833.2839506,
"logits/rejected": 1024613604.9957806,
"logps/chosen": -1662.0246913580247,
"logps/rejected": -1444.3206751054852,
"loss": 0.1301,
"rewards/chosen": 28.417864261831276,
"rewards/margins": -75106206.45133404,
"rewards/rejected": 75106234.8691983,
"step": 85
},
{
"epoch": 0.11242973141786383,
"grad_norm": 8.319477081298828,
"kl": 240.06666564941406,
"learning_rate": 2.8125e-06,
"logits/chosen": 995378244.2666667,
"logits/rejected": 974057198.9333333,
"logps/chosen": -1697.0666666666666,
"logps/rejected": -1485.6,
"loss": 0.1139,
"rewards/chosen": 30.144661458333335,
"rewards/margins": -43094211.98867188,
"rewards/rejected": 43094242.13333333,
"step": 90
},
{
"epoch": 0.11867582760774516,
"grad_norm": 7.453002452850342,
"kl": 252.8333282470703,
"learning_rate": 2.96875e-06,
"logits/chosen": 1268567244.8,
"logits/rejected": 996391867.7333333,
"logps/chosen": -1984.1333333333334,
"logps/rejected": -1418.2666666666667,
"loss": 0.1239,
"rewards/chosen": 33.62305908203125,
"rewards/margins": -149487650.6436076,
"rewards/rejected": 149487684.26666668,
"step": 95
},
{
"epoch": 0.12492192379762648,
"grad_norm": 2.9787914752960205,
"kl": 253.6666717529297,
"learning_rate": 3.125e-06,
"logits/chosen": 1126902571.1686275,
"logits/rejected": 1192673644.088889,
"logps/chosen": -1734.650980392157,
"logps/rejected": -1441.9911111111112,
"loss": 0.1281,
"rewards/chosen": 28.739571844362747,
"rewards/margins": -46898356.61153927,
"rewards/rejected": 46898385.351111114,
"step": 100
},
{
"epoch": 0.1311680199875078,
"grad_norm": 5.351078987121582,
"kl": 260.0333251953125,
"learning_rate": 3.28125e-06,
"logits/chosen": 1035580677.3556485,
"logits/rejected": 1086481369.759336,
"logps/chosen": -1673.2384937238494,
"logps/rejected": -1407.734439834025,
"loss": 0.0947,
"rewards/chosen": 30.54708543083159,
"rewards/margins": -57016977.784864776,
"rewards/rejected": 57017008.33195021,
"step": 105
},
{
"epoch": 0.13741411617738913,
"grad_norm": 2.6211190223693848,
"kl": 257.4333190917969,
"learning_rate": 3.4375e-06,
"logits/chosen": 992500800.5765766,
"logits/rejected": 1126154367.007752,
"logps/chosen": -1670.1981981981983,
"logps/rejected": -1532.8992248062016,
"loss": 0.086,
"rewards/chosen": 30.3542700415259,
"rewards/margins": -29103875.38216407,
"rewards/rejected": 29103905.73643411,
"step": 110
},
{
"epoch": 0.14366021236727045,
"grad_norm": 6.189810752868652,
"kl": 277.76666259765625,
"learning_rate": 3.59375e-06,
"logits/chosen": 1039450317.6569037,
"logits/rejected": 1082791774.5394192,
"logps/chosen": -1658.510460251046,
"logps/rejected": -1438.3402489626556,
"loss": 0.1093,
"rewards/chosen": 35.08254609048117,
"rewards/margins": -68647662.9755452,
"rewards/rejected": 68647698.05809128,
"step": 115
},
{
"epoch": 0.14990630855715179,
"grad_norm": 2.9020302295684814,
"kl": 283.3666687011719,
"learning_rate": 3.7500000000000005e-06,
"logits/chosen": 1015666845.5384616,
"logits/rejected": 921090759.9656652,
"logps/chosen": -1648.582995951417,
"logps/rejected": -1456.6866952789699,
"loss": 0.1231,
"rewards/chosen": 38.58084514170041,
"rewards/margins": -49614536.938468166,
"rewards/rejected": 49614575.519313306,
"step": 120
},
{
"epoch": 0.1561524047470331,
"grad_norm": 3.2500624656677246,
"kl": 274.9333190917969,
"learning_rate": 3.90625e-06,
"logits/chosen": 739230529.0847458,
"logits/rejected": 916730460.3278688,
"logps/chosen": -1657.7627118644068,
"logps/rejected": -1521.1803278688524,
"loss": 0.1015,
"rewards/chosen": 39.418812897245765,
"rewards/margins": -48657276.25331825,
"rewards/rejected": 48657315.67213115,
"step": 125
},
{
"epoch": 0.16239850093691444,
"grad_norm": 2.6585230827331543,
"kl": 280.6000061035156,
"learning_rate": 4.0625000000000005e-06,
"logits/chosen": 573097664.2700422,
"logits/rejected": 1011293297.7777778,
"logps/chosen": -1583.5274261603377,
"logps/rejected": -1520.0658436213992,
"loss": 0.1055,
"rewards/chosen": 37.8720703125,
"rewards/margins": -6502969.469493474,
"rewards/rejected": 6503007.341563786,
"step": 130
},
{
"epoch": 0.16864459712679575,
"grad_norm": 4.475651741027832,
"kl": 298.6333312988281,
"learning_rate": 4.21875e-06,
"logits/chosen": 636761140.2039216,
"logits/rejected": 859953488.7822223,
"logps/chosen": -1657.349019607843,
"logps/rejected": -1411.9822222222222,
"loss": 0.0988,
"rewards/chosen": 39.78631663602941,
"rewards/margins": -48714953.422572255,
"rewards/rejected": 48714993.20888889,
"step": 135
},
{
"epoch": 0.1748906933166771,
"grad_norm": 2.648059606552124,
"kl": 290.9333190917969,
"learning_rate": 4.3750000000000005e-06,
"logits/chosen": 607103567.2635983,
"logits/rejected": 967892210.1908714,
"logps/chosen": -1529.5732217573222,
"logps/rejected": -1455.9336099585062,
"loss": 0.0941,
"rewards/chosen": 39.2620211166318,
"rewards/margins": -50780460.65499133,
"rewards/rejected": 50780499.917012446,
"step": 140
},
{
"epoch": 0.1811367895065584,
"grad_norm": 4.189403533935547,
"kl": 297.6333312988281,
"learning_rate": 4.53125e-06,
"logits/chosen": 732500241.0666667,
"logits/rejected": 973196492.8,
"logps/chosen": -1670.4,
"logps/rejected": -1503.0666666666666,
"loss": 0.1089,
"rewards/chosen": 35.4449462890625,
"rewards/margins": -47891689.35505371,
"rewards/rejected": 47891724.8,
"step": 145
},
{
"epoch": 0.18738288569643974,
"grad_norm": 1.987473726272583,
"kl": 320.6000061035156,
"learning_rate": 4.6875000000000004e-06,
"logits/chosen": 786999262.4262295,
"logits/rejected": 786822994.440678,
"logps/chosen": -1613.311475409836,
"logps/rejected": -1424.677966101695,
"loss": 0.1051,
"rewards/chosen": 43.21544409579918,
"rewards/margins": -41480495.90319997,
"rewards/rejected": 41480539.118644066,
"step": 150
},
{
"epoch": 0.19362898188632105,
"grad_norm": 2.4980077743530273,
"kl": 303.5,
"learning_rate": 4.84375e-06,
"logits/chosen": 865481696.4229075,
"logits/rejected": 753880553.7391304,
"logps/chosen": -1596.9691629955946,
"logps/rejected": -1416.7272727272727,
"loss": 0.0884,
"rewards/chosen": 35.61597647645925,
"rewards/margins": -22194391.324734986,
"rewards/rejected": 22194426.94071146,
"step": 155
},
{
"epoch": 0.19987507807620236,
"grad_norm": 2.3918511867523193,
"kl": 324.6000061035156,
"learning_rate": 5e-06,
"logits/chosen": 928669237.248,
"logits/rejected": 849364796.1043478,
"logps/chosen": -1578.88,
"logps/rejected": -1521.6695652173912,
"loss": 0.0844,
"rewards/chosen": 37.32334375,
"rewards/margins": -24807967.6853519,
"rewards/rejected": 24808005.00869565,
"step": 160
},
{
"epoch": 0.2061211742660837,
"grad_norm": 3.1913163661956787,
"kl": 323.3666687011719,
"learning_rate": 4.999851262500375e-06,
"logits/chosen": 989165542.0759493,
"logits/rejected": 743453329.3827161,
"logps/chosen": -1582.3122362869199,
"logps/rejected": -1438.6172839506173,
"loss": 0.0912,
"rewards/chosen": 43.50049034150844,
"rewards/margins": -5548838.145600193,
"rewards/rejected": 5548881.6460905345,
"step": 165
},
{
"epoch": 0.212367270455965,
"grad_norm": 1.8797273635864258,
"kl": 319.6000061035156,
"learning_rate": 4.999405067699773e-06,
"logits/chosen": 816467482.0338984,
"logits/rejected": 570425344.0,
"logps/chosen": -1562.7796610169491,
"logps/rejected": -1319.4754098360656,
"loss": 0.0781,
"rewards/chosen": 46.163491459216104,
"rewards/margins": -39313294.688967556,
"rewards/rejected": 39313340.85245901,
"step": 170
},
{
"epoch": 0.21861336664584635,
"grad_norm": 3.3823835849761963,
"kl": 350.0,
"learning_rate": 4.998661468690914e-06,
"logits/chosen": 859967620.1290323,
"logits/rejected": 695513229.2413793,
"logps/chosen": -1611.8709677419354,
"logps/rejected": -1512.8275862068965,
"loss": 0.1091,
"rewards/chosen": 46.6678466796875,
"rewards/margins": -36436881.33215332,
"rewards/rejected": 36436928.0,
"step": 175
},
{
"epoch": 0.22485946283572766,
"grad_norm": 5.648082733154297,
"kl": 328.3999938964844,
"learning_rate": 4.997620553954645e-06,
"logits/chosen": 589463686.8477366,
"logits/rejected": 560731546.464135,
"logps/chosen": -1461.7283950617284,
"logps/rejected": -1366.6835443037974,
"loss": 0.0899,
"rewards/chosen": 48.1337890625,
"rewards/margins": -25789452.355662413,
"rewards/rejected": 25789500.489451475,
"step": 180
},
{
"epoch": 0.231105559025609,
"grad_norm": 2.2540504932403564,
"kl": 370.29998779296875,
"learning_rate": 4.996282447349408e-06,
"logits/chosen": 559837283.902439,
"logits/rejected": 561749945.982906,
"logps/chosen": -1595.4471544715448,
"logps/rejected": -1394.940170940171,
"loss": 0.0976,
"rewards/chosen": 53.93244648755081,
"rewards/margins": -43921896.700032145,
"rewards/rejected": 43921950.63247863,
"step": 185
},
{
"epoch": 0.23735165521549031,
"grad_norm": 2.868093490600586,
"kl": 339.73333740234375,
"learning_rate": 4.994647308096509e-06,
"logits/chosen": 453137114.8215768,
"logits/rejected": 583618097.2719666,
"logps/chosen": -1479.6348547717841,
"logps/rejected": -1402.3096234309623,
"loss": 0.116,
"rewards/chosen": 47.16783097769709,
"rewards/margins": -11266927.359365676,
"rewards/rejected": 11266974.527196653,
"step": 190
},
{
"epoch": 0.24359775140537165,
"grad_norm": 2.977546215057373,
"kl": 345.9666748046875,
"learning_rate": 4.992715330761167e-06,
"logits/chosen": 526250719.17948717,
"logits/rejected": 581422604.4878049,
"logps/chosen": -1470.5641025641025,
"logps/rejected": -1387.0569105691056,
"loss": 0.1131,
"rewards/chosen": 51.28399188701923,
"rewards/margins": -30845854.927390225,
"rewards/rejected": 30845906.211382113,
"step": 195
},
{
"epoch": 0.24984384759525297,
"grad_norm": 4.593957424163818,
"kl": 341.76666259765625,
"learning_rate": 4.990486745229364e-06,
"logits/chosen": 636709686.7008547,
"logits/rejected": 676817445.4634147,
"logps/chosen": -1580.854700854701,
"logps/rejected": -1433.4959349593496,
"loss": 0.0944,
"rewards/chosen": 44.716037326388886,
"rewards/margins": -2637613.8205480394,
"rewards/rejected": 2637658.536585366,
"step": 200
},
{
"epoch": 0.24984384759525297,
"eval_kl": 358.7391357421875,
"eval_logits/chosen": 631036890.3675048,
"eval_logits/rejected": 673337517.769697,
"eval_logps/chosen": -1513.725338491296,
"eval_logps/rejected": -1423.1353535353535,
"eval_loss": 0.0891943946480751,
"eval_rewards/chosen": 50.3120164410058,
"eval_rewards/margins": -21855587.15060982,
"eval_rewards/rejected": 21855637.462626263,
"eval_runtime": 640.4487,
"eval_samples_per_second": 6.316,
"eval_steps_per_second": 0.395,
"step": 200
},
{
"epoch": 0.2560899437851343,
"grad_norm": 4.819016933441162,
"kl": 377.70001220703125,
"learning_rate": 4.987961816680493e-06,
"logits/chosen": 683028876.3870968,
"logits/rejected": 758753209.3793104,
"logps/chosen": -1516.774193548387,
"logps/rejected": -1466.7586206896551,
"loss": 0.0996,
"rewards/chosen": 56.12273185483871,
"rewards/margins": -41749567.04968194,
"rewards/rejected": 41749623.172413796,
"step": 205
},
{
"epoch": 0.2623360399750156,
"grad_norm": 2.632314682006836,
"kl": 348.29998779296875,
"learning_rate": 4.985140845555799e-06,
"logits/chosen": 697918600.8097166,
"logits/rejected": 637678218.4377682,
"logps/chosen": -1600.1943319838056,
"logps/rejected": -1350.0429184549357,
"loss": 0.1253,
"rewards/chosen": 54.92004443952429,
"rewards/margins": -39001740.650771014,
"rewards/rejected": 39001795.57081545,
"step": 210
},
{
"epoch": 0.26858213616489696,
"grad_norm": 2.870805263519287,
"kl": 350.8333435058594,
"learning_rate": 4.982024167522638e-06,
"logits/chosen": 527803308.06557375,
"logits/rejected": 660025274.5762712,
"logps/chosen": -1443.016393442623,
"logps/rejected": -1426.4406779661017,
"loss": 0.1019,
"rewards/chosen": 50.20684314164959,
"rewards/margins": -13903121.047394147,
"rewards/rejected": 13903171.254237289,
"step": 215
},
{
"epoch": 0.27482823235477827,
"grad_norm": 7.444933891296387,
"kl": 368.73333740234375,
"learning_rate": 4.978612153434527e-06,
"logits/chosen": 546643296.5245901,
"logits/rejected": 507599646.37288135,
"logps/chosen": -1576.2622950819673,
"logps/rejected": -1438.1016949152543,
"loss": 0.0991,
"rewards/chosen": 47.69948290215164,
"rewards/margins": -18468254.402212013,
"rewards/rejected": 18468302.101694915,
"step": 220
},
{
"epoch": 0.2810743285446596,
"grad_norm": 0.719657838344574,
"kl": 369.5,
"learning_rate": 4.97490520928702e-06,
"logits/chosen": 393574952.3651452,
"logits/rejected": 569064717.9246862,
"logps/chosen": -1517.5435684647302,
"logps/rejected": -1512.3012552301254,
"loss": 0.108,
"rewards/chosen": 46.48416023729253,
"rewards/margins": -8972016.043036416,
"rewards/rejected": 8972062.527196653,
"step": 225
},
{
"epoch": 0.2873204247345409,
"grad_norm": 4.509027481079102,
"kl": 349.29998779296875,
"learning_rate": 4.970903776169403e-06,
"logits/chosen": 381775766.974359,
"logits/rejected": 486850426.796748,
"logps/chosen": -1601.6410256410256,
"logps/rejected": -1370.4065040650407,
"loss": 0.0894,
"rewards/chosen": 49.49356887686966,
"rewards/margins": -17199426.018626243,
"rewards/rejected": 17199475.51219512,
"step": 230
},
{
"epoch": 0.29356652092442226,
"grad_norm": 3.1007473468780518,
"kl": 356.6000061035156,
"learning_rate": 4.966608330212198e-06,
"logits/chosen": 246713048.94915253,
"logits/rejected": 590331098.2295082,
"logps/chosen": -1364.2033898305085,
"logps/rejected": -1417.967213114754,
"loss": 0.0745,
"rewards/chosen": 53.196975966631356,
"rewards/margins": -17711701.294827312,
"rewards/rejected": 17711754.491803277,
"step": 235
},
{
"epoch": 0.29981261711430357,
"grad_norm": 1.0693767070770264,
"kl": 363.70001220703125,
"learning_rate": 4.962019382530521e-06,
"logits/chosen": 238992315.73333332,
"logits/rejected": 727606886.4,
"logps/chosen": -1513.1291666666666,
"logps/rejected": -1630.2666666666667,
"loss": 0.1331,
"rewards/chosen": 52.389933268229164,
"rewards/margins": -38082861.74340007,
"rewards/rejected": 38082914.13333333,
"step": 240
},
{
"epoch": 0.3060587133041849,
"grad_norm": 1.2437098026275635,
"kl": 341.76666259765625,
"learning_rate": 4.957137479163253e-06,
"logits/chosen": 186218359.46666667,
"logits/rejected": 268291276.8,
"logps/chosen": -1443.6,
"logps/rejected": -1312.8666666666666,
"loss": 0.0924,
"rewards/chosen": 49.6269287109375,
"rewards/margins": -20172466.639737956,
"rewards/rejected": 20172516.266666666,
"step": 245
},
{
"epoch": 0.3123048094940662,
"grad_norm": 1.5108606815338135,
"kl": 359.4333190917969,
"learning_rate": 4.9519632010080765e-06,
"logits/chosen": 545861161.9672132,
"logits/rejected": 418665072.8135593,
"logps/chosen": -1472.9180327868853,
"logps/rejected": -1367.7288135593221,
"loss": 0.1153,
"rewards/chosen": 42.937391937756146,
"rewards/margins": -10470006.689726705,
"rewards/rejected": 10470049.627118643,
"step": 250
},
{
"epoch": 0.3185509056839475,
"grad_norm": 2.7949297428131104,
"kl": 338.8333435058594,
"learning_rate": 4.9464971637523465e-06,
"logits/chosen": 841517825.1583711,
"logits/rejected": 609372452.5714285,
"logps/chosen": -1627.2217194570135,
"logps/rejected": -1443.7065637065637,
"loss": 0.0885,
"rewards/chosen": 48.748851102941174,
"rewards/margins": -26131600.98087863,
"rewards/rejected": 26131649.72972973,
"step": 255
},
{
"epoch": 0.32479700187382887,
"grad_norm": 1.7337244749069214,
"kl": 370.8666687011719,
"learning_rate": 4.9407400177998335e-06,
"logits/chosen": 640891377.2510288,
"logits/rejected": 656771374.4472574,
"logps/chosen": -1558.2551440329219,
"logps/rejected": -1530.1940928270042,
"loss": 0.1031,
"rewards/chosen": 46.06944846322016,
"rewards/margins": -44654921.82928571,
"rewards/rejected": 44654967.898734175,
"step": 260
},
{
"epoch": 0.3310430980637102,
"grad_norm": 1.5853413343429565,
"kl": 369.3333435058594,
"learning_rate": 4.9346924481933345e-06,
"logits/chosen": 461213069.5529412,
"logits/rejected": 386304719.07555556,
"logps/chosen": -1592.2196078431373,
"logps/rejected": -1394.8444444444444,
"loss": 0.1039,
"rewards/chosen": 47.77142693014706,
"rewards/margins": -23714219.81968418,
"rewards/rejected": 23714267.591111112,
"step": 265
},
{
"epoch": 0.3372891942535915,
"grad_norm": 5.782619953155518,
"kl": 337.1666564941406,
"learning_rate": 4.928355174533153e-06,
"logits/chosen": 341149324.5023256,
"logits/rejected": 302084853.3735849,
"logps/chosen": -1583.739534883721,
"logps/rejected": -1379.622641509434,
"loss": 0.0806,
"rewards/chosen": 50.5806640625,
"rewards/margins": 121700.19575840211,
"rewards/rejected": -121649.61509433962,
"step": 270
},
{
"epoch": 0.3435352904434728,
"grad_norm": 2.3726727962493896,
"kl": 349.76666259765625,
"learning_rate": 4.9217289508914836e-06,
"logits/chosen": 311504913.88646287,
"logits/rejected": 560720794.0079681,
"logps/chosen": -1507.8427947598254,
"logps/rejected": -1589.9282868525897,
"loss": 0.0565,
"rewards/chosen": 49.57000972298035,
"rewards/margins": -21957986.318436492,
"rewards/rejected": 21958035.888446216,
"step": 275
},
{
"epoch": 0.3497813866333542,
"grad_norm": 0.7196549773216248,
"kl": 324.1666564941406,
"learning_rate": 4.914814565722671e-06,
"logits/chosen": 295960576.0,
"logits/rejected": 450128081.63779527,
"logps/chosen": -1468.7433628318583,
"logps/rejected": -1489.5118110236222,
"loss": 0.0737,
"rewards/chosen": 47.86092194413717,
"rewards/margins": -10037388.611519001,
"rewards/rejected": 10037436.472440945,
"step": 280
},
{
"epoch": 0.3560274828232355,
"grad_norm": 1.9283077716827393,
"kl": 367.9333190917969,
"learning_rate": 4.907612841769407e-06,
"logits/chosen": 423632864.1245136,
"logits/rejected": 392181530.40358746,
"logps/chosen": -1474.739299610895,
"logps/rejected": -1542.6726457399104,
"loss": 0.091,
"rewards/chosen": 51.97092731639105,
"rewards/margins": -1841733.7151713383,
"rewards/rejected": 1841785.6860986548,
"step": 285
},
{
"epoch": 0.3622735790131168,
"grad_norm": 1.7306408882141113,
"kl": 331.0666809082031,
"learning_rate": 4.900124635964823e-06,
"logits/chosen": 253768665.11392406,
"logits/rejected": 425903091.3580247,
"logps/chosen": -1416.5738396624472,
"logps/rejected": -1596.4444444444443,
"loss": 0.1082,
"rewards/chosen": 47.69798259493671,
"rewards/margins": -4647375.2485194625,
"rewards/rejected": 4647422.946502058,
"step": 290
},
{
"epoch": 0.3685196752029981,
"grad_norm": 1.1886324882507324,
"kl": 312.8833312988281,
"learning_rate": 4.8923508393305224e-06,
"logits/chosen": 333225353.84615386,
"logits/rejected": 267209547.29411766,
"logps/chosen": -1454.076923076923,
"logps/rejected": -1452.5294117647059,
"loss": 0.0691,
"rewards/chosen": 46.84036020132211,
"rewards/margins": -18551674.571404506,
"rewards/rejected": 18551721.411764707,
"step": 295
},
{
"epoch": 0.3747657713928795,
"grad_norm": 1.4832735061645508,
"kl": 345.0333251953125,
"learning_rate": 4.884292376870567e-06,
"logits/chosen": 251636303.26359832,
"logits/rejected": 135267391.73443982,
"logps/chosen": -1426.3430962343095,
"logps/rejected": -1469.609958506224,
"loss": 0.0914,
"rewards/chosen": 49.49623676124477,
"rewards/margins": -1139935.89795411,
"rewards/rejected": 1139985.3941908714,
"step": 300
},
{
"epoch": 0.3810118675827608,
"grad_norm": 2.8397765159606934,
"kl": 313.3333435058594,
"learning_rate": 4.875950207461403e-06,
"logits/chosen": 39220267.02521008,
"logits/rejected": -187643108.49586776,
"logps/chosen": -1383.6638655462184,
"logps/rejected": -1436.1652892561983,
"loss": 0.0686,
"rewards/chosen": 51.476660976890756,
"rewards/margins": -7085320.820859684,
"rewards/rejected": 7085372.297520661,
"step": 305
},
{
"epoch": 0.3872579637726421,
"grad_norm": 1.789415717124939,
"kl": 348.8333435058594,
"learning_rate": 4.867325323737765e-06,
"logits/chosen": 47862690.167330675,
"logits/rejected": -399679166.04366815,
"logps/chosen": -1544.5418326693227,
"logps/rejected": -1370.480349344978,
"loss": 0.1215,
"rewards/chosen": 53.38380073456175,
"rewards/margins": -2953413.0266796146,
"rewards/rejected": 2953466.4104803493,
"step": 310
},
{
"epoch": 0.3935040599625234,
"grad_norm": 2.056408643722534,
"kl": 353.8999938964844,
"learning_rate": 4.858418751974564e-06,
"logits/chosen": 163274429.48031497,
"logits/rejected": -516202133.5221239,
"logps/chosen": -1566.1102362204724,
"logps/rejected": -1494.4424778761063,
"loss": 0.1019,
"rewards/chosen": 53.50947342519685,
"rewards/margins": -18813197.322384983,
"rewards/rejected": 18813250.831858408,
"step": 315
},
{
"epoch": 0.3997501561524047,
"grad_norm": 1.931511402130127,
"kl": 337.4333190917969,
"learning_rate": 4.849231551964771e-06,
"logits/chosen": 35473859.25423729,
"logits/rejected": -309613551.21311474,
"logps/chosen": -1304.7457627118645,
"logps/rejected": -1470.0983606557377,
"loss": 0.0629,
"rewards/chosen": 45.01601810778602,
"rewards/margins": -11103810.197096646,
"rewards/rejected": 11103855.213114753,
"step": 320
},
{
"epoch": 0.4059962523422861,
"grad_norm": 1.5798239707946777,
"kl": 345.73333740234375,
"learning_rate": 4.839764816893315e-06,
"logits/chosen": 356868378.48275864,
"logits/rejected": -383262984.2580645,
"logps/chosen": -1635.3103448275863,
"logps/rejected": -1358.967741935484,
"loss": 0.0973,
"rewards/chosen": 44.951710668103445,
"rewards/margins": -22694117.628934495,
"rewards/rejected": 22694162.580645163,
"step": 325
},
{
"epoch": 0.4122423485321674,
"grad_norm": 2.102696418762207,
"kl": 361.3666687011719,
"learning_rate": 4.830019673206997e-06,
"logits/chosen": 375195714.0645161,
"logits/rejected": -311598821.51724136,
"logps/chosen": -1594.3225806451612,
"logps/rejected": -1432.8275862068965,
"loss": 0.0887,
"rewards/chosen": 51.812842584425404,
"rewards/margins": -5921475.911295347,
"rewards/rejected": 5921527.724137931,
"step": 330
},
{
"epoch": 0.4184884447220487,
"grad_norm": 2.166639566421509,
"kl": 345.79998779296875,
"learning_rate": 4.8199972804804615e-06,
"logits/chosen": 103189217.94331984,
"logits/rejected": -472300231.9656652,
"logps/chosen": -1557.5060728744938,
"logps/rejected": -1455.519313304721,
"loss": 0.1055,
"rewards/chosen": 48.8040193256579,
"rewards/margins": -8893623.573663076,
"rewards/rejected": 8893672.377682403,
"step": 335
},
{
"epoch": 0.42473454091193,
"grad_norm": 2.084704875946045,
"kl": 367.1666564941406,
"learning_rate": 4.809698831278217e-06,
"logits/chosen": 46631794.47154471,
"logits/rejected": -579562294.7008547,
"logps/chosen": -1487.3495934959349,
"logps/rejected": -1438.4957264957266,
"loss": 0.1104,
"rewards/chosen": 54.046732088414636,
"rewards/margins": 3384455.551005593,
"rewards/rejected": -3384401.5042735045,
"step": 340
},
{
"epoch": 0.4309806371018114,
"grad_norm": 1.7686203718185425,
"kl": 361.9333190917969,
"learning_rate": 4.799125551012731e-06,
"logits/chosen": 187324373.1548117,
"logits/rejected": -473956352.0,
"logps/chosen": -1488.4686192468619,
"logps/rejected": -1503.1369294605809,
"loss": 0.0718,
"rewards/chosen": 51.36217802039749,
"rewards/margins": -14683061.268527374,
"rewards/rejected": 14683112.630705394,
"step": 345
},
{
"epoch": 0.4372267332916927,
"grad_norm": 1.8258565664291382,
"kl": 373.1333312988281,
"learning_rate": 4.788278697798619e-06,
"logits/chosen": 254064045.41935483,
"logits/rejected": -444822210.20689654,
"logps/chosen": -1586.1935483870968,
"logps/rejected": -1342.0689655172414,
"loss": 0.1023,
"rewards/chosen": 57.96908470892137,
"rewards/margins": -6941889.617122187,
"rewards/rejected": 6941947.586206896,
"step": 350
},
{
"epoch": 0.443472829481574,
"grad_norm": 2.480158567428589,
"kl": 339.4333190917969,
"learning_rate": 4.77715956230294e-06,
"logits/chosen": -27441811.525423728,
"logits/rejected": -414471151.21311474,
"logps/chosen": -1416.915254237288,
"logps/rejected": -1494.950819672131,
"loss": 0.1236,
"rewards/chosen": 53.628765558792374,
"rewards/margins": -2121243.682709851,
"rewards/rejected": 2121297.3114754097,
"step": 355
},
{
"epoch": 0.4497189256714553,
"grad_norm": 2.491856813430786,
"kl": 345.29998779296875,
"learning_rate": 4.765769467591626e-06,
"logits/chosen": 31055325.866666667,
"logits/rejected": -794077866.6666666,
"logps/chosen": -1370.2666666666667,
"logps/rejected": -1355.0666666666666,
"loss": 0.0978,
"rewards/chosen": 55.763850911458334,
"rewards/margins": 1939260.5638509116,
"rewards/rejected": -1939204.8,
"step": 360
},
{
"epoch": 0.45596502186133664,
"grad_norm": 1.9237349033355713,
"kl": 356.79998779296875,
"learning_rate": 4.75410976897204e-06,
"logits/chosen": 18777031.83122363,
"logits/rejected": -744350875.9176955,
"logps/chosen": -1626.464135021097,
"logps/rejected": -1527.3086419753085,
"loss": 0.0933,
"rewards/chosen": 58.655549512130804,
"rewards/margins": -8199585.525520447,
"rewards/rejected": 8199644.181069959,
"step": 365
},
{
"epoch": 0.462211118051218,
"grad_norm": 1.0969043970108032,
"kl": 337.0666809082031,
"learning_rate": 4.742181853831721e-06,
"logits/chosen": -78409220.23140496,
"logits/rejected": -775291981.4453782,
"logps/chosen": -1493.0247933884298,
"logps/rejected": -1540.5042016806722,
"loss": 0.0874,
"rewards/chosen": 53.18406185433884,
"rewards/margins": 5562913.822717316,
"rewards/rejected": -5562860.638655462,
"step": 370
},
{
"epoch": 0.4684572142410993,
"grad_norm": 0.9217363595962524,
"kl": 320.29998779296875,
"learning_rate": 4.729987141473286e-06,
"logits/chosen": -225435484.812749,
"logits/rejected": -1214406691.7729259,
"logps/chosen": -1473.1474103585658,
"logps/rejected": -1494.2183406113538,
"loss": 0.1034,
"rewards/chosen": 45.022698269422314,
"rewards/margins": -20387059.396515705,
"rewards/rejected": 20387104.419213973,
"step": 375
},
{
"epoch": 0.47470331043098063,
"grad_norm": 1.943109393119812,
"kl": 350.0333251953125,
"learning_rate": 4.717527082945555e-06,
"logits/chosen": -90390528.0,
"logits/rejected": -1083253906.2857144,
"logps/chosen": -1475.875,
"logps/rejected": -1701.142857142857,
"loss": 0.1113,
"rewards/chosen": 54.31856155395508,
"rewards/margins": -1936764.824295589,
"rewards/rejected": 1936819.142857143,
"step": 380
},
{
"epoch": 0.48094940662086194,
"grad_norm": 2.9538779258728027,
"kl": 337.5,
"learning_rate": 4.704803160870888e-06,
"logits/chosen": -34277903.650655024,
"logits/rejected": -937418588.812749,
"logps/chosen": -1378.8646288209607,
"logps/rejected": -1622.9482071713148,
"loss": 0.07,
"rewards/chosen": 49.1605016716703,
"rewards/margins": -2133147.2060321933,
"rewards/rejected": 2133196.3665338648,
"step": 385
},
{
"epoch": 0.4871955028107433,
"grad_norm": 2.6595630645751953,
"kl": 388.5666809082031,
"learning_rate": 4.69181688926877e-06,
"logits/chosen": -68481698.44176707,
"logits/rejected": -716681269.1948051,
"logps/chosen": -1465.9598393574297,
"logps/rejected": -1529.8354978354978,
"loss": 0.1118,
"rewards/chosen": 59.72445641942771,
"rewards/margins": -2215760.7430760483,
"rewards/rejected": 2215820.4675324676,
"step": 390
},
{
"epoch": 0.4934415990006246,
"grad_norm": 1.581071376800537,
"kl": 343.0,
"learning_rate": 4.678569813375654e-06,
"logits/chosen": -185697343.0900474,
"logits/rejected": -681059857.1301116,
"logps/chosen": -1491.4881516587677,
"logps/rejected": -1469.5018587360594,
"loss": 0.0735,
"rewards/chosen": 55.95927595527251,
"rewards/margins": 8371792.702770379,
"rewards/rejected": -8371736.743494424,
"step": 395
},
{
"epoch": 0.49968769519050593,
"grad_norm": 1.2356195449829102,
"kl": 371.3666687011719,
"learning_rate": 4.665063509461098e-06,
"logits/chosen": -118915627.38983051,
"logits/rejected": -537414538.4918033,
"logps/chosen": -1546.7796610169491,
"logps/rejected": -1465.5081967213114,
"loss": 0.0858,
"rewards/chosen": 58.09507829051907,
"rewards/margins": 6046328.521307799,
"rewards/rejected": -6046270.426229509,
"step": 400
},
{
"epoch": 0.49968769519050593,
"eval_kl": 370.019775390625,
"eval_logits/chosen": -210247537.3926499,
"eval_logits/rejected": -512924984.37171715,
"eval_logps/chosen": -1478.0309477756286,
"eval_logps/rejected": -1445.3737373737374,
"eval_loss": 0.08667240291833878,
"eval_rewards/chosen": 53.16212826402321,
"eval_rewards/margins": 7052260.483340385,
"eval_rewards/rejected": -7052207.321212121,
"eval_runtime": 640.6043,
"eval_samples_per_second": 6.314,
"eval_steps_per_second": 0.395,
"step": 400
},
{
"epoch": 0.5059337913803873,
"grad_norm": 1.3852691650390625,
"kl": 372.70001220703125,
"learning_rate": 4.651299584640198e-06,
"logits/chosen": -246257647.3495935,
"logits/rejected": -504503969.9145299,
"logps/chosen": -1464.1951219512196,
"logps/rejected": -1501.8119658119658,
"loss": 0.1236,
"rewards/chosen": 42.6781035632622,
"rewards/margins": 18530582.98579587,
"rewards/rejected": -18530540.307692308,
"step": 405
},
{
"epoch": 0.5121798875702686,
"grad_norm": 2.134881019592285,
"kl": 359.29998779296875,
"learning_rate": 4.637279676682367e-06,
"logits/chosen": -342756045.2320675,
"logits/rejected": -462322768.06584364,
"logps/chosen": -1523.1729957805908,
"logps/rejected": -1381.0041152263375,
"loss": 0.0915,
"rewards/chosen": 51.85403893064346,
"rewards/margins": -1119816.1953437852,
"rewards/rejected": 1119868.049382716,
"step": 410
},
{
"epoch": 0.5184259837601499,
"grad_norm": 1.7998765707015991,
"kl": 375.6666564941406,
"learning_rate": 4.623005453816447e-06,
"logits/chosen": -257030625.40239045,
"logits/rejected": -576705352.6637554,
"logps/chosen": -1588.398406374502,
"logps/rejected": -1496.3842794759826,
"loss": 0.1064,
"rewards/chosen": 55.176921221364545,
"rewards/margins": 15692810.460764015,
"rewards/rejected": -15692755.283842795,
"step": 415
},
{
"epoch": 0.5246720799500312,
"grad_norm": 1.7100578546524048,
"kl": 369.79998779296875,
"learning_rate": 4.608478614532215e-06,
"logits/chosen": -132033194.66666667,
"logits/rejected": -565794133.3333334,
"logps/chosen": -1572.2666666666667,
"logps/rejected": -1489.4666666666667,
"loss": 0.0754,
"rewards/chosen": 50.544681803385416,
"rewards/margins": 18390476.144681804,
"rewards/rejected": -18390425.6,
"step": 420
},
{
"epoch": 0.5309181761399125,
"grad_norm": 2.343752145767212,
"kl": 354.6000061035156,
"learning_rate": 4.59370088737827e-06,
"logits/chosen": -96648747.88571429,
"logits/rejected": -456213107.4723404,
"logps/chosen": -1476.7020408163266,
"logps/rejected": -1570.4510638297872,
"loss": 0.09,
"rewards/chosen": 49.13263711734694,
"rewards/margins": 19609119.702849884,
"rewards/rejected": -19609070.570212767,
"step": 425
},
{
"epoch": 0.5371642723297939,
"grad_norm": 1.4434489011764526,
"kl": 314.0666809082031,
"learning_rate": 4.578674030756364e-06,
"logits/chosen": -89136869.51724137,
"logits/rejected": -684162014.967742,
"logps/chosen": -1368.1379310344828,
"logps/rejected": -1453.4193548387098,
"loss": 0.0911,
"rewards/chosen": 39.44354037580819,
"rewards/margins": 13124843.572572634,
"rewards/rejected": -13124804.129032258,
"step": 430
},
{
"epoch": 0.5434103685196752,
"grad_norm": 1.5906319618225098,
"kl": 351.6000061035156,
"learning_rate": 4.5633998327121595e-06,
"logits/chosen": -210262649.3172691,
"logits/rejected": -455542721.93939394,
"logps/chosen": -1494.29718875502,
"logps/rejected": -1558.5800865800866,
"loss": 0.1078,
"rewards/chosen": 57.3788827183735,
"rewards/margins": 11366640.236025576,
"rewards/rejected": -11366582.857142856,
"step": 435
},
{
"epoch": 0.5496564647095565,
"grad_norm": 0.6508600115776062,
"kl": 353.76666259765625,
"learning_rate": 4.54788011072248e-06,
"logits/chosen": -370147328.0,
"logits/rejected": -258707238.66122448,
"logps/chosen": -1327.1148936170214,
"logps/rejected": -1479.7714285714285,
"loss": 0.0935,
"rewards/chosen": 56.83659408244681,
"rewards/margins": -5150336.110344693,
"rewards/rejected": 5150392.9469387755,
"step": 440
},
{
"epoch": 0.5559025608994379,
"grad_norm": 2.842862606048584,
"kl": 358.98333740234375,
"learning_rate": 4.532116711479039e-06,
"logits/chosen": -289068373.3333333,
"logits/rejected": -292463082.1196581,
"logps/chosen": -1573.7886178861788,
"logps/rejected": -1477.8803418803418,
"loss": 0.0997,
"rewards/chosen": 52.84806910569106,
"rewards/margins": 18087146.454906713,
"rewards/rejected": -18087093.606837608,
"step": 445
},
{
"epoch": 0.5621486570893192,
"grad_norm": 1.5763447284698486,
"kl": 379.70001220703125,
"learning_rate": 4.516111510668707e-06,
"logits/chosen": -332198668.94941634,
"logits/rejected": -401510565.30941707,
"logps/chosen": -1566.7548638132296,
"logps/rejected": -1400.7533632286995,
"loss": 0.1167,
"rewards/chosen": 60.13694674124513,
"rewards/margins": 37355274.77371804,
"rewards/rejected": -37355214.6367713,
"step": 450
},
{
"epoch": 0.5683947532792005,
"grad_norm": 2.1485183238983154,
"kl": 345.0,
"learning_rate": 4.499866412750324e-06,
"logits/chosen": -337919572.59130436,
"logits/rejected": -610292203.52,
"logps/chosen": -1506.0869565217392,
"logps/rejected": -1416.576,
"loss": 0.0855,
"rewards/chosen": 42.14344429347826,
"rewards/margins": 16386423.967444293,
"rewards/rejected": -16386381.824,
"step": 455
},
{
"epoch": 0.5746408494690818,
"grad_norm": 2.107536792755127,
"kl": 378.3666687011719,
"learning_rate": 4.4833833507280884e-06,
"logits/chosen": -258783879.13754648,
"logits/rejected": -399432912.6824645,
"logps/chosen": -1535.1672862453531,
"logps/rejected": -1555.8483412322275,
"loss": 0.1178,
"rewards/chosen": 57.77426158805762,
"rewards/margins": 38428579.328763954,
"rewards/rejected": -38428521.55450237,
"step": 460
},
{
"epoch": 0.5808869456589631,
"grad_norm": 3.102811336517334,
"kl": 390.3666687011719,
"learning_rate": 4.466664285921543e-06,
"logits/chosen": -51058047.02290076,
"logits/rejected": -438891585.7614679,
"logps/chosen": -1564.824427480916,
"logps/rejected": -1454.4587155963302,
"loss": 0.1278,
"rewards/chosen": 65.5237058683206,
"rewards/margins": 23682733.560403116,
"rewards/rejected": -23682668.036697246,
"step": 465
},
{
"epoch": 0.5871330418488445,
"grad_norm": 0.9413681626319885,
"kl": 328.9333190917969,
"learning_rate": 4.4497112077322045e-06,
"logits/chosen": -152921083.60515022,
"logits/rejected": -376898862.6396761,
"logps/chosen": -1384.6523605150214,
"logps/rejected": -1450.1052631578948,
"loss": 0.0891,
"rewards/chosen": 47.79664028969957,
"rewards/margins": 6825927.63469697,
"rewards/rejected": -6825879.83805668,
"step": 470
},
{
"epoch": 0.5933791380387258,
"grad_norm": 2.5080554485321045,
"kl": 356.1666564941406,
"learning_rate": 4.432526133406843e-06,
"logits/chosen": -447649817.70711297,
"logits/rejected": -445022615.90041494,
"logps/chosen": -1432.1004184100418,
"logps/rejected": -1572.5145228215767,
"loss": 0.0826,
"rewards/chosen": 55.74282492154812,
"rewards/margins": 9518347.128717037,
"rewards/rejected": -9518291.385892116,
"step": 475
},
{
"epoch": 0.5996252342286071,
"grad_norm": 1.6492382287979126,
"kl": 355.1333312988281,
"learning_rate": 4.415111107797445e-06,
"logits/chosen": -344561655.84063745,
"logits/rejected": -424582846.04366815,
"logps/chosen": -1563.6653386454184,
"logps/rejected": -1488.9082969432313,
"loss": 0.1065,
"rewards/chosen": 47.48057768924303,
"rewards/margins": 6437849.401975069,
"rewards/rejected": -6437801.92139738,
"step": 480
},
{
"epoch": 0.6058713304184884,
"grad_norm": 3.074094533920288,
"kl": 381.9333190917969,
"learning_rate": 4.397468203117905e-06,
"logits/chosen": -288593281.024,
"logits/rejected": -461102177.9478261,
"logps/chosen": -1617.92,
"logps/rejected": -1586.3652173913044,
"loss": 0.0895,
"rewards/chosen": 55.55388671875,
"rewards/margins": -3992440.5852437164,
"rewards/rejected": 3992496.139130435,
"step": 485
},
{
"epoch": 0.6121174266083698,
"grad_norm": 1.6913539171218872,
"kl": 379.4666748046875,
"learning_rate": 4.379599518697444e-06,
"logits/chosen": -392446512.6692015,
"logits/rejected": -512980774.9308756,
"logps/chosen": -1402.1596958174905,
"logps/rejected": -1331.0230414746543,
"loss": 0.1057,
"rewards/chosen": 50.04800751544677,
"rewards/margins": 4576812.739251755,
"rewards/rejected": -4576762.69124424,
"step": 490
},
{
"epoch": 0.6183635227982511,
"grad_norm": 1.958149790763855,
"kl": 344.79998779296875,
"learning_rate": 4.3615071807308165e-06,
"logits/chosen": -468381422.93333334,
"logits/rejected": -774355899.7333333,
"logps/chosen": -1462.5333333333333,
"logps/rejected": -1496.6,
"loss": 0.0993,
"rewards/chosen": 55.822733561197914,
"rewards/margins": 22640237.156066895,
"rewards/rejected": -22640181.333333332,
"step": 495
},
{
"epoch": 0.6246096189881324,
"grad_norm": 2.8394651412963867,
"kl": 323.04998779296875,
"learning_rate": 4.34319334202531e-06,
"logits/chosen": -594912421.7095436,
"logits/rejected": -977983582.2594142,
"logps/chosen": -1415.966804979253,
"logps/rejected": -1626.8451882845188,
"loss": 0.0739,
"rewards/chosen": 49.25540553682573,
"rewards/margins": 6751152.18427583,
"rewards/rejected": -6751102.928870293,
"step": 500
},
{
"epoch": 0.6308557151780138,
"grad_norm": 1.808105230331421,
"kl": 294.4166564941406,
"learning_rate": 4.324660181744589e-06,
"logits/chosen": -547166021.8181819,
"logits/rejected": -1214759128.5286343,
"logps/chosen": -1386.1185770750988,
"logps/rejected": -1553.057268722467,
"loss": 0.0818,
"rewards/chosen": 58.81477349925889,
"rewards/margins": 42604187.80155764,
"rewards/rejected": -42604128.98678414,
"step": 505
},
{
"epoch": 0.637101811367895,
"grad_norm": 2.6478588581085205,
"kl": 282.5,
"learning_rate": 4.305909905149389e-06,
"logits/chosen": -299325648.97959185,
"logits/rejected": -1448980323.1319149,
"logps/chosen": -1492.6367346938775,
"logps/rejected": -1649.8382978723405,
"loss": 0.061,
"rewards/chosen": 58.450175382653065,
"rewards/margins": 16237909.684217935,
"rewards/rejected": -16237851.234042553,
"step": 510
},
{
"epoch": 0.6433479075577764,
"grad_norm": 2.0377795696258545,
"kl": 277.3208312988281,
"learning_rate": 4.2869447433351165e-06,
"logits/chosen": -297782366.6554622,
"logits/rejected": -1797935205.553719,
"logps/chosen": -1581.7142857142858,
"logps/rejected": -1638.3471074380166,
"loss": 0.0794,
"rewards/chosen": 51.58574054621849,
"rewards/margins": 8687982.296484347,
"rewards/rejected": -8687930.710743802,
"step": 515
},
{
"epoch": 0.6495940037476577,
"grad_norm": 1.192832589149475,
"kl": 161.88333129882812,
"learning_rate": 4.267766952966369e-06,
"logits/chosen": -465031805.15555555,
"logits/rejected": -2354600024.345098,
"logps/chosen": -1563.0222222222221,
"logps/rejected": -1950.6196078431371,
"loss": 0.0339,
"rewards/chosen": 41.315043402777775,
"rewards/margins": 4282941.393474775,
"rewards/rejected": -4282900.0784313725,
"step": 520
},
{
"epoch": 0.655840099937539,
"grad_norm": 2.186739206314087,
"kl": 80.5999984741211,
"learning_rate": 4.248378816008418e-06,
"logits/chosen": -843879312.209607,
"logits/rejected": -3130855766.6932273,
"logps/chosen": -1512.3144104803494,
"logps/rejected": -2192.191235059761,
"loss": 0.0387,
"rewards/chosen": 47.68359801446506,
"rewards/margins": 26062714.13778128,
"rewards/rejected": -26062666.454183266,
"step": 525
},
{
"epoch": 0.6620861961274204,
"grad_norm": 1.2925801277160645,
"kl": 111.38333129882812,
"learning_rate": 4.228782639455674e-06,
"logits/chosen": -785828956.2918454,
"logits/rejected": -2260101559.449393,
"logps/chosen": -1546.6437768240344,
"logps/rejected": -2102.412955465587,
"loss": 0.0262,
"rewards/chosen": 46.72353641362661,
"rewards/margins": 54497844.9421599,
"rewards/rejected": -54497798.21862348,
"step": 530
},
{
"epoch": 0.6683322923173017,
"grad_norm": 1.605604887008667,
"kl": 66.55833435058594,
"learning_rate": 4.2089807550571786e-06,
"logits/chosen": -444578600.8739496,
"logits/rejected": -2686018416.132231,
"logps/chosen": -1527.5294117647059,
"logps/rejected": -2166.7438016528927,
"loss": 0.0224,
"rewards/chosen": 52.38012900472689,
"rewards/margins": 10841648.148724046,
"rewards/rejected": -10841595.768595042,
"step": 535
},
{
"epoch": 0.674578388507183,
"grad_norm": 0.6878402233123779,
"kl": 81.24166870117188,
"learning_rate": 4.188975519039151e-06,
"logits/chosen": -626616434.688,
"logits/rejected": -2936158688.8347826,
"logps/chosen": -1452.16,
"logps/rejected": -2022.9565217391305,
"loss": 0.0303,
"rewards/chosen": 50.23778515625,
"rewards/margins": 3875481.1943068956,
"rewards/rejected": -3875430.9565217393,
"step": 540
},
{
"epoch": 0.6808244846970644,
"grad_norm": 3.325066566467285,
"kl": 88.92082977294922,
"learning_rate": 4.168769311824619e-06,
"logits/chosen": -718940253.4771785,
"logits/rejected": -2297285233.539749,
"logps/chosen": -1545.0290456431535,
"logps/rejected": -1931.6485355648535,
"loss": 0.0296,
"rewards/chosen": 48.45470532935685,
"rewards/margins": 35838846.64717395,
"rewards/rejected": -35838798.19246862,
"step": 545
},
{
"epoch": 0.6870705808869456,
"grad_norm": 1.9588119983673096,
"kl": 174.1750030517578,
"learning_rate": 4.1483645377501726e-06,
"logits/chosen": -284824458.7480916,
"logits/rejected": -1198599327.706422,
"logps/chosen": -1543.0839694656488,
"logps/rejected": -1775.633027522936,
"loss": 0.0587,
"rewards/chosen": 39.873020783635496,
"rewards/margins": -3450648.420557198,
"rewards/rejected": 3450688.2935779816,
"step": 550
},
{
"epoch": 0.693316677076827,
"grad_norm": 0.15676426887512207,
"kl": 169.35833740234375,
"learning_rate": 4.127763624779873e-06,
"logits/chosen": -119682921.28138529,
"logits/rejected": -928474042.0883534,
"logps/chosen": -1460.5021645021645,
"logps/rejected": -1611.9518072289156,
"loss": 0.0322,
"rewards/chosen": 50.89810775162338,
"rewards/margins": -11360139.30269546,
"rewards/rejected": 11360190.200803213,
"step": 555
},
{
"epoch": 0.6995627732667083,
"grad_norm": 0.06072574853897095,
"kl": 89.75833129882812,
"learning_rate": 4.106969024216348e-06,
"logits/chosen": -114480607.59493671,
"logits/rejected": -2679107364.872428,
"logps/chosen": -1609.3164556962026,
"logps/rejected": -1946.732510288066,
"loss": 0.0382,
"rewards/chosen": 53.48231886207806,
"rewards/margins": -20413582.46829842,
"rewards/rejected": 20413635.950617284,
"step": 560
},
{
"epoch": 0.7058088694565896,
"grad_norm": 1.8465758562088013,
"kl": 24.049999237060547,
"learning_rate": 4.085983210409114e-06,
"logits/chosen": -160582556.6244726,
"logits/rejected": -4038683239.2427983,
"logps/chosen": -1587.9831223628692,
"logps/rejected": -2597.135802469136,
"loss": 0.0218,
"rewards/chosen": 50.0565664556962,
"rewards/margins": 1382332.5915458796,
"rewards/rejected": -1382282.534979424,
"step": 565
},
{
"epoch": 0.712054965646471,
"grad_norm": 0.00014929051394574344,
"kl": 23.33333396911621,
"learning_rate": 4.064808680460149e-06,
"logits/chosen": -68750113.39130434,
"logits/rejected": -3962644201.472,
"logps/chosen": -1610.017391304348,
"logps/rejected": -2563.84,
"loss": 0.024,
"rewards/chosen": 46.589041270380434,
"rewards/margins": -8454041.60295873,
"rewards/rejected": 8454088.192,
"step": 570
},
{
"epoch": 0.7183010618363522,
"grad_norm": 0.03587708622217178,
"kl": 22.733333587646484,
"learning_rate": 4.043447953926763e-06,
"logits/chosen": -123084586.29565218,
"logits/rejected": -3554890743.808,
"logps/chosen": -1611.9652173913043,
"logps/rejected": -2491.136,
"loss": 0.023,
"rewards/chosen": 50.09292629076087,
"rewards/margins": 7622541.740926291,
"rewards/rejected": -7622491.648,
"step": 575
},
{
"epoch": 0.7245471580262336,
"grad_norm": 0.06506123393774033,
"kl": 25.579166412353516,
"learning_rate": 4.021903572521802e-06,
"logits/chosen": -45582472.53333333,
"logits/rejected": -3599831313.0666666,
"logps/chosen": -1660.9333333333334,
"logps/rejected": -2363.4666666666667,
"loss": 0.0177,
"rewards/chosen": 51.65968424479167,
"rewards/margins": -18986135.006982423,
"rewards/rejected": 18986186.666666668,
"step": 580
},
{
"epoch": 0.730793254216115,
"grad_norm": 0.004665783606469631,
"kl": 13.354166984558105,
"learning_rate": 4.000178099811203e-06,
"logits/chosen": -82861664.73732719,
"logits/rejected": -3341536610.311787,
"logps/chosen": -1628.1658986175116,
"logps/rejected": -2418.7376425855514,
"loss": 0.0169,
"rewards/chosen": 44.27093984014977,
"rewards/margins": -4328426.603584874,
"rewards/rejected": 4328470.874524714,
"step": 585
},
{
"epoch": 0.7370393504059962,
"grad_norm": 0.7978938817977905,
"kl": 21.72916603088379,
"learning_rate": 3.978274120908957e-06,
"logits/chosen": -186259499.94849786,
"logits/rejected": -3371473252.534413,
"logps/chosen": -1648.755364806867,
"logps/rejected": -2399.093117408907,
"loss": 0.0145,
"rewards/chosen": 41.02078443535408,
"rewards/margins": 4383058.478274315,
"rewards/rejected": -4383017.457489879,
"step": 590
},
{
"epoch": 0.7432854465958776,
"grad_norm": 0.05162600055336952,
"kl": 55.141666412353516,
"learning_rate": 3.956194242169506e-06,
"logits/chosen": -199533438.92436975,
"logits/rejected": -3286393170.512397,
"logps/chosen": -1541.9159663865546,
"logps/rejected": -2091.7685950413224,
"loss": 0.0325,
"rewards/chosen": 45.741264279149156,
"rewards/margins": -4093033.7959258035,
"rewards/rejected": 4093079.5371900825,
"step": 595
},
{
"epoch": 0.749531542785759,
"grad_norm": 1.5164387226104736,
"kl": 82.81666564941406,
"learning_rate": 3.933941090877615e-06,
"logits/chosen": -188560736.9531915,
"logits/rejected": -2792379287.5102043,
"logps/chosen": -1619.3361702127659,
"logps/rejected": -2233.208163265306,
"loss": 0.021,
"rewards/chosen": 49.129488031914896,
"rewards/margins": -826816.7643895191,
"rewards/rejected": 826865.8938775511,
"step": 600
},
{
"epoch": 0.749531542785759,
"eval_kl": 136.79397583007812,
"eval_logits/chosen": -320818114.59961313,
"eval_logits/rejected": -2315103287.8545456,
"eval_logps/chosen": -1513.0328820116054,
"eval_logps/rejected": -1956.0080808080809,
"eval_loss": 0.02713991515338421,
"eval_rewards/chosen": 50.27917976305609,
"eval_rewards/margins": 11528678.772109056,
"eval_rewards/rejected": -11528628.492929293,
"eval_runtime": 640.8536,
"eval_samples_per_second": 6.312,
"eval_steps_per_second": 0.395,
"step": 600
},
{
"epoch": 0.7557776389756402,
"grad_norm": 2.8158020973205566,
"kl": 138.05416870117188,
"learning_rate": 3.911517314935752e-06,
"logits/chosen": -266606025.5319149,
"logits/rejected": -2278140497.502041,
"logps/chosen": -1721.9404255319148,
"logps/rejected": -1823.3469387755101,
"loss": 0.0452,
"rewards/chosen": 49.07104803856383,
"rewards/margins": 4880222.001660284,
"rewards/rejected": -4880172.930612245,
"step": 605
},
{
"epoch": 0.7620237351655216,
"grad_norm": 1.02464759349823,
"kl": 175.93333435058594,
"learning_rate": 3.888925582549006e-06,
"logits/chosen": -262432676.79352227,
"logits/rejected": -1933484137.476395,
"logps/chosen": -1712.1943319838056,
"logps/rejected": -1873.854077253219,
"loss": 0.0411,
"rewards/chosen": 50.334348114878544,
"rewards/margins": 2964819.8450777284,
"rewards/rejected": -2964769.5107296137,
"step": 610
},
{
"epoch": 0.7682698313554028,
"grad_norm": 1.6535738706588745,
"kl": 211.09165954589844,
"learning_rate": 3.866168581907609e-06,
"logits/chosen": -351476043.88932806,
"logits/rejected": -1097745899.7004406,
"logps/chosen": -1526.7667984189723,
"logps/rejected": -1907.7004405286343,
"loss": 0.0431,
"rewards/chosen": 52.42443799407115,
"rewards/margins": 19615485.164526097,
"rewards/rejected": -19615432.740088105,
"step": 615
},
{
"epoch": 0.7745159275452842,
"grad_norm": 1.2876919507980347,
"kl": 227.76666259765625,
"learning_rate": 3.8432490208670605e-06,
"logits/chosen": -318065091.2542373,
"logits/rejected": -671862179.6721312,
"logps/chosen": -1523.2542372881355,
"logps/rejected": -1821.377049180328,
"loss": 0.0444,
"rewards/chosen": 53.57854707362288,
"rewards/margins": 4319505.644120844,
"rewards/rejected": -4319452.065573771,
"step": 620
},
{
"epoch": 0.7807620237351656,
"grad_norm": 2.2276229858398438,
"kl": 182.30833435058594,
"learning_rate": 3.82016962662592e-06,
"logits/chosen": -279810331.1336405,
"logits/rejected": -645603857.5209125,
"logps/chosen": -1435.8709677419354,
"logps/rejected": -1685.9011406844106,
"loss": 0.0345,
"rewards/chosen": 48.26090869815668,
"rewards/margins": 16381825.051783223,
"rewards/rejected": -16381776.790874524,
"step": 625
},
{
"epoch": 0.7870081199250468,
"grad_norm": 2.2775721549987793,
"kl": 211.5,
"learning_rate": 3.796933145401304e-06,
"logits/chosen": -10083805.866666667,
"logits/rejected": -1062032725.3333334,
"logps/chosen": -1574.8,
"logps/rejected": -1685.7333333333333,
"loss": 0.0462,
"rewards/chosen": 56.797526041666664,
"rewards/margins": -14378736.802473959,
"rewards/rejected": 14378793.6,
"step": 630
},
{
"epoch": 0.7932542161149282,
"grad_norm": 0.07378248870372772,
"kl": 202.61666870117188,
"learning_rate": 3.773542342102105e-06,
"logits/chosen": -278961709.7886179,
"logits/rejected": -1514538080.2735043,
"logps/chosen": -1536.260162601626,
"logps/rejected": -1685.8803418803418,
"loss": 0.0386,
"rewards/chosen": 51.39226054369919,
"rewards/margins": 21470320.657217808,
"rewards/rejected": -21470269.264957264,
"step": 635
},
{
"epoch": 0.7995003123048094,
"grad_norm": 0.7576724290847778,
"kl": 172.5416717529297,
"learning_rate": 3.7500000000000005e-06,
"logits/chosen": -356179968.0,
"logits/rejected": -2250393892.571429,
"logps/chosen": -1516.0,
"logps/rejected": -1893.4285714285713,
"loss": 0.051,
"rewards/chosen": 53.19654846191406,
"rewards/margins": 14372629.196548462,
"rewards/rejected": -14372576.0,
"step": 640
},
{
"epoch": 0.8057464084946908,
"grad_norm": 0.0,
"kl": 102.68333435058594,
"learning_rate": 3.7263089203982698e-06,
"logits/chosen": 101189978.00913242,
"logits/rejected": -2397663436.0153255,
"logps/chosen": -1720.109589041096,
"logps/rejected": -2050.206896551724,
"loss": 0.0174,
"rewards/chosen": 55.50405786244292,
"rewards/margins": -22854268.971037924,
"rewards/rejected": 22854324.475095786,
"step": 645
},
{
"epoch": 0.8119925046845722,
"grad_norm": 1.2345607280731201,
"kl": 101.40833282470703,
"learning_rate": 3.7024719222984696e-06,
"logits/chosen": 93082681.62770563,
"logits/rejected": -2185956701.558233,
"logps/chosen": -1410.3549783549784,
"logps/rejected": -1973.0763052208836,
"loss": 0.0334,
"rewards/chosen": 41.46657281520563,
"rewards/margins": 485957.8039222128,
"rewards/rejected": -485916.3373493976,
"step": 650
},
{
"epoch": 0.8182386008744534,
"grad_norm": 1.5449272394180298,
"kl": 233.19166564941406,
"learning_rate": 3.6784918420649952e-06,
"logits/chosen": 125160275.22633745,
"logits/rejected": -1470731812.7257383,
"logps/chosen": -1487.8024691358025,
"logps/rejected": -1760.4050632911392,
"loss": 0.0414,
"rewards/chosen": 51.10508696630659,
"rewards/margins": -503803.34217041766,
"rewards/rejected": 503854.44725738396,
"step": 655
},
{
"epoch": 0.8244846970643348,
"grad_norm": 1.7102289199829102,
"kl": 210.16250610351562,
"learning_rate": 3.654371533087586e-06,
"logits/chosen": -153904637.96015936,
"logits/rejected": -1703949736.8034935,
"logps/chosen": -1523.5059760956176,
"logps/rejected": -1825.53711790393,
"loss": 0.0511,
"rewards/chosen": 51.47202595866534,
"rewards/margins": -11561518.903519893,
"rewards/rejected": 11561570.375545852,
"step": 660
},
{
"epoch": 0.8307307932542161,
"grad_norm": 0.10204288363456726,
"kl": 212.35833740234375,
"learning_rate": 3.6301138654418e-06,
"logits/chosen": -389819261.52895755,
"logits/rejected": -1877226231.8914027,
"logps/chosen": -1590.054054054054,
"logps/rejected": -1905.6651583710407,
"loss": 0.0442,
"rewards/chosen": 55.06306180622587,
"rewards/margins": 49894978.64677221,
"rewards/rejected": -49894923.58371041,
"step": 665
},
{
"epoch": 0.8369768894440974,
"grad_norm": 0.03783709183335304,
"kl": 169.375,
"learning_rate": 3.6057217255475034e-06,
"logits/chosen": -503698552.22672063,
"logits/rejected": -2175421672.9270387,
"logps/chosen": -1393.748987854251,
"logps/rejected": -1875.9141630901288,
"loss": 0.0245,
"rewards/chosen": 55.69806427125506,
"rewards/margins": 31500282.170167275,
"rewards/rejected": -31500226.472103003,
"step": 670
},
{
"epoch": 0.8432229856339788,
"grad_norm": 2.866633892059326,
"kl": 166.69166564941406,
"learning_rate": 3.5811980158254156e-06,
"logits/chosen": -492701266.1728395,
"logits/rejected": -2529324589.367089,
"logps/chosen": -1534.3539094650205,
"logps/rejected": -1847.223628691983,
"loss": 0.071,
"rewards/chosen": 47.784866898148145,
"rewards/margins": 21554439.818622172,
"rewards/rejected": -21554392.033755273,
"step": 675
},
{
"epoch": 0.84946908182386,
"grad_norm": 0.18893325328826904,
"kl": 127.42500305175781,
"learning_rate": 3.556545654351749e-06,
"logits/chosen": -482326881.1034483,
"logits/rejected": -2595327075.096774,
"logps/chosen": -1636.4137931034484,
"logps/rejected": -1988.774193548387,
"loss": 0.0159,
"rewards/chosen": 48.64041453394397,
"rewards/margins": 6761048.89847905,
"rewards/rejected": -6761000.258064516,
"step": 680
},
{
"epoch": 0.8557151780137414,
"grad_norm": 0.03448840603232384,
"kl": 138.22084045410156,
"learning_rate": 3.531767574510987e-06,
"logits/chosen": -554006615.5213675,
"logits/rejected": -2420829508.6829267,
"logps/chosen": -1462.837606837607,
"logps/rejected": -2026.1463414634147,
"loss": 0.0316,
"rewards/chosen": 49.53289847088675,
"rewards/margins": 21187200.622329365,
"rewards/rejected": -21187151.089430895,
"step": 685
},
{
"epoch": 0.8619612742036228,
"grad_norm": 1.088128685951233,
"kl": 189.2083282470703,
"learning_rate": 3.5068667246468437e-06,
"logits/chosen": -323208633.2357724,
"logits/rejected": -1934111875.2820513,
"logps/chosen": -1403.5772357723577,
"logps/rejected": -1723.3504273504273,
"loss": 0.0325,
"rewards/chosen": 49.48591526930894,
"rewards/margins": -771720.4969907135,
"rewards/rejected": 771769.9829059829,
"step": 690
},
{
"epoch": 0.868207370393504,
"grad_norm": 2.381688117980957,
"kl": 228.81666564941406,
"learning_rate": 3.481846067711436e-06,
"logits/chosen": -274805228.87966806,
"logits/rejected": -1835806497.205021,
"logps/chosen": -1443.3858921161825,
"logps/rejected": -1687.0292887029289,
"loss": 0.0513,
"rewards/chosen": 49.824506450985474,
"rewards/margins": 6338691.765929045,
"rewards/rejected": -6338641.941422594,
"step": 695
},
{
"epoch": 0.8744534665833854,
"grad_norm": 1.0362893342971802,
"kl": 274.10833740234375,
"learning_rate": 3.4567085809127247e-06,
"logits/chosen": -263217944.77419356,
"logits/rejected": -1258797409.1034484,
"logps/chosen": -1521.1612903225807,
"logps/rejected": -1691.5862068965516,
"loss": 0.0462,
"rewards/chosen": 50.86510049143145,
"rewards/margins": 8044617.4857901465,
"rewards/rejected": -8044566.620689655,
"step": 700
},
{
"epoch": 0.8806995627732667,
"grad_norm": 1.926090121269226,
"kl": 289.6333312988281,
"learning_rate": 3.4314572553602577e-06,
"logits/chosen": -373032056.73362446,
"logits/rejected": -826461702.1195219,
"logps/chosen": -1489.1877729257642,
"logps/rejected": -1553.2749003984063,
"loss": 0.0693,
"rewards/chosen": 48.6717385371179,
"rewards/margins": 10300293.325124992,
"rewards/rejected": -10300244.653386455,
"step": 705
},
{
"epoch": 0.886945658963148,
"grad_norm": 2.2127394676208496,
"kl": 309.98333740234375,
"learning_rate": 3.406095095709254e-06,
"logits/chosen": -449325429.11740893,
"logits/rejected": -790185272.0343348,
"logps/chosen": -1486.7692307692307,
"logps/rejected": -1538.3347639484978,
"loss": 0.0561,
"rewards/chosen": 56.23212139423077,
"rewards/margins": 13225402.978902511,
"rewards/rejected": -13225346.746781116,
"step": 710
},
{
"epoch": 0.8931917551530294,
"grad_norm": 1.1908198595046997,
"kl": 275.14166259765625,
"learning_rate": 3.3806251198030843e-06,
"logits/chosen": -674069715.5702479,
"logits/rejected": -1077795142.9915967,
"logps/chosen": -1509.9504132231405,
"logps/rejected": -1646.2521008403362,
"loss": 0.0592,
"rewards/chosen": 55.71608664772727,
"rewards/margins": 1421531.8841538746,
"rewards/rejected": -1421476.168067227,
"step": 715
},
{
"epoch": 0.8994378513429107,
"grad_norm": 0.20443065464496613,
"kl": 221.72500610351562,
"learning_rate": 3.3550503583141726e-06,
"logits/chosen": -591554614.3716815,
"logits/rejected": -1264764299.0866141,
"logps/chosen": -1439.2920353982302,
"logps/rejected": -1669.9212598425197,
"loss": 0.0458,
"rewards/chosen": 52.98895101631637,
"rewards/margins": 10349140.233045503,
"rewards/rejected": -10349087.244094487,
"step": 720
},
{
"epoch": 0.905683947532792,
"grad_norm": 1.3105542659759521,
"kl": 201.68333435058594,
"learning_rate": 3.3293738543833807e-06,
"logits/chosen": -319022639.3277311,
"logits/rejected": -1492478950.6115704,
"logps/chosen": -1498.218487394958,
"logps/rejected": -1772.4297520661157,
"loss": 0.0446,
"rewards/chosen": 50.74974149816177,
"rewards/margins": 1107236.2704026552,
"rewards/rejected": -1107185.520661157,
"step": 725
},
{
"epoch": 0.9119300437226733,
"grad_norm": 0.8841028213500977,
"kl": 197.63333129882812,
"learning_rate": 3.303598663257904e-06,
"logits/chosen": -220504861.45132744,
"logits/rejected": -1356576622.8661418,
"logps/chosen": -1498.3362831858408,
"logps/rejected": -1776.755905511811,
"loss": 0.0464,
"rewards/chosen": 44.9149915306969,
"rewards/margins": -5135916.785795871,
"rewards/rejected": 5135961.700787402,
"step": 730
},
{
"epoch": 0.9181761399125546,
"grad_norm": 2.0225307941436768,
"kl": 190.76666259765625,
"learning_rate": 3.277727851927727e-06,
"logits/chosen": -65942323.2,
"logits/rejected": -1717986918.4,
"logps/chosen": -1528.3333333333333,
"logps/rejected": -1754.1333333333334,
"loss": 0.0349,
"rewards/chosen": 54.71465250651042,
"rewards/margins": -17492478.618680827,
"rewards/rejected": 17492533.333333332,
"step": 735
},
{
"epoch": 0.924422236102436,
"grad_norm": 2.063528060913086,
"kl": 142.30833435058594,
"learning_rate": 3.2517644987606827e-06,
"logits/chosen": 66764193.18518519,
"logits/rejected": -1825793241.2121212,
"logps/chosen": -1486.888888888889,
"logps/rejected": -1923.2727272727273,
"loss": 0.0417,
"rewards/chosen": 53.85865614149306,
"rewards/margins": -6370393.171646888,
"rewards/rejected": 6370447.03030303,
"step": 740
},
{
"epoch": 0.9306683322923173,
"grad_norm": 1.3004655838012695,
"kl": 142.44166564941406,
"learning_rate": 3.225711693136156e-06,
"logits/chosen": 70914651.79282868,
"logits/rejected": -2056784113.467249,
"logps/chosen": -1554.4860557768925,
"logps/rejected": -1979.9475982532751,
"loss": 0.0384,
"rewards/chosen": 48.3206937873506,
"rewards/margins": -11652987.836511454,
"rewards/rejected": 11653036.15720524,
"step": 745
},
{
"epoch": 0.9369144284821986,
"grad_norm": 1.4171018600463867,
"kl": 130.48333740234375,
"learning_rate": 3.199572535077481e-06,
"logits/chosen": -12038372.532188842,
"logits/rejected": -2139264849.8785424,
"logps/chosen": -1477.5965665236051,
"logps/rejected": -1908.987854251012,
"loss": 0.0385,
"rewards/chosen": 46.0073598444206,
"rewards/margins": 5270583.853513691,
"rewards/rejected": -5270537.846153846,
"step": 750
},
{
"epoch": 0.94316052467208,
"grad_norm": 0.09198802709579468,
"kl": 131.65834045410156,
"learning_rate": 3.173350134883066e-06,
"logits/chosen": -136213255.89427313,
"logits/rejected": -1772151463.9683795,
"logps/chosen": -1524.7224669603524,
"logps/rejected": -1885.3438735177865,
"loss": 0.0153,
"rewards/chosen": 51.28870543089207,
"rewards/margins": 13132515.351946538,
"rewards/rejected": -13132464.063241107,
"step": 755
},
{
"epoch": 0.9494066208619613,
"grad_norm": 1.3991014957427979,
"kl": 134.39999389648438,
"learning_rate": 3.147047612756302e-06,
"logits/chosen": -176903705.25560537,
"logits/rejected": -1983824190.7548637,
"logps/chosen": -1627.1210762331839,
"logps/rejected": -2024.964980544747,
"loss": 0.0407,
"rewards/chosen": 53.05085569646861,
"rewards/margins": -14139337.953035355,
"rewards/rejected": 14139391.00389105,
"step": 760
},
{
"epoch": 0.9556527170518426,
"grad_norm": 1.079622507095337,
"kl": 134.00833129882812,
"learning_rate": 3.120668098434291e-06,
"logits/chosen": -444668686.5691057,
"logits/rejected": -2044526031.8632479,
"logps/chosen": -1559.219512195122,
"logps/rejected": -2039.3846153846155,
"loss": 0.0349,
"rewards/chosen": 46.08423447027439,
"rewards/margins": 484669.26372164977,
"rewards/rejected": -484623.1794871795,
"step": 765
},
{
"epoch": 0.9618988132417239,
"grad_norm": 2.1247825622558594,
"kl": 173.84165954589844,
"learning_rate": 3.094214730815433e-06,
"logits/chosen": -334601040.33472806,
"logits/rejected": -1739400493.6763484,
"logps/chosen": -1507.7489539748954,
"logps/rejected": -1843.1203319502074,
"loss": 0.0627,
"rewards/chosen": 54.40384741108787,
"rewards/margins": 15768593.225424174,
"rewards/rejected": -15768538.821576763,
"step": 770
},
{
"epoch": 0.9681449094316052,
"grad_norm": 1.7619072198867798,
"kl": 187.1666717529297,
"learning_rate": 3.0676906575859335e-06,
"logits/chosen": -414374765.71428573,
"logits/rejected": -1798628478.9421487,
"logps/chosen": -1502.3865546218487,
"logps/rejected": -1810.5123966942149,
"loss": 0.0286,
"rewards/chosen": 41.90288126969538,
"rewards/margins": 6427740.6797407735,
"rewards/rejected": -6427698.776859504,
"step": 775
},
{
"epoch": 0.9743910056214866,
"grad_norm": 1.2919367551803589,
"kl": 171.06666564941406,
"learning_rate": 3.0410990348452572e-06,
"logits/chosen": -272265037.9130435,
"logits/rejected": -1723221409.792,
"logps/chosen": -1605.7043478260869,
"logps/rejected": -1845.248,
"loss": 0.044,
"rewards/chosen": 47.08596297554348,
"rewards/margins": 9132751.853962975,
"rewards/rejected": -9132704.768,
"step": 780
},
{
"epoch": 0.9806371018113679,
"grad_norm": 0.8556254506111145,
"kl": 176.6458282470703,
"learning_rate": 3.0144430267305874e-06,
"logits/chosen": -508988111.07555556,
"logits/rejected": -1627455745.0039215,
"logps/chosen": -1427.6622222222222,
"logps/rejected": -1869.0509803921568,
"loss": 0.0208,
"rewards/chosen": 53.00231336805555,
"rewards/margins": 10830550.633685917,
"rewards/rejected": -10830497.631372549,
"step": 785
},
{
"epoch": 0.9868831980012492,
"grad_norm": 2.9301369190216064,
"kl": 212.9499969482422,
"learning_rate": 2.9877258050403214e-06,
"logits/chosen": -519565405.8015267,
"logits/rejected": -1667255079.9266055,
"logps/chosen": -1437.1908396946565,
"logps/rejected": -1811.9633027522937,
"loss": 0.0375,
"rewards/chosen": 57.16154281965649,
"rewards/margins": 25719160.574386854,
"rewards/rejected": -25719103.412844036,
"step": 790
},
{
"epoch": 0.9931292941911305,
"grad_norm": 1.1394269466400146,
"kl": 213.3249969482422,
"learning_rate": 2.9609505488566585e-06,
"logits/chosen": -600712120.5581396,
"logits/rejected": -1835518118.054054,
"logps/chosen": -1653.7054263565892,
"logps/rejected": -1867.6756756756756,
"loss": 0.0498,
"rewards/chosen": 51.81218583454457,
"rewards/margins": 37509377.07344709,
"rewards/rejected": -37509325.26126126,
"step": 795
},
{
"epoch": 0.9993753903810119,
"grad_norm": 0.3533008396625519,
"kl": 138.0500030517578,
"learning_rate": 2.9341204441673267e-06,
"logits/chosen": -576359643.1091703,
"logits/rejected": -1930248779.4741037,
"logps/chosen": -1551.3711790393013,
"logps/rejected": -1936.0637450199204,
"loss": 0.0229,
"rewards/chosen": 48.6797983760917,
"rewards/margins": 5309753.859081244,
"rewards/rejected": -5309705.179282868,
"step": 800
},
{
"epoch": 0.9993753903810119,
"eval_kl": 164.8888397216797,
"eval_logits/chosen": -603077229.926499,
"eval_logits/rejected": -2010029103.579798,
"eval_logps/chosen": -1488.5996131528047,
"eval_logps/rejected": -1886.9171717171716,
"eval_loss": 0.03083220310509205,
"eval_rewards/chosen": 52.287400265957444,
"eval_rewards/margins": 19438170.558107335,
"eval_rewards/rejected": -19438118.27070707,
"eval_runtime": 640.097,
"eval_samples_per_second": 6.319,
"eval_steps_per_second": 0.395,
"step": 800
}
],
"logging_steps": 5,
"max_steps": 1600,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 200,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}