zephyr-7b-dpo-full / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9984301412872841,
"eval_steps": 500,
"global_step": 477,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0020931449502878076,
"grad_norm": 18.07411787458952,
"learning_rate": 1.0416666666666666e-08,
"logits/chosen": -2.6982662677764893,
"logits/rejected": -2.639620304107666,
"logps/chosen": -340.48919677734375,
"logps/rejected": -383.30560302734375,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.020931449502878074,
"grad_norm": 9.9608972008456,
"learning_rate": 1.0416666666666667e-07,
"logits/chosen": -2.4643380641937256,
"logits/rejected": -2.41880464553833,
"logps/chosen": -332.1871032714844,
"logps/rejected": -297.4084167480469,
"loss": 0.693,
"rewards/accuracies": 0.5277777910232544,
"rewards/chosen": 0.0005977663677185774,
"rewards/margins": 0.0007145923445932567,
"rewards/rejected": -0.00011682594777084887,
"step": 10
},
{
"epoch": 0.04186289900575615,
"grad_norm": 13.745638539501607,
"learning_rate": 2.0833333333333333e-07,
"logits/chosen": -2.563354969024658,
"logits/rejected": -2.4802138805389404,
"logps/chosen": -330.6456604003906,
"logps/rejected": -303.16796875,
"loss": 0.6926,
"rewards/accuracies": 0.5,
"rewards/chosen": 0.0026146548334509134,
"rewards/margins": 0.0005810384755022824,
"rewards/rejected": 0.0020336161833256483,
"step": 20
},
{
"epoch": 0.06279434850863422,
"grad_norm": 7.832516309339627,
"learning_rate": 3.1249999999999997e-07,
"logits/chosen": -2.532294511795044,
"logits/rejected": -2.4404759407043457,
"logps/chosen": -312.492431640625,
"logps/rejected": -266.53936767578125,
"loss": 0.6904,
"rewards/accuracies": 0.65625,
"rewards/chosen": 0.01948517933487892,
"rewards/margins": 0.006482087075710297,
"rewards/rejected": 0.0130030931904912,
"step": 30
},
{
"epoch": 0.0837257980115123,
"grad_norm": 7.9616158798093855,
"learning_rate": 4.1666666666666667e-07,
"logits/chosen": -2.431403636932373,
"logits/rejected": -2.379568099975586,
"logps/chosen": -308.171630859375,
"logps/rejected": -279.072998046875,
"loss": 0.6845,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.046426668763160706,
"rewards/margins": 0.019384978339076042,
"rewards/rejected": 0.027041692286729813,
"step": 40
},
{
"epoch": 0.10465724751439037,
"grad_norm": 12.138395522818126,
"learning_rate": 4.999731868769026e-07,
"logits/chosen": -2.467078685760498,
"logits/rejected": -2.3770642280578613,
"logps/chosen": -292.5061950683594,
"logps/rejected": -287.78692626953125,
"loss": 0.6768,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": 0.05952076241374016,
"rewards/margins": 0.031759217381477356,
"rewards/rejected": 0.027761543169617653,
"step": 50
},
{
"epoch": 0.12558869701726844,
"grad_norm": 12.71284691980484,
"learning_rate": 4.990353313429303e-07,
"logits/chosen": -2.5058963298797607,
"logits/rejected": -2.4367785453796387,
"logps/chosen": -269.9830322265625,
"logps/rejected": -261.7590026855469,
"loss": 0.6648,
"rewards/accuracies": 0.71875,
"rewards/chosen": 0.054543137550354004,
"rewards/margins": 0.06885507702827454,
"rewards/rejected": -0.01431194506585598,
"step": 60
},
{
"epoch": 0.14652014652014653,
"grad_norm": 8.977266411290348,
"learning_rate": 4.967625656594781e-07,
"logits/chosen": -2.4574174880981445,
"logits/rejected": -2.4204437732696533,
"logps/chosen": -323.3358459472656,
"logps/rejected": -313.7773742675781,
"loss": 0.6448,
"rewards/accuracies": 0.6875,
"rewards/chosen": -0.02133224718272686,
"rewards/margins": 0.09954627603292465,
"rewards/rejected": -0.12087850272655487,
"step": 70
},
{
"epoch": 0.1674515960230246,
"grad_norm": 15.44090832738,
"learning_rate": 4.93167072587771e-07,
"logits/chosen": -2.5686161518096924,
"logits/rejected": -2.459707260131836,
"logps/chosen": -367.8087463378906,
"logps/rejected": -306.7381286621094,
"loss": 0.6262,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.07196881622076035,
"rewards/margins": 0.20668184757232666,
"rewards/rejected": -0.2786506414413452,
"step": 80
},
{
"epoch": 0.18838304552590268,
"grad_norm": 17.357813817512778,
"learning_rate": 4.882681251368548e-07,
"logits/chosen": -2.484760284423828,
"logits/rejected": -2.4333367347717285,
"logps/chosen": -286.19573974609375,
"logps/rejected": -318.17987060546875,
"loss": 0.6072,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.13759762048721313,
"rewards/margins": 0.2588348686695099,
"rewards/rejected": -0.396432489156723,
"step": 90
},
{
"epoch": 0.20931449502878074,
"grad_norm": 11.555230239819865,
"learning_rate": 4.820919832540181e-07,
"logits/chosen": -2.481173038482666,
"logits/rejected": -2.436263084411621,
"logps/chosen": -343.57391357421875,
"logps/rejected": -353.5834655761719,
"loss": 0.6047,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.17556986212730408,
"rewards/margins": 0.37368106842041016,
"rewards/rejected": -0.5492509603500366,
"step": 100
},
{
"epoch": 0.2302459445316588,
"grad_norm": 12.709794889392892,
"learning_rate": 4.7467175306295647e-07,
"logits/chosen": -2.3503360748291016,
"logits/rejected": -2.259538173675537,
"logps/chosen": -345.1012268066406,
"logps/rejected": -358.5026550292969,
"loss": 0.6012,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.3211168348789215,
"rewards/margins": 0.30802685022354126,
"rewards/rejected": -0.6291436553001404,
"step": 110
},
{
"epoch": 0.25117739403453687,
"grad_norm": 16.24550776128439,
"learning_rate": 4.6604720940421207e-07,
"logits/chosen": -1.8585760593414307,
"logits/rejected": -1.812425971031189,
"logps/chosen": -319.6259765625,
"logps/rejected": -346.91424560546875,
"loss": 0.5874,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.20474886894226074,
"rewards/margins": 0.4103702902793884,
"rewards/rejected": -0.6151191592216492,
"step": 120
},
{
"epoch": 0.272108843537415,
"grad_norm": 18.658464566773528,
"learning_rate": 4.5626458262912735e-07,
"logits/chosen": -1.4963523149490356,
"logits/rejected": -1.3689486980438232,
"logps/chosen": -381.5474548339844,
"logps/rejected": -392.7295837402344,
"loss": 0.5767,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.6478039622306824,
"rewards/margins": 0.3431198000907898,
"rewards/rejected": -0.9909237623214722,
"step": 130
},
{
"epoch": 0.29304029304029305,
"grad_norm": 14.61768252656386,
"learning_rate": 4.453763107901675e-07,
"logits/chosen": -1.2942148447036743,
"logits/rejected": -1.0670334100723267,
"logps/chosen": -374.21197509765625,
"logps/rejected": -370.75750732421875,
"loss": 0.5762,
"rewards/accuracies": 0.731249988079071,
"rewards/chosen": -0.3039678633213043,
"rewards/margins": 0.4882018566131592,
"rewards/rejected": -0.7921696901321411,
"step": 140
},
{
"epoch": 0.3139717425431711,
"grad_norm": 22.141164123200294,
"learning_rate": 4.3344075855595097e-07,
"logits/chosen": -1.035481572151184,
"logits/rejected": -0.8223312497138977,
"logps/chosen": -351.9337463378906,
"logps/rejected": -350.9710693359375,
"loss": 0.5711,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.4184446334838867,
"rewards/margins": 0.40214234590530396,
"rewards/rejected": -0.8205869793891907,
"step": 150
},
{
"epoch": 0.3349031920460492,
"grad_norm": 16.775320749910513,
"learning_rate": 4.2052190435769554e-07,
"logits/chosen": -0.8283090591430664,
"logits/rejected": -0.4383368492126465,
"logps/chosen": -351.2560119628906,
"logps/rejected": -372.83636474609375,
"loss": 0.5738,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.5103927850723267,
"rewards/margins": 0.5579272508621216,
"rewards/rejected": -1.0683200359344482,
"step": 160
},
{
"epoch": 0.35583464154892724,
"grad_norm": 18.321929450201765,
"learning_rate": 4.0668899744407567e-07,
"logits/chosen": -0.2253287136554718,
"logits/rejected": 0.010831773281097412,
"logps/chosen": -337.71563720703125,
"logps/rejected": -353.23150634765625,
"loss": 0.5681,
"rewards/accuracies": 0.6937500238418579,
"rewards/chosen": -0.5377098917961121,
"rewards/margins": 0.43757739663124084,
"rewards/rejected": -0.9752873182296753,
"step": 170
},
{
"epoch": 0.37676609105180536,
"grad_norm": 17.583054376284373,
"learning_rate": 3.920161866827889e-07,
"logits/chosen": -0.036564283072948456,
"logits/rejected": 0.21061678230762482,
"logps/chosen": -348.42608642578125,
"logps/rejected": -363.5872802734375,
"loss": 0.5376,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.5707951784133911,
"rewards/margins": 0.45119351148605347,
"rewards/rejected": -1.0219886302947998,
"step": 180
},
{
"epoch": 0.3976975405546834,
"grad_norm": 23.074661957066084,
"learning_rate": 3.765821230985757e-07,
"logits/chosen": 0.14501239359378815,
"logits/rejected": 0.1892128884792328,
"logps/chosen": -327.35870361328125,
"logps/rejected": -371.1208801269531,
"loss": 0.5554,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.48090630769729614,
"rewards/margins": 0.45326748490333557,
"rewards/rejected": -0.9341737627983093,
"step": 190
},
{
"epoch": 0.4186289900575615,
"grad_norm": 22.069261987026525,
"learning_rate": 3.604695382782159e-07,
"logits/chosen": 0.5265301465988159,
"logits/rejected": 0.6243599653244019,
"logps/chosen": -359.69854736328125,
"logps/rejected": -418.3558044433594,
"loss": 0.5655,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.7841523885726929,
"rewards/margins": 0.46337419748306274,
"rewards/rejected": -1.2475265264511108,
"step": 200
},
{
"epoch": 0.43956043956043955,
"grad_norm": 17.42416201312016,
"learning_rate": 3.4376480090239047e-07,
"logits/chosen": -0.36963820457458496,
"logits/rejected": 0.10797449201345444,
"logps/chosen": -408.05328369140625,
"logps/rejected": -385.1399841308594,
"loss": 0.573,
"rewards/accuracies": 0.737500011920929,
"rewards/chosen": -0.7120479345321655,
"rewards/margins": 0.45911431312561035,
"rewards/rejected": -1.1711622476577759,
"step": 210
},
{
"epoch": 0.4604918890633176,
"grad_norm": 21.650857513168543,
"learning_rate": 3.265574537815398e-07,
"logits/chosen": -1.1279886960983276,
"logits/rejected": -1.0101404190063477,
"logps/chosen": -330.86920166015625,
"logps/rejected": -375.27581787109375,
"loss": 0.5521,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.5428093075752258,
"rewards/margins": 0.5479553937911987,
"rewards/rejected": -1.0907647609710693,
"step": 220
},
{
"epoch": 0.48142333856619574,
"grad_norm": 21.29968622459755,
"learning_rate": 3.0893973387735683e-07,
"logits/chosen": -1.4807205200195312,
"logits/rejected": -1.2653987407684326,
"logps/chosen": -349.1932067871094,
"logps/rejected": -402.3145751953125,
"loss": 0.5591,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.6056884527206421,
"rewards/margins": 0.7848646640777588,
"rewards/rejected": -1.3905531167984009,
"step": 230
},
{
"epoch": 0.5023547880690737,
"grad_norm": 21.54037562544609,
"learning_rate": 2.910060778827554e-07,
"logits/chosen": -1.4609298706054688,
"logits/rejected": -1.2633426189422607,
"logps/chosen": -361.18109130859375,
"logps/rejected": -385.7818298339844,
"loss": 0.5293,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.4389161169528961,
"rewards/margins": 0.5328649878501892,
"rewards/rejected": -0.9717811346054077,
"step": 240
},
{
"epoch": 0.5232862375719518,
"grad_norm": 19.10437589239632,
"learning_rate": 2.7285261601056697e-07,
"logits/chosen": -1.1038726568222046,
"logits/rejected": -0.7872940897941589,
"logps/chosen": -367.0468444824219,
"logps/rejected": -402.64837646484375,
"loss": 0.5392,
"rewards/accuracies": 0.8062499761581421,
"rewards/chosen": -0.618547260761261,
"rewards/margins": 0.7416694760322571,
"rewards/rejected": -1.360216736793518,
"step": 250
},
{
"epoch": 0.54421768707483,
"grad_norm": 27.812573286232187,
"learning_rate": 2.5457665670441937e-07,
"logits/chosen": -0.946252703666687,
"logits/rejected": -0.8400171399116516,
"logps/chosen": -360.14691162109375,
"logps/rejected": -391.15576171875,
"loss": 0.5386,
"rewards/accuracies": 0.675000011920929,
"rewards/chosen": -0.7270759344100952,
"rewards/margins": 0.5733479261398315,
"rewards/rejected": -1.3004238605499268,
"step": 260
},
{
"epoch": 0.565149136577708,
"grad_norm": 19.996060331745692,
"learning_rate": 2.3627616503391812e-07,
"logits/chosen": -1.0181360244750977,
"logits/rejected": -0.8419178128242493,
"logps/chosen": -381.3074645996094,
"logps/rejected": -421.84503173828125,
"loss": 0.5439,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.6155784726142883,
"rewards/margins": 0.7110739946365356,
"rewards/rejected": -1.3266522884368896,
"step": 270
},
{
"epoch": 0.5860805860805861,
"grad_norm": 20.456176579909233,
"learning_rate": 2.1804923757009882e-07,
"logits/chosen": -0.8568949699401855,
"logits/rejected": -0.6260634660720825,
"logps/chosen": -355.68487548828125,
"logps/rejected": -365.54150390625,
"loss": 0.5521,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.5479617118835449,
"rewards/margins": 0.554092288017273,
"rewards/rejected": -1.1020539999008179,
"step": 280
},
{
"epoch": 0.6070120355834642,
"grad_norm": 21.855087428817388,
"learning_rate": 1.9999357655598891e-07,
"logits/chosen": -0.9505099058151245,
"logits/rejected": -0.7808119654655457,
"logps/chosen": -334.37713623046875,
"logps/rejected": -388.1415100097656,
"loss": 0.5459,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.6734660863876343,
"rewards/margins": 0.5256973505020142,
"rewards/rejected": -1.1991634368896484,
"step": 290
},
{
"epoch": 0.6279434850863422,
"grad_norm": 20.553743451672336,
"learning_rate": 1.8220596619089573e-07,
"logits/chosen": -0.9063513875007629,
"logits/rejected": -0.6456217765808105,
"logps/chosen": -422.3011169433594,
"logps/rejected": -447.60308837890625,
"loss": 0.5201,
"rewards/accuracies": 0.699999988079071,
"rewards/chosen": -0.6219363212585449,
"rewards/margins": 0.6523629426956177,
"rewards/rejected": -1.2742993831634521,
"step": 300
},
{
"epoch": 0.6488749345892203,
"grad_norm": 19.16702342627125,
"learning_rate": 1.647817538357072e-07,
"logits/chosen": -0.8550491333007812,
"logits/rejected": -0.6107693910598755,
"logps/chosen": -392.9826354980469,
"logps/rejected": -404.90277099609375,
"loss": 0.5161,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.6534227132797241,
"rewards/margins": 0.7227972149848938,
"rewards/rejected": -1.3762199878692627,
"step": 310
},
{
"epoch": 0.6698063840920984,
"grad_norm": 20.684056407375124,
"learning_rate": 1.478143389201113e-07,
"logits/chosen": -0.7066868543624878,
"logits/rejected": -0.4419567584991455,
"logps/chosen": -337.99554443359375,
"logps/rejected": -378.85528564453125,
"loss": 0.5178,
"rewards/accuracies": 0.8187500238418579,
"rewards/chosen": -0.5666108131408691,
"rewards/margins": 0.76667320728302,
"rewards/rejected": -1.3332840204238892,
"step": 320
},
{
"epoch": 0.6907378335949764,
"grad_norm": 23.16918692489251,
"learning_rate": 1.3139467229135998e-07,
"logits/chosen": -0.9136036038398743,
"logits/rejected": -0.8735296130180359,
"logps/chosen": -343.11474609375,
"logps/rejected": -400.88055419921875,
"loss": 0.5291,
"rewards/accuracies": 0.78125,
"rewards/chosen": -0.5132071375846863,
"rewards/margins": 0.6378787755966187,
"rewards/rejected": -1.1510859727859497,
"step": 330
},
{
"epoch": 0.7116692830978545,
"grad_norm": 25.09011052065921,
"learning_rate": 1.1561076868822755e-07,
"logits/chosen": -0.8220704197883606,
"logits/rejected": -0.6656316518783569,
"logps/chosen": -376.2585144042969,
"logps/rejected": -415.30108642578125,
"loss": 0.5298,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.5471745133399963,
"rewards/margins": 0.6663635969161987,
"rewards/rejected": -1.2135381698608398,
"step": 340
},
{
"epoch": 0.7326007326007326,
"grad_norm": 23.913230474173087,
"learning_rate": 1.0054723495346482e-07,
"logits/chosen": -0.8336887359619141,
"logits/rejected": -0.5391756892204285,
"logps/chosen": -333.2408752441406,
"logps/rejected": -374.46160888671875,
"loss": 0.5046,
"rewards/accuracies": 0.7749999761581421,
"rewards/chosen": -0.46794453263282776,
"rewards/margins": 0.6849315762519836,
"rewards/rejected": -1.1528760194778442,
"step": 350
},
{
"epoch": 0.7535321821036107,
"grad_norm": 18.671801968846303,
"learning_rate": 8.628481651367875e-08,
"logits/chosen": -0.6670598983764648,
"logits/rejected": -0.3578875660896301,
"logps/chosen": -389.22222900390625,
"logps/rejected": -422.08209228515625,
"loss": 0.5407,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.5574816465377808,
"rewards/margins": 0.730495035648346,
"rewards/rejected": -1.287976861000061,
"step": 360
},
{
"epoch": 0.7744636316064888,
"grad_norm": 34.13531738812179,
"learning_rate": 7.289996455765748e-08,
"logits/chosen": -0.6234959363937378,
"logits/rejected": -0.37343746423721313,
"logps/chosen": -344.8446960449219,
"logps/rejected": -383.56939697265625,
"loss": 0.5266,
"rewards/accuracies": 0.768750011920929,
"rewards/chosen": -0.540058434009552,
"rewards/margins": 0.757440984249115,
"rewards/rejected": -1.297499418258667,
"step": 370
},
{
"epoch": 0.7953950811093669,
"grad_norm": 21.95611942170756,
"learning_rate": 6.046442623320145e-08,
"logits/chosen": -0.3392348885536194,
"logits/rejected": -0.2354392111301422,
"logps/chosen": -356.6414794921875,
"logps/rejected": -457.1575622558594,
"loss": 0.5252,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.7903109788894653,
"rewards/margins": 0.8445862531661987,
"rewards/rejected": -1.634897232055664,
"step": 380
},
{
"epoch": 0.8163265306122449,
"grad_norm": 25.808898227359457,
"learning_rate": 4.904486005914027e-08,
"logits/chosen": -0.7156884074211121,
"logits/rejected": -0.4770375192165375,
"logps/chosen": -443.89642333984375,
"logps/rejected": -474.5353088378906,
"loss": 0.5136,
"rewards/accuracies": 0.7250000238418579,
"rewards/chosen": -0.6931928396224976,
"rewards/margins": 0.6563575863838196,
"rewards/rejected": -1.349550485610962,
"step": 390
},
{
"epoch": 0.837257980115123,
"grad_norm": 20.792087565548005,
"learning_rate": 3.8702478614051345e-08,
"logits/chosen": -0.577874481678009,
"logits/rejected": -0.3143076002597809,
"logps/chosen": -343.8722839355469,
"logps/rejected": -391.06610107421875,
"loss": 0.528,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.6408818960189819,
"rewards/margins": 0.6500493884086609,
"rewards/rejected": -1.290931224822998,
"step": 400
},
{
"epoch": 0.858189429618001,
"grad_norm": 24.33600208096771,
"learning_rate": 2.9492720416985e-08,
"logits/chosen": -0.859704852104187,
"logits/rejected": -0.572528600692749,
"logps/chosen": -388.897705078125,
"logps/rejected": -415.8624572753906,
"loss": 0.5399,
"rewards/accuracies": 0.7562500238418579,
"rewards/chosen": -0.6071329712867737,
"rewards/margins": 0.7060071229934692,
"rewards/rejected": -1.3131401538848877,
"step": 410
},
{
"epoch": 0.8791208791208791,
"grad_norm": 24.28511601342855,
"learning_rate": 2.1464952759020856e-08,
"logits/chosen": -0.64429771900177,
"logits/rejected": -0.47082147002220154,
"logps/chosen": -341.98504638671875,
"logps/rejected": -408.1618957519531,
"loss": 0.5184,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.6186074614524841,
"rewards/margins": 0.6798468828201294,
"rewards/rejected": -1.2984542846679688,
"step": 420
},
{
"epoch": 0.9000523286237572,
"grad_norm": 24.32668129564916,
"learning_rate": 1.4662207078575684e-08,
"logits/chosen": -0.627988338470459,
"logits/rejected": -0.37057411670684814,
"logps/chosen": -384.9641418457031,
"logps/rejected": -431.1815490722656,
"loss": 0.5035,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": -0.5622997283935547,
"rewards/margins": 0.669585108757019,
"rewards/rejected": -1.2318848371505737,
"step": 430
},
{
"epoch": 0.9209837781266352,
"grad_norm": 20.42397496588787,
"learning_rate": 9.12094829893642e-09,
"logits/chosen": -0.8050533533096313,
"logits/rejected": -0.5536060929298401,
"logps/chosen": -338.9949035644531,
"logps/rejected": -367.10296630859375,
"loss": 0.5177,
"rewards/accuracies": 0.71875,
"rewards/chosen": -0.5558196306228638,
"rewards/margins": 0.6245731711387634,
"rewards/rejected": -1.1803929805755615,
"step": 440
},
{
"epoch": 0.9419152276295133,
"grad_norm": 22.341141934616147,
"learning_rate": 4.8708793644441086e-09,
"logits/chosen": -0.513416588306427,
"logits/rejected": -0.3268742263317108,
"logps/chosen": -371.934814453125,
"logps/rejected": -432.7196350097656,
"loss": 0.5312,
"rewards/accuracies": 0.75,
"rewards/chosen": -0.6752878427505493,
"rewards/margins": 0.7238127589225769,
"rewards/rejected": -1.399100661277771,
"step": 450
},
{
"epoch": 0.9628466771323915,
"grad_norm": 24.32627212356958,
"learning_rate": 1.9347820230782295e-09,
"logits/chosen": -0.7187199592590332,
"logits/rejected": -0.3386891782283783,
"logps/chosen": -361.8067932128906,
"logps/rejected": -383.07525634765625,
"loss": 0.5169,
"rewards/accuracies": 0.762499988079071,
"rewards/chosen": -0.6250866651535034,
"rewards/margins": 0.7100397348403931,
"rewards/rejected": -1.335126280784607,
"step": 460
},
{
"epoch": 0.9837781266352695,
"grad_norm": 22.68698991416887,
"learning_rate": 3.2839470889836627e-10,
"logits/chosen": -0.6799210906028748,
"logits/rejected": -0.4490284025669098,
"logps/chosen": -387.94158935546875,
"logps/rejected": -422.15155029296875,
"loss": 0.5164,
"rewards/accuracies": 0.706250011920929,
"rewards/chosen": -0.5430302023887634,
"rewards/margins": 0.6616371273994446,
"rewards/rejected": -1.204667329788208,
"step": 470
},
{
"epoch": 0.9984301412872841,
"eval_logits/chosen": -0.44416600465774536,
"eval_logits/rejected": -0.26567625999450684,
"eval_logps/chosen": -342.55780029296875,
"eval_logps/rejected": -420.65606689453125,
"eval_loss": 0.533210277557373,
"eval_rewards/accuracies": 0.76953125,
"eval_rewards/chosen": -0.568053126335144,
"eval_rewards/margins": 0.7595610618591309,
"eval_rewards/rejected": -1.3276140689849854,
"eval_runtime": 169.453,
"eval_samples_per_second": 11.803,
"eval_steps_per_second": 0.189,
"step": 477
},
{
"epoch": 0.9984301412872841,
"step": 477,
"total_flos": 0.0,
"train_loss": 0.5657099517886244,
"train_runtime": 14255.2941,
"train_samples_per_second": 4.289,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 477,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
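
The JSON above is the standard `transformers` Trainer state for this DPO run, with per-step metrics stored under `log_history`. Below is a minimal sketch (not part of the saved state) for loading the file and plotting the training loss and reward margin; the local filename `trainer_state.json` and the use of matplotlib are assumptions, not something the file itself prescribes.

```python
# Minimal sketch: parse trainer_state.json and plot DPO loss / reward margin.
# Assumes the file is saved locally as "trainer_state.json".
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only per-step training entries; the final records hold eval_* and
# train-summary fields instead of "loss"/"learning_rate".
train_logs = [
    e for e in state["log_history"] if "loss" in e and "learning_rate" in e
]

steps = [e["step"] for e in train_logs]
loss = [e["loss"] for e in train_logs]
margins = [e["rewards/margins"] for e in train_logs]

fig, ax1 = plt.subplots()
ax1.plot(steps, loss, color="tab:blue", label="train loss")
ax1.set_xlabel("step")
ax1.set_ylabel("DPO loss")

ax2 = ax1.twinx()
ax2.plot(steps, margins, color="tab:orange", label="rewards/margins")
ax2.set_ylabel("reward margin")

fig.legend(loc="upper right")
fig.tight_layout()
plt.show()
```

As a usage note, the same filtering pattern works for the evaluation record (`"eval_loss" in e`) if you want to overlay the single eval point logged at step 477.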