|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.998691442030882,
  "eval_steps": 100,
  "global_step": 477,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002093692750588851,
      "grad_norm": 2.7231976257852213,
      "learning_rate": 1.0416666666666666e-08,
      "logits/chosen": 0.31339484453201294,
      "logits/rejected": 0.2590007483959198,
      "logps/chosen": -265.7616271972656,
      "logps/rejected": -290.8130798339844,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.02093692750588851,
      "grad_norm": 2.7958259005099717,
      "learning_rate": 1.0416666666666667e-07,
      "logits/chosen": -0.009028661996126175,
      "logits/rejected": 0.00938305165618658,
      "logps/chosen": -317.1330871582031,
      "logps/rejected": -284.6680908203125,
      "loss": 0.6933,
      "rewards/accuracies": 0.4652777910232544,
      "rewards/chosen": 0.00040221353992819786,
      "rewards/margins": 0.0006802480202168226,
      "rewards/rejected": -0.00027803477132692933,
      "step": 10
    },
    {
      "epoch": 0.04187385501177702,
      "grad_norm": 2.497078745176454,
      "learning_rate": 2.0833333333333333e-07,
      "logits/chosen": 0.1788354218006134,
      "logits/rejected": 0.09145273268222809,
      "logps/chosen": -283.5021057128906,
      "logps/rejected": -266.12744140625,
      "loss": 0.6932,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.00023679334844928235,
      "rewards/margins": -0.0011466725263744593,
      "rewards/rejected": 0.0009098790469579399,
      "step": 20
    },
    {
      "epoch": 0.06281078251766553,
      "grad_norm": 2.8191113293581522,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": 0.3110828995704651,
      "logits/rejected": 0.2543540596961975,
      "logps/chosen": -313.4383850097656,
      "logps/rejected": -255.025390625,
      "loss": 0.6927,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.005203199107199907,
      "rewards/margins": 0.0011228162329643965,
      "rewards/rejected": 0.0040803831070661545,
      "step": 30
    },
    {
      "epoch": 0.08374771002355404,
      "grad_norm": 2.4973076097829425,
      "learning_rate": 4.1666666666666667e-07,
      "logits/chosen": 0.23149657249450684,
      "logits/rejected": 0.15309187769889832,
      "logps/chosen": -254.95547485351562,
      "logps/rejected": -243.8360595703125,
      "loss": 0.6909,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.016948821023106575,
      "rewards/margins": 0.004978521727025509,
      "rewards/rejected": 0.011970298364758492,
      "step": 40
    },
    {
      "epoch": 0.10468463752944256,
      "grad_norm": 2.7323166032086212,
      "learning_rate": 4.999731868769026e-07,
      "logits/chosen": 0.037649042904376984,
      "logits/rejected": -0.018303174525499344,
      "logps/chosen": -272.1968078613281,
      "logps/rejected": -257.0528259277344,
      "loss": 0.6879,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": 0.03623826429247856,
      "rewards/margins": 0.0119241988286376,
      "rewards/rejected": 0.024314064532518387,
      "step": 50
    },
    {
      "epoch": 0.12562156503533106,
      "grad_norm": 2.7451511783135,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": 0.05454826354980469,
      "logits/rejected": 0.009258508682250977,
      "logps/chosen": -252.8040008544922,
      "logps/rejected": -243.5467071533203,
      "loss": 0.6843,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": 0.05339987948536873,
      "rewards/margins": 0.02153691276907921,
      "rewards/rejected": 0.03186297044157982,
      "step": 60
    },
    {
      "epoch": 0.14655849254121958,
      "grad_norm": 2.547524054758782,
      "learning_rate": 4.967625656594781e-07,
      "logits/chosen": 0.06600433588027954,
      "logits/rejected": 0.12342119216918945,
      "logps/chosen": -307.1918640136719,
      "logps/rejected": -281.8422546386719,
      "loss": 0.6779,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": 0.047131381928920746,
      "rewards/margins": 0.02366420440375805,
      "rewards/rejected": 0.023467179387807846,
      "step": 70
    },
    {
      "epoch": 0.16749542004710807,
      "grad_norm": 2.8385797928682264,
      "learning_rate": 4.93167072587771e-07,
      "logits/chosen": 0.13859248161315918,
      "logits/rejected": -0.07527128607034683,
      "logps/chosen": -301.5353088378906,
      "logps/rejected": -246.93539428710938,
      "loss": 0.6711,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": 0.02880977652966976,
      "rewards/margins": 0.054922521114349365,
      "rewards/rejected": -0.026112744584679604,
      "step": 80
    },
    {
      "epoch": 0.1884323475529966,
      "grad_norm": 2.868384421677804,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": 0.006677967496216297,
      "logits/rejected": -0.14207637310028076,
      "logps/chosen": -266.31610107421875,
      "logps/rejected": -247.4255828857422,
      "loss": 0.6581,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.03523142263293266,
      "rewards/margins": 0.0684022605419159,
      "rewards/rejected": -0.10363368690013885,
      "step": 90
    },
    {
      "epoch": 0.2093692750588851,
      "grad_norm": 3.873092150128916,
      "learning_rate": 4.820919832540181e-07,
      "logits/chosen": -0.12544338405132294,
      "logits/rejected": -0.21319980919361115,
      "logps/chosen": -296.1676940917969,
      "logps/rejected": -293.9172058105469,
      "loss": 0.6439,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.01791384071111679,
      "rewards/margins": 0.1594783067703247,
      "rewards/rejected": -0.1773921251296997,
      "step": 100
    },
    {
      "epoch": 0.2093692750588851,
      "eval_logits/chosen": 0.06579093635082245,
      "eval_logits/rejected": -0.07981658726930618,
      "eval_logps/chosen": -307.8224792480469,
      "eval_logps/rejected": -283.386962890625,
      "eval_loss": 0.6410184502601624,
      "eval_rewards/accuracies": 0.628000020980835,
      "eval_rewards/chosen": -0.07293682545423508,
      "eval_rewards/margins": 0.10445311665534973,
      "eval_rewards/rejected": -0.1773899346590042,
      "eval_runtime": 87.7355,
      "eval_samples_per_second": 22.796,
      "eval_steps_per_second": 1.425,
      "step": 100
    },
    {
      "epoch": 0.23030620256477363,
      "grad_norm": 3.574469392292687,
      "learning_rate": 4.7467175306295647e-07,
      "logits/chosen": 0.00253683771006763,
      "logits/rejected": -0.24452033638954163,
      "logps/chosen": -308.55859375,
      "logps/rejected": -284.8210144042969,
      "loss": 0.6409,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.11748101562261581,
      "rewards/margins": 0.15921394526958466,
      "rewards/rejected": -0.27669501304626465,
      "step": 110
    },
    {
      "epoch": 0.2512431300706621,
      "grad_norm": 4.167910804987986,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": 0.00922430120408535,
      "logits/rejected": -0.1478758305311203,
      "logps/chosen": -269.90643310546875,
      "logps/rejected": -271.8093566894531,
      "loss": 0.6248,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.047450125217437744,
      "rewards/margins": 0.1616283804178238,
      "rewards/rejected": -0.20907850563526154,
      "step": 120
    },
    {
      "epoch": 0.2721800575765506,
      "grad_norm": 4.033884187153266,
      "learning_rate": 4.5626458262912735e-07,
      "logits/chosen": 0.0875096470117569,
      "logits/rejected": -0.04221571609377861,
      "logps/chosen": -307.96002197265625,
      "logps/rejected": -288.478515625,
      "loss": 0.6088,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.10714157670736313,
      "rewards/margins": 0.26042240858078003,
      "rewards/rejected": -0.36756399273872375,
      "step": 130
    },
    {
      "epoch": 0.29311698508243916,
      "grad_norm": 4.25995975361505,
      "learning_rate": 4.453763107901675e-07,
      "logits/chosen": -0.07363971322774887,
      "logits/rejected": -0.16642406582832336,
      "logps/chosen": -321.75311279296875,
      "logps/rejected": -296.7992858886719,
      "loss": 0.6049,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.0883590430021286,
      "rewards/margins": 0.2593180239200592,
      "rewards/rejected": -0.3476770520210266,
      "step": 140
    },
    {
      "epoch": 0.31405391258832765,
      "grad_norm": 4.821484890123611,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": 0.07841499894857407,
      "logits/rejected": -0.14008447527885437,
      "logps/chosen": -298.29852294921875,
      "logps/rejected": -295.29400634765625,
      "loss": 0.6107,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.10341843217611313,
      "rewards/margins": 0.3007178008556366,
      "rewards/rejected": -0.4041362404823303,
      "step": 150
    },
    {
      "epoch": 0.33499084009421615,
      "grad_norm": 4.255771334239287,
      "learning_rate": 4.2052190435769554e-07,
      "logits/chosen": 0.12391266971826553,
      "logits/rejected": -0.03050372563302517,
      "logps/chosen": -290.95452880859375,
      "logps/rejected": -316.73565673828125,
      "loss": 0.5994,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.22425119578838348,
      "rewards/margins": 0.349310040473938,
      "rewards/rejected": -0.5735613107681274,
      "step": 160
    },
    {
      "epoch": 0.3559277676001047,
      "grad_norm": 4.50821485477488,
      "learning_rate": 4.0668899744407567e-07,
      "logits/chosen": -0.0050708516500890255,
      "logits/rejected": -0.09919281303882599,
      "logps/chosen": -282.9065856933594,
      "logps/rejected": -282.0905456542969,
      "loss": 0.6045,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.24741128087043762,
      "rewards/margins": 0.26661524176597595,
      "rewards/rejected": -0.5140265226364136,
      "step": 170
    },
    {
      "epoch": 0.3768646951059932,
      "grad_norm": 5.725819764506176,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": 0.0031631961464881897,
      "logits/rejected": -0.0937231034040451,
      "logps/chosen": -284.6784973144531,
      "logps/rejected": -321.97430419921875,
      "loss": 0.5824,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.2094871550798416,
      "rewards/margins": 0.27800601720809937,
      "rewards/rejected": -0.4874931871891022,
      "step": 180
    },
    {
      "epoch": 0.39780162261188173,
      "grad_norm": 5.059197052882832,
      "learning_rate": 3.765821230985757e-07,
      "logits/chosen": -0.01767856441438198,
      "logits/rejected": -0.05129119008779526,
      "logps/chosen": -287.68365478515625,
      "logps/rejected": -333.1453552246094,
      "loss": 0.5846,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.1571994572877884,
      "rewards/margins": 0.337828129529953,
      "rewards/rejected": -0.4950276017189026,
      "step": 190
    },
    {
      "epoch": 0.4187385501177702,
      "grad_norm": 5.8875457116163075,
      "learning_rate": 3.604695382782159e-07,
      "logits/chosen": 0.014894035644829273,
      "logits/rejected": -0.006296065635979176,
      "logps/chosen": -300.3623962402344,
      "logps/rejected": -313.85174560546875,
      "loss": 0.5947,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.29044660925865173,
      "rewards/margins": 0.20099826157093048,
      "rewards/rejected": -0.4914449155330658,
      "step": 200
    },
    {
      "epoch": 0.4187385501177702,
      "eval_logits/chosen": 0.10439297556877136,
      "eval_logits/rejected": -0.04178494215011597,
      "eval_logps/chosen": -330.70782470703125,
      "eval_logps/rejected": -325.9523620605469,
      "eval_loss": 0.5742711424827576,
      "eval_rewards/accuracies": 0.6800000071525574,
      "eval_rewards/chosen": -0.3017900884151459,
      "eval_rewards/margins": 0.3012538552284241,
      "eval_rewards/rejected": -0.6030439734458923,
      "eval_runtime": 87.9732,
      "eval_samples_per_second": 22.734,
      "eval_steps_per_second": 1.421,
      "step": 200
    },
    {
      "epoch": 0.4396754776236587,
      "grad_norm": 5.899001971857581,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": 0.1516989767551422,
      "logits/rejected": -0.09838330000638962,
      "logps/chosen": -331.3431091308594,
      "logps/rejected": -322.9111022949219,
      "loss": 0.5869,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.22792911529541016,
      "rewards/margins": 0.3638264834880829,
      "rewards/rejected": -0.5917555689811707,
      "step": 210
    },
    {
      "epoch": 0.46061240512954726,
      "grad_norm": 4.922560393149178,
      "learning_rate": 3.265574537815398e-07,
      "logits/chosen": -0.1273319274187088,
      "logits/rejected": -0.1170346736907959,
      "logps/chosen": -275.1965026855469,
      "logps/rejected": -299.39886474609375,
      "loss": 0.5784,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.24686738848686218,
      "rewards/margins": 0.23145708441734314,
      "rewards/rejected": -0.4783244729042053,
      "step": 220
    },
    {
      "epoch": 0.48154933263543576,
      "grad_norm": 5.531206209353442,
      "learning_rate": 3.0893973387735683e-07,
      "logits/chosen": 0.12400732189416885,
      "logits/rejected": 0.05335181951522827,
      "logps/chosen": -280.760498046875,
      "logps/rejected": -336.8385314941406,
      "loss": 0.575,
      "rewards/accuracies": 0.78125,
      "rewards/chosen": -0.19060644507408142,
      "rewards/margins": 0.4444652199745178,
      "rewards/rejected": -0.6350716352462769,
      "step": 230
    },
    {
      "epoch": 0.5024862601413242,
      "grad_norm": 5.199721713252603,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": 0.06820286810398102,
      "logits/rejected": 0.023341525346040726,
      "logps/chosen": -289.61431884765625,
      "logps/rejected": -310.34649658203125,
      "loss": 0.5587,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.23245009779930115,
      "rewards/margins": 0.31617048382759094,
      "rewards/rejected": -0.5486205816268921,
      "step": 240
    },
    {
      "epoch": 0.5234231876472127,
      "grad_norm": 5.697878494001561,
      "learning_rate": 2.7285261601056697e-07,
      "logits/chosen": 0.062075383961200714,
      "logits/rejected": -0.07256612926721573,
      "logps/chosen": -341.1910400390625,
      "logps/rejected": -346.7120666503906,
      "loss": 0.5674,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.32050999999046326,
      "rewards/margins": 0.43985146284103394,
      "rewards/rejected": -0.7603614926338196,
      "step": 250
    },
    {
      "epoch": 0.5443601151531012,
      "grad_norm": 6.965226254526502,
      "learning_rate": 2.5457665670441937e-07,
      "logits/chosen": 0.014003160409629345,
      "logits/rejected": -0.04488863795995712,
      "logps/chosen": -329.73480224609375,
      "logps/rejected": -335.5534973144531,
      "loss": 0.5611,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.4078213572502136,
      "rewards/margins": 0.4492790102958679,
      "rewards/rejected": -0.8571003675460815,
      "step": 260
    },
    {
      "epoch": 0.5652970426589898,
      "grad_norm": 5.2324008587349145,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": 0.09336505830287933,
      "logits/rejected": -0.09427224844694138,
      "logps/chosen": -319.9281921386719,
      "logps/rejected": -335.8837890625,
      "loss": 0.5603,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.2503654658794403,
      "rewards/margins": 0.42599910497665405,
      "rewards/rejected": -0.676364541053772,
      "step": 270
    },
    {
      "epoch": 0.5862339701648783,
      "grad_norm": 7.574276409195053,
      "learning_rate": 2.1804923757009882e-07,
      "logits/chosen": 0.015473452396690845,
      "logits/rejected": -0.06568828970193863,
      "logps/chosen": -314.6773681640625,
      "logps/rejected": -301.77789306640625,
      "loss": 0.567,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.23857524991035461,
      "rewards/margins": 0.3460666239261627,
      "rewards/rejected": -0.5846418142318726,
      "step": 280
    },
    {
      "epoch": 0.6071708976707668,
      "grad_norm": 5.79483769704303,
      "learning_rate": 1.9999357655598891e-07,
      "logits/chosen": -0.024038776755332947,
      "logits/rejected": -0.007775201462209225,
      "logps/chosen": -312.35601806640625,
      "logps/rejected": -347.37744140625,
      "loss": 0.5569,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.37719154357910156,
      "rewards/margins": 0.3905247151851654,
      "rewards/rejected": -0.7677162885665894,
      "step": 290
    },
    {
      "epoch": 0.6281078251766553,
      "grad_norm": 5.443007230280701,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": 0.08472506701946259,
      "logits/rejected": -0.027022168040275574,
      "logps/chosen": -335.15899658203125,
      "logps/rejected": -348.7984313964844,
      "loss": 0.5465,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.2704711854457855,
      "rewards/margins": 0.47332271933555603,
      "rewards/rejected": -0.7437939047813416,
      "step": 300
    },
    {
      "epoch": 0.6281078251766553,
      "eval_logits/chosen": 0.13741473853588104,
      "eval_logits/rejected": 0.004349248018115759,
      "eval_logps/chosen": -326.75640869140625,
      "eval_logps/rejected": -327.65399169921875,
      "eval_loss": 0.558218240737915,
      "eval_rewards/accuracies": 0.7080000042915344,
      "eval_rewards/chosen": -0.2622760236263275,
      "eval_rewards/margins": 0.35778409242630005,
      "eval_rewards/rejected": -0.6200602054595947,
      "eval_runtime": 87.8556,
      "eval_samples_per_second": 22.765,
      "eval_steps_per_second": 1.423,
      "step": 300
    },
    {
      "epoch": 0.6490447526825438,
      "grad_norm": 4.82766676690984,
      "learning_rate": 1.647817538357072e-07,
      "logits/chosen": -0.022204505279660225,
      "logits/rejected": -0.05295448750257492,
      "logps/chosen": -332.45025634765625,
      "logps/rejected": -308.87054443359375,
      "loss": 0.5477,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.17240366339683533,
      "rewards/margins": 0.5077041983604431,
      "rewards/rejected": -0.6801078915596008,
      "step": 310
    },
    {
      "epoch": 0.6699816801884323,
      "grad_norm": 5.792439725578247,
      "learning_rate": 1.478143389201113e-07,
      "logits/chosen": 0.11390535533428192,
      "logits/rejected": 0.0557434968650341,
      "logps/chosen": -301.05926513671875,
      "logps/rejected": -313.931396484375,
      "loss": 0.539,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.2538585662841797,
      "rewards/margins": 0.47795993089675903,
      "rewards/rejected": -0.7318183779716492,
      "step": 320
    },
    {
      "epoch": 0.6909186076943209,
      "grad_norm": 5.385922961239142,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": 0.056433577090501785,
      "logits/rejected": 0.003667551325634122,
      "logps/chosen": -312.01531982421875,
      "logps/rejected": -341.01568603515625,
      "loss": 0.5483,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.30724188685417175,
      "rewards/margins": 0.4886665940284729,
      "rewards/rejected": -0.7959084510803223,
      "step": 330
    },
    {
      "epoch": 0.7118555352002094,
      "grad_norm": 5.832198529858635,
      "learning_rate": 1.1561076868822755e-07,
      "logits/chosen": 0.19773051142692566,
      "logits/rejected": 0.07149332761764526,
      "logps/chosen": -314.9180908203125,
      "logps/rejected": -340.6194763183594,
      "loss": 0.5516,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.3371719717979431,
      "rewards/margins": 0.4226114749908447,
      "rewards/rejected": -0.7597833871841431,
      "step": 340
    },
    {
      "epoch": 0.7327924627060979,
      "grad_norm": 6.878576068920105,
      "learning_rate": 1.0054723495346482e-07,
      "logits/chosen": 0.0181022509932518,
      "logits/rejected": -0.025855189189314842,
      "logps/chosen": -305.97833251953125,
      "logps/rejected": -320.78485107421875,
      "loss": 0.5305,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.33639293909072876,
      "rewards/margins": 0.4484967291355133,
      "rewards/rejected": -0.7848896980285645,
      "step": 350
    },
    {
      "epoch": 0.7537293902119864,
      "grad_norm": 5.936494076324989,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": 0.16836483776569366,
      "logits/rejected": 0.018577728420495987,
      "logps/chosen": -354.95379638671875,
      "logps/rejected": -352.0823974609375,
      "loss": 0.5593,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.3025088906288147,
      "rewards/margins": 0.5337738990783691,
      "rewards/rejected": -0.8362827301025391,
      "step": 360
    },
    {
      "epoch": 0.7746663177178749,
      "grad_norm": 6.645162255689744,
      "learning_rate": 7.289996455765748e-08,
      "logits/chosen": 0.010707542300224304,
      "logits/rejected": -0.08555378764867783,
      "logps/chosen": -316.3924560546875,
      "logps/rejected": -317.36712646484375,
      "loss": 0.5592,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.3052045702934265,
      "rewards/margins": 0.4817258417606354,
      "rewards/rejected": -0.7869305610656738,
      "step": 370
    },
    {
      "epoch": 0.7956032452237635,
      "grad_norm": 6.455066388510761,
      "learning_rate": 6.046442623320145e-08,
      "logits/chosen": -0.03245114907622337,
      "logits/rejected": -0.15504470467567444,
      "logps/chosen": -328.40972900390625,
      "logps/rejected": -350.3254089355469,
      "loss": 0.5456,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.3893979489803314,
      "rewards/margins": 0.5254009962081909,
      "rewards/rejected": -0.9147989153862,
      "step": 380
    },
    {
      "epoch": 0.816540172729652,
      "grad_norm": 5.83609294237127,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": 0.13171380758285522,
      "logits/rejected": 0.06406144797801971,
      "logps/chosen": -325.7466125488281,
      "logps/rejected": -354.4484558105469,
      "loss": 0.5475,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -0.2866363227367401,
      "rewards/margins": 0.433144748210907,
      "rewards/rejected": -0.7197811007499695,
      "step": 390
    },
    {
      "epoch": 0.8374771002355405,
      "grad_norm": 5.347334288876349,
      "learning_rate": 3.8702478614051345e-08,
      "logits/chosen": 0.01393158733844757,
      "logits/rejected": -0.0664617270231247,
      "logps/chosen": -263.78369140625,
      "logps/rejected": -308.0682678222656,
      "loss": 0.5513,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.26276373863220215,
      "rewards/margins": 0.5350881218910217,
      "rewards/rejected": -0.7978518605232239,
      "step": 400
    },
    {
      "epoch": 0.8374771002355405,
      "eval_logits/chosen": 0.14339293539524078,
      "eval_logits/rejected": 0.009749370627105236,
      "eval_logps/chosen": -335.09014892578125,
      "eval_logps/rejected": -340.0029296875,
      "eval_loss": 0.5496612787246704,
      "eval_rewards/accuracies": 0.7039999961853027,
      "eval_rewards/chosen": -0.3456135392189026,
      "eval_rewards/margins": 0.3979361355304718,
      "eval_rewards/rejected": -0.7435497045516968,
      "eval_runtime": 87.8296,
      "eval_samples_per_second": 22.771,
      "eval_steps_per_second": 1.423,
      "step": 400
    },
    {
      "epoch": 0.8584140277414289,
      "grad_norm": 5.348647796985859,
      "learning_rate": 2.9492720416985e-08,
      "logits/chosen": 0.11225803196430206,
      "logits/rejected": -0.09922051429748535,
      "logps/chosen": -355.53363037109375,
      "logps/rejected": -353.32647705078125,
      "loss": 0.558,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.37236982583999634,
      "rewards/margins": 0.4548061788082123,
      "rewards/rejected": -0.8271759152412415,
      "step": 410
    },
    {
      "epoch": 0.8793509552473174,
      "grad_norm": 5.72027736083062,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": 0.17516909539699554,
      "logits/rejected": 0.07303206622600555,
      "logps/chosen": -310.9562683105469,
      "logps/rejected": -369.8873291015625,
      "loss": 0.5383,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.35336142778396606,
      "rewards/margins": 0.5296350717544556,
      "rewards/rejected": -0.8829964399337769,
      "step": 420
    },
    {
      "epoch": 0.9002878827532059,
      "grad_norm": 6.6876151729248505,
      "learning_rate": 1.4662207078575684e-08,
      "logits/chosen": 0.15267853438854218,
      "logits/rejected": -0.08590138703584671,
      "logps/chosen": -335.16015625,
      "logps/rejected": -326.2135314941406,
      "loss": 0.5334,
      "rewards/accuracies": 0.7437499761581421,
      "rewards/chosen": -0.27824270725250244,
      "rewards/margins": 0.5145388841629028,
      "rewards/rejected": -0.7927817106246948,
      "step": 430
    },
    {
      "epoch": 0.9212248102590945,
      "grad_norm": 5.06263513080775,
      "learning_rate": 9.12094829893642e-09,
      "logits/chosen": 0.11619402468204498,
      "logits/rejected": 0.055844999849796295,
      "logps/chosen": -309.276123046875,
      "logps/rejected": -312.76873779296875,
      "loss": 0.5423,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.29939916729927063,
      "rewards/margins": 0.43951496481895447,
      "rewards/rejected": -0.7389141321182251,
      "step": 440
    },
    {
      "epoch": 0.942161737764983,
      "grad_norm": 6.405797457991359,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": 0.12115111202001572,
      "logits/rejected": 0.04374055936932564,
      "logps/chosen": -309.6786804199219,
      "logps/rejected": -352.6421203613281,
      "loss": 0.5424,
      "rewards/accuracies": 0.731249988079071,
      "rewards/chosen": -0.28138723969459534,
      "rewards/margins": 0.5625799894332886,
      "rewards/rejected": -0.8439672589302063,
      "step": 450
    },
    {
      "epoch": 0.9630986652708715,
      "grad_norm": 5.426672640268625,
      "learning_rate": 1.9347820230782295e-09,
      "logits/chosen": 0.07451046258211136,
      "logits/rejected": -0.14011576771736145,
      "logps/chosen": -309.74957275390625,
      "logps/rejected": -322.1665954589844,
      "loss": 0.5512,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": -0.29780006408691406,
      "rewards/margins": 0.5288690328598022,
      "rewards/rejected": -0.8266690969467163,
      "step": 460
    },
    {
      "epoch": 0.98403559277676,
      "grad_norm": 5.948627082038746,
      "learning_rate": 3.2839470889836627e-10,
      "logits/chosen": 0.15837179124355316,
      "logits/rejected": 0.020618747919797897,
      "logps/chosen": -314.57257080078125,
      "logps/rejected": -353.36029052734375,
      "loss": 0.5305,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.2985171377658844,
      "rewards/margins": 0.5004598498344421,
      "rewards/rejected": -0.7989770174026489,
      "step": 470
    },
    {
      "epoch": 0.998691442030882,
      "step": 477,
      "total_flos": 0.0,
      "train_loss": 0.5905132868504874,
      "train_runtime": 7548.8448,
      "train_samples_per_second": 8.098,
      "train_steps_per_second": 0.063
    }
  ],
  "logging_steps": 10,
  "max_steps": 477,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|