|
{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.8361314731615037,
  "eval_steps": 150,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14187751241428234,
      "grad_norm": 19.588634490966797,
      "learning_rate": 9.763481551561022e-07,
      "logits/chosen": -2.82487154006958,
      "logits/rejected": -2.916430950164795,
      "logps/chosen": -288.4345397949219,
      "logps/rejected": -191.39353942871094,
      "loss": 0.4926,
      "rewards/accuracies": 0.8491666913032532,
      "rewards/chosen": 0.3584444224834442,
      "rewards/margins": 0.5347671508789062,
      "rewards/rejected": -0.17632275819778442,
      "step": 150
    },
    {
      "epoch": 0.14187751241428234,
      "eval_logits/chosen": -2.941565990447998,
      "eval_logits/rejected": -3.0292787551879883,
      "eval_logps/chosen": -290.8563232421875,
      "eval_logps/rejected": -199.278076171875,
      "eval_loss": 0.2873661518096924,
      "eval_rewards/accuracies": 0.9881852269172668,
      "eval_rewards/chosen": 0.28964486718177795,
      "eval_rewards/margins": 1.2328312397003174,
      "eval_rewards/rejected": -0.9431862831115723,
      "eval_runtime": 562.5858,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.881,
      "step": 150
    },
    {
      "epoch": 0.2837550248285647,
      "grad_norm": 4.974961280822754,
      "learning_rate": 9.526963103122043e-07,
      "logits/chosen": -2.976027727127075,
      "logits/rejected": -3.071249485015869,
      "logps/chosen": -284.5093078613281,
      "logps/rejected": -204.3736572265625,
      "loss": 0.1775,
      "rewards/accuracies": 0.9891666769981384,
      "rewards/chosen": 0.5465984344482422,
      "rewards/margins": 2.021888494491577,
      "rewards/rejected": -1.475290060043335,
      "step": 300
    },
    {
      "epoch": 0.2837550248285647,
      "eval_logits/chosen": -2.971857786178589,
      "eval_logits/rejected": -3.0642826557159424,
      "eval_logps/chosen": -286.28594970703125,
      "eval_logps/rejected": -209.5599822998047,
      "eval_loss": 0.11425645649433136,
      "eval_rewards/accuracies": 0.9910207986831665,
      "eval_rewards/chosen": 0.7466850876808167,
      "eval_rewards/margins": 2.7180631160736084,
      "eval_rewards/rejected": -1.9713778495788574,
      "eval_runtime": 562.6097,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.881,
      "step": 300
    },
    {
      "epoch": 0.425632537242847,
      "grad_norm": 22.92677116394043,
      "learning_rate": 9.290444654683065e-07,
      "logits/chosen": -2.9028356075286865,
      "logits/rejected": -3.013791084289551,
      "logps/chosen": -282.4086608886719,
      "logps/rejected": -209.1995849609375,
      "loss": 0.0941,
      "rewards/accuracies": 0.9916666746139526,
      "rewards/chosen": 0.9992213845252991,
      "rewards/margins": 3.071294069290161,
      "rewards/rejected": -2.0720725059509277,
      "step": 450
    },
    {
      "epoch": 0.425632537242847,
      "eval_logits/chosen": -2.90273118019104,
      "eval_logits/rejected": -3.002680540084839,
      "eval_logps/chosen": -282.59246826171875,
      "eval_logps/rejected": -213.33204650878906,
      "eval_loss": 0.07543039321899414,
      "eval_rewards/accuracies": 0.9933837652206421,
      "eval_rewards/chosen": 1.1160286664962769,
      "eval_rewards/margins": 3.464611768722534,
      "eval_rewards/rejected": -2.348583459854126,
      "eval_runtime": 563.5688,
      "eval_samples_per_second": 3.753,
      "eval_steps_per_second": 1.877,
      "step": 450
    },
    {
      "epoch": 0.5675100496571294,
      "grad_norm": 22.853031158447266,
      "learning_rate": 9.053926206244087e-07,
      "logits/chosen": -2.9159042835235596,
      "logits/rejected": -3.0238444805145264,
      "logps/chosen": -284.7449951171875,
      "logps/rejected": -216.9216766357422,
      "loss": 0.0687,
      "rewards/accuracies": 0.9958333373069763,
      "rewards/chosen": 0.9207213521003723,
      "rewards/margins": 3.7362918853759766,
      "rewards/rejected": -2.815570592880249,
      "step": 600
    },
    {
      "epoch": 0.5675100496571294,
      "eval_logits/chosen": -2.8956897258758545,
      "eval_logits/rejected": -3.010921001434326,
      "eval_logps/chosen": -283.6835632324219,
      "eval_logps/rejected": -220.16329956054688,
      "eval_loss": 0.058039747178554535,
      "eval_rewards/accuracies": 0.9948015213012695,
      "eval_rewards/chosen": 1.0069197416305542,
      "eval_rewards/margins": 4.038629531860352,
      "eval_rewards/rejected": -3.031709671020508,
      "eval_runtime": 563.2115,
      "eval_samples_per_second": 3.755,
      "eval_steps_per_second": 1.879,
      "step": 600
    },
    {
      "epoch": 0.7093875620714116,
      "grad_norm": 4.9486188888549805,
      "learning_rate": 8.817407757805109e-07,
      "logits/chosen": -2.9240283966064453,
      "logits/rejected": -3.0465874671936035,
      "logps/chosen": -282.1627197265625,
      "logps/rejected": -223.1807098388672,
      "loss": 0.0522,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": 0.8215107321739197,
      "rewards/margins": 4.242582321166992,
      "rewards/rejected": -3.421071767807007,
      "step": 750
    },
    {
      "epoch": 0.7093875620714116,
      "eval_logits/chosen": -2.9431002140045166,
      "eval_logits/rejected": -3.0670864582061768,
      "eval_logps/chosen": -285.1332092285156,
      "eval_logps/rejected": -226.6123046875,
      "eval_loss": 0.0461815781891346,
      "eval_rewards/accuracies": 0.9943289160728455,
      "eval_rewards/chosen": 0.8619542717933655,
      "eval_rewards/margins": 4.538564205169678,
      "eval_rewards/rejected": -3.676610231399536,
      "eval_runtime": 562.549,
      "eval_samples_per_second": 3.76,
      "eval_steps_per_second": 1.881,
      "step": 750
    },
    {
      "epoch": 0.851265074485694,
      "grad_norm": 0.37568044662475586,
      "learning_rate": 8.580889309366131e-07,
      "logits/chosen": -2.9640204906463623,
      "logits/rejected": -3.1055846214294434,
      "logps/chosen": -286.8418273925781,
      "logps/rejected": -232.3475799560547,
      "loss": 0.0416,
      "rewards/accuracies": 0.9958333373069763,
      "rewards/chosen": 0.5082718729972839,
      "rewards/margins": 4.854653358459473,
      "rewards/rejected": -4.346381664276123,
      "step": 900
    },
    {
      "epoch": 0.851265074485694,
      "eval_logits/chosen": -2.946694850921631,
      "eval_logits/rejected": -3.080843210220337,
      "eval_logps/chosen": -286.02020263671875,
      "eval_logps/rejected": -232.0843048095703,
      "eval_loss": 0.039623409509658813,
      "eval_rewards/accuracies": 0.9957466721534729,
      "eval_rewards/chosen": 0.7732585668563843,
      "eval_rewards/margins": 4.997069835662842,
      "eval_rewards/rejected": -4.223811626434326,
      "eval_runtime": 563.1761,
      "eval_samples_per_second": 3.755,
      "eval_steps_per_second": 1.879,
      "step": 900
    },
    {
      "epoch": 0.9931425868999764,
      "grad_norm": 10.330118179321289,
      "learning_rate": 8.344370860927152e-07,
      "logits/chosen": -2.9750888347625732,
      "logits/rejected": -3.1029014587402344,
      "logps/chosen": -288.4482421875,
      "logps/rejected": -237.4921417236328,
      "loss": 0.0382,
      "rewards/accuracies": 0.9941666722297668,
      "rewards/chosen": 0.47679439187049866,
      "rewards/margins": 5.323023796081543,
      "rewards/rejected": -4.846229076385498,
      "step": 1050
    },
    {
      "epoch": 0.9931425868999764,
      "eval_logits/chosen": -2.9789628982543945,
      "eval_logits/rejected": -3.1136224269866943,
      "eval_logps/chosen": -287.87921142578125,
      "eval_logps/rejected": -238.6780548095703,
      "eval_loss": 0.0340222492814064,
      "eval_rewards/accuracies": 0.9948015213012695,
      "eval_rewards/chosen": 0.587354838848114,
      "eval_rewards/margins": 5.470540523529053,
      "eval_rewards/rejected": -4.883185863494873,
      "eval_runtime": 563.0622,
      "eval_samples_per_second": 3.756,
      "eval_steps_per_second": 1.879,
      "step": 1050
    },
    {
      "epoch": 1.1343107117521873,
      "grad_norm": 2.3795278072357178,
      "learning_rate": 8.107852412488174e-07,
      "logits/chosen": -3.019599676132202,
      "logits/rejected": -3.1642258167266846,
      "logps/chosen": -289.4803466796875,
      "logps/rejected": -247.00460815429688,
      "loss": 0.0232,
      "rewards/accuracies": 0.999162495136261,
      "rewards/chosen": 0.016025420278310776,
      "rewards/margins": 5.90906286239624,
      "rewards/rejected": -5.893036842346191,
      "step": 1200
    },
    {
      "epoch": 1.1343107117521873,
      "eval_logits/chosen": -3.02445650100708,
      "eval_logits/rejected": -3.166768789291382,
      "eval_logps/chosen": -292.52386474609375,
      "eval_logps/rejected": -247.54971313476562,
      "eval_loss": 0.0312405526638031,
      "eval_rewards/accuracies": 0.9938563108444214,
      "eval_rewards/chosen": 0.12289439141750336,
      "eval_rewards/margins": 5.893245220184326,
      "eval_rewards/rejected": -5.770349979400635,
      "eval_runtime": 562.6683,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.88,
      "step": 1200
    },
    {
      "epoch": 1.2761882241664697,
      "grad_norm": 0.9838615655899048,
      "learning_rate": 7.871333964049196e-07,
      "logits/chosen": -3.0383057594299316,
      "logits/rejected": -3.189190149307251,
      "logps/chosen": -291.999267578125,
      "logps/rejected": -253.01220703125,
      "loss": 0.0322,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": 0.02593408152461052,
      "rewards/margins": 6.261542797088623,
      "rewards/rejected": -6.2356085777282715,
      "step": 1350
    },
    {
      "epoch": 1.2761882241664697,
      "eval_logits/chosen": -3.048924446105957,
      "eval_logits/rejected": -3.1801936626434326,
      "eval_logps/chosen": -296.5158996582031,
      "eval_logps/rejected": -256.31951904296875,
      "eval_loss": 0.0295143760740757,
      "eval_rewards/accuracies": 0.9914934039115906,
      "eval_rewards/chosen": -0.2763078212738037,
      "eval_rewards/margins": 6.371026039123535,
      "eval_rewards/rejected": -6.647334098815918,
      "eval_runtime": 563.5207,
      "eval_samples_per_second": 3.753,
      "eval_steps_per_second": 1.877,
      "step": 1350
    },
    {
      "epoch": 1.4180657365807519,
      "grad_norm": 0.705359697341919,
      "learning_rate": 7.634815515610217e-07,
      "logits/chosen": -3.047039270401001,
      "logits/rejected": -3.1857874393463135,
      "logps/chosen": -298.34417724609375,
      "logps/rejected": -261.52630615234375,
      "loss": 0.0185,
      "rewards/accuracies": 0.9983333349227905,
      "rewards/chosen": -0.4116480052471161,
      "rewards/margins": 6.801379203796387,
      "rewards/rejected": -7.213026523590088,
      "step": 1500
    },
    {
      "epoch": 1.4180657365807519,
      "eval_logits/chosen": -3.0422451496124268,
      "eval_logits/rejected": -3.189257860183716,
      "eval_logps/chosen": -293.9241027832031,
      "eval_logps/rejected": -256.4940185546875,
      "eval_loss": 0.025707272812724113,
      "eval_rewards/accuracies": 0.9938563108444214,
      "eval_rewards/chosen": -0.017133429646492004,
      "eval_rewards/margins": 6.64764928817749,
      "eval_rewards/rejected": -6.664782524108887,
      "eval_runtime": 562.6032,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.881,
      "step": 1500
    },
    {
      "epoch": 1.5599432489950344,
      "grad_norm": 0.3177180290222168,
      "learning_rate": 7.398297067171239e-07,
      "logits/chosen": -3.0570619106292725,
      "logits/rejected": -3.196061611175537,
      "logps/chosen": -296.9585266113281,
      "logps/rejected": -262.5743103027344,
      "loss": 0.0336,
      "rewards/accuracies": 0.9958333373069763,
      "rewards/chosen": -0.3190596401691437,
      "rewards/margins": 7.057786464691162,
      "rewards/rejected": -7.376845836639404,
      "step": 1650
    },
    {
      "epoch": 1.5599432489950344,
      "eval_logits/chosen": -3.0481271743774414,
      "eval_logits/rejected": -3.193105459213257,
      "eval_logps/chosen": -294.719482421875,
      "eval_logps/rejected": -261.1886291503906,
      "eval_loss": 0.024315308779478073,
      "eval_rewards/accuracies": 0.9938563108444214,
      "eval_rewards/chosen": -0.09667125344276428,
      "eval_rewards/margins": 7.037571430206299,
      "eval_rewards/rejected": -7.134242534637451,
      "eval_runtime": 562.6021,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.881,
      "step": 1650
    },
    {
      "epoch": 1.7018207614093166,
      "grad_norm": 1.28083336353302,
      "learning_rate": 7.16177861873226e-07,
      "logits/chosen": -3.0715489387512207,
      "logits/rejected": -3.202450752258301,
      "logps/chosen": -296.61590576171875,
      "logps/rejected": -270.3304443359375,
      "loss": 0.0175,
      "rewards/accuracies": 0.9950000047683716,
      "rewards/chosen": -0.5375873446464539,
      "rewards/margins": 7.566472053527832,
      "rewards/rejected": -8.104060173034668,
      "step": 1800
    },
    {
      "epoch": 1.7018207614093166,
      "eval_logits/chosen": -3.0735578536987305,
      "eval_logits/rejected": -3.224771022796631,
      "eval_logps/chosen": -295.2926025390625,
      "eval_logps/rejected": -264.1260681152344,
      "eval_loss": 0.02150208130478859,
      "eval_rewards/accuracies": 0.9957466721534729,
      "eval_rewards/chosen": -0.15398064255714417,
      "eval_rewards/margins": 7.2740068435668945,
      "eval_rewards/rejected": -7.427987098693848,
      "eval_runtime": 562.9492,
      "eval_samples_per_second": 3.757,
      "eval_steps_per_second": 1.879,
      "step": 1800
    },
    {
      "epoch": 1.843698273823599,
      "grad_norm": 0.21602575480937958,
      "learning_rate": 6.925260170293283e-07,
      "logits/chosen": -3.103926181793213,
      "logits/rejected": -3.251049280166626,
      "logps/chosen": -296.3301086425781,
      "logps/rejected": -273.81640625,
      "loss": 0.0155,
      "rewards/accuracies": 0.9975000023841858,
      "rewards/chosen": -0.617112398147583,
      "rewards/margins": 7.931570529937744,
      "rewards/rejected": -8.54868221282959,
      "step": 1950
    },
    {
      "epoch": 1.843698273823599,
      "eval_logits/chosen": -3.1030561923980713,
      "eval_logits/rejected": -3.2201902866363525,
      "eval_logps/chosen": -301.2184143066406,
      "eval_logps/rejected": -281.40008544921875,
      "eval_loss": 0.022298738360404968,
      "eval_rewards/accuracies": 0.9943289160728455,
      "eval_rewards/chosen": -0.746565043926239,
      "eval_rewards/margins": 8.40882396697998,
      "eval_rewards/rejected": -9.155388832092285,
      "eval_runtime": 563.115,
      "eval_samples_per_second": 3.756,
      "eval_steps_per_second": 1.879,
      "step": 1950
    },
    {
      "epoch": 1.9855757862378813,
      "grad_norm": 0.49206382036209106,
      "learning_rate": 6.688741721854304e-07,
      "logits/chosen": -3.116490125656128,
      "logits/rejected": -3.230647087097168,
      "logps/chosen": -301.8218078613281,
      "logps/rejected": -286.8536376953125,
      "loss": 0.0169,
      "rewards/accuracies": 0.9975000023841858,
      "rewards/chosen": -1.0506435632705688,
      "rewards/margins": 8.755069732666016,
      "rewards/rejected": -9.80571460723877,
      "step": 2100
    },
    {
      "epoch": 1.9855757862378813,
      "eval_logits/chosen": -3.11275053024292,
      "eval_logits/rejected": -3.2121145725250244,
      "eval_logps/chosen": -302.8241271972656,
      "eval_logps/rejected": -287.6995544433594,
      "eval_loss": 0.021507343277335167,
      "eval_rewards/accuracies": 0.9938563108444214,
      "eval_rewards/chosen": -0.9071367383003235,
      "eval_rewards/margins": 8.878198623657227,
      "eval_rewards/rejected": -9.785334587097168,
      "eval_runtime": 562.6464,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.88,
      "step": 2100
    },
    {
      "epoch": 2.126743911090092,
      "grad_norm": 0.9229658246040344,
      "learning_rate": 6.452223273415326e-07,
      "logits/chosen": -3.1183557510375977,
      "logits/rejected": -3.2241806983947754,
      "logps/chosen": -295.1089172363281,
      "logps/rejected": -285.6842346191406,
      "loss": 0.0114,
      "rewards/accuracies": 0.9966499209403992,
      "rewards/chosen": -0.7439343929290771,
      "rewards/margins": 9.040164947509766,
      "rewards/rejected": -9.784099578857422,
      "step": 2250
    },
    {
      "epoch": 2.126743911090092,
      "eval_logits/chosen": -3.11570405960083,
      "eval_logits/rejected": -3.198913335800171,
      "eval_logps/chosen": -303.6178894042969,
      "eval_logps/rejected": -293.61297607421875,
      "eval_loss": 0.02255001664161682,
      "eval_rewards/accuracies": 0.9933837652206421,
      "eval_rewards/chosen": -0.9865113496780396,
      "eval_rewards/margins": 9.390168190002441,
      "eval_rewards/rejected": -10.376679420471191,
      "eval_runtime": 562.5858,
      "eval_samples_per_second": 3.759,
      "eval_steps_per_second": 1.881,
      "step": 2250
    },
    {
      "epoch": 2.2686214235043747,
      "grad_norm": 1.9584112167358398,
      "learning_rate": 6.215704824976348e-07,
      "logits/chosen": -3.115262985229492,
      "logits/rejected": -3.222729206085205,
      "logps/chosen": -299.4010009765625,
      "logps/rejected": -296.6014404296875,
      "loss": 0.0315,
      "rewards/accuracies": 0.9975000023841858,
      "rewards/chosen": -1.011648416519165,
      "rewards/margins": 9.797198295593262,
      "rewards/rejected": -10.808846473693848,
      "step": 2400
    },
    {
      "epoch": 2.2686214235043747,
      "eval_logits/chosen": -3.1395390033721924,
      "eval_logits/rejected": -3.2528908252716064,
      "eval_logps/chosen": -303.15643310546875,
      "eval_logps/rejected": -292.42431640625,
      "eval_loss": 0.019048307090997696,
      "eval_rewards/accuracies": 0.9948015213012695,
      "eval_rewards/chosen": -0.9403614401817322,
      "eval_rewards/margins": 9.317451477050781,
      "eval_rewards/rejected": -10.2578125,
      "eval_runtime": 563.9817,
      "eval_samples_per_second": 3.75,
      "eval_steps_per_second": 1.876,
      "step": 2400
    },
    {
      "epoch": 2.410498935918657,
      "grad_norm": 1.9372230768203735,
      "learning_rate": 5.979186376537369e-07,
      "logits/chosen": -3.1377475261688232,
      "logits/rejected": -3.264430046081543,
      "logps/chosen": -303.7994384765625,
      "logps/rejected": -298.91485595703125,
      "loss": 0.012,
      "rewards/accuracies": 0.9983333349227905,
      "rewards/chosen": -1.0909898281097412,
      "rewards/margins": 9.924967765808105,
      "rewards/rejected": -11.015958786010742,
      "step": 2550
    },
    {
      "epoch": 2.410498935918657,
      "eval_logits/chosen": -3.118379592895508,
      "eval_logits/rejected": -3.2569830417633057,
      "eval_logps/chosen": -298.316162109375,
      "eval_logps/rejected": -283.5343322753906,
      "eval_loss": 0.017369672656059265,
      "eval_rewards/accuracies": 0.996219277381897,
      "eval_rewards/chosen": -0.4563392996788025,
      "eval_rewards/margins": 8.912469863891602,
      "eval_rewards/rejected": -9.368810653686523,
      "eval_runtime": 563.7804,
      "eval_samples_per_second": 3.751,
      "eval_steps_per_second": 1.877,
      "step": 2550
    },
    {
      "epoch": 2.5523764483329394,
      "grad_norm": 2.599371910095215,
      "learning_rate": 5.742667928098392e-07,
      "logits/chosen": -3.1383397579193115,
      "logits/rejected": -3.241206169128418,
      "logps/chosen": -307.1145324707031,
      "logps/rejected": -304.4930114746094,
      "loss": 0.012,
      "rewards/accuracies": 0.9983333349227905,
      "rewards/chosen": -1.1029903888702393,
      "rewards/margins": 10.304451942443848,
      "rewards/rejected": -11.407442092895508,
      "step": 2700
    },
    {
      "epoch": 2.5523764483329394,
      "eval_logits/chosen": -3.127473831176758,
      "eval_logits/rejected": -3.245652198791504,
      "eval_logps/chosen": -301.93988037109375,
      "eval_logps/rejected": -291.9409484863281,
      "eval_loss": 0.01718134619295597,
      "eval_rewards/accuracies": 0.996219277381897,
      "eval_rewards/chosen": -0.8187108635902405,
      "eval_rewards/margins": 9.390762329101562,
      "eval_rewards/rejected": -10.209474563598633,
      "eval_runtime": 564.551,
      "eval_samples_per_second": 3.746,
      "eval_steps_per_second": 1.874,
      "step": 2700
    },
    {
      "epoch": 2.6942539607472216,
      "grad_norm": 0.546609103679657,
      "learning_rate": 5.506149479659413e-07,
      "logits/chosen": -3.126167058944702,
      "logits/rejected": -3.2429487705230713,
      "logps/chosen": -300.4310302734375,
      "logps/rejected": -300.19415283203125,
      "loss": 0.0325,
      "rewards/accuracies": 0.9975000023841858,
      "rewards/chosen": -0.9882186651229858,
      "rewards/margins": 10.136972427368164,
      "rewards/rejected": -11.125190734863281,
      "step": 2850
    },
    {
      "epoch": 2.6942539607472216,
      "eval_logits/chosen": -3.134120225906372,
      "eval_logits/rejected": -3.2371535301208496,
      "eval_logps/chosen": -302.8757629394531,
      "eval_logps/rejected": -298.766357421875,
      "eval_loss": 0.01783796213567257,
      "eval_rewards/accuracies": 0.9952741265296936,
      "eval_rewards/chosen": -0.9122979044914246,
      "eval_rewards/margins": 9.979716300964355,
      "eval_rewards/rejected": -10.892014503479004,
      "eval_runtime": 563.1261,
      "eval_samples_per_second": 3.756,
      "eval_steps_per_second": 1.879,
      "step": 2850
    },
    {
      "epoch": 2.8361314731615037,
      "grad_norm": 0.13827018439769745,
      "learning_rate": 5.269631031220436e-07,
      "logits/chosen": -3.1727616786956787,
      "logits/rejected": -3.2297608852386475,
      "logps/chosen": -307.07745361328125,
      "logps/rejected": -316.3017272949219,
      "loss": 0.0076,
      "rewards/accuracies": 0.9983333349227905,
      "rewards/chosen": -1.8074291944503784,
      "rewards/margins": 11.044861793518066,
      "rewards/rejected": -12.852290153503418,
      "step": 3000
    },
    {
      "epoch": 2.8361314731615037,
      "eval_logits/chosen": -3.1847798824310303,
      "eval_logits/rejected": -3.0981080532073975,
      "eval_logps/chosen": -336.5445861816406,
      "eval_logps/rejected": -362.6168212890625,
      "eval_loss": 0.037136226892471313,
      "eval_rewards/accuracies": 0.9867674708366394,
      "eval_rewards/chosen": -4.279180526733398,
      "eval_rewards/margins": 12.997879981994629,
      "eval_rewards/rejected": -17.277057647705078,
      "eval_runtime": 765.7084,
      "eval_samples_per_second": 2.762,
      "eval_steps_per_second": 1.382,
      "step": 3000
    }
  ],
  "logging_steps": 150,
  "max_steps": 6342,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 150,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}