diff --git "a/trainer_state.json" "b/trainer_state.json" new file mode 100644--- /dev/null +++ "b/trainer_state.json" @@ -0,0 +1,5715 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 3.0, + "eval_steps": 500, + "global_step": 3744, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0008012820512820513, + "grad_norm": 64.04028419295403, + "learning_rate": 1.3333333333333333e-09, + "logits/chosen": 0.12353515625, + "logits/rejected": 0.224609375, + "logps/chosen": -66.0, + "logps/rejected": -100.0, + "loss": 0.6914, + "rewards/accuracies": 0.0, + "rewards/chosen": 0.0, + "rewards/margins": 0.0, + "rewards/rejected": 0.0, + "step": 1 + }, + { + "epoch": 0.008012820512820512, + "grad_norm": 86.24089113539306, + "learning_rate": 1.3333333333333334e-08, + "logits/chosen": 0.1748046875, + "logits/rejected": 0.1181640625, + "logps/chosen": -189.0, + "logps/rejected": -129.0, + "loss": 0.6925, + "rewards/accuracies": 0.3333333432674408, + "rewards/chosen": 0.006256103515625, + "rewards/margins": 0.009033203125, + "rewards/rejected": -0.0027923583984375, + "step": 10 + }, + { + "epoch": 0.016025641025641024, + "grad_norm": 89.22477084650417, + "learning_rate": 2.6666666666666667e-08, + "logits/chosen": 0.052490234375, + "logits/rejected": 0.1142578125, + "logps/chosen": -206.0, + "logps/rejected": -130.0, + "loss": 0.6951, + "rewards/accuracies": 0.22499999403953552, + "rewards/chosen": -0.014404296875, + "rewards/margins": -0.0194091796875, + "rewards/rejected": 0.0050048828125, + "step": 20 + }, + { + "epoch": 0.02403846153846154, + "grad_norm": 93.71269866728797, + "learning_rate": 4e-08, + "logits/chosen": 0.0869140625, + "logits/rejected": 0.0299072265625, + "logps/chosen": -183.0, + "logps/rejected": -135.0, + "loss": 0.6887, + "rewards/accuracies": 0.4000000059604645, + "rewards/chosen": 0.0087890625, + "rewards/margins": 0.0181884765625, + "rewards/rejected": -0.0093994140625, + "step": 30 + }, + { + "epoch": 0.03205128205128205, + "grad_norm": 89.05552052917865, + "learning_rate": 5.3333333333333334e-08, + "logits/chosen": 0.2392578125, + "logits/rejected": 0.13671875, + "logps/chosen": -205.0, + "logps/rejected": -138.0, + "loss": 0.6902, + "rewards/accuracies": 0.32499998807907104, + "rewards/chosen": 0.005615234375, + "rewards/margins": 0.00311279296875, + "rewards/rejected": 0.0025177001953125, + "step": 40 + }, + { + "epoch": 0.04006410256410257, + "grad_norm": 87.83482919892937, + "learning_rate": 6.666666666666667e-08, + "logits/chosen": 0.1875, + "logits/rejected": 0.0576171875, + "logps/chosen": -158.0, + "logps/rejected": -119.5, + "loss": 0.6711, + "rewards/accuracies": 0.4749999940395355, + "rewards/chosen": 0.022216796875, + "rewards/margins": 0.035888671875, + "rewards/rejected": -0.0137939453125, + "step": 50 + }, + { + "epoch": 0.04807692307692308, + "grad_norm": 82.90814662285723, + "learning_rate": 8e-08, + "logits/chosen": 0.36328125, + "logits/rejected": 0.166015625, + "logps/chosen": -224.0, + "logps/rejected": -134.0, + "loss": 0.6447, + "rewards/accuracies": 0.800000011920929, + "rewards/chosen": 0.0634765625, + "rewards/margins": 0.11279296875, + "rewards/rejected": -0.04931640625, + "step": 60 + }, + { + "epoch": 0.05608974358974359, + "grad_norm": 83.47212098678433, + "learning_rate": 9.333333333333334e-08, + "logits/chosen": 0.306640625, + "logits/rejected": 0.298828125, + "logps/chosen": -196.0, + "logps/rejected": -132.0, + "loss": 0.6162, + 
"rewards/accuracies": 0.8999999761581421, + "rewards/chosen": 0.07177734375, + "rewards/margins": 0.154296875, + "rewards/rejected": -0.08203125, + "step": 70 + }, + { + "epoch": 0.0641025641025641, + "grad_norm": 72.70460499175852, + "learning_rate": 1.0666666666666667e-07, + "logits/chosen": 0.248046875, + "logits/rejected": 0.201171875, + "logps/chosen": -202.0, + "logps/rejected": -136.0, + "loss": 0.5689, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.140625, + "rewards/margins": 0.294921875, + "rewards/rejected": -0.154296875, + "step": 80 + }, + { + "epoch": 0.07211538461538461, + "grad_norm": 65.1729310926141, + "learning_rate": 1.2e-07, + "logits/chosen": 0.2255859375, + "logits/rejected": 0.064453125, + "logps/chosen": -182.0, + "logps/rejected": -130.0, + "loss": 0.5326, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.1494140625, + "rewards/margins": 0.34765625, + "rewards/rejected": -0.197265625, + "step": 90 + }, + { + "epoch": 0.08012820512820513, + "grad_norm": 52.76168152275494, + "learning_rate": 1.3333333333333334e-07, + "logits/chosen": 0.38671875, + "logits/rejected": 0.11474609375, + "logps/chosen": -209.0, + "logps/rejected": -153.0, + "loss": 0.4703, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.2578125, + "rewards/margins": 0.51953125, + "rewards/rejected": -0.259765625, + "step": 100 + }, + { + "epoch": 0.08814102564102565, + "grad_norm": 47.501730286877184, + "learning_rate": 1.4666666666666666e-07, + "logits/chosen": 0.296875, + "logits/rejected": 0.3125, + "logps/chosen": -191.0, + "logps/rejected": -149.0, + "loss": 0.3988, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.33203125, + "rewards/margins": 0.80078125, + "rewards/rejected": -0.466796875, + "step": 110 + }, + { + "epoch": 0.09615384615384616, + "grad_norm": 40.40573175478169, + "learning_rate": 1.6e-07, + "logits/chosen": 0.1884765625, + "logits/rejected": 0.283203125, + "logps/chosen": -212.0, + "logps/rejected": -145.0, + "loss": 0.3222, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.421875, + "rewards/margins": 1.015625, + "rewards/rejected": -0.59375, + "step": 120 + }, + { + "epoch": 0.10416666666666667, + "grad_norm": 27.672418710748364, + "learning_rate": 1.7333333333333332e-07, + "logits/chosen": 0.359375, + "logits/rejected": 0.365234375, + "logps/chosen": -220.0, + "logps/rejected": -156.0, + "loss": 0.2564, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.66015625, + "rewards/margins": 1.453125, + "rewards/rejected": -0.79296875, + "step": 130 + }, + { + "epoch": 0.11217948717948718, + "grad_norm": 30.919301140228264, + "learning_rate": 1.8666666666666667e-07, + "logits/chosen": 0.1298828125, + "logits/rejected": 0.412109375, + "logps/chosen": -193.0, + "logps/rejected": -151.0, + "loss": 0.196, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.6875, + "rewards/margins": 1.828125, + "rewards/rejected": -1.140625, + "step": 140 + }, + { + "epoch": 0.1201923076923077, + "grad_norm": 21.901759461519386, + "learning_rate": 2e-07, + "logits/chosen": 0.32421875, + "logits/rejected": 0.30859375, + "logps/chosen": -207.0, + "logps/rejected": -150.0, + "loss": 0.1502, + "rewards/accuracies": 1.0, + "rewards/chosen": 0.69921875, + "rewards/margins": 2.046875, + "rewards/rejected": -1.3515625, + "step": 150 + }, + { + "epoch": 0.1282051282051282, + "grad_norm": 29.817507103981917, + "learning_rate": 2.1333333333333334e-07, + "logits/chosen": 0.216796875, + "logits/rejected": 0.384765625, + "logps/chosen": -209.0, + "logps/rejected": -143.0, + "loss": 0.1123, + 
"rewards/accuracies": 1.0, + "rewards/chosen": 0.84765625, + "rewards/margins": 2.359375, + "rewards/rejected": -1.5078125, + "step": 160 + }, + { + "epoch": 0.1362179487179487, + "grad_norm": 8.498490051434544, + "learning_rate": 2.2666666666666663e-07, + "logits/chosen": 0.51171875, + "logits/rejected": 0.46875, + "logps/chosen": -182.0, + "logps/rejected": -146.0, + "loss": 0.0913, + "rewards/accuracies": 1.0, + "rewards/chosen": 1.046875, + "rewards/margins": 3.0, + "rewards/rejected": -1.9453125, + "step": 170 + }, + { + "epoch": 0.14423076923076922, + "grad_norm": 7.296587918545859, + "learning_rate": 2.4e-07, + "logits/chosen": 0.234375, + "logits/rejected": 0.2216796875, + "logps/chosen": -224.0, + "logps/rejected": -172.0, + "loss": 0.0551, + "rewards/accuracies": 1.0, + "rewards/chosen": 1.4296875, + "rewards/margins": 3.734375, + "rewards/rejected": -2.296875, + "step": 180 + }, + { + "epoch": 0.15224358974358973, + "grad_norm": 6.034643267260365, + "learning_rate": 2.533333333333333e-07, + "logits/chosen": 0.443359375, + "logits/rejected": 0.333984375, + "logps/chosen": -198.0, + "logps/rejected": -168.0, + "loss": 0.0434, + "rewards/accuracies": 1.0, + "rewards/chosen": 1.515625, + "rewards/margins": 4.1875, + "rewards/rejected": -2.671875, + "step": 190 + }, + { + "epoch": 0.16025641025641027, + "grad_norm": 3.782776063442764, + "learning_rate": 2.6666666666666667e-07, + "logits/chosen": 0.451171875, + "logits/rejected": 0.5390625, + "logps/chosen": -174.0, + "logps/rejected": -156.0, + "loss": 0.042, + "rewards/accuracies": 1.0, + "rewards/chosen": 1.75, + "rewards/margins": 4.6875, + "rewards/rejected": -2.9375, + "step": 200 + }, + { + "epoch": 0.16826923076923078, + "grad_norm": 2.101068154945795, + "learning_rate": 2.8e-07, + "logits/chosen": 0.57421875, + "logits/rejected": 0.578125, + "logps/chosen": -171.0, + "logps/rejected": -160.0, + "loss": 0.0221, + "rewards/accuracies": 1.0, + "rewards/chosen": 1.609375, + "rewards/margins": 4.9375, + "rewards/rejected": -3.328125, + "step": 210 + }, + { + "epoch": 0.1762820512820513, + "grad_norm": 2.1370635684042583, + "learning_rate": 2.933333333333333e-07, + "logits/chosen": 0.39453125, + "logits/rejected": 0.388671875, + "logps/chosen": -145.0, + "logps/rejected": -164.0, + "loss": 0.0218, + "rewards/accuracies": 0.9750000238418579, + "rewards/chosen": 1.5625, + "rewards/margins": 5.375, + "rewards/rejected": -3.828125, + "step": 220 + }, + { + "epoch": 0.1842948717948718, + "grad_norm": 6.202641598231744, + "learning_rate": 3.066666666666666e-07, + "logits/chosen": 0.5546875, + "logits/rejected": 0.78125, + "logps/chosen": -143.0, + "logps/rejected": -166.0, + "loss": 0.0142, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.0625, + "rewards/margins": 6.09375, + "rewards/rejected": -4.03125, + "step": 230 + }, + { + "epoch": 0.19230769230769232, + "grad_norm": 3.408195034795853, + "learning_rate": 3.2e-07, + "logits/chosen": 0.416015625, + "logits/rejected": 0.640625, + "logps/chosen": -172.0, + "logps/rejected": -169.0, + "loss": 0.0257, + "rewards/accuracies": 0.9750000238418579, + "rewards/chosen": 1.75, + "rewards/margins": 5.9375, + "rewards/rejected": -4.1875, + "step": 240 + }, + { + "epoch": 0.20032051282051283, + "grad_norm": 0.7951516189038192, + "learning_rate": 3.333333333333333e-07, + "logits/chosen": 0.443359375, + "logits/rejected": 0.439453125, + "logps/chosen": -180.0, + "logps/rejected": -169.0, + "loss": 0.0042, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.421875, + "rewards/margins": 7.15625, + 
"rewards/rejected": -4.71875, + "step": 250 + }, + { + "epoch": 0.20833333333333334, + "grad_norm": 0.6828867328882736, + "learning_rate": 3.4666666666666665e-07, + "logits/chosen": 0.671875, + "logits/rejected": 0.5859375, + "logps/chosen": -105.5, + "logps/rejected": -189.0, + "loss": 0.002, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.421875, + "rewards/margins": 8.1875, + "rewards/rejected": -5.75, + "step": 260 + }, + { + "epoch": 0.21634615384615385, + "grad_norm": 2.181808594179521, + "learning_rate": 3.6e-07, + "logits/chosen": 0.7109375, + "logits/rejected": 0.5859375, + "logps/chosen": -148.0, + "logps/rejected": -187.0, + "loss": 0.0079, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.5, + "rewards/margins": 7.09375, + "rewards/rejected": -4.59375, + "step": 270 + }, + { + "epoch": 0.22435897435897437, + "grad_norm": 0.38268137385988604, + "learning_rate": 3.7333333333333334e-07, + "logits/chosen": 0.5234375, + "logits/rejected": 0.59765625, + "logps/chosen": -188.0, + "logps/rejected": -190.0, + "loss": 0.0051, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.625, + "rewards/margins": 8.3125, + "rewards/rejected": -5.6875, + "step": 280 + }, + { + "epoch": 0.23237179487179488, + "grad_norm": 0.4512252150149582, + "learning_rate": 3.8666666666666664e-07, + "logits/chosen": 0.5546875, + "logits/rejected": 0.55078125, + "logps/chosen": -151.0, + "logps/rejected": -190.0, + "loss": 0.0025, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.65625, + "rewards/margins": 8.625, + "rewards/rejected": -6.0, + "step": 290 + }, + { + "epoch": 0.2403846153846154, + "grad_norm": 0.584501118833293, + "learning_rate": 4e-07, + "logits/chosen": 0.287109375, + "logits/rejected": 0.5703125, + "logps/chosen": -160.0, + "logps/rejected": -183.0, + "loss": 0.0222, + "rewards/accuracies": 0.9750000238418579, + "rewards/chosen": 2.71875, + "rewards/margins": 8.4375, + "rewards/rejected": -5.75, + "step": 300 + }, + { + "epoch": 0.2483974358974359, + "grad_norm": 1.2282710731271989, + "learning_rate": 4.1333333333333333e-07, + "logits/chosen": 0.5234375, + "logits/rejected": 0.5703125, + "logps/chosen": -162.0, + "logps/rejected": -217.0, + "loss": 0.0014, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.90625, + "rewards/margins": 9.5625, + "rewards/rejected": -6.65625, + "step": 310 + }, + { + "epoch": 0.2564102564102564, + "grad_norm": 0.22314938141969023, + "learning_rate": 4.266666666666667e-07, + "logits/chosen": 0.36328125, + "logits/rejected": 0.6796875, + "logps/chosen": -147.0, + "logps/rejected": -208.0, + "loss": 0.0025, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.625, + "rewards/margins": 9.625, + "rewards/rejected": -6.96875, + "step": 320 + }, + { + "epoch": 0.2644230769230769, + "grad_norm": 4.201699473979923, + "learning_rate": 4.3999999999999997e-07, + "logits/chosen": 0.4375, + "logits/rejected": 0.52734375, + "logps/chosen": -203.0, + "logps/rejected": -194.0, + "loss": 0.0057, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.046875, + "rewards/margins": 9.6875, + "rewards/rejected": -6.65625, + "step": 330 + }, + { + "epoch": 0.2724358974358974, + "grad_norm": 0.024959093936575825, + "learning_rate": 4.5333333333333326e-07, + "logits/chosen": 0.5234375, + "logits/rejected": 0.73828125, + "logps/chosen": -148.0, + "logps/rejected": -197.0, + "loss": 0.0016, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.8125, + "rewards/margins": 9.5625, + "rewards/rejected": -6.75, + "step": 340 + }, + { + "epoch": 0.28044871794871795, + "grad_norm": 0.5323887354832039, + 
"learning_rate": 4.6666666666666666e-07, + "logits/chosen": 0.59765625, + "logits/rejected": 0.64453125, + "logps/chosen": -171.0, + "logps/rejected": -211.0, + "loss": 0.0008, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.140625, + "rewards/margins": 10.4375, + "rewards/rejected": -7.3125, + "step": 350 + }, + { + "epoch": 0.28846153846153844, + "grad_norm": 1.2240810840279064, + "learning_rate": 4.8e-07, + "logits/chosen": 0.458984375, + "logits/rejected": 0.59375, + "logps/chosen": -157.0, + "logps/rejected": -223.0, + "loss": 0.0018, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.109375, + "rewards/margins": 10.9375, + "rewards/rejected": -7.8125, + "step": 360 + }, + { + "epoch": 0.296474358974359, + "grad_norm": 0.04698426123066838, + "learning_rate": 4.933333333333333e-07, + "logits/chosen": 0.5625, + "logits/rejected": 0.75, + "logps/chosen": -144.0, + "logps/rejected": -225.0, + "loss": 0.0081, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.34375, + "rewards/margins": 11.375, + "rewards/rejected": -8.0625, + "step": 370 + }, + { + "epoch": 0.30448717948717946, + "grad_norm": 0.14652045043202064, + "learning_rate": 4.992579400415554e-07, + "logits/chosen": 0.451171875, + "logits/rejected": 0.796875, + "logps/chosen": -155.0, + "logps/rejected": -226.0, + "loss": 0.001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.21875, + "rewards/margins": 11.625, + "rewards/rejected": -8.375, + "step": 380 + }, + { + "epoch": 0.3125, + "grad_norm": 0.005895445560068654, + "learning_rate": 4.97773820124666e-07, + "logits/chosen": 0.248046875, + "logits/rejected": 0.7421875, + "logps/chosen": -182.0, + "logps/rejected": -225.0, + "loss": 0.002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 11.4375, + "rewards/rejected": -8.1875, + "step": 390 + }, + { + "epoch": 0.32051282051282054, + "grad_norm": 0.05299990848269769, + "learning_rate": 4.962897002077768e-07, + "logits/chosen": 0.46875, + "logits/rejected": 0.6015625, + "logps/chosen": -195.0, + "logps/rejected": -221.0, + "loss": 0.0013, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 11.3125, + "rewards/rejected": -7.8125, + "step": 400 + }, + { + "epoch": 0.328525641025641, + "grad_norm": 0.003060182358037053, + "learning_rate": 4.948055802908874e-07, + "logits/chosen": 0.609375, + "logits/rejected": 0.5546875, + "logps/chosen": -156.0, + "logps/rejected": -216.0, + "loss": 0.0071, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 11.125, + "rewards/rejected": -7.78125, + "step": 410 + }, + { + "epoch": 0.33653846153846156, + "grad_norm": 5.391503234606234, + "learning_rate": 4.933214603739982e-07, + "logits/chosen": 0.458984375, + "logits/rejected": 0.68359375, + "logps/chosen": -194.0, + "logps/rejected": -225.0, + "loss": 0.0012, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.34375, + "rewards/margins": 11.625, + "rewards/rejected": -8.25, + "step": 420 + }, + { + "epoch": 0.34455128205128205, + "grad_norm": 0.016009321940122195, + "learning_rate": 4.918373404571089e-07, + "logits/chosen": 0.58203125, + "logits/rejected": 0.7734375, + "logps/chosen": -178.0, + "logps/rejected": -230.0, + "loss": 0.0011, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.1875, + "rewards/margins": 12.0, + "rewards/rejected": -8.8125, + "step": 430 + }, + { + "epoch": 0.3525641025641026, + "grad_norm": 0.04011636896023994, + "learning_rate": 4.903532205402196e-07, + "logits/chosen": 0.5625, + "logits/rejected": 0.57421875, + "logps/chosen": -172.0, + 
"logps/rejected": -211.0, + "loss": 0.0047, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.125, + "rewards/margins": 11.125, + "rewards/rejected": -7.96875, + "step": 440 + }, + { + "epoch": 0.3605769230769231, + "grad_norm": 1.884252459181768, + "learning_rate": 4.888691006233304e-07, + "logits/chosen": 0.640625, + "logits/rejected": 0.7578125, + "logps/chosen": -144.0, + "logps/rejected": -220.0, + "loss": 0.0201, + "rewards/accuracies": 0.9750000238418579, + "rewards/chosen": 3.28125, + "rewards/margins": 12.125, + "rewards/rejected": -8.8125, + "step": 450 + }, + { + "epoch": 0.3685897435897436, + "grad_norm": 0.024673460286282534, + "learning_rate": 4.873849807064411e-07, + "logits/chosen": 0.78125, + "logits/rejected": 0.79296875, + "logps/chosen": -121.0, + "logps/rejected": -219.0, + "loss": 0.0003, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 12.3125, + "rewards/rejected": -8.875, + "step": 460 + }, + { + "epoch": 0.3766025641025641, + "grad_norm": 1.0653271799430997, + "learning_rate": 4.859008607895517e-07, + "logits/chosen": 0.8046875, + "logits/rejected": 0.8828125, + "logps/chosen": -141.0, + "logps/rejected": -223.0, + "loss": 0.0169, + "rewards/accuracies": 0.9750000238418579, + "rewards/chosen": 3.34375, + "rewards/margins": 12.1875, + "rewards/rejected": -8.8125, + "step": 470 + }, + { + "epoch": 0.38461538461538464, + "grad_norm": 0.5656275667559196, + "learning_rate": 4.844167408726625e-07, + "logits/chosen": 0.462890625, + "logits/rejected": 0.65625, + "logps/chosen": -128.0, + "logps/rejected": -209.0, + "loss": 0.0007, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.015625, + "rewards/margins": 11.625, + "rewards/rejected": -8.625, + "step": 480 + }, + { + "epoch": 0.3926282051282051, + "grad_norm": 8.514426200575306, + "learning_rate": 4.829326209557732e-07, + "logits/chosen": 0.796875, + "logits/rejected": 0.91015625, + "logps/chosen": -170.0, + "logps/rejected": -224.0, + "loss": 0.0028, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 12.75, + "rewards/rejected": -9.4375, + "step": 490 + }, + { + "epoch": 0.40064102564102566, + "grad_norm": 0.03178905527763376, + "learning_rate": 4.814485010388839e-07, + "logits/chosen": 0.8359375, + "logits/rejected": 1.0546875, + "logps/chosen": -158.0, + "logps/rejected": -250.0, + "loss": 0.0004, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 14.0, + "rewards/rejected": -10.25, + "step": 500 + }, + { + "epoch": 0.40865384615384615, + "grad_norm": 1.4325610546983785, + "learning_rate": 4.799643811219946e-07, + "logits/chosen": 0.248046875, + "logits/rejected": 0.63671875, + "logps/chosen": -203.0, + "logps/rejected": -249.0, + "loss": 0.0016, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.46875, + "rewards/margins": 12.1875, + "rewards/rejected": -9.6875, + "step": 510 + }, + { + "epoch": 0.4166666666666667, + "grad_norm": 0.12295978223631013, + "learning_rate": 4.784802612051053e-07, + "logits/chosen": 0.625, + "logits/rejected": 0.703125, + "logps/chosen": -177.0, + "logps/rejected": -226.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 13.4375, + "rewards/rejected": -9.75, + "step": 520 + }, + { + "epoch": 0.42467948717948717, + "grad_norm": 0.0028166962467554534, + "learning_rate": 4.769961412882161e-07, + "logits/chosen": 0.40625, + "logits/rejected": 0.64453125, + "logps/chosen": -203.0, + "logps/rejected": -247.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + 
"rewards/chosen": 3.6875, + "rewards/margins": 13.75, + "rewards/rejected": -10.0625, + "step": 530 + }, + { + "epoch": 0.4326923076923077, + "grad_norm": 0.010744699989711674, + "learning_rate": 4.755120213713268e-07, + "logits/chosen": 0.58984375, + "logits/rejected": 0.99609375, + "logps/chosen": -183.0, + "logps/rejected": -231.0, + "loss": 0.0008, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.796875, + "rewards/margins": 13.3125, + "rewards/rejected": -9.5, + "step": 540 + }, + { + "epoch": 0.4407051282051282, + "grad_norm": 0.021039050905194416, + "learning_rate": 4.740279014544375e-07, + "logits/chosen": 0.25390625, + "logits/rejected": 0.69140625, + "logps/chosen": -195.0, + "logps/rejected": -236.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 13.1875, + "rewards/rejected": -9.9375, + "step": 550 + }, + { + "epoch": 0.44871794871794873, + "grad_norm": 0.000724403343700553, + "learning_rate": 4.725437815375482e-07, + "logits/chosen": 0.458984375, + "logits/rejected": 0.62109375, + "logps/chosen": -180.0, + "logps/rejected": -233.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.453125, + "rewards/margins": 13.625, + "rewards/rejected": -10.25, + "step": 560 + }, + { + "epoch": 0.4567307692307692, + "grad_norm": 0.3100975481455866, + "learning_rate": 4.710596616206589e-07, + "logits/chosen": 0.5546875, + "logits/rejected": 0.79296875, + "logps/chosen": -224.0, + "logps/rejected": -233.0, + "loss": 0.0008, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 13.3125, + "rewards/rejected": -9.5, + "step": 570 + }, + { + "epoch": 0.46474358974358976, + "grad_norm": 0.05039213843756819, + "learning_rate": 4.6957554170376963e-07, + "logits/chosen": 0.671875, + "logits/rejected": 0.734375, + "logps/chosen": -157.0, + "logps/rejected": -236.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.671875, + "rewards/margins": 13.875, + "rewards/rejected": -10.25, + "step": 580 + }, + { + "epoch": 0.47275641025641024, + "grad_norm": 0.008808070533990717, + "learning_rate": 4.680914217868804e-07, + "logits/chosen": 0.71484375, + "logits/rejected": 0.95703125, + "logps/chosen": -174.0, + "logps/rejected": -244.0, + "loss": 0.0068, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 13.6875, + "rewards/rejected": -10.125, + "step": 590 + }, + { + "epoch": 0.4807692307692308, + "grad_norm": 0.001963727046741742, + "learning_rate": 4.666073018699911e-07, + "logits/chosen": 0.5703125, + "logits/rejected": 1.046875, + "logps/chosen": -168.0, + "logps/rejected": -239.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 13.5625, + "rewards/rejected": -9.875, + "step": 600 + }, + { + "epoch": 0.48878205128205127, + "grad_norm": 0.05259445682378445, + "learning_rate": 4.6512318195310177e-07, + "logits/chosen": 0.6484375, + "logits/rejected": 0.8515625, + "logps/chosen": -155.0, + "logps/rejected": -233.0, + "loss": 0.0003, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.171875, + "rewards/margins": 13.625, + "rewards/rejected": -10.5, + "step": 610 + }, + { + "epoch": 0.4967948717948718, + "grad_norm": 0.016747218645922837, + "learning_rate": 4.636390620362125e-07, + "logits/chosen": 0.625, + "logits/rejected": 0.7890625, + "logps/chosen": -198.0, + "logps/rejected": -227.0, + "loss": 0.0006, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 12.9375, + "rewards/rejected": -9.3125, + 
"step": 620 + }, + { + "epoch": 0.5048076923076923, + "grad_norm": 0.008135550511695944, + "learning_rate": 4.621549421193232e-07, + "logits/chosen": 0.373046875, + "logits/rejected": 0.609375, + "logps/chosen": -167.0, + "logps/rejected": -248.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.453125, + "rewards/margins": 14.625, + "rewards/rejected": -11.1875, + "step": 630 + }, + { + "epoch": 0.5128205128205128, + "grad_norm": 0.0014877380037952115, + "learning_rate": 4.606708222024339e-07, + "logits/chosen": 0.48046875, + "logits/rejected": 0.91015625, + "logps/chosen": -176.0, + "logps/rejected": -246.0, + "loss": 0.0012, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 13.8125, + "rewards/rejected": -10.1875, + "step": 640 + }, + { + "epoch": 0.5208333333333334, + "grad_norm": 0.03734119070490188, + "learning_rate": 4.591867022855446e-07, + "logits/chosen": 0.8203125, + "logits/rejected": 1.109375, + "logps/chosen": -140.0, + "logps/rejected": -248.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 15.25, + "rewards/rejected": -11.6875, + "step": 650 + }, + { + "epoch": 0.5288461538461539, + "grad_norm": 45.62168008765583, + "learning_rate": 4.577025823686554e-07, + "logits/chosen": 0.734375, + "logits/rejected": 1.015625, + "logps/chosen": -138.0, + "logps/rejected": -244.0, + "loss": 0.0068, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 14.9375, + "rewards/rejected": -11.3125, + "step": 660 + }, + { + "epoch": 0.5368589743589743, + "grad_norm": 0.020868978181350698, + "learning_rate": 4.562184624517661e-07, + "logits/chosen": 0.66796875, + "logits/rejected": 0.98828125, + "logps/chosen": -168.0, + "logps/rejected": -238.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 14.0625, + "rewards/rejected": -10.5, + "step": 670 + }, + { + "epoch": 0.5448717948717948, + "grad_norm": 0.0030554953116709673, + "learning_rate": 4.547343425348768e-07, + "logits/chosen": 0.92578125, + "logits/rejected": 1.2890625, + "logps/chosen": -162.0, + "logps/rejected": -237.0, + "loss": 0.0025, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 14.3125, + "rewards/rejected": -11.0, + "step": 680 + }, + { + "epoch": 0.5528846153846154, + "grad_norm": 0.10267699602656194, + "learning_rate": 4.5325022261798753e-07, + "logits/chosen": 0.76171875, + "logits/rejected": 0.703125, + "logps/chosen": -161.0, + "logps/rejected": -242.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.140625, + "rewards/margins": 14.5, + "rewards/rejected": -11.375, + "step": 690 + }, + { + "epoch": 0.5608974358974359, + "grad_norm": 1.060654904436611, + "learning_rate": 4.517661027010982e-07, + "logits/chosen": 0.5625, + "logits/rejected": 0.7265625, + "logps/chosen": -162.0, + "logps/rejected": -247.0, + "loss": 0.0057, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 14.375, + "rewards/rejected": -11.125, + "step": 700 + }, + { + "epoch": 0.5689102564102564, + "grad_norm": 0.01282600929322917, + "learning_rate": 4.502819827842089e-07, + "logits/chosen": 0.83984375, + "logits/rejected": 1.1328125, + "logps/chosen": -160.0, + "logps/rejected": -258.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.796875, + "rewards/margins": 15.5625, + "rewards/rejected": -11.75, + "step": 710 + }, + { + "epoch": 0.5769230769230769, + "grad_norm": 0.000603232958011393, + 
"learning_rate": 4.487978628673196e-07, + "logits/chosen": 0.6953125, + "logits/rejected": 0.8359375, + "logps/chosen": -154.0, + "logps/rejected": -246.0, + "loss": 0.0021, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 14.875, + "rewards/rejected": -11.375, + "step": 720 + }, + { + "epoch": 0.5849358974358975, + "grad_norm": 0.0035524082009581967, + "learning_rate": 4.473137429504304e-07, + "logits/chosen": 0.58203125, + "logits/rejected": 0.765625, + "logps/chosen": -185.0, + "logps/rejected": -258.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 15.875, + "rewards/rejected": -12.1875, + "step": 730 + }, + { + "epoch": 0.592948717948718, + "grad_norm": 0.001638664095401059, + "learning_rate": 4.458296230335411e-07, + "logits/chosen": 0.71875, + "logits/rejected": 0.89453125, + "logps/chosen": -172.0, + "logps/rejected": -239.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.609375, + "rewards/margins": 14.625, + "rewards/rejected": -11.0, + "step": 740 + }, + { + "epoch": 0.6009615384615384, + "grad_norm": 0.00017829436826559875, + "learning_rate": 4.443455031166518e-07, + "logits/chosen": 0.67578125, + "logits/rejected": 0.86328125, + "logps/chosen": -189.0, + "logps/rejected": -249.0, + "loss": 0.0011, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.671875, + "rewards/margins": 15.3125, + "rewards/rejected": -11.6875, + "step": 750 + }, + { + "epoch": 0.6089743589743589, + "grad_norm": 0.007873615629627333, + "learning_rate": 4.4286138319976253e-07, + "logits/chosen": 0.71484375, + "logits/rejected": 0.98828125, + "logps/chosen": -164.0, + "logps/rejected": -264.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.859375, + "rewards/margins": 16.375, + "rewards/rejected": -12.5, + "step": 760 + }, + { + "epoch": 0.6169871794871795, + "grad_norm": 0.01393802348712383, + "learning_rate": 4.4137726328287324e-07, + "logits/chosen": 0.94140625, + "logits/rejected": 1.0859375, + "logps/chosen": -167.0, + "logps/rejected": -256.0, + "loss": 0.01, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.515625, + "rewards/margins": 16.25, + "rewards/rejected": -12.75, + "step": 770 + }, + { + "epoch": 0.625, + "grad_norm": 0.00011925426636780416, + "learning_rate": 4.3989314336598395e-07, + "logits/chosen": 0.890625, + "logits/rejected": 0.8828125, + "logps/chosen": -131.0, + "logps/rejected": -266.0, + "loss": 0.0004, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 16.5, + "rewards/rejected": -13.0625, + "step": 780 + }, + { + "epoch": 0.6330128205128205, + "grad_norm": 0.0017273716901997926, + "learning_rate": 4.384090234490946e-07, + "logits/chosen": 0.7734375, + "logits/rejected": 1.09375, + "logps/chosen": -154.0, + "logps/rejected": -250.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 15.625, + "rewards/rejected": -12.0, + "step": 790 + }, + { + "epoch": 0.6410256410256411, + "grad_norm": 0.0024078267573534673, + "learning_rate": 4.369249035322054e-07, + "logits/chosen": 0.62109375, + "logits/rejected": 0.76171875, + "logps/chosen": -152.0, + "logps/rejected": -274.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 15.75, + "rewards/rejected": -12.5, + "step": 800 + }, + { + "epoch": 0.6490384615384616, + "grad_norm": 0.0025633329633488206, + "learning_rate": 4.354407836153161e-07, + "logits/chosen": 0.7109375, + "logits/rejected": 0.97265625, 
+ "logps/chosen": -172.0, + "logps/rejected": -242.0, + "loss": 0.0032, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 15.5, + "rewards/rejected": -12.1875, + "step": 810 + }, + { + "epoch": 0.657051282051282, + "grad_norm": 7.62401527642808, + "learning_rate": 4.339566636984268e-07, + "logits/chosen": 0.765625, + "logits/rejected": 0.91796875, + "logps/chosen": -173.0, + "logps/rejected": -264.0, + "loss": 0.0031, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.765625, + "rewards/margins": 15.5, + "rewards/rejected": -11.75, + "step": 820 + }, + { + "epoch": 0.6650641025641025, + "grad_norm": 0.0018391316218238597, + "learning_rate": 4.324725437815375e-07, + "logits/chosen": 0.5390625, + "logits/rejected": 0.69140625, + "logps/chosen": -188.0, + "logps/rejected": -266.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.515625, + "rewards/margins": 16.25, + "rewards/rejected": -12.75, + "step": 830 + }, + { + "epoch": 0.6730769230769231, + "grad_norm": 0.0009646190758050507, + "learning_rate": 4.3098842386464824e-07, + "logits/chosen": 0.62890625, + "logits/rejected": 1.078125, + "logps/chosen": -183.0, + "logps/rejected": -282.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.1875, + "rewards/margins": 17.25, + "rewards/rejected": -14.125, + "step": 840 + }, + { + "epoch": 0.6810897435897436, + "grad_norm": 0.0008610524729650137, + "learning_rate": 4.2950430394775895e-07, + "logits/chosen": 0.478515625, + "logits/rejected": 0.98046875, + "logps/chosen": -178.0, + "logps/rejected": -274.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 17.5, + "rewards/rejected": -13.875, + "step": 850 + }, + { + "epoch": 0.6891025641025641, + "grad_norm": 0.007096618898808949, + "learning_rate": 4.280201840308697e-07, + "logits/chosen": 0.828125, + "logits/rejected": 1.1796875, + "logps/chosen": -157.0, + "logps/rejected": -280.0, + "loss": 0.0006, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 17.625, + "rewards/rejected": -14.0625, + "step": 860 + }, + { + "epoch": 0.6971153846153846, + "grad_norm": 0.14010136528237552, + "learning_rate": 4.2653606411398043e-07, + "logits/chosen": 0.59375, + "logits/rejected": 1.140625, + "logps/chosen": -190.0, + "logps/rejected": -253.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 4.0, + "rewards/margins": 16.375, + "rewards/rejected": -12.375, + "step": 870 + }, + { + "epoch": 0.7051282051282052, + "grad_norm": 0.0023988494206444587, + "learning_rate": 4.2505194419709114e-07, + "logits/chosen": 0.55859375, + "logits/rejected": 0.80859375, + "logps/chosen": -156.0, + "logps/rejected": -260.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 16.25, + "rewards/rejected": -12.875, + "step": 880 + }, + { + "epoch": 0.7131410256410257, + "grad_norm": 6.087390791135428e-05, + "learning_rate": 4.235678242802018e-07, + "logits/chosen": 0.58203125, + "logits/rejected": 0.984375, + "logps/chosen": -164.0, + "logps/rejected": -274.0, + "loss": 0.0003, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.609375, + "rewards/margins": 17.75, + "rewards/rejected": -14.1875, + "step": 890 + }, + { + "epoch": 0.7211538461538461, + "grad_norm": 0.0008312749795051527, + "learning_rate": 4.220837043633125e-07, + "logits/chosen": 0.6875, + "logits/rejected": 0.90234375, + "logps/chosen": -154.0, + "logps/rejected": -278.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + 
"rewards/chosen": 3.3125, + "rewards/margins": 16.625, + "rewards/rejected": -13.3125, + "step": 900 + }, + { + "epoch": 0.7291666666666666, + "grad_norm": 0.02956438917542033, + "learning_rate": 4.2059958444642323e-07, + "logits/chosen": 0.8671875, + "logits/rejected": 1.046875, + "logps/chosen": -163.0, + "logps/rejected": -262.0, + "loss": 0.0165, + "rewards/accuracies": 0.9750000238418579, + "rewards/chosen": 3.390625, + "rewards/margins": 16.0, + "rewards/rejected": -12.625, + "step": 910 + }, + { + "epoch": 0.7371794871794872, + "grad_norm": 3.7536937637117935, + "learning_rate": 4.1911546452953394e-07, + "logits/chosen": 0.625, + "logits/rejected": 0.89453125, + "logps/chosen": -193.0, + "logps/rejected": -272.0, + "loss": 0.0025, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.78125, + "rewards/margins": 17.25, + "rewards/rejected": -13.5, + "step": 920 + }, + { + "epoch": 0.7451923076923077, + "grad_norm": 0.004851799456712299, + "learning_rate": 4.176313446126447e-07, + "logits/chosen": 0.439453125, + "logits/rejected": 1.015625, + "logps/chosen": -205.0, + "logps/rejected": -264.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 16.75, + "rewards/rejected": -12.9375, + "step": 930 + }, + { + "epoch": 0.7532051282051282, + "grad_norm": 0.00015565385815992762, + "learning_rate": 4.161472246957554e-07, + "logits/chosen": 0.65625, + "logits/rejected": 0.9140625, + "logps/chosen": -140.0, + "logps/rejected": -262.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 17.0, + "rewards/rejected": -13.5625, + "step": 940 + }, + { + "epoch": 0.7612179487179487, + "grad_norm": 4.411289254772126e-06, + "learning_rate": 4.1466310477886614e-07, + "logits/chosen": 0.765625, + "logits/rejected": 1.0703125, + "logps/chosen": -174.0, + "logps/rejected": -276.0, + "loss": 0.0022, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 17.875, + "rewards/rejected": -14.125, + "step": 950 + }, + { + "epoch": 0.7692307692307693, + "grad_norm": 0.0012155773934020653, + "learning_rate": 4.1317898486197685e-07, + "logits/chosen": 0.50390625, + "logits/rejected": 0.48046875, + "logps/chosen": -198.0, + "logps/rejected": -274.0, + "loss": 0.0008, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5625, + "rewards/margins": 16.75, + "rewards/rejected": -13.125, + "step": 960 + }, + { + "epoch": 0.7772435897435898, + "grad_norm": 0.00027944466244255936, + "learning_rate": 4.1169486494508756e-07, + "logits/chosen": 0.83984375, + "logits/rejected": 1.40625, + "logps/chosen": -145.0, + "logps/rejected": -278.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 17.875, + "rewards/rejected": -14.375, + "step": 970 + }, + { + "epoch": 0.7852564102564102, + "grad_norm": 8.829716265401248e-05, + "learning_rate": 4.102107450281982e-07, + "logits/chosen": 0.88671875, + "logits/rejected": 1.1328125, + "logps/chosen": -126.5, + "logps/rejected": -256.0, + "loss": 0.0003, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 17.0, + "rewards/rejected": -13.375, + "step": 980 + }, + { + "epoch": 0.7932692307692307, + "grad_norm": 5.5344861594260826e-05, + "learning_rate": 4.0872662511130894e-07, + "logits/chosen": 0.515625, + "logits/rejected": 1.0859375, + "logps/chosen": -202.0, + "logps/rejected": -286.0, + "loss": 0.0005, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 18.0, + "rewards/rejected": -14.4375, + 
"step": 990 + }, + { + "epoch": 0.8012820512820513, + "grad_norm": 0.00011381421860377532, + "learning_rate": 4.072425051944197e-07, + "logits/chosen": 0.80859375, + "logits/rejected": 0.8515625, + "logps/chosen": -145.0, + "logps/rejected": -292.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 18.375, + "rewards/rejected": -14.9375, + "step": 1000 + }, + { + "epoch": 0.8092948717948718, + "grad_norm": 5.535336680872587, + "learning_rate": 4.057583852775304e-07, + "logits/chosen": 0.69921875, + "logits/rejected": 0.828125, + "logps/chosen": -173.0, + "logps/rejected": -256.0, + "loss": 0.001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 16.75, + "rewards/rejected": -13.375, + "step": 1010 + }, + { + "epoch": 0.8173076923076923, + "grad_norm": 0.0005514714398320138, + "learning_rate": 4.0427426536064113e-07, + "logits/chosen": 0.765625, + "logits/rejected": 1.0703125, + "logps/chosen": -125.0, + "logps/rejected": -268.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5, + "rewards/margins": 18.375, + "rewards/rejected": -14.875, + "step": 1020 + }, + { + "epoch": 0.8253205128205128, + "grad_norm": 0.0007561637659334397, + "learning_rate": 4.0279014544375184e-07, + "logits/chosen": 0.68359375, + "logits/rejected": 0.984375, + "logps/chosen": -186.0, + "logps/rejected": -274.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.84375, + "rewards/margins": 17.875, + "rewards/rejected": -14.0, + "step": 1030 + }, + { + "epoch": 0.8333333333333334, + "grad_norm": 0.01109823538677163, + "learning_rate": 4.0130602552686256e-07, + "logits/chosen": 0.486328125, + "logits/rejected": 0.859375, + "logps/chosen": -159.0, + "logps/rejected": -276.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 18.875, + "rewards/rejected": -15.375, + "step": 1040 + }, + { + "epoch": 0.8413461538461539, + "grad_norm": 9.567710249044793e-05, + "learning_rate": 3.9982190560997327e-07, + "logits/chosen": 0.76953125, + "logits/rejected": 1.1328125, + "logps/chosen": -173.0, + "logps/rejected": -288.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 18.0, + "rewards/rejected": -14.5625, + "step": 1050 + }, + { + "epoch": 0.8493589743589743, + "grad_norm": 0.004699002732439855, + "learning_rate": 3.98337785693084e-07, + "logits/chosen": 0.80078125, + "logits/rejected": 0.8203125, + "logps/chosen": -163.0, + "logps/rejected": -284.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 18.5, + "rewards/rejected": -15.0, + "step": 1060 + }, + { + "epoch": 0.8573717948717948, + "grad_norm": 5.1388350064007634e-06, + "learning_rate": 3.968536657761947e-07, + "logits/chosen": 0.8046875, + "logits/rejected": 0.78125, + "logps/chosen": -167.0, + "logps/rejected": -274.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 18.0, + "rewards/rejected": -14.375, + "step": 1070 + }, + { + "epoch": 0.8653846153846154, + "grad_norm": 0.029972344027948593, + "learning_rate": 3.953695458593054e-07, + "logits/chosen": 0.77734375, + "logits/rejected": 1.15625, + "logps/chosen": -184.0, + "logps/rejected": -280.0, + "loss": 0.0045, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.078125, + "rewards/margins": 17.375, + "rewards/rejected": -14.25, + "step": 1080 + }, + { + "epoch": 0.8733974358974359, + "grad_norm": 0.018098759436406774, + "learning_rate": 
3.938854259424161e-07, + "logits/chosen": 0.75390625, + "logits/rejected": 0.9296875, + "logps/chosen": -126.0, + "logps/rejected": -264.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.90625, + "rewards/margins": 17.0, + "rewards/rejected": -14.0625, + "step": 1090 + }, + { + "epoch": 0.8814102564102564, + "grad_norm": 8.008272391872532e-05, + "learning_rate": 3.9240130602552684e-07, + "logits/chosen": 0.80859375, + "logits/rejected": 1.234375, + "logps/chosen": -134.0, + "logps/rejected": -282.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.125, + "rewards/margins": 18.375, + "rewards/rejected": -15.3125, + "step": 1100 + }, + { + "epoch": 0.8894230769230769, + "grad_norm": 0.009049533722692323, + "learning_rate": 3.9091718610863755e-07, + "logits/chosen": 0.71484375, + "logits/rejected": 1.203125, + "logps/chosen": -180.0, + "logps/rejected": -280.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 17.5, + "rewards/rejected": -14.1875, + "step": 1110 + }, + { + "epoch": 0.8974358974358975, + "grad_norm": 8.244045878983639e-05, + "learning_rate": 3.8943306619174827e-07, + "logits/chosen": 0.609375, + "logits/rejected": 0.984375, + "logps/chosen": -146.0, + "logps/rejected": -282.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 18.375, + "rewards/rejected": -15.0, + "step": 1120 + }, + { + "epoch": 0.905448717948718, + "grad_norm": 0.00613655578170994, + "learning_rate": 3.87948946274859e-07, + "logits/chosen": 0.78515625, + "logits/rejected": 1.0, + "logps/chosen": -158.0, + "logps/rejected": -272.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 18.0, + "rewards/rejected": -14.4375, + "step": 1130 + }, + { + "epoch": 0.9134615384615384, + "grad_norm": 8.217111266575298e-06, + "learning_rate": 3.8646482635796975e-07, + "logits/chosen": 0.7734375, + "logits/rejected": 1.0859375, + "logps/chosen": -178.0, + "logps/rejected": -288.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 18.5, + "rewards/rejected": -14.875, + "step": 1140 + }, + { + "epoch": 0.9214743589743589, + "grad_norm": 0.7364035065807161, + "learning_rate": 3.8498070644108046e-07, + "logits/chosen": 0.890625, + "logits/rejected": 1.15625, + "logps/chosen": -165.0, + "logps/rejected": -286.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 18.5, + "rewards/rejected": -15.0, + "step": 1150 + }, + { + "epoch": 0.9294871794871795, + "grad_norm": 0.1496693561497382, + "learning_rate": 3.834965865241911e-07, + "logits/chosen": 0.9921875, + "logits/rejected": 1.1875, + "logps/chosen": -178.0, + "logps/rejected": -282.0, + "loss": 0.0004, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.84375, + "rewards/margins": 17.75, + "rewards/rejected": -14.875, + "step": 1160 + }, + { + "epoch": 0.9375, + "grad_norm": 0.00037493575561416294, + "learning_rate": 3.8201246660730183e-07, + "logits/chosen": 0.890625, + "logits/rejected": 1.0703125, + "logps/chosen": -136.0, + "logps/rejected": -284.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.421875, + "rewards/margins": 18.75, + "rewards/rejected": -15.3125, + "step": 1170 + }, + { + "epoch": 0.9455128205128205, + "grad_norm": 0.004982903246651787, + "learning_rate": 3.8052834669041255e-07, + "logits/chosen": 0.921875, + "logits/rejected": 1.28125, + "logps/chosen": -167.0, + 
"logps/rejected": -288.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 19.25, + "rewards/rejected": -15.6875, + "step": 1180 + }, + { + "epoch": 0.9535256410256411, + "grad_norm": 6.72396395145147e-05, + "learning_rate": 3.7904422677352326e-07, + "logits/chosen": 0.72265625, + "logits/rejected": 1.15625, + "logps/chosen": -181.0, + "logps/rejected": -306.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.359375, + "rewards/margins": 19.625, + "rewards/rejected": -16.25, + "step": 1190 + }, + { + "epoch": 0.9615384615384616, + "grad_norm": 0.000412374591287098, + "learning_rate": 3.77560106856634e-07, + "logits/chosen": 0.734375, + "logits/rejected": 1.1953125, + "logps/chosen": -168.0, + "logps/rejected": -306.0, + "loss": 0.0005, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 18.875, + "rewards/rejected": -15.125, + "step": 1200 + }, + { + "epoch": 0.969551282051282, + "grad_norm": 0.0011780079032558514, + "learning_rate": 3.7607598693974474e-07, + "logits/chosen": 0.7890625, + "logits/rejected": 1.1015625, + "logps/chosen": -158.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.34375, + "rewards/margins": 19.625, + "rewards/rejected": -16.25, + "step": 1210 + }, + { + "epoch": 0.9775641025641025, + "grad_norm": 0.019042612389581243, + "learning_rate": 3.7459186702285545e-07, + "logits/chosen": 0.76171875, + "logits/rejected": 0.71484375, + "logps/chosen": -152.0, + "logps/rejected": -294.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 19.0, + "rewards/rejected": -15.375, + "step": 1220 + }, + { + "epoch": 0.9855769230769231, + "grad_norm": 0.001017013474787819, + "learning_rate": 3.7310774710596617e-07, + "logits/chosen": 0.9375, + "logits/rejected": 1.4140625, + "logps/chosen": -170.0, + "logps/rejected": -286.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5625, + "rewards/margins": 19.25, + "rewards/rejected": -15.6875, + "step": 1230 + }, + { + "epoch": 0.9935897435897436, + "grad_norm": 0.0001296021532768441, + "learning_rate": 3.716236271890769e-07, + "logits/chosen": 1.2109375, + "logits/rejected": 1.3984375, + "logps/chosen": -127.0, + "logps/rejected": -284.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.78125, + "rewards/margins": 19.25, + "rewards/rejected": -15.4375, + "step": 1240 + }, + { + "epoch": 1.0, + "eval_logits/chosen": 0.76953125, + "eval_logits/rejected": 1.2890625, + "eval_logps/chosen": -168.0, + "eval_logps/rejected": -296.0, + "eval_loss": 6.842174479970708e-05, + "eval_rewards/accuracies": 1.0, + "eval_rewards/chosen": 3.453125, + "eval_rewards/margins": 18.75, + "eval_rewards/rejected": -15.3125, + "eval_runtime": 25.3915, + "eval_samples_per_second": 7.837, + "eval_steps_per_second": 0.985, + "step": 1248 + }, + { + "epoch": 1.001602564102564, + "grad_norm": 1.5097157370383436e-06, + "learning_rate": 3.7013950727218754e-07, + "logits/chosen": 0.90234375, + "logits/rejected": 1.2890625, + "logps/chosen": -149.0, + "logps/rejected": -280.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 19.375, + "rewards/rejected": -15.875, + "step": 1250 + }, + { + "epoch": 1.0096153846153846, + "grad_norm": 6.567245720525397e-06, + "learning_rate": 3.6865538735529826e-07, + "logits/chosen": 0.8359375, + "logits/rejected": 1.046875, + "logps/chosen": -135.0, + "logps/rejected": -284.0, + "loss": 
0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.640625, + "rewards/margins": 18.5, + "rewards/rejected": -14.875, + "step": 1260 + }, + { + "epoch": 1.017628205128205, + "grad_norm": 2.7660339460230295e-07, + "learning_rate": 3.6717126743840897e-07, + "logits/chosen": 0.75390625, + "logits/rejected": 1.203125, + "logps/chosen": -176.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 19.25, + "rewards/rejected": -15.5, + "step": 1270 + }, + { + "epoch": 1.0256410256410255, + "grad_norm": 0.00038016083445366024, + "learning_rate": 3.6568714752151974e-07, + "logits/chosen": 0.90625, + "logits/rejected": 1.203125, + "logps/chosen": -158.0, + "logps/rejected": -302.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 19.0, + "rewards/rejected": -15.5, + "step": 1280 + }, + { + "epoch": 1.0336538461538463, + "grad_norm": 1.642467149992957e-05, + "learning_rate": 3.6420302760463045e-07, + "logits/chosen": 0.79296875, + "logits/rejected": 1.171875, + "logps/chosen": -163.0, + "logps/rejected": -292.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 19.375, + "rewards/rejected": -15.9375, + "step": 1290 + }, + { + "epoch": 1.0416666666666667, + "grad_norm": 1.456498508279097e-05, + "learning_rate": 3.6271890768774116e-07, + "logits/chosen": 0.83984375, + "logits/rejected": 1.296875, + "logps/chosen": -138.0, + "logps/rejected": -276.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.390625, + "rewards/margins": 18.875, + "rewards/rejected": -15.4375, + "step": 1300 + }, + { + "epoch": 1.0496794871794872, + "grad_norm": 0.2184132961337863, + "learning_rate": 3.612347877708519e-07, + "logits/chosen": 0.796875, + "logits/rejected": 1.1015625, + "logps/chosen": -126.0, + "logps/rejected": -302.0, + "loss": 0.0013, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 20.25, + "rewards/rejected": -16.625, + "step": 1310 + }, + { + "epoch": 1.0576923076923077, + "grad_norm": 0.00027060412424146214, + "learning_rate": 3.597506678539626e-07, + "logits/chosen": 0.78125, + "logits/rejected": 1.4375, + "logps/chosen": -162.0, + "logps/rejected": -294.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.78125, + "rewards/margins": 19.0, + "rewards/rejected": -15.1875, + "step": 1320 + }, + { + "epoch": 1.0657051282051282, + "grad_norm": 0.0005949930902462416, + "learning_rate": 3.582665479370733e-07, + "logits/chosen": 1.015625, + "logits/rejected": 1.1640625, + "logps/chosen": -133.0, + "logps/rejected": -280.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.765625, + "rewards/margins": 18.625, + "rewards/rejected": -14.8125, + "step": 1330 + }, + { + "epoch": 1.0737179487179487, + "grad_norm": 3.821536446255942e-05, + "learning_rate": 3.5678242802018396e-07, + "logits/chosen": 0.98828125, + "logits/rejected": 1.0234375, + "logps/chosen": -169.0, + "logps/rejected": -284.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 19.0, + "rewards/rejected": -15.25, + "step": 1340 + }, + { + "epoch": 1.0817307692307692, + "grad_norm": 0.0003595307379009215, + "learning_rate": 3.5529830810329473e-07, + "logits/chosen": 0.703125, + "logits/rejected": 1.109375, + "logps/chosen": -163.0, + "logps/rejected": -282.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.984375, + "rewards/margins": 18.875, + 
"rewards/rejected": -14.9375, + "step": 1350 + }, + { + "epoch": 1.0897435897435896, + "grad_norm": 0.21495162145805216, + "learning_rate": 3.5381418818640544e-07, + "logits/chosen": 0.828125, + "logits/rejected": 1.1796875, + "logps/chosen": -150.0, + "logps/rejected": -288.0, + "loss": 0.0192, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.421875, + "rewards/margins": 19.625, + "rewards/rejected": -16.25, + "step": 1360 + }, + { + "epoch": 1.0977564102564104, + "grad_norm": 0.00022987129229484713, + "learning_rate": 3.5233006826951616e-07, + "logits/chosen": 1.0390625, + "logits/rejected": 0.9921875, + "logps/chosen": -118.0, + "logps/rejected": -298.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.421875, + "rewards/margins": 20.0, + "rewards/rejected": -16.5, + "step": 1370 + }, + { + "epoch": 1.1057692307692308, + "grad_norm": 0.0001366686827476735, + "learning_rate": 3.5084594835262687e-07, + "logits/chosen": 0.80078125, + "logits/rejected": 0.98046875, + "logps/chosen": -125.0, + "logps/rejected": -284.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 19.875, + "rewards/rejected": -16.375, + "step": 1380 + }, + { + "epoch": 1.1137820512820513, + "grad_norm": 1.2520041285180355e-05, + "learning_rate": 3.493618284357376e-07, + "logits/chosen": 0.90234375, + "logits/rejected": 1.3984375, + "logps/chosen": -168.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 19.125, + "rewards/rejected": -15.75, + "step": 1390 + }, + { + "epoch": 1.1217948717948718, + "grad_norm": 0.002926505425137915, + "learning_rate": 3.478777085188483e-07, + "logits/chosen": 0.578125, + "logits/rejected": 0.921875, + "logps/chosen": -153.0, + "logps/rejected": -298.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.84375, + "rewards/margins": 20.0, + "rewards/rejected": -16.125, + "step": 1400 + }, + { + "epoch": 1.1298076923076923, + "grad_norm": 0.01017669683918559, + "learning_rate": 3.46393588601959e-07, + "logits/chosen": 0.94921875, + "logits/rejected": 1.125, + "logps/chosen": -126.0, + "logps/rejected": -308.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 21.125, + "rewards/rejected": -17.625, + "step": 1410 + }, + { + "epoch": 1.1378205128205128, + "grad_norm": 4.668022191357913e-06, + "learning_rate": 3.449094686850698e-07, + "logits/chosen": 0.8984375, + "logits/rejected": 1.3984375, + "logps/chosen": -155.0, + "logps/rejected": -314.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.984375, + "rewards/margins": 20.75, + "rewards/rejected": -16.75, + "step": 1420 + }, + { + "epoch": 1.1458333333333333, + "grad_norm": 4.630494393827752e-05, + "learning_rate": 3.434253487681805e-07, + "logits/chosen": 0.671875, + "logits/rejected": 0.94140625, + "logps/chosen": -177.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 19.375, + "rewards/rejected": -15.8125, + "step": 1430 + }, + { + "epoch": 1.1538461538461537, + "grad_norm": 0.0006577813730898143, + "learning_rate": 3.4194122885129115e-07, + "logits/chosen": 0.73828125, + "logits/rejected": 0.94921875, + "logps/chosen": -180.0, + "logps/rejected": -282.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 18.125, + "rewards/rejected": -14.4375, + "step": 1440 + }, + { + "epoch": 1.1618589743589745, + "grad_norm": 
0.0004865147564756555, + "learning_rate": 3.4045710893440187e-07, + "logits/chosen": 0.66796875, + "logits/rejected": 1.1640625, + "logps/chosen": -170.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.65625, + "rewards/margins": 20.75, + "rewards/rejected": -17.125, + "step": 1450 + }, + { + "epoch": 1.169871794871795, + "grad_norm": 8.243889755055517e-05, + "learning_rate": 3.389729890175126e-07, + "logits/chosen": 0.73828125, + "logits/rejected": 1.1171875, + "logps/chosen": -174.0, + "logps/rejected": -300.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 4.03125, + "rewards/margins": 20.25, + "rewards/rejected": -16.25, + "step": 1460 + }, + { + "epoch": 1.1778846153846154, + "grad_norm": 0.055297406196161324, + "learning_rate": 3.374888691006233e-07, + "logits/chosen": 0.68359375, + "logits/rejected": 1.0078125, + "logps/chosen": -180.0, + "logps/rejected": -292.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.9375, + "rewards/margins": 19.875, + "rewards/rejected": -15.9375, + "step": 1470 + }, + { + "epoch": 1.185897435897436, + "grad_norm": 3.940056875422956e-06, + "learning_rate": 3.36004749183734e-07, + "logits/chosen": 0.7734375, + "logits/rejected": 1.1875, + "logps/chosen": -187.0, + "logps/rejected": -282.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 19.0, + "rewards/rejected": -15.1875, + "step": 1480 + }, + { + "epoch": 1.1939102564102564, + "grad_norm": 0.02924640642898095, + "learning_rate": 3.3452062926684477e-07, + "logits/chosen": 0.77734375, + "logits/rejected": 0.87890625, + "logps/chosen": -151.0, + "logps/rejected": -294.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.359375, + "rewards/margins": 19.625, + "rewards/rejected": -16.25, + "step": 1490 + }, + { + "epoch": 1.2019230769230769, + "grad_norm": 0.003264557976511741, + "learning_rate": 3.330365093499555e-07, + "logits/chosen": 0.7109375, + "logits/rejected": 0.8828125, + "logps/chosen": -181.0, + "logps/rejected": -280.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.96875, + "rewards/margins": 18.75, + "rewards/rejected": -14.75, + "step": 1500 + }, + { + "epoch": 1.2099358974358974, + "grad_norm": 1.5994308395955108e-05, + "learning_rate": 3.315523894330662e-07, + "logits/chosen": 0.63671875, + "logits/rejected": 0.77734375, + "logps/chosen": -180.0, + "logps/rejected": -306.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 20.625, + "rewards/rejected": -16.875, + "step": 1510 + }, + { + "epoch": 1.217948717948718, + "grad_norm": 0.0006042891458313199, + "learning_rate": 3.300682695161769e-07, + "logits/chosen": 0.6484375, + "logits/rejected": 1.328125, + "logps/chosen": -158.0, + "logps/rejected": -294.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.765625, + "rewards/margins": 19.625, + "rewards/rejected": -15.9375, + "step": 1520 + }, + { + "epoch": 1.2259615384615385, + "grad_norm": 0.00021178126794762857, + "learning_rate": 3.2858414959928757e-07, + "logits/chosen": 0.65625, + "logits/rejected": 1.078125, + "logps/chosen": -116.0, + "logps/rejected": -290.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 20.625, + "rewards/rejected": -17.375, + "step": 1530 + }, + { + "epoch": 1.233974358974359, + "grad_norm": 0.0010273708366364401, + "learning_rate": 3.271000296823983e-07, + "logits/chosen": 0.65625, + 
"logits/rejected": 1.375, + "logps/chosen": -152.0, + "logps/rejected": -308.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 20.25, + "rewards/rejected": -17.0, + "step": 1540 + }, + { + "epoch": 1.2419871794871795, + "grad_norm": 3.07402349065646e-06, + "learning_rate": 3.25615909765509e-07, + "logits/chosen": 0.921875, + "logits/rejected": 1.4453125, + "logps/chosen": -142.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.171875, + "rewards/margins": 20.125, + "rewards/rejected": -16.875, + "step": 1550 + }, + { + "epoch": 1.25, + "grad_norm": 0.0011000584431525772, + "learning_rate": 3.2413178984861977e-07, + "logits/chosen": 0.70703125, + "logits/rejected": 1.0390625, + "logps/chosen": -117.0, + "logps/rejected": -292.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 20.0, + "rewards/rejected": -16.375, + "step": 1560 + }, + { + "epoch": 1.2580128205128205, + "grad_norm": 0.000452369898432101, + "learning_rate": 3.226476699317305e-07, + "logits/chosen": 0.671875, + "logits/rejected": 1.0703125, + "logps/chosen": -150.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.796875, + "rewards/margins": 21.25, + "rewards/rejected": -17.375, + "step": 1570 + }, + { + "epoch": 1.266025641025641, + "grad_norm": 0.00026586619030460033, + "learning_rate": 3.211635500148412e-07, + "logits/chosen": 0.8671875, + "logits/rejected": 1.1640625, + "logps/chosen": -160.0, + "logps/rejected": -306.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 20.25, + "rewards/rejected": -16.75, + "step": 1580 + }, + { + "epoch": 1.2740384615384617, + "grad_norm": 2.7345902360690067e-06, + "learning_rate": 3.196794300979519e-07, + "logits/chosen": 0.76171875, + "logits/rejected": 1.140625, + "logps/chosen": -185.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.65625, + "rewards/margins": 20.75, + "rewards/rejected": -17.125, + "step": 1590 + }, + { + "epoch": 1.282051282051282, + "grad_norm": 1.5071041983034111e-06, + "learning_rate": 3.181953101810626e-07, + "logits/chosen": 0.53515625, + "logits/rejected": 0.96875, + "logps/chosen": -153.0, + "logps/rejected": -300.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.90625, + "rewards/margins": 20.375, + "rewards/rejected": -16.5, + "step": 1600 + }, + { + "epoch": 1.2900641025641026, + "grad_norm": 9.809509824262037e-05, + "learning_rate": 3.1671119026417333e-07, + "logits/chosen": 0.71875, + "logits/rejected": 0.9921875, + "logps/chosen": -145.0, + "logps/rejected": -302.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.203125, + "rewards/margins": 19.625, + "rewards/rejected": -16.375, + "step": 1610 + }, + { + "epoch": 1.2980769230769231, + "grad_norm": 0.00019153122395682397, + "learning_rate": 3.15227070347284e-07, + "logits/chosen": 0.8828125, + "logits/rejected": 1.1015625, + "logps/chosen": -159.0, + "logps/rejected": -292.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.96875, + "rewards/margins": 19.5, + "rewards/rejected": -15.625, + "step": 1620 + }, + { + "epoch": 1.3060897435897436, + "grad_norm": 1.1285816682202219e-06, + "learning_rate": 3.1374295043039476e-07, + "logits/chosen": 0.91015625, + "logits/rejected": 1.359375, + "logps/chosen": -139.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 
1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 21.75, + "rewards/rejected": -18.25, + "step": 1630 + }, + { + "epoch": 1.314102564102564, + "grad_norm": 0.00014596821162342946, + "learning_rate": 3.122588305135055e-07, + "logits/chosen": 0.8984375, + "logits/rejected": 0.99609375, + "logps/chosen": -164.0, + "logps/rejected": -300.0, + "loss": 0.0003, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.0625, + "rewards/margins": 19.625, + "rewards/rejected": -16.5, + "step": 1640 + }, + { + "epoch": 1.3221153846153846, + "grad_norm": 6.180429625577289e-05, + "learning_rate": 3.107747105966162e-07, + "logits/chosen": 0.5859375, + "logits/rejected": 0.91015625, + "logps/chosen": -140.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 21.5, + "rewards/rejected": -17.75, + "step": 1650 + }, + { + "epoch": 1.330128205128205, + "grad_norm": 1.528718002747695e-05, + "learning_rate": 3.092905906797269e-07, + "logits/chosen": 0.96484375, + "logits/rejected": 1.25, + "logps/chosen": -160.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 21.125, + "rewards/rejected": -17.5, + "step": 1660 + }, + { + "epoch": 1.3381410256410255, + "grad_norm": 0.0018059387091916529, + "learning_rate": 3.078064707628376e-07, + "logits/chosen": 0.96484375, + "logits/rejected": 1.3515625, + "logps/chosen": -150.0, + "logps/rejected": -298.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 20.875, + "rewards/rejected": -17.625, + "step": 1670 + }, + { + "epoch": 1.3461538461538463, + "grad_norm": 9.308236631197583e-05, + "learning_rate": 3.0632235084594833e-07, + "logits/chosen": 0.94140625, + "logits/rejected": 1.21875, + "logps/chosen": -154.0, + "logps/rejected": -298.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 20.25, + "rewards/rejected": -16.75, + "step": 1680 + }, + { + "epoch": 1.3541666666666667, + "grad_norm": 1.0292832754568623e-05, + "learning_rate": 3.048382309290591e-07, + "logits/chosen": 0.9453125, + "logits/rejected": 1.1953125, + "logps/chosen": -149.0, + "logps/rejected": -314.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 21.0, + "rewards/rejected": -17.375, + "step": 1690 + }, + { + "epoch": 1.3621794871794872, + "grad_norm": 0.002936471913731386, + "learning_rate": 3.033541110121698e-07, + "logits/chosen": 0.67578125, + "logits/rejected": 0.8515625, + "logps/chosen": -178.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.609375, + "rewards/margins": 20.75, + "rewards/rejected": -17.125, + "step": 1700 + }, + { + "epoch": 1.3701923076923077, + "grad_norm": 0.00010907511941114075, + "learning_rate": 3.018699910952805e-07, + "logits/chosen": 0.796875, + "logits/rejected": 0.9609375, + "logps/chosen": -171.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 19.375, + "rewards/rejected": -15.75, + "step": 1710 + }, + { + "epoch": 1.3782051282051282, + "grad_norm": 0.004723298049564672, + "learning_rate": 3.003858711783912e-07, + "logits/chosen": 0.93359375, + "logits/rejected": 1.34375, + "logps/chosen": -128.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 20.75, + "rewards/rejected": -17.375, + "step": 1720 + }, + 
{ + "epoch": 1.3862179487179487, + "grad_norm": 0.0037479058484858564, + "learning_rate": 2.989017512615019e-07, + "logits/chosen": 0.81640625, + "logits/rejected": 1.3359375, + "logps/chosen": -187.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.84375, + "rewards/margins": 20.625, + "rewards/rejected": -16.75, + "step": 1730 + }, + { + "epoch": 1.3942307692307692, + "grad_norm": 6.403847125969602e-06, + "learning_rate": 2.974176313446126e-07, + "logits/chosen": 0.8984375, + "logits/rejected": 1.40625, + "logps/chosen": -167.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 20.5, + "rewards/rejected": -17.125, + "step": 1740 + }, + { + "epoch": 1.4022435897435899, + "grad_norm": 3.170124782559705e-06, + "learning_rate": 2.959335114277233e-07, + "logits/chosen": 0.4765625, + "logits/rejected": 0.98046875, + "logps/chosen": -200.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 20.75, + "rewards/rejected": -17.0, + "step": 1750 + }, + { + "epoch": 1.4102564102564101, + "grad_norm": 0.002625958073479466, + "learning_rate": 2.944493915108341e-07, + "logits/chosen": 0.8125, + "logits/rejected": 0.984375, + "logps/chosen": -145.0, + "logps/rejected": -288.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 19.375, + "rewards/rejected": -16.0, + "step": 1760 + }, + { + "epoch": 1.4182692307692308, + "grad_norm": 1.7345808005103415e-05, + "learning_rate": 2.929652715939448e-07, + "logits/chosen": 0.66015625, + "logits/rejected": 1.203125, + "logps/chosen": -186.0, + "logps/rejected": -304.0, + "loss": 0.0008, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 20.875, + "rewards/rejected": -17.375, + "step": 1770 + }, + { + "epoch": 1.4262820512820513, + "grad_norm": 3.70136218823452e-06, + "learning_rate": 2.914811516770555e-07, + "logits/chosen": 0.486328125, + "logits/rejected": 1.0078125, + "logps/chosen": -209.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 20.875, + "rewards/rejected": -17.375, + "step": 1780 + }, + { + "epoch": 1.4342948717948718, + "grad_norm": 1.1884302830057285e-05, + "learning_rate": 2.8999703176016623e-07, + "logits/chosen": 0.8046875, + "logits/rejected": 1.34375, + "logps/chosen": -176.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.0625, + "rewards/margins": 19.625, + "rewards/rejected": -16.625, + "step": 1790 + }, + { + "epoch": 1.4423076923076923, + "grad_norm": 0.0004609700315428155, + "learning_rate": 2.8851291184327694e-07, + "logits/chosen": 0.703125, + "logits/rejected": 1.09375, + "logps/chosen": -164.0, + "logps/rejected": -322.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.078125, + "rewards/margins": 22.125, + "rewards/rejected": -19.125, + "step": 1800 + }, + { + "epoch": 1.4503205128205128, + "grad_norm": 0.0006305642233578017, + "learning_rate": 2.870287919263876e-07, + "logits/chosen": 0.72265625, + "logits/rejected": 1.1953125, + "logps/chosen": -185.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.875, + "rewards/margins": 20.875, + "rewards/rejected": -18.0, + "step": 1810 + }, + { + "epoch": 1.4583333333333333, + "grad_norm": 3.4339114874993125e-06, + "learning_rate": 2.855446720094983e-07, 
+ "logits/chosen": 0.7421875, + "logits/rejected": 1.3203125, + "logps/chosen": -165.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 21.25, + "rewards/rejected": -17.5, + "step": 1820 + }, + { + "epoch": 1.4663461538461537, + "grad_norm": 0.002348896026755423, + "learning_rate": 2.840605520926091e-07, + "logits/chosen": 0.7265625, + "logits/rejected": 0.98046875, + "logps/chosen": -181.0, + "logps/rejected": -304.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 20.875, + "rewards/rejected": -17.625, + "step": 1830 + }, + { + "epoch": 1.4743589743589745, + "grad_norm": 4.445170925547984e-05, + "learning_rate": 2.825764321757198e-07, + "logits/chosen": 0.78515625, + "logits/rejected": 0.93359375, + "logps/chosen": -172.0, + "logps/rejected": -302.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.515625, + "rewards/margins": 20.875, + "rewards/rejected": -17.375, + "step": 1840 + }, + { + "epoch": 1.482371794871795, + "grad_norm": 0.16403277766161206, + "learning_rate": 2.810923122588305e-07, + "logits/chosen": 0.7265625, + "logits/rejected": 1.0859375, + "logps/chosen": -162.0, + "logps/rejected": -300.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.390625, + "rewards/margins": 19.75, + "rewards/rejected": -16.375, + "step": 1850 + }, + { + "epoch": 1.4903846153846154, + "grad_norm": 0.0008420288269927029, + "learning_rate": 2.796081923419412e-07, + "logits/chosen": 0.71875, + "logits/rejected": 1.3125, + "logps/chosen": -197.0, + "logps/rejected": -324.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 21.5, + "rewards/rejected": -18.0, + "step": 1860 + }, + { + "epoch": 1.498397435897436, + "grad_norm": 0.00014053022075140328, + "learning_rate": 2.7812407242505194e-07, + "logits/chosen": 0.9921875, + "logits/rejected": 1.515625, + "logps/chosen": -160.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.828125, + "rewards/margins": 23.0, + "rewards/rejected": -19.125, + "step": 1870 + }, + { + "epoch": 1.5064102564102564, + "grad_norm": 8.433668468764786e-05, + "learning_rate": 2.7663995250816265e-07, + "logits/chosen": 1.0390625, + "logits/rejected": 1.09375, + "logps/chosen": -150.0, + "logps/rejected": -326.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.21875, + "rewards/margins": 22.25, + "rewards/rejected": -19.0, + "step": 1880 + }, + { + "epoch": 1.5144230769230769, + "grad_norm": 3.8086614833106377e-06, + "learning_rate": 2.7515583259127337e-07, + "logits/chosen": 0.5, + "logits/rejected": 1.0234375, + "logps/chosen": -216.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.765625, + "rewards/margins": 22.125, + "rewards/rejected": -18.375, + "step": 1890 + }, + { + "epoch": 1.5224358974358974, + "grad_norm": 5.226952675441339e-07, + "learning_rate": 2.736717126743841e-07, + "logits/chosen": 0.72265625, + "logits/rejected": 1.0859375, + "logps/chosen": -165.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 22.75, + "rewards/rejected": -19.0, + "step": 1900 + }, + { + "epoch": 1.530448717948718, + "grad_norm": 2.2091624737157388e-05, + "learning_rate": 2.721875927574948e-07, + "logits/chosen": 0.75, + "logits/rejected": 1.2578125, + "logps/chosen": -189.0, + "logps/rejected": -316.0, + "loss": 
0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.84375, + "rewards/margins": 22.125, + "rewards/rejected": -18.25, + "step": 1910 + }, + { + "epoch": 1.5384615384615383, + "grad_norm": 0.00010413244451900578, + "learning_rate": 2.707034728406055e-07, + "logits/chosen": 0.76171875, + "logits/rejected": 1.265625, + "logps/chosen": -182.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 21.5, + "rewards/rejected": -17.75, + "step": 1920 + }, + { + "epoch": 1.546474358974359, + "grad_norm": 5.596299324175904e-05, + "learning_rate": 2.692193529237162e-07, + "logits/chosen": 0.90234375, + "logits/rejected": 0.984375, + "logps/chosen": -145.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.515625, + "rewards/margins": 23.375, + "rewards/rejected": -19.875, + "step": 1930 + }, + { + "epoch": 1.5544871794871795, + "grad_norm": 9.505443984664321e-07, + "learning_rate": 2.6773523300682693e-07, + "logits/chosen": 0.8828125, + "logits/rejected": 1.28125, + "logps/chosen": -150.0, + "logps/rejected": -306.0, + "loss": 0.0029, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.15625, + "rewards/margins": 20.875, + "rewards/rejected": -17.75, + "step": 1940 + }, + { + "epoch": 1.5625, + "grad_norm": 7.9835406414024e-05, + "learning_rate": 2.6625111308993765e-07, + "logits/chosen": 0.78125, + "logits/rejected": 1.25, + "logps/chosen": -155.0, + "logps/rejected": -310.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.65625, + "rewards/margins": 22.125, + "rewards/rejected": -18.5, + "step": 1950 + }, + { + "epoch": 1.5705128205128205, + "grad_norm": 5.207840607481746e-06, + "learning_rate": 2.6476699317304836e-07, + "logits/chosen": 0.77734375, + "logits/rejected": 1.2734375, + "logps/chosen": -197.0, + "logps/rejected": -308.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.484375, + "rewards/margins": 21.25, + "rewards/rejected": -17.75, + "step": 1960 + }, + { + "epoch": 1.578525641025641, + "grad_norm": 0.0055069480205136, + "learning_rate": 2.6328287325615913e-07, + "logits/chosen": 0.55859375, + "logits/rejected": 0.9765625, + "logps/chosen": -181.0, + "logps/rejected": -310.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5625, + "rewards/margins": 20.875, + "rewards/rejected": -17.25, + "step": 1970 + }, + { + "epoch": 1.5865384615384617, + "grad_norm": 1.396497021073787e-05, + "learning_rate": 2.6179875333926984e-07, + "logits/chosen": 0.890625, + "logits/rejected": 1.0546875, + "logps/chosen": -162.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.890625, + "rewards/margins": 23.375, + "rewards/rejected": -19.5, + "step": 1980 + }, + { + "epoch": 1.594551282051282, + "grad_norm": 0.0017364799756809, + "learning_rate": 2.603146334223805e-07, + "logits/chosen": 0.74609375, + "logits/rejected": 1.09375, + "logps/chosen": -167.0, + "logps/rejected": -302.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.296875, + "rewards/margins": 20.5, + "rewards/rejected": -17.25, + "step": 1990 + }, + { + "epoch": 1.6025641025641026, + "grad_norm": 1.0811679994728376e-05, + "learning_rate": 2.588305135054912e-07, + "logits/chosen": 0.7890625, + "logits/rejected": 1.0390625, + "logps/chosen": -190.0, + "logps/rejected": -322.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 4.3125, + "rewards/margins": 23.125, + "rewards/rejected": -18.75, + "step": 
2000 + }, + { + "epoch": 1.6105769230769231, + "grad_norm": 1.0141558630107686e-07, + "learning_rate": 2.5734639358860193e-07, + "logits/chosen": 0.80078125, + "logits/rejected": 1.421875, + "logps/chosen": -174.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.765625, + "rewards/margins": 21.625, + "rewards/rejected": -17.875, + "step": 2010 + }, + { + "epoch": 1.6185897435897436, + "grad_norm": 3.9088308067055224e-06, + "learning_rate": 2.5586227367171264e-07, + "logits/chosen": 0.86328125, + "logits/rejected": 1.15625, + "logps/chosen": -163.0, + "logps/rejected": -302.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 21.0, + "rewards/rejected": -17.75, + "step": 2020 + }, + { + "epoch": 1.626602564102564, + "grad_norm": 4.13722853892595e-06, + "learning_rate": 2.5437815375482335e-07, + "logits/chosen": 0.84375, + "logits/rejected": 1.0703125, + "logps/chosen": -168.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 22.0, + "rewards/rejected": -18.75, + "step": 2030 + }, + { + "epoch": 1.6346153846153846, + "grad_norm": 3.401296459983001e-05, + "learning_rate": 2.528940338379341e-07, + "logits/chosen": 1.0234375, + "logits/rejected": 1.1875, + "logps/chosen": -159.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.859375, + "rewards/margins": 23.0, + "rewards/rejected": -19.125, + "step": 2040 + }, + { + "epoch": 1.6426282051282053, + "grad_norm": 2.957293101803746e-07, + "learning_rate": 2.5140991392104483e-07, + "logits/chosen": 0.87109375, + "logits/rejected": 1.28125, + "logps/chosen": -97.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.203125, + "rewards/margins": 22.625, + "rewards/rejected": -19.375, + "step": 2050 + }, + { + "epoch": 1.6506410256410255, + "grad_norm": 5.9562249969748895e-06, + "learning_rate": 2.4992579400415555e-07, + "logits/chosen": 0.796875, + "logits/rejected": 1.0390625, + "logps/chosen": -165.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 21.25, + "rewards/rejected": -17.5, + "step": 2060 + }, + { + "epoch": 1.6586538461538463, + "grad_norm": 6.175150368480905e-06, + "learning_rate": 2.4844167408726626e-07, + "logits/chosen": 0.984375, + "logits/rejected": 1.4375, + "logps/chosen": -160.0, + "logps/rejected": -324.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 22.125, + "rewards/rejected": -18.25, + "step": 2070 + }, + { + "epoch": 1.6666666666666665, + "grad_norm": 2.6561524482376626e-05, + "learning_rate": 2.469575541703769e-07, + "logits/chosen": 0.83203125, + "logits/rejected": 1.1796875, + "logps/chosen": -169.0, + "logps/rejected": -322.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 22.625, + "rewards/rejected": -19.0, + "step": 2080 + }, + { + "epoch": 1.6746794871794872, + "grad_norm": 4.068151187669672e-06, + "learning_rate": 2.454734342534877e-07, + "logits/chosen": 0.96875, + "logits/rejected": 1.171875, + "logps/chosen": -184.0, + "logps/rejected": -298.0, + "loss": 0.0034, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 20.625, + "rewards/rejected": -16.875, + "step": 2090 + }, + { + "epoch": 1.6826923076923077, + "grad_norm": 2.6204722144054077e-05, + "learning_rate": 
2.439893143365984e-07, + "logits/chosen": 1.015625, + "logits/rejected": 1.296875, + "logps/chosen": -155.0, + "logps/rejected": -314.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 22.25, + "rewards/rejected": -18.625, + "step": 2100 + }, + { + "epoch": 1.6907051282051282, + "grad_norm": 2.398210733527027e-06, + "learning_rate": 2.425051944197091e-07, + "logits/chosen": 0.8984375, + "logits/rejected": 1.1640625, + "logps/chosen": -142.0, + "logps/rejected": -338.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.390625, + "rewards/margins": 23.375, + "rewards/rejected": -20.0, + "step": 2110 + }, + { + "epoch": 1.6987179487179487, + "grad_norm": 4.12694708820085e-06, + "learning_rate": 2.4102107450281983e-07, + "logits/chosen": 0.8984375, + "logits/rejected": 1.3359375, + "logps/chosen": -160.0, + "logps/rejected": -308.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.03125, + "rewards/margins": 19.75, + "rewards/rejected": -16.625, + "step": 2120 + }, + { + "epoch": 1.7067307692307692, + "grad_norm": 0.00013371742562856578, + "learning_rate": 2.3953695458593054e-07, + "logits/chosen": 0.86328125, + "logits/rejected": 1.109375, + "logps/chosen": -146.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 23.75, + "rewards/rejected": -19.875, + "step": 2130 + }, + { + "epoch": 1.7147435897435899, + "grad_norm": 7.636681790511307e-06, + "learning_rate": 2.3805283466904126e-07, + "logits/chosen": 1.0078125, + "logits/rejected": 1.1796875, + "logps/chosen": -166.0, + "logps/rejected": -310.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 21.625, + "rewards/rejected": -17.75, + "step": 2140 + }, + { + "epoch": 1.7227564102564101, + "grad_norm": 8.958009719967443e-07, + "learning_rate": 2.3656871475215194e-07, + "logits/chosen": 0.70703125, + "logits/rejected": 1.1875, + "logps/chosen": -154.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 22.5, + "rewards/rejected": -19.25, + "step": 2150 + }, + { + "epoch": 1.7307692307692308, + "grad_norm": 3.747660209974031e-05, + "learning_rate": 2.3508459483526268e-07, + "logits/chosen": 0.9453125, + "logits/rejected": 1.390625, + "logps/chosen": -186.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.84375, + "rewards/margins": 22.125, + "rewards/rejected": -18.25, + "step": 2160 + }, + { + "epoch": 1.7387820512820513, + "grad_norm": 4.869196145111484e-07, + "learning_rate": 2.336004749183734e-07, + "logits/chosen": 0.81640625, + "logits/rejected": 1.140625, + "logps/chosen": -149.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.328125, + "rewards/margins": 22.75, + "rewards/rejected": -19.375, + "step": 2170 + }, + { + "epoch": 1.7467948717948718, + "grad_norm": 0.00026582023320058276, + "learning_rate": 2.321163550014841e-07, + "logits/chosen": 0.796875, + "logits/rejected": 1.3046875, + "logps/chosen": -133.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.15625, + "rewards/margins": 22.75, + "rewards/rejected": -19.625, + "step": 2180 + }, + { + "epoch": 1.7548076923076923, + "grad_norm": 2.8032704053085966e-06, + "learning_rate": 2.3063223508459482e-07, + "logits/chosen": 0.8125, + "logits/rejected": 1.1171875, + "logps/chosen": -172.0, + 
"logps/rejected": -318.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 22.375, + "rewards/rejected": -19.0, + "step": 2190 + }, + { + "epoch": 1.7628205128205128, + "grad_norm": 0.0028628851089617393, + "learning_rate": 2.2914811516770554e-07, + "logits/chosen": 0.859375, + "logits/rejected": 1.1796875, + "logps/chosen": -185.0, + "logps/rejected": -300.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 19.875, + "rewards/rejected": -16.625, + "step": 2200 + }, + { + "epoch": 1.7708333333333335, + "grad_norm": 0.0001342614273603189, + "learning_rate": 2.2766399525081625e-07, + "logits/chosen": 1.0859375, + "logits/rejected": 1.1640625, + "logps/chosen": -177.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.875, + "rewards/margins": 21.75, + "rewards/rejected": -17.875, + "step": 2210 + }, + { + "epoch": 1.7788461538461537, + "grad_norm": 4.955335994263196e-07, + "learning_rate": 2.2617987533392696e-07, + "logits/chosen": 0.828125, + "logits/rejected": 1.21875, + "logps/chosen": -158.0, + "logps/rejected": -318.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 21.5, + "rewards/rejected": -18.25, + "step": 2220 + }, + { + "epoch": 1.7868589743589745, + "grad_norm": 0.0014087673231885866, + "learning_rate": 2.246957554170377e-07, + "logits/chosen": 0.7265625, + "logits/rejected": 1.03125, + "logps/chosen": -188.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 21.5, + "rewards/rejected": -18.125, + "step": 2230 + }, + { + "epoch": 1.7948717948717947, + "grad_norm": 0.00019680490630025088, + "learning_rate": 2.232116355001484e-07, + "logits/chosen": 0.7578125, + "logits/rejected": 1.1875, + "logps/chosen": -167.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.21875, + "rewards/margins": 21.25, + "rewards/rejected": -18.0, + "step": 2240 + }, + { + "epoch": 1.8028846153846154, + "grad_norm": 1.3924207072166965e-06, + "learning_rate": 2.217275155832591e-07, + "logits/chosen": 0.72265625, + "logits/rejected": 0.98046875, + "logps/chosen": -162.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 4.03125, + "rewards/margins": 23.75, + "rewards/rejected": -19.625, + "step": 2250 + }, + { + "epoch": 1.810897435897436, + "grad_norm": 2.7880513613496112e-05, + "learning_rate": 2.2024339566636982e-07, + "logits/chosen": 1.046875, + "logits/rejected": 1.3671875, + "logps/chosen": -152.0, + "logps/rejected": -288.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.796875, + "rewards/margins": 19.875, + "rewards/rejected": -16.125, + "step": 2260 + }, + { + "epoch": 1.8189102564102564, + "grad_norm": 2.01578072742099e-05, + "learning_rate": 2.1875927574948056e-07, + "logits/chosen": 0.9375, + "logits/rejected": 1.015625, + "logps/chosen": -170.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5625, + "rewards/margins": 22.25, + "rewards/rejected": -18.75, + "step": 2270 + }, + { + "epoch": 1.8269230769230769, + "grad_norm": 0.0001453323384470551, + "learning_rate": 2.1727515583259127e-07, + "logits/chosen": 1.0390625, + "logits/rejected": 1.3671875, + "logps/chosen": -159.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 20.75, 
+ "rewards/rejected": -17.375, + "step": 2280 + }, + { + "epoch": 1.8349358974358974, + "grad_norm": 4.835283260495599e-06, + "learning_rate": 2.1579103591570196e-07, + "logits/chosen": 0.8359375, + "logits/rejected": 1.375, + "logps/chosen": -153.0, + "logps/rejected": -314.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 22.125, + "rewards/rejected": -18.25, + "step": 2290 + }, + { + "epoch": 1.842948717948718, + "grad_norm": 9.787977293423134e-05, + "learning_rate": 2.143069159988127e-07, + "logits/chosen": 1.0546875, + "logits/rejected": 1.3671875, + "logps/chosen": -180.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.59375, + "rewards/margins": 21.375, + "rewards/rejected": -17.75, + "step": 2300 + }, + { + "epoch": 1.8509615384615383, + "grad_norm": 0.003511562539920723, + "learning_rate": 2.128227960819234e-07, + "logits/chosen": 0.8515625, + "logits/rejected": 1.2890625, + "logps/chosen": -156.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.21875, + "rewards/margins": 23.5, + "rewards/rejected": -20.25, + "step": 2310 + }, + { + "epoch": 1.858974358974359, + "grad_norm": 0.0017406830607479289, + "learning_rate": 2.1133867616503413e-07, + "logits/chosen": 0.84375, + "logits/rejected": 1.4375, + "logps/chosen": -135.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 22.875, + "rewards/rejected": -19.375, + "step": 2320 + }, + { + "epoch": 1.8669871794871795, + "grad_norm": 1.757377642753892e-05, + "learning_rate": 2.0985455624814487e-07, + "logits/chosen": 0.66015625, + "logits/rejected": 0.60546875, + "logps/chosen": -167.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.734375, + "rewards/margins": 21.125, + "rewards/rejected": -17.375, + "step": 2330 + }, + { + "epoch": 1.875, + "grad_norm": 9.585856156991564e-05, + "learning_rate": 2.0837043633125555e-07, + "logits/chosen": 0.7265625, + "logits/rejected": 1.0859375, + "logps/chosen": -189.0, + "logps/rejected": -318.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 21.875, + "rewards/rejected": -18.125, + "step": 2340 + }, + { + "epoch": 1.8830128205128205, + "grad_norm": 2.717371131495447e-06, + "learning_rate": 2.0688631641436627e-07, + "logits/chosen": 0.68359375, + "logits/rejected": 0.91015625, + "logps/chosen": -173.0, + "logps/rejected": -310.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.953125, + "rewards/margins": 21.875, + "rewards/rejected": -17.875, + "step": 2350 + }, + { + "epoch": 1.891025641025641, + "grad_norm": 2.557631865211708e-07, + "learning_rate": 2.0540219649747698e-07, + "logits/chosen": 0.9140625, + "logits/rejected": 1.53125, + "logps/chosen": -175.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 23.25, + "rewards/rejected": -19.375, + "step": 2360 + }, + { + "epoch": 1.8990384615384617, + "grad_norm": 8.115167511795904e-05, + "learning_rate": 2.0391807658058772e-07, + "logits/chosen": 0.91015625, + "logits/rejected": 1.2109375, + "logps/chosen": -186.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.953125, + "rewards/margins": 22.125, + "rewards/rejected": -18.25, + "step": 2370 + }, + { + "epoch": 1.907051282051282, + "grad_norm": 
5.5980367488511575e-06, + "learning_rate": 2.024339566636984e-07, + "logits/chosen": 0.52734375, + "logits/rejected": 1.0859375, + "logps/chosen": -178.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 22.375, + "rewards/rejected": -18.75, + "step": 2380 + }, + { + "epoch": 1.9150641025641026, + "grad_norm": 0.0025820105156682694, + "learning_rate": 2.0094983674680912e-07, + "logits/chosen": 0.8828125, + "logits/rejected": 1.3359375, + "logps/chosen": -150.0, + "logps/rejected": -300.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.34375, + "rewards/margins": 20.875, + "rewards/rejected": -17.5, + "step": 2390 + }, + { + "epoch": 1.9230769230769231, + "grad_norm": 7.136824036042445e-07, + "learning_rate": 1.9946571682991986e-07, + "logits/chosen": 0.78125, + "logits/rejected": 1.140625, + "logps/chosen": -149.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.890625, + "rewards/margins": 22.5, + "rewards/rejected": -19.625, + "step": 2400 + }, + { + "epoch": 1.9310897435897436, + "grad_norm": 0.0003371284846547073, + "learning_rate": 1.9798159691303057e-07, + "logits/chosen": 0.7890625, + "logits/rejected": 1.0078125, + "logps/chosen": -182.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.953125, + "rewards/margins": 23.0, + "rewards/rejected": -20.125, + "step": 2410 + }, + { + "epoch": 1.939102564102564, + "grad_norm": 7.032612597839408e-05, + "learning_rate": 1.964974769961413e-07, + "logits/chosen": 0.8125, + "logits/rejected": 1.0234375, + "logps/chosen": -155.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.890625, + "rewards/margins": 22.5, + "rewards/rejected": -19.625, + "step": 2420 + }, + { + "epoch": 1.9471153846153846, + "grad_norm": 0.00020818829472435283, + "learning_rate": 1.9501335707925197e-07, + "logits/chosen": 0.87890625, + "logits/rejected": 1.171875, + "logps/chosen": -192.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 23.125, + "rewards/rejected": -19.625, + "step": 2430 + }, + { + "epoch": 1.9551282051282053, + "grad_norm": 8.466910776291965e-07, + "learning_rate": 1.9352923716236271e-07, + "logits/chosen": 0.7421875, + "logits/rejected": 1.375, + "logps/chosen": -212.0, + "logps/rejected": -330.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5, + "rewards/margins": 22.125, + "rewards/rejected": -18.625, + "step": 2440 + }, + { + "epoch": 1.9631410256410255, + "grad_norm": 3.7142533186252094e-06, + "learning_rate": 1.9204511724547343e-07, + "logits/chosen": 0.72265625, + "logits/rejected": 1.265625, + "logps/chosen": -172.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.875, + "rewards/margins": 23.625, + "rewards/rejected": -19.75, + "step": 2450 + }, + { + "epoch": 1.9711538461538463, + "grad_norm": 1.4211106326699326e-06, + "learning_rate": 1.9056099732858414e-07, + "logits/chosen": 0.9140625, + "logits/rejected": 1.2421875, + "logps/chosen": -160.0, + "logps/rejected": -332.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.84375, + "rewards/margins": 24.0, + "rewards/rejected": -20.125, + "step": 2460 + }, + { + "epoch": 1.9791666666666665, + "grad_norm": 7.012330169691276e-07, + "learning_rate": 1.8907687741169488e-07, + "logits/chosen": 0.52734375, + 
"logits/rejected": 1.1640625, + "logps/chosen": -200.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 22.0, + "rewards/rejected": -18.5, + "step": 2470 + }, + { + "epoch": 1.9871794871794872, + "grad_norm": 0.00015207925609359054, + "learning_rate": 1.8759275749480557e-07, + "logits/chosen": 0.921875, + "logits/rejected": 1.421875, + "logps/chosen": -203.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.296875, + "rewards/margins": 22.5, + "rewards/rejected": -19.25, + "step": 2480 + }, + { + "epoch": 1.9951923076923077, + "grad_norm": 1.0755143190145868e-06, + "learning_rate": 1.8610863757791628e-07, + "logits/chosen": 0.84375, + "logits/rejected": 1.3515625, + "logps/chosen": -168.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 23.75, + "rewards/rejected": -20.0, + "step": 2490 + }, + { + "epoch": 2.0, + "eval_logits/chosen": 0.84375, + "eval_logits/rejected": 1.3984375, + "eval_logps/chosen": -169.0, + "eval_logps/rejected": -334.0, + "eval_loss": 1.841681842051912e-05, + "eval_rewards/accuracies": 1.0, + "eval_rewards/chosen": 3.4375, + "eval_rewards/margins": 22.5, + "eval_rewards/rejected": -19.125, + "eval_runtime": 25.8897, + "eval_samples_per_second": 7.686, + "eval_steps_per_second": 0.966, + "step": 2496 + }, + { + "epoch": 2.003205128205128, + "grad_norm": 4.521723680059775e-05, + "learning_rate": 1.84624517661027e-07, + "logits/chosen": 0.8828125, + "logits/rejected": 1.203125, + "logps/chosen": -166.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 22.875, + "rewards/rejected": -19.25, + "step": 2500 + }, + { + "epoch": 2.011217948717949, + "grad_norm": 3.030267966817534e-06, + "learning_rate": 1.8314039774413774e-07, + "logits/chosen": 0.640625, + "logits/rejected": 1.1171875, + "logps/chosen": -195.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 23.125, + "rewards/rejected": -19.875, + "step": 2510 + }, + { + "epoch": 2.019230769230769, + "grad_norm": 6.205803823998353e-07, + "learning_rate": 1.8165627782724842e-07, + "logits/chosen": 0.796875, + "logits/rejected": 1.1328125, + "logps/chosen": -141.0, + "logps/rejected": -340.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.3125, + "rewards/margins": 23.875, + "rewards/rejected": -20.5, + "step": 2520 + }, + { + "epoch": 2.02724358974359, + "grad_norm": 1.5474883916264918e-05, + "learning_rate": 1.8017215791035914e-07, + "logits/chosen": 0.87109375, + "logits/rejected": 1.375, + "logps/chosen": -172.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.21875, + "rewards/margins": 23.25, + "rewards/rejected": -20.125, + "step": 2530 + }, + { + "epoch": 2.03525641025641, + "grad_norm": 3.136476138411595e-06, + "learning_rate": 1.7868803799346988e-07, + "logits/chosen": 0.9140625, + "logits/rejected": 1.2578125, + "logps/chosen": -159.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.890625, + "rewards/margins": 23.0, + "rewards/rejected": -19.125, + "step": 2540 + }, + { + "epoch": 2.043269230769231, + "grad_norm": 2.0185773664202382e-07, + "learning_rate": 1.772039180765806e-07, + "logits/chosen": 0.8203125, + "logits/rejected": 1.46875, + "logps/chosen": -157.0, + 
"logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.96875, + "rewards/margins": 22.0, + "rewards/rejected": -18.0, + "step": 2550 + }, + { + "epoch": 2.051282051282051, + "grad_norm": 6.762579805129387e-06, + "learning_rate": 1.757197981596913e-07, + "logits/chosen": 0.6953125, + "logits/rejected": 1.2578125, + "logps/chosen": -175.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.828125, + "rewards/margins": 22.375, + "rewards/rejected": -18.5, + "step": 2560 + }, + { + "epoch": 2.059294871794872, + "grad_norm": 1.272394096308946e-06, + "learning_rate": 1.74235678242802e-07, + "logits/chosen": 0.9921875, + "logits/rejected": 1.2734375, + "logps/chosen": -168.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 23.125, + "rewards/rejected": -19.5, + "step": 2570 + }, + { + "epoch": 2.0673076923076925, + "grad_norm": 0.00022224801984980412, + "learning_rate": 1.7275155832591273e-07, + "logits/chosen": 0.86328125, + "logits/rejected": 1.2734375, + "logps/chosen": -124.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.484375, + "rewards/margins": 23.5, + "rewards/rejected": -20.0, + "step": 2580 + }, + { + "epoch": 2.0753205128205128, + "grad_norm": 5.872515304133134e-06, + "learning_rate": 1.7126743840902344e-07, + "logits/chosen": 0.9140625, + "logits/rejected": 1.15625, + "logps/chosen": -160.0, + "logps/rejected": -318.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.734375, + "rewards/margins": 22.5, + "rewards/rejected": -18.875, + "step": 2590 + }, + { + "epoch": 2.0833333333333335, + "grad_norm": 0.0015141429378734963, + "learning_rate": 1.6978331849213416e-07, + "logits/chosen": 0.7578125, + "logits/rejected": 1.1484375, + "logps/chosen": -183.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.328125, + "rewards/margins": 23.125, + "rewards/rejected": -19.875, + "step": 2600 + }, + { + "epoch": 2.0913461538461537, + "grad_norm": 2.991210518482803e-06, + "learning_rate": 1.6829919857524487e-07, + "logits/chosen": 0.81640625, + "logits/rejected": 0.8671875, + "logps/chosen": -140.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.96875, + "rewards/margins": 23.5, + "rewards/rejected": -20.5, + "step": 2610 + }, + { + "epoch": 2.0993589743589745, + "grad_norm": 5.742496354978091e-06, + "learning_rate": 1.6681507865835558e-07, + "logits/chosen": 0.6953125, + "logits/rejected": 1.3203125, + "logps/chosen": -186.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 23.125, + "rewards/rejected": -19.875, + "step": 2620 + }, + { + "epoch": 2.1073717948717947, + "grad_norm": 3.757183821571958e-05, + "learning_rate": 1.653309587414663e-07, + "logits/chosen": 0.98046875, + "logits/rejected": 1.3046875, + "logps/chosen": -150.0, + "logps/rejected": -316.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 21.875, + "rewards/rejected": -18.375, + "step": 2630 + }, + { + "epoch": 2.1153846153846154, + "grad_norm": 1.1309302661788921e-05, + "learning_rate": 1.63846838824577e-07, + "logits/chosen": 0.796875, + "logits/rejected": 1.21875, + "logps/chosen": -195.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.015625, + 
"rewards/margins": 22.125, + "rewards/rejected": -19.125, + "step": 2640 + }, + { + "epoch": 2.123397435897436, + "grad_norm": 0.00011609396836253147, + "learning_rate": 1.6236271890768775e-07, + "logits/chosen": 0.8828125, + "logits/rejected": 1.578125, + "logps/chosen": -143.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 23.125, + "rewards/rejected": -19.625, + "step": 2650 + }, + { + "epoch": 2.1314102564102564, + "grad_norm": 2.37606345324713e-07, + "learning_rate": 1.6087859899079844e-07, + "logits/chosen": 0.703125, + "logits/rejected": 1.40625, + "logps/chosen": -175.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.71875, + "rewards/margins": 24.0, + "rewards/rejected": -20.25, + "step": 2660 + }, + { + "epoch": 2.139423076923077, + "grad_norm": 1.7790769529114564e-05, + "learning_rate": 1.5939447907390915e-07, + "logits/chosen": 0.59765625, + "logits/rejected": 1.21875, + "logps/chosen": -207.0, + "logps/rejected": -352.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.453125, + "rewards/margins": 23.625, + "rewards/rejected": -20.25, + "step": 2670 + }, + { + "epoch": 2.1474358974358974, + "grad_norm": 1.2324246884581975e-07, + "learning_rate": 1.579103591570199e-07, + "logits/chosen": 0.9296875, + "logits/rejected": 1.3828125, + "logps/chosen": -185.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.34375, + "rewards/margins": 23.125, + "rewards/rejected": -19.75, + "step": 2680 + }, + { + "epoch": 2.155448717948718, + "grad_norm": 8.415390201667563e-05, + "learning_rate": 1.564262392401306e-07, + "logits/chosen": 0.828125, + "logits/rejected": 1.421875, + "logps/chosen": -173.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5625, + "rewards/margins": 24.125, + "rewards/rejected": -20.5, + "step": 2690 + }, + { + "epoch": 2.1634615384615383, + "grad_norm": 0.0011339204404605804, + "learning_rate": 1.549421193232413e-07, + "logits/chosen": 0.82421875, + "logits/rejected": 1.15625, + "logps/chosen": -168.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 23.875, + "rewards/rejected": -20.25, + "step": 2700 + }, + { + "epoch": 2.171474358974359, + "grad_norm": 6.851096339481569e-06, + "learning_rate": 1.5345799940635203e-07, + "logits/chosen": 0.53125, + "logits/rejected": 1.2734375, + "logps/chosen": -197.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.328125, + "rewards/margins": 24.25, + "rewards/rejected": -20.875, + "step": 2710 + }, + { + "epoch": 2.1794871794871793, + "grad_norm": 0.0003726374461810356, + "learning_rate": 1.5197387948946275e-07, + "logits/chosen": 1.03125, + "logits/rejected": 1.453125, + "logps/chosen": -169.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.8125, + "rewards/margins": 22.5, + "rewards/rejected": -18.75, + "step": 2720 + }, + { + "epoch": 2.1875, + "grad_norm": 2.8940339945841385e-05, + "learning_rate": 1.5048975957257346e-07, + "logits/chosen": 0.78125, + "logits/rejected": 1.1171875, + "logps/chosen": -168.0, + "logps/rejected": -340.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.96875, + "rewards/margins": 23.125, + "rewards/rejected": -20.125, + "step": 2730 + }, + { + "epoch": 2.1955128205128207, + "grad_norm": 
0.002859780208342844, + "learning_rate": 1.4900563965568417e-07, + "logits/chosen": 0.8671875, + "logits/rejected": 1.4140625, + "logps/chosen": -171.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.453125, + "rewards/margins": 21.0, + "rewards/rejected": -17.625, + "step": 2740 + }, + { + "epoch": 2.203525641025641, + "grad_norm": 8.642451938502949e-05, + "learning_rate": 1.4752151973879489e-07, + "logits/chosen": 0.92578125, + "logits/rejected": 1.0703125, + "logps/chosen": -152.0, + "logps/rejected": -322.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.625, + "rewards/margins": 23.125, + "rewards/rejected": -19.5, + "step": 2750 + }, + { + "epoch": 2.2115384615384617, + "grad_norm": 0.004973797903281045, + "learning_rate": 1.460373998219056e-07, + "logits/chosen": 1.171875, + "logits/rejected": 1.5859375, + "logps/chosen": -154.0, + "logps/rejected": -312.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.296875, + "rewards/margins": 22.625, + "rewards/rejected": -19.25, + "step": 2760 + }, + { + "epoch": 2.219551282051282, + "grad_norm": 0.0007041585520470051, + "learning_rate": 1.4455327990501631e-07, + "logits/chosen": 0.796875, + "logits/rejected": 1.015625, + "logps/chosen": -140.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.421875, + "rewards/margins": 24.75, + "rewards/rejected": -21.25, + "step": 2770 + }, + { + "epoch": 2.2275641025641026, + "grad_norm": 0.00045000223905260884, + "learning_rate": 1.4306915998812705e-07, + "logits/chosen": 1.0625, + "logits/rejected": 1.4375, + "logps/chosen": -171.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.328125, + "rewards/margins": 23.375, + "rewards/rejected": -20.0, + "step": 2780 + }, + { + "epoch": 2.235576923076923, + "grad_norm": 3.7708664164834145e-07, + "learning_rate": 1.4158504007123777e-07, + "logits/chosen": 0.921875, + "logits/rejected": 1.34375, + "logps/chosen": -188.0, + "logps/rejected": -308.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.359375, + "rewards/margins": 22.0, + "rewards/rejected": -18.75, + "step": 2790 + }, + { + "epoch": 2.2435897435897436, + "grad_norm": 2.820076649705414e-06, + "learning_rate": 1.4010092015434845e-07, + "logits/chosen": 0.765625, + "logits/rejected": 1.375, + "logps/chosen": -211.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 22.625, + "rewards/rejected": -19.0, + "step": 2800 + }, + { + "epoch": 2.251602564102564, + "grad_norm": 7.476346576947421e-06, + "learning_rate": 1.3861680023745917e-07, + "logits/chosen": 0.96875, + "logits/rejected": 1.171875, + "logps/chosen": -150.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.984375, + "rewards/margins": 23.5, + "rewards/rejected": -20.5, + "step": 2810 + }, + { + "epoch": 2.2596153846153846, + "grad_norm": 7.318508636096753e-07, + "learning_rate": 1.371326803205699e-07, + "logits/chosen": 0.94140625, + "logits/rejected": 1.4296875, + "logps/chosen": -164.0, + "logps/rejected": -322.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 22.625, + "rewards/rejected": -19.125, + "step": 2820 + }, + { + "epoch": 2.2676282051282053, + "grad_norm": 4.8320214702453866e-05, + "learning_rate": 1.3564856040368062e-07, + "logits/chosen": 0.94140625, + "logits/rejected": 
1.21875, + "logps/chosen": -160.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5, + "rewards/margins": 22.75, + "rewards/rejected": -19.25, + "step": 2830 + }, + { + "epoch": 2.2756410256410255, + "grad_norm": 5.030183317968072e-06, + "learning_rate": 1.341644404867913e-07, + "logits/chosen": 0.76953125, + "logits/rejected": 1.0703125, + "logps/chosen": -185.0, + "logps/rejected": -328.0, + "loss": 0.0005, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.671875, + "rewards/margins": 23.0, + "rewards/rejected": -19.25, + "step": 2840 + }, + { + "epoch": 2.2836538461538463, + "grad_norm": 0.00014825087080426772, + "learning_rate": 1.3268032056990205e-07, + "logits/chosen": 0.78515625, + "logits/rejected": 1.3203125, + "logps/chosen": -181.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.796875, + "rewards/margins": 23.5, + "rewards/rejected": -19.75, + "step": 2850 + }, + { + "epoch": 2.2916666666666665, + "grad_norm": 1.3011868588303958e-06, + "learning_rate": 1.3119620065301276e-07, + "logits/chosen": 1.0625, + "logits/rejected": 1.3359375, + "logps/chosen": -146.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.09375, + "rewards/margins": 23.25, + "rewards/rejected": -20.125, + "step": 2860 + }, + { + "epoch": 2.2996794871794872, + "grad_norm": 4.246488981116065e-06, + "learning_rate": 1.2971208073612347e-07, + "logits/chosen": 0.86328125, + "logits/rejected": 1.4921875, + "logps/chosen": -184.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 22.875, + "rewards/rejected": -19.25, + "step": 2870 + }, + { + "epoch": 2.3076923076923075, + "grad_norm": 7.270042186092499e-06, + "learning_rate": 1.282279608192342e-07, + "logits/chosen": 0.66015625, + "logits/rejected": 1.390625, + "logps/chosen": -144.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.203125, + "rewards/margins": 23.75, + "rewards/rejected": -20.5, + "step": 2880 + }, + { + "epoch": 2.315705128205128, + "grad_norm": 1.4457369601091267e-08, + "learning_rate": 1.267438409023449e-07, + "logits/chosen": 1.0703125, + "logits/rejected": 1.296875, + "logps/chosen": -122.5, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.015625, + "rewards/margins": 23.375, + "rewards/rejected": -20.25, + "step": 2890 + }, + { + "epoch": 2.323717948717949, + "grad_norm": 1.2540641729400349e-06, + "learning_rate": 1.2525972098545562e-07, + "logits/chosen": 0.98828125, + "logits/rejected": 1.1484375, + "logps/chosen": -187.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.640625, + "rewards/margins": 22.5, + "rewards/rejected": -18.875, + "step": 2900 + }, + { + "epoch": 2.331730769230769, + "grad_norm": 1.0058948099975264e-05, + "learning_rate": 1.2377560106856633e-07, + "logits/chosen": 0.9140625, + "logits/rejected": 1.296875, + "logps/chosen": -146.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.234375, + "rewards/margins": 23.75, + "rewards/rejected": -20.625, + "step": 2910 + }, + { + "epoch": 2.33974358974359, + "grad_norm": 3.2768050524307317e-06, + "learning_rate": 1.2229148115167704e-07, + "logits/chosen": 0.87109375, + "logits/rejected": 1.0, + "logps/chosen": -145.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + 
"rewards/chosen": 3.3125, + "rewards/margins": 23.75, + "rewards/rejected": -20.5, + "step": 2920 + }, + { + "epoch": 2.34775641025641, + "grad_norm": 8.279411059572999e-06, + "learning_rate": 1.2080736123478776e-07, + "logits/chosen": 0.71484375, + "logits/rejected": 1.296875, + "logps/chosen": -183.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.734375, + "rewards/margins": 22.625, + "rewards/rejected": -19.875, + "step": 2930 + }, + { + "epoch": 2.355769230769231, + "grad_norm": 5.097273243887312e-06, + "learning_rate": 1.1932324131789847e-07, + "logits/chosen": 0.9375, + "logits/rejected": 1.4453125, + "logps/chosen": -161.0, + "logps/rejected": -360.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.15625, + "rewards/margins": 24.75, + "rewards/rejected": -21.625, + "step": 2940 + }, + { + "epoch": 2.363782051282051, + "grad_norm": 0.001364815117768273, + "learning_rate": 1.178391214010092e-07, + "logits/chosen": 0.82421875, + "logits/rejected": 1.0546875, + "logps/chosen": -196.0, + "logps/rejected": -340.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5, + "rewards/margins": 22.5, + "rewards/rejected": -18.875, + "step": 2950 + }, + { + "epoch": 2.371794871794872, + "grad_norm": 0.00013766237352927737, + "learning_rate": 1.1635500148411991e-07, + "logits/chosen": 0.828125, + "logits/rejected": 1.53125, + "logps/chosen": -160.0, + "logps/rejected": -324.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 22.875, + "rewards/rejected": -19.5, + "step": 2960 + }, + { + "epoch": 2.3798076923076925, + "grad_norm": 7.072038044334629e-06, + "learning_rate": 1.1487088156723062e-07, + "logits/chosen": 0.97265625, + "logits/rejected": 1.4609375, + "logps/chosen": -148.0, + "logps/rejected": -318.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.1875, + "rewards/margins": 22.75, + "rewards/rejected": -19.625, + "step": 2970 + }, + { + "epoch": 2.3878205128205128, + "grad_norm": 1.434535069439472e-06, + "learning_rate": 1.1338676165034135e-07, + "logits/chosen": 0.87890625, + "logits/rejected": 0.93359375, + "logps/chosen": -147.0, + "logps/rejected": -342.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.203125, + "rewards/margins": 22.75, + "rewards/rejected": -19.5, + "step": 2980 + }, + { + "epoch": 2.3958333333333335, + "grad_norm": 0.0003150427679969826, + "learning_rate": 1.1190264173345205e-07, + "logits/chosen": 0.78125, + "logits/rejected": 1.0703125, + "logps/chosen": -185.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 22.875, + "rewards/rejected": -19.5, + "step": 2990 + }, + { + "epoch": 2.4038461538461537, + "grad_norm": 7.914823589207877e-05, + "learning_rate": 1.1041852181656278e-07, + "logits/chosen": 0.9296875, + "logits/rejected": 1.421875, + "logps/chosen": -151.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 23.375, + "rewards/rejected": -19.75, + "step": 3000 + }, + { + "epoch": 2.4118589743589745, + "grad_norm": 0.00026905024279143275, + "learning_rate": 1.0893440189967348e-07, + "logits/chosen": 0.76171875, + "logits/rejected": 1.34375, + "logps/chosen": -161.0, + "logps/rejected": -354.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 24.5, + "rewards/rejected": -21.125, + "step": 3010 + }, + { + 
"epoch": 2.4198717948717947, + "grad_norm": 2.5621741265848455e-06, + "learning_rate": 1.074502819827842e-07, + "logits/chosen": 0.83203125, + "logits/rejected": 1.3515625, + "logps/chosen": -155.0, + "logps/rejected": -352.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 23.25, + "rewards/rejected": -19.875, + "step": 3020 + }, + { + "epoch": 2.4278846153846154, + "grad_norm": 0.0006476017033990356, + "learning_rate": 1.0596616206589493e-07, + "logits/chosen": 0.83203125, + "logits/rejected": 1.3046875, + "logps/chosen": -176.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.03125, + "rewards/margins": 22.0, + "rewards/rejected": -19.0, + "step": 3030 + }, + { + "epoch": 2.435897435897436, + "grad_norm": 1.1941928602219868e-05, + "learning_rate": 1.0448204214900563e-07, + "logits/chosen": 1.1171875, + "logits/rejected": 1.125, + "logps/chosen": -167.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.21875, + "rewards/margins": 22.75, + "rewards/rejected": -19.5, + "step": 3040 + }, + { + "epoch": 2.4439102564102564, + "grad_norm": 4.511019063462089e-06, + "learning_rate": 1.0299792223211636e-07, + "logits/chosen": 0.96875, + "logits/rejected": 1.2421875, + "logps/chosen": -173.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.421875, + "rewards/margins": 22.75, + "rewards/rejected": -19.375, + "step": 3050 + }, + { + "epoch": 2.451923076923077, + "grad_norm": 1.1909188473574983e-06, + "learning_rate": 1.0151380231522706e-07, + "logits/chosen": 0.71875, + "logits/rejected": 1.1953125, + "logps/chosen": -189.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.703125, + "rewards/margins": 23.875, + "rewards/rejected": -20.125, + "step": 3060 + }, + { + "epoch": 2.4599358974358974, + "grad_norm": 0.00016413461567918458, + "learning_rate": 1.0002968239833778e-07, + "logits/chosen": 0.8515625, + "logits/rejected": 1.140625, + "logps/chosen": -176.0, + "logps/rejected": -360.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.046875, + "rewards/margins": 24.0, + "rewards/rejected": -21.0, + "step": 3070 + }, + { + "epoch": 2.467948717948718, + "grad_norm": 0.0004207874699143046, + "learning_rate": 9.854556248144849e-08, + "logits/chosen": 1.0625, + "logits/rejected": 1.0703125, + "logps/chosen": -171.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 23.0, + "rewards/rejected": -19.75, + "step": 3080 + }, + { + "epoch": 2.4759615384615383, + "grad_norm": 0.0013603810220500937, + "learning_rate": 9.706144256455921e-08, + "logits/chosen": 0.859375, + "logits/rejected": 1.4453125, + "logps/chosen": -201.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 23.0, + "rewards/rejected": -19.25, + "step": 3090 + }, + { + "epoch": 2.483974358974359, + "grad_norm": 1.8119361476861766e-07, + "learning_rate": 9.557732264766994e-08, + "logits/chosen": 1.125, + "logits/rejected": 1.65625, + "logps/chosen": -181.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.859375, + "rewards/margins": 21.875, + "rewards/rejected": -19.0, + "step": 3100 + }, + { + "epoch": 2.4919871794871793, + "grad_norm": 1.0849727745118161e-05, + "learning_rate": 9.409320273078064e-08, + "logits/chosen": 
0.75, + "logits/rejected": 1.25, + "logps/chosen": -194.0, + "logps/rejected": -346.0, + "loss": 0.0001, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.65625, + "rewards/margins": 25.125, + "rewards/rejected": -21.375, + "step": 3110 + }, + { + "epoch": 2.5, + "grad_norm": 0.00190454125856642, + "learning_rate": 9.260908281389137e-08, + "logits/chosen": 0.79296875, + "logits/rejected": 1.1328125, + "logps/chosen": -174.0, + "logps/rejected": -318.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.671875, + "rewards/margins": 23.0, + "rewards/rejected": -19.25, + "step": 3120 + }, + { + "epoch": 2.5080128205128203, + "grad_norm": 4.817138155607492e-05, + "learning_rate": 9.112496289700207e-08, + "logits/chosen": 1.078125, + "logits/rejected": 1.0546875, + "logps/chosen": -151.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 23.5, + "rewards/rejected": -19.875, + "step": 3130 + }, + { + "epoch": 2.516025641025641, + "grad_norm": 4.1634510560200736e-07, + "learning_rate": 8.964084298011279e-08, + "logits/chosen": 0.89453125, + "logits/rejected": 1.21875, + "logps/chosen": -165.0, + "logps/rejected": -350.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.03125, + "rewards/margins": 23.625, + "rewards/rejected": -20.625, + "step": 3140 + }, + { + "epoch": 2.5240384615384617, + "grad_norm": 0.0030510730836868943, + "learning_rate": 8.815672306322349e-08, + "logits/chosen": 0.83984375, + "logits/rejected": 1.3359375, + "logps/chosen": -150.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.09375, + "rewards/margins": 23.5, + "rewards/rejected": -20.375, + "step": 3150 + }, + { + "epoch": 2.532051282051282, + "grad_norm": 4.333397926948427e-05, + "learning_rate": 8.667260314633422e-08, + "logits/chosen": 0.8203125, + "logits/rejected": 1.328125, + "logps/chosen": -144.0, + "logps/rejected": -350.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.65625, + "rewards/margins": 24.625, + "rewards/rejected": -21.0, + "step": 3160 + }, + { + "epoch": 2.5400641025641026, + "grad_norm": 3.816937452899327e-05, + "learning_rate": 8.518848322944495e-08, + "logits/chosen": 0.83203125, + "logits/rejected": 1.1953125, + "logps/chosen": -184.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.515625, + "rewards/margins": 23.25, + "rewards/rejected": -19.75, + "step": 3170 + }, + { + "epoch": 2.5480769230769234, + "grad_norm": 4.438174995412161e-06, + "learning_rate": 8.370436331255565e-08, + "logits/chosen": 0.9921875, + "logits/rejected": 1.234375, + "logps/chosen": -137.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 23.75, + "rewards/rejected": -20.25, + "step": 3180 + }, + { + "epoch": 2.5560897435897436, + "grad_norm": 1.2674210936494033e-05, + "learning_rate": 8.222024339566637e-08, + "logits/chosen": 0.9765625, + "logits/rejected": 1.1953125, + "logps/chosen": -189.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.828125, + "rewards/margins": 22.625, + "rewards/rejected": -18.75, + "step": 3190 + }, + { + "epoch": 2.564102564102564, + "grad_norm": 2.2470915894489486e-06, + "learning_rate": 8.073612347877707e-08, + "logits/chosen": 0.7578125, + "logits/rejected": 0.94140625, + "logps/chosen": -210.0, + "logps/rejected": -336.0, + "loss": 0.0, + 
"rewards/accuracies": 1.0, + "rewards/chosen": 2.8125, + "rewards/margins": 22.875, + "rewards/rejected": -20.0, + "step": 3200 + }, + { + "epoch": 2.5721153846153846, + "grad_norm": 0.0013392786326408394, + "learning_rate": 7.92520035618878e-08, + "logits/chosen": 0.70703125, + "logits/rejected": 1.0625, + "logps/chosen": -166.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 23.125, + "rewards/rejected": -19.875, + "step": 3210 + }, + { + "epoch": 2.5801282051282053, + "grad_norm": 1.1479055755141908e-06, + "learning_rate": 7.776788364499851e-08, + "logits/chosen": 0.392578125, + "logits/rejected": 1.03125, + "logps/chosen": -176.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.3125, + "rewards/margins": 23.75, + "rewards/rejected": -20.5, + "step": 3220 + }, + { + "epoch": 2.5881410256410255, + "grad_norm": 6.57721816711125e-08, + "learning_rate": 7.628376372810923e-08, + "logits/chosen": 0.859375, + "logits/rejected": 0.92578125, + "logps/chosen": -161.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.3125, + "rewards/margins": 25.5, + "rewards/rejected": -22.25, + "step": 3230 + }, + { + "epoch": 2.5961538461538463, + "grad_norm": 8.654157902362506e-07, + "learning_rate": 7.479964381121995e-08, + "logits/chosen": 0.84375, + "logits/rejected": 1.34375, + "logps/chosen": -131.0, + "logps/rejected": -296.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.34375, + "rewards/margins": 21.125, + "rewards/rejected": -17.75, + "step": 3240 + }, + { + "epoch": 2.6041666666666665, + "grad_norm": 2.107173176911769e-05, + "learning_rate": 7.331552389433065e-08, + "logits/chosen": 0.92578125, + "logits/rejected": 1.2421875, + "logps/chosen": -182.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 22.625, + "rewards/rejected": -18.875, + "step": 3250 + }, + { + "epoch": 2.6121794871794872, + "grad_norm": 3.0995378385019935e-05, + "learning_rate": 7.183140397744138e-08, + "logits/chosen": 0.78515625, + "logits/rejected": 1.1484375, + "logps/chosen": -185.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 23.375, + "rewards/rejected": -19.75, + "step": 3260 + }, + { + "epoch": 2.6201923076923075, + "grad_norm": 7.163491430995317e-06, + "learning_rate": 7.034728406055208e-08, + "logits/chosen": 1.0390625, + "logits/rejected": 1.03125, + "logps/chosen": -163.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.5, + "rewards/margins": 23.625, + "rewards/rejected": -20.125, + "step": 3270 + }, + { + "epoch": 2.628205128205128, + "grad_norm": 3.2825173935148675e-06, + "learning_rate": 6.886316414366281e-08, + "logits/chosen": 0.9921875, + "logits/rejected": 1.1875, + "logps/chosen": -167.0, + "logps/rejected": -346.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 23.875, + "rewards/rejected": -20.375, + "step": 3280 + }, + { + "epoch": 2.636217948717949, + "grad_norm": 3.7018247685930666e-06, + "learning_rate": 6.737904422677352e-08, + "logits/chosen": 0.64453125, + "logits/rejected": 1.125, + "logps/chosen": -154.0, + "logps/rejected": -340.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.28125, + "rewards/margins": 25.0, + "rewards/rejected": -21.625, + "step": 3290 
+ }, + { + "epoch": 2.644230769230769, + "grad_norm": 1.5235500032868652e-06, + "learning_rate": 6.589492430988424e-08, + "logits/chosen": 0.86328125, + "logits/rejected": 1.171875, + "logps/chosen": -158.0, + "logps/rejected": -326.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 4.0625, + "rewards/margins": 23.0, + "rewards/rejected": -19.0, + "step": 3300 + }, + { + "epoch": 2.65224358974359, + "grad_norm": 2.4028100208495416e-05, + "learning_rate": 6.441080439299495e-08, + "logits/chosen": 0.9453125, + "logits/rejected": 1.203125, + "logps/chosen": -174.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.484375, + "rewards/margins": 23.375, + "rewards/rejected": -19.875, + "step": 3310 + }, + { + "epoch": 2.66025641025641, + "grad_norm": 0.0006641311306163846, + "learning_rate": 6.292668447610566e-08, + "logits/chosen": 1.03125, + "logits/rejected": 1.375, + "logps/chosen": -154.0, + "logps/rejected": -348.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.671875, + "rewards/margins": 24.25, + "rewards/rejected": -20.5, + "step": 3320 + }, + { + "epoch": 2.668269230769231, + "grad_norm": 1.3261677467269389e-05, + "learning_rate": 6.144256455921639e-08, + "logits/chosen": 1.171875, + "logits/rejected": 1.671875, + "logps/chosen": -136.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 24.0, + "rewards/rejected": -20.375, + "step": 3330 + }, + { + "epoch": 2.676282051282051, + "grad_norm": 2.3998995719683365e-07, + "learning_rate": 5.99584446423271e-08, + "logits/chosen": 0.890625, + "logits/rejected": 1.2890625, + "logps/chosen": -212.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 22.875, + "rewards/rejected": -19.5, + "step": 3340 + }, + { + "epoch": 2.684294871794872, + "grad_norm": 1.1770292696948012e-07, + "learning_rate": 5.847432472543781e-08, + "logits/chosen": 0.83984375, + "logits/rejected": 1.6484375, + "logps/chosen": -168.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.484375, + "rewards/margins": 24.5, + "rewards/rejected": -21.0, + "step": 3350 + }, + { + "epoch": 2.6923076923076925, + "grad_norm": 2.900580842341117e-07, + "learning_rate": 5.699020480854853e-08, + "logits/chosen": 0.91796875, + "logits/rejected": 1.09375, + "logps/chosen": -142.0, + "logps/rejected": -352.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 25.625, + "rewards/rejected": -22.125, + "step": 3360 + }, + { + "epoch": 2.7003205128205128, + "grad_norm": 1.4745279648366565e-05, + "learning_rate": 5.550608489165924e-08, + "logits/chosen": 0.84375, + "logits/rejected": 1.09375, + "logps/chosen": -150.0, + "logps/rejected": -324.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 23.25, + "rewards/rejected": -19.75, + "step": 3370 + }, + { + "epoch": 2.7083333333333335, + "grad_norm": 1.4514272796826e-06, + "learning_rate": 5.4021964974769963e-08, + "logits/chosen": 0.92578125, + "logits/rejected": 1.359375, + "logps/chosen": -163.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 23.0, + "rewards/rejected": -19.625, + "step": 3380 + }, + { + "epoch": 2.7163461538461537, + "grad_norm": 0.0005888690086543229, + "learning_rate": 5.253784505788068e-08, + 
"logits/chosen": 1.03125, + "logits/rejected": 1.0234375, + "logps/chosen": -155.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.125, + "rewards/margins": 22.625, + "rewards/rejected": -19.5, + "step": 3390 + }, + { + "epoch": 2.7243589743589745, + "grad_norm": 2.0042622885174553e-05, + "learning_rate": 5.105372514099139e-08, + "logits/chosen": 0.87890625, + "logits/rejected": 1.390625, + "logps/chosen": -144.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.6875, + "rewards/margins": 23.5, + "rewards/rejected": -19.875, + "step": 3400 + }, + { + "epoch": 2.7323717948717947, + "grad_norm": 5.635967172397906e-05, + "learning_rate": 4.9569605224102104e-08, + "logits/chosen": 0.99609375, + "logits/rejected": 1.4375, + "logps/chosen": -171.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.546875, + "rewards/margins": 24.125, + "rewards/rejected": -20.625, + "step": 3410 + }, + { + "epoch": 2.7403846153846154, + "grad_norm": 6.551150056642263e-06, + "learning_rate": 4.808548530721282e-08, + "logits/chosen": 1.0078125, + "logits/rejected": 1.359375, + "logps/chosen": -172.0, + "logps/rejected": -350.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.609375, + "rewards/margins": 24.5, + "rewards/rejected": -20.875, + "step": 3420 + }, + { + "epoch": 2.748397435897436, + "grad_norm": 2.170737569638303e-05, + "learning_rate": 4.660136539032353e-08, + "logits/chosen": 1.09375, + "logits/rejected": 1.3203125, + "logps/chosen": -170.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.40625, + "rewards/margins": 24.75, + "rewards/rejected": -21.375, + "step": 3430 + }, + { + "epoch": 2.7564102564102564, + "grad_norm": 3.662218737713611e-05, + "learning_rate": 4.511724547343425e-08, + "logits/chosen": 0.734375, + "logits/rejected": 1.3671875, + "logps/chosen": -139.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.359375, + "rewards/margins": 23.5, + "rewards/rejected": -20.125, + "step": 3440 + }, + { + "epoch": 2.7644230769230766, + "grad_norm": 2.0092823057660767e-07, + "learning_rate": 4.363312555654497e-08, + "logits/chosen": 1.0078125, + "logits/rejected": 1.1640625, + "logps/chosen": -178.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 22.5, + "rewards/rejected": -19.0, + "step": 3450 + }, + { + "epoch": 2.7724358974358974, + "grad_norm": 8.047688007594987e-05, + "learning_rate": 4.2149005639655685e-08, + "logits/chosen": 0.8046875, + "logits/rejected": 1.2578125, + "logps/chosen": -163.0, + "logps/rejected": -354.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 24.75, + "rewards/rejected": -21.25, + "step": 3460 + }, + { + "epoch": 2.780448717948718, + "grad_norm": 0.0006711480616614469, + "learning_rate": 4.06648857227664e-08, + "logits/chosen": 0.76953125, + "logits/rejected": 1.40625, + "logps/chosen": -167.0, + "logps/rejected": -330.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.421875, + "rewards/margins": 23.125, + "rewards/rejected": -19.75, + "step": 3470 + }, + { + "epoch": 2.7884615384615383, + "grad_norm": 0.0003280497102533483, + "learning_rate": 3.918076580587711e-08, + "logits/chosen": 0.6875, + "logits/rejected": 1.1796875, + "logps/chosen": -174.0, + "logps/rejected": -350.0, + "loss": 
0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 23.125, + "rewards/rejected": -19.875, + "step": 3480 + }, + { + "epoch": 2.796474358974359, + "grad_norm": 6.798795586197906e-05, + "learning_rate": 3.7696645888987825e-08, + "logits/chosen": 0.87109375, + "logits/rejected": 0.99609375, + "logps/chosen": -147.0, + "logps/rejected": -356.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.265625, + "rewards/margins": 24.625, + "rewards/rejected": -21.375, + "step": 3490 + }, + { + "epoch": 2.8044871794871797, + "grad_norm": 4.364786537805849e-06, + "learning_rate": 3.621252597209854e-08, + "logits/chosen": 0.91796875, + "logits/rejected": 1.3984375, + "logps/chosen": -159.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.375, + "rewards/margins": 23.625, + "rewards/rejected": -20.25, + "step": 3500 + }, + { + "epoch": 2.8125, + "grad_norm": 1.9387537093442454e-08, + "learning_rate": 3.4728406055209265e-08, + "logits/chosen": 0.75390625, + "logits/rejected": 1.1171875, + "logps/chosen": -185.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 25.125, + "rewards/rejected": -21.5, + "step": 3510 + }, + { + "epoch": 2.8205128205128203, + "grad_norm": 1.3880341861407807e-05, + "learning_rate": 3.324428613831998e-08, + "logits/chosen": 1.015625, + "logits/rejected": 1.296875, + "logps/chosen": -143.0, + "logps/rejected": -358.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.53125, + "rewards/margins": 24.75, + "rewards/rejected": -21.25, + "step": 3520 + }, + { + "epoch": 2.828525641025641, + "grad_norm": 1.8304365557650966e-05, + "learning_rate": 3.176016622143069e-08, + "logits/chosen": 1.046875, + "logits/rejected": 1.203125, + "logps/chosen": -167.0, + "logps/rejected": -340.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.78125, + "rewards/margins": 24.25, + "rewards/rejected": -20.5, + "step": 3530 + }, + { + "epoch": 2.8365384615384617, + "grad_norm": 5.07839088524003e-07, + "learning_rate": 3.0276046304541406e-08, + "logits/chosen": 0.94140625, + "logits/rejected": 1.203125, + "logps/chosen": -170.0, + "logps/rejected": -344.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.140625, + "rewards/margins": 23.75, + "rewards/rejected": -20.625, + "step": 3540 + }, + { + "epoch": 2.844551282051282, + "grad_norm": 6.497502370566641e-06, + "learning_rate": 2.879192638765212e-08, + "logits/chosen": 0.7578125, + "logits/rejected": 1.09375, + "logps/chosen": -180.0, + "logps/rejected": -320.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.796875, + "rewards/margins": 22.125, + "rewards/rejected": -18.375, + "step": 3550 + }, + { + "epoch": 2.8525641025641026, + "grad_norm": 2.0919183207981193e-05, + "learning_rate": 2.7307806470762836e-08, + "logits/chosen": 0.93359375, + "logits/rejected": 1.4609375, + "logps/chosen": -171.0, + "logps/rejected": -334.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.65625, + "rewards/margins": 23.25, + "rewards/rejected": -19.5, + "step": 3560 + }, + { + "epoch": 2.8605769230769234, + "grad_norm": 5.495725650489294e-08, + "learning_rate": 2.5823686553873553e-08, + "logits/chosen": 0.8359375, + "logits/rejected": 1.140625, + "logps/chosen": -166.0, + "logps/rejected": -366.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.984375, + "rewards/margins": 25.125, + "rewards/rejected": 
-22.125, + "step": 3570 + }, + { + "epoch": 2.8685897435897436, + "grad_norm": 0.00027587029209878266, + "learning_rate": 2.4339566636984267e-08, + "logits/chosen": 0.92578125, + "logits/rejected": 1.46875, + "logps/chosen": -178.0, + "logps/rejected": -314.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.78125, + "rewards/margins": 21.75, + "rewards/rejected": -18.0, + "step": 3580 + }, + { + "epoch": 2.876602564102564, + "grad_norm": 3.145906163646421e-06, + "learning_rate": 2.2855446720094983e-08, + "logits/chosen": 0.93359375, + "logits/rejected": 1.2109375, + "logps/chosen": -168.0, + "logps/rejected": -356.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.390625, + "rewards/margins": 24.875, + "rewards/rejected": -21.5, + "step": 3590 + }, + { + "epoch": 2.8846153846153846, + "grad_norm": 0.002758379167699333, + "learning_rate": 2.1371326803205697e-08, + "logits/chosen": 0.92578125, + "logits/rejected": 1.46875, + "logps/chosen": -172.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.875, + "rewards/margins": 23.25, + "rewards/rejected": -19.375, + "step": 3600 + }, + { + "epoch": 2.8926282051282053, + "grad_norm": 3.8764981035982807e-07, + "learning_rate": 1.9887206886316414e-08, + "logits/chosen": 0.8046875, + "logits/rejected": 1.4375, + "logps/chosen": -166.0, + "logps/rejected": -364.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 2.875, + "rewards/margins": 25.25, + "rewards/rejected": -22.5, + "step": 3610 + }, + { + "epoch": 2.9006410256410255, + "grad_norm": 2.459298995862145e-05, + "learning_rate": 1.840308696942713e-08, + "logits/chosen": 0.9296875, + "logits/rejected": 1.2109375, + "logps/chosen": -170.0, + "logps/rejected": -346.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.390625, + "rewards/margins": 24.0, + "rewards/rejected": -20.5, + "step": 3620 + }, + { + "epoch": 2.9086538461538463, + "grad_norm": 0.0033663621977632068, + "learning_rate": 1.6918967052537844e-08, + "logits/chosen": 0.90234375, + "logits/rejected": 1.1015625, + "logps/chosen": -158.0, + "logps/rejected": -342.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.640625, + "rewards/margins": 24.625, + "rewards/rejected": -21.0, + "step": 3630 + }, + { + "epoch": 2.9166666666666665, + "grad_norm": 2.0820327560442435e-05, + "learning_rate": 1.5434847135648558e-08, + "logits/chosen": 1.0, + "logits/rejected": 1.4140625, + "logps/chosen": -125.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.078125, + "rewards/margins": 23.625, + "rewards/rejected": -20.625, + "step": 3640 + }, + { + "epoch": 2.9246794871794872, + "grad_norm": 9.867503905955064e-07, + "learning_rate": 1.3950727218759274e-08, + "logits/chosen": 0.8359375, + "logits/rejected": 1.2421875, + "logps/chosen": -178.0, + "logps/rejected": -354.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.328125, + "rewards/margins": 24.75, + "rewards/rejected": -21.5, + "step": 3650 + }, + { + "epoch": 2.9326923076923075, + "grad_norm": 3.192135749286814e-05, + "learning_rate": 1.2466607301869991e-08, + "logits/chosen": 0.88671875, + "logits/rejected": 1.3671875, + "logps/chosen": -144.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.578125, + "rewards/margins": 24.875, + "rewards/rejected": -21.25, + "step": 3660 + }, + { + "epoch": 2.940705128205128, + "grad_norm": 2.715406001289612e-08, + 
"learning_rate": 1.0982487384980706e-08, + "logits/chosen": 1.046875, + "logits/rejected": 1.203125, + "logps/chosen": -157.0, + "logps/rejected": -344.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.4375, + "rewards/margins": 24.125, + "rewards/rejected": -20.75, + "step": 3670 + }, + { + "epoch": 2.948717948717949, + "grad_norm": 2.672696571203794e-07, + "learning_rate": 9.498367468091422e-09, + "logits/chosen": 0.78515625, + "logits/rejected": 1.1484375, + "logps/chosen": -177.0, + "logps/rejected": -348.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.890625, + "rewards/margins": 24.875, + "rewards/rejected": -21.0, + "step": 3680 + }, + { + "epoch": 2.956730769230769, + "grad_norm": 7.489910666014967e-05, + "learning_rate": 8.014247551202137e-09, + "logits/chosen": 0.54296875, + "logits/rejected": 1.1953125, + "logps/chosen": -206.0, + "logps/rejected": -338.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.75, + "rewards/margins": 23.625, + "rewards/rejected": -19.875, + "step": 3690 + }, + { + "epoch": 2.96474358974359, + "grad_norm": 1.3844115894190923e-05, + "learning_rate": 6.530127634312852e-09, + "logits/chosen": 0.85546875, + "logits/rejected": 1.0703125, + "logps/chosen": -172.0, + "logps/rejected": -328.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.671875, + "rewards/margins": 22.5, + "rewards/rejected": -18.875, + "step": 3700 + }, + { + "epoch": 2.97275641025641, + "grad_norm": 2.3143826423249172e-05, + "learning_rate": 5.046007717423567e-09, + "logits/chosen": 1.0390625, + "logits/rejected": 1.0390625, + "logps/chosen": -152.0, + "logps/rejected": -332.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.46875, + "rewards/margins": 24.75, + "rewards/rejected": -21.25, + "step": 3710 + }, + { + "epoch": 2.980769230769231, + "grad_norm": 9.095521401560934e-05, + "learning_rate": 3.561887800534283e-09, + "logits/chosen": 0.90625, + "logits/rejected": 0.86328125, + "logps/chosen": -127.0, + "logps/rejected": -326.0, + "loss": 0.0002, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.171875, + "rewards/margins": 23.75, + "rewards/rejected": -20.5, + "step": 3720 + }, + { + "epoch": 2.988782051282051, + "grad_norm": 2.7589599160714447e-07, + "learning_rate": 2.0777678836449987e-09, + "logits/chosen": 0.73046875, + "logits/rejected": 1.078125, + "logps/chosen": -193.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.25, + "rewards/margins": 24.5, + "rewards/rejected": -21.125, + "step": 3730 + }, + { + "epoch": 2.996794871794872, + "grad_norm": 5.964151597883472e-05, + "learning_rate": 5.936479667557139e-10, + "logits/chosen": 0.83203125, + "logits/rejected": 1.3046875, + "logps/chosen": -153.0, + "logps/rejected": -336.0, + "loss": 0.0, + "rewards/accuracies": 1.0, + "rewards/chosen": 3.609375, + "rewards/margins": 23.125, + "rewards/rejected": -19.5, + "step": 3740 + }, + { + "epoch": 3.0, + "eval_logits/chosen": 0.88671875, + "eval_logits/rejected": 1.4453125, + "eval_logps/chosen": -170.0, + "eval_logps/rejected": -344.0, + "eval_loss": 2.2770025225327117e-06, + "eval_rewards/accuracies": 1.0, + "eval_rewards/chosen": 3.296875, + "eval_rewards/margins": 23.5, + "eval_rewards/rejected": -20.25, + "eval_runtime": 32.9212, + "eval_samples_per_second": 6.045, + "eval_steps_per_second": 0.759, + "step": 3744 + }, + { + "epoch": 3.0, + "step": 3744, + "total_flos": 0.0, + "train_loss": 0.022058238131815282, + "train_runtime": 10847.6777, 
+ "train_samples_per_second": 2.759, + "train_steps_per_second": 0.345 + } + ], + "logging_steps": 10, + "max_steps": 3744, + "num_input_tokens_seen": 0, + "num_train_epochs": 3, + "save_steps": 500, + "stateful_callbacks": { + "TrainerControl": { + "args": { + "should_epoch_stop": false, + "should_evaluate": false, + "should_log": false, + "should_save": true, + "should_training_stop": true + }, + "attributes": {} + } + }, + "total_flos": 0.0, + "train_batch_size": 4, + "trial_name": null, + "trial_params": null +}