|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.6679039001464844,
      "logits/rejected": -2.599691390991211,
      "logps/chosen": -164.941650390625,
      "logps/pi_response": -79.30451965332031,
      "logps/ref_response": -79.30451965332031,
      "logps/rejected": -111.52906799316406,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.6902599334716797,
      "logits/rejected": -2.705916404724121,
      "logps/chosen": -236.0708770751953,
      "logps/pi_response": -118.64096069335938,
      "logps/ref_response": -118.30906677246094,
      "logps/rejected": -228.03765869140625,
      "loss": 0.6928,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/chosen": -0.0036212028935551643,
      "rewards/margins": -0.0003877294948324561,
      "rewards/rejected": -0.0032334737479686737,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.7183661460876465,
      "logits/rejected": -2.6849148273468018,
      "logps/chosen": -265.54229736328125,
      "logps/pi_response": -142.77395629882812,
      "logps/ref_response": -130.1978302001953,
      "logps/rejected": -228.3546905517578,
      "loss": 0.6905,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -0.12290030717849731,
      "rewards/margins": 0.00855380017310381,
      "rewards/rejected": -0.13145411014556885,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.555771827697754,
      "logits/rejected": -2.520658254623413,
      "logps/chosen": -249.33755493164062,
      "logps/pi_response": -142.53103637695312,
      "logps/ref_response": -112.13728332519531,
      "logps/rejected": -248.5088653564453,
      "loss": 0.6779,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.22909781336784363,
      "rewards/margins": 0.043774768710136414,
      "rewards/rejected": -0.2728726267814636,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.379845142364502,
      "logits/rejected": -2.3809399604797363,
      "logps/chosen": -273.18988037109375,
      "logps/pi_response": -181.37307739257812,
      "logps/ref_response": -109.49589538574219,
      "logps/rejected": -302.9486999511719,
      "loss": 0.6705,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -0.6705134510993958,
      "rewards/margins": 0.07195498049259186,
      "rewards/rejected": -0.7424684762954712,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.2819459438323975,
      "logits/rejected": -2.274500608444214,
      "logps/chosen": -335.9717102050781,
      "logps/pi_response": -223.96981811523438,
      "logps/ref_response": -117.62721252441406,
      "logps/rejected": -327.4072570800781,
      "loss": 0.6741,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": -1.0777838230133057,
      "rewards/margins": 0.031330838799476624,
      "rewards/rejected": -1.1091147661209106,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.2090117931365967,
      "logits/rejected": -2.1789093017578125,
      "logps/chosen": -308.4948425292969,
      "logps/pi_response": -217.4617156982422,
      "logps/ref_response": -107.9961929321289,
      "logps/rejected": -323.3466796875,
      "loss": 0.662,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.8825389742851257,
      "rewards/margins": 0.26200079917907715,
      "rewards/rejected": -1.1445398330688477,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.2137115001678467,
      "logits/rejected": -2.212620496749878,
      "logps/chosen": -287.8365478515625,
      "logps/pi_response": -201.22882080078125,
      "logps/ref_response": -113.01625061035156,
      "logps/rejected": -314.63323974609375,
      "loss": 0.6625,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.7743679285049438,
      "rewards/margins": 0.14890065789222717,
      "rewards/rejected": -0.9232686161994934,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.2056334018707275,
      "logits/rejected": -2.197784900665283,
      "logps/chosen": -296.95391845703125,
      "logps/pi_response": -206.1053924560547,
      "logps/ref_response": -101.80767822265625,
      "logps/rejected": -338.78887939453125,
      "loss": 0.6727,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -0.9507198333740234,
      "rewards/margins": 0.19975939393043518,
      "rewards/rejected": -1.1504793167114258,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.2584753036499023,
      "logits/rejected": -2.251711130142212,
      "logps/chosen": -307.7080383300781,
      "logps/pi_response": -200.96620178222656,
      "logps/ref_response": -100.00450897216797,
      "logps/rejected": -278.3091735839844,
      "loss": 0.666,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -0.8939422369003296,
      "rewards/margins": 0.15028740465641022,
      "rewards/rejected": -1.0442297458648682,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.1924281120300293,
      "logits/rejected": -2.1820199489593506,
      "logps/chosen": -307.92962646484375,
      "logps/pi_response": -219.1331024169922,
      "logps/ref_response": -112.0215072631836,
      "logps/rejected": -334.48773193359375,
      "loss": 0.6639,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.9324432611465454,
      "rewards/margins": 0.1684987097978592,
      "rewards/rejected": -1.1009418964385986,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.2497620582580566,
      "logits/rejected": -2.256614923477173,
      "logps/chosen": -307.80267333984375,
      "logps/pi_response": -205.81588745117188,
      "logps/ref_response": -100.42203521728516,
      "logps/rejected": -310.6170349121094,
      "loss": 0.6527,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.9657202959060669,
      "rewards/margins": 0.10005203634500504,
      "rewards/rejected": -1.065772294998169,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.239522933959961,
      "logits/rejected": -2.205223321914673,
      "logps/chosen": -338.15972900390625,
      "logps/pi_response": -245.11599731445312,
      "logps/ref_response": -129.8436279296875,
      "logps/rejected": -348.4112854003906,
      "loss": 0.6479,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -1.022566556930542,
      "rewards/margins": 0.14734260737895966,
      "rewards/rejected": -1.1699092388153076,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.2750391960144043,
      "logits/rejected": -2.278750419616699,
      "logps/chosen": -296.6529846191406,
      "logps/pi_response": -215.0723114013672,
      "logps/ref_response": -111.54500579833984,
      "logps/rejected": -353.0396423339844,
      "loss": 0.6476,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.9499589204788208,
      "rewards/margins": 0.08809442818164825,
      "rewards/rejected": -1.038053274154663,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.266298770904541,
      "logits/rejected": -2.2696375846862793,
      "logps/chosen": -326.1199951171875,
      "logps/pi_response": -240.04541015625,
      "logps/ref_response": -119.8094711303711,
      "logps/rejected": -347.0991516113281,
      "loss": 0.6424,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -0.9663877487182617,
      "rewards/margins": 0.2315327674150467,
      "rewards/rejected": -1.197920560836792,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.2334301471710205,
      "logits/rejected": -2.22126841545105,
      "logps/chosen": -300.8511047363281,
      "logps/pi_response": -202.55142211914062,
      "logps/ref_response": -101.10533142089844,
      "logps/rejected": -299.7745666503906,
      "loss": 0.6286,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -0.8617112040519714,
      "rewards/margins": 0.21587765216827393,
      "rewards/rejected": -1.0775889158248901,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.6631228009109977,
      "train_runtime": 4191.0271,
      "train_samples_per_second": 4.862,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|