{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9905956112852664,
  "eval_steps": 500,
  "global_step": 79,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 6.25e-08,
      "logits/chosen": -2.9087584018707275,
      "logits/rejected": -2.8338208198547363,
      "logps/chosen": -352.96319580078125,
      "logps/pi_response": -77.43819427490234,
      "logps/ref_response": -77.43819427490234,
      "logps/rejected": -170.26690673828125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990217055187362e-07,
      "logits/chosen": -2.787048101425171,
      "logits/rejected": -2.764052391052246,
      "logps/chosen": -237.8766326904297,
      "logps/pi_response": -73.74913024902344,
      "logps/ref_response": -73.52912139892578,
      "logps/rejected": -170.56431579589844,
      "loss": 0.6907,
      "rewards/accuracies": 0.5451388955116272,
      "rewards/chosen": 0.005484491586685181,
      "rewards/margins": 0.004212961066514254,
      "rewards/rejected": 0.0012715309858322144,
      "step": 10
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.655786431300069e-07,
      "logits/chosen": -2.7454662322998047,
      "logits/rejected": -2.6918036937713623,
      "logps/chosen": -248.79116821289062,
      "logps/pi_response": -79.92262268066406,
      "logps/ref_response": -71.4201431274414,
      "logps/rejected": -174.75128173828125,
      "loss": 0.6604,
      "rewards/accuracies": 0.715624988079071,
      "rewards/chosen": 0.04207359999418259,
      "rewards/margins": 0.0811639204621315,
      "rewards/rejected": -0.039090320467948914,
      "step": 20
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.9061232191019517e-07,
      "logits/chosen": -2.6588845252990723,
      "logits/rejected": -2.6236281394958496,
      "logps/chosen": -234.9491729736328,
      "logps/pi_response": -105.2069320678711,
      "logps/ref_response": -68.3199691772461,
      "logps/rejected": -190.48977661132812,
      "loss": 0.6266,
      "rewards/accuracies": 0.7406250238418579,
      "rewards/chosen": -0.09634245932102203,
      "rewards/margins": 0.1879693567752838,
      "rewards/rejected": -0.28431180119514465,
      "step": 30
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.8856223324132555e-07,
      "logits/chosen": -2.665360450744629,
      "logits/rejected": -2.64331316947937,
      "logps/chosen": -259.9814453125,
      "logps/pi_response": -145.37118530273438,
      "logps/ref_response": -75.2870864868164,
      "logps/rejected": -230.64920043945312,
      "loss": 0.5797,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.22867317497730255,
      "rewards/margins": 0.351193904876709,
      "rewards/rejected": -0.5798671245574951,
      "step": 40
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.7908455541642582e-07,
      "logits/chosen": -2.667282819747925,
      "logits/rejected": -2.633934497833252,
      "logps/chosen": -287.79718017578125,
      "logps/pi_response": -155.01898193359375,
      "logps/ref_response": -74.21197509765625,
      "logps/rejected": -249.22134399414062,
      "loss": 0.5499,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.3261149525642395,
      "rewards/margins": 0.43611159920692444,
      "rewards/rejected": -0.7622265815734863,
      "step": 50
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.32661172908373e-08,
      "logits/chosen": -2.6649065017700195,
      "logits/rejected": -2.6298487186431885,
      "logps/chosen": -257.4251403808594,
      "logps/pi_response": -161.20509338378906,
      "logps/ref_response": -66.34608459472656,
      "logps/rejected": -266.110107421875,
      "loss": 0.5296,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.42365479469299316,
      "rewards/margins": 0.5627300143241882,
      "rewards/rejected": -0.9863847494125366,
      "step": 60
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.956279997278043e-08,
      "logits/chosen": -2.6613144874572754,
      "logits/rejected": -2.624793529510498,
      "logps/chosen": -307.1275329589844,
      "logps/pi_response": -187.70135498046875,
      "logps/ref_response": -77.6395034790039,
      "logps/rejected": -278.43206787109375,
      "loss": 0.5188,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.4925355017185211,
      "rewards/margins": 0.6049482822418213,
      "rewards/rejected": -1.0974838733673096,
      "step": 70
    },
    {
      "epoch": 0.99,
      "step": 79,
      "total_flos": 0.0,
      "train_loss": 0.586772628977329,
      "train_runtime": 4676.9161,
      "train_samples_per_second": 4.357,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 79,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}