{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9936305732484076,
  "eval_steps": 500,
  "global_step": 39,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.025477707006369428,
      "grad_norm": 11.761317513728676,
      "learning_rate": 1.25e-07,
      "logits/chosen": -2.9363813400268555,
      "logits/rejected": -2.873398780822754,
      "logps/chosen": -280.836181640625,
      "logps/pi_response": -66.34120178222656,
      "logps/ref_response": -66.34120178222656,
      "logps/rejected": -96.96368408203125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.25477707006369427,
      "grad_norm": 9.078871255108242,
      "learning_rate": 4.646121984004665e-07,
      "logits/chosen": -2.75911808013916,
      "logits/rejected": -2.72881817817688,
      "logps/chosen": -251.74717712402344,
      "logps/pi_response": -72.53483581542969,
      "logps/ref_response": -70.61516571044922,
      "logps/rejected": -110.18606567382812,
      "loss": 0.6805,
      "rewards/accuracies": 0.6180555820465088,
      "rewards/chosen": 0.021727772429585457,
      "rewards/margins": 0.03498953580856323,
      "rewards/rejected": -0.013261763378977776,
      "step": 10
    },
    {
      "epoch": 0.5095541401273885,
      "grad_norm": 5.079835303490505,
      "learning_rate": 2.8355831645441387e-07,
      "logits/chosen": -2.6435434818267822,
      "logits/rejected": -2.6189682483673096,
      "logps/chosen": -222.6739044189453,
      "logps/pi_response": -94.57807922363281,
      "logps/ref_response": -69.6859359741211,
      "logps/rejected": -131.5478515625,
      "loss": 0.6342,
      "rewards/accuracies": 0.746874988079071,
      "rewards/chosen": -0.048926979303359985,
      "rewards/margins": 0.1345483958721161,
      "rewards/rejected": -0.18347537517547607,
      "step": 20
    },
    {
      "epoch": 0.7643312101910829,
      "grad_norm": 6.043410433015879,
      "learning_rate": 7.723433775328384e-08,
      "logits/chosen": -2.6093688011169434,
      "logits/rejected": -2.569550037384033,
      "logps/chosen": -253.96371459960938,
      "logps/pi_response": -118.07237243652344,
      "logps/ref_response": -69.45159912109375,
      "logps/rejected": -148.10458374023438,
      "loss": 0.5973,
      "rewards/accuracies": 0.7593749761581421,
      "rewards/chosen": -0.14226220548152924,
      "rewards/margins": 0.27111202478408813,
      "rewards/rejected": -0.41337427496910095,
      "step": 30
    },
    {
      "epoch": 0.9936305732484076,
      "step": 39,
      "total_flos": 0.0,
      "train_loss": 0.6264889301397861,
      "train_runtime": 1684.0634,
      "train_samples_per_second": 5.938,
      "train_steps_per_second": 0.023
    }
  ],
  "logging_steps": 10,
  "max_steps": 39,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}