{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -2.8084447383880615,
"logits/rejected": -2.7193427085876465,
"logps/chosen": -297.1988220214844,
"logps/pi_response": -75.58995056152344,
"logps/ref_response": -75.58995056152344,
"logps/rejected": -175.9150848388672,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.17,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -2.7457406520843506,
"logits/rejected": -2.7058539390563965,
"logps/chosen": -238.87890625,
"logps/pi_response": -74.1884765625,
"logps/ref_response": -73.4278335571289,
"logps/rejected": -189.71603393554688,
"loss": 0.6894,
"rewards/accuracies": 0.5555555820465088,
"rewards/chosen": 0.004993405658751726,
"rewards/margins": 0.006586252711713314,
"rewards/rejected": -0.0015928474022075534,
"step": 10
},
{
"epoch": 0.33,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": -2.7167129516601562,
"logits/rejected": -2.6783037185668945,
"logps/chosen": -234.54824829101562,
"logps/pi_response": -87.90349578857422,
"logps/ref_response": -74.08830261230469,
"logps/rejected": -173.56919860839844,
"loss": 0.6533,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": 0.001131695113144815,
"rewards/margins": 0.0829634889960289,
"rewards/rejected": -0.08183179050683975,
"step": 20
},
{
"epoch": 0.5,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": -2.64365816116333,
"logits/rejected": -2.619050979614258,
"logps/chosen": -278.97296142578125,
"logps/pi_response": -129.8077392578125,
"logps/ref_response": -80.98223876953125,
"logps/rejected": -202.1917266845703,
"loss": 0.6103,
"rewards/accuracies": 0.753125011920929,
"rewards/chosen": -0.11994215101003647,
"rewards/margins": 0.24995431303977966,
"rewards/rejected": -0.3698965013027191,
"step": 30
},
{
"epoch": 0.67,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": -2.6128714084625244,
"logits/rejected": -2.5885796546936035,
"logps/chosen": -244.5124053955078,
"logps/pi_response": -130.47091674804688,
"logps/ref_response": -68.78013610839844,
"logps/rejected": -215.06298828125,
"loss": 0.5858,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -0.2616512179374695,
"rewards/margins": 0.23390412330627441,
"rewards/rejected": -0.4955553412437439,
"step": 40
},
{
"epoch": 0.84,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": -2.6257190704345703,
"logits/rejected": -2.6010611057281494,
"logps/chosen": -284.50152587890625,
"logps/pi_response": -152.08297729492188,
"logps/ref_response": -70.03689575195312,
"logps/rejected": -235.3142547607422,
"loss": 0.559,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": -0.36042410135269165,
"rewards/margins": 0.31209221482276917,
"rewards/rejected": -0.6725163459777832,
"step": 50
},
{
"epoch": 0.99,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.6087713403216863,
"train_runtime": 3479.9494,
"train_samples_per_second": 4.392,
"train_steps_per_second": 0.017
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}