{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 48,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.020833333333333332,
"grad_norm": 8.363889637744881,
"learning_rate": 1e-07,
"logits/chosen": -2.8258156776428223,
"logits/rejected": -2.859372615814209,
"logps/chosen": -247.59397888183594,
"logps/pi_response": -72.04409790039062,
"logps/ref_response": -72.04409790039062,
"logps/rejected": -180.8425750732422,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.20833333333333334,
"grad_norm": 9.068207013086154,
"learning_rate": 4.83504027183137e-07,
"logits/chosen": -2.7512786388397217,
"logits/rejected": -2.7331552505493164,
"logps/chosen": -219.46205139160156,
"logps/pi_response": -65.90792083740234,
"logps/ref_response": -65.22713470458984,
"logps/rejected": -164.32415771484375,
"loss": 0.6879,
"rewards/accuracies": 0.5833333134651184,
"rewards/chosen": 0.015046126209199429,
"rewards/margins": 0.013462345115840435,
"rewards/rejected": 0.001583782723173499,
"step": 10
},
{
"epoch": 0.4166666666666667,
"grad_norm": 6.854500780501718,
"learning_rate": 3.643105808261596e-07,
"logits/chosen": -2.7228639125823975,
"logits/rejected": -2.692481756210327,
"logps/chosen": -250.13565063476562,
"logps/pi_response": -79.09022521972656,
"logps/ref_response": -66.51771545410156,
"logps/rejected": -163.60781860351562,
"loss": 0.6553,
"rewards/accuracies": 0.7437499761581421,
"rewards/chosen": 0.031493671238422394,
"rewards/margins": 0.09395480155944824,
"rewards/rejected": -0.062461137771606445,
"step": 20
},
{
"epoch": 0.625,
"grad_norm": 6.610726899137987,
"learning_rate": 1.8676665440207977e-07,
"logits/chosen": -2.642488956451416,
"logits/rejected": -2.608694076538086,
"logps/chosen": -258.0551452636719,
"logps/pi_response": -108.9342041015625,
"logps/ref_response": -70.92167663574219,
"logps/rejected": -199.82693481445312,
"loss": 0.6245,
"rewards/accuracies": 0.7281249761581421,
"rewards/chosen": -0.08083241432905197,
"rewards/margins": 0.20813515782356262,
"rewards/rejected": -0.2889675498008728,
"step": 30
},
{
"epoch": 0.8333333333333334,
"grad_norm": 6.863576606705015,
"learning_rate": 4.1500545527530544e-08,
"logits/chosen": -2.6711089611053467,
"logits/rejected": -2.641400098800659,
"logps/chosen": -272.2811584472656,
"logps/pi_response": -128.9022216796875,
"logps/ref_response": -76.34043884277344,
"logps/rejected": -220.0448455810547,
"loss": 0.6048,
"rewards/accuracies": 0.7124999761581421,
"rewards/chosen": -0.16047780215740204,
"rewards/margins": 0.25879645347595215,
"rewards/rejected": -0.419274240732193,
"step": 40
},
{
"epoch": 1.0,
"step": 48,
"total_flos": 0.0,
"train_loss": 0.6321735282739004,
"train_runtime": 2185.2623,
"train_samples_per_second": 5.595,
"train_steps_per_second": 0.022
}
],
"logging_steps": 10,
"max_steps": 48,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}