{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016736401673640166,
"grad_norm": 7.7724597597287675,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -2.807276487350464,
"logits/rejected": -2.7759768962860107,
"logps/chosen": -315.42626953125,
"logps/rejected": -227.5915985107422,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.16736401673640167,
"grad_norm": 7.43868551766701,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -2.75545072555542,
"logits/rejected": -2.7460691928863525,
"logps/chosen": -271.9334716796875,
"logps/rejected": -260.69281005859375,
"loss": 0.6914,
"rewards/accuracies": 0.5208333134651184,
"rewards/chosen": 0.005364930257201195,
"rewards/margins": 0.0036840070970356464,
"rewards/rejected": 0.001680923392996192,
"step": 10
},
{
"epoch": 0.33472803347280333,
"grad_norm": 6.731470928419319,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": -2.787079334259033,
"logits/rejected": -2.768202543258667,
"logps/chosen": -261.4521789550781,
"logps/rejected": -250.88571166992188,
"loss": 0.6745,
"rewards/accuracies": 0.7093750238418579,
"rewards/chosen": 0.03229519724845886,
"rewards/margins": 0.04072761535644531,
"rewards/rejected": -0.008432422764599323,
"step": 20
},
{
"epoch": 0.502092050209205,
"grad_norm": 7.353543442886458,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": -2.7879936695098877,
"logits/rejected": -2.7689452171325684,
"logps/chosen": -293.8031921386719,
"logps/rejected": -254.1697235107422,
"loss": 0.6488,
"rewards/accuracies": 0.6781250238418579,
"rewards/chosen": -0.02347097545862198,
"rewards/margins": 0.12664587795734406,
"rewards/rejected": -0.15011683106422424,
"step": 30
},
{
"epoch": 0.6694560669456067,
"grad_norm": 8.125000777751103,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": -2.7606043815612793,
"logits/rejected": -2.7380428314208984,
"logps/chosen": -264.1278381347656,
"logps/rejected": -255.55029296875,
"loss": 0.6353,
"rewards/accuracies": 0.6812499761581421,
"rewards/chosen": -0.11208363622426987,
"rewards/margins": 0.1603127121925354,
"rewards/rejected": -0.27239635586738586,
"step": 40
},
{
"epoch": 0.8368200836820083,
"grad_norm": 8.574824255298934,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": -2.770317554473877,
"logits/rejected": -2.7504336833953857,
"logps/chosen": -279.09698486328125,
"logps/rejected": -287.2370300292969,
"loss": 0.6202,
"rewards/accuracies": 0.628125011920929,
"rewards/chosen": -0.22088325023651123,
"rewards/margins": 0.13636226952075958,
"rewards/rejected": -0.3572455048561096,
"step": 50
},
{
"epoch": 0.9874476987447699,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.6490422426644018,
"train_runtime": 1622.868,
"train_samples_per_second": 9.417,
"train_steps_per_second": 0.036
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}