{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9905956112852664,
  "eval_steps": 500,
  "global_step": 79,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012539184952978056,
      "grad_norm": 7.019775825313518,
      "learning_rate": 6.25e-08,
      "logits/chosen": -2.8930726051330566,
      "logits/rejected": -2.8629798889160156,
      "logps/chosen": -357.41943359375,
      "logps/rejected": -303.773193359375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.12539184952978055,
      "grad_norm": 7.210056679422731,
      "learning_rate": 4.990217055187362e-07,
      "logits/chosen": -2.7940280437469482,
      "logits/rejected": -2.7685537338256836,
      "logps/chosen": -260.2227783203125,
      "logps/rejected": -250.45333862304688,
      "loss": 0.692,
      "rewards/accuracies": 0.5034722089767456,
      "rewards/chosen": 0.0014079404063522816,
      "rewards/margins": 0.0026502651162445545,
      "rewards/rejected": -0.0012423248263075948,
      "step": 10
    },
    {
      "epoch": 0.2507836990595611,
      "grad_norm": 6.938600485483928,
      "learning_rate": 4.655786431300069e-07,
      "logits/chosen": -2.7941102981567383,
      "logits/rejected": -2.7533040046691895,
      "logps/chosen": -273.05743408203125,
      "logps/rejected": -248.7327880859375,
      "loss": 0.6766,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": 0.03514797240495682,
      "rewards/margins": 0.03713010624051094,
      "rewards/rejected": -0.0019821308087557554,
      "step": 20
    },
    {
      "epoch": 0.3761755485893417,
      "grad_norm": 7.437427526131843,
      "learning_rate": 3.9061232191019517e-07,
      "logits/chosen": -2.7480266094207764,
      "logits/rejected": -2.7372658252716064,
      "logps/chosen": -258.54705810546875,
      "logps/rejected": -261.3839416503906,
      "loss": 0.6524,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.0330645926296711,
      "rewards/margins": 0.083428755402565,
      "rewards/rejected": -0.1164933443069458,
      "step": 30
    },
    {
      "epoch": 0.5015673981191222,
      "grad_norm": 9.260816868546575,
      "learning_rate": 2.8856223324132555e-07,
      "logits/chosen": -2.7586328983306885,
      "logits/rejected": -2.7407941818237305,
      "logps/chosen": -277.0296325683594,
      "logps/rejected": -269.89105224609375,
      "loss": 0.6274,
      "rewards/accuracies": 0.71875,
      "rewards/chosen": -0.15935972332954407,
      "rewards/margins": 0.19388534128665924,
      "rewards/rejected": -0.3532450497150421,
      "step": 40
    },
    {
      "epoch": 0.6269592476489029,
      "grad_norm": 13.440996020894056,
      "learning_rate": 1.7908455541642582e-07,
      "logits/chosen": -2.758403778076172,
      "logits/rejected": -2.735839366912842,
      "logps/chosen": -311.75067138671875,
      "logps/rejected": -301.5154724121094,
      "loss": 0.6091,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.2620142996311188,
      "rewards/margins": 0.27029839158058167,
      "rewards/rejected": -0.5323126316070557,
      "step": 50
    },
    {
      "epoch": 0.7523510971786834,
      "grad_norm": 10.982206859063186,
      "learning_rate": 8.32661172908373e-08,
      "logits/chosen": -2.7525084018707275,
      "logits/rejected": -2.7330567836761475,
      "logps/chosen": -266.4403381347656,
      "logps/rejected": -292.4945373535156,
      "loss": 0.5938,
      "rewards/accuracies": 0.7406250238418579,
      "rewards/chosen": -0.28137117624282837,
      "rewards/margins": 0.3296356201171875,
      "rewards/rejected": -0.6110068559646606,
      "step": 60
    },
    {
      "epoch": 0.877742946708464,
      "grad_norm": 10.390233056901057,
      "learning_rate": 1.956279997278043e-08,
      "logits/chosen": -2.7587547302246094,
      "logits/rejected": -2.733307123184204,
      "logps/chosen": -305.1893615722656,
      "logps/rejected": -302.3923034667969,
      "loss": 0.5905,
      "rewards/accuracies": 0.703125,
      "rewards/chosen": -0.31602221727371216,
      "rewards/margins": 0.29854491353034973,
      "rewards/rejected": -0.6145671606063843,
      "step": 70
    },
    {
      "epoch": 0.9905956112852664,
      "step": 79,
      "total_flos": 0.0,
      "train_loss": 0.6319818134549298,
      "train_runtime": 2210.8428,
      "train_samples_per_second": 9.217,
      "train_steps_per_second": 0.036
    }
  ],
  "logging_steps": 10,
  "max_steps": 79,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}