{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9936305732484076,
  "eval_steps": 500,
  "global_step": 78,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012738853503184714,
      "grad_norm": 13.343631461650636,
      "learning_rate": 6.25e-08,
      "logits/chosen": -2.923123598098755,
      "logits/rejected": -2.883568048477173,
      "logps/chosen": -275.7285461425781,
      "logps/pi_response": -74.39646911621094,
      "logps/ref_response": -74.39646911621094,
      "logps/rejected": -99.70209503173828,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.12738853503184713,
      "grad_norm": 11.253513207180946,
      "learning_rate": 4.989935734988097e-07,
      "logits/chosen": -2.8164381980895996,
      "logits/rejected": -2.781937837600708,
      "logps/chosen": -227.93551635742188,
      "logps/pi_response": -64.4197998046875,
      "logps/ref_response": -63.84031295776367,
      "logps/rejected": -105.435302734375,
      "loss": 0.6891,
      "rewards/accuracies": 0.5208333134651184,
      "rewards/chosen": 0.005453579593449831,
      "rewards/margins": 0.006863526534289122,
      "rewards/rejected": -0.0014099470572546124,
      "step": 10
    },
    {
      "epoch": 0.25477707006369427,
      "grad_norm": 7.603005231304034,
      "learning_rate": 4.646121984004665e-07,
      "logits/chosen": -2.661804676055908,
      "logits/rejected": -2.629075050354004,
      "logps/chosen": -276.488037109375,
      "logps/pi_response": -90.92379760742188,
      "logps/ref_response": -75.4796142578125,
      "logps/rejected": -123.3162612915039,
      "loss": 0.6422,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": 0.035294655710458755,
      "rewards/margins": 0.16240255534648895,
      "rewards/rejected": -0.1271079182624817,
      "step": 20
    },
    {
      "epoch": 0.3821656050955414,
      "grad_norm": 6.985251610579986,
      "learning_rate": 3.877242453630256e-07,
      "logits/chosen": -2.6086363792419434,
      "logits/rejected": -2.5879595279693604,
      "logps/chosen": -249.6334686279297,
      "logps/pi_response": -124.77403259277344,
      "logps/ref_response": -73.44969177246094,
      "logps/rejected": -150.97244262695312,
      "loss": 0.6136,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.165732279419899,
      "rewards/margins": 0.25328412652015686,
      "rewards/rejected": -0.41901636123657227,
      "step": 30
    },
    {
      "epoch": 0.5095541401273885,
      "grad_norm": 10.763929928294964,
      "learning_rate": 2.8355831645441387e-07,
      "logits/chosen": -2.5407841205596924,
      "logits/rejected": -2.5103654861450195,
      "logps/chosen": -229.60879516601562,
      "logps/pi_response": -129.8967742919922,
      "logps/ref_response": -65.92216491699219,
      "logps/rejected": -167.57904052734375,
      "loss": 0.5969,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.27106621861457825,
      "rewards/margins": 0.2314259111881256,
      "rewards/rejected": -0.5024920701980591,
      "step": 40
    },
    {
      "epoch": 0.6369426751592356,
      "grad_norm": 10.451471450915024,
      "learning_rate": 1.7274575140626315e-07,
      "logits/chosen": -2.5744354724884033,
      "logits/rejected": -2.5362110137939453,
      "logps/chosen": -273.7886047363281,
      "logps/pi_response": -153.47801208496094,
      "logps/ref_response": -70.1203842163086,
      "logps/rejected": -174.98007202148438,
      "loss": 0.5556,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.27356070280075073,
      "rewards/margins": 0.4625588357448578,
      "rewards/rejected": -0.7361195683479309,
      "step": 50
    },
    {
      "epoch": 0.7643312101910829,
      "grad_norm": 11.045205015520507,
      "learning_rate": 7.723433775328384e-08,
      "logits/chosen": -2.589782476425171,
      "logits/rejected": -2.533975124359131,
      "logps/chosen": -272.664306640625,
      "logps/pi_response": -169.4038543701172,
      "logps/ref_response": -68.78282165527344,
      "logps/rejected": -199.9068145751953,
      "loss": 0.5591,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.3962182402610779,
      "rewards/margins": 0.48118796944618225,
      "rewards/rejected": -0.877406120300293,
      "step": 60
    },
    {
      "epoch": 0.89171974522293,
      "grad_norm": 9.344569317622456,
      "learning_rate": 1.5941282340065697e-08,
      "logits/chosen": -2.5518054962158203,
      "logits/rejected": -2.5584278106689453,
      "logps/chosen": -255.5514678955078,
      "logps/pi_response": -165.05784606933594,
      "logps/ref_response": -72.47773742675781,
      "logps/rejected": -196.47874450683594,
      "loss": 0.5526,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": -0.327322781085968,
      "rewards/margins": 0.45833224058151245,
      "rewards/rejected": -0.7856550812721252,
      "step": 70
    },
    {
      "epoch": 0.9936305732484076,
      "step": 78,
      "total_flos": 0.0,
      "train_loss": 0.5938225159278283,
      "train_runtime": 1654.7204,
      "train_samples_per_second": 6.043,
      "train_steps_per_second": 0.047
    }
  ],
  "logging_steps": 10,
  "max_steps": 78,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}