{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.21999853334311104,
  "eval_steps": 5500,
  "global_step": 33000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 9.997777792592494e-05,
      "loss": 0.0307,
      "step": 500
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.995555585184987e-05,
      "loss": 0.0312,
      "step": 1000
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.993333377777482e-05,
      "loss": 0.0311,
      "step": 1500
    },
    {
      "epoch": 0.01,
      "learning_rate": 9.991111170369976e-05,
      "loss": 0.0309,
      "step": 2000
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.988888962962469e-05,
      "loss": 0.0311,
      "step": 2500
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.986666755554963e-05,
      "loss": 0.0309,
      "step": 3000
    },
    {
      "epoch": 0.02,
      "learning_rate": 9.984444548147457e-05,
      "loss": 0.031,
      "step": 3500
    },
    {
      "epoch": 0.03,
      "learning_rate": 9.982222340739951e-05,
      "loss": 0.0309,
      "step": 4000
    },
    {
      "epoch": 0.03,
      "learning_rate": 9.980000133332444e-05,
      "loss": 0.0312,
      "step": 4500
    },
    {
      "epoch": 0.03,
      "learning_rate": 9.977777925924938e-05,
      "loss": 0.031,
      "step": 5000
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.975555718517432e-05,
      "loss": 0.031,
      "step": 5500
    },
    {
      "epoch": 0.04,
      "eval_loss": 0.031031738966703415,
      "eval_runtime": 12112.511,
      "eval_samples_per_second": 2.064,
      "eval_steps_per_second": 0.344,
      "step": 5500
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.973333511109927e-05,
      "loss": 0.0308,
      "step": 6000
    },
    {
      "epoch": 0.04,
      "learning_rate": 9.971111303702421e-05,
      "loss": 0.0311,
      "step": 6500
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.968889096294914e-05,
      "loss": 0.031,
      "step": 7000
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.966666888887408e-05,
      "loss": 0.0313,
      "step": 7500
    },
    {
      "epoch": 0.05,
      "learning_rate": 9.964444681479902e-05,
      "loss": 0.0311,
      "step": 8000
    },
    {
      "epoch": 0.06,
      "learning_rate": 9.962222474072396e-05,
      "loss": 0.031,
      "step": 8500
    },
    {
      "epoch": 0.06,
      "learning_rate": 9.960000266664889e-05,
      "loss": 0.0313,
      "step": 9000
    },
    {
      "epoch": 0.06,
      "learning_rate": 9.957778059257383e-05,
      "loss": 0.0313,
      "step": 9500
    },
    {
      "epoch": 0.07,
      "learning_rate": 9.955555851849878e-05,
      "loss": 0.031,
      "step": 10000
    },
    {
      "epoch": 0.07,
      "learning_rate": 9.953333644442371e-05,
      "loss": 0.0318,
      "step": 10500
    },
    {
      "epoch": 0.07,
      "learning_rate": 9.951111437034864e-05,
      "loss": 0.0316,
      "step": 11000
    },
    {
      "epoch": 0.07,
      "eval_loss": 0.030834877863526344,
      "eval_runtime": 12107.9455,
      "eval_samples_per_second": 2.065,
      "eval_steps_per_second": 0.344,
      "step": 11000
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.948889229627358e-05,
      "loss": 0.0311,
      "step": 11500
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.946667022219853e-05,
      "loss": 0.0308,
      "step": 12000
    },
    {
      "epoch": 0.08,
      "learning_rate": 9.944444814812346e-05,
      "loss": 0.0308,
      "step": 12500
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.94222260740484e-05,
      "loss": 0.0309,
      "step": 13000
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.940000399997333e-05,
      "loss": 0.0308,
      "step": 13500
    },
    {
      "epoch": 0.09,
      "learning_rate": 9.937778192589828e-05,
      "loss": 0.0313,
      "step": 14000
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.935555985182321e-05,
      "loss": 0.0311,
      "step": 14500
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.933333777774815e-05,
      "loss": 0.0312,
      "step": 15000
    },
    {
      "epoch": 0.1,
      "learning_rate": 9.93111157036731e-05,
      "loss": 0.0311,
      "step": 15500
    },
    {
      "epoch": 0.11,
      "learning_rate": 9.928889362959803e-05,
      "loss": 0.031,
      "step": 16000
    },
    {
      "epoch": 0.11,
      "learning_rate": 9.926667155552298e-05,
      "loss": 0.031,
      "step": 16500
    },
    {
      "epoch": 0.11,
      "eval_loss": 0.03085586242377758,
      "eval_runtime": 12089.5064,
      "eval_samples_per_second": 2.068,
      "eval_steps_per_second": 0.345,
      "step": 16500
    },
    {
      "epoch": 0.11,
      "learning_rate": 9.924444948144791e-05,
      "loss": 0.0309,
      "step": 17000
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.922222740737285e-05,
      "loss": 0.0311,
      "step": 17500
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.920000533329778e-05,
      "loss": 0.0315,
      "step": 18000
    },
    {
      "epoch": 0.12,
      "learning_rate": 9.917778325922273e-05,
      "loss": 0.0308,
      "step": 18500
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.915556118514766e-05,
      "loss": 0.0315,
      "step": 19000
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.91333391110726e-05,
      "loss": 0.0309,
      "step": 19500
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.911111703699753e-05,
      "loss": 0.0313,
      "step": 20000
    },
    {
      "epoch": 0.14,
      "learning_rate": 9.908889496292248e-05,
      "loss": 0.031,
      "step": 20500
    },
    {
      "epoch": 0.14,
      "learning_rate": 9.906667288884741e-05,
      "loss": 0.0311,
      "step": 21000
    },
    {
      "epoch": 0.14,
      "learning_rate": 9.904445081477235e-05,
      "loss": 0.0314,
      "step": 21500
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.902222874069728e-05,
      "loss": 0.0311,
      "step": 22000
    },
    {
      "epoch": 0.15,
      "eval_loss": 0.030636409297585487,
      "eval_runtime": 12087.8542,
      "eval_samples_per_second": 2.068,
      "eval_steps_per_second": 0.345,
      "step": 22000
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.900000666662223e-05,
      "loss": 0.0311,
      "step": 22500
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.897778459254717e-05,
      "loss": 0.0311,
      "step": 23000
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.89555625184721e-05,
      "loss": 0.0307,
      "step": 23500
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.893334044439703e-05,
      "loss": 0.0308,
      "step": 24000
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.891111837032197e-05,
      "loss": 0.0306,
      "step": 24500
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.888889629624692e-05,
      "loss": 0.031,
      "step": 25000
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.886667422217187e-05,
      "loss": 0.0311,
      "step": 25500
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.88444521480968e-05,
      "loss": 0.0307,
      "step": 26000
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.882223007402173e-05,
      "loss": 0.0309,
      "step": 26500
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.880000799994667e-05,
      "loss": 0.0308,
      "step": 27000
    },
    {
      "epoch": 0.18,
      "learning_rate": 9.877778592587162e-05,
      "loss": 0.0308,
      "step": 27500
    },
    {
      "epoch": 0.18,
      "eval_loss": 0.030515629798173904,
      "eval_runtime": 12091.2293,
      "eval_samples_per_second": 2.068,
      "eval_steps_per_second": 0.345,
      "step": 27500
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.875556385179655e-05,
      "loss": 0.0313,
      "step": 28000
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.873334177772149e-05,
      "loss": 0.031,
      "step": 28500
    },
    {
      "epoch": 0.19,
      "learning_rate": 9.871111970364642e-05,
      "loss": 0.0311,
      "step": 29000
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.868889762957137e-05,
      "loss": 0.031,
      "step": 29500
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.86666755554963e-05,
      "loss": 0.0312,
      "step": 30000
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.864445348142124e-05,
      "loss": 0.0307,
      "step": 30500
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.862223140734617e-05,
      "loss": 0.0309,
      "step": 31000
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.860000933327112e-05,
      "loss": 0.0308,
      "step": 31500
    },
    {
      "epoch": 0.21,
      "learning_rate": 9.857778725919605e-05,
      "loss": 0.0309,
      "step": 32000
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.855556518512099e-05,
      "loss": 0.0311,
      "step": 32500
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.853334311104592e-05,
      "loss": 0.0307,
      "step": 33000
    },
    {
      "epoch": 0.22,
      "eval_loss": 0.030391648411750793,
      "eval_runtime": 12101.6748,
      "eval_samples_per_second": 2.066,
      "eval_steps_per_second": 0.344,
      "step": 33000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2250015,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 5500,
  "total_flos": 1.714489493815296e+18,
  "train_batch_size": 6,
  "trial_name": null,
  "trial_params": null
}