{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "global_step": 22940,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 1.5566625155666254e-05,
      "loss": 9.0543,
      "step": 500
    },
    {
      "epoch": 0.04,
      "learning_rate": 3.113325031133251e-05,
      "loss": 6.7369,
      "step": 1000
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.6699875466998756e-05,
      "loss": 6.0998,
      "step": 1500
    },
    {
      "epoch": 0.09,
      "learning_rate": 6.226650062266502e-05,
      "loss": 5.4282,
      "step": 2000
    },
    {
      "epoch": 0.11,
      "learning_rate": 7.783312577833126e-05,
      "loss": 4.749,
      "step": 2500
    },
    {
      "epoch": 0.13,
      "learning_rate": 9.339975093399751e-05,
      "loss": 4.3351,
      "step": 3000
    },
    {
      "epoch": 0.15,
      "learning_rate": 9.990941915030131e-05,
      "loss": 4.0473,
      "step": 3500
    },
    {
      "epoch": 0.17,
      "learning_rate": 9.975216073068553e-05,
      "loss": 3.8251,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "learning_rate": 9.959490231106974e-05,
      "loss": 3.6631,
      "step": 4500
    },
    {
      "epoch": 0.22,
      "learning_rate": 9.943764389145395e-05,
      "loss": 3.532,
      "step": 5000
    },
    {
      "epoch": 0.24,
      "learning_rate": 9.928038547183816e-05,
      "loss": 3.4285,
      "step": 5500
    },
    {
      "epoch": 0.26,
      "learning_rate": 9.912312705222237e-05,
      "loss": 3.3414,
      "step": 6000
    },
    {
      "epoch": 0.28,
      "learning_rate": 9.89658686326066e-05,
      "loss": 3.2622,
      "step": 6500
    },
    {
      "epoch": 0.31,
      "learning_rate": 9.880861021299081e-05,
      "loss": 3.2048,
      "step": 7000
    },
    {
      "epoch": 0.33,
      "learning_rate": 9.865135179337502e-05,
      "loss": 3.1463,
      "step": 7500
    },
    {
      "epoch": 0.35,
      "learning_rate": 9.849409337375923e-05,
      "loss": 3.0932,
      "step": 8000
    },
    {
      "epoch": 0.37,
      "learning_rate": 9.833683495414345e-05,
      "loss": 3.0433,
      "step": 8500
    },
    {
      "epoch": 0.39,
      "learning_rate": 9.817957653452766e-05,
      "loss": 3.0056,
      "step": 9000
    },
    {
      "epoch": 0.41,
      "learning_rate": 9.802231811491189e-05,
      "loss": 2.9611,
      "step": 9500
    },
    {
      "epoch": 0.44,
      "learning_rate": 9.78650596952961e-05,
      "loss": 2.9247,
      "step": 10000
    },
    {
      "epoch": 0.46,
      "learning_rate": 9.77078012756803e-05,
      "loss": 2.8874,
      "step": 10500
    },
    {
      "epoch": 0.48,
      "learning_rate": 9.755054285606452e-05,
      "loss": 2.8569,
      "step": 11000
    },
    {
      "epoch": 0.5,
      "learning_rate": 9.739328443644873e-05,
      "loss": 2.8276,
      "step": 11500
    },
    {
      "epoch": 0.52,
      "learning_rate": 9.723602601683295e-05,
      "loss": 2.7987,
      "step": 12000
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.70790821140564e-05,
      "loss": 2.7716,
      "step": 12500
    },
    {
      "epoch": 0.57,
      "learning_rate": 9.69218236944406e-05,
      "loss": 2.745,
      "step": 13000
    },
    {
      "epoch": 0.59,
      "learning_rate": 9.676456527482482e-05,
      "loss": 2.7229,
      "step": 13500
    },
    {
      "epoch": 0.61,
      "learning_rate": 9.660730685520904e-05,
      "loss": 2.7021,
      "step": 14000
    },
    {
      "epoch": 0.63,
      "learning_rate": 9.645036295243249e-05,
      "loss": 2.679,
      "step": 14500
    },
    {
      "epoch": 0.65,
      "learning_rate": 9.62931045328167e-05,
      "loss": 2.6618,
      "step": 15000
    },
    {
      "epoch": 0.68,
      "learning_rate": 9.61358461132009e-05,
      "loss": 2.6444,
      "step": 15500
    },
    {
      "epoch": 0.7,
      "learning_rate": 9.597858769358512e-05,
      "loss": 2.6249,
      "step": 16000
    },
    {
      "epoch": 0.72,
      "learning_rate": 9.582164379080856e-05,
      "loss": 2.6169,
      "step": 16500
    },
    {
      "epoch": 0.74,
      "learning_rate": 9.566438537119277e-05,
      "loss": 2.5936,
      "step": 17000
    },
    {
      "epoch": 0.76,
      "learning_rate": 9.5507126951577e-05,
      "loss": 2.5854,
      "step": 17500
    },
    {
      "epoch": 0.78,
      "learning_rate": 9.535018304880043e-05,
      "loss": 2.5693,
      "step": 18000
    },
    {
      "epoch": 0.81,
      "learning_rate": 9.519292462918465e-05,
      "loss": 2.5525,
      "step": 18500
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.503566620956886e-05,
      "loss": 2.539,
      "step": 19000
    },
    {
      "epoch": 0.85,
      "learning_rate": 9.487840778995309e-05,
      "loss": 2.5256,
      "step": 19500
    },
    {
      "epoch": 0.87,
      "learning_rate": 9.47211493703373e-05,
      "loss": 2.5207,
      "step": 20000
    },
    {
      "epoch": 0.89,
      "learning_rate": 9.45638909507215e-05,
      "loss": 2.5017,
      "step": 20500
    },
    {
      "epoch": 0.92,
      "learning_rate": 9.440663253110572e-05,
      "loss": 2.4895,
      "step": 21000
    },
    {
      "epoch": 0.94,
      "learning_rate": 9.424968862832916e-05,
      "loss": 2.4808,
      "step": 21500
    },
    {
      "epoch": 0.96,
      "learning_rate": 9.409243020871337e-05,
      "loss": 2.4713,
      "step": 22000
    },
    {
      "epoch": 0.98,
      "learning_rate": 9.39351717890976e-05,
      "loss": 2.4602,
      "step": 22500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.5603946985569819,
      "eval_loss": 2.3326776027679443,
      "eval_runtime": 361.5158,
      "eval_samples_per_second": 492.238,
      "eval_steps_per_second": 30.765,
      "step": 22940
    }
  ],
  "max_steps": 321160,
  "num_train_epochs": 14,
  "total_flos": 1.7360137911435264e+17,
  "trial_name": null,
  "trial_params": null
}