{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"global_step": 16370,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15,
"learning_rate": 1.938912645082468e-05,
"loss": 0.8164,
"step": 500
},
{
"epoch": 0.31,
"learning_rate": 1.8778252901649362e-05,
"loss": 0.8115,
"step": 1000
},
{
"epoch": 0.46,
"learning_rate": 1.816737935247404e-05,
"loss": 0.8157,
"step": 1500
},
{
"epoch": 0.61,
"learning_rate": 1.755650580329872e-05,
"loss": 0.8115,
"step": 2000
},
{
"epoch": 0.76,
"learning_rate": 1.69456322541234e-05,
"loss": 0.813,
"step": 2500
},
{
"epoch": 0.92,
"learning_rate": 1.6334758704948076e-05,
"loss": 0.8195,
"step": 3000
},
{
"epoch": 1.07,
"learning_rate": 1.5723885155772757e-05,
"loss": 0.8159,
"step": 3500
},
{
"epoch": 1.22,
"learning_rate": 1.5113011606597437e-05,
"loss": 0.817,
"step": 4000
},
{
"epoch": 1.37,
"learning_rate": 1.4502138057422115e-05,
"loss": 0.8126,
"step": 4500
},
{
"epoch": 1.53,
"learning_rate": 1.3891264508246794e-05,
"loss": 0.8166,
"step": 5000
},
{
"epoch": 1.68,
"learning_rate": 1.3280390959071474e-05,
"loss": 0.8129,
"step": 5500
},
{
"epoch": 1.83,
"learning_rate": 1.2669517409896153e-05,
"loss": 0.816,
"step": 6000
},
{
"epoch": 1.99,
"learning_rate": 1.2058643860720831e-05,
"loss": 0.8149,
"step": 6500
},
{
"epoch": 2.14,
"learning_rate": 1.1447770311545512e-05,
"loss": 0.8151,
"step": 7000
},
{
"epoch": 2.29,
"learning_rate": 1.083689676237019e-05,
"loss": 0.817,
"step": 7500
},
{
"epoch": 2.44,
"learning_rate": 1.0226023213194869e-05,
"loss": 0.8087,
"step": 8000
},
{
"epoch": 2.6,
"learning_rate": 9.615149664019549e-06,
"loss": 0.8124,
"step": 8500
},
{
"epoch": 2.75,
"learning_rate": 9.004276114844227e-06,
"loss": 0.8164,
"step": 9000
},
{
"epoch": 2.9,
"learning_rate": 8.393402565668908e-06,
"loss": 0.8146,
"step": 9500
},
{
"epoch": 3.05,
"learning_rate": 7.782529016493586e-06,
"loss": 0.8202,
"step": 10000
},
{
"epoch": 3.21,
"learning_rate": 7.171655467318266e-06,
"loss": 0.8123,
"step": 10500
},
{
"epoch": 3.36,
"learning_rate": 6.560781918142944e-06,
"loss": 0.8136,
"step": 11000
},
{
"epoch": 3.51,
"learning_rate": 5.949908368967624e-06,
"loss": 0.8131,
"step": 11500
},
{
"epoch": 3.67,
"learning_rate": 5.339034819792304e-06,
"loss": 0.8264,
"step": 12000
},
{
"epoch": 3.82,
"learning_rate": 4.728161270616982e-06,
"loss": 0.8086,
"step": 12500
},
{
"epoch": 3.97,
"learning_rate": 4.117287721441662e-06,
"loss": 0.8162,
"step": 13000
},
{
"epoch": 4.12,
"learning_rate": 3.506414172266341e-06,
"loss": 0.8159,
"step": 13500
},
{
"epoch": 4.28,
"learning_rate": 2.8955406230910206e-06,
"loss": 0.8139,
"step": 14000
},
{
"epoch": 4.43,
"learning_rate": 2.2846670739156996e-06,
"loss": 0.8151,
"step": 14500
},
{
"epoch": 4.58,
"learning_rate": 1.6737935247403788e-06,
"loss": 0.8118,
"step": 15000
},
{
"epoch": 4.73,
"learning_rate": 1.0629199755650582e-06,
"loss": 0.8212,
"step": 15500
},
{
"epoch": 4.89,
"learning_rate": 4.5204642638973736e-07,
"loss": 0.8104,
"step": 16000
},
{
"epoch": 5.0,
"step": 16370,
"total_flos": 4.555745093910528e+16,
"train_loss": 0.8148820433153492,
"train_runtime": 3646.2431,
"train_samples_per_second": 143.631,
"train_steps_per_second": 4.49
}
],
"max_steps": 16370,
"num_train_epochs": 5,
"total_flos": 4.555745093910528e+16,
"trial_name": null,
"trial_params": null
}