hw1 / run-3 / checkpoint-8552 / trainer_state.json
{
"best_metric": 0.5108951669698317,
"best_model_checkpoint": "distilbert-base-uncased-finetuned-cola/run-3/checkpoint-8552",
"epoch": 4.0,
"eval_steps": 500,
"global_step": 8552,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.23386342376052385,
"grad_norm": 9.916498184204102,
"learning_rate": 1.0352460846585169e-05,
"loss": 0.588,
"step": 500
},
{
"epoch": 0.4677268475210477,
"grad_norm": 2.0442557334899902,
"learning_rate": 9.709610570468354e-06,
"loss": 0.5642,
"step": 1000
},
{
"epoch": 0.7015902712815716,
"grad_norm": 25.158737182617188,
"learning_rate": 9.066760294351542e-06,
"loss": 0.5358,
"step": 1500
},
{
"epoch": 0.9354536950420954,
"grad_norm": 21.2142276763916,
"learning_rate": 8.423910018234727e-06,
"loss": 0.5288,
"step": 2000
},
{
"epoch": 1.0,
"eval_loss": 0.6297594904899597,
"eval_matthews_correlation": 0.4181278600904661,
"eval_runtime": 0.8372,
"eval_samples_per_second": 1245.804,
"eval_steps_per_second": 78.833,
"step": 2138
},
{
"epoch": 1.1693171188026192,
"grad_norm": 40.90157699584961,
"learning_rate": 7.781059742117914e-06,
"loss": 0.4866,
"step": 2500
},
{
"epoch": 1.4031805425631432,
"grad_norm": 0.11679931730031967,
"learning_rate": 7.138209466001099e-06,
"loss": 0.4601,
"step": 3000
},
{
"epoch": 1.637043966323667,
"grad_norm": 9.670472145080566,
"learning_rate": 6.495359189884285e-06,
"loss": 0.4758,
"step": 3500
},
{
"epoch": 1.8709073900841908,
"grad_norm": 1.609247088432312,
"learning_rate": 5.852508913767472e-06,
"loss": 0.4517,
"step": 4000
},
{
"epoch": 2.0,
"eval_loss": 0.9118176102638245,
"eval_matthews_correlation": 0.4622674649892012,
"eval_runtime": 0.7476,
"eval_samples_per_second": 1395.206,
"eval_steps_per_second": 88.287,
"step": 4276
},
{
"epoch": 2.1047708138447145,
"grad_norm": 0.1278693974018097,
"learning_rate": 5.209658637650659e-06,
"loss": 0.3716,
"step": 4500
},
{
"epoch": 2.3386342376052385,
"grad_norm": 0.046489011496305466,
"learning_rate": 4.566808361533844e-06,
"loss": 0.3199,
"step": 5000
},
{
"epoch": 2.5724976613657624,
"grad_norm": 8.200950622558594,
"learning_rate": 3.923958085417031e-06,
"loss": 0.3667,
"step": 5500
},
{
"epoch": 2.8063610851262863,
"grad_norm": 0.06558237969875336,
"learning_rate": 3.281107809300217e-06,
"loss": 0.3323,
"step": 6000
},
{
"epoch": 3.0,
"eval_loss": 0.9285444021224976,
"eval_matthews_correlation": 0.5015847043424765,
"eval_runtime": 0.7214,
"eval_samples_per_second": 1445.788,
"eval_steps_per_second": 91.488,
"step": 6414
},
{
"epoch": 3.0402245088868103,
"grad_norm": 0.10314546525478363,
"learning_rate": 2.6382575331834036e-06,
"loss": 0.325,
"step": 6500
},
{
"epoch": 3.2740879326473338,
"grad_norm": 0.09269363433122635,
"learning_rate": 1.99540725706659e-06,
"loss": 0.2483,
"step": 7000
},
{
"epoch": 3.5079513564078577,
"grad_norm": 0.033062130212783813,
"learning_rate": 1.3525569809497761e-06,
"loss": 0.2065,
"step": 7500
},
{
"epoch": 3.7418147801683816,
"grad_norm": 2.666268825531006,
"learning_rate": 7.097067048329624e-07,
"loss": 0.2449,
"step": 8000
},
{
"epoch": 3.9756782039289056,
"grad_norm": 0.12107378989458084,
"learning_rate": 6.685642871614863e-08,
"loss": 0.2523,
"step": 8500
},
{
"epoch": 4.0,
"eval_loss": 1.092543125152588,
"eval_matthews_correlation": 0.5108951669698317,
"eval_runtime": 0.8843,
"eval_samples_per_second": 1179.471,
"eval_steps_per_second": 74.636,
"step": 8552
}
],
"logging_steps": 500,
"max_steps": 8552,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 138564521149608.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": {
"learning_rate": 1.0995311122701982e-05,
"num_train_epochs": 4,
"per_device_train_batch_size": 4,
"seed": 21
}
}
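
For reference, a minimal sketch of how a state file like the one above can be read back. It assumes Python's standard json module and a local copy saved as "trainer_state.json" (the filename is an assumption for illustration); it prints the best Matthews-correlation score, the best checkpoint path, and the per-epoch eval entries recorded in log_history.

# Minimal sketch: load trainer_state.json and summarize the eval history.
# Assumes the file has been downloaded locally as "trainer_state.json".
import json

with open("trainer_state.json") as f:   # hypothetical local path
    state = json.load(f)

print("best metric (MCC): ", state["best_metric"])
print("best checkpoint:   ", state["best_model_checkpoint"])

# log_history mixes training-loss entries and end-of-epoch eval entries;
# keep only the entries that carry eval_matthews_correlation.
for entry in state["log_history"]:
    if "eval_matthews_correlation" in entry:
        print(f'epoch {entry["epoch"]:.0f}: '
              f'eval_loss={entry["eval_loss"]:.4f}, '
              f'mcc={entry["eval_matthews_correlation"]:.4f}')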