{
  "best_metric": 0.11086247861385345,
  "best_model_checkpoint": "output_pipe/prom_300_notata/origin/checkpoint-1000",
  "epoch": 4.0,
  "eval_steps": 200,
  "global_step": 2656,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.15060240963855423,
      "grad_norm": 22.862253189086914,
      "learning_rate": 2.9470452801227934e-05,
      "loss": 0.3165,
      "step": 100
    },
    {
      "epoch": 0.30120481927710846,
      "grad_norm": 15.997178077697754,
      "learning_rate": 2.831926323867997e-05,
      "loss": 0.174,
      "step": 200
    },
    {
      "epoch": 0.30120481927710846,
      "eval_accuracy": 0.9440361786319955,
      "eval_f1": 0.9440350340667568,
      "eval_loss": 0.13831785321235657,
      "eval_matthews_correlation": 0.8884885605641855,
      "eval_precision": 0.9443041112978641,
      "eval_recall": 0.9441844573233038,
      "eval_runtime": 1.5317,
      "eval_samples_per_second": 3464.713,
      "eval_steps_per_second": 54.187,
      "step": 200
    },
    {
      "epoch": 0.45180722891566266,
      "grad_norm": 9.916468620300293,
      "learning_rate": 2.7168073676132003e-05,
      "loss": 0.1727,
      "step": 300
    },
    {
      "epoch": 0.6024096385542169,
      "grad_norm": 8.223430633544922,
      "learning_rate": 2.6016884113584036e-05,
      "loss": 0.1306,
      "step": 400
    },
    {
      "epoch": 0.6024096385542169,
      "eval_accuracy": 0.9470510646316186,
      "eval_f1": 0.9469116079894582,
      "eval_loss": 0.1320875585079193,
      "eval_matthews_correlation": 0.8970111970995432,
      "eval_precision": 0.9503975774077025,
      "eval_recall": 0.946621567301194,
      "eval_runtime": 1.5336,
      "eval_samples_per_second": 3460.585,
      "eval_steps_per_second": 54.123,
      "step": 400
    },
    {
      "epoch": 0.7530120481927711,
      "grad_norm": 8.465655326843262,
      "learning_rate": 2.4865694551036073e-05,
      "loss": 0.1449,
      "step": 500
    },
    {
      "epoch": 0.9036144578313253,
      "grad_norm": 8.037919998168945,
      "learning_rate": 2.3714504988488106e-05,
      "loss": 0.1268,
      "step": 600
    },
    {
      "epoch": 0.9036144578313253,
      "eval_accuracy": 0.9502543810062182,
      "eval_f1": 0.9502384353255368,
      "eval_loss": 0.1223142221570015,
      "eval_matthews_correlation": 0.9019972975694879,
      "eval_precision": 0.9514557669338246,
      "eval_recall": 0.9505419934872148,
      "eval_runtime": 1.5394,
      "eval_samples_per_second": 3447.515,
      "eval_steps_per_second": 53.918,
      "step": 600
    },
    {
      "epoch": 1.0542168674698795,
      "grad_norm": 5.252899646759033,
      "learning_rate": 2.256331542594014e-05,
      "loss": 0.0967,
      "step": 700
    },
    {
      "epoch": 1.2048192771084336,
      "grad_norm": 2.8755605220794678,
      "learning_rate": 2.1412125863392172e-05,
      "loss": 0.0793,
      "step": 800
    },
    {
      "epoch": 1.2048192771084336,
      "eval_accuracy": 0.9534576973808178,
      "eval_f1": 0.9534544978603143,
      "eval_loss": 0.12600094079971313,
      "eval_matthews_correlation": 0.9075729094229542,
      "eval_precision": 0.9539269039717451,
      "eval_recall": 0.9536460489075334,
      "eval_runtime": 1.546,
      "eval_samples_per_second": 3432.697,
      "eval_steps_per_second": 53.686,
      "step": 800
    },
    {
      "epoch": 1.355421686746988,
      "grad_norm": 15.925186157226562,
      "learning_rate": 2.026093630084421e-05,
      "loss": 0.0632,
      "step": 900
    },
    {
      "epoch": 1.5060240963855422,
      "grad_norm": 5.883889198303223,
      "learning_rate": 1.9109746738296238e-05,
      "loss": 0.0664,
      "step": 1000
    },
    {
      "epoch": 1.5060240963855422,
      "eval_accuracy": 0.9634445072545694,
      "eval_f1": 0.9634296411014179,
      "eval_loss": 0.11086247861385345,
      "eval_matthews_correlation": 0.9270444420166869,
      "eval_precision": 0.9637054919711341,
      "eval_recall": 0.9633390224799887,
      "eval_runtime": 1.5519,
      "eval_samples_per_second": 3419.769,
      "eval_steps_per_second": 53.484,
      "step": 1000
    },
    {
      "epoch": 1.6566265060240963,
      "grad_norm": 9.317992210388184,
      "learning_rate": 1.7958557175748274e-05,
      "loss": 0.0651,
      "step": 1100
    },
    {
      "epoch": 1.8072289156626506,
      "grad_norm": 7.534521102905273,
      "learning_rate": 1.6807367613200308e-05,
      "loss": 0.0644,
      "step": 1200
    },
    {
      "epoch": 1.8072289156626506,
      "eval_accuracy": 0.9570378745053703,
      "eval_f1": 0.9570372017874856,
      "eval_loss": 0.14527226984500885,
      "eval_matthews_correlation": 0.9144670396513508,
      "eval_precision": 0.9572853548448508,
      "eval_recall": 0.9571816906821948,
      "eval_runtime": 1.5434,
      "eval_samples_per_second": 3438.416,
      "eval_steps_per_second": 53.776,
      "step": 1200
    },
    {
      "epoch": 1.9578313253012047,
      "grad_norm": 7.254329681396484,
      "learning_rate": 1.565617805065234e-05,
      "loss": 0.0635,
      "step": 1300
    },
    {
      "epoch": 2.108433734939759,
      "grad_norm": 6.914297580718994,
      "learning_rate": 1.4504988488104375e-05,
      "loss": 0.0374,
      "step": 1400
    },
    {
      "epoch": 2.108433734939759,
      "eval_accuracy": 0.9628792161296401,
      "eval_f1": 0.9628401830979684,
      "eval_loss": 0.18788941204547882,
      "eval_matthews_correlation": 0.9266187165348982,
      "eval_precision": 0.9639729425818305,
      "eval_recall": 0.9626467230259594,
      "eval_runtime": 1.5439,
      "eval_samples_per_second": 3437.375,
      "eval_steps_per_second": 53.76,
      "step": 1400
    },
    {
      "epoch": 2.2590361445783134,
      "grad_norm": 0.7644485235214233,
      "learning_rate": 1.3353798925556408e-05,
      "loss": 0.0279,
      "step": 1500
    },
    {
      "epoch": 2.4096385542168672,
      "grad_norm": 8.384675979614258,
      "learning_rate": 1.2202609363008443e-05,
      "loss": 0.0164,
      "step": 1600
    },
    {
      "epoch": 2.4096385542168672,
      "eval_accuracy": 0.9657056717542868,
      "eval_f1": 0.9656756314944064,
      "eval_loss": 0.16530963778495789,
      "eval_matthews_correlation": 0.9320662060488338,
      "eval_precision": 0.9665640323608415,
      "eval_recall": 0.9655027778622123,
      "eval_runtime": 1.5424,
      "eval_samples_per_second": 3440.766,
      "eval_steps_per_second": 53.813,
      "step": 1600
    },
    {
      "epoch": 2.5602409638554215,
      "grad_norm": 18.41074562072754,
      "learning_rate": 1.1051419800460476e-05,
      "loss": 0.0179,
      "step": 1700
    },
    {
      "epoch": 2.710843373493976,
      "grad_norm": 0.21532420814037323,
      "learning_rate": 9.90023023791251e-06,
      "loss": 0.0251,
      "step": 1800
    },
    {
      "epoch": 2.710843373493976,
      "eval_accuracy": 0.9591106086301112,
      "eval_f1": 0.95910930195127,
      "eval_loss": 0.22635453939437866,
      "eval_matthews_correlation": 0.9187108433086283,
      "eval_precision": 0.9594386335644487,
      "eval_recall": 0.9592722248152282,
      "eval_runtime": 1.5425,
      "eval_samples_per_second": 3440.442,
      "eval_steps_per_second": 53.808,
      "step": 1800
    },
    {
      "epoch": 2.86144578313253,
      "grad_norm": 5.3087263107299805,
      "learning_rate": 8.749040675364544e-06,
      "loss": 0.0224,
      "step": 1900
    },
    {
      "epoch": 3.0120481927710845,
      "grad_norm": 0.07524475455284119,
      "learning_rate": 7.597851112816577e-06,
      "loss": 0.0179,
      "step": 2000
    },
    {
      "epoch": 3.0120481927710845,
      "eval_accuracy": 0.9692858488788393,
      "eval_f1": 0.9692716697192839,
      "eval_loss": 0.17637819051742554,
      "eval_matthews_correlation": 0.9387831838919006,
      "eval_precision": 0.9696159708142937,
      "eval_recall": 0.9691673202840899,
      "eval_runtime": 1.5414,
      "eval_samples_per_second": 3442.9,
      "eval_steps_per_second": 53.846,
      "step": 2000
    },
    {
      "epoch": 3.1626506024096384,
      "grad_norm": 0.002716244664043188,
      "learning_rate": 6.446661550268611e-06,
      "loss": 0.0032,
      "step": 2100
    },
    {
      "epoch": 3.3132530120481927,
      "grad_norm": 0.0046457210555672646,
      "learning_rate": 5.295471987720644e-06,
      "loss": 0.0037,
      "step": 2200
    },
    {
      "epoch": 3.3132530120481927,
      "eval_accuracy": 0.9677784058790277,
      "eval_f1": 0.9677761908263713,
      "eval_loss": 0.21041572093963623,
      "eval_matthews_correlation": 0.9355606983847059,
      "eval_precision": 0.9677642707707517,
      "eval_recall": 0.9677964281666167,
      "eval_runtime": 1.5409,
      "eval_samples_per_second": 3444.045,
      "eval_steps_per_second": 53.864,
      "step": 2200
    },
    {
      "epoch": 3.463855421686747,
      "grad_norm": 7.352254390716553,
      "learning_rate": 4.144282425172678e-06,
      "loss": 0.0021,
      "step": 2300
    },
    {
      "epoch": 3.6144578313253013,
      "grad_norm": 0.1295173019170761,
      "learning_rate": 2.993092862624712e-06,
      "loss": 0.0014,
      "step": 2400
    },
    {
      "epoch": 3.6144578313253013,
      "eval_accuracy": 0.9683436970039571,
      "eval_f1": 0.9683377061336159,
      "eval_loss": 0.22055356204509735,
      "eval_matthews_correlation": 0.936697700228757,
      "eval_precision": 0.9683925624578065,
      "eval_recall": 0.9683051418503684,
      "eval_runtime": 1.5412,
      "eval_samples_per_second": 3443.434,
      "eval_steps_per_second": 53.854,
      "step": 2400
    },
    {
      "epoch": 3.765060240963855,
      "grad_norm": 0.0010891439160332084,
      "learning_rate": 1.8419033000767458e-06,
      "loss": 0.0035,
      "step": 2500
    },
    {
      "epoch": 3.9156626506024095,
      "grad_norm": 0.010914706625044346,
      "learning_rate": 6.907137375287798e-07,
      "loss": 0.0026,
      "step": 2600
    },
    {
      "epoch": 3.9156626506024095,
      "eval_accuracy": 0.9690974185038628,
      "eval_f1": 0.9690902179184598,
      "eval_loss": 0.21992520987987518,
      "eval_matthews_correlation": 0.938226978614608,
      "eval_precision": 0.9691836691199455,
      "eval_recall": 0.9690433199920572,
      "eval_runtime": 1.5378,
      "eval_samples_per_second": 3450.927,
      "eval_steps_per_second": 53.972,
      "step": 2600
    },
    {
      "epoch": 4.0,
      "step": 2656,
      "total_flos": 8308929553956864.0,
      "train_loss": 0.06583031266366682,
      "train_runtime": 193.8482,
      "train_samples_per_second": 875.984,
      "train_steps_per_second": 13.701
    }
  ],
  "logging_steps": 100,
  "max_steps": 2656,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8308929553956864.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}