{
  "best_metric": 0.5273802876472473,
  "best_model_checkpoint": "output_pipe/prom_300_tata/origin/checkpoint-200",
  "epoch": 4.0,
  "eval_steps": 200,
  "global_step": 308,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.2987012987012987,
      "grad_norm": 16.239261627197266,
      "learning_rate": 2.4767441860465116e-05,
      "loss": 0.6103,
      "step": 100
    },
    {
      "epoch": 2.5974025974025974,
      "grad_norm": 17.608713150024414,
      "learning_rate": 1.313953488372093e-05,
      "loss": 0.3571,
      "step": 200
    },
    {
      "epoch": 2.5974025974025974,
      "eval_accuracy": 0.7748776508972267,
      "eval_f1": 0.7730506728478527,
      "eval_loss": 0.5273802876472473,
      "eval_matthews_correlation": 0.5791078000958941,
      "eval_precision": 0.7973376111673984,
      "eval_recall": 0.7819739511049428,
      "eval_runtime": 0.1894,
      "eval_samples_per_second": 3236.113,
      "eval_steps_per_second": 52.791,
      "step": 200
    },
    {
      "epoch": 3.896103896103896,
      "grad_norm": 13.628073692321777,
      "learning_rate": 1.5116279069767443e-06,
      "loss": 0.1742,
      "step": 300
    },
    {
      "epoch": 4.0,
      "step": 308,
      "total_flos": 946505703112704.0,
      "train_loss": 0.37393945765185665,
      "train_runtime": 21.4272,
      "train_samples_per_second": 915.473,
      "train_steps_per_second": 14.374
    }
  ],
  "logging_steps": 100,
  "max_steps": 308,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 200,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 946505703112704.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}