Theoreticallyhugo, commit dc1fed1 (verified): Training in progress, epoch 1, checkpoint
{
  "best_metric": 5.741369932366069e-06,
  "best_model_checkpoint": "insuff_supported_arguments/checkpoint-4628",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 4628,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.11,
      "grad_norm": 0.03457513824105263,
      "learning_rate": 1.9279746470757707e-05,
      "loss": 0.1457,
      "step": 500
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.0055348570458590984,
      "learning_rate": 1.8559492941515416e-05,
      "loss": 0.0066,
      "step": 1000
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.002291068434715271,
      "learning_rate": 1.783923941227312e-05,
      "loss": 0.0014,
      "step": 1500
    },
    {
      "epoch": 0.43,
      "grad_norm": 0.003473889548331499,
      "learning_rate": 1.7118985883030827e-05,
      "loss": 0.0114,
      "step": 2000
    },
    {
      "epoch": 0.54,
      "grad_norm": 0.0016763019375503063,
      "learning_rate": 1.6398732353788535e-05,
      "loss": 0.0031,
      "step": 2500
    },
    {
      "epoch": 0.65,
      "grad_norm": 0.0010241195559501648,
      "learning_rate": 1.567847882454624e-05,
      "loss": 0.0012,
      "step": 3000
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.00029286000062711537,
      "learning_rate": 1.4958225295303948e-05,
      "loss": 0.0,
      "step": 3500
    },
    {
      "epoch": 0.86,
      "grad_norm": 0.0003028396749868989,
      "learning_rate": 1.4237971766061655e-05,
      "loss": 0.0,
      "step": 4000
    },
    {
      "epoch": 0.97,
      "grad_norm": 0.0002893279306590557,
      "learning_rate": 1.3517718236819362e-05,
      "loss": 0.0,
      "step": 4500
    },
    {
      "epoch": 1.0,
      "eval_NEGATIVE": {
        "f1-score": 1.0,
        "precision": 1.0,
        "recall": 1.0,
        "support": 13620.0
      },
      "eval_POSITIVE": {
        "f1-score": 1.0,
        "precision": 1.0,
        "recall": 1.0,
        "support": 6960.0
      },
      "eval_accuracy": 1.0,
      "eval_loss": 5.741369932366069e-06,
      "eval_macro avg": {
        "f1-score": 1.0,
        "precision": 1.0,
        "recall": 1.0,
        "support": 20580.0
      },
      "eval_runtime": 11.5238,
      "eval_samples_per_second": 1785.875,
      "eval_steps_per_second": 111.682,
      "eval_weighted avg": {
        "f1-score": 1.0,
        "precision": 1.0,
        "recall": 1.0,
        "support": 20580.0
      },
      "step": 4628
    }
  ],
  "logging_steps": 500,
  "max_steps": 13884,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 2984335256486880.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}
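
The block below is a minimal sketch of how a Trainer state file like the one above can be inspected programmatically. It assumes the JSON has been saved locally under the standard name trainer_state.json (the filename inside a checkpoint-* directory); that path is an assumption, not taken from the file itself. The logged learning rates are consistent with a linear decay from a base rate of 2e-5 down to 0 over max_steps = 13884, which the final comment spells out.

import json

# Assumption: the JSON above is saved locally as "trainer_state.json",
# the standard filename inside a Hugging Face Trainer checkpoint directory.
with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best eval loss:  {state['best_metric']:.3e}")

# Training entries in log_history carry a "loss" key; the last entry is the
# evaluation report (keys prefixed with "eval_") and is skipped here.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>5}  loss {entry['loss']:.4f}  "
              f"lr {entry['learning_rate']:.3e}")

# The logged rates match a linear schedule lr(step) = 2e-5 * (1 - step / 13884),
# e.g. lr(500) = 2e-5 * 13384 / 13884 ≈ 1.928e-05, as recorded at step 500.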