{
  "best_metric": 0.8311111111111111,
  "best_model_checkpoint": "train_binary/05-13-2024_12:08:27/checkpoint-160",
  "epoch": 5.0,
  "eval_steps": 20,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5,
      "grad_norm": 7.1047492027282715,
      "learning_rate": 5e-05,
      "loss": 1.2022,
      "step": 20
    },
    {
      "epoch": 0.5,
      "eval_acc_product": 0.2339592488915767,
      "eval_loss": 0.6791220903396606,
      "eval_overall_acc": 0.7155555555555555,
      "eval_runtime": 4.3626,
      "eval_samples_per_second": 206.3,
      "eval_steps_per_second": 1.834,
      "eval_type_descriptive_acc": 0.5418502202643172,
      "eval_type_expository_acc": 0.9829059829059829,
      "eval_type_narrative_acc": 0.6559633027522935,
      "eval_type_persuasive_acc": 0.669683257918552,
      "step": 20
    },
    {
      "epoch": 1.0,
      "grad_norm": 9.341121673583984,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.634,
      "step": 40
    },
    {
      "epoch": 1.0,
      "eval_acc_product": 0.374576132841691,
      "eval_loss": 0.5208084583282471,
      "eval_overall_acc": 0.79,
      "eval_runtime": 4.3469,
      "eval_samples_per_second": 207.046,
      "eval_steps_per_second": 1.84,
      "eval_type_descriptive_acc": 0.6519823788546255,
      "eval_type_expository_acc": 0.9358974358974359,
      "eval_type_narrative_acc": 0.8027522935779816,
      "eval_type_persuasive_acc": 0.7647058823529411,
      "step": 40
    },
    {
      "epoch": 1.5,
      "grad_norm": 12.583885192871094,
      "learning_rate": 3.888888888888889e-05,
      "loss": 0.4102,
      "step": 60
    },
    {
      "epoch": 1.5,
      "eval_acc_product": 0.37668167775444333,
      "eval_loss": 0.5568819046020508,
      "eval_overall_acc": 0.8033333333333333,
      "eval_runtime": 4.3493,
      "eval_samples_per_second": 206.93,
      "eval_steps_per_second": 1.839,
      "eval_type_descriptive_acc": 0.5374449339207048,
      "eval_type_expository_acc": 0.9829059829059829,
      "eval_type_narrative_acc": 0.8165137614678899,
      "eval_type_persuasive_acc": 0.8733031674208145,
      "step": 60
    },
    {
      "epoch": 2.0,
      "grad_norm": 18.182939529418945,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.3891,
      "step": 80
    },
    {
      "epoch": 2.0,
      "eval_acc_product": 0.3805953902407894,
      "eval_loss": 0.528384804725647,
      "eval_overall_acc": 0.8,
      "eval_runtime": 4.3488,
      "eval_samples_per_second": 206.952,
      "eval_steps_per_second": 1.84,
      "eval_type_descriptive_acc": 0.6035242290748899,
      "eval_type_expository_acc": 0.9914529914529915,
      "eval_type_narrative_acc": 0.8623853211009175,
      "eval_type_persuasive_acc": 0.7375565610859729,
      "step": 80
    },
    {
      "epoch": 2.5,
      "grad_norm": 6.487391471862793,
      "learning_rate": 2.777777777777778e-05,
      "loss": 0.2423,
      "step": 100
    },
    {
      "epoch": 2.5,
      "eval_acc_product": 0.3477393972726718,
      "eval_loss": 0.7446629405021667,
      "eval_overall_acc": 0.7888888888888889,
      "eval_runtime": 4.3484,
      "eval_samples_per_second": 206.972,
      "eval_steps_per_second": 1.84,
      "eval_type_descriptive_acc": 0.6607929515418502,
      "eval_type_expository_acc": 0.9358974358974359,
      "eval_type_narrative_acc": 0.5779816513761468,
      "eval_type_persuasive_acc": 0.9728506787330317,
      "step": 100
    },
    {
      "epoch": 3.0,
      "grad_norm": 10.529717445373535,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.27,
      "step": 120
    },
    {
      "epoch": 3.0,
      "eval_acc_product": 0.3687367678193366,
      "eval_loss": 0.5524727702140808,
      "eval_overall_acc": 0.8033333333333333,
      "eval_runtime": 4.361,
      "eval_samples_per_second": 206.376,
      "eval_steps_per_second": 1.834,
      "eval_type_descriptive_acc": 0.5154185022026432,
      "eval_type_expository_acc": 0.9700854700854701,
      "eval_type_narrative_acc": 0.7798165137614679,
      "eval_type_persuasive_acc": 0.9457013574660633,
      "step": 120
    },
    {
      "epoch": 3.5,
      "grad_norm": 8.022017478942871,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.116,
      "step": 140
    },
    {
      "epoch": 3.5,
      "eval_acc_product": 0.42496128327762595,
      "eval_loss": 0.666808009147644,
      "eval_overall_acc": 0.8177777777777778,
      "eval_runtime": 4.3416,
      "eval_samples_per_second": 207.296,
      "eval_steps_per_second": 1.843,
      "eval_type_descriptive_acc": 0.6519823788546255,
      "eval_type_expository_acc": 0.9786324786324786,
      "eval_type_narrative_acc": 0.7706422018348624,
      "eval_type_persuasive_acc": 0.8642533936651584,
      "step": 140
    },
    {
      "epoch": 4.0,
      "grad_norm": 11.408710479736328,
      "learning_rate": 1.1111111111111112e-05,
      "loss": 0.1078,
      "step": 160
    },
    {
      "epoch": 4.0,
      "eval_acc_product": 0.4513722367715261,
      "eval_loss": 0.655240535736084,
      "eval_overall_acc": 0.8311111111111111,
      "eval_runtime": 4.3513,
      "eval_samples_per_second": 206.837,
      "eval_steps_per_second": 1.839,
      "eval_type_descriptive_acc": 0.6740088105726872,
      "eval_type_expository_acc": 0.9871794871794872,
      "eval_type_narrative_acc": 0.7385321100917431,
      "eval_type_persuasive_acc": 0.918552036199095,
      "step": 160
    },
    {
      "epoch": 4.5,
      "grad_norm": 8.358388900756836,
      "learning_rate": 5.555555555555556e-06,
      "loss": 0.0381,
      "step": 180
    },
    {
      "epoch": 4.5,
      "eval_acc_product": 0.4348685552569168,
      "eval_loss": 0.7252246141433716,
      "eval_overall_acc": 0.8222222222222222,
      "eval_runtime": 4.3445,
      "eval_samples_per_second": 207.158,
      "eval_steps_per_second": 1.841,
      "eval_type_descriptive_acc": 0.7224669603524229,
      "eval_type_expository_acc": 0.9871794871794872,
      "eval_type_narrative_acc": 0.7018348623853211,
      "eval_type_persuasive_acc": 0.8687782805429864,
      "step": 180
    },
    {
      "epoch": 5.0,
      "grad_norm": 8.94113540649414,
      "learning_rate": 0.0,
      "loss": 0.0417,
      "step": 200
    },
    {
      "epoch": 5.0,
      "eval_acc_product": 0.45515444892534596,
      "eval_loss": 0.7260475754737854,
      "eval_overall_acc": 0.83,
      "eval_runtime": 4.34,
      "eval_samples_per_second": 207.373,
      "eval_steps_per_second": 1.843,
      "eval_type_descriptive_acc": 0.7268722466960352,
      "eval_type_expository_acc": 0.9871794871794872,
      "eval_type_narrative_acc": 0.7339449541284404,
      "eval_type_persuasive_acc": 0.8642533936651584,
      "step": 200
    }
  ],
  "logging_steps": 20,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 20,
  "total_flos": 5941102588416000.0,
  "train_batch_size": 128,
  "trial_name": null,
  "trial_params": null
}