HallOumi-8B-classifier / trainer_state.json
Upload trainer_state.json with huggingface_hub
7240e26 verified
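The commit above is the standard auto-generated message from a huggingface_hub upload. A minimal sketch of how such a file is typically pushed (the repo id and local path below are assumptions for illustration, not taken from this page):

    from huggingface_hub import HfApi

    api = HfApi()  # assumes a token is already configured, e.g. via `huggingface-cli login`
    api.upload_file(
        path_or_fileobj="trainer_state.json",             # local file to push (assumed path)
        path_in_repo="trainer_state.json",                 # destination path in the repo
        repo_id="your-username/HallOumi-8B-classifier",    # hypothetical repo id
        repo_type="model",
        # If commit_message is omitted, huggingface_hub uses a default of the form
        # "Upload trainer_state.json with huggingface_hub", matching the commit above.
    )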
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 50,
"global_step": 320,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03125,
"grad_norm": 159.0,
"learning_rate": 1.8750000000000002e-05,
"loss": 1.1247,
"num_input_tokens_seen": 1465248,
"step": 10
},
{
"epoch": 0.0625,
"grad_norm": 65.0,
"learning_rate": 2.9987186375809513e-05,
"loss": 0.638,
"num_input_tokens_seen": 2893200,
"step": 20
},
{
"epoch": 0.09375,
"grad_norm": 10.0625,
"learning_rate": 2.984328439990804e-05,
"loss": 0.2887,
"num_input_tokens_seen": 4373616,
"step": 30
},
{
"epoch": 0.125,
"grad_norm": 14.875,
"learning_rate": 2.9541003989089956e-05,
"loss": 0.2392,
"num_input_tokens_seen": 5755248,
"step": 40
},
{
"epoch": 0.15625,
"grad_norm": 9.8125,
"learning_rate": 2.9083570487361445e-05,
"loss": 0.226,
"num_input_tokens_seen": 7213808,
"step": 50
},
{
"epoch": 0.1875,
"grad_norm": 19.0,
"learning_rate": 2.8475864728379682e-05,
"loss": 0.2489,
"num_input_tokens_seen": 8649584,
"step": 60
},
{
"epoch": 0.21875,
"grad_norm": 27.75,
"learning_rate": 2.7724370956763605e-05,
"loss": 0.2462,
"num_input_tokens_seen": 10054416,
"step": 70
},
{
"epoch": 0.25,
"grad_norm": 12.5,
"learning_rate": 2.6837107640945904e-05,
"loss": 0.2133,
"num_input_tokens_seen": 11521216,
"step": 80
},
{
"epoch": 0.28125,
"grad_norm": 8.625,
"learning_rate": 2.5823541915795932e-05,
"loss": 0.2035,
"num_input_tokens_seen": 12912576,
"step": 90
},
{
"epoch": 0.3125,
"grad_norm": 4.25,
"learning_rate": 2.469448856791411e-05,
"loss": 0.2226,
"num_input_tokens_seen": 14369728,
"step": 100
},
{
"epoch": 0.34375,
"grad_norm": 22.625,
"learning_rate": 2.3461994641428768e-05,
"loss": 0.2314,
"num_input_tokens_seen": 15796256,
"step": 110
},
{
"epoch": 0.375,
"grad_norm": 19.875,
"learning_rate": 2.2139210895556104e-05,
"loss": 0.2564,
"num_input_tokens_seen": 17240400,
"step": 120
},
{
"epoch": 0.40625,
"grad_norm": 7.78125,
"learning_rate": 2.074025148547635e-05,
"loss": 0.2442,
"num_input_tokens_seen": 18663216,
"step": 130
},
{
"epoch": 0.4375,
"grad_norm": 12.125,
"learning_rate": 1.928004336373658e-05,
"loss": 0.2115,
"num_input_tokens_seen": 20069056,
"step": 140
},
{
"epoch": 0.46875,
"grad_norm": 21.0,
"learning_rate": 1.777416700907338e-05,
"loss": 0.1656,
"num_input_tokens_seen": 21471248,
"step": 150
},
{
"epoch": 0.5,
"grad_norm": 10.6875,
"learning_rate": 1.623869018208499e-05,
"loss": 0.1906,
"num_input_tokens_seen": 22898704,
"step": 160
},
{
"epoch": 0.53125,
"grad_norm": 8.4375,
"learning_rate": 1.4689996481586688e-05,
"loss": 0.1883,
"num_input_tokens_seen": 24365264,
"step": 170
},
{
"epoch": 0.5625,
"grad_norm": 14.0,
"learning_rate": 1.3144610530959784e-05,
"loss": 0.2115,
"num_input_tokens_seen": 25870176,
"step": 180
},
{
"epoch": 0.59375,
"grad_norm": 16.25,
"learning_rate": 1.1619021659762912e-05,
"loss": 0.1644,
"num_input_tokens_seen": 27346880,
"step": 190
},
{
"epoch": 0.625,
"grad_norm": 8.375,
"learning_rate": 1.0129507961929749e-05,
"loss": 0.2046,
"num_input_tokens_seen": 28825968,
"step": 200
},
{
"epoch": 0.65625,
"grad_norm": 13.3125,
"learning_rate": 8.691962607859386e-06,
"loss": 0.1767,
"num_input_tokens_seen": 30278096,
"step": 210
},
{
"epoch": 0.6875,
"grad_norm": 6.96875,
"learning_rate": 7.321724263655989e-06,
"loss": 0.1739,
"num_input_tokens_seen": 31701248,
"step": 220
},
{
"epoch": 0.71875,
"grad_norm": 3.765625,
"learning_rate": 6.0334134269513865e-06,
"loss": 0.1648,
"num_input_tokens_seen": 33175280,
"step": 230
},
{
"epoch": 0.75,
"grad_norm": 37.5,
"learning_rate": 4.840776425613887e-06,
"loss": 0.1662,
"num_input_tokens_seen": 34680976,
"step": 240
},
{
"epoch": 0.78125,
"grad_norm": 6.3125,
"learning_rate": 3.756538743883111e-06,
"loss": 0.1867,
"num_input_tokens_seen": 36137360,
"step": 250
},
{
"epoch": 0.8125,
"grad_norm": 3.46875,
"learning_rate": 2.792269240947076e-06,
"loss": 0.1601,
"num_input_tokens_seen": 37577552,
"step": 260
},
{
"epoch": 0.84375,
"grad_norm": 6.0,
"learning_rate": 1.958256710754496e-06,
"loss": 0.1679,
"num_input_tokens_seen": 39032832,
"step": 270
},
{
"epoch": 0.875,
"grad_norm": 8.125,
"learning_rate": 1.2634001001741375e-06,
"loss": 0.1518,
"num_input_tokens_seen": 40447376,
"step": 280
},
{
"epoch": 0.90625,
"grad_norm": 5.96875,
"learning_rate": 7.151135568777839e-07,
"loss": 0.171,
"num_input_tokens_seen": 41890144,
"step": 290
},
{
"epoch": 0.9375,
"grad_norm": 17.5,
"learning_rate": 3.192473200896828e-07,
"loss": 0.1702,
"num_input_tokens_seen": 43291456,
"step": 300
},
{
"epoch": 0.96875,
"grad_norm": 20.5,
"learning_rate": 8.002529830136163e-08,
"loss": 0.1595,
"num_input_tokens_seen": 44726672,
"step": 310
},
{
"epoch": 1.0,
"grad_norm": 13.3125,
"learning_rate": 0.0,
"loss": 0.1722,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_cnn_balanced_accuracy": 0.5759533564450047,
"eval_cnn_f1_score": 0.2608695652173913,
"eval_cnn_loss": 0.31287845969200134,
"eval_cnn_pr_auc": 0.42626378805395937,
"eval_cnn_roc_auc": 0.7679027909094093,
"eval_cnn_runtime": 10.722,
"eval_cnn_samples_per_second": 52.043,
"eval_cnn_steps_per_second": 0.839,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_xsum_balanced_accuracy": 0.749277038750723,
"eval_xsum_f1_score": 0.7258382642998027,
"eval_xsum_loss": 0.5868125557899475,
"eval_xsum_pr_auc": 0.7976212402287128,
"eval_xsum_roc_auc": 0.8217916586337639,
"eval_xsum_runtime": 9.7581,
"eval_xsum_samples_per_second": 57.183,
"eval_xsum_steps_per_second": 0.922,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_medias_balanced_accuracy": 0.7074867769288893,
"eval_medias_f1_score": 0.5704225352112676,
"eval_medias_loss": 0.45626452565193176,
"eval_medias_pr_auc": 0.6584352779944264,
"eval_medias_roc_auc": 0.7960236336159854,
"eval_medias_runtime": 9.9504,
"eval_medias_samples_per_second": 72.962,
"eval_medias_steps_per_second": 1.206,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_meetb_balanced_accuracy": 0.7702572347266881,
"eval_meetb_f1_score": 0.6498194945848376,
"eval_meetb_loss": 0.316259503364563,
"eval_meetb_pr_auc": 0.7402648937803408,
"eval_meetb_roc_auc": 0.8713504823151126,
"eval_meetb_runtime": 10.6662,
"eval_meetb_samples_per_second": 72.378,
"eval_meetb_steps_per_second": 1.219,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_wice_balanced_accuracy": 0.8154247364773681,
"eval_wice_f1_score": 0.8679245283018868,
"eval_wice_loss": 0.4058681130409241,
"eval_wice_pr_auc": 0.9493679290266217,
"eval_wice_roc_auc": 0.8869679395995185,
"eval_wice_runtime": 123.4927,
"eval_wice_samples_per_second": 2.899,
"eval_wice_steps_per_second": 0.049,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_reveal_balanced_accuracy": 0.8941412213740458,
"eval_reveal_f1_score": 0.9196141479099679,
"eval_reveal_loss": 0.2713985741138458,
"eval_reveal_pr_auc": 0.9884053490966663,
"eval_reveal_roc_auc": 0.960442748091603,
"eval_reveal_runtime": 7.8148,
"eval_reveal_samples_per_second": 218.815,
"eval_reveal_steps_per_second": 3.455,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_claim_verify_balanced_accuracy": 0.7654793545023335,
"eval_claim_verify_f1_score": 0.6621160409556314,
"eval_claim_verify_loss": 0.47898775339126587,
"eval_claim_verify_pr_auc": 0.6755500302254687,
"eval_claim_verify_roc_auc": 0.8519738375912951,
"eval_claim_verify_runtime": 261.7647,
"eval_claim_verify_samples_per_second": 4.156,
"eval_claim_verify_steps_per_second": 0.065,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_fact_check_balanced_accuracy": 0.7718129805113534,
"eval_fact_check_f1_score": 0.9008264462809917,
"eval_fact_check_loss": 0.39186760783195496,
"eval_fact_check_pr_auc": 0.9477958353480682,
"eval_fact_check_roc_auc": 0.8752134364384051,
"eval_fact_check_runtime": 5.283,
"eval_fact_check_samples_per_second": 296.421,
"eval_fact_check_steps_per_second": 4.732,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_expertqa_balanced_accuracy": 0.5946286054753636,
"eval_expertqa_f1_score": 0.36883320281910725,
"eval_expertqa_loss": 1.1040500402450562,
"eval_expertqa_pr_auc": 0.2890662808738864,
"eval_expertqa_roc_auc": 0.6258950520788967,
"eval_expertqa_runtime": 536.4175,
"eval_expertqa_samples_per_second": 6.901,
"eval_expertqa_steps_per_second": 0.108,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_lfqa_balanced_accuracy": 0.8775737079235312,
"eval_lfqa_f1_score": 0.8557275541795665,
"eval_lfqa_loss": 0.27868813276290894,
"eval_lfqa_pr_auc": 0.9362703923849833,
"eval_lfqa_roc_auc": 0.9523769464424847,
"eval_lfqa_runtime": 12.7663,
"eval_lfqa_samples_per_second": 149.691,
"eval_lfqa_steps_per_second": 2.35,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_ragtruth_balanced_accuracy": 0.7859350741200968,
"eval_ragtruth_f1_score": 0.5078776645041705,
"eval_ragtruth_loss": 0.26025664806365967,
"eval_ragtruth_pr_auc": 0.4400226298715947,
"eval_ragtruth_roc_auc": 0.8972721245465168,
"eval_ragtruth_runtime": 149.8477,
"eval_ragtruth_samples_per_second": 109.251,
"eval_ragtruth_steps_per_second": 1.708,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"eval_halloumi_synthetic_balanced_accuracy": 0.7681937112954458,
"eval_halloumi_synthetic_f1_score": 0.6775431861804223,
"eval_halloumi_synthetic_loss": 0.34176450967788696,
"eval_halloumi_synthetic_pr_auc": 0.7894696954251237,
"eval_halloumi_synthetic_roc_auc": 0.9044560224857751,
"eval_halloumi_synthetic_runtime": 38.8927,
"eval_halloumi_synthetic_samples_per_second": 53.712,
"eval_halloumi_synthetic_steps_per_second": 0.848,
"num_input_tokens_seen": 46209888,
"step": 320
},
{
"epoch": 1.0,
"num_input_tokens_seen": 46209888,
"step": 320,
"total_flos": 2.418947678713938e+17,
"train_loss": 0.2418923556804657,
"train_runtime": 3758.6466,
"train_samples_per_second": 10.887,
"train_steps_per_second": 0.085,
"train_tokens_per_second": 1545.744
}
],
"logging_steps": 10,
"max_steps": 320,
"num_input_tokens_seen": 46209888,
"num_train_epochs": 1,
"save_steps": 0,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.418947678713938e+17,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
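For reference, a minimal sketch (assuming the file has been downloaded locally as trainer_state.json) of how to read the training loss curve and the final per-dataset evaluation metrics out of this state file:

    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    # Training loss per logged step (entries in log_history that carry a "loss" key).
    for entry in state["log_history"]:
        if "loss" in entry:
            print(f"step {entry['step']:>3}  lr {entry['learning_rate']:.2e}  loss {entry['loss']:.4f}")

    # Final evaluation metrics at step 320 (keys prefixed with "eval_").
    for entry in state["log_history"]:
        for key, value in sorted(entry.items()):
            if key.startswith("eval_") and (key.endswith("_f1_score") or key.endswith("_balanced_accuracy")):
                print(f"{key}: {value:.4f}")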