{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.12,
"eval_steps": 4,
"global_step": 39,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 102.28898620605469,
"learning_rate": 2e-05,
"loss": 6.6367,
"step": 1
},
{
"epoch": 0.08,
"eval_loss": 7.300913333892822,
"eval_runtime": 1.3523,
"eval_samples_per_second": 8.873,
"eval_steps_per_second": 4.437,
"step": 1
},
{
"epoch": 0.16,
"grad_norm": 103.4541015625,
"learning_rate": 4e-05,
"loss": 7.0616,
"step": 2
},
{
"epoch": 0.24,
"grad_norm": 67.47515869140625,
"learning_rate": 6e-05,
"loss": 4.686,
"step": 3
},
{
"epoch": 0.32,
"grad_norm": 72.36919403076172,
"learning_rate": 8e-05,
"loss": 2.3866,
"step": 4
},
{
"epoch": 0.32,
"eval_loss": 0.7137572169303894,
"eval_runtime": 1.3532,
"eval_samples_per_second": 8.868,
"eval_steps_per_second": 4.434,
"step": 4
},
{
"epoch": 0.4,
"grad_norm": 16.83085060119629,
"learning_rate": 0.0001,
"loss": 0.6844,
"step": 5
},
{
"epoch": 0.48,
"grad_norm": 25.897714614868164,
"learning_rate": 0.00012,
"loss": 0.914,
"step": 6
},
{
"epoch": 0.56,
"grad_norm": 18.89151382446289,
"learning_rate": 0.00014,
"loss": 0.63,
"step": 7
},
{
"epoch": 0.64,
"grad_norm": 27.15555763244629,
"learning_rate": 0.00016,
"loss": 0.948,
"step": 8
},
{
"epoch": 0.64,
"eval_loss": 1.0445994138717651,
"eval_runtime": 1.356,
"eval_samples_per_second": 8.85,
"eval_steps_per_second": 4.425,
"step": 8
},
{
"epoch": 0.72,
"grad_norm": 20.812381744384766,
"learning_rate": 0.00018,
"loss": 1.0285,
"step": 9
},
{
"epoch": 0.8,
"grad_norm": 56.3886604309082,
"learning_rate": 0.0002,
"loss": 1.3756,
"step": 10
},
{
"epoch": 0.88,
"grad_norm": 6.24803352355957,
"learning_rate": 0.00019981755542233177,
"loss": 0.5178,
"step": 11
},
{
"epoch": 0.96,
"grad_norm": 8.379430770874023,
"learning_rate": 0.0001992708874098054,
"loss": 0.6822,
"step": 12
},
{
"epoch": 0.96,
"eval_loss": 1.3959709405899048,
"eval_runtime": 1.3583,
"eval_samples_per_second": 8.835,
"eval_steps_per_second": 4.417,
"step": 12
},
{
"epoch": 1.04,
"grad_norm": 20.744348526000977,
"learning_rate": 0.00019836199069471437,
"loss": 1.3762,
"step": 13
},
{
"epoch": 1.12,
"grad_norm": 4.800480842590332,
"learning_rate": 0.0001970941817426052,
"loss": 0.5248,
"step": 14
},
{
"epoch": 1.2,
"grad_norm": 11.284302711486816,
"learning_rate": 0.00019547208665085457,
"loss": 0.8094,
"step": 15
},
{
"epoch": 1.28,
"grad_norm": 5.787976264953613,
"learning_rate": 0.0001935016242685415,
"loss": 0.5222,
"step": 16
},
{
"epoch": 1.28,
"eval_loss": 0.9023411870002747,
"eval_runtime": 1.3623,
"eval_samples_per_second": 8.808,
"eval_steps_per_second": 4.404,
"step": 16
},
{
"epoch": 1.36,
"grad_norm": 21.48629379272461,
"learning_rate": 0.00019118998459920902,
"loss": 0.8027,
"step": 17
},
{
"epoch": 1.44,
"grad_norm": 38.0982666015625,
"learning_rate": 0.000188545602565321,
"loss": 1.7772,
"step": 18
},
{
"epoch": 1.52,
"grad_norm": 10.824837684631348,
"learning_rate": 0.00018557812723014476,
"loss": 0.7737,
"step": 19
},
{
"epoch": 1.6,
"grad_norm": 9.1353120803833,
"learning_rate": 0.00018229838658936564,
"loss": 0.534,
"step": 20
},
{
"epoch": 1.6,
"eval_loss": 0.4847445785999298,
"eval_runtime": 1.3637,
"eval_samples_per_second": 8.799,
"eval_steps_per_second": 4.4,
"step": 20
},
{
"epoch": 1.68,
"grad_norm": 3.8411033153533936,
"learning_rate": 0.00017871834806090501,
"loss": 0.3201,
"step": 21
},
{
"epoch": 1.76,
"grad_norm": 23.888507843017578,
"learning_rate": 0.00017485107481711012,
"loss": 2.2541,
"step": 22
},
{
"epoch": 1.84,
"grad_norm": 8.5956392288208,
"learning_rate": 0.00017071067811865476,
"loss": 0.8177,
"step": 23
},
{
"epoch": 1.92,
"grad_norm": 3.825141191482544,
"learning_rate": 0.00016631226582407952,
"loss": 0.4624,
"step": 24
},
{
"epoch": 1.92,
"eval_loss": 0.5740255117416382,
"eval_runtime": 1.3655,
"eval_samples_per_second": 8.788,
"eval_steps_per_second": 4.394,
"step": 24
},
{
"epoch": 2.0,
"grad_norm": 3.558993101119995,
"learning_rate": 0.00016167188726285434,
"loss": 0.3714,
"step": 25
},
{
"epoch": 2.08,
"grad_norm": 11.759211540222168,
"learning_rate": 0.00015680647467311557,
"loss": 0.6562,
"step": 26
},
{
"epoch": 2.16,
"grad_norm": 96.2179183959961,
"learning_rate": 0.00015173378141776568,
"loss": 1.5141,
"step": 27
},
{
"epoch": 2.24,
"grad_norm": 31.022045135498047,
"learning_rate": 0.00014647231720437686,
"loss": 0.7753,
"step": 28
},
{
"epoch": 2.24,
"eval_loss": 0.3771994113922119,
"eval_runtime": 1.3676,
"eval_samples_per_second": 8.775,
"eval_steps_per_second": 4.387,
"step": 28
},
{
"epoch": 2.32,
"grad_norm": 3.5004501342773438,
"learning_rate": 0.0001410412805452757,
"loss": 0.2649,
"step": 29
},
{
"epoch": 2.4,
"grad_norm": 5.16464376449585,
"learning_rate": 0.00013546048870425356,
"loss": 0.171,
"step": 30
},
{
"epoch": 2.48,
"grad_norm": 25.634010314941406,
"learning_rate": 0.00012975030538552032,
"loss": 0.9172,
"step": 31
},
{
"epoch": 2.56,
"grad_norm": 7.102908134460449,
"learning_rate": 0.0001239315664287558,
"loss": 0.3324,
"step": 32
},
{
"epoch": 2.56,
"eval_loss": 0.29374203085899353,
"eval_runtime": 1.3678,
"eval_samples_per_second": 8.773,
"eval_steps_per_second": 4.387,
"step": 32
},
{
"epoch": 2.64,
"grad_norm": 6.236325263977051,
"learning_rate": 0.0001180255037813906,
"loss": 0.4932,
"step": 33
},
{
"epoch": 2.72,
"grad_norm": 4.445058345794678,
"learning_rate": 0.0001120536680255323,
"loss": 0.1284,
"step": 34
},
{
"epoch": 2.8,
"grad_norm": 6.94170618057251,
"learning_rate": 0.00010603784974222861,
"loss": 0.1547,
"step": 35
},
{
"epoch": 2.88,
"grad_norm": 5.656033039093018,
"learning_rate": 0.0001,
"loss": 0.1973,
"step": 36
},
{
"epoch": 2.88,
"eval_loss": 0.5674905180931091,
"eval_runtime": 1.3681,
"eval_samples_per_second": 8.771,
"eval_steps_per_second": 4.386,
"step": 36
},
{
"epoch": 2.96,
"grad_norm": 18.19667625427246,
"learning_rate": 9.396215025777139e-05,
"loss": 0.4884,
"step": 37
},
{
"epoch": 3.04,
"grad_norm": 17.964893341064453,
"learning_rate": 8.79463319744677e-05,
"loss": 0.5526,
"step": 38
},
{
"epoch": 3.12,
"grad_norm": 5.015590190887451,
"learning_rate": 8.197449621860943e-05,
"loss": 0.2116,
"step": 39
}
],
"logging_steps": 1,
"max_steps": 62,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 13,
"total_flos": 3414704284237824.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}