{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.4,
"grad_norm": 0.5595075488090515,
"learning_rate": 0.000482,
"loss": 0.4307,
"step": 10
},
{
"epoch": 0.8,
"grad_norm": 0.566727340221405,
"learning_rate": 0.000462,
"loss": 0.0524,
"step": 20
},
{
"epoch": 1.2,
"grad_norm": 0.2078760862350464,
"learning_rate": 0.000442,
"loss": 0.026,
"step": 30
},
{
"epoch": 1.6,
"grad_norm": 0.25045984983444214,
"learning_rate": 0.000422,
"loss": 0.0261,
"step": 40
},
{
"epoch": 2.0,
"grad_norm": 0.20396892726421356,
"learning_rate": 0.000402,
"loss": 0.0199,
"step": 50
},
{
"epoch": 2.4,
"grad_norm": 0.08542954921722412,
"learning_rate": 0.000382,
"loss": 0.028,
"step": 60
},
{
"epoch": 2.8,
"grad_norm": 0.17754589021205902,
"learning_rate": 0.000362,
"loss": 0.0154,
"step": 70
},
{
"epoch": 3.2,
"grad_norm": 0.10964138805866241,
"learning_rate": 0.000342,
"loss": 0.0158,
"step": 80
},
{
"epoch": 3.6,
"grad_norm": 1.3268883228302002,
"learning_rate": 0.000322,
"loss": 0.0156,
"step": 90
},
{
"epoch": 4.0,
"grad_norm": 0.16379483044147491,
"learning_rate": 0.000302,
"loss": 0.0157,
"step": 100
},
{
"epoch": 4.4,
"grad_norm": 0.1214568242430687,
"learning_rate": 0.00028199999999999997,
"loss": 0.0125,
"step": 110
},
{
"epoch": 4.8,
"grad_norm": 0.13405731320381165,
"learning_rate": 0.000262,
"loss": 0.0129,
"step": 120
},
{
"epoch": 5.2,
"grad_norm": 0.016486013308167458,
"learning_rate": 0.000242,
"loss": 0.0125,
"step": 130
},
{
"epoch": 5.6,
"grad_norm": 1.5530146360397339,
"learning_rate": 0.000222,
"loss": 0.0117,
"step": 140
},
{
"epoch": 6.0,
"grad_norm": 0.1322641223669052,
"learning_rate": 0.000202,
"loss": 0.0137,
"step": 150
},
{
"epoch": 6.4,
"grad_norm": 0.15241874754428864,
"learning_rate": 0.000182,
"loss": 0.0108,
"step": 160
},
{
"epoch": 6.8,
"grad_norm": 0.05570691078901291,
"learning_rate": 0.000162,
"loss": 0.0108,
"step": 170
},
{
"epoch": 7.2,
"grad_norm": 0.059169329702854156,
"learning_rate": 0.00014199999999999998,
"loss": 0.0113,
"step": 180
},
{
"epoch": 7.6,
"grad_norm": 0.0516737699508667,
"learning_rate": 0.000122,
"loss": 0.0093,
"step": 190
},
{
"epoch": 8.0,
"grad_norm": 0.22752004861831665,
"learning_rate": 0.000102,
"loss": 0.0097,
"step": 200
},
{
"epoch": 8.4,
"grad_norm": 0.28640028834342957,
"learning_rate": 8.2e-05,
"loss": 0.0092,
"step": 210
},
{
"epoch": 8.8,
"grad_norm": 0.053172893822193146,
"learning_rate": 6.2e-05,
"loss": 0.0087,
"step": 220
},
{
"epoch": 9.2,
"grad_norm": 0.0015004288870841265,
"learning_rate": 4.2000000000000004e-05,
"loss": 0.0091,
"step": 230
},
{
"epoch": 9.6,
"grad_norm": 0.0008665521745570004,
"learning_rate": 2.2e-05,
"loss": 0.0087,
"step": 240
},
{
"epoch": 10.0,
"grad_norm": 0.002254044869914651,
"learning_rate": 2e-06,
"loss": 0.0088,
"step": 250
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.1558177088783744e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}