{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 20.0,
"eval_steps": 500,
"global_step": 820,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.24691358024691357,
"grad_norm": 3.9386653900146484,
"learning_rate": 4.945121951219512e-05,
"loss": 2.6292,
"step": 10
},
{
"epoch": 0.49382716049382713,
"grad_norm": 2.194047451019287,
"learning_rate": 4.884146341463415e-05,
"loss": 1.7588,
"step": 20
},
{
"epoch": 0.7407407407407407,
"grad_norm": 3.3464252948760986,
"learning_rate": 4.823170731707317e-05,
"loss": 1.2321,
"step": 30
},
{
"epoch": 0.9876543209876543,
"grad_norm": 4.9754157066345215,
"learning_rate": 4.76219512195122e-05,
"loss": 1.1094,
"step": 40
},
{
"epoch": 1.2222222222222223,
"grad_norm": 1.6857168674468994,
"learning_rate": 4.701219512195122e-05,
"loss": 0.8016,
"step": 50
},
{
"epoch": 1.4691358024691357,
"grad_norm": 1.711855173110962,
"learning_rate": 4.640243902439025e-05,
"loss": 0.6045,
"step": 60
},
{
"epoch": 1.7160493827160495,
"grad_norm": 3.110994338989258,
"learning_rate": 4.5792682926829275e-05,
"loss": 0.611,
"step": 70
},
{
"epoch": 1.9629629629629628,
"grad_norm": 2.2832303047180176,
"learning_rate": 4.5182926829268296e-05,
"loss": 0.4029,
"step": 80
},
{
"epoch": 2.197530864197531,
"grad_norm": 2.3041841983795166,
"learning_rate": 4.457317073170732e-05,
"loss": 0.4292,
"step": 90
},
{
"epoch": 2.4444444444444446,
"grad_norm": 1.5648462772369385,
"learning_rate": 4.3963414634146346e-05,
"loss": 0.3602,
"step": 100
},
{
"epoch": 2.691358024691358,
"grad_norm": 0.8374887704849243,
"learning_rate": 4.335365853658537e-05,
"loss": 0.309,
"step": 110
},
{
"epoch": 2.9382716049382713,
"grad_norm": 0.9409658908843994,
"learning_rate": 4.2743902439024395e-05,
"loss": 0.2612,
"step": 120
},
{
"epoch": 3.1728395061728394,
"grad_norm": 3.718695878982544,
"learning_rate": 4.2134146341463416e-05,
"loss": 0.2936,
"step": 130
},
{
"epoch": 3.419753086419753,
"grad_norm": 3.2449347972869873,
"learning_rate": 4.152439024390244e-05,
"loss": 0.1758,
"step": 140
},
{
"epoch": 3.6666666666666665,
"grad_norm": 1.2654057741165161,
"learning_rate": 4.0914634146341465e-05,
"loss": 0.2181,
"step": 150
},
{
"epoch": 3.9135802469135803,
"grad_norm": 0.7008330225944519,
"learning_rate": 4.030487804878049e-05,
"loss": 0.1788,
"step": 160
},
{
"epoch": 4.148148148148148,
"grad_norm": 0.927452802658081,
"learning_rate": 3.9695121951219514e-05,
"loss": 0.2255,
"step": 170
},
{
"epoch": 4.395061728395062,
"grad_norm": 0.9944292902946472,
"learning_rate": 3.908536585365854e-05,
"loss": 0.2719,
"step": 180
},
{
"epoch": 4.6419753086419755,
"grad_norm": 1.1106724739074707,
"learning_rate": 3.847560975609756e-05,
"loss": 0.1458,
"step": 190
},
{
"epoch": 4.888888888888889,
"grad_norm": 3.588369369506836,
"learning_rate": 3.786585365853659e-05,
"loss": 0.2225,
"step": 200
},
{
"epoch": 5.1234567901234565,
"grad_norm": 14.498685836791992,
"learning_rate": 3.725609756097561e-05,
"loss": 0.1737,
"step": 210
},
{
"epoch": 5.37037037037037,
"grad_norm": 15.858694076538086,
"learning_rate": 3.664634146341463e-05,
"loss": 0.1728,
"step": 220
},
{
"epoch": 5.617283950617284,
"grad_norm": 3.0714364051818848,
"learning_rate": 3.603658536585366e-05,
"loss": 0.0774,
"step": 230
},
{
"epoch": 5.864197530864198,
"grad_norm": 0.775265634059906,
"learning_rate": 3.542682926829268e-05,
"loss": 0.2107,
"step": 240
},
{
"epoch": 6.098765432098766,
"grad_norm": 1.262907862663269,
"learning_rate": 3.48170731707317e-05,
"loss": 0.1312,
"step": 250
},
{
"epoch": 6.345679012345679,
"grad_norm": 0.7409253120422363,
"learning_rate": 3.420731707317074e-05,
"loss": 0.1074,
"step": 260
},
{
"epoch": 6.592592592592593,
"grad_norm": 2.9899144172668457,
"learning_rate": 3.359756097560976e-05,
"loss": 0.2688,
"step": 270
},
{
"epoch": 6.839506172839506,
"grad_norm": 0.6718606352806091,
"learning_rate": 3.298780487804878e-05,
"loss": 0.0848,
"step": 280
},
{
"epoch": 7.074074074074074,
"grad_norm": 0.3312334716320038,
"learning_rate": 3.237804878048781e-05,
"loss": 0.0607,
"step": 290
},
{
"epoch": 7.320987654320987,
"grad_norm": 2.2608187198638916,
"learning_rate": 3.176829268292683e-05,
"loss": 0.1274,
"step": 300
},
{
"epoch": 7.567901234567901,
"grad_norm": 0.7812129855155945,
"learning_rate": 3.115853658536586e-05,
"loss": 0.0824,
"step": 310
},
{
"epoch": 7.814814814814815,
"grad_norm": 0.8673564195632935,
"learning_rate": 3.054878048780488e-05,
"loss": 0.1774,
"step": 320
},
{
"epoch": 8.049382716049383,
"grad_norm": 1.7585481405258179,
"learning_rate": 2.9939024390243903e-05,
"loss": 0.1415,
"step": 330
},
{
"epoch": 8.296296296296296,
"grad_norm": 2.8376216888427734,
"learning_rate": 2.9329268292682927e-05,
"loss": 0.1173,
"step": 340
},
{
"epoch": 8.54320987654321,
"grad_norm": 1.6730660200119019,
"learning_rate": 2.8719512195121952e-05,
"loss": 0.1585,
"step": 350
},
{
"epoch": 8.790123456790123,
"grad_norm": 0.30232733488082886,
"learning_rate": 2.810975609756098e-05,
"loss": 0.0377,
"step": 360
},
{
"epoch": 9.024691358024691,
"grad_norm": 2.1726369857788086,
"learning_rate": 2.7500000000000004e-05,
"loss": 0.0568,
"step": 370
},
{
"epoch": 9.271604938271604,
"grad_norm": 0.7406997084617615,
"learning_rate": 2.689024390243903e-05,
"loss": 0.105,
"step": 380
},
{
"epoch": 9.518518518518519,
"grad_norm": 0.2634631097316742,
"learning_rate": 2.628048780487805e-05,
"loss": 0.0328,
"step": 390
},
{
"epoch": 9.765432098765432,
"grad_norm": 0.15690064430236816,
"learning_rate": 2.5670731707317075e-05,
"loss": 0.069,
"step": 400
},
{
"epoch": 10.0,
"grad_norm": 0.09763717651367188,
"learning_rate": 2.50609756097561e-05,
"loss": 0.1503,
"step": 410
},
{
"epoch": 10.246913580246913,
"grad_norm": 0.9397181272506714,
"learning_rate": 2.4451219512195124e-05,
"loss": 0.1123,
"step": 420
},
{
"epoch": 10.493827160493828,
"grad_norm": 0.1911344975233078,
"learning_rate": 2.3841463414634148e-05,
"loss": 0.0439,
"step": 430
},
{
"epoch": 10.74074074074074,
"grad_norm": 2.133268117904663,
"learning_rate": 2.3231707317073173e-05,
"loss": 0.0466,
"step": 440
},
{
"epoch": 10.987654320987655,
"grad_norm": 3.681490898132324,
"learning_rate": 2.2621951219512197e-05,
"loss": 0.083,
"step": 450
},
{
"epoch": 11.222222222222221,
"grad_norm": 0.09520290791988373,
"learning_rate": 2.2012195121951222e-05,
"loss": 0.1275,
"step": 460
},
{
"epoch": 11.469135802469136,
"grad_norm": 0.18467915058135986,
"learning_rate": 2.1402439024390243e-05,
"loss": 0.0206,
"step": 470
},
{
"epoch": 11.716049382716049,
"grad_norm": 0.1692470908164978,
"learning_rate": 2.0792682926829267e-05,
"loss": 0.0694,
"step": 480
},
{
"epoch": 11.962962962962964,
"grad_norm": 0.21938467025756836,
"learning_rate": 2.0182926829268295e-05,
"loss": 0.1137,
"step": 490
},
{
"epoch": 12.197530864197532,
"grad_norm": 0.6872503161430359,
"learning_rate": 1.957317073170732e-05,
"loss": 0.0909,
"step": 500
},
{
"epoch": 12.444444444444445,
"grad_norm": 0.18933941423892975,
"learning_rate": 1.896341463414634e-05,
"loss": 0.0255,
"step": 510
},
{
"epoch": 12.691358024691358,
"grad_norm": 0.2120848298072815,
"learning_rate": 1.8353658536585365e-05,
"loss": 0.0383,
"step": 520
},
{
"epoch": 12.938271604938272,
"grad_norm": 0.12255409359931946,
"learning_rate": 1.774390243902439e-05,
"loss": 0.0941,
"step": 530
},
{
"epoch": 13.17283950617284,
"grad_norm": 0.10398901998996735,
"learning_rate": 1.7134146341463418e-05,
"loss": 0.0191,
"step": 540
},
{
"epoch": 13.419753086419753,
"grad_norm": 0.31278660893440247,
"learning_rate": 1.652439024390244e-05,
"loss": 0.0281,
"step": 550
},
{
"epoch": 13.666666666666666,
"grad_norm": 1.0805507898330688,
"learning_rate": 1.5914634146341464e-05,
"loss": 0.1726,
"step": 560
},
{
"epoch": 13.91358024691358,
"grad_norm": 0.4380980432033539,
"learning_rate": 1.5304878048780488e-05,
"loss": 0.0186,
"step": 570
},
{
"epoch": 14.148148148148149,
"grad_norm": 0.07801090180873871,
"learning_rate": 1.4695121951219513e-05,
"loss": 0.0473,
"step": 580
},
{
"epoch": 14.395061728395062,
"grad_norm": 0.10922332108020782,
"learning_rate": 1.4085365853658535e-05,
"loss": 0.023,
"step": 590
},
{
"epoch": 14.641975308641975,
"grad_norm": 0.2793682813644409,
"learning_rate": 1.3475609756097562e-05,
"loss": 0.0149,
"step": 600
},
{
"epoch": 14.88888888888889,
"grad_norm": 1.3744605779647827,
"learning_rate": 1.2865853658536586e-05,
"loss": 0.1422,
"step": 610
},
{
"epoch": 15.123456790123457,
"grad_norm": 0.1263941377401352,
"learning_rate": 1.225609756097561e-05,
"loss": 0.0124,
"step": 620
},
{
"epoch": 15.37037037037037,
"grad_norm": 0.13304823637008667,
"learning_rate": 1.1646341463414635e-05,
"loss": 0.0317,
"step": 630
},
{
"epoch": 15.617283950617283,
"grad_norm": 1.772745132446289,
"learning_rate": 1.103658536585366e-05,
"loss": 0.0443,
"step": 640
},
{
"epoch": 15.864197530864198,
"grad_norm": 0.06537698954343796,
"learning_rate": 1.0426829268292683e-05,
"loss": 0.0918,
"step": 650
},
{
"epoch": 16.098765432098766,
"grad_norm": 0.07521393895149231,
"learning_rate": 9.817073170731709e-06,
"loss": 0.0395,
"step": 660
},
{
"epoch": 16.34567901234568,
"grad_norm": 0.16562214493751526,
"learning_rate": 9.207317073170732e-06,
"loss": 0.1106,
"step": 670
},
{
"epoch": 16.59259259259259,
"grad_norm": 0.24475818872451782,
"learning_rate": 8.597560975609756e-06,
"loss": 0.019,
"step": 680
},
{
"epoch": 16.839506172839506,
"grad_norm": 0.32736074924468994,
"learning_rate": 7.98780487804878e-06,
"loss": 0.0326,
"step": 690
},
{
"epoch": 17.074074074074073,
"grad_norm": 0.05227570980787277,
"learning_rate": 7.378048780487805e-06,
"loss": 0.0103,
"step": 700
},
{
"epoch": 17.320987654320987,
"grad_norm": 0.09976907074451447,
"learning_rate": 6.76829268292683e-06,
"loss": 0.0864,
"step": 710
},
{
"epoch": 17.567901234567902,
"grad_norm": 0.04490213841199875,
"learning_rate": 6.1585365853658535e-06,
"loss": 0.0178,
"step": 720
},
{
"epoch": 17.814814814814813,
"grad_norm": 0.11479590833187103,
"learning_rate": 5.548780487804878e-06,
"loss": 0.0236,
"step": 730
},
{
"epoch": 18.049382716049383,
"grad_norm": 0.16496780514717102,
"learning_rate": 4.9390243902439025e-06,
"loss": 0.0481,
"step": 740
},
{
"epoch": 18.296296296296298,
"grad_norm": 6.131174564361572,
"learning_rate": 4.329268292682927e-06,
"loss": 0.0696,
"step": 750
},
{
"epoch": 18.54320987654321,
"grad_norm": 0.612872838973999,
"learning_rate": 3.719512195121951e-06,
"loss": 0.0194,
"step": 760
},
{
"epoch": 18.790123456790123,
"grad_norm": 1.3853808641433716,
"learning_rate": 3.1097560975609757e-06,
"loss": 0.037,
"step": 770
},
{
"epoch": 19.02469135802469,
"grad_norm": 0.06428851932287216,
"learning_rate": 2.5e-06,
"loss": 0.0269,
"step": 780
},
{
"epoch": 19.271604938271604,
"grad_norm": 0.15213467180728912,
"learning_rate": 1.8902439024390245e-06,
"loss": 0.017,
"step": 790
},
{
"epoch": 19.51851851851852,
"grad_norm": 2.0686116218566895,
"learning_rate": 1.2804878048780488e-06,
"loss": 0.088,
"step": 800
},
{
"epoch": 19.765432098765434,
"grad_norm": 0.05781777948141098,
"learning_rate": 6.707317073170731e-07,
"loss": 0.0367,
"step": 810
},
{
"epoch": 20.0,
"grad_norm": 0.05907975882291794,
"learning_rate": 6.097560975609757e-08,
"loss": 0.0093,
"step": 820
}
],
"logging_steps": 10,
"max_steps": 820,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 430125247180800.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}