{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.985611510791367,
"eval_steps": 500,
"global_step": 1386,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03597122302158273,
"grad_norm": 3.388711929321289,
"learning_rate": 2.857142857142857e-05,
"loss": 1.2192,
"step": 10
},
{
"epoch": 0.07194244604316546,
"grad_norm": 7.04016637802124,
"learning_rate": 5.714285714285714e-05,
"loss": 0.4355,
"step": 20
},
{
"epoch": 0.1079136690647482,
"grad_norm": 1.9987560510635376,
"learning_rate": 8.571428571428571e-05,
"loss": 0.2583,
"step": 30
},
{
"epoch": 0.14388489208633093,
"grad_norm": 1.4927663803100586,
"learning_rate": 0.00011428571428571428,
"loss": 0.1959,
"step": 40
},
{
"epoch": 0.17985611510791366,
"grad_norm": 2.1035594940185547,
"learning_rate": 0.00014285714285714287,
"loss": 0.158,
"step": 50
},
{
"epoch": 0.2158273381294964,
"grad_norm": 1.8321762084960938,
"learning_rate": 0.00017142857142857143,
"loss": 0.1593,
"step": 60
},
{
"epoch": 0.2517985611510791,
"grad_norm": 0.9496012330055237,
"learning_rate": 0.0002,
"loss": 0.1172,
"step": 70
},
{
"epoch": 0.28776978417266186,
"grad_norm": 1.5062251091003418,
"learning_rate": 0.0001999715070545774,
"loss": 0.1394,
"step": 80
},
{
"epoch": 0.3237410071942446,
"grad_norm": 1.1900173425674438,
"learning_rate": 0.00019988604445526827,
"loss": 0.1144,
"step": 90
},
{
"epoch": 0.3597122302158273,
"grad_norm": 1.6312843561172485,
"learning_rate": 0.0001997436609036963,
"loss": 0.0954,
"step": 100
},
{
"epoch": 0.39568345323741005,
"grad_norm": 0.7715893387794495,
"learning_rate": 0.00019954443753839667,
"loss": 0.1043,
"step": 110
},
{
"epoch": 0.4316546762589928,
"grad_norm": 0.9805018901824951,
"learning_rate": 0.00019928848788857887,
"loss": 0.107,
"step": 120
},
{
"epoch": 0.4676258992805755,
"grad_norm": 0.79938805103302,
"learning_rate": 0.00019897595780943102,
"loss": 0.0939,
"step": 130
},
{
"epoch": 0.5035971223021583,
"grad_norm": 0.7441235184669495,
"learning_rate": 0.00019860702539900287,
"loss": 0.0876,
"step": 140
},
{
"epoch": 0.539568345323741,
"grad_norm": 1.4985952377319336,
"learning_rate": 0.00019818190089671508,
"loss": 0.0998,
"step": 150
},
{
"epoch": 0.5755395683453237,
"grad_norm": 0.49447914958000183,
"learning_rate": 0.0001977008265635525,
"loss": 0.084,
"step": 160
},
{
"epoch": 0.6115107913669064,
"grad_norm": 0.9330535531044006,
"learning_rate": 0.00019716407654400952,
"loss": 0.0711,
"step": 170
},
{
"epoch": 0.6474820143884892,
"grad_norm": 0.71977299451828,
"learning_rate": 0.00019657195670986637,
"loss": 0.0649,
"step": 180
},
{
"epoch": 0.6834532374100719,
"grad_norm": 0.6885129809379578,
"learning_rate": 0.00019592480448588542,
"loss": 0.0734,
"step": 190
},
{
"epoch": 0.7194244604316546,
"grad_norm": 0.8220723271369934,
"learning_rate": 0.0001952229886575266,
"loss": 0.0692,
"step": 200
},
{
"epoch": 0.7553956834532374,
"grad_norm": 0.4193010926246643,
"learning_rate": 0.0001944669091607919,
"loss": 0.0634,
"step": 210
},
{
"epoch": 0.7913669064748201,
"grad_norm": 0.5557839274406433,
"learning_rate": 0.0001936569968543179,
"loss": 0.0617,
"step": 220
},
{
"epoch": 0.8273381294964028,
"grad_norm": 0.6012473702430725,
"learning_rate": 0.0001927937132738476,
"loss": 0.0567,
"step": 230
},
{
"epoch": 0.8633093525179856,
"grad_norm": 0.5065937042236328,
"learning_rate": 0.00019187755036921978,
"loss": 0.0666,
"step": 240
},
{
"epoch": 0.8992805755395683,
"grad_norm": 0.8122988343238831,
"learning_rate": 0.00019090903022402729,
"loss": 0.0621,
"step": 250
},
{
"epoch": 0.935251798561151,
"grad_norm": 0.600527822971344,
"learning_rate": 0.00018988870475810282,
"loss": 0.0577,
"step": 260
},
{
"epoch": 0.9712230215827338,
"grad_norm": 0.41501474380493164,
"learning_rate": 0.00018881715541300276,
"loss": 0.0585,
"step": 270
},
{
"epoch": 1.0071942446043165,
"grad_norm": 0.7007383108139038,
"learning_rate": 0.00018769499282066717,
"loss": 0.0727,
"step": 280
},
{
"epoch": 1.0431654676258992,
"grad_norm": 0.4356389045715332,
"learning_rate": 0.00018652285645544603,
"loss": 0.0576,
"step": 290
},
{
"epoch": 1.079136690647482,
"grad_norm": 0.5948470234870911,
"learning_rate": 0.00018530141426968902,
"loss": 0.0641,
"step": 300
},
{
"epoch": 1.1151079136690647,
"grad_norm": 0.6867853403091431,
"learning_rate": 0.00018403136231310684,
"loss": 0.0535,
"step": 310
},
{
"epoch": 1.1510791366906474,
"grad_norm": 0.5092623829841614,
"learning_rate": 0.00018271342433612113,
"loss": 0.05,
"step": 320
},
{
"epoch": 1.1870503597122302,
"grad_norm": 0.6697306632995605,
"learning_rate": 0.0001813483513774289,
"loss": 0.0527,
"step": 330
},
{
"epoch": 1.223021582733813,
"grad_norm": 0.5235440731048584,
"learning_rate": 0.0001799369213360163,
"loss": 0.0477,
"step": 340
},
{
"epoch": 1.2589928057553956,
"grad_norm": 0.40158113837242126,
"learning_rate": 0.0001784799385278661,
"loss": 0.0587,
"step": 350
},
{
"epoch": 1.2949640287769784,
"grad_norm": 0.36186593770980835,
"learning_rate": 0.000176978233227611,
"loss": 0.0523,
"step": 360
},
{
"epoch": 1.330935251798561,
"grad_norm": 0.4480399489402771,
"learning_rate": 0.00017543266119539422,
"loss": 0.0413,
"step": 370
},
{
"epoch": 1.3669064748201438,
"grad_norm": 0.5076740980148315,
"learning_rate": 0.00017384410318920697,
"loss": 0.0465,
"step": 380
},
{
"epoch": 1.4028776978417266,
"grad_norm": 0.8270596265792847,
"learning_rate": 0.0001722134644629807,
"loss": 0.056,
"step": 390
},
{
"epoch": 1.4388489208633093,
"grad_norm": 0.520577609539032,
"learning_rate": 0.00017054167425071995,
"loss": 0.0401,
"step": 400
},
{
"epoch": 1.474820143884892,
"grad_norm": 0.7387260794639587,
"learning_rate": 0.00016882968523697028,
"loss": 0.0444,
"step": 410
},
{
"epoch": 1.5107913669064748,
"grad_norm": 0.34361258149147034,
"learning_rate": 0.00016707847301392236,
"loss": 0.0573,
"step": 420
},
{
"epoch": 1.5467625899280577,
"grad_norm": 0.49156174063682556,
"learning_rate": 0.00016528903552546207,
"loss": 0.0499,
"step": 430
},
{
"epoch": 1.5827338129496402,
"grad_norm": 0.37369754910469055,
"learning_rate": 0.0001634623924984833,
"loss": 0.047,
"step": 440
},
{
"epoch": 1.6187050359712232,
"grad_norm": 0.4199368357658386,
"learning_rate": 0.0001615995848617876,
"loss": 0.0422,
"step": 450
},
{
"epoch": 1.6546762589928057,
"grad_norm": 0.40282875299453735,
"learning_rate": 0.0001597016741529014,
"loss": 0.0388,
"step": 460
},
{
"epoch": 1.6906474820143886,
"grad_norm": 0.7056338787078857,
"learning_rate": 0.0001577697419131497,
"loss": 0.0453,
"step": 470
},
{
"epoch": 1.7266187050359711,
"grad_norm": 0.512008786201477,
"learning_rate": 0.00015580488907132974,
"loss": 0.0419,
"step": 480
},
{
"epoch": 1.762589928057554,
"grad_norm": 0.41075655817985535,
"learning_rate": 0.00015380823531633729,
"loss": 0.0465,
"step": 490
},
{
"epoch": 1.7985611510791366,
"grad_norm": 0.48174160718917847,
"learning_rate": 0.0001517809184591017,
"loss": 0.0386,
"step": 500
},
{
"epoch": 1.8345323741007196,
"grad_norm": 0.5647067427635193,
"learning_rate": 0.0001497240937841944,
"loss": 0.0425,
"step": 510
},
{
"epoch": 1.870503597122302,
"grad_norm": 0.4203431308269501,
"learning_rate": 0.0001476389333914794,
"loss": 0.0377,
"step": 520
},
{
"epoch": 1.906474820143885,
"grad_norm": 0.5694783329963684,
"learning_rate": 0.0001455266255281821,
"loss": 0.0374,
"step": 530
},
{
"epoch": 1.9424460431654675,
"grad_norm": 0.6108015179634094,
"learning_rate": 0.00014338837391175582,
"loss": 0.0417,
"step": 540
},
{
"epoch": 1.9784172661870505,
"grad_norm": 0.43549835681915283,
"learning_rate": 0.00014122539704393265,
"loss": 0.0339,
"step": 550
},
{
"epoch": 2.014388489208633,
"grad_norm": 0.4553022086620331,
"learning_rate": 0.00013903892751634947,
"loss": 0.0423,
"step": 560
},
{
"epoch": 2.050359712230216,
"grad_norm": 0.48167282342910767,
"learning_rate": 0.0001368302113081447,
"loss": 0.0411,
"step": 570
},
{
"epoch": 2.0863309352517985,
"grad_norm": 0.5101397037506104,
"learning_rate": 0.0001346005070759258,
"loss": 0.0495,
"step": 580
},
{
"epoch": 2.1223021582733814,
"grad_norm": 0.47834643721580505,
"learning_rate": 0.00013235108543651272,
"loss": 0.0352,
"step": 590
},
{
"epoch": 2.158273381294964,
"grad_norm": 0.4055221676826477,
"learning_rate": 0.00013008322824286555,
"loss": 0.0321,
"step": 600
},
{
"epoch": 2.194244604316547,
"grad_norm": 0.4231523275375366,
"learning_rate": 0.00012779822785360912,
"loss": 0.0467,
"step": 610
},
{
"epoch": 2.2302158273381294,
"grad_norm": 0.6262776851654053,
"learning_rate": 0.00012549738639657115,
"loss": 0.0363,
"step": 620
},
{
"epoch": 2.2661870503597124,
"grad_norm": 0.5341355800628662,
"learning_rate": 0.00012318201502675285,
"loss": 0.0328,
"step": 630
},
{
"epoch": 2.302158273381295,
"grad_norm": 0.2906142473220825,
"learning_rate": 0.00012085343317915565,
"loss": 0.0316,
"step": 640
},
{
"epoch": 2.338129496402878,
"grad_norm": 0.3438158929347992,
"learning_rate": 0.00011851296781688952,
"loss": 0.0315,
"step": 650
},
{
"epoch": 2.3741007194244603,
"grad_norm": 0.43991851806640625,
"learning_rate": 0.00011616195267499102,
"loss": 0.031,
"step": 660
},
{
"epoch": 2.4100719424460433,
"grad_norm": 0.3710590600967407,
"learning_rate": 0.00011380172750038269,
"loss": 0.0252,
"step": 670
},
{
"epoch": 2.446043165467626,
"grad_norm": 0.4207635223865509,
"learning_rate": 0.00011143363728840625,
"loss": 0.0409,
"step": 680
},
{
"epoch": 2.4820143884892087,
"grad_norm": 0.48165836930274963,
"learning_rate": 0.00010905903151636501,
"loss": 0.0282,
"step": 690
},
{
"epoch": 2.5179856115107913,
"grad_norm": 0.45046257972717285,
"learning_rate": 0.00010667926337451217,
"loss": 0.0257,
"step": 700
},
{
"epoch": 2.553956834532374,
"grad_norm": 0.4864603877067566,
"learning_rate": 0.00010429568899492348,
"loss": 0.0322,
"step": 710
},
{
"epoch": 2.5899280575539567,
"grad_norm": 0.3659379184246063,
"learning_rate": 0.0001019096666786931,
"loss": 0.0271,
"step": 720
},
{
"epoch": 2.6258992805755397,
"grad_norm": 0.2604960799217224,
"learning_rate": 9.952255612189368e-05,
"loss": 0.0326,
"step": 730
},
{
"epoch": 2.661870503597122,
"grad_norm": 0.4740797281265259,
"learning_rate": 9.713571764074152e-05,
"loss": 0.0306,
"step": 740
},
{
"epoch": 2.697841726618705,
"grad_norm": 0.2611696124076843,
"learning_rate": 9.475051139640809e-05,
"loss": 0.0271,
"step": 750
},
{
"epoch": 2.7338129496402876,
"grad_norm": 0.3587149679660797,
"learning_rate": 9.236829661992023e-05,
"loss": 0.0251,
"step": 760
},
{
"epoch": 2.7697841726618706,
"grad_norm": 0.4013247489929199,
"learning_rate": 8.999043083759017e-05,
"loss": 0.0256,
"step": 770
},
{
"epoch": 2.805755395683453,
"grad_norm": 0.40374016761779785,
"learning_rate": 8.761826909741709e-05,
"loss": 0.032,
"step": 780
},
{
"epoch": 2.841726618705036,
"grad_norm": 0.2673892080783844,
"learning_rate": 8.525316319690092e-05,
"loss": 0.0201,
"step": 790
},
{
"epoch": 2.8776978417266186,
"grad_norm": 0.44146105647087097,
"learning_rate": 8.289646091270849e-05,
"loss": 0.0305,
"step": 800
},
{
"epoch": 2.9136690647482015,
"grad_norm": 0.33300504088401794,
"learning_rate": 8.054950523263096e-05,
"loss": 0.0219,
"step": 810
},
{
"epoch": 2.949640287769784,
"grad_norm": 0.26247546076774597,
"learning_rate": 7.821363359027048e-05,
"loss": 0.0359,
"step": 820
},
{
"epoch": 2.985611510791367,
"grad_norm": 0.5542314052581787,
"learning_rate": 7.589017710289139e-05,
"loss": 0.0286,
"step": 830
},
{
"epoch": 3.0215827338129495,
"grad_norm": 0.4509626030921936,
"learning_rate": 7.358045981287141e-05,
"loss": 0.0234,
"step": 840
},
{
"epoch": 3.0575539568345325,
"grad_norm": 0.2706989049911499,
"learning_rate": 7.128579793318428e-05,
"loss": 0.0225,
"step": 850
},
{
"epoch": 3.093525179856115,
"grad_norm": 0.21301843225955963,
"learning_rate": 6.900749909734406e-05,
"loss": 0.0274,
"step": 860
},
{
"epoch": 3.129496402877698,
"grad_norm": 0.32899874448776245,
"learning_rate": 6.674686161423843e-05,
"loss": 0.0234,
"step": 870
},
{
"epoch": 3.1654676258992804,
"grad_norm": 0.4368799924850464,
"learning_rate": 6.450517372827591e-05,
"loss": 0.0199,
"step": 880
},
{
"epoch": 3.2014388489208634,
"grad_norm": 0.36518239974975586,
"learning_rate": 6.22837128852683e-05,
"loss": 0.0218,
"step": 890
},
{
"epoch": 3.237410071942446,
"grad_norm": 0.20987823605537415,
"learning_rate": 6.008374500446676e-05,
"loss": 0.0217,
"step": 900
},
{
"epoch": 3.273381294964029,
"grad_norm": 0.23875588178634644,
"learning_rate": 5.790652375716652e-05,
"loss": 0.0237,
"step": 910
},
{
"epoch": 3.3093525179856114,
"grad_norm": 0.320413738489151,
"learning_rate": 5.575328985229098e-05,
"loss": 0.0279,
"step": 920
},
{
"epoch": 3.3453237410071943,
"grad_norm": 0.20259268581867218,
"learning_rate": 5.362527032936277e-05,
"loss": 0.0265,
"step": 930
},
{
"epoch": 3.381294964028777,
"grad_norm": 0.22162136435508728,
"learning_rate": 5.1523677859264516e-05,
"loss": 0.0256,
"step": 940
},
{
"epoch": 3.41726618705036,
"grad_norm": 0.15575292706489563,
"learning_rate": 4.944971005318716e-05,
"loss": 0.0194,
"step": 950
},
{
"epoch": 3.4532374100719423,
"grad_norm": 0.29202401638031006,
"learning_rate": 4.740454878016084e-05,
"loss": 0.0276,
"step": 960
},
{
"epoch": 3.4892086330935252,
"grad_norm": 0.40271732211112976,
"learning_rate": 4.538935949355623e-05,
"loss": 0.0183,
"step": 970
},
{
"epoch": 3.5251798561151078,
"grad_norm": 0.22273583710193634,
"learning_rate": 4.340529056694047e-05,
"loss": 0.02,
"step": 980
},
{
"epoch": 3.5611510791366907,
"grad_norm": 0.1683393120765686,
"learning_rate": 4.1453472639666457e-05,
"loss": 0.0177,
"step": 990
},
{
"epoch": 3.597122302158273,
"grad_norm": 0.4574311375617981,
"learning_rate": 3.9535017972567675e-05,
"loss": 0.0205,
"step": 1000
},
{
"epoch": 3.633093525179856,
"grad_norm": 0.31037506461143494,
"learning_rate": 3.7651019814126654e-05,
"loss": 0.0265,
"step": 1010
},
{
"epoch": 3.6690647482014387,
"grad_norm": 0.349869966506958,
"learning_rate": 3.5802551777477476e-05,
"loss": 0.0153,
"step": 1020
},
{
"epoch": 3.7050359712230216,
"grad_norm": 0.2690473794937134,
"learning_rate": 3.399066722859782e-05,
"loss": 0.0205,
"step": 1030
},
{
"epoch": 3.741007194244604,
"grad_norm": 0.23748761415481567,
"learning_rate": 3.2216398686038926e-05,
"loss": 0.0145,
"step": 1040
},
{
"epoch": 3.776978417266187,
"grad_norm": 0.24303750693798065,
"learning_rate": 3.0480757232535772e-05,
"loss": 0.0207,
"step": 1050
},
{
"epoch": 3.81294964028777,
"grad_norm": 0.1993425041437149,
"learning_rate": 2.8784731938832556e-05,
"loss": 0.0237,
"step": 1060
},
{
"epoch": 3.8489208633093526,
"grad_norm": 0.2566690444946289,
"learning_rate": 2.7129289300051787e-05,
"loss": 0.0194,
"step": 1070
},
{
"epoch": 3.884892086330935,
"grad_norm": 0.2915847599506378,
"learning_rate": 2.5515372684928683e-05,
"loss": 0.0204,
"step": 1080
},
{
"epoch": 3.920863309352518,
"grad_norm": 0.21342326700687408,
"learning_rate": 2.394390179822382e-05,
"loss": 0.0157,
"step": 1090
},
{
"epoch": 3.956834532374101,
"grad_norm": 0.23817431926727295,
"learning_rate": 2.2415772156621382e-05,
"loss": 0.0224,
"step": 1100
},
{
"epoch": 3.9928057553956835,
"grad_norm": 0.2008386105298996,
"learning_rate": 2.0931854578410905e-05,
"loss": 0.0128,
"step": 1110
},
{
"epoch": 4.028776978417266,
"grad_norm": 0.16650381684303284,
"learning_rate": 1.9492994687243714e-05,
"loss": 0.0144,
"step": 1120
},
{
"epoch": 4.0647482014388485,
"grad_norm": 0.20245954394340515,
"learning_rate": 1.8100012430246837e-05,
"loss": 0.0188,
"step": 1130
},
{
"epoch": 4.100719424460432,
"grad_norm": 0.21814769506454468,
"learning_rate": 1.6753701610768724e-05,
"loss": 0.0128,
"step": 1140
},
{
"epoch": 4.136690647482014,
"grad_norm": 0.10709039866924286,
"learning_rate": 1.545482943602341e-05,
"loss": 0.0191,
"step": 1150
},
{
"epoch": 4.172661870503597,
"grad_norm": 0.14134275913238525,
"learning_rate": 1.4204136079890584e-05,
"loss": 0.0124,
"step": 1160
},
{
"epoch": 4.2086330935251794,
"grad_norm": 0.33789917826652527,
"learning_rate": 1.300233426112103e-05,
"loss": 0.0152,
"step": 1170
},
{
"epoch": 4.244604316546763,
"grad_norm": 0.17584295570850372,
"learning_rate": 1.1850108837187335e-05,
"loss": 0.0162,
"step": 1180
},
{
"epoch": 4.280575539568345,
"grad_norm": 0.17790471017360687,
"learning_rate": 1.0748116414011888e-05,
"loss": 0.0189,
"step": 1190
},
{
"epoch": 4.316546762589928,
"grad_norm": 0.2450784593820572,
"learning_rate": 9.696984971794065e-06,
"loss": 0.0151,
"step": 1200
},
{
"epoch": 4.35251798561151,
"grad_norm": 0.4205743670463562,
"learning_rate": 8.697313507150184e-06,
"loss": 0.0228,
"step": 1210
},
{
"epoch": 4.388489208633094,
"grad_norm": 0.15447357296943665,
"learning_rate": 7.749671691769911e-06,
"loss": 0.0174,
"step": 1220
},
{
"epoch": 4.424460431654676,
"grad_norm": 0.1472749412059784,
"learning_rate": 6.854599547783736e-06,
"loss": 0.0191,
"step": 1230
},
{
"epoch": 4.460431654676259,
"grad_norm": 0.16755138337612152,
"learning_rate": 6.012607140026605e-06,
"loss": 0.0153,
"step": 1240
},
{
"epoch": 4.496402877697841,
"grad_norm": 0.16813568770885468,
"learning_rate": 5.224174285372974e-06,
"loss": 0.0181,
"step": 1250
},
{
"epoch": 4.532374100719425,
"grad_norm": 0.10466030985116959,
"learning_rate": 4.489750279308757e-06,
"loss": 0.0105,
"step": 1260
},
{
"epoch": 4.568345323741007,
"grad_norm": 0.1699487268924713,
"learning_rate": 3.8097536398963963e-06,
"loss": 0.0117,
"step": 1270
},
{
"epoch": 4.60431654676259,
"grad_norm": 0.19925230741500854,
"learning_rate": 3.184571869278574e-06,
"loss": 0.0155,
"step": 1280
},
{
"epoch": 4.640287769784173,
"grad_norm": 0.19352638721466064,
"learning_rate": 2.6145612328566717e-06,
"loss": 0.0118,
"step": 1290
},
{
"epoch": 4.676258992805756,
"grad_norm": 0.3062892556190491,
"learning_rate": 2.1000465562697856e-06,
"loss": 0.0145,
"step": 1300
},
{
"epoch": 4.712230215827338,
"grad_norm": 0.22001531720161438,
"learning_rate": 1.6413210402898893e-06,
"loss": 0.0134,
"step": 1310
},
{
"epoch": 4.748201438848921,
"grad_norm": 0.2228756546974182,
"learning_rate": 1.2386460937387822e-06,
"loss": 0.0106,
"step": 1320
},
{
"epoch": 4.784172661870503,
"grad_norm": 0.3254740834236145,
"learning_rate": 8.922511845219971e-07,
"loss": 0.0217,
"step": 1330
},
{
"epoch": 4.820143884892087,
"grad_norm": 0.23890721797943115,
"learning_rate": 6.023337088643665e-07,
"loss": 0.0217,
"step": 1340
},
{
"epoch": 4.856115107913669,
"grad_norm": 0.1692313402891159,
"learning_rate": 3.6905887882213717e-07,
"loss": 0.0171,
"step": 1350
},
{
"epoch": 4.892086330935252,
"grad_norm": 0.1608191728591919,
"learning_rate": 1.925596281353026e-07,
"loss": 0.0124,
"step": 1360
},
{
"epoch": 4.928057553956835,
"grad_norm": 0.1591498851776123,
"learning_rate": 7.293653647421073e-08,
"loss": 0.0223,
"step": 1370
},
{
"epoch": 4.9640287769784175,
"grad_norm": 0.16027827560901642,
"learning_rate": 1.0257772123312137e-08,
"loss": 0.0149,
"step": 1380
},
{
"epoch": 4.985611510791367,
"step": 1386,
"total_flos": 4.876041943922688e+16,
"train_loss": 0.05376770623662599,
"train_runtime": 634.0011,
"train_samples_per_second": 34.978,
"train_steps_per_second": 2.186
}
],
"logging_steps": 10,
"max_steps": 1386,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.876041943922688e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
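
# A minimal sketch (not part of the original trainer_state.json) of how this file
# could be inspected with the Python standard library. The path
# "trainer_state.json" is an assumption; point it at wherever the file lives.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Intermediate entries in log_history carry step, loss, learning_rate and grad_norm;
# the final entry holds the run summary (train_loss, train_runtime, ...).
logged = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in logged]
losses = [e["loss"] for e in logged]

print(f"logged points: {len(steps)}, final step: {state['global_step']}")
print(f"first/last logged loss: {losses[0]:.4f} -> {losses[-1]:.4f}")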