{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 1535,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03257328990228013,
"grad_norm": 9.89926528930664,
"learning_rate": 2.5974025974025972e-05,
"loss": 2.0334,
"step": 10
},
{
"epoch": 0.06514657980456026,
"grad_norm": 7.768723964691162,
"learning_rate": 5.1948051948051944e-05,
"loss": 0.5571,
"step": 20
},
{
"epoch": 0.09771986970684039,
"grad_norm": 1.5563685894012451,
"learning_rate": 7.792207792207793e-05,
"loss": 0.1878,
"step": 30
},
{
"epoch": 0.13029315960912052,
"grad_norm": 1.674676537513733,
"learning_rate": 0.00010389610389610389,
"loss": 0.1257,
"step": 40
},
{
"epoch": 0.16286644951140064,
"grad_norm": 1.1463338136672974,
"learning_rate": 0.00012987012987012987,
"loss": 0.1011,
"step": 50
},
{
"epoch": 0.19543973941368079,
"grad_norm": 1.0946776866912842,
"learning_rate": 0.00015584415584415587,
"loss": 0.0972,
"step": 60
},
{
"epoch": 0.2280130293159609,
"grad_norm": 0.7055681347846985,
"learning_rate": 0.00018181818181818183,
"loss": 0.0724,
"step": 70
},
{
"epoch": 0.26058631921824105,
"grad_norm": 0.51561439037323,
"learning_rate": 0.0001999979107245606,
"loss": 0.0641,
"step": 80
},
{
"epoch": 0.2931596091205212,
"grad_norm": 1.2206898927688599,
"learning_rate": 0.0001999607704786645,
"loss": 0.0727,
"step": 90
},
{
"epoch": 0.3257328990228013,
"grad_norm": 0.7886886596679688,
"learning_rate": 0.00019987722173728587,
"loss": 0.0735,
"step": 100
},
{
"epoch": 0.3583061889250814,
"grad_norm": 0.4768007695674896,
"learning_rate": 0.00019974730328935534,
"loss": 0.0582,
"step": 110
},
{
"epoch": 0.39087947882736157,
"grad_norm": 0.5169155597686768,
"learning_rate": 0.00019957107545173192,
"loss": 0.0593,
"step": 120
},
{
"epoch": 0.4234527687296417,
"grad_norm": 0.359069287776947,
"learning_rate": 0.00019934862004120004,
"loss": 0.0358,
"step": 130
},
{
"epoch": 0.4560260586319218,
"grad_norm": 0.6432995200157166,
"learning_rate": 0.00019908004033648453,
"loss": 0.0382,
"step": 140
},
{
"epoch": 0.48859934853420195,
"grad_norm": 1.358920693397522,
"learning_rate": 0.00019876546103030196,
"loss": 0.0519,
"step": 150
},
{
"epoch": 0.5211726384364821,
"grad_norm": 0.8731968402862549,
"learning_rate": 0.00019840502817146966,
"loss": 0.0582,
"step": 160
},
{
"epoch": 0.5537459283387622,
"grad_norm": 0.8558807969093323,
"learning_rate": 0.00019799890909710013,
"loss": 0.0383,
"step": 170
},
{
"epoch": 0.5863192182410424,
"grad_norm": 0.5326903462409973,
"learning_rate": 0.00019754729235491207,
"loss": 0.0326,
"step": 180
},
{
"epoch": 0.6188925081433225,
"grad_norm": 0.434563547372818,
"learning_rate": 0.0001970503876156937,
"loss": 0.0351,
"step": 190
},
{
"epoch": 0.6514657980456026,
"grad_norm": 0.778796374797821,
"learning_rate": 0.00019650842557595967,
"loss": 0.0362,
"step": 200
},
{
"epoch": 0.6840390879478827,
"grad_norm": 0.49492743611335754,
"learning_rate": 0.00019592165785084603,
"loss": 0.0363,
"step": 210
},
{
"epoch": 0.7166123778501629,
"grad_norm": 0.640010416507721,
"learning_rate": 0.00019529035685729391,
"loss": 0.0403,
"step": 220
},
{
"epoch": 0.749185667752443,
"grad_norm": 0.573532223701477,
"learning_rate": 0.00019461481568757506,
"loss": 0.0394,
"step": 230
},
{
"epoch": 0.7817589576547231,
"grad_norm": 0.3103967308998108,
"learning_rate": 0.00019389534797321884,
"loss": 0.0318,
"step": 240
},
{
"epoch": 0.8143322475570033,
"grad_norm": 0.7902340292930603,
"learning_rate": 0.00019313228773940345,
"loss": 0.0355,
"step": 250
},
{
"epoch": 0.8469055374592834,
"grad_norm": 0.38804200291633606,
"learning_rate": 0.00019232598924987903,
"loss": 0.0353,
"step": 260
},
{
"epoch": 0.8794788273615635,
"grad_norm": 0.540195643901825,
"learning_rate": 0.0001914768268424946,
"loss": 0.0322,
"step": 270
},
{
"epoch": 0.9120521172638436,
"grad_norm": 0.6464592218399048,
"learning_rate": 0.00019058519475540538,
"loss": 0.0369,
"step": 280
},
{
"epoch": 0.9446254071661238,
"grad_norm": 0.36654534935951233,
"learning_rate": 0.00018965150694404094,
"loss": 0.0258,
"step": 290
},
{
"epoch": 0.9771986970684039,
"grad_norm": 0.6431544423103333,
"learning_rate": 0.00018867619688891937,
"loss": 0.0317,
"step": 300
},
{
"epoch": 1.009771986970684,
"grad_norm": 0.47796347737312317,
"learning_rate": 0.0001876597173943965,
"loss": 0.0256,
"step": 310
},
{
"epoch": 1.0423452768729642,
"grad_norm": 0.6077843904495239,
"learning_rate": 0.00018660254037844388,
"loss": 0.0299,
"step": 320
},
{
"epoch": 1.0749185667752443,
"grad_norm": 0.5036475658416748,
"learning_rate": 0.00018550515665355247,
"loss": 0.0341,
"step": 330
},
{
"epoch": 1.1074918566775245,
"grad_norm": 0.7570111751556396,
"learning_rate": 0.000184368075698865,
"loss": 0.0259,
"step": 340
},
{
"epoch": 1.1400651465798046,
"grad_norm": 0.3445906341075897,
"learning_rate": 0.00018319182542364117,
"loss": 0.0253,
"step": 350
},
{
"epoch": 1.1726384364820848,
"grad_norm": 0.43921205401420593,
"learning_rate": 0.00018197695192216702,
"loss": 0.027,
"step": 360
},
{
"epoch": 1.205211726384365,
"grad_norm": 0.390203595161438,
"learning_rate": 0.00018072401922022117,
"loss": 0.0196,
"step": 370
},
{
"epoch": 1.237785016286645,
"grad_norm": 0.3478577733039856,
"learning_rate": 0.0001794336090132164,
"loss": 0.0321,
"step": 380
},
{
"epoch": 1.2703583061889252,
"grad_norm": 0.6172159910202026,
"learning_rate": 0.00017810632039613736,
"loss": 0.0398,
"step": 390
},
{
"epoch": 1.3029315960912053,
"grad_norm": 0.6841723322868347,
"learning_rate": 0.00017674276958540072,
"loss": 0.0339,
"step": 400
},
{
"epoch": 1.3355048859934853,
"grad_norm": 0.26006555557250977,
"learning_rate": 0.00017534358963276607,
"loss": 0.0293,
"step": 410
},
{
"epoch": 1.3680781758957654,
"grad_norm": 0.4724225699901581,
"learning_rate": 0.00017390943013143083,
"loss": 0.0346,
"step": 420
},
{
"epoch": 1.4006514657980456,
"grad_norm": 0.660692572593689,
"learning_rate": 0.0001724409569144455,
"loss": 0.0299,
"step": 430
},
{
"epoch": 1.4332247557003257,
"grad_norm": 0.5522527098655701,
"learning_rate": 0.0001709388517455893,
"loss": 0.0355,
"step": 440
},
{
"epoch": 1.4657980456026058,
"grad_norm": 0.8123102188110352,
"learning_rate": 0.00016940381200284972,
"loss": 0.0303,
"step": 450
},
{
"epoch": 1.498371335504886,
"grad_norm": 1.0083576440811157,
"learning_rate": 0.0001678365503546528,
"loss": 0.0279,
"step": 460
},
{
"epoch": 1.5309446254071661,
"grad_norm": 0.4633665680885315,
"learning_rate": 0.0001662377944289948,
"loss": 0.0299,
"step": 470
},
{
"epoch": 1.5635179153094463,
"grad_norm": 0.42941364645957947,
"learning_rate": 0.0001646082864756282,
"loss": 0.0218,
"step": 480
},
{
"epoch": 1.5960912052117264,
"grad_norm": 0.2960715591907501,
"learning_rate": 0.00016294878302145987,
"loss": 0.025,
"step": 490
},
{
"epoch": 1.6286644951140063,
"grad_norm": 0.4294084310531616,
"learning_rate": 0.0001612600545193203,
"loss": 0.0175,
"step": 500
},
{
"epoch": 1.6612377850162865,
"grad_norm": 0.5202115178108215,
"learning_rate": 0.00015954288499026782,
"loss": 0.0249,
"step": 510
},
{
"epoch": 1.6938110749185666,
"grad_norm": 0.3897187113761902,
"learning_rate": 0.0001577980716595934,
"loss": 0.0178,
"step": 520
},
{
"epoch": 1.7263843648208468,
"grad_norm": 0.33642974495887756,
"learning_rate": 0.00015602642458669528,
"loss": 0.0259,
"step": 530
},
{
"epoch": 1.758957654723127,
"grad_norm": 0.37540072202682495,
"learning_rate": 0.0001542287662889948,
"loss": 0.0261,
"step": 540
},
{
"epoch": 1.791530944625407,
"grad_norm": 0.36353737115859985,
"learning_rate": 0.00015240593136006897,
"loss": 0.0194,
"step": 550
},
{
"epoch": 1.8241042345276872,
"grad_norm": 0.29128432273864746,
"learning_rate": 0.0001505587660821759,
"loss": 0.0201,
"step": 560
},
{
"epoch": 1.8566775244299674,
"grad_norm": 0.5382766723632812,
"learning_rate": 0.0001486881280333539,
"loss": 0.0259,
"step": 570
},
{
"epoch": 1.8892508143322475,
"grad_norm": 0.3132719397544861,
"learning_rate": 0.00014679488568927616,
"loss": 0.0246,
"step": 580
},
{
"epoch": 1.9218241042345277,
"grad_norm": 0.27731725573539734,
"learning_rate": 0.00014487991802004623,
"loss": 0.0176,
"step": 590
},
{
"epoch": 1.9543973941368078,
"grad_norm": 0.45612412691116333,
"learning_rate": 0.0001429441140821209,
"loss": 0.0218,
"step": 600
},
{
"epoch": 1.986970684039088,
"grad_norm": 0.36944088339805603,
"learning_rate": 0.00014098837260555084,
"loss": 0.023,
"step": 610
},
{
"epoch": 2.019543973941368,
"grad_norm": 0.33426433801651,
"learning_rate": 0.0001390136015767295,
"loss": 0.0288,
"step": 620
},
{
"epoch": 2.0521172638436482,
"grad_norm": 0.4996889531612396,
"learning_rate": 0.00013702071781684517,
"loss": 0.0171,
"step": 630
},
{
"epoch": 2.0846905537459284,
"grad_norm": 0.34143733978271484,
"learning_rate": 0.00013501064655623094,
"loss": 0.0171,
"step": 640
},
{
"epoch": 2.1172638436482085,
"grad_norm": 0.33777856826782227,
"learning_rate": 0.00013298432100481079,
"loss": 0.0189,
"step": 650
},
{
"epoch": 2.1498371335504887,
"grad_norm": 0.6728042960166931,
"learning_rate": 0.0001309426819188409,
"loss": 0.0243,
"step": 660
},
{
"epoch": 2.182410423452769,
"grad_norm": 0.4677506685256958,
"learning_rate": 0.0001288866771641474,
"loss": 0.0129,
"step": 670
},
{
"epoch": 2.214983713355049,
"grad_norm": 0.48820221424102783,
"learning_rate": 0.00012681726127606376,
"loss": 0.0276,
"step": 680
},
{
"epoch": 2.247557003257329,
"grad_norm": 0.3881731927394867,
"learning_rate": 0.000124735395016271,
"loss": 0.0158,
"step": 690
},
{
"epoch": 2.2801302931596092,
"grad_norm": 0.27123430371284485,
"learning_rate": 0.00012264204492674815,
"loss": 0.0167,
"step": 700
},
{
"epoch": 2.3127035830618894,
"grad_norm": 0.2786751091480255,
"learning_rate": 0.0001205381828810382,
"loss": 0.0194,
"step": 710
},
{
"epoch": 2.3452768729641695,
"grad_norm": 0.23343567550182343,
"learning_rate": 0.00011842478563303952,
"loss": 0.0155,
"step": 720
},
{
"epoch": 2.3778501628664497,
"grad_norm": 0.22697576880455017,
"learning_rate": 0.00011630283436353098,
"loss": 0.0185,
"step": 730
},
{
"epoch": 2.41042345276873,
"grad_norm": 0.38111135363578796,
"learning_rate": 0.00011417331422464205,
"loss": 0.0151,
"step": 740
},
{
"epoch": 2.44299674267101,
"grad_norm": 0.3761187493801117,
"learning_rate": 0.00011203721388247923,
"loss": 0.0185,
"step": 750
},
{
"epoch": 2.47557003257329,
"grad_norm": 0.29886382818222046,
"learning_rate": 0.00010989552505812072,
"loss": 0.0093,
"step": 760
},
{
"epoch": 2.5081433224755703,
"grad_norm": 0.308753103017807,
"learning_rate": 0.0001077492420671931,
"loss": 0.0182,
"step": 770
},
{
"epoch": 2.5407166123778504,
"grad_norm": 0.26204925775527954,
"learning_rate": 0.00010559936135824322,
"loss": 0.0192,
"step": 780
},
{
"epoch": 2.5732899022801305,
"grad_norm": 0.433326780796051,
"learning_rate": 0.00010344688105012005,
"loss": 0.0154,
"step": 790
},
{
"epoch": 2.6058631921824107,
"grad_norm": 0.24838528037071228,
"learning_rate": 0.00010129280046858086,
"loss": 0.0175,
"step": 800
},
{
"epoch": 2.6384364820846904,
"grad_norm": 0.2929915487766266,
"learning_rate": 9.913811968233716e-05,
"loss": 0.0129,
"step": 810
},
{
"epoch": 2.6710097719869705,
"grad_norm": 0.30677530169487,
"learning_rate": 9.69838390387558e-05,
"loss": 0.0315,
"step": 820
},
{
"epoch": 2.7035830618892507,
"grad_norm": 0.34507474303245544,
"learning_rate": 9.483095869943055e-05,
"loss": 0.0151,
"step": 830
},
{
"epoch": 2.736156351791531,
"grad_norm": 0.2926924526691437,
"learning_rate": 9.268047817583998e-05,
"loss": 0.0112,
"step": 840
},
{
"epoch": 2.768729641693811,
"grad_norm": 0.3242110311985016,
"learning_rate": 9.053339586530723e-05,
"loss": 0.0133,
"step": 850
},
{
"epoch": 2.801302931596091,
"grad_norm": 0.274239718914032,
"learning_rate": 8.839070858747697e-05,
"loss": 0.02,
"step": 860
},
{
"epoch": 2.8338762214983713,
"grad_norm": 0.186782106757164,
"learning_rate": 8.625341112152487e-05,
"loss": 0.0121,
"step": 870
},
{
"epoch": 2.8664495114006514,
"grad_norm": 0.1265726238489151,
"learning_rate": 8.412249574431428e-05,
"loss": 0.0134,
"step": 880
},
{
"epoch": 2.8990228013029316,
"grad_norm": 0.3958815634250641,
"learning_rate": 8.199895176971488e-05,
"loss": 0.0113,
"step": 890
},
{
"epoch": 2.9315960912052117,
"grad_norm": 0.19703464210033417,
"learning_rate": 7.988376508929676e-05,
"loss": 0.0144,
"step": 900
},
{
"epoch": 2.964169381107492,
"grad_norm": 0.2459426373243332,
"learning_rate": 7.777791771461332e-05,
"loss": 0.0138,
"step": 910
},
{
"epoch": 2.996742671009772,
"grad_norm": 0.2412751019001007,
"learning_rate": 7.568238732128585e-05,
"loss": 0.0227,
"step": 920
},
{
"epoch": 3.029315960912052,
"grad_norm": 0.38352710008621216,
"learning_rate": 7.359814679510065e-05,
"loss": 0.0245,
"step": 930
},
{
"epoch": 3.0618892508143323,
"grad_norm": 0.33591127395629883,
"learning_rate": 7.152616378033042e-05,
"loss": 0.0091,
"step": 940
},
{
"epoch": 3.0944625407166124,
"grad_norm": 0.2634866237640381,
"learning_rate": 6.94674002304887e-05,
"loss": 0.01,
"step": 950
},
{
"epoch": 3.1270358306188926,
"grad_norm": 0.17260074615478516,
"learning_rate": 6.742281196172663e-05,
"loss": 0.0156,
"step": 960
},
{
"epoch": 3.1596091205211727,
"grad_norm": 0.20153683423995972,
"learning_rate": 6.539334820907888e-05,
"loss": 0.0091,
"step": 970
},
{
"epoch": 3.192182410423453,
"grad_norm": 0.4081243872642517,
"learning_rate": 6.337995118576521e-05,
"loss": 0.0101,
"step": 980
},
{
"epoch": 3.224755700325733,
"grad_norm": 0.24244199693202972,
"learning_rate": 6.138355564575169e-05,
"loss": 0.0103,
"step": 990
},
{
"epoch": 3.257328990228013,
"grad_norm": 0.17980413138866425,
"learning_rate": 5.940508844977537e-05,
"loss": 0.0111,
"step": 1000
},
{
"epoch": 3.2899022801302933,
"grad_norm": 0.13628683984279633,
"learning_rate": 5.744546813503328e-05,
"loss": 0.0115,
"step": 1010
},
{
"epoch": 3.3224755700325734,
"grad_norm": 0.408488005399704,
"learning_rate": 5.550560448873575e-05,
"loss": 0.0146,
"step": 1020
},
{
"epoch": 3.3550488599348536,
"grad_norm": 0.23400142788887024,
"learning_rate": 5.358639812572244e-05,
"loss": 0.0114,
"step": 1030
},
{
"epoch": 3.3876221498371337,
"grad_norm": 0.2922910749912262,
"learning_rate": 5.168874007033615e-05,
"loss": 0.0094,
"step": 1040
},
{
"epoch": 3.420195439739414,
"grad_norm": 0.20555023849010468,
"learning_rate": 4.9813511342749805e-05,
"loss": 0.0122,
"step": 1050
},
{
"epoch": 3.4527687296416936,
"grad_norm": 0.17685680091381073,
"learning_rate": 4.7961582549937675e-05,
"loss": 0.0182,
"step": 1060
},
{
"epoch": 3.4853420195439737,
"grad_norm": 0.14065317809581757,
"learning_rate": 4.6133813481481246e-05,
"loss": 0.0097,
"step": 1070
},
{
"epoch": 3.517915309446254,
"grad_norm": 0.34961211681365967,
"learning_rate": 4.433105271039721e-05,
"loss": 0.012,
"step": 1080
},
{
"epoch": 3.550488599348534,
"grad_norm": 0.38357603549957275,
"learning_rate": 4.255413719917294e-05,
"loss": 0.0102,
"step": 1090
},
{
"epoch": 3.583061889250814,
"grad_norm": 0.25049689412117004,
"learning_rate": 4.080389191119241e-05,
"loss": 0.0113,
"step": 1100
},
{
"epoch": 3.6156351791530943,
"grad_norm": 0.23103076219558716,
"learning_rate": 3.9081129427732774e-05,
"loss": 0.0096,
"step": 1110
},
{
"epoch": 3.6482084690553744,
"grad_norm": 0.5483614206314087,
"learning_rate": 3.7386649570709644e-05,
"loss": 0.0113,
"step": 1120
},
{
"epoch": 3.6807817589576546,
"grad_norm": 0.14837254583835602,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.0078,
"step": 1130
},
{
"epoch": 3.7133550488599347,
"grad_norm": 0.1685313582420349,
"learning_rate": 3.408567100493787e-05,
"loss": 0.0098,
"step": 1140
},
{
"epoch": 3.745928338762215,
"grad_norm": 0.6023198366165161,
"learning_rate": 3.248070483188426e-05,
"loss": 0.0102,
"step": 1150
},
{
"epoch": 3.778501628664495,
"grad_norm": 0.11718238145112991,
"learning_rate": 3.090708564515124e-05,
"loss": 0.0065,
"step": 1160
},
{
"epoch": 3.811074918566775,
"grad_norm": 0.23708564043045044,
"learning_rate": 2.936554402433087e-05,
"loss": 0.0122,
"step": 1170
},
{
"epoch": 3.8436482084690553,
"grad_norm": 0.4252511262893677,
"learning_rate": 2.7856795656457257e-05,
"loss": 0.0098,
"step": 1180
},
{
"epoch": 3.8762214983713354,
"grad_norm": 0.3115089237689972,
"learning_rate": 2.6381541003736486e-05,
"loss": 0.0105,
"step": 1190
},
{
"epoch": 3.9087947882736156,
"grad_norm": 0.39385488629341125,
"learning_rate": 2.494046497834518e-05,
"loss": 0.0114,
"step": 1200
},
{
"epoch": 3.9413680781758957,
"grad_norm": 0.29438289999961853,
"learning_rate": 2.3534236624448302e-05,
"loss": 0.0148,
"step": 1210
},
{
"epoch": 3.973941368078176,
"grad_norm": 0.25224828720092773,
"learning_rate": 2.2163508807583998e-05,
"loss": 0.0114,
"step": 1220
},
{
"epoch": 4.006514657980456,
"grad_norm": 0.2634766697883606,
"learning_rate": 2.082891791155954e-05,
"loss": 0.0091,
"step": 1230
},
{
"epoch": 4.039087947882736,
"grad_norm": 0.4244247376918793,
"learning_rate": 1.9531083542999317e-05,
"loss": 0.012,
"step": 1240
},
{
"epoch": 4.071661237785016,
"grad_norm": 0.24010491371154785,
"learning_rate": 1.8270608243681953e-05,
"loss": 0.0078,
"step": 1250
},
{
"epoch": 4.1042345276872965,
"grad_norm": 0.2347911149263382,
"learning_rate": 1.7048077210799772e-05,
"loss": 0.0061,
"step": 1260
},
{
"epoch": 4.136807817589577,
"grad_norm": 0.16366265714168549,
"learning_rate": 1.5864058025271246e-05,
"loss": 0.0083,
"step": 1270
},
{
"epoch": 4.169381107491857,
"grad_norm": 0.12379926443099976,
"learning_rate": 1.47191003882317e-05,
"loss": 0.0116,
"step": 1280
},
{
"epoch": 4.201954397394137,
"grad_norm": 0.2692829370498657,
"learning_rate": 1.3613735865825305e-05,
"loss": 0.0084,
"step": 1290
},
{
"epoch": 4.234527687296417,
"grad_norm": 0.180028036236763,
"learning_rate": 1.2548477642416256e-05,
"loss": 0.0073,
"step": 1300
},
{
"epoch": 4.267100977198697,
"grad_norm": 0.1352684646844864,
"learning_rate": 1.1523820282334219e-05,
"loss": 0.0086,
"step": 1310
},
{
"epoch": 4.299674267100977,
"grad_norm": 0.2455337941646576,
"learning_rate": 1.0540239500264516e-05,
"loss": 0.0096,
"step": 1320
},
{
"epoch": 4.3322475570032575,
"grad_norm": 0.23969142138957977,
"learning_rate": 9.598191940389256e-06,
"loss": 0.013,
"step": 1330
},
{
"epoch": 4.364820846905538,
"grad_norm": 0.37968650460243225,
"learning_rate": 8.698114964382598e-06,
"loss": 0.0141,
"step": 1340
},
{
"epoch": 4.397394136807818,
"grad_norm": 0.07284022867679596,
"learning_rate": 7.840426448358085e-06,
"loss": 0.0069,
"step": 1350
},
{
"epoch": 4.429967426710098,
"grad_norm": 0.25828060507774353,
"learning_rate": 7.025524588862542e-06,
"loss": 0.0147,
"step": 1360
},
{
"epoch": 4.462540716612378,
"grad_norm": 0.1627136766910553,
"learning_rate": 6.253787718006498e-06,
"loss": 0.0102,
"step": 1370
},
{
"epoch": 4.495114006514658,
"grad_norm": 0.20821145176887512,
"learning_rate": 5.525574127817046e-06,
"loss": 0.0091,
"step": 1380
},
{
"epoch": 4.527687296416938,
"grad_norm": 0.1315605640411377,
"learning_rate": 4.841221903894633e-06,
"loss": 0.011,
"step": 1390
},
{
"epoch": 4.5602605863192185,
"grad_norm": 0.10593540966510773,
"learning_rate": 4.20104876845111e-06,
"loss": 0.0057,
"step": 1400
},
{
"epoch": 4.592833876221499,
"grad_norm": 0.20290839672088623,
"learning_rate": 3.605351932801693e-06,
"loss": 0.0151,
"step": 1410
},
{
"epoch": 4.625407166123779,
"grad_norm": 0.1976938247680664,
"learning_rate": 3.0544079593795573e-06,
"loss": 0.0119,
"step": 1420
},
{
"epoch": 4.657980456026059,
"grad_norm": 0.22302116453647614,
"learning_rate": 2.548472633337007e-06,
"loss": 0.008,
"step": 1430
},
{
"epoch": 4.690553745928339,
"grad_norm": 0.1248151957988739,
"learning_rate": 2.0877808437928637e-06,
"loss": 0.0078,
"step": 1440
},
{
"epoch": 4.723127035830619,
"grad_norm": 0.20839862525463104,
"learning_rate": 1.6725464747811447e-06,
"loss": 0.0132,
"step": 1450
},
{
"epoch": 4.755700325732899,
"grad_norm": 0.2530406713485718,
"learning_rate": 1.3029623059517493e-06,
"loss": 0.0075,
"step": 1460
},
{
"epoch": 4.7882736156351795,
"grad_norm": 0.11152759194374084,
"learning_rate": 9.791999230692629e-07,
"loss": 0.0064,
"step": 1470
},
{
"epoch": 4.82084690553746,
"grad_norm": 0.20677945017814636,
"learning_rate": 7.014096383512802e-07,
"loss": 0.0083,
"step": 1480
},
{
"epoch": 4.85342019543974,
"grad_norm": 0.186812624335289,
"learning_rate": 4.6972042068341714e-07,
"loss": 0.0074,
"step": 1490
},
{
"epoch": 4.88599348534202,
"grad_norm": 0.07393082976341248,
"learning_rate": 2.8423983574328295e-07,
"loss": 0.0075,
"step": 1500
},
{
"epoch": 4.918566775244299,
"grad_norm": 0.06852855533361435,
"learning_rate": 1.4505399606130621e-07,
"loss": 0.0073,
"step": 1510
},
{
"epoch": 4.95114006514658,
"grad_norm": 0.1197730079293251,
"learning_rate": 5.2227521041470216e-08,
"loss": 0.0057,
"step": 1520
},
{
"epoch": 4.9837133550488595,
"grad_norm": 0.2546689212322235,
"learning_rate": 5.803506960722072e-09,
"loss": 0.0097,
"step": 1530
},
{
"epoch": 5.0,
"step": 1535,
"total_flos": 5.19646304846448e+16,
"train_loss": 0.04019785498296205,
"train_runtime": 734.2052,
"train_samples_per_second": 33.451,
"train_steps_per_second": 2.091
}
],
"logging_steps": 10,
"max_steps": 1535,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.19646304846448e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}