{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 1535,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03257328990228013,
"grad_norm": 10.164939880371094,
"learning_rate": 2.5974025974025972e-05,
"loss": 3.0074,
"step": 10
},
{
"epoch": 0.06514657980456026,
"grad_norm": 11.759410858154297,
"learning_rate": 5.1948051948051944e-05,
"loss": 1.1608,
"step": 20
},
{
"epoch": 0.09771986970684039,
"grad_norm": 2.336061716079712,
"learning_rate": 7.792207792207793e-05,
"loss": 0.8385,
"step": 30
},
{
"epoch": 0.13029315960912052,
"grad_norm": 1.3015629053115845,
"learning_rate": 0.00010389610389610389,
"loss": 0.2802,
"step": 40
},
{
"epoch": 0.16286644951140064,
"grad_norm": 1.0599477291107178,
"learning_rate": 0.00012987012987012987,
"loss": 0.1576,
"step": 50
},
{
"epoch": 0.19543973941368079,
"grad_norm": 3.056833267211914,
"learning_rate": 0.00015584415584415587,
"loss": 0.2403,
"step": 60
},
{
"epoch": 0.2280130293159609,
"grad_norm": 2.162693738937378,
"learning_rate": 0.00018181818181818183,
"loss": 0.251,
"step": 70
},
{
"epoch": 0.26058631921824105,
"grad_norm": 3.423180341720581,
"learning_rate": 0.0001999979107245606,
"loss": 0.2418,
"step": 80
},
{
"epoch": 0.2931596091205212,
"grad_norm": 1.267954707145691,
"learning_rate": 0.0001999607704786645,
"loss": 0.157,
"step": 90
},
{
"epoch": 0.3257328990228013,
"grad_norm": 2.345654249191284,
"learning_rate": 0.00019987722173728587,
"loss": 0.2589,
"step": 100
},
{
"epoch": 0.3583061889250814,
"grad_norm": 3.0315651893615723,
"learning_rate": 0.00019974730328935534,
"loss": 0.2621,
"step": 110
},
{
"epoch": 0.39087947882736157,
"grad_norm": 0.543853223323822,
"learning_rate": 0.00019957107545173192,
"loss": 0.1612,
"step": 120
},
{
"epoch": 0.4234527687296417,
"grad_norm": 0.9071061611175537,
"learning_rate": 0.00019934862004120004,
"loss": 0.2302,
"step": 130
},
{
"epoch": 0.4560260586319218,
"grad_norm": 2.0897481441497803,
"learning_rate": 0.00019908004033648453,
"loss": 0.1522,
"step": 140
},
{
"epoch": 0.48859934853420195,
"grad_norm": 0.8175027370452881,
"learning_rate": 0.00019876546103030196,
"loss": 0.0943,
"step": 150
},
{
"epoch": 0.5211726384364821,
"grad_norm": 0.5068450570106506,
"learning_rate": 0.00019840502817146966,
"loss": 0.0865,
"step": 160
},
{
"epoch": 0.5537459283387622,
"grad_norm": 1.0016964673995972,
"learning_rate": 0.00019799890909710013,
"loss": 0.1229,
"step": 170
},
{
"epoch": 0.5863192182410424,
"grad_norm": 2.739112377166748,
"learning_rate": 0.00019754729235491207,
"loss": 0.1327,
"step": 180
},
{
"epoch": 0.6188925081433225,
"grad_norm": 1.9622081518173218,
"learning_rate": 0.0001970503876156937,
"loss": 0.1361,
"step": 190
},
{
"epoch": 0.6514657980456026,
"grad_norm": 3.4183578491210938,
"learning_rate": 0.00019650842557595967,
"loss": 0.1363,
"step": 200
},
{
"epoch": 0.6840390879478827,
"grad_norm": 0.8435428738594055,
"learning_rate": 0.00019592165785084603,
"loss": 0.1079,
"step": 210
},
{
"epoch": 0.7166123778501629,
"grad_norm": 2.0992093086242676,
"learning_rate": 0.00019529035685729391,
"loss": 0.2401,
"step": 220
},
{
"epoch": 0.749185667752443,
"grad_norm": 2.749847650527954,
"learning_rate": 0.00019461481568757506,
"loss": 0.1147,
"step": 230
},
{
"epoch": 0.7817589576547231,
"grad_norm": 2.5013184547424316,
"learning_rate": 0.00019389534797321884,
"loss": 0.1661,
"step": 240
},
{
"epoch": 0.8143322475570033,
"grad_norm": 0.8295263648033142,
"learning_rate": 0.00019313228773940345,
"loss": 0.1267,
"step": 250
},
{
"epoch": 0.8469055374592834,
"grad_norm": 2.893522262573242,
"learning_rate": 0.00019232598924987903,
"loss": 0.0937,
"step": 260
},
{
"epoch": 0.8794788273615635,
"grad_norm": 3.0416808128356934,
"learning_rate": 0.0001914768268424946,
"loss": 0.1318,
"step": 270
},
{
"epoch": 0.9120521172638436,
"grad_norm": 0.6984137892723083,
"learning_rate": 0.00019058519475540538,
"loss": 0.1003,
"step": 280
},
{
"epoch": 0.9446254071661238,
"grad_norm": 1.0821751356124878,
"learning_rate": 0.00018965150694404094,
"loss": 0.0888,
"step": 290
},
{
"epoch": 0.9771986970684039,
"grad_norm": 0.9735985994338989,
"learning_rate": 0.00018867619688891937,
"loss": 0.207,
"step": 300
},
{
"epoch": 1.009771986970684,
"grad_norm": 0.6221311688423157,
"learning_rate": 0.0001876597173943965,
"loss": 0.1472,
"step": 310
},
{
"epoch": 1.0423452768729642,
"grad_norm": 2.335994243621826,
"learning_rate": 0.00018660254037844388,
"loss": 0.071,
"step": 320
},
{
"epoch": 1.0749185667752443,
"grad_norm": 5.066413402557373,
"learning_rate": 0.00018550515665355247,
"loss": 0.0781,
"step": 330
},
{
"epoch": 1.1074918566775245,
"grad_norm": 1.0136305093765259,
"learning_rate": 0.000184368075698865,
"loss": 0.0879,
"step": 340
},
{
"epoch": 1.1400651465798046,
"grad_norm": 1.4384146928787231,
"learning_rate": 0.00018319182542364117,
"loss": 0.1173,
"step": 350
},
{
"epoch": 1.1726384364820848,
"grad_norm": 1.0204399824142456,
"learning_rate": 0.00018197695192216702,
"loss": 0.0738,
"step": 360
},
{
"epoch": 1.205211726384365,
"grad_norm": 0.8423169255256653,
"learning_rate": 0.00018072401922022117,
"loss": 0.0999,
"step": 370
},
{
"epoch": 1.237785016286645,
"grad_norm": 1.6605499982833862,
"learning_rate": 0.0001794336090132164,
"loss": 0.0888,
"step": 380
},
{
"epoch": 1.2703583061889252,
"grad_norm": 3.043694019317627,
"learning_rate": 0.00017810632039613736,
"loss": 0.1041,
"step": 390
},
{
"epoch": 1.3029315960912053,
"grad_norm": 0.49020448327064514,
"learning_rate": 0.00017674276958540072,
"loss": 0.1021,
"step": 400
},
{
"epoch": 1.3355048859934853,
"grad_norm": 2.167153835296631,
"learning_rate": 0.00017534358963276607,
"loss": 0.0971,
"step": 410
},
{
"epoch": 1.3680781758957654,
"grad_norm": 0.8787184953689575,
"learning_rate": 0.00017390943013143083,
"loss": 0.0941,
"step": 420
},
{
"epoch": 1.4006514657980456,
"grad_norm": 1.053727626800537,
"learning_rate": 0.0001724409569144455,
"loss": 0.0756,
"step": 430
},
{
"epoch": 1.4332247557003257,
"grad_norm": 0.5299842357635498,
"learning_rate": 0.0001709388517455893,
"loss": 0.0927,
"step": 440
},
{
"epoch": 1.4657980456026058,
"grad_norm": 1.387248158454895,
"learning_rate": 0.00016940381200284972,
"loss": 0.0828,
"step": 450
},
{
"epoch": 1.498371335504886,
"grad_norm": 0.581618070602417,
"learning_rate": 0.0001678365503546528,
"loss": 0.0697,
"step": 460
},
{
"epoch": 1.5309446254071661,
"grad_norm": 0.41632965207099915,
"learning_rate": 0.0001662377944289948,
"loss": 0.1533,
"step": 470
},
{
"epoch": 1.5635179153094463,
"grad_norm": 1.1524041891098022,
"learning_rate": 0.0001646082864756282,
"loss": 0.0879,
"step": 480
},
{
"epoch": 1.5960912052117264,
"grad_norm": 0.3662007451057434,
"learning_rate": 0.00016294878302145987,
"loss": 0.0421,
"step": 490
},
{
"epoch": 1.6286644951140063,
"grad_norm": 0.5205959677696228,
"learning_rate": 0.0001612600545193203,
"loss": 0.1127,
"step": 500
},
{
"epoch": 1.6612377850162865,
"grad_norm": 0.5683305263519287,
"learning_rate": 0.00015954288499026782,
"loss": 0.0697,
"step": 510
},
{
"epoch": 1.6938110749185666,
"grad_norm": 1.1733250617980957,
"learning_rate": 0.0001577980716595934,
"loss": 0.0724,
"step": 520
},
{
"epoch": 1.7263843648208468,
"grad_norm": 3.809410810470581,
"learning_rate": 0.00015602642458669528,
"loss": 0.0993,
"step": 530
},
{
"epoch": 1.758957654723127,
"grad_norm": 0.48794323205947876,
"learning_rate": 0.0001542287662889948,
"loss": 0.0576,
"step": 540
},
{
"epoch": 1.791530944625407,
"grad_norm": 1.401726245880127,
"learning_rate": 0.00015240593136006897,
"loss": 0.1039,
"step": 550
},
{
"epoch": 1.8241042345276872,
"grad_norm": 0.33422133326530457,
"learning_rate": 0.0001505587660821759,
"loss": 0.0458,
"step": 560
},
{
"epoch": 1.8566775244299674,
"grad_norm": 0.5557196140289307,
"learning_rate": 0.0001486881280333539,
"loss": 0.0489,
"step": 570
},
{
"epoch": 1.8892508143322475,
"grad_norm": 2.7507164478302,
"learning_rate": 0.00014679488568927616,
"loss": 0.0873,
"step": 580
},
{
"epoch": 1.9218241042345277,
"grad_norm": 0.8317222595214844,
"learning_rate": 0.00014487991802004623,
"loss": 0.0402,
"step": 590
},
{
"epoch": 1.9543973941368078,
"grad_norm": 2.6091113090515137,
"learning_rate": 0.0001429441140821209,
"loss": 0.0853,
"step": 600
},
{
"epoch": 1.986970684039088,
"grad_norm": 0.29834985733032227,
"learning_rate": 0.00014098837260555084,
"loss": 0.0544,
"step": 610
},
{
"epoch": 2.019543973941368,
"grad_norm": 0.7023916840553284,
"learning_rate": 0.0001390136015767295,
"loss": 0.0976,
"step": 620
},
{
"epoch": 2.0521172638436482,
"grad_norm": 0.34739574790000916,
"learning_rate": 0.00013702071781684517,
"loss": 0.0612,
"step": 630
},
{
"epoch": 2.0846905537459284,
"grad_norm": 0.43586787581443787,
"learning_rate": 0.00013501064655623094,
"loss": 0.0614,
"step": 640
},
{
"epoch": 2.1172638436482085,
"grad_norm": 0.27919432520866394,
"learning_rate": 0.00013298432100481079,
"loss": 0.0539,
"step": 650
},
{
"epoch": 2.1498371335504887,
"grad_norm": 0.5438246130943298,
"learning_rate": 0.0001309426819188409,
"loss": 0.0401,
"step": 660
},
{
"epoch": 2.182410423452769,
"grad_norm": 0.6277980804443359,
"learning_rate": 0.0001288866771641474,
"loss": 0.0744,
"step": 670
},
{
"epoch": 2.214983713355049,
"grad_norm": 0.9506388306617737,
"learning_rate": 0.00012681726127606376,
"loss": 0.073,
"step": 680
},
{
"epoch": 2.247557003257329,
"grad_norm": 1.6029094457626343,
"learning_rate": 0.000124735395016271,
"loss": 0.0329,
"step": 690
},
{
"epoch": 2.2801302931596092,
"grad_norm": 2.2171266078948975,
"learning_rate": 0.00012264204492674815,
"loss": 0.0885,
"step": 700
},
{
"epoch": 2.3127035830618894,
"grad_norm": 1.0621176958084106,
"learning_rate": 0.0001205381828810382,
"loss": 0.0518,
"step": 710
},
{
"epoch": 2.3452768729641695,
"grad_norm": 1.3911964893341064,
"learning_rate": 0.00011842478563303952,
"loss": 0.0694,
"step": 720
},
{
"epoch": 2.3778501628664497,
"grad_norm": 0.586409330368042,
"learning_rate": 0.00011630283436353098,
"loss": 0.1295,
"step": 730
},
{
"epoch": 2.41042345276873,
"grad_norm": 0.4753137230873108,
"learning_rate": 0.00011417331422464205,
"loss": 0.0438,
"step": 740
},
{
"epoch": 2.44299674267101,
"grad_norm": 0.2599440813064575,
"learning_rate": 0.00011203721388247923,
"loss": 0.0446,
"step": 750
},
{
"epoch": 2.47557003257329,
"grad_norm": 0.43588873744010925,
"learning_rate": 0.00010989552505812072,
"loss": 0.1583,
"step": 760
},
{
"epoch": 2.5081433224755703,
"grad_norm": 0.36574748158454895,
"learning_rate": 0.0001077492420671931,
"loss": 0.0968,
"step": 770
},
{
"epoch": 2.5407166123778504,
"grad_norm": 0.5202295780181885,
"learning_rate": 0.00010559936135824322,
"loss": 0.048,
"step": 780
},
{
"epoch": 2.5732899022801305,
"grad_norm": 0.40310633182525635,
"learning_rate": 0.00010344688105012005,
"loss": 0.0422,
"step": 790
},
{
"epoch": 2.6058631921824107,
"grad_norm": 0.2391999065876007,
"learning_rate": 0.00010129280046858086,
"loss": 0.092,
"step": 800
},
{
"epoch": 2.6384364820846904,
"grad_norm": 0.390307754278183,
"learning_rate": 9.913811968233716e-05,
"loss": 0.0432,
"step": 810
},
{
"epoch": 2.6710097719869705,
"grad_norm": 0.5276124477386475,
"learning_rate": 9.69838390387558e-05,
"loss": 0.0606,
"step": 820
},
{
"epoch": 2.7035830618892507,
"grad_norm": 0.44748738408088684,
"learning_rate": 9.483095869943055e-05,
"loss": 0.0445,
"step": 830
},
{
"epoch": 2.736156351791531,
"grad_norm": 1.4012900590896606,
"learning_rate": 9.268047817583998e-05,
"loss": 0.0372,
"step": 840
},
{
"epoch": 2.768729641693811,
"grad_norm": 0.78389972448349,
"learning_rate": 9.053339586530723e-05,
"loss": 0.0623,
"step": 850
},
{
"epoch": 2.801302931596091,
"grad_norm": 0.5246724486351013,
"learning_rate": 8.839070858747697e-05,
"loss": 0.0499,
"step": 860
},
{
"epoch": 2.8338762214983713,
"grad_norm": 0.2494039684534073,
"learning_rate": 8.625341112152487e-05,
"loss": 0.0462,
"step": 870
},
{
"epoch": 2.8664495114006514,
"grad_norm": 0.3887605667114258,
"learning_rate": 8.412249574431428e-05,
"loss": 0.0319,
"step": 880
},
{
"epoch": 2.8990228013029316,
"grad_norm": 0.3090686500072479,
"learning_rate": 8.199895176971488e-05,
"loss": 0.0804,
"step": 890
},
{
"epoch": 2.9315960912052117,
"grad_norm": 0.47201988101005554,
"learning_rate": 7.988376508929676e-05,
"loss": 0.0466,
"step": 900
},
{
"epoch": 2.964169381107492,
"grad_norm": 0.4059586822986603,
"learning_rate": 7.777791771461332e-05,
"loss": 0.0337,
"step": 910
},
{
"epoch": 2.996742671009772,
"grad_norm": 0.4746284782886505,
"learning_rate": 7.568238732128585e-05,
"loss": 0.0489,
"step": 920
},
{
"epoch": 3.029315960912052,
"grad_norm": 0.24269038438796997,
"learning_rate": 7.359814679510065e-05,
"loss": 0.0408,
"step": 930
},
{
"epoch": 3.0618892508143323,
"grad_norm": 0.6225023865699768,
"learning_rate": 7.152616378033042e-05,
"loss": 0.1245,
"step": 940
},
{
"epoch": 3.0944625407166124,
"grad_norm": 0.3462587296962738,
"learning_rate": 6.94674002304887e-05,
"loss": 0.0369,
"step": 950
},
{
"epoch": 3.1270358306188926,
"grad_norm": 1.3566789627075195,
"learning_rate": 6.742281196172663e-05,
"loss": 0.0499,
"step": 960
},
{
"epoch": 3.1596091205211727,
"grad_norm": 0.45816901326179504,
"learning_rate": 6.539334820907888e-05,
"loss": 0.0368,
"step": 970
},
{
"epoch": 3.192182410423453,
"grad_norm": 0.3949735760688782,
"learning_rate": 6.337995118576521e-05,
"loss": 0.0542,
"step": 980
},
{
"epoch": 3.224755700325733,
"grad_norm": 0.7988837361335754,
"learning_rate": 6.138355564575169e-05,
"loss": 0.0415,
"step": 990
},
{
"epoch": 3.257328990228013,
"grad_norm": 0.2154189944267273,
"learning_rate": 5.940508844977537e-05,
"loss": 0.0322,
"step": 1000
},
{
"epoch": 3.2899022801302933,
"grad_norm": 0.7597559690475464,
"learning_rate": 5.744546813503328e-05,
"loss": 0.0924,
"step": 1010
},
{
"epoch": 3.3224755700325734,
"grad_norm": 0.26576346158981323,
"learning_rate": 5.550560448873575e-05,
"loss": 0.0344,
"step": 1020
},
{
"epoch": 3.3550488599348536,
"grad_norm": 2.2215240001678467,
"learning_rate": 5.358639812572244e-05,
"loss": 0.0429,
"step": 1030
},
{
"epoch": 3.3876221498371337,
"grad_norm": 0.7066490650177002,
"learning_rate": 5.168874007033615e-05,
"loss": 0.0419,
"step": 1040
},
{
"epoch": 3.420195439739414,
"grad_norm": 0.23159299790859222,
"learning_rate": 4.9813511342749805e-05,
"loss": 0.0253,
"step": 1050
},
{
"epoch": 3.4527687296416936,
"grad_norm": 0.5614166259765625,
"learning_rate": 4.7961582549937675e-05,
"loss": 0.0264,
"step": 1060
},
{
"epoch": 3.4853420195439737,
"grad_norm": 0.4681215286254883,
"learning_rate": 4.6133813481481246e-05,
"loss": 0.0234,
"step": 1070
},
{
"epoch": 3.517915309446254,
"grad_norm": 0.3369932770729065,
"learning_rate": 4.433105271039721e-05,
"loss": 0.0441,
"step": 1080
},
{
"epoch": 3.550488599348534,
"grad_norm": 0.30148735642433167,
"learning_rate": 4.255413719917294e-05,
"loss": 0.0492,
"step": 1090
},
{
"epoch": 3.583061889250814,
"grad_norm": 0.3371269106864929,
"learning_rate": 4.080389191119241e-05,
"loss": 0.0325,
"step": 1100
},
{
"epoch": 3.6156351791530943,
"grad_norm": 0.8490313291549683,
"learning_rate": 3.9081129427732774e-05,
"loss": 0.0264,
"step": 1110
},
{
"epoch": 3.6482084690553744,
"grad_norm": 0.30010277032852173,
"learning_rate": 3.7386649570709644e-05,
"loss": 0.0276,
"step": 1120
},
{
"epoch": 3.6807817589576546,
"grad_norm": 0.2943625748157501,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.0945,
"step": 1130
},
{
"epoch": 3.7133550488599347,
"grad_norm": 0.19849584996700287,
"learning_rate": 3.408567100493787e-05,
"loss": 0.0543,
"step": 1140
},
{
"epoch": 3.745928338762215,
"grad_norm": 0.24954350292682648,
"learning_rate": 3.248070483188426e-05,
"loss": 0.0423,
"step": 1150
},
{
"epoch": 3.778501628664495,
"grad_norm": 0.425689697265625,
"learning_rate": 3.090708564515124e-05,
"loss": 0.0556,
"step": 1160
},
{
"epoch": 3.811074918566775,
"grad_norm": 0.18508583307266235,
"learning_rate": 2.936554402433087e-05,
"loss": 0.0217,
"step": 1170
},
{
"epoch": 3.8436482084690553,
"grad_norm": 4.1349196434021,
"learning_rate": 2.7856795656457257e-05,
"loss": 0.0729,
"step": 1180
},
{
"epoch": 3.8762214983713354,
"grad_norm": 0.4305671155452728,
"learning_rate": 2.6381541003736486e-05,
"loss": 0.0327,
"step": 1190
},
{
"epoch": 3.9087947882736156,
"grad_norm": 0.16706599295139313,
"learning_rate": 2.494046497834518e-05,
"loss": 0.0202,
"step": 1200
},
{
"epoch": 3.9413680781758957,
"grad_norm": 0.40168455243110657,
"learning_rate": 2.3534236624448302e-05,
"loss": 0.0428,
"step": 1210
},
{
"epoch": 3.973941368078176,
"grad_norm": 0.33089303970336914,
"learning_rate": 2.2163508807583998e-05,
"loss": 0.0289,
"step": 1220
},
{
"epoch": 4.006514657980456,
"grad_norm": 0.1787663847208023,
"learning_rate": 2.082891791155954e-05,
"loss": 0.0152,
"step": 1230
},
{
"epoch": 4.039087947882736,
"grad_norm": 0.7322351932525635,
"learning_rate": 1.9531083542999317e-05,
"loss": 0.0303,
"step": 1240
},
{
"epoch": 4.071661237785016,
"grad_norm": 0.34002748131752014,
"learning_rate": 1.8270608243681953e-05,
"loss": 0.0144,
"step": 1250
},
{
"epoch": 4.1042345276872965,
"grad_norm": 0.2633480131626129,
"learning_rate": 1.7048077210799772e-05,
"loss": 0.0162,
"step": 1260
},
{
"epoch": 4.136807817589577,
"grad_norm": 0.32320234179496765,
"learning_rate": 1.5864058025271246e-05,
"loss": 0.0158,
"step": 1270
},
{
"epoch": 4.169381107491857,
"grad_norm": 0.2887153625488281,
"learning_rate": 1.47191003882317e-05,
"loss": 0.0275,
"step": 1280
},
{
"epoch": 4.201954397394137,
"grad_norm": 0.2007487416267395,
"learning_rate": 1.3613735865825305e-05,
"loss": 0.0192,
"step": 1290
},
{
"epoch": 4.234527687296417,
"grad_norm": 0.6144905090332031,
"learning_rate": 1.2548477642416256e-05,
"loss": 0.0225,
"step": 1300
},
{
"epoch": 4.267100977198697,
"grad_norm": 0.2809244692325592,
"learning_rate": 1.1523820282334219e-05,
"loss": 0.0201,
"step": 1310
},
{
"epoch": 4.299674267100977,
"grad_norm": 0.2203732132911682,
"learning_rate": 1.0540239500264516e-05,
"loss": 0.0213,
"step": 1320
},
{
"epoch": 4.3322475570032575,
"grad_norm": 0.17500793933868408,
"learning_rate": 9.598191940389256e-06,
"loss": 0.0193,
"step": 1330
},
{
"epoch": 4.364820846905538,
"grad_norm": 0.45830363035202026,
"learning_rate": 8.698114964382598e-06,
"loss": 0.0661,
"step": 1340
},
{
"epoch": 4.397394136807818,
"grad_norm": 0.34097039699554443,
"learning_rate": 7.840426448358085e-06,
"loss": 0.0155,
"step": 1350
},
{
"epoch": 4.429967426710098,
"grad_norm": 2.330317497253418,
"learning_rate": 7.025524588862542e-06,
"loss": 0.0291,
"step": 1360
},
{
"epoch": 4.462540716612378,
"grad_norm": 0.39467132091522217,
"learning_rate": 6.253787718006498e-06,
"loss": 0.0568,
"step": 1370
},
{
"epoch": 4.495114006514658,
"grad_norm": 0.28935298323631287,
"learning_rate": 5.525574127817046e-06,
"loss": 0.0302,
"step": 1380
},
{
"epoch": 4.527687296416938,
"grad_norm": 0.3630921542644501,
"learning_rate": 4.841221903894633e-06,
"loss": 0.027,
"step": 1390
},
{
"epoch": 4.5602605863192185,
"grad_norm": 0.1575985699892044,
"learning_rate": 4.20104876845111e-06,
"loss": 0.0208,
"step": 1400
},
{
"epoch": 4.592833876221499,
"grad_norm": 0.3565620481967926,
"learning_rate": 3.605351932801693e-06,
"loss": 0.0725,
"step": 1410
},
{
"epoch": 4.625407166123779,
"grad_norm": 0.17438632249832153,
"learning_rate": 3.0544079593795573e-06,
"loss": 0.0167,
"step": 1420
},
{
"epoch": 4.657980456026059,
"grad_norm": 0.15625396370887756,
"learning_rate": 2.548472633337007e-06,
"loss": 0.0162,
"step": 1430
},
{
"epoch": 4.690553745928339,
"grad_norm": 0.21613460779190063,
"learning_rate": 2.0877808437928637e-06,
"loss": 0.02,
"step": 1440
},
{
"epoch": 4.723127035830619,
"grad_norm": 0.25940829515457153,
"learning_rate": 1.6725464747811447e-06,
"loss": 0.0327,
"step": 1450
},
{
"epoch": 4.755700325732899,
"grad_norm": 0.6820478439331055,
"learning_rate": 1.3029623059517493e-06,
"loss": 0.0182,
"step": 1460
},
{
"epoch": 4.7882736156351795,
"grad_norm": 0.4143361747264862,
"learning_rate": 9.791999230692629e-07,
"loss": 0.0236,
"step": 1470
},
{
"epoch": 4.82084690553746,
"grad_norm": 0.18389806151390076,
"learning_rate": 7.014096383512802e-07,
"loss": 0.0245,
"step": 1480
},
{
"epoch": 4.85342019543974,
"grad_norm": 0.1316615343093872,
"learning_rate": 4.6972042068341714e-07,
"loss": 0.0162,
"step": 1490
},
{
"epoch": 4.88599348534202,
"grad_norm": 0.7174280881881714,
"learning_rate": 2.8423983574328295e-07,
"loss": 0.0292,
"step": 1500
},
{
"epoch": 4.918566775244299,
"grad_norm": 0.36342909932136536,
"learning_rate": 1.4505399606130621e-07,
"loss": 0.0269,
"step": 1510
},
{
"epoch": 4.95114006514658,
"grad_norm": 0.12144973129034042,
"learning_rate": 5.2227521041470216e-08,
"loss": 0.0193,
"step": 1520
},
{
"epoch": 4.9837133550488595,
"grad_norm": 0.2756155729293823,
"learning_rate": 5.803506960722072e-09,
"loss": 0.0375,
"step": 1530
},
{
"epoch": 5.0,
"step": 1535,
"total_flos": 5.197521391244208e+16,
"train_loss": 0.10585675648980886,
"train_runtime": 733.8736,
"train_samples_per_second": 33.466,
"train_steps_per_second": 2.092
}
],
"logging_steps": 10,
"max_steps": 1535,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.197521391244208e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}