etdnn-voxceleb1 / trainer_state.json
{
"best_metric": 0.9340733266061217,
"best_model_checkpoint": "/mnt/data4_HDD_14TB/yang/voxceleb-checkpoints/etdnn/voxceleb1/pretrain/ce-len3-bs256-lr5e-4/checkpoint-5230",
"epoch": 10.0,
"eval_steps": 500,
"global_step": 5230,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03824091778202677,
"grad_norm": 10.999133110046387,
"learning_rate": 1.9120458891013384e-05,
"loss": 7.1473,
"step": 20
},
{
"epoch": 0.07648183556405354,
"grad_norm": 10.200960159301758,
"learning_rate": 3.824091778202677e-05,
"loss": 7.1283,
"step": 40
},
{
"epoch": 0.1147227533460803,
"grad_norm": 9.468878746032715,
"learning_rate": 5.736137667304015e-05,
"loss": 7.0847,
"step": 60
},
{
"epoch": 0.15296367112810708,
"grad_norm": 8.736529350280762,
"learning_rate": 7.648183556405354e-05,
"loss": 7.0321,
"step": 80
},
{
"epoch": 0.19120458891013384,
"grad_norm": 6.856561183929443,
"learning_rate": 9.560229445506692e-05,
"loss": 6.955,
"step": 100
},
{
"epoch": 0.2294455066921606,
"grad_norm": 5.473419666290283,
"learning_rate": 0.0001147227533460803,
"loss": 6.8474,
"step": 120
},
{
"epoch": 0.2676864244741874,
"grad_norm": 3.7059528827667236,
"learning_rate": 0.0001338432122370937,
"loss": 6.7002,
"step": 140
},
{
"epoch": 0.30592734225621415,
"grad_norm": 2.9125759601593018,
"learning_rate": 0.00015296367112810707,
"loss": 6.5368,
"step": 160
},
{
"epoch": 0.3441682600382409,
"grad_norm": 2.5546936988830566,
"learning_rate": 0.00017208413001912047,
"loss": 6.3399,
"step": 180
},
{
"epoch": 0.3824091778202677,
"grad_norm": 2.418347120285034,
"learning_rate": 0.00019120458891013384,
"loss": 6.1511,
"step": 200
},
{
"epoch": 0.42065009560229444,
"grad_norm": 2.2986104488372803,
"learning_rate": 0.0002103250478011472,
"loss": 6.015,
"step": 220
},
{
"epoch": 0.4588910133843212,
"grad_norm": 2.227661609649658,
"learning_rate": 0.0002294455066921606,
"loss": 5.8224,
"step": 240
},
{
"epoch": 0.497131931166348,
"grad_norm": 2.2191731929779053,
"learning_rate": 0.00024856596558317403,
"loss": 5.6838,
"step": 260
},
{
"epoch": 0.5353728489483748,
"grad_norm": 2.2345361709594727,
"learning_rate": 0.0002676864244741874,
"loss": 5.5764,
"step": 280
},
{
"epoch": 0.5736137667304015,
"grad_norm": 2.2029287815093994,
"learning_rate": 0.0002868068833652008,
"loss": 5.4715,
"step": 300
},
{
"epoch": 0.6118546845124283,
"grad_norm": 2.1917331218719482,
"learning_rate": 0.00030592734225621415,
"loss": 5.321,
"step": 320
},
{
"epoch": 0.6500956022944551,
"grad_norm": 2.2598936557769775,
"learning_rate": 0.0003250478011472275,
"loss": 5.2218,
"step": 340
},
{
"epoch": 0.6883365200764818,
"grad_norm": 2.3335328102111816,
"learning_rate": 0.00034416826003824094,
"loss": 5.1449,
"step": 360
},
{
"epoch": 0.7265774378585086,
"grad_norm": 2.3767151832580566,
"learning_rate": 0.0003632887189292543,
"loss": 5.0062,
"step": 380
},
{
"epoch": 0.7648183556405354,
"grad_norm": 2.357161521911621,
"learning_rate": 0.0003824091778202677,
"loss": 4.9352,
"step": 400
},
{
"epoch": 0.8030592734225621,
"grad_norm": 2.271972179412842,
"learning_rate": 0.00040152963671128105,
"loss": 4.8959,
"step": 420
},
{
"epoch": 0.8413001912045889,
"grad_norm": 2.4624767303466797,
"learning_rate": 0.0004206500956022944,
"loss": 4.7951,
"step": 440
},
{
"epoch": 0.8795411089866156,
"grad_norm": 2.1839234828948975,
"learning_rate": 0.00043977055449330785,
"loss": 4.6734,
"step": 460
},
{
"epoch": 0.9177820267686424,
"grad_norm": 2.3169806003570557,
"learning_rate": 0.0004588910133843212,
"loss": 4.6458,
"step": 480
},
{
"epoch": 0.9560229445506692,
"grad_norm": 2.3231122493743896,
"learning_rate": 0.0004780114722753346,
"loss": 4.5657,
"step": 500
},
{
"epoch": 0.994263862332696,
"grad_norm": 2.34975266456604,
"learning_rate": 0.0004971319311663481,
"loss": 4.489,
"step": 520
},
{
"epoch": 1.0,
"eval_accuracy": 0.1722166162125799,
"eval_loss": 4.208949089050293,
"eval_runtime": 1092.4973,
"eval_samples_per_second": 13.606,
"eval_steps_per_second": 13.606,
"step": 523
},
{
"epoch": 1.0325047801147227,
"grad_norm": 2.3869619369506836,
"learning_rate": 0.0004981941788825154,
"loss": 4.3496,
"step": 540
},
{
"epoch": 1.0707456978967496,
"grad_norm": 2.2834343910217285,
"learning_rate": 0.0004960696834501806,
"loss": 4.3108,
"step": 560
},
{
"epoch": 1.1089866156787762,
"grad_norm": 2.192180633544922,
"learning_rate": 0.0004939451880178457,
"loss": 4.229,
"step": 580
},
{
"epoch": 1.147227533460803,
"grad_norm": 2.161285161972046,
"learning_rate": 0.0004918206925855109,
"loss": 4.1804,
"step": 600
},
{
"epoch": 1.1854684512428297,
"grad_norm": 2.2634170055389404,
"learning_rate": 0.0004896961971531761,
"loss": 4.118,
"step": 620
},
{
"epoch": 1.2237093690248566,
"grad_norm": 2.1758434772491455,
"learning_rate": 0.00048757170172084133,
"loss": 4.0295,
"step": 640
},
{
"epoch": 1.2619502868068833,
"grad_norm": 2.25357985496521,
"learning_rate": 0.0004854472062885065,
"loss": 3.9559,
"step": 660
},
{
"epoch": 1.3001912045889101,
"grad_norm": 2.2077853679656982,
"learning_rate": 0.0004833227108561717,
"loss": 3.9295,
"step": 680
},
{
"epoch": 1.338432122370937,
"grad_norm": 2.28712797164917,
"learning_rate": 0.0004811982154238369,
"loss": 3.8221,
"step": 700
},
{
"epoch": 1.3766730401529637,
"grad_norm": 2.2988157272338867,
"learning_rate": 0.000479073719991502,
"loss": 3.7764,
"step": 720
},
{
"epoch": 1.4149139579349903,
"grad_norm": 2.391023874282837,
"learning_rate": 0.0004769492245591672,
"loss": 3.7518,
"step": 740
},
{
"epoch": 1.4531548757170172,
"grad_norm": 2.065288782119751,
"learning_rate": 0.00047482472912683236,
"loss": 3.7074,
"step": 760
},
{
"epoch": 1.491395793499044,
"grad_norm": 2.288686990737915,
"learning_rate": 0.00047270023369449756,
"loss": 3.6558,
"step": 780
},
{
"epoch": 1.5296367112810707,
"grad_norm": 2.228173017501831,
"learning_rate": 0.00047057573826216276,
"loss": 3.6038,
"step": 800
},
{
"epoch": 1.5678776290630974,
"grad_norm": 2.1894149780273438,
"learning_rate": 0.0004684512428298279,
"loss": 3.5405,
"step": 820
},
{
"epoch": 1.6061185468451242,
"grad_norm": 2.234192371368408,
"learning_rate": 0.0004663267473974931,
"loss": 3.4855,
"step": 840
},
{
"epoch": 1.644359464627151,
"grad_norm": 2.309387445449829,
"learning_rate": 0.0004642022519651583,
"loss": 3.4771,
"step": 860
},
{
"epoch": 1.682600382409178,
"grad_norm": 2.183579206466675,
"learning_rate": 0.00046207775653282345,
"loss": 3.3984,
"step": 880
},
{
"epoch": 1.7208413001912046,
"grad_norm": 2.286858558654785,
"learning_rate": 0.00045995326110048865,
"loss": 3.3318,
"step": 900
},
{
"epoch": 1.7590822179732313,
"grad_norm": 2.2087490558624268,
"learning_rate": 0.00045782876566815385,
"loss": 3.3256,
"step": 920
},
{
"epoch": 1.7973231357552581,
"grad_norm": 2.2571709156036377,
"learning_rate": 0.000455704270235819,
"loss": 3.2644,
"step": 940
},
{
"epoch": 1.835564053537285,
"grad_norm": 2.346696138381958,
"learning_rate": 0.0004535797748034842,
"loss": 3.2687,
"step": 960
},
{
"epoch": 1.8738049713193117,
"grad_norm": 2.309258460998535,
"learning_rate": 0.0004514552793711494,
"loss": 3.2346,
"step": 980
},
{
"epoch": 1.9120458891013383,
"grad_norm": 2.200920820236206,
"learning_rate": 0.00044933078393881453,
"loss": 3.1507,
"step": 1000
},
{
"epoch": 1.9502868068833652,
"grad_norm": 2.1627931594848633,
"learning_rate": 0.00044720628850647973,
"loss": 3.0884,
"step": 1020
},
{
"epoch": 1.988527724665392,
"grad_norm": 2.350935220718384,
"learning_rate": 0.00044508179307414493,
"loss": 3.0685,
"step": 1040
},
{
"epoch": 2.0,
"eval_accuracy": 0.41096535486041036,
"eval_loss": 2.762111186981201,
"eval_runtime": 1228.094,
"eval_samples_per_second": 12.104,
"eval_steps_per_second": 12.104,
"step": 1046
},
{
"epoch": 2.026768642447419,
"grad_norm": 2.137504816055298,
"learning_rate": 0.0004429572976418101,
"loss": 3.0334,
"step": 1060
},
{
"epoch": 2.0650095602294454,
"grad_norm": 2.2636399269104004,
"learning_rate": 0.0004408328022094752,
"loss": 2.8843,
"step": 1080
},
{
"epoch": 2.1032504780114722,
"grad_norm": 2.141150712966919,
"learning_rate": 0.0004387083067771405,
"loss": 2.909,
"step": 1100
},
{
"epoch": 2.141491395793499,
"grad_norm": 2.243589401245117,
"learning_rate": 0.0004365838113448056,
"loss": 2.8407,
"step": 1120
},
{
"epoch": 2.179732313575526,
"grad_norm": 2.3046114444732666,
"learning_rate": 0.00043445931591247077,
"loss": 2.8377,
"step": 1140
},
{
"epoch": 2.2179732313575524,
"grad_norm": 2.375234603881836,
"learning_rate": 0.000432334820480136,
"loss": 2.7642,
"step": 1160
},
{
"epoch": 2.2562141491395793,
"grad_norm": 2.36006236076355,
"learning_rate": 0.00043021032504780116,
"loss": 2.8011,
"step": 1180
},
{
"epoch": 2.294455066921606,
"grad_norm": 2.246645450592041,
"learning_rate": 0.0004280858296154663,
"loss": 2.7502,
"step": 1200
},
{
"epoch": 2.332695984703633,
"grad_norm": 2.420032501220703,
"learning_rate": 0.0004259613341831315,
"loss": 2.6646,
"step": 1220
},
{
"epoch": 2.3709369024856595,
"grad_norm": 2.3049144744873047,
"learning_rate": 0.0004238368387507967,
"loss": 2.7294,
"step": 1240
},
{
"epoch": 2.4091778202676863,
"grad_norm": 2.3154027462005615,
"learning_rate": 0.00042171234331846185,
"loss": 2.7323,
"step": 1260
},
{
"epoch": 2.447418738049713,
"grad_norm": 2.3349056243896484,
"learning_rate": 0.00041958784788612705,
"loss": 2.6435,
"step": 1280
},
{
"epoch": 2.48565965583174,
"grad_norm": 2.3369665145874023,
"learning_rate": 0.00041746335245379225,
"loss": 2.5938,
"step": 1300
},
{
"epoch": 2.5239005736137665,
"grad_norm": 2.115408182144165,
"learning_rate": 0.0004153388570214574,
"loss": 2.5557,
"step": 1320
},
{
"epoch": 2.5621414913957934,
"grad_norm": 2.2280750274658203,
"learning_rate": 0.00041321436158912254,
"loss": 2.5262,
"step": 1340
},
{
"epoch": 2.6003824091778203,
"grad_norm": 2.3640174865722656,
"learning_rate": 0.0004110898661567878,
"loss": 2.5302,
"step": 1360
},
{
"epoch": 2.638623326959847,
"grad_norm": 2.220531940460205,
"learning_rate": 0.00040896537072445294,
"loss": 2.4982,
"step": 1380
},
{
"epoch": 2.676864244741874,
"grad_norm": 2.19985294342041,
"learning_rate": 0.0004068408752921181,
"loss": 2.4884,
"step": 1400
},
{
"epoch": 2.7151051625239004,
"grad_norm": 2.083179235458374,
"learning_rate": 0.00040471637985978334,
"loss": 2.4422,
"step": 1420
},
{
"epoch": 2.7533460803059273,
"grad_norm": 2.171346664428711,
"learning_rate": 0.0004025918844274485,
"loss": 2.4187,
"step": 1440
},
{
"epoch": 2.791586998087954,
"grad_norm": 2.330702304840088,
"learning_rate": 0.0004004673889951136,
"loss": 2.3988,
"step": 1460
},
{
"epoch": 2.8298279158699806,
"grad_norm": 2.3132684230804443,
"learning_rate": 0.0003983428935627789,
"loss": 2.3606,
"step": 1480
},
{
"epoch": 2.8680688336520075,
"grad_norm": 2.188427686691284,
"learning_rate": 0.000396218398130444,
"loss": 2.3411,
"step": 1500
},
{
"epoch": 2.9063097514340344,
"grad_norm": 2.1979410648345947,
"learning_rate": 0.00039409390269810917,
"loss": 2.3314,
"step": 1520
},
{
"epoch": 2.9445506692160612,
"grad_norm": 2.246650218963623,
"learning_rate": 0.0003919694072657744,
"loss": 2.3221,
"step": 1540
},
{
"epoch": 2.982791586998088,
"grad_norm": 2.2953240871429443,
"learning_rate": 0.00038984491183343957,
"loss": 2.2892,
"step": 1560
},
{
"epoch": 3.0,
"eval_accuracy": 0.6542885973763874,
"eval_loss": 1.6626524925231934,
"eval_runtime": 1164.8432,
"eval_samples_per_second": 12.761,
"eval_steps_per_second": 12.761,
"step": 1569
},
{
"epoch": 3.0210325047801145,
"grad_norm": 2.2067298889160156,
"learning_rate": 0.0003877204164011047,
"loss": 2.2228,
"step": 1580
},
{
"epoch": 3.0592734225621414,
"grad_norm": 2.2736258506774902,
"learning_rate": 0.00038559592096876997,
"loss": 2.1788,
"step": 1600
},
{
"epoch": 3.0975143403441683,
"grad_norm": 2.324817419052124,
"learning_rate": 0.0003834714255364351,
"loss": 2.1743,
"step": 1620
},
{
"epoch": 3.135755258126195,
"grad_norm": 2.188101291656494,
"learning_rate": 0.00038134693010410026,
"loss": 2.1267,
"step": 1640
},
{
"epoch": 3.173996175908222,
"grad_norm": 2.2637600898742676,
"learning_rate": 0.00037922243467176546,
"loss": 2.0835,
"step": 1660
},
{
"epoch": 3.2122370936902485,
"grad_norm": 2.2315452098846436,
"learning_rate": 0.00037709793923943065,
"loss": 2.0944,
"step": 1680
},
{
"epoch": 3.2504780114722753,
"grad_norm": 2.236682176589966,
"learning_rate": 0.0003749734438070958,
"loss": 2.0867,
"step": 1700
},
{
"epoch": 3.288718929254302,
"grad_norm": 2.1970863342285156,
"learning_rate": 0.000372848948374761,
"loss": 2.0138,
"step": 1720
},
{
"epoch": 3.3269598470363286,
"grad_norm": 2.2857913970947266,
"learning_rate": 0.0003707244529424262,
"loss": 2.0081,
"step": 1740
},
{
"epoch": 3.3652007648183555,
"grad_norm": 2.256211280822754,
"learning_rate": 0.00036859995751009134,
"loss": 1.9887,
"step": 1760
},
{
"epoch": 3.4034416826003824,
"grad_norm": 2.3634960651397705,
"learning_rate": 0.00036647546207775654,
"loss": 1.9965,
"step": 1780
},
{
"epoch": 3.4416826003824093,
"grad_norm": 2.2254064083099365,
"learning_rate": 0.00036435096664542174,
"loss": 2.0024,
"step": 1800
},
{
"epoch": 3.479923518164436,
"grad_norm": 2.271703004837036,
"learning_rate": 0.0003622264712130869,
"loss": 2.0024,
"step": 1820
},
{
"epoch": 3.5181644359464626,
"grad_norm": 2.220524549484253,
"learning_rate": 0.0003601019757807521,
"loss": 1.9561,
"step": 1840
},
{
"epoch": 3.5564053537284894,
"grad_norm": 2.3010692596435547,
"learning_rate": 0.0003579774803484173,
"loss": 1.9641,
"step": 1860
},
{
"epoch": 3.5946462715105163,
"grad_norm": 2.2860300540924072,
"learning_rate": 0.00035585298491608243,
"loss": 1.9032,
"step": 1880
},
{
"epoch": 3.632887189292543,
"grad_norm": 2.2572293281555176,
"learning_rate": 0.00035372848948374763,
"loss": 1.9115,
"step": 1900
},
{
"epoch": 3.67112810707457,
"grad_norm": 2.441771984100342,
"learning_rate": 0.0003516039940514128,
"loss": 1.8957,
"step": 1920
},
{
"epoch": 3.7093690248565965,
"grad_norm": 2.3750736713409424,
"learning_rate": 0.00034947949861907797,
"loss": 1.8638,
"step": 1940
},
{
"epoch": 3.7476099426386233,
"grad_norm": 2.114288806915283,
"learning_rate": 0.00034735500318674317,
"loss": 1.8105,
"step": 1960
},
{
"epoch": 3.78585086042065,
"grad_norm": 2.3129258155822754,
"learning_rate": 0.0003452305077544083,
"loss": 1.8908,
"step": 1980
},
{
"epoch": 3.8240917782026767,
"grad_norm": 2.217604398727417,
"learning_rate": 0.0003431060123220735,
"loss": 1.8116,
"step": 2000
},
{
"epoch": 3.8623326959847035,
"grad_norm": 2.2592735290527344,
"learning_rate": 0.0003409815168897387,
"loss": 1.8325,
"step": 2020
},
{
"epoch": 3.9005736137667304,
"grad_norm": 2.321389675140381,
"learning_rate": 0.00033885702145740386,
"loss": 1.7757,
"step": 2040
},
{
"epoch": 3.9388145315487573,
"grad_norm": 2.0709521770477295,
"learning_rate": 0.00033673252602506906,
"loss": 1.7801,
"step": 2060
},
{
"epoch": 3.977055449330784,
"grad_norm": 2.3316352367401123,
"learning_rate": 0.00033460803059273426,
"loss": 1.7576,
"step": 2080
},
{
"epoch": 4.0,
"eval_accuracy": 0.758627648839556,
"eval_loss": 1.1761366128921509,
"eval_runtime": 910.1664,
"eval_samples_per_second": 16.332,
"eval_steps_per_second": 16.332,
"step": 2092
},
{
"epoch": 4.015296367112811,
"grad_norm": 2.2446036338806152,
"learning_rate": 0.0003324835351603994,
"loss": 1.7157,
"step": 2100
},
{
"epoch": 4.053537284894838,
"grad_norm": 2.1550064086914062,
"learning_rate": 0.0003303590397280646,
"loss": 1.6126,
"step": 2120
},
{
"epoch": 4.091778202676864,
"grad_norm": 2.3542799949645996,
"learning_rate": 0.0003282345442957298,
"loss": 1.6417,
"step": 2140
},
{
"epoch": 4.130019120458891,
"grad_norm": 2.3036603927612305,
"learning_rate": 0.00032611004886339494,
"loss": 1.6108,
"step": 2160
},
{
"epoch": 4.168260038240918,
"grad_norm": 2.168532133102417,
"learning_rate": 0.00032398555343106014,
"loss": 1.5939,
"step": 2180
},
{
"epoch": 4.2065009560229445,
"grad_norm": 2.257270097732544,
"learning_rate": 0.0003218610579987253,
"loss": 1.6157,
"step": 2200
},
{
"epoch": 4.244741873804971,
"grad_norm": 2.2476625442504883,
"learning_rate": 0.0003197365625663905,
"loss": 1.6477,
"step": 2220
},
{
"epoch": 4.282982791586998,
"grad_norm": 2.162182092666626,
"learning_rate": 0.00031761206713405563,
"loss": 1.5589,
"step": 2240
},
{
"epoch": 4.321223709369025,
"grad_norm": 2.084498167037964,
"learning_rate": 0.00031548757170172083,
"loss": 1.5683,
"step": 2260
},
{
"epoch": 4.359464627151052,
"grad_norm": 2.1769893169403076,
"learning_rate": 0.00031336307626938603,
"loss": 1.5324,
"step": 2280
},
{
"epoch": 4.397705544933078,
"grad_norm": 2.1961722373962402,
"learning_rate": 0.0003112385808370512,
"loss": 1.5551,
"step": 2300
},
{
"epoch": 4.435946462715105,
"grad_norm": 2.2195024490356445,
"learning_rate": 0.0003091140854047164,
"loss": 1.5724,
"step": 2320
},
{
"epoch": 4.474187380497132,
"grad_norm": 2.18676495552063,
"learning_rate": 0.0003069895899723816,
"loss": 1.5322,
"step": 2340
},
{
"epoch": 4.512428298279159,
"grad_norm": 2.2305195331573486,
"learning_rate": 0.0003048650945400467,
"loss": 1.5207,
"step": 2360
},
{
"epoch": 4.550669216061186,
"grad_norm": 2.297010898590088,
"learning_rate": 0.0003027405991077119,
"loss": 1.5712,
"step": 2380
},
{
"epoch": 4.588910133843212,
"grad_norm": 2.261610746383667,
"learning_rate": 0.0003006161036753771,
"loss": 1.513,
"step": 2400
},
{
"epoch": 4.627151051625239,
"grad_norm": 2.1144425868988037,
"learning_rate": 0.00029849160824304226,
"loss": 1.4854,
"step": 2420
},
{
"epoch": 4.665391969407266,
"grad_norm": 2.344909191131592,
"learning_rate": 0.00029636711281070746,
"loss": 1.5025,
"step": 2440
},
{
"epoch": 4.7036328871892925,
"grad_norm": 2.1943275928497314,
"learning_rate": 0.00029424261737837266,
"loss": 1.4917,
"step": 2460
},
{
"epoch": 4.741873804971319,
"grad_norm": 2.191070079803467,
"learning_rate": 0.0002921181219460378,
"loss": 1.4539,
"step": 2480
},
{
"epoch": 4.780114722753346,
"grad_norm": 2.142045259475708,
"learning_rate": 0.000289993626513703,
"loss": 1.4688,
"step": 2500
},
{
"epoch": 4.818355640535373,
"grad_norm": 2.203152656555176,
"learning_rate": 0.0002878691310813682,
"loss": 1.4236,
"step": 2520
},
{
"epoch": 4.8565965583174,
"grad_norm": 2.264139175415039,
"learning_rate": 0.00028574463564903335,
"loss": 1.4211,
"step": 2540
},
{
"epoch": 4.894837476099426,
"grad_norm": 2.173497438430786,
"learning_rate": 0.0002836201402166985,
"loss": 1.4236,
"step": 2560
},
{
"epoch": 4.933078393881453,
"grad_norm": 2.069350481033325,
"learning_rate": 0.00028149564478436375,
"loss": 1.3804,
"step": 2580
},
{
"epoch": 4.97131931166348,
"grad_norm": 2.1907660961151123,
"learning_rate": 0.0002793711493520289,
"loss": 1.3706,
"step": 2600
},
{
"epoch": 5.0,
"eval_accuracy": 0.8203834510595358,
"eval_loss": 0.8903235793113708,
"eval_runtime": 813.2495,
"eval_samples_per_second": 18.279,
"eval_steps_per_second": 18.279,
"step": 2615
},
{
"epoch": 5.009560229445507,
"grad_norm": 2.2395236492156982,
"learning_rate": 0.00027724665391969404,
"loss": 1.3611,
"step": 2620
},
{
"epoch": 5.047801147227533,
"grad_norm": 2.164022922515869,
"learning_rate": 0.0002751221584873593,
"loss": 1.2885,
"step": 2640
},
{
"epoch": 5.08604206500956,
"grad_norm": 2.335524559020996,
"learning_rate": 0.00027299766305502443,
"loss": 1.3024,
"step": 2660
},
{
"epoch": 5.124282982791587,
"grad_norm": 2.3510003089904785,
"learning_rate": 0.0002708731676226896,
"loss": 1.2819,
"step": 2680
},
{
"epoch": 5.162523900573614,
"grad_norm": 2.2182960510253906,
"learning_rate": 0.00026874867219035483,
"loss": 1.2927,
"step": 2700
},
{
"epoch": 5.2007648183556405,
"grad_norm": 2.164422035217285,
"learning_rate": 0.00026662417675802,
"loss": 1.2905,
"step": 2720
},
{
"epoch": 5.239005736137667,
"grad_norm": 2.2719788551330566,
"learning_rate": 0.0002644996813256851,
"loss": 1.3067,
"step": 2740
},
{
"epoch": 5.277246653919694,
"grad_norm": 2.1733806133270264,
"learning_rate": 0.0002623751858933504,
"loss": 1.2778,
"step": 2760
},
{
"epoch": 5.315487571701721,
"grad_norm": 2.0940115451812744,
"learning_rate": 0.0002602506904610155,
"loss": 1.2553,
"step": 2780
},
{
"epoch": 5.353728489483748,
"grad_norm": 2.049928665161133,
"learning_rate": 0.00025812619502868067,
"loss": 1.2572,
"step": 2800
},
{
"epoch": 5.3919694072657744,
"grad_norm": 2.039212703704834,
"learning_rate": 0.0002560016995963459,
"loss": 1.2139,
"step": 2820
},
{
"epoch": 5.430210325047801,
"grad_norm": 2.2491774559020996,
"learning_rate": 0.00025387720416401106,
"loss": 1.2591,
"step": 2840
},
{
"epoch": 5.468451242829828,
"grad_norm": 2.1145877838134766,
"learning_rate": 0.0002517527087316762,
"loss": 1.2458,
"step": 2860
},
{
"epoch": 5.506692160611855,
"grad_norm": 2.1274683475494385,
"learning_rate": 0.0002496282132993414,
"loss": 1.227,
"step": 2880
},
{
"epoch": 5.544933078393882,
"grad_norm": 2.0846710205078125,
"learning_rate": 0.0002475037178670066,
"loss": 1.1897,
"step": 2900
},
{
"epoch": 5.583173996175908,
"grad_norm": 2.143688678741455,
"learning_rate": 0.0002453792224346718,
"loss": 1.2133,
"step": 2920
},
{
"epoch": 5.621414913957935,
"grad_norm": 2.2568676471710205,
"learning_rate": 0.00024325472700233695,
"loss": 1.2194,
"step": 2940
},
{
"epoch": 5.659655831739962,
"grad_norm": 2.266052484512329,
"learning_rate": 0.00024113023157000212,
"loss": 1.1895,
"step": 2960
},
{
"epoch": 5.6978967495219885,
"grad_norm": 2.352609395980835,
"learning_rate": 0.0002390057361376673,
"loss": 1.1849,
"step": 2980
},
{
"epoch": 5.736137667304015,
"grad_norm": 2.1269915103912354,
"learning_rate": 0.0002368812407053325,
"loss": 1.205,
"step": 3000
},
{
"epoch": 5.774378585086042,
"grad_norm": 2.20017147064209,
"learning_rate": 0.00023475674527299767,
"loss": 1.1366,
"step": 3020
},
{
"epoch": 5.812619502868069,
"grad_norm": 2.0107390880584717,
"learning_rate": 0.00023263224984066284,
"loss": 1.1268,
"step": 3040
},
{
"epoch": 5.850860420650095,
"grad_norm": 2.091771364212036,
"learning_rate": 0.00023050775440832804,
"loss": 1.1619,
"step": 3060
},
{
"epoch": 5.8891013384321225,
"grad_norm": 2.261592388153076,
"learning_rate": 0.0002283832589759932,
"loss": 1.1789,
"step": 3080
},
{
"epoch": 5.927342256214149,
"grad_norm": 2.126786947250366,
"learning_rate": 0.00022625876354365838,
"loss": 1.1492,
"step": 3100
},
{
"epoch": 5.965583173996176,
"grad_norm": 2.123417615890503,
"learning_rate": 0.00022413426811132355,
"loss": 1.1258,
"step": 3120
},
{
"epoch": 6.0,
"eval_accuracy": 0.8432559704002691,
"eval_loss": 0.7555378079414368,
"eval_runtime": 1118.6028,
"eval_samples_per_second": 13.289,
"eval_steps_per_second": 13.289,
"step": 3138
},
{
"epoch": 6.003824091778203,
"grad_norm": 2.0020689964294434,
"learning_rate": 0.00022200977267898875,
"loss": 1.1086,
"step": 3140
},
{
"epoch": 6.042065009560229,
"grad_norm": 2.0867371559143066,
"learning_rate": 0.00021988527724665392,
"loss": 1.0815,
"step": 3160
},
{
"epoch": 6.080305927342256,
"grad_norm": 1.9533103704452515,
"learning_rate": 0.0002177607818143191,
"loss": 1.0412,
"step": 3180
},
{
"epoch": 6.118546845124283,
"grad_norm": 2.2522337436676025,
"learning_rate": 0.0002156362863819843,
"loss": 1.0312,
"step": 3200
},
{
"epoch": 6.15678776290631,
"grad_norm": 2.058983087539673,
"learning_rate": 0.00021351179094964944,
"loss": 1.0449,
"step": 3220
},
{
"epoch": 6.195028680688337,
"grad_norm": 2.1774373054504395,
"learning_rate": 0.00021138729551731464,
"loss": 1.0532,
"step": 3240
},
{
"epoch": 6.233269598470363,
"grad_norm": 2.0248055458068848,
"learning_rate": 0.00020926280008497984,
"loss": 1.0461,
"step": 3260
},
{
"epoch": 6.27151051625239,
"grad_norm": 2.066737651824951,
"learning_rate": 0.00020713830465264498,
"loss": 1.0243,
"step": 3280
},
{
"epoch": 6.309751434034417,
"grad_norm": 2.100133180618286,
"learning_rate": 0.00020501380922031018,
"loss": 1.0309,
"step": 3300
},
{
"epoch": 6.347992351816444,
"grad_norm": 2.192960739135742,
"learning_rate": 0.00020288931378797538,
"loss": 1.0147,
"step": 3320
},
{
"epoch": 6.3862332695984705,
"grad_norm": 2.123382329940796,
"learning_rate": 0.00020076481835564053,
"loss": 1.0229,
"step": 3340
},
{
"epoch": 6.424474187380497,
"grad_norm": 2.162189245223999,
"learning_rate": 0.00019864032292330573,
"loss": 1.011,
"step": 3360
},
{
"epoch": 6.462715105162524,
"grad_norm": 2.254284143447876,
"learning_rate": 0.0001965158274909709,
"loss": 0.9929,
"step": 3380
},
{
"epoch": 6.500956022944551,
"grad_norm": 2.2183146476745605,
"learning_rate": 0.00019439133205863607,
"loss": 0.9852,
"step": 3400
},
{
"epoch": 6.539196940726577,
"grad_norm": 2.1704065799713135,
"learning_rate": 0.00019226683662630127,
"loss": 1.0129,
"step": 3420
},
{
"epoch": 6.577437858508604,
"grad_norm": 2.0828094482421875,
"learning_rate": 0.00019014234119396644,
"loss": 0.9667,
"step": 3440
},
{
"epoch": 6.615678776290631,
"grad_norm": 2.3150341510772705,
"learning_rate": 0.0001880178457616316,
"loss": 0.9769,
"step": 3460
},
{
"epoch": 6.653919694072657,
"grad_norm": 2.0636775493621826,
"learning_rate": 0.0001858933503292968,
"loss": 0.979,
"step": 3480
},
{
"epoch": 6.692160611854685,
"grad_norm": 2.171602487564087,
"learning_rate": 0.00018376885489696196,
"loss": 0.9667,
"step": 3500
},
{
"epoch": 6.730401529636711,
"grad_norm": 2.1150522232055664,
"learning_rate": 0.00018164435946462716,
"loss": 0.9593,
"step": 3520
},
{
"epoch": 6.768642447418738,
"grad_norm": 2.096452236175537,
"learning_rate": 0.00017951986403229233,
"loss": 0.9828,
"step": 3540
},
{
"epoch": 6.806883365200765,
"grad_norm": 2.2779390811920166,
"learning_rate": 0.0001773953685999575,
"loss": 0.9648,
"step": 3560
},
{
"epoch": 6.845124282982791,
"grad_norm": 2.2793240547180176,
"learning_rate": 0.0001752708731676227,
"loss": 0.9639,
"step": 3580
},
{
"epoch": 6.8833652007648185,
"grad_norm": 2.0822837352752686,
"learning_rate": 0.00017314637773528787,
"loss": 0.9421,
"step": 3600
},
{
"epoch": 6.921606118546845,
"grad_norm": 2.1541502475738525,
"learning_rate": 0.00017102188230295304,
"loss": 0.9277,
"step": 3620
},
{
"epoch": 6.959847036328872,
"grad_norm": 2.1241962909698486,
"learning_rate": 0.00016889738687061824,
"loss": 0.9335,
"step": 3640
},
{
"epoch": 6.998087954110899,
"grad_norm": 2.2354745864868164,
"learning_rate": 0.00016677289143828341,
"loss": 0.9379,
"step": 3660
},
{
"epoch": 7.0,
"eval_accuracy": 0.889673730238816,
"eval_loss": 0.5586521029472351,
"eval_runtime": 904.3879,
"eval_samples_per_second": 16.437,
"eval_steps_per_second": 16.437,
"step": 3661
},
{
"epoch": 7.036328871892925,
"grad_norm": 2.109501600265503,
"learning_rate": 0.0001646483960059486,
"loss": 0.8485,
"step": 3680
},
{
"epoch": 7.074569789674952,
"grad_norm": 2.2034125328063965,
"learning_rate": 0.00016252390057361376,
"loss": 0.897,
"step": 3700
},
{
"epoch": 7.112810707456979,
"grad_norm": 2.053767204284668,
"learning_rate": 0.00016039940514127896,
"loss": 0.859,
"step": 3720
},
{
"epoch": 7.151051625239006,
"grad_norm": 1.977229118347168,
"learning_rate": 0.00015827490970894413,
"loss": 0.8643,
"step": 3740
},
{
"epoch": 7.189292543021033,
"grad_norm": 2.0782172679901123,
"learning_rate": 0.0001561504142766093,
"loss": 0.8665,
"step": 3760
},
{
"epoch": 7.227533460803059,
"grad_norm": 1.9473881721496582,
"learning_rate": 0.0001540259188442745,
"loss": 0.8748,
"step": 3780
},
{
"epoch": 7.265774378585086,
"grad_norm": 2.0109825134277344,
"learning_rate": 0.00015190142341193967,
"loss": 0.8589,
"step": 3800
},
{
"epoch": 7.304015296367113,
"grad_norm": 2.1534509658813477,
"learning_rate": 0.00014977692797960484,
"loss": 0.8373,
"step": 3820
},
{
"epoch": 7.342256214149139,
"grad_norm": 2.1457502841949463,
"learning_rate": 0.00014765243254727004,
"loss": 0.8487,
"step": 3840
},
{
"epoch": 7.3804971319311665,
"grad_norm": 2.200446844100952,
"learning_rate": 0.0001455279371149352,
"loss": 0.841,
"step": 3860
},
{
"epoch": 7.418738049713193,
"grad_norm": 2.0335378646850586,
"learning_rate": 0.0001434034416826004,
"loss": 0.846,
"step": 3880
},
{
"epoch": 7.45697896749522,
"grad_norm": 2.1471550464630127,
"learning_rate": 0.0001412789462502656,
"loss": 0.8442,
"step": 3900
},
{
"epoch": 7.495219885277247,
"grad_norm": 2.2554261684417725,
"learning_rate": 0.00013915445081793073,
"loss": 0.8651,
"step": 3920
},
{
"epoch": 7.533460803059273,
"grad_norm": 2.1498897075653076,
"learning_rate": 0.00013702995538559593,
"loss": 0.839,
"step": 3940
},
{
"epoch": 7.5717017208413,
"grad_norm": 2.1460678577423096,
"learning_rate": 0.00013490545995326113,
"loss": 0.8449,
"step": 3960
},
{
"epoch": 7.609942638623327,
"grad_norm": 2.194730281829834,
"learning_rate": 0.00013278096452092628,
"loss": 0.8075,
"step": 3980
},
{
"epoch": 7.648183556405353,
"grad_norm": 2.291327714920044,
"learning_rate": 0.00013065646908859147,
"loss": 0.8163,
"step": 4000
},
{
"epoch": 7.686424474187381,
"grad_norm": 2.049647808074951,
"learning_rate": 0.00012853197365625662,
"loss": 0.7999,
"step": 4020
},
{
"epoch": 7.724665391969407,
"grad_norm": 2.026001214981079,
"learning_rate": 0.00012640747822392182,
"loss": 0.7721,
"step": 4040
},
{
"epoch": 7.762906309751434,
"grad_norm": 2.1242027282714844,
"learning_rate": 0.00012428298279158702,
"loss": 0.7957,
"step": 4060
},
{
"epoch": 7.801147227533461,
"grad_norm": 2.078123092651367,
"learning_rate": 0.0001221584873592522,
"loss": 0.8244,
"step": 4080
},
{
"epoch": 7.839388145315487,
"grad_norm": 2.028822183609009,
"learning_rate": 0.00012003399192691736,
"loss": 0.8066,
"step": 4100
},
{
"epoch": 7.8776290630975145,
"grad_norm": 2.30352520942688,
"learning_rate": 0.00011790949649458253,
"loss": 0.7892,
"step": 4120
},
{
"epoch": 7.915869980879541,
"grad_norm": 2.100221872329712,
"learning_rate": 0.00011578500106224772,
"loss": 0.7703,
"step": 4140
},
{
"epoch": 7.954110898661568,
"grad_norm": 2.1163010597229004,
"learning_rate": 0.00011366050562991289,
"loss": 0.7958,
"step": 4160
},
{
"epoch": 7.992351816443595,
"grad_norm": 2.194089412689209,
"learning_rate": 0.00011153601019757808,
"loss": 0.7925,
"step": 4180
},
{
"epoch": 8.0,
"eval_accuracy": 0.9117389841910528,
"eval_loss": 0.4518139362335205,
"eval_runtime": 1278.572,
"eval_samples_per_second": 11.626,
"eval_steps_per_second": 11.626,
"step": 4184
},
{
"epoch": 8.030592734225621,
"grad_norm": 2.024029016494751,
"learning_rate": 0.00010941151476524326,
"loss": 0.7768,
"step": 4200
},
{
"epoch": 8.068833652007648,
"grad_norm": 2.101900577545166,
"learning_rate": 0.00010728701933290843,
"loss": 0.7174,
"step": 4220
},
{
"epoch": 8.107074569789676,
"grad_norm": 1.8774019479751587,
"learning_rate": 0.0001051625239005736,
"loss": 0.7331,
"step": 4240
},
{
"epoch": 8.145315487571702,
"grad_norm": 2.0548768043518066,
"learning_rate": 0.0001030380284682388,
"loss": 0.6988,
"step": 4260
},
{
"epoch": 8.183556405353729,
"grad_norm": 2.09981107711792,
"learning_rate": 0.00010091353303590398,
"loss": 0.7329,
"step": 4280
},
{
"epoch": 8.221797323135755,
"grad_norm": 1.9958146810531616,
"learning_rate": 9.878903760356915e-05,
"loss": 0.7085,
"step": 4300
},
{
"epoch": 8.260038240917781,
"grad_norm": 1.9749237298965454,
"learning_rate": 9.666454217123433e-05,
"loss": 0.715,
"step": 4320
},
{
"epoch": 8.29827915869981,
"grad_norm": 1.9691994190216064,
"learning_rate": 9.454004673889952e-05,
"loss": 0.7151,
"step": 4340
},
{
"epoch": 8.336520076481836,
"grad_norm": 2.1549344062805176,
"learning_rate": 9.241555130656469e-05,
"loss": 0.7249,
"step": 4360
},
{
"epoch": 8.374760994263863,
"grad_norm": 2.2257604598999023,
"learning_rate": 9.029105587422986e-05,
"loss": 0.7207,
"step": 4380
},
{
"epoch": 8.413001912045889,
"grad_norm": 2.0424046516418457,
"learning_rate": 8.816656044189505e-05,
"loss": 0.6981,
"step": 4400
},
{
"epoch": 8.451242829827915,
"grad_norm": 1.8724883794784546,
"learning_rate": 8.604206500956024e-05,
"loss": 0.7153,
"step": 4420
},
{
"epoch": 8.489483747609942,
"grad_norm": 2.0893712043762207,
"learning_rate": 8.391756957722541e-05,
"loss": 0.7254,
"step": 4440
},
{
"epoch": 8.52772466539197,
"grad_norm": 2.0398759841918945,
"learning_rate": 8.179307414489059e-05,
"loss": 0.7184,
"step": 4460
},
{
"epoch": 8.565965583173996,
"grad_norm": 2.1322920322418213,
"learning_rate": 7.966857871255577e-05,
"loss": 0.7189,
"step": 4480
},
{
"epoch": 8.604206500956023,
"grad_norm": 2.0477068424224854,
"learning_rate": 7.754408328022095e-05,
"loss": 0.705,
"step": 4500
},
{
"epoch": 8.64244741873805,
"grad_norm": 2.0737295150756836,
"learning_rate": 7.541958784788614e-05,
"loss": 0.7285,
"step": 4520
},
{
"epoch": 8.680688336520076,
"grad_norm": 1.9542038440704346,
"learning_rate": 7.329509241555131e-05,
"loss": 0.7028,
"step": 4540
},
{
"epoch": 8.718929254302104,
"grad_norm": 1.9409737586975098,
"learning_rate": 7.117059698321648e-05,
"loss": 0.7031,
"step": 4560
},
{
"epoch": 8.75717017208413,
"grad_norm": 1.982315182685852,
"learning_rate": 6.904610155088168e-05,
"loss": 0.6909,
"step": 4580
},
{
"epoch": 8.795411089866157,
"grad_norm": 2.2722830772399902,
"learning_rate": 6.692160611854685e-05,
"loss": 0.7083,
"step": 4600
},
{
"epoch": 8.833652007648183,
"grad_norm": 1.9158116579055786,
"learning_rate": 6.479711068621202e-05,
"loss": 0.6885,
"step": 4620
},
{
"epoch": 8.87189292543021,
"grad_norm": 2.058122396469116,
"learning_rate": 6.26726152538772e-05,
"loss": 0.6912,
"step": 4640
},
{
"epoch": 8.910133843212238,
"grad_norm": 2.013585090637207,
"learning_rate": 6.054811982154239e-05,
"loss": 0.6937,
"step": 4660
},
{
"epoch": 8.948374760994264,
"grad_norm": 2.1662707328796387,
"learning_rate": 5.8423624389207567e-05,
"loss": 0.7003,
"step": 4680
},
{
"epoch": 8.98661567877629,
"grad_norm": 2.06309175491333,
"learning_rate": 5.6299128956872745e-05,
"loss": 0.6733,
"step": 4700
},
{
"epoch": 9.0,
"eval_accuracy": 0.929297006390851,
"eval_loss": 0.38889044523239136,
"eval_runtime": 1336.4014,
"eval_samples_per_second": 11.123,
"eval_steps_per_second": 11.123,
"step": 4707
},
{
"epoch": 9.024856596558317,
"grad_norm": 1.8986388444900513,
"learning_rate": 5.4174633524537924e-05,
"loss": 0.6593,
"step": 4720
},
{
"epoch": 9.063097514340344,
"grad_norm": 1.9107776880264282,
"learning_rate": 5.20501380922031e-05,
"loss": 0.6354,
"step": 4740
},
{
"epoch": 9.101338432122372,
"grad_norm": 2.0714404582977295,
"learning_rate": 4.992564265986829e-05,
"loss": 0.6531,
"step": 4760
},
{
"epoch": 9.139579349904398,
"grad_norm": 1.9571270942687988,
"learning_rate": 4.780114722753346e-05,
"loss": 0.6429,
"step": 4780
},
{
"epoch": 9.177820267686425,
"grad_norm": 1.9968469142913818,
"learning_rate": 4.5676651795198646e-05,
"loss": 0.6498,
"step": 4800
},
{
"epoch": 9.216061185468451,
"grad_norm": 1.982832670211792,
"learning_rate": 4.355215636286382e-05,
"loss": 0.6141,
"step": 4820
},
{
"epoch": 9.254302103250478,
"grad_norm": 2.035123825073242,
"learning_rate": 4.1427660930529e-05,
"loss": 0.6269,
"step": 4840
},
{
"epoch": 9.292543021032504,
"grad_norm": 1.8670984506607056,
"learning_rate": 3.930316549819418e-05,
"loss": 0.6143,
"step": 4860
},
{
"epoch": 9.330783938814532,
"grad_norm": 1.9521456956863403,
"learning_rate": 3.7178670065859354e-05,
"loss": 0.6352,
"step": 4880
},
{
"epoch": 9.369024856596559,
"grad_norm": 1.9804260730743408,
"learning_rate": 3.505417463352454e-05,
"loss": 0.6214,
"step": 4900
},
{
"epoch": 9.407265774378585,
"grad_norm": 2.149338483810425,
"learning_rate": 3.292967920118971e-05,
"loss": 0.655,
"step": 4920
},
{
"epoch": 9.445506692160611,
"grad_norm": 1.8145477771759033,
"learning_rate": 3.08051837688549e-05,
"loss": 0.6095,
"step": 4940
},
{
"epoch": 9.483747609942638,
"grad_norm": 2.009587287902832,
"learning_rate": 2.8680688336520076e-05,
"loss": 0.6314,
"step": 4960
},
{
"epoch": 9.521988527724666,
"grad_norm": 1.946964979171753,
"learning_rate": 2.655619290418526e-05,
"loss": 0.6272,
"step": 4980
},
{
"epoch": 9.560229445506693,
"grad_norm": 2.2486774921417236,
"learning_rate": 2.4431697471850437e-05,
"loss": 0.6133,
"step": 5000
},
{
"epoch": 9.598470363288719,
"grad_norm": 1.9570778608322144,
"learning_rate": 2.2307202039515616e-05,
"loss": 0.6185,
"step": 5020
},
{
"epoch": 9.636711281070745,
"grad_norm": 1.8994139432907104,
"learning_rate": 2.0182706607180795e-05,
"loss": 0.6366,
"step": 5040
},
{
"epoch": 9.674952198852772,
"grad_norm": 2.102374792098999,
"learning_rate": 1.8058211174845977e-05,
"loss": 0.6414,
"step": 5060
},
{
"epoch": 9.7131931166348,
"grad_norm": 2.073791027069092,
"learning_rate": 1.5933715742511156e-05,
"loss": 0.6053,
"step": 5080
},
{
"epoch": 9.751434034416826,
"grad_norm": 1.9205766916275024,
"learning_rate": 1.3809220310176333e-05,
"loss": 0.6032,
"step": 5100
},
{
"epoch": 9.789674952198853,
"grad_norm": 2.04294490814209,
"learning_rate": 1.1684724877841512e-05,
"loss": 0.6202,
"step": 5120
},
{
"epoch": 9.82791586998088,
"grad_norm": 1.9936119318008423,
"learning_rate": 9.560229445506692e-06,
"loss": 0.5882,
"step": 5140
},
{
"epoch": 9.866156787762906,
"grad_norm": 1.8728615045547485,
"learning_rate": 7.435734013171872e-06,
"loss": 0.5955,
"step": 5160
},
{
"epoch": 9.904397705544934,
"grad_norm": 2.069910764694214,
"learning_rate": 5.311238580837051e-06,
"loss": 0.6049,
"step": 5180
},
{
"epoch": 9.94263862332696,
"grad_norm": 1.9873836040496826,
"learning_rate": 3.1867431485022306e-06,
"loss": 0.6251,
"step": 5200
},
{
"epoch": 9.980879541108987,
"grad_norm": 1.932947039604187,
"learning_rate": 1.0622477161674102e-06,
"loss": 0.6187,
"step": 5220
},
{
"epoch": 10.0,
"eval_accuracy": 0.9340733266061217,
"eval_loss": 0.35941872000694275,
"eval_runtime": 1417.2186,
"eval_samples_per_second": 10.489,
"eval_steps_per_second": 10.489,
"step": 5230
},
{
"epoch": 10.0,
"step": 5230,
"total_flos": 2.7283641066057605e+18,
"train_loss": 1.9864544938672797,
"train_runtime": 94798.7749,
"train_samples_per_second": 14.112,
"train_steps_per_second": 0.055
}
],
"logging_steps": 20,
"max_steps": 5230,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.7283641066057605e+18,
"train_batch_size": 256,
"trial_name": null,
"trial_params": null
}
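
The JSON above is the state the Hugging Face `Trainer` saves at the end of training. As a minimal sketch (not part of the original file), assuming the file is stored locally as `trainer_state.json`, the log could be parsed with the standard library to recover the per-step training loss and the per-epoch evaluation metrics:

```python
# Hypothetical helper: summarise a Hugging Face trainer_state.json.
# The file path is an assumption; point it at wherever the file actually lives.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries are logged every `logging_steps` (20) optimizer steps
# and carry a "loss" key; evaluation entries carry "eval_accuracy"/"eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_accuracy" in e]

print(f"best metric (eval_accuracy): {state['best_metric']:.4f}")
print(f"final training loss: {train_log[-1]['loss']:.4f} at step {train_log[-1]['step']}")
for e in eval_log:
    print(f"epoch {e['epoch']:>4.1f}  "
          f"eval_loss={e['eval_loss']:.4f}  "
          f"eval_accuracy={e['eval_accuracy']:.4f}")
```

Run against this file, the loop would print one line per epoch, tracing eval_accuracy from roughly 0.17 after epoch 1 to about 0.93 after epoch 10, matching the `best_metric` recorded above.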