{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008,
"grad_norm": 0.3058140892912027,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.5786,
"step": 10
},
{
"epoch": 0.016,
"grad_norm": 0.31255981400696337,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.5032,
"step": 20
},
{
"epoch": 0.024,
"grad_norm": 0.16719945295611136,
"learning_rate": 4.800000000000001e-06,
"loss": 0.3901,
"step": 30
},
{
"epoch": 0.032,
"grad_norm": 0.11095534933859139,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.2978,
"step": 40
},
{
"epoch": 0.04,
"grad_norm": 0.1114769024399936,
"learning_rate": 8.000000000000001e-06,
"loss": 0.2631,
"step": 50
},
{
"epoch": 0.048,
"grad_norm": 0.10411559733650633,
"learning_rate": 9.600000000000001e-06,
"loss": 0.242,
"step": 60
},
{
"epoch": 0.056,
"grad_norm": 0.08350193155299954,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.2214,
"step": 70
},
{
"epoch": 0.064,
"grad_norm": 0.10529290900139675,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.2134,
"step": 80
},
{
"epoch": 0.072,
"grad_norm": 0.09284266871657494,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.2122,
"step": 90
},
{
"epoch": 0.08,
"grad_norm": 0.09938443320990606,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.2035,
"step": 100
},
{
"epoch": 0.088,
"grad_norm": 0.10023595945326495,
"learning_rate": 1.76e-05,
"loss": 0.1841,
"step": 110
},
{
"epoch": 0.096,
"grad_norm": 0.0695781552706901,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.1877,
"step": 120
},
{
"epoch": 0.104,
"grad_norm": 0.07598876239857685,
"learning_rate": 1.9999781283802247e-05,
"loss": 0.1868,
"step": 130
},
{
"epoch": 0.112,
"grad_norm": 0.08623180966304644,
"learning_rate": 1.999803161162393e-05,
"loss": 0.2111,
"step": 140
},
{
"epoch": 0.12,
"grad_norm": 0.08260588578713098,
"learning_rate": 1.999453257340926e-05,
"loss": 0.1958,
"step": 150
},
{
"epoch": 0.128,
"grad_norm": 0.07792179475378579,
"learning_rate": 1.9989284781388617e-05,
"loss": 0.1835,
"step": 160
},
{
"epoch": 0.136,
"grad_norm": 0.07928527753977362,
"learning_rate": 1.9982289153773648e-05,
"loss": 0.1912,
"step": 170
},
{
"epoch": 0.144,
"grad_norm": 0.08357416949011726,
"learning_rate": 1.9973546914596622e-05,
"loss": 0.1885,
"step": 180
},
{
"epoch": 0.152,
"grad_norm": 0.08456641175983287,
"learning_rate": 1.996305959349627e-05,
"loss": 0.1841,
"step": 190
},
{
"epoch": 0.16,
"grad_norm": 0.0901579451770012,
"learning_rate": 1.9950829025450116e-05,
"loss": 0.1806,
"step": 200
},
{
"epoch": 0.168,
"grad_norm": 0.07982438004177704,
"learning_rate": 1.993685735045343e-05,
"loss": 0.1769,
"step": 210
},
{
"epoch": 0.176,
"grad_norm": 0.10398225393987126,
"learning_rate": 1.9921147013144782e-05,
"loss": 0.1895,
"step": 220
},
{
"epoch": 0.184,
"grad_norm": 0.1099351584849273,
"learning_rate": 1.9903700762378303e-05,
"loss": 0.18,
"step": 230
},
{
"epoch": 0.192,
"grad_norm": 0.09703206171895983,
"learning_rate": 1.9884521650742718e-05,
"loss": 0.17,
"step": 240
},
{
"epoch": 0.2,
"grad_norm": 0.09289058414749175,
"learning_rate": 1.9863613034027224e-05,
"loss": 0.1699,
"step": 250
},
{
"epoch": 0.208,
"grad_norm": 0.08208598708089178,
"learning_rate": 1.9840978570634338e-05,
"loss": 0.1843,
"step": 260
},
{
"epoch": 0.216,
"grad_norm": 0.08483871139912566,
"learning_rate": 1.9816622220939762e-05,
"loss": 0.1756,
"step": 270
},
{
"epoch": 0.224,
"grad_norm": 0.08382234577690319,
"learning_rate": 1.9790548246599447e-05,
"loss": 0.1746,
"step": 280
},
{
"epoch": 0.232,
"grad_norm": 0.11261586530879426,
"learning_rate": 1.976276120980393e-05,
"loss": 0.1739,
"step": 290
},
{
"epoch": 0.24,
"grad_norm": 0.0777808773898071,
"learning_rate": 1.973326597248006e-05,
"loss": 0.1791,
"step": 300
},
{
"epoch": 0.248,
"grad_norm": 0.0786352615035203,
"learning_rate": 1.9702067695440333e-05,
"loss": 0.1732,
"step": 310
},
{
"epoch": 0.256,
"grad_norm": 0.07355004150569275,
"learning_rate": 1.966917183747987e-05,
"loss": 0.1737,
"step": 320
},
{
"epoch": 0.264,
"grad_norm": 0.08213670035333703,
"learning_rate": 1.9634584154421316e-05,
"loss": 0.1777,
"step": 330
},
{
"epoch": 0.272,
"grad_norm": 0.07217668674817151,
"learning_rate": 1.95983106981077e-05,
"loss": 0.1695,
"step": 340
},
{
"epoch": 0.28,
"grad_norm": 0.07669208676566047,
"learning_rate": 1.9560357815343577e-05,
"loss": 0.1717,
"step": 350
},
{
"epoch": 0.288,
"grad_norm": 0.07736091266235384,
"learning_rate": 1.9520732146784493e-05,
"loss": 0.1718,
"step": 360
},
{
"epoch": 0.296,
"grad_norm": 0.08315079692157626,
"learning_rate": 1.947944062577507e-05,
"loss": 0.1776,
"step": 370
},
{
"epoch": 0.304,
"grad_norm": 0.09135194147148734,
"learning_rate": 1.9436490477135877e-05,
"loss": 0.173,
"step": 380
},
{
"epoch": 0.312,
"grad_norm": 0.0809078717299552,
"learning_rate": 1.93918892158993e-05,
"loss": 0.1773,
"step": 390
},
{
"epoch": 0.32,
"grad_norm": 0.07427395281421514,
"learning_rate": 1.934564464599461e-05,
"loss": 0.17,
"step": 400
},
{
"epoch": 0.328,
"grad_norm": 0.07398454431370932,
"learning_rate": 1.9297764858882516e-05,
"loss": 0.181,
"step": 410
},
{
"epoch": 0.336,
"grad_norm": 0.0792882992648581,
"learning_rate": 1.924825823213939e-05,
"loss": 0.1627,
"step": 420
},
{
"epoch": 0.344,
"grad_norm": 0.0861604357608397,
"learning_rate": 1.9197133427991437e-05,
"loss": 0.1731,
"step": 430
},
{
"epoch": 0.352,
"grad_norm": 0.07755055594882805,
"learning_rate": 1.9144399391799043e-05,
"loss": 0.1703,
"step": 440
},
{
"epoch": 0.36,
"grad_norm": 0.08634385297265851,
"learning_rate": 1.909006535049163e-05,
"loss": 0.1695,
"step": 450
},
{
"epoch": 0.368,
"grad_norm": 0.07460615095270867,
"learning_rate": 1.903414081095315e-05,
"loss": 0.1808,
"step": 460
},
{
"epoch": 0.376,
"grad_norm": 0.08366554725021544,
"learning_rate": 1.897663555835872e-05,
"loss": 0.1687,
"step": 470
},
{
"epoch": 0.384,
"grad_norm": 0.11050801098711051,
"learning_rate": 1.8917559654462474e-05,
"loss": 0.1693,
"step": 480
},
{
"epoch": 0.392,
"grad_norm": 0.07102922397109013,
"learning_rate": 1.8856923435837024e-05,
"loss": 0.1654,
"step": 490
},
{
"epoch": 0.4,
"grad_norm": 0.07992253400672722,
"learning_rate": 1.879473751206489e-05,
"loss": 0.164,
"step": 500
},
{
"epoch": 0.408,
"grad_norm": 0.08055086025032311,
"learning_rate": 1.8731012763882132e-05,
"loss": 0.1713,
"step": 510
},
{
"epoch": 0.416,
"grad_norm": 0.0737582298851665,
"learning_rate": 1.8665760341274505e-05,
"loss": 0.1691,
"step": 520
},
{
"epoch": 0.424,
"grad_norm": 0.07526226505268085,
"learning_rate": 1.859899166152657e-05,
"loss": 0.1612,
"step": 530
},
{
"epoch": 0.432,
"grad_norm": 0.07994952744363151,
"learning_rate": 1.8530718407223976e-05,
"loss": 0.1741,
"step": 540
},
{
"epoch": 0.44,
"grad_norm": 0.06771882882851786,
"learning_rate": 1.8460952524209355e-05,
"loss": 0.1771,
"step": 550
},
{
"epoch": 0.448,
"grad_norm": 0.07925796602812565,
"learning_rate": 1.8389706219492147e-05,
"loss": 0.1658,
"step": 560
},
{
"epoch": 0.456,
"grad_norm": 0.07737563218705418,
"learning_rate": 1.831699195911272e-05,
"loss": 0.1713,
"step": 570
},
{
"epoch": 0.464,
"grad_norm": 0.07405191205739156,
"learning_rate": 1.8242822465961177e-05,
"loss": 0.1687,
"step": 580
},
{
"epoch": 0.472,
"grad_norm": 0.08527959453273458,
"learning_rate": 1.8167210717551224e-05,
"loss": 0.1645,
"step": 590
},
{
"epoch": 0.48,
"grad_norm": 0.08246899769527764,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.1606,
"step": 600
},
{
"epoch": 0.488,
"grad_norm": 0.07147580637020562,
"learning_rate": 1.8011713624460608e-05,
"loss": 0.1628,
"step": 610
},
{
"epoch": 0.496,
"grad_norm": 0.07722727076416155,
"learning_rate": 1.793185548726878e-05,
"loss": 0.1759,
"step": 620
},
{
"epoch": 0.504,
"grad_norm": 0.06844389954410786,
"learning_rate": 1.785060950503568e-05,
"loss": 0.1748,
"step": 630
},
{
"epoch": 0.512,
"grad_norm": 0.07174329525930123,
"learning_rate": 1.7767989893455696e-05,
"loss": 0.1713,
"step": 640
},
{
"epoch": 0.52,
"grad_norm": 0.08532370630535416,
"learning_rate": 1.7684011108568593e-05,
"loss": 0.1622,
"step": 650
},
{
"epoch": 0.528,
"grad_norm": 0.06580211643615225,
"learning_rate": 1.759868784423009e-05,
"loss": 0.1593,
"step": 660
},
{
"epoch": 0.536,
"grad_norm": 0.07333177241207892,
"learning_rate": 1.7512035029540887e-05,
"loss": 0.1611,
"step": 670
},
{
"epoch": 0.544,
"grad_norm": 0.08247160155974288,
"learning_rate": 1.74240678262345e-05,
"loss": 0.157,
"step": 680
},
{
"epoch": 0.552,
"grad_norm": 0.06614899961508676,
"learning_rate": 1.73348016260244e-05,
"loss": 0.1635,
"step": 690
},
{
"epoch": 0.56,
"grad_norm": 0.08619484121470282,
"learning_rate": 1.7244252047910893e-05,
"loss": 0.1675,
"step": 700
},
{
"epoch": 0.568,
"grad_norm": 0.08412526181881948,
"learning_rate": 1.7152434935448257e-05,
"loss": 0.1562,
"step": 710
},
{
"epoch": 0.576,
"grad_norm": 0.07223298445136259,
"learning_rate": 1.705936635397259e-05,
"loss": 0.1687,
"step": 720
},
{
"epoch": 0.584,
"grad_norm": 0.06857251106754064,
"learning_rate": 1.6965062587790823e-05,
"loss": 0.1663,
"step": 730
},
{
"epoch": 0.592,
"grad_norm": 0.06696835945973133,
"learning_rate": 1.6869540137331445e-05,
"loss": 0.1674,
"step": 740
},
{
"epoch": 0.6,
"grad_norm": 0.06686201633689924,
"learning_rate": 1.6772815716257414e-05,
"loss": 0.174,
"step": 750
},
{
"epoch": 0.608,
"grad_norm": 0.08123286118785028,
"learning_rate": 1.667490624854173e-05,
"loss": 0.1667,
"step": 760
},
{
"epoch": 0.616,
"grad_norm": 0.14148707567389088,
"learning_rate": 1.6575828865506246e-05,
"loss": 0.1682,
"step": 770
},
{
"epoch": 0.624,
"grad_norm": 0.06485016340695926,
"learning_rate": 1.647560090282419e-05,
"loss": 0.1574,
"step": 780
},
{
"epoch": 0.632,
"grad_norm": 0.0867889552062448,
"learning_rate": 1.63742398974869e-05,
"loss": 0.1629,
"step": 790
},
{
"epoch": 0.64,
"grad_norm": 0.06811973631019821,
"learning_rate": 1.6271763584735373e-05,
"loss": 0.163,
"step": 800
},
{
"epoch": 0.648,
"grad_norm": 0.08610829160689334,
"learning_rate": 1.616818989495711e-05,
"loss": 0.1584,
"step": 810
},
{
"epoch": 0.656,
"grad_norm": 0.06829743878657456,
"learning_rate": 1.6063536950548825e-05,
"loss": 0.1611,
"step": 820
},
{
"epoch": 0.664,
"grad_norm": 0.07996846689663462,
"learning_rate": 1.595782306274553e-05,
"loss": 0.1728,
"step": 830
},
{
"epoch": 0.672,
"grad_norm": 0.06827784189424038,
"learning_rate": 1.5851066728416617e-05,
"loss": 0.1657,
"step": 840
},
{
"epoch": 0.68,
"grad_norm": 0.09993992156719474,
"learning_rate": 1.5743286626829437e-05,
"loss": 0.1577,
"step": 850
},
{
"epoch": 0.688,
"grad_norm": 0.0904528328527498,
"learning_rate": 1.5634501616380967e-05,
"loss": 0.1738,
"step": 860
},
{
"epoch": 0.696,
"grad_norm": 0.07536916136620567,
"learning_rate": 1.5524730731298136e-05,
"loss": 0.1724,
"step": 870
},
{
"epoch": 0.704,
"grad_norm": 0.07771163955093588,
"learning_rate": 1.541399317830738e-05,
"loss": 0.1587,
"step": 880
},
{
"epoch": 0.712,
"grad_norm": 0.06870339623694065,
"learning_rate": 1.530230833327405e-05,
"loss": 0.1662,
"step": 890
},
{
"epoch": 0.72,
"grad_norm": 0.08541842938568546,
"learning_rate": 1.5189695737812153e-05,
"loss": 0.1625,
"step": 900
},
{
"epoch": 0.728,
"grad_norm": 0.07174614527960509,
"learning_rate": 1.5076175095865171e-05,
"loss": 0.1716,
"step": 910
},
{
"epoch": 0.736,
"grad_norm": 0.07173726830388383,
"learning_rate": 1.4961766270258422e-05,
"loss": 0.1564,
"step": 920
},
{
"epoch": 0.744,
"grad_norm": 0.07774148753609791,
"learning_rate": 1.4846489279223653e-05,
"loss": 0.1688,
"step": 930
},
{
"epoch": 0.752,
"grad_norm": 0.07047837351321116,
"learning_rate": 1.473036429289641e-05,
"loss": 0.1671,
"step": 940
},
{
"epoch": 0.76,
"grad_norm": 0.07209116510890862,
"learning_rate": 1.461341162978688e-05,
"loss": 0.1636,
"step": 950
},
{
"epoch": 0.768,
"grad_norm": 0.0825643122647062,
"learning_rate": 1.4495651753224706e-05,
"loss": 0.1589,
"step": 960
},
{
"epoch": 0.776,
"grad_norm": 0.09574339039828667,
"learning_rate": 1.437710526777852e-05,
"loss": 0.1663,
"step": 970
},
{
"epoch": 0.784,
"grad_norm": 0.07179601153688872,
"learning_rate": 1.4257792915650728e-05,
"loss": 0.1583,
"step": 980
},
{
"epoch": 0.792,
"grad_norm": 0.07652431647542295,
"learning_rate": 1.4137735573048232e-05,
"loss": 0.1514,
"step": 990
},
{
"epoch": 0.8,
"grad_norm": 0.0686398495128086,
"learning_rate": 1.4016954246529697e-05,
"loss": 0.1504,
"step": 1000
},
{
"epoch": 0.808,
"grad_norm": 0.06508028906617441,
"learning_rate": 1.3895470069330003e-05,
"loss": 0.1599,
"step": 1010
},
{
"epoch": 0.816,
"grad_norm": 0.07608940173232373,
"learning_rate": 1.377330429766256e-05,
"loss": 0.1612,
"step": 1020
},
{
"epoch": 0.824,
"grad_norm": 0.06899267345664216,
"learning_rate": 1.3650478307000059e-05,
"loss": 0.1575,
"step": 1030
},
{
"epoch": 0.832,
"grad_norm": 0.06256387504627259,
"learning_rate": 1.3527013588334415e-05,
"loss": 0.1652,
"step": 1040
},
{
"epoch": 0.84,
"grad_norm": 0.07396943401991835,
"learning_rate": 1.3402931744416432e-05,
"loss": 0.1538,
"step": 1050
},
{
"epoch": 0.848,
"grad_norm": 0.06825533554005977,
"learning_rate": 1.3278254485975977e-05,
"loss": 0.16,
"step": 1060
},
{
"epoch": 0.856,
"grad_norm": 0.07700315050313282,
"learning_rate": 1.3153003627923217e-05,
"loss": 0.1551,
"step": 1070
},
{
"epoch": 0.864,
"grad_norm": 0.07759623789607299,
"learning_rate": 1.3027201085531633e-05,
"loss": 0.1635,
"step": 1080
},
{
"epoch": 0.872,
"grad_norm": 0.07689613377715925,
"learning_rate": 1.2900868870603502e-05,
"loss": 0.1587,
"step": 1090
},
{
"epoch": 0.88,
"grad_norm": 0.07385835492768326,
"learning_rate": 1.2774029087618448e-05,
"loss": 0.1709,
"step": 1100
},
{
"epoch": 0.888,
"grad_norm": 0.06043191054467158,
"learning_rate": 1.2646703929865817e-05,
"loss": 0.1637,
"step": 1110
},
{
"epoch": 0.896,
"grad_norm": 0.07533708016400092,
"learning_rate": 1.2518915675561482e-05,
"loss": 0.1682,
"step": 1120
},
{
"epoch": 0.904,
"grad_norm": 0.06791483038650707,
"learning_rate": 1.2390686683949799e-05,
"loss": 0.1444,
"step": 1130
},
{
"epoch": 0.912,
"grad_norm": 0.0815779013214324,
"learning_rate": 1.2262039391391405e-05,
"loss": 0.1569,
"step": 1140
},
{
"epoch": 0.92,
"grad_norm": 0.07372313932075553,
"learning_rate": 1.213299630743747e-05,
"loss": 0.1523,
"step": 1150
},
{
"epoch": 0.928,
"grad_norm": 0.08636256799085817,
"learning_rate": 1.2003580010891214e-05,
"loss": 0.1563,
"step": 1160
},
{
"epoch": 0.936,
"grad_norm": 0.0717205138861145,
"learning_rate": 1.187381314585725e-05,
"loss": 0.1647,
"step": 1170
},
{
"epoch": 0.944,
"grad_norm": 0.07005503393261057,
"learning_rate": 1.1743718417779518e-05,
"loss": 0.1663,
"step": 1180
},
{
"epoch": 0.952,
"grad_norm": 0.07064996616100924,
"learning_rate": 1.1613318589468512e-05,
"loss": 0.1589,
"step": 1190
},
{
"epoch": 0.96,
"grad_norm": 0.0662381053536627,
"learning_rate": 1.148263647711842e-05,
"loss": 0.161,
"step": 1200
},
{
"epoch": 0.968,
"grad_norm": 0.07589095377012324,
"learning_rate": 1.135169494631497e-05,
"loss": 0.1676,
"step": 1210
},
{
"epoch": 0.976,
"grad_norm": 0.07324628362518953,
"learning_rate": 1.1220516908034602e-05,
"loss": 0.1621,
"step": 1220
},
{
"epoch": 0.984,
"grad_norm": 0.07087940558217348,
"learning_rate": 1.1089125314635727e-05,
"loss": 0.163,
"step": 1230
},
{
"epoch": 0.992,
"grad_norm": 0.08079771757264623,
"learning_rate": 1.0957543155842703e-05,
"loss": 0.1594,
"step": 1240
},
{
"epoch": 1.0,
"grad_norm": 0.06413004885280083,
"learning_rate": 1.0825793454723325e-05,
"loss": 0.1554,
"step": 1250
},
{
"epoch": 1.008,
"grad_norm": 0.05476172368624532,
"learning_rate": 1.0693899263660442e-05,
"loss": 0.1102,
"step": 1260
},
{
"epoch": 1.016,
"grad_norm": 0.05632412500843437,
"learning_rate": 1.0561883660318456e-05,
"loss": 0.1028,
"step": 1270
},
{
"epoch": 1.024,
"grad_norm": 0.05646598003617287,
"learning_rate": 1.0429769743605406e-05,
"loss": 0.1108,
"step": 1280
},
{
"epoch": 1.032,
"grad_norm": 0.06842048425083827,
"learning_rate": 1.0297580629631324e-05,
"loss": 0.1046,
"step": 1290
},
{
"epoch": 1.04,
"grad_norm": 0.0597248090009715,
"learning_rate": 1.0165339447663586e-05,
"loss": 0.1035,
"step": 1300
},
{
"epoch": 1.048,
"grad_norm": 0.07398432585579974,
"learning_rate": 1.0033069336079952e-05,
"loss": 0.0987,
"step": 1310
},
{
"epoch": 1.056,
"grad_norm": 0.06505084369381099,
"learning_rate": 9.900793438320037e-06,
"loss": 0.0981,
"step": 1320
},
{
"epoch": 1.064,
"grad_norm": 0.06284724491257883,
"learning_rate": 9.768534898835864e-06,
"loss": 0.1079,
"step": 1330
},
{
"epoch": 1.072,
"grad_norm": 0.06844723724817173,
"learning_rate": 9.636316859042258e-06,
"loss": 0.1062,
"step": 1340
},
{
"epoch": 1.08,
"grad_norm": 0.06837187272099314,
"learning_rate": 9.504162453267776e-06,
"loss": 0.1056,
"step": 1350
},
{
"epoch": 1.088,
"grad_norm": 0.07008372679481019,
"learning_rate": 9.372094804706867e-06,
"loss": 0.1024,
"step": 1360
},
{
"epoch": 1.096,
"grad_norm": 0.05573557646176466,
"learning_rate": 9.24013702137397e-06,
"loss": 0.1004,
"step": 1370
},
{
"epoch": 1.104,
"grad_norm": 0.061630742090732805,
"learning_rate": 9.108312192060298e-06,
"loss": 0.0958,
"step": 1380
},
{
"epoch": 1.112,
"grad_norm": 0.0626839943409606,
"learning_rate": 8.97664338229395e-06,
"loss": 0.1025,
"step": 1390
},
{
"epoch": 1.12,
"grad_norm": 0.061119158952486805,
"learning_rate": 8.84515363030414e-06,
"loss": 0.0995,
"step": 1400
},
{
"epoch": 1.1280000000000001,
"grad_norm": 0.05757909222944341,
"learning_rate": 8.713865942990143e-06,
"loss": 0.1027,
"step": 1410
},
{
"epoch": 1.1360000000000001,
"grad_norm": 0.06947100250068435,
"learning_rate": 8.582803291895758e-06,
"loss": 0.1111,
"step": 1420
},
{
"epoch": 1.144,
"grad_norm": 0.0562229609672269,
"learning_rate": 8.451988609189987e-06,
"loss": 0.108,
"step": 1430
},
{
"epoch": 1.152,
"grad_norm": 0.06930898413364359,
"learning_rate": 8.321444783654524e-06,
"loss": 0.1078,
"step": 1440
},
{
"epoch": 1.16,
"grad_norm": 0.06571389182101994,
"learning_rate": 8.191194656678905e-06,
"loss": 0.0988,
"step": 1450
},
{
"epoch": 1.168,
"grad_norm": 0.06275542671231407,
"learning_rate": 8.06126101826392e-06,
"loss": 0.0961,
"step": 1460
},
{
"epoch": 1.176,
"grad_norm": 0.06782279303820063,
"learning_rate": 7.931666603034034e-06,
"loss": 0.1024,
"step": 1470
},
{
"epoch": 1.184,
"grad_norm": 0.06578749140882915,
"learning_rate": 7.80243408625947e-06,
"loss": 0.0961,
"step": 1480
},
{
"epoch": 1.192,
"grad_norm": 0.061803419288976154,
"learning_rate": 7.673586079888699e-06,
"loss": 0.1062,
"step": 1490
},
{
"epoch": 1.2,
"grad_norm": 0.0717440728997515,
"learning_rate": 7.545145128592009e-06,
"loss": 0.0989,
"step": 1500
},
{
"epoch": 1.208,
"grad_norm": 0.06490247759799975,
"learning_rate": 7.4171337058168365e-06,
"loss": 0.1045,
"step": 1510
},
{
"epoch": 1.216,
"grad_norm": 0.06226994133294724,
"learning_rate": 7.28957420985556e-06,
"loss": 0.0997,
"step": 1520
},
{
"epoch": 1.224,
"grad_norm": 0.07083340324266124,
"learning_rate": 7.16248895992645e-06,
"loss": 0.1002,
"step": 1530
},
{
"epoch": 1.232,
"grad_norm": 0.07767517880074136,
"learning_rate": 7.035900192268464e-06,
"loss": 0.1132,
"step": 1540
},
{
"epoch": 1.24,
"grad_norm": 0.06462545486578795,
"learning_rate": 6.909830056250527e-06,
"loss": 0.1028,
"step": 1550
},
{
"epoch": 1.248,
"grad_norm": 0.0754203548519236,
"learning_rate": 6.784300610496049e-06,
"loss": 0.1051,
"step": 1560
},
{
"epoch": 1.256,
"grad_norm": 0.06592132565995444,
"learning_rate": 6.659333819023291e-06,
"loss": 0.1047,
"step": 1570
},
{
"epoch": 1.264,
"grad_norm": 0.06961461995248241,
"learning_rate": 6.534951547402322e-06,
"loss": 0.1049,
"step": 1580
},
{
"epoch": 1.272,
"grad_norm": 0.06511899962594735,
"learning_rate": 6.411175558929152e-06,
"loss": 0.106,
"step": 1590
},
{
"epoch": 1.28,
"grad_norm": 0.05916934075813879,
"learning_rate": 6.2880275108177915e-06,
"loss": 0.1046,
"step": 1600
},
{
"epoch": 1.288,
"grad_norm": 0.07134708360913396,
"learning_rate": 6.165528950410884e-06,
"loss": 0.1076,
"step": 1610
},
{
"epoch": 1.296,
"grad_norm": 0.06849648011516535,
"learning_rate": 6.04370131140952e-06,
"loss": 0.1007,
"step": 1620
},
{
"epoch": 1.304,
"grad_norm": 0.0672608182320331,
"learning_rate": 5.922565910122967e-06,
"loss": 0.0973,
"step": 1630
},
{
"epoch": 1.312,
"grad_norm": 0.07780002144539479,
"learning_rate": 5.802143941738945e-06,
"loss": 0.1077,
"step": 1640
},
{
"epoch": 1.32,
"grad_norm": 0.06521931109585341,
"learning_rate": 5.6824564766150724e-06,
"loss": 0.108,
"step": 1650
},
{
"epoch": 1.328,
"grad_norm": 0.0837775649233135,
"learning_rate": 5.563524456592163e-06,
"loss": 0.1002,
"step": 1660
},
{
"epoch": 1.336,
"grad_norm": 0.06954289449761725,
"learning_rate": 5.445368691330008e-06,
"loss": 0.1049,
"step": 1670
},
{
"epoch": 1.3439999999999999,
"grad_norm": 0.06577591748805732,
"learning_rate": 5.328009854666303e-06,
"loss": 0.0984,
"step": 1680
},
{
"epoch": 1.3519999999999999,
"grad_norm": 0.06001848210345022,
"learning_rate": 5.211468480999304e-06,
"loss": 0.1049,
"step": 1690
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.05950122818785273,
"learning_rate": 5.095764961694923e-06,
"loss": 0.104,
"step": 1700
},
{
"epoch": 1.3679999999999999,
"grad_norm": 0.056395399165830855,
"learning_rate": 4.980919541518796e-06,
"loss": 0.1005,
"step": 1710
},
{
"epoch": 1.376,
"grad_norm": 0.06573253422421312,
"learning_rate": 4.866952315094088e-06,
"loss": 0.1009,
"step": 1720
},
{
"epoch": 1.384,
"grad_norm": 0.07121035158602729,
"learning_rate": 4.753883223385467e-06,
"loss": 0.0983,
"step": 1730
},
{
"epoch": 1.392,
"grad_norm": 0.06524819896068156,
"learning_rate": 4.641732050210032e-06,
"loss": 0.1051,
"step": 1740
},
{
"epoch": 1.4,
"grad_norm": 0.06825918255074036,
"learning_rate": 4.530518418775734e-06,
"loss": 0.094,
"step": 1750
},
{
"epoch": 1.408,
"grad_norm": 0.07443611934581895,
"learning_rate": 4.420261788247841e-06,
"loss": 0.1025,
"step": 1760
},
{
"epoch": 1.416,
"grad_norm": 0.06453930101827404,
"learning_rate": 4.3109814503441894e-06,
"loss": 0.1023,
"step": 1770
},
{
"epoch": 1.424,
"grad_norm": 0.06508573006276362,
"learning_rate": 4.202696525959667e-06,
"loss": 0.1042,
"step": 1780
},
{
"epoch": 1.432,
"grad_norm": 0.059065263422893345,
"learning_rate": 4.0954259618206295e-06,
"loss": 0.109,
"step": 1790
},
{
"epoch": 1.44,
"grad_norm": 0.06698134546589794,
"learning_rate": 3.989188527169749e-06,
"loss": 0.1031,
"step": 1800
},
{
"epoch": 1.448,
"grad_norm": 0.07037002941100785,
"learning_rate": 3.884002810481959e-06,
"loss": 0.1106,
"step": 1810
},
{
"epoch": 1.456,
"grad_norm": 0.05841985205150354,
"learning_rate": 3.7798872162119948e-06,
"loss": 0.0987,
"step": 1820
},
{
"epoch": 1.464,
"grad_norm": 0.0593910919321036,
"learning_rate": 3.676859961574162e-06,
"loss": 0.0994,
"step": 1830
},
{
"epoch": 1.472,
"grad_norm": 0.06578904279209549,
"learning_rate": 3.5749390733548382e-06,
"loss": 0.0998,
"step": 1840
},
{
"epoch": 1.48,
"grad_norm": 0.06370094131872989,
"learning_rate": 3.4741423847583134e-06,
"loss": 0.1008,
"step": 1850
},
{
"epoch": 1.488,
"grad_norm": 0.05739668138835314,
"learning_rate": 3.3744875322865035e-06,
"loss": 0.1016,
"step": 1860
},
{
"epoch": 1.496,
"grad_norm": 0.062215363243071836,
"learning_rate": 3.2759919526530536e-06,
"loss": 0.1032,
"step": 1870
},
{
"epoch": 1.504,
"grad_norm": 0.0715569328821687,
"learning_rate": 3.178672879732435e-06,
"loss": 0.0958,
"step": 1880
},
{
"epoch": 1.512,
"grad_norm": 0.05841095850206598,
"learning_rate": 3.0825473415445073e-06,
"loss": 0.1002,
"step": 1890
},
{
"epoch": 1.52,
"grad_norm": 0.0617717625489719,
"learning_rate": 2.9876321572751143e-06,
"loss": 0.1034,
"step": 1900
},
{
"epoch": 1.528,
"grad_norm": 0.06062020772334433,
"learning_rate": 2.8939439343332086e-06,
"loss": 0.0973,
"step": 1910
},
{
"epoch": 1.536,
"grad_norm": 0.059698562886373666,
"learning_rate": 2.8014990654450325e-06,
"loss": 0.1006,
"step": 1920
},
{
"epoch": 1.544,
"grad_norm": 0.06376657349003535,
"learning_rate": 2.7103137257858867e-06,
"loss": 0.0935,
"step": 1930
},
{
"epoch": 1.552,
"grad_norm": 0.07743391292616765,
"learning_rate": 2.6204038701499056e-06,
"loss": 0.1026,
"step": 1940
},
{
"epoch": 1.56,
"grad_norm": 0.06924240453610515,
"learning_rate": 2.5317852301584642e-06,
"loss": 0.1053,
"step": 1950
},
{
"epoch": 1.568,
"grad_norm": 0.05897566595897626,
"learning_rate": 2.4444733115075823e-06,
"loss": 0.1007,
"step": 1960
},
{
"epoch": 1.576,
"grad_norm": 0.061747152948674104,
"learning_rate": 2.3584833912548887e-06,
"loss": 0.1023,
"step": 1970
},
{
"epoch": 1.584,
"grad_norm": 0.06279470086154411,
"learning_rate": 2.2738305151465646e-06,
"loss": 0.0967,
"step": 1980
},
{
"epoch": 1.592,
"grad_norm": 0.06372313419732084,
"learning_rate": 2.190529494984782e-06,
"loss": 0.0937,
"step": 1990
},
{
"epoch": 1.6,
"grad_norm": 0.061625905799104846,
"learning_rate": 2.1085949060360654e-06,
"loss": 0.0911,
"step": 2000
},
{
"epoch": 1.608,
"grad_norm": 0.0678362739569136,
"learning_rate": 2.0280410844810426e-06,
"loss": 0.1065,
"step": 2010
},
{
"epoch": 1.616,
"grad_norm": 0.06643434332226122,
"learning_rate": 1.9488821249060297e-06,
"loss": 0.103,
"step": 2020
},
{
"epoch": 1.624,
"grad_norm": 0.06165743807147769,
"learning_rate": 1.8711318778368792e-06,
"loss": 0.1019,
"step": 2030
},
{
"epoch": 1.6320000000000001,
"grad_norm": 0.07358372056477895,
"learning_rate": 1.7948039473155553e-06,
"loss": 0.1041,
"step": 2040
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.06806526736977143,
"learning_rate": 1.7199116885197996e-06,
"loss": 0.1032,
"step": 2050
},
{
"epoch": 1.6480000000000001,
"grad_norm": 0.06919391837094059,
"learning_rate": 1.646468205426377e-06,
"loss": 0.098,
"step": 2060
},
{
"epoch": 1.6560000000000001,
"grad_norm": 0.07710033571374911,
"learning_rate": 1.5744863485182537e-06,
"loss": 0.1039,
"step": 2070
},
{
"epoch": 1.6640000000000001,
"grad_norm": 0.07419656560471355,
"learning_rate": 1.5039787125361327e-06,
"loss": 0.0992,
"step": 2080
},
{
"epoch": 1.6720000000000002,
"grad_norm": 0.06441503783003091,
"learning_rate": 1.4349576342747462e-06,
"loss": 0.1009,
"step": 2090
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.0587805726462111,
"learning_rate": 1.367435190424261e-06,
"loss": 0.1002,
"step": 2100
},
{
"epoch": 1.688,
"grad_norm": 0.06818045135041786,
"learning_rate": 1.3014231954572287e-06,
"loss": 0.1032,
"step": 2110
},
{
"epoch": 1.696,
"grad_norm": 0.06850284991866502,
"learning_rate": 1.2369331995613664e-06,
"loss": 0.1013,
"step": 2120
},
{
"epoch": 1.704,
"grad_norm": 0.07505851534344943,
"learning_rate": 1.1739764866186309e-06,
"loss": 0.095,
"step": 2130
},
{
"epoch": 1.712,
"grad_norm": 0.07008959733086773,
"learning_rate": 1.112564072230863e-06,
"loss": 0.102,
"step": 2140
},
{
"epoch": 1.72,
"grad_norm": 0.05689031064270844,
"learning_rate": 1.0527067017923654e-06,
"loss": 0.1026,
"step": 2150
},
{
"epoch": 1.728,
"grad_norm": 0.07596542582163048,
"learning_rate": 9.944148486097793e-07,
"loss": 0.1034,
"step": 2160
},
{
"epoch": 1.736,
"grad_norm": 0.06093771036580793,
"learning_rate": 9.376987120695547e-07,
"loss": 0.0997,
"step": 2170
},
{
"epoch": 1.744,
"grad_norm": 0.07428382141394303,
"learning_rate": 8.825682158533555e-07,
"loss": 0.1082,
"step": 2180
},
{
"epoch": 1.752,
"grad_norm": 0.08399394161052269,
"learning_rate": 8.290330062017015e-07,
"loss": 0.1052,
"step": 2190
},
{
"epoch": 1.76,
"grad_norm": 0.0668059758680316,
"learning_rate": 7.771024502261526e-07,
"loss": 0.099,
"step": 2200
},
{
"epoch": 1.768,
"grad_norm": 0.0679292033158011,
"learning_rate": 7.267856342703461e-07,
"loss": 0.1066,
"step": 2210
},
{
"epoch": 1.776,
"grad_norm": 0.058093075805745104,
"learning_rate": 6.780913623201346e-07,
"loss": 0.0989,
"step": 2220
},
{
"epoch": 1.784,
"grad_norm": 0.06223534910150451,
"learning_rate": 6.310281544631547e-07,
"loss": 0.0975,
"step": 2230
},
{
"epoch": 1.792,
"grad_norm": 0.0661429754113188,
"learning_rate": 5.856042453980526e-07,
"loss": 0.0987,
"step": 2240
},
{
"epoch": 1.8,
"grad_norm": 0.061050405248751057,
"learning_rate": 5.418275829936537e-07,
"loss": 0.0998,
"step": 2250
},
{
"epoch": 1.808,
"grad_norm": 0.056130252404826796,
"learning_rate": 4.997058268983135e-07,
"loss": 0.0932,
"step": 2260
},
{
"epoch": 1.8159999999999998,
"grad_norm": 0.06066847959463214,
"learning_rate": 4.592463471997022e-07,
"loss": 0.0974,
"step": 2270
},
{
"epoch": 1.8239999999999998,
"grad_norm": 0.0643399844655628,
"learning_rate": 4.204562231352516e-07,
"loss": 0.1057,
"step": 2280
},
{
"epoch": 1.8319999999999999,
"grad_norm": 0.060150289880508086,
"learning_rate": 3.83342241853496e-07,
"loss": 0.0994,
"step": 2290
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.06796944373259835,
"learning_rate": 3.4791089722651437e-07,
"loss": 0.1072,
"step": 2300
},
{
"epoch": 1.8479999999999999,
"grad_norm": 0.06759971647919732,
"learning_rate": 3.1416838871368925e-07,
"loss": 0.1063,
"step": 2310
},
{
"epoch": 1.8559999999999999,
"grad_norm": 0.067835192233575,
"learning_rate": 2.8212062027698995e-07,
"loss": 0.1006,
"step": 2320
},
{
"epoch": 1.8639999999999999,
"grad_norm": 0.07157931341704744,
"learning_rate": 2.5177319934793995e-07,
"loss": 0.1042,
"step": 2330
},
{
"epoch": 1.8719999999999999,
"grad_norm": 0.06616867154511055,
"learning_rate": 2.2313143584648423e-07,
"loss": 0.0978,
"step": 2340
},
{
"epoch": 1.88,
"grad_norm": 0.10047393373201215,
"learning_rate": 1.9620034125190645e-07,
"loss": 0.0977,
"step": 2350
},
{
"epoch": 1.888,
"grad_norm": 0.06073465869846798,
"learning_rate": 1.7098462772596302e-07,
"loss": 0.107,
"step": 2360
},
{
"epoch": 1.896,
"grad_norm": 0.0619748402288604,
"learning_rate": 1.474887072883935e-07,
"loss": 0.098,
"step": 2370
},
{
"epoch": 1.904,
"grad_norm": 0.06634025194054122,
"learning_rate": 1.2571669104494254e-07,
"loss": 0.0934,
"step": 2380
},
{
"epoch": 1.912,
"grad_norm": 0.07694308017805654,
"learning_rate": 1.0567238846803995e-07,
"loss": 0.1058,
"step": 2390
},
{
"epoch": 1.92,
"grad_norm": 0.060586005493788085,
"learning_rate": 8.735930673024806e-08,
"loss": 0.0929,
"step": 2400
},
{
"epoch": 1.928,
"grad_norm": 0.07076099012369173,
"learning_rate": 7.078065009060941e-08,
"loss": 0.1053,
"step": 2410
},
{
"epoch": 1.936,
"grad_norm": 0.07088076731988013,
"learning_rate": 5.5939319333998546e-08,
"loss": 0.0994,
"step": 2420
},
{
"epoch": 1.944,
"grad_norm": 0.06859911115828961,
"learning_rate": 4.2837911263562406e-08,
"loss": 0.1046,
"step": 2430
},
{
"epoch": 1.952,
"grad_norm": 0.09088957264396723,
"learning_rate": 3.147871824635717e-08,
"loss": 0.1021,
"step": 2440
},
{
"epoch": 1.96,
"grad_norm": 0.06296850339427222,
"learning_rate": 2.1863727812254653e-08,
"loss": 0.0992,
"step": 2450
},
{
"epoch": 1.968,
"grad_norm": 0.06311334916073906,
"learning_rate": 1.3994622306173766e-08,
"loss": 0.1047,
"step": 2460
},
{
"epoch": 1.976,
"grad_norm": 0.06846458491142236,
"learning_rate": 7.872778593728258e-09,
"loss": 0.102,
"step": 2470
},
{
"epoch": 1.984,
"grad_norm": 0.06333619567681512,
"learning_rate": 3.499267820307184e-09,
"loss": 0.0979,
"step": 2480
},
{
"epoch": 1.992,
"grad_norm": 0.06011655948198505,
"learning_rate": 8.748552236603758e-10,
"loss": 0.1016,
"step": 2490
},
{
"epoch": 2.0,
"grad_norm": 0.06939915779075108,
"learning_rate": 0.0,
"loss": 0.096,
"step": 2500
},
{
"epoch": 2.0,
"step": 2500,
"total_flos": 22262632857600.0,
"train_loss": 0.140902588391304,
"train_runtime": 9768.7089,
"train_samples_per_second": 8.189,
"train_steps_per_second": 0.256
}
],
"logging_steps": 10,
"max_steps": 2500,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 22262632857600.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}