{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 2500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008,
"grad_norm": 0.2994475388615314,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.2223,
"step": 10
},
{
"epoch": 0.016,
"grad_norm": 0.273631506204062,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.1771,
"step": 20
},
{
"epoch": 0.024,
"grad_norm": 0.2130209890157738,
"learning_rate": 4.800000000000001e-06,
"loss": 0.1598,
"step": 30
},
{
"epoch": 0.032,
"grad_norm": 0.18240027035932016,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.1619,
"step": 40
},
{
"epoch": 0.04,
"grad_norm": 0.20027988136436473,
"learning_rate": 8.000000000000001e-06,
"loss": 0.1724,
"step": 50
},
{
"epoch": 0.048,
"grad_norm": 0.19349670254659063,
"learning_rate": 9.600000000000001e-06,
"loss": 0.1781,
"step": 60
},
{
"epoch": 0.056,
"grad_norm": 0.1522942785320451,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.1741,
"step": 70
},
{
"epoch": 0.064,
"grad_norm": 0.20077878887262207,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.1678,
"step": 80
},
{
"epoch": 0.072,
"grad_norm": 0.2006526811340805,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.1754,
"step": 90
},
{
"epoch": 0.08,
"grad_norm": 0.18403850144652611,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.1793,
"step": 100
},
{
"epoch": 0.088,
"grad_norm": 0.17392033537978838,
"learning_rate": 1.76e-05,
"loss": 0.1669,
"step": 110
},
{
"epoch": 0.096,
"grad_norm": 0.15129051337201097,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.177,
"step": 120
},
{
"epoch": 0.104,
"grad_norm": 0.1686995282450159,
"learning_rate": 1.9999781283802247e-05,
"loss": 0.1838,
"step": 130
},
{
"epoch": 0.112,
"grad_norm": 0.18454731582348338,
"learning_rate": 1.999803161162393e-05,
"loss": 0.2058,
"step": 140
},
{
"epoch": 0.12,
"grad_norm": 0.1594563484769184,
"learning_rate": 1.999453257340926e-05,
"loss": 0.1953,
"step": 150
},
{
"epoch": 0.128,
"grad_norm": 0.16207625262493944,
"learning_rate": 1.9989284781388617e-05,
"loss": 0.1871,
"step": 160
},
{
"epoch": 0.136,
"grad_norm": 0.15859384233929794,
"learning_rate": 1.9982289153773648e-05,
"loss": 0.1948,
"step": 170
},
{
"epoch": 0.144,
"grad_norm": 0.17122386425268052,
"learning_rate": 1.9973546914596622e-05,
"loss": 0.1954,
"step": 180
},
{
"epoch": 0.152,
"grad_norm": 0.16538234415049946,
"learning_rate": 1.996305959349627e-05,
"loss": 0.1898,
"step": 190
},
{
"epoch": 0.16,
"grad_norm": 0.19838611436297024,
"learning_rate": 1.9950829025450116e-05,
"loss": 0.1903,
"step": 200
},
{
"epoch": 0.168,
"grad_norm": 0.18275997172338,
"learning_rate": 1.993685735045343e-05,
"loss": 0.189,
"step": 210
},
{
"epoch": 0.176,
"grad_norm": 0.19605227359376942,
"learning_rate": 1.9921147013144782e-05,
"loss": 0.202,
"step": 220
},
{
"epoch": 0.184,
"grad_norm": 0.1510786730603492,
"learning_rate": 1.9903700762378303e-05,
"loss": 0.1906,
"step": 230
},
{
"epoch": 0.192,
"grad_norm": 0.1731755052103561,
"learning_rate": 1.9884521650742718e-05,
"loss": 0.1818,
"step": 240
},
{
"epoch": 0.2,
"grad_norm": 0.1703147411704783,
"learning_rate": 1.9863613034027224e-05,
"loss": 0.1828,
"step": 250
},
{
"epoch": 0.208,
"grad_norm": 0.16465949216497772,
"learning_rate": 1.9840978570634338e-05,
"loss": 0.1998,
"step": 260
},
{
"epoch": 0.216,
"grad_norm": 0.13341671497634322,
"learning_rate": 1.9816622220939762e-05,
"loss": 0.1864,
"step": 270
},
{
"epoch": 0.224,
"grad_norm": 0.162293542213837,
"learning_rate": 1.9790548246599447e-05,
"loss": 0.1892,
"step": 280
},
{
"epoch": 0.232,
"grad_norm": 0.15597629673666613,
"learning_rate": 1.976276120980393e-05,
"loss": 0.1884,
"step": 290
},
{
"epoch": 0.24,
"grad_norm": 0.14793980628985084,
"learning_rate": 1.973326597248006e-05,
"loss": 0.1978,
"step": 300
},
{
"epoch": 0.248,
"grad_norm": 0.1447727611743318,
"learning_rate": 1.9702067695440333e-05,
"loss": 0.1883,
"step": 310
},
{
"epoch": 0.256,
"grad_norm": 0.16933785764552192,
"learning_rate": 1.966917183747987e-05,
"loss": 0.1898,
"step": 320
},
{
"epoch": 0.264,
"grad_norm": 0.1589759149940254,
"learning_rate": 1.9634584154421316e-05,
"loss": 0.1949,
"step": 330
},
{
"epoch": 0.272,
"grad_norm": 0.12763481414215103,
"learning_rate": 1.95983106981077e-05,
"loss": 0.1855,
"step": 340
},
{
"epoch": 0.28,
"grad_norm": 0.13722214819739667,
"learning_rate": 1.9560357815343577e-05,
"loss": 0.1887,
"step": 350
},
{
"epoch": 0.288,
"grad_norm": 0.17592856768775877,
"learning_rate": 1.9520732146784493e-05,
"loss": 0.1875,
"step": 360
},
{
"epoch": 0.296,
"grad_norm": 0.1540007177537391,
"learning_rate": 1.947944062577507e-05,
"loss": 0.1995,
"step": 370
},
{
"epoch": 0.304,
"grad_norm": 0.14462148850861567,
"learning_rate": 1.9436490477135877e-05,
"loss": 0.1905,
"step": 380
},
{
"epoch": 0.312,
"grad_norm": 0.14275084054493534,
"learning_rate": 1.93918892158993e-05,
"loss": 0.1946,
"step": 390
},
{
"epoch": 0.32,
"grad_norm": 0.13709865630962415,
"learning_rate": 1.934564464599461e-05,
"loss": 0.1848,
"step": 400
},
{
"epoch": 0.328,
"grad_norm": 0.12851355006510504,
"learning_rate": 1.9297764858882516e-05,
"loss": 0.2012,
"step": 410
},
{
"epoch": 0.336,
"grad_norm": 0.14422766911430207,
"learning_rate": 1.924825823213939e-05,
"loss": 0.1796,
"step": 420
},
{
"epoch": 0.344,
"grad_norm": 0.1456420238432456,
"learning_rate": 1.9197133427991437e-05,
"loss": 0.1894,
"step": 430
},
{
"epoch": 0.352,
"grad_norm": 0.11424505824104754,
"learning_rate": 1.9144399391799043e-05,
"loss": 0.1855,
"step": 440
},
{
"epoch": 0.36,
"grad_norm": 0.1505170978234406,
"learning_rate": 1.909006535049163e-05,
"loss": 0.1826,
"step": 450
},
{
"epoch": 0.368,
"grad_norm": 0.1325857585966927,
"learning_rate": 1.903414081095315e-05,
"loss": 0.1969,
"step": 460
},
{
"epoch": 0.376,
"grad_norm": 0.12049306945186798,
"learning_rate": 1.897663555835872e-05,
"loss": 0.1852,
"step": 470
},
{
"epoch": 0.384,
"grad_norm": 0.1321067212261413,
"learning_rate": 1.8917559654462474e-05,
"loss": 0.1902,
"step": 480
},
{
"epoch": 0.392,
"grad_norm": 0.12544038658282392,
"learning_rate": 1.8856923435837024e-05,
"loss": 0.1834,
"step": 490
},
{
"epoch": 0.4,
"grad_norm": 0.13754099065635406,
"learning_rate": 1.879473751206489e-05,
"loss": 0.1777,
"step": 500
},
{
"epoch": 0.408,
"grad_norm": 0.14299681112215643,
"learning_rate": 1.8731012763882132e-05,
"loss": 0.1891,
"step": 510
},
{
"epoch": 0.416,
"grad_norm": 0.12978945402957556,
"learning_rate": 1.8665760341274505e-05,
"loss": 0.1838,
"step": 520
},
{
"epoch": 0.424,
"grad_norm": 0.12828927955187144,
"learning_rate": 1.859899166152657e-05,
"loss": 0.1775,
"step": 530
},
{
"epoch": 0.432,
"grad_norm": 0.1399995330504017,
"learning_rate": 1.8530718407223976e-05,
"loss": 0.1949,
"step": 540
},
{
"epoch": 0.44,
"grad_norm": 0.1201971674290692,
"learning_rate": 1.8460952524209355e-05,
"loss": 0.1997,
"step": 550
},
{
"epoch": 0.448,
"grad_norm": 0.14220963488029062,
"learning_rate": 1.8389706219492147e-05,
"loss": 0.1864,
"step": 560
},
{
"epoch": 0.456,
"grad_norm": 0.12393318013758685,
"learning_rate": 1.831699195911272e-05,
"loss": 0.1932,
"step": 570
},
{
"epoch": 0.464,
"grad_norm": 0.13175301413953197,
"learning_rate": 1.8242822465961177e-05,
"loss": 0.1868,
"step": 580
},
{
"epoch": 0.472,
"grad_norm": 0.1439474730329765,
"learning_rate": 1.8167210717551224e-05,
"loss": 0.1825,
"step": 590
},
{
"epoch": 0.48,
"grad_norm": 0.1189834140430123,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.1793,
"step": 600
},
{
"epoch": 0.488,
"grad_norm": 0.13694513887911383,
"learning_rate": 1.8011713624460608e-05,
"loss": 0.1838,
"step": 610
},
{
"epoch": 0.496,
"grad_norm": 0.1342792821222079,
"learning_rate": 1.793185548726878e-05,
"loss": 0.1981,
"step": 620
},
{
"epoch": 0.504,
"grad_norm": 0.12267440816211221,
"learning_rate": 1.785060950503568e-05,
"loss": 0.1945,
"step": 630
},
{
"epoch": 0.512,
"grad_norm": 0.14191462759384785,
"learning_rate": 1.7767989893455696e-05,
"loss": 0.1939,
"step": 640
},
{
"epoch": 0.52,
"grad_norm": 0.15143689437873276,
"learning_rate": 1.7684011108568593e-05,
"loss": 0.1832,
"step": 650
},
{
"epoch": 0.528,
"grad_norm": 0.10997527630360879,
"learning_rate": 1.759868784423009e-05,
"loss": 0.1765,
"step": 660
},
{
"epoch": 0.536,
"grad_norm": 0.1262528956359292,
"learning_rate": 1.7512035029540887e-05,
"loss": 0.1791,
"step": 670
},
{
"epoch": 0.544,
"grad_norm": 0.15602459318561945,
"learning_rate": 1.74240678262345e-05,
"loss": 0.1743,
"step": 680
},
{
"epoch": 0.552,
"grad_norm": 0.13025666149189777,
"learning_rate": 1.73348016260244e-05,
"loss": 0.1834,
"step": 690
},
{
"epoch": 0.56,
"grad_norm": 0.1387483634701272,
"learning_rate": 1.7244252047910893e-05,
"loss": 0.1844,
"step": 700
},
{
"epoch": 0.568,
"grad_norm": 0.14296180482072157,
"learning_rate": 1.7152434935448257e-05,
"loss": 0.1728,
"step": 710
},
{
"epoch": 0.576,
"grad_norm": 0.13975560544538343,
"learning_rate": 1.705936635397259e-05,
"loss": 0.1861,
"step": 720
},
{
"epoch": 0.584,
"grad_norm": 0.11624329808561513,
"learning_rate": 1.6965062587790823e-05,
"loss": 0.1851,
"step": 730
},
{
"epoch": 0.592,
"grad_norm": 0.12204929575785449,
"learning_rate": 1.6869540137331445e-05,
"loss": 0.1851,
"step": 740
},
{
"epoch": 0.6,
"grad_norm": 0.11843883632568544,
"learning_rate": 1.6772815716257414e-05,
"loss": 0.1926,
"step": 750
},
{
"epoch": 0.608,
"grad_norm": 0.13379905247254648,
"learning_rate": 1.667490624854173e-05,
"loss": 0.1825,
"step": 760
},
{
"epoch": 0.616,
"grad_norm": 0.14954402393242697,
"learning_rate": 1.6575828865506246e-05,
"loss": 0.184,
"step": 770
},
{
"epoch": 0.624,
"grad_norm": 0.11855929270795366,
"learning_rate": 1.647560090282419e-05,
"loss": 0.1741,
"step": 780
},
{
"epoch": 0.632,
"grad_norm": 0.15098593909944216,
"learning_rate": 1.63742398974869e-05,
"loss": 0.18,
"step": 790
},
{
"epoch": 0.64,
"grad_norm": 0.12622455885460282,
"learning_rate": 1.6271763584735373e-05,
"loss": 0.1795,
"step": 800
},
{
"epoch": 0.648,
"grad_norm": 0.1214479548976728,
"learning_rate": 1.616818989495711e-05,
"loss": 0.1754,
"step": 810
},
{
"epoch": 0.656,
"grad_norm": 0.11633961346066299,
"learning_rate": 1.6063536950548825e-05,
"loss": 0.1791,
"step": 820
},
{
"epoch": 0.664,
"grad_norm": 0.1349935261273476,
"learning_rate": 1.595782306274553e-05,
"loss": 0.1901,
"step": 830
},
{
"epoch": 0.672,
"grad_norm": 0.12313061321761182,
"learning_rate": 1.5851066728416617e-05,
"loss": 0.1813,
"step": 840
},
{
"epoch": 0.68,
"grad_norm": 0.16151825468675773,
"learning_rate": 1.5743286626829437e-05,
"loss": 0.1754,
"step": 850
},
{
"epoch": 0.688,
"grad_norm": 0.14085102248573694,
"learning_rate": 1.5634501616380967e-05,
"loss": 0.1885,
"step": 860
},
{
"epoch": 0.696,
"grad_norm": 0.14051153835159377,
"learning_rate": 1.5524730731298136e-05,
"loss": 0.1883,
"step": 870
},
{
"epoch": 0.704,
"grad_norm": 0.10696859105735423,
"learning_rate": 1.541399317830738e-05,
"loss": 0.1785,
"step": 880
},
{
"epoch": 0.712,
"grad_norm": 0.12489614347746164,
"learning_rate": 1.530230833327405e-05,
"loss": 0.1868,
"step": 890
},
{
"epoch": 0.72,
"grad_norm": 0.13020128972045483,
"learning_rate": 1.5189695737812153e-05,
"loss": 0.1803,
"step": 900
},
{
"epoch": 0.728,
"grad_norm": 0.12378887718286205,
"learning_rate": 1.5076175095865171e-05,
"loss": 0.1885,
"step": 910
},
{
"epoch": 0.736,
"grad_norm": 0.12669683188981912,
"learning_rate": 1.4961766270258422e-05,
"loss": 0.1735,
"step": 920
},
{
"epoch": 0.744,
"grad_norm": 0.12139646601130864,
"learning_rate": 1.4846489279223653e-05,
"loss": 0.1842,
"step": 930
},
{
"epoch": 0.752,
"grad_norm": 0.12564036718714786,
"learning_rate": 1.473036429289641e-05,
"loss": 0.1862,
"step": 940
},
{
"epoch": 0.76,
"grad_norm": 0.12879466675814272,
"learning_rate": 1.461341162978688e-05,
"loss": 0.1794,
"step": 950
},
{
"epoch": 0.768,
"grad_norm": 0.13118866753606923,
"learning_rate": 1.4495651753224706e-05,
"loss": 0.1752,
"step": 960
},
{
"epoch": 0.776,
"grad_norm": 0.1397568247577318,
"learning_rate": 1.437710526777852e-05,
"loss": 0.1837,
"step": 970
},
{
"epoch": 0.784,
"grad_norm": 0.1199748082038019,
"learning_rate": 1.4257792915650728e-05,
"loss": 0.1751,
"step": 980
},
{
"epoch": 0.792,
"grad_norm": 0.1275568062184644,
"learning_rate": 1.4137735573048232e-05,
"loss": 0.1681,
"step": 990
},
{
"epoch": 0.8,
"grad_norm": 0.11731254237226522,
"learning_rate": 1.4016954246529697e-05,
"loss": 0.1672,
"step": 1000
},
{
"epoch": 0.808,
"grad_norm": 0.12803490644945145,
"learning_rate": 1.3895470069330003e-05,
"loss": 0.1775,
"step": 1010
},
{
"epoch": 0.816,
"grad_norm": 0.13148610169928593,
"learning_rate": 1.377330429766256e-05,
"loss": 0.179,
"step": 1020
},
{
"epoch": 0.824,
"grad_norm": 0.12498379605499213,
"learning_rate": 1.3650478307000059e-05,
"loss": 0.1743,
"step": 1030
},
{
"epoch": 0.832,
"grad_norm": 0.10768287920272084,
"learning_rate": 1.3527013588334415e-05,
"loss": 0.1798,
"step": 1040
},
{
"epoch": 0.84,
"grad_norm": 0.11876441348756149,
"learning_rate": 1.3402931744416432e-05,
"loss": 0.1669,
"step": 1050
},
{
"epoch": 0.848,
"grad_norm": 0.11670565624337659,
"learning_rate": 1.3278254485975977e-05,
"loss": 0.176,
"step": 1060
},
{
"epoch": 0.856,
"grad_norm": 0.12614319023396608,
"learning_rate": 1.3153003627923217e-05,
"loss": 0.1717,
"step": 1070
},
{
"epoch": 0.864,
"grad_norm": 0.15423610893363637,
"learning_rate": 1.3027201085531633e-05,
"loss": 0.1787,
"step": 1080
},
{
"epoch": 0.872,
"grad_norm": 0.1437966604387281,
"learning_rate": 1.2900868870603502e-05,
"loss": 0.1713,
"step": 1090
},
{
"epoch": 0.88,
"grad_norm": 0.13194227493290273,
"learning_rate": 1.2774029087618448e-05,
"loss": 0.1859,
"step": 1100
},
{
"epoch": 0.888,
"grad_norm": 0.11652085362332747,
"learning_rate": 1.2646703929865817e-05,
"loss": 0.1798,
"step": 1110
},
{
"epoch": 0.896,
"grad_norm": 0.11917839406476669,
"learning_rate": 1.2518915675561482e-05,
"loss": 0.1868,
"step": 1120
},
{
"epoch": 0.904,
"grad_norm": 0.11813364230838641,
"learning_rate": 1.2390686683949799e-05,
"loss": 0.1575,
"step": 1130
},
{
"epoch": 0.912,
"grad_norm": 0.1196110875735261,
"learning_rate": 1.2262039391391405e-05,
"loss": 0.17,
"step": 1140
},
{
"epoch": 0.92,
"grad_norm": 0.12300092078788795,
"learning_rate": 1.213299630743747e-05,
"loss": 0.1682,
"step": 1150
},
{
"epoch": 0.928,
"grad_norm": 0.1208425846118018,
"learning_rate": 1.2003580010891214e-05,
"loss": 0.1703,
"step": 1160
},
{
"epoch": 0.936,
"grad_norm": 0.12083725425562747,
"learning_rate": 1.187381314585725e-05,
"loss": 0.1797,
"step": 1170
},
{
"epoch": 0.944,
"grad_norm": 0.1178408143433832,
"learning_rate": 1.1743718417779518e-05,
"loss": 0.1792,
"step": 1180
},
{
"epoch": 0.952,
"grad_norm": 0.11740271053188309,
"learning_rate": 1.1613318589468512e-05,
"loss": 0.1726,
"step": 1190
},
{
"epoch": 0.96,
"grad_norm": 0.1090840603269972,
"learning_rate": 1.148263647711842e-05,
"loss": 0.1756,
"step": 1200
},
{
"epoch": 0.968,
"grad_norm": 0.124940945129332,
"learning_rate": 1.135169494631497e-05,
"loss": 0.1837,
"step": 1210
},
{
"epoch": 0.976,
"grad_norm": 0.1257424082327308,
"learning_rate": 1.1220516908034602e-05,
"loss": 0.1785,
"step": 1220
},
{
"epoch": 0.984,
"grad_norm": 0.11430280573700291,
"learning_rate": 1.1089125314635727e-05,
"loss": 0.1731,
"step": 1230
},
{
"epoch": 0.992,
"grad_norm": 0.13276298761961705,
"learning_rate": 1.0957543155842703e-05,
"loss": 0.1718,
"step": 1240
},
{
"epoch": 1.0,
"grad_norm": 0.1161581920629144,
"learning_rate": 1.0825793454723325e-05,
"loss": 0.1696,
"step": 1250
},
{
"epoch": 1.008,
"grad_norm": 0.09621449723576145,
"learning_rate": 1.0693899263660442e-05,
"loss": 0.1113,
"step": 1260
},
{
"epoch": 1.016,
"grad_norm": 0.10015368517162866,
"learning_rate": 1.0561883660318456e-05,
"loss": 0.1003,
"step": 1270
},
{
"epoch": 1.024,
"grad_norm": 0.1076609073322909,
"learning_rate": 1.0429769743605406e-05,
"loss": 0.1092,
"step": 1280
},
{
"epoch": 1.032,
"grad_norm": 0.1209467723696444,
"learning_rate": 1.0297580629631324e-05,
"loss": 0.1047,
"step": 1290
},
{
"epoch": 1.04,
"grad_norm": 0.11347187764817329,
"learning_rate": 1.0165339447663586e-05,
"loss": 0.1012,
"step": 1300
},
{
"epoch": 1.048,
"grad_norm": 0.13787078556126084,
"learning_rate": 1.0033069336079952e-05,
"loss": 0.0996,
"step": 1310
},
{
"epoch": 1.056,
"grad_norm": 0.11234445761651994,
"learning_rate": 9.900793438320037e-06,
"loss": 0.0986,
"step": 1320
},
{
"epoch": 1.064,
"grad_norm": 0.11599014325689881,
"learning_rate": 9.768534898835864e-06,
"loss": 0.1063,
"step": 1330
},
{
"epoch": 1.072,
"grad_norm": 0.11347494815929351,
"learning_rate": 9.636316859042258e-06,
"loss": 0.1039,
"step": 1340
},
{
"epoch": 1.08,
"grad_norm": 0.12183008770849793,
"learning_rate": 9.504162453267776e-06,
"loss": 0.1032,
"step": 1350
},
{
"epoch": 1.088,
"grad_norm": 0.11562604188712092,
"learning_rate": 9.372094804706867e-06,
"loss": 0.1011,
"step": 1360
},
{
"epoch": 1.096,
"grad_norm": 0.10067772672412269,
"learning_rate": 9.24013702137397e-06,
"loss": 0.0972,
"step": 1370
},
{
"epoch": 1.104,
"grad_norm": 0.11541149806656344,
"learning_rate": 9.108312192060298e-06,
"loss": 0.0958,
"step": 1380
},
{
"epoch": 1.112,
"grad_norm": 0.129106816260335,
"learning_rate": 8.97664338229395e-06,
"loss": 0.1,
"step": 1390
},
{
"epoch": 1.12,
"grad_norm": 0.11074093024717463,
"learning_rate": 8.84515363030414e-06,
"loss": 0.0976,
"step": 1400
},
{
"epoch": 1.1280000000000001,
"grad_norm": 0.10927701173789658,
"learning_rate": 8.713865942990143e-06,
"loss": 0.1004,
"step": 1410
},
{
"epoch": 1.1360000000000001,
"grad_norm": 0.129800724137463,
"learning_rate": 8.582803291895758e-06,
"loss": 0.1097,
"step": 1420
},
{
"epoch": 1.144,
"grad_norm": 0.1109925526353623,
"learning_rate": 8.451988609189987e-06,
"loss": 0.1047,
"step": 1430
},
{
"epoch": 1.152,
"grad_norm": 0.12402578949666784,
"learning_rate": 8.321444783654524e-06,
"loss": 0.1079,
"step": 1440
},
{
"epoch": 1.16,
"grad_norm": 0.1112595703457445,
"learning_rate": 8.191194656678905e-06,
"loss": 0.0976,
"step": 1450
},
{
"epoch": 1.168,
"grad_norm": 0.12036457116059705,
"learning_rate": 8.06126101826392e-06,
"loss": 0.0973,
"step": 1460
},
{
"epoch": 1.176,
"grad_norm": 0.12090306774036914,
"learning_rate": 7.931666603034034e-06,
"loss": 0.1008,
"step": 1470
},
{
"epoch": 1.184,
"grad_norm": 0.12317909704896644,
"learning_rate": 7.80243408625947e-06,
"loss": 0.0948,
"step": 1480
},
{
"epoch": 1.192,
"grad_norm": 0.10705938042955583,
"learning_rate": 7.673586079888699e-06,
"loss": 0.1067,
"step": 1490
},
{
"epoch": 1.2,
"grad_norm": 0.14259766047955233,
"learning_rate": 7.545145128592009e-06,
"loss": 0.1007,
"step": 1500
},
{
"epoch": 1.208,
"grad_norm": 0.11378740748534237,
"learning_rate": 7.4171337058168365e-06,
"loss": 0.103,
"step": 1510
},
{
"epoch": 1.216,
"grad_norm": 0.11908875076001751,
"learning_rate": 7.28957420985556e-06,
"loss": 0.099,
"step": 1520
},
{
"epoch": 1.224,
"grad_norm": 0.1177996071960852,
"learning_rate": 7.16248895992645e-06,
"loss": 0.0974,
"step": 1530
},
{
"epoch": 1.232,
"grad_norm": 0.12558719327814405,
"learning_rate": 7.035900192268464e-06,
"loss": 0.1114,
"step": 1540
},
{
"epoch": 1.24,
"grad_norm": 0.12231602378332272,
"learning_rate": 6.909830056250527e-06,
"loss": 0.1017,
"step": 1550
},
{
"epoch": 1.248,
"grad_norm": 0.1411428231676542,
"learning_rate": 6.784300610496049e-06,
"loss": 0.1037,
"step": 1560
},
{
"epoch": 1.256,
"grad_norm": 0.11433442906651817,
"learning_rate": 6.659333819023291e-06,
"loss": 0.1017,
"step": 1570
},
{
"epoch": 1.264,
"grad_norm": 0.1587733997288609,
"learning_rate": 6.534951547402322e-06,
"loss": 0.1022,
"step": 1580
},
{
"epoch": 1.272,
"grad_norm": 0.11284632840723509,
"learning_rate": 6.411175558929152e-06,
"loss": 0.1016,
"step": 1590
},
{
"epoch": 1.28,
"grad_norm": 0.10824650118751981,
"learning_rate": 6.2880275108177915e-06,
"loss": 0.1027,
"step": 1600
},
{
"epoch": 1.288,
"grad_norm": 0.13145070635475115,
"learning_rate": 6.165528950410884e-06,
"loss": 0.1066,
"step": 1610
},
{
"epoch": 1.296,
"grad_norm": 0.13085006869996493,
"learning_rate": 6.04370131140952e-06,
"loss": 0.0992,
"step": 1620
},
{
"epoch": 1.304,
"grad_norm": 0.12602863363341382,
"learning_rate": 5.922565910122967e-06,
"loss": 0.0953,
"step": 1630
},
{
"epoch": 1.312,
"grad_norm": 0.1225471587130614,
"learning_rate": 5.802143941738945e-06,
"loss": 0.106,
"step": 1640
},
{
"epoch": 1.32,
"grad_norm": 0.11766655619511902,
"learning_rate": 5.6824564766150724e-06,
"loss": 0.1063,
"step": 1650
},
{
"epoch": 1.328,
"grad_norm": 0.12243121066150191,
"learning_rate": 5.563524456592163e-06,
"loss": 0.1005,
"step": 1660
},
{
"epoch": 1.336,
"grad_norm": 0.1137092367870251,
"learning_rate": 5.445368691330008e-06,
"loss": 0.1014,
"step": 1670
},
{
"epoch": 1.3439999999999999,
"grad_norm": 0.11563617059718279,
"learning_rate": 5.328009854666303e-06,
"loss": 0.0963,
"step": 1680
},
{
"epoch": 1.3519999999999999,
"grad_norm": 0.10812153213525164,
"learning_rate": 5.211468480999304e-06,
"loss": 0.1039,
"step": 1690
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.10528899506093697,
"learning_rate": 5.095764961694923e-06,
"loss": 0.1003,
"step": 1700
},
{
"epoch": 1.3679999999999999,
"grad_norm": 0.11815436297994328,
"learning_rate": 4.980919541518796e-06,
"loss": 0.0982,
"step": 1710
},
{
"epoch": 1.376,
"grad_norm": 0.12086679854967129,
"learning_rate": 4.866952315094088e-06,
"loss": 0.0971,
"step": 1720
},
{
"epoch": 1.384,
"grad_norm": 0.1216775616601072,
"learning_rate": 4.753883223385467e-06,
"loss": 0.0945,
"step": 1730
},
{
"epoch": 1.392,
"grad_norm": 0.10648914490397961,
"learning_rate": 4.641732050210032e-06,
"loss": 0.1039,
"step": 1740
},
{
"epoch": 1.4,
"grad_norm": 0.11378893301113804,
"learning_rate": 4.530518418775734e-06,
"loss": 0.0935,
"step": 1750
},
{
"epoch": 1.408,
"grad_norm": 0.13321885782018256,
"learning_rate": 4.420261788247841e-06,
"loss": 0.1007,
"step": 1760
},
{
"epoch": 1.416,
"grad_norm": 0.1166412836830051,
"learning_rate": 4.3109814503441894e-06,
"loss": 0.1,
"step": 1770
},
{
"epoch": 1.424,
"grad_norm": 0.12573437053218914,
"learning_rate": 4.202696525959667e-06,
"loss": 0.1018,
"step": 1780
},
{
"epoch": 1.432,
"grad_norm": 0.10766865319844147,
"learning_rate": 4.0954259618206295e-06,
"loss": 0.1055,
"step": 1790
},
{
"epoch": 1.44,
"grad_norm": 0.11916472873837182,
"learning_rate": 3.989188527169749e-06,
"loss": 0.1006,
"step": 1800
},
{
"epoch": 1.448,
"grad_norm": 0.12141420740790235,
"learning_rate": 3.884002810481959e-06,
"loss": 0.1053,
"step": 1810
},
{
"epoch": 1.456,
"grad_norm": 0.10131664100763009,
"learning_rate": 3.7798872162119948e-06,
"loss": 0.095,
"step": 1820
},
{
"epoch": 1.464,
"grad_norm": 0.10718874752624193,
"learning_rate": 3.676859961574162e-06,
"loss": 0.0966,
"step": 1830
},
{
"epoch": 1.472,
"grad_norm": 0.11477403835967784,
"learning_rate": 3.5749390733548382e-06,
"loss": 0.0973,
"step": 1840
},
{
"epoch": 1.48,
"grad_norm": 0.10635089908850691,
"learning_rate": 3.4741423847583134e-06,
"loss": 0.0976,
"step": 1850
},
{
"epoch": 1.488,
"grad_norm": 0.10600348235646671,
"learning_rate": 3.3744875322865035e-06,
"loss": 0.0974,
"step": 1860
},
{
"epoch": 1.496,
"grad_norm": 0.1097923236145172,
"learning_rate": 3.2759919526530536e-06,
"loss": 0.1018,
"step": 1870
},
{
"epoch": 1.504,
"grad_norm": 0.12057262052779812,
"learning_rate": 3.178672879732435e-06,
"loss": 0.0921,
"step": 1880
},
{
"epoch": 1.512,
"grad_norm": 0.10322870417117935,
"learning_rate": 3.0825473415445073e-06,
"loss": 0.0969,
"step": 1890
},
{
"epoch": 1.52,
"grad_norm": 0.11162000352158546,
"learning_rate": 2.9876321572751143e-06,
"loss": 0.1004,
"step": 1900
},
{
"epoch": 1.528,
"grad_norm": 0.11080139256806071,
"learning_rate": 2.8939439343332086e-06,
"loss": 0.0925,
"step": 1910
},
{
"epoch": 1.536,
"grad_norm": 0.11129458730658273,
"learning_rate": 2.8014990654450325e-06,
"loss": 0.0965,
"step": 1920
},
{
"epoch": 1.544,
"grad_norm": 0.11117549882577657,
"learning_rate": 2.7103137257858867e-06,
"loss": 0.0913,
"step": 1930
},
{
"epoch": 1.552,
"grad_norm": 0.13048983161003938,
"learning_rate": 2.6204038701499056e-06,
"loss": 0.0992,
"step": 1940
},
{
"epoch": 1.56,
"grad_norm": 0.11734607541924097,
"learning_rate": 2.5317852301584642e-06,
"loss": 0.1012,
"step": 1950
},
{
"epoch": 1.568,
"grad_norm": 0.1083555940829194,
"learning_rate": 2.4444733115075823e-06,
"loss": 0.0971,
"step": 1960
},
{
"epoch": 1.576,
"grad_norm": 0.10908207209378103,
"learning_rate": 2.3584833912548887e-06,
"loss": 0.0979,
"step": 1970
},
{
"epoch": 1.584,
"grad_norm": 0.11841797427563601,
"learning_rate": 2.2738305151465646e-06,
"loss": 0.0942,
"step": 1980
},
{
"epoch": 1.592,
"grad_norm": 0.11315769991653934,
"learning_rate": 2.190529494984782e-06,
"loss": 0.0888,
"step": 1990
},
{
"epoch": 1.6,
"grad_norm": 0.11433802488811017,
"learning_rate": 2.1085949060360654e-06,
"loss": 0.0875,
"step": 2000
},
{
"epoch": 1.608,
"grad_norm": 0.12338526437657736,
"learning_rate": 2.0280410844810426e-06,
"loss": 0.1007,
"step": 2010
},
{
"epoch": 1.616,
"grad_norm": 0.1140721727012241,
"learning_rate": 1.9488821249060297e-06,
"loss": 0.0961,
"step": 2020
},
{
"epoch": 1.624,
"grad_norm": 0.10596121333045593,
"learning_rate": 1.8711318778368792e-06,
"loss": 0.0955,
"step": 2030
},
{
"epoch": 1.6320000000000001,
"grad_norm": 0.1216702589106228,
"learning_rate": 1.7948039473155553e-06,
"loss": 0.0992,
"step": 2040
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.12842132114696375,
"learning_rate": 1.7199116885197996e-06,
"loss": 0.0961,
"step": 2050
},
{
"epoch": 1.6480000000000001,
"grad_norm": 0.12054578218454282,
"learning_rate": 1.646468205426377e-06,
"loss": 0.0915,
"step": 2060
},
{
"epoch": 1.6560000000000001,
"grad_norm": 0.13571464476020081,
"learning_rate": 1.5744863485182537e-06,
"loss": 0.1007,
"step": 2070
},
{
"epoch": 1.6640000000000001,
"grad_norm": 0.09706739858443193,
"learning_rate": 1.5039787125361327e-06,
"loss": 0.094,
"step": 2080
},
{
"epoch": 1.6720000000000002,
"grad_norm": 0.10675797230018248,
"learning_rate": 1.4349576342747462e-06,
"loss": 0.0938,
"step": 2090
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.11223016492676532,
"learning_rate": 1.367435190424261e-06,
"loss": 0.0938,
"step": 2100
},
{
"epoch": 1.688,
"grad_norm": 0.12732260810929744,
"learning_rate": 1.3014231954572287e-06,
"loss": 0.098,
"step": 2110
},
{
"epoch": 1.696,
"grad_norm": 0.12300876323192528,
"learning_rate": 1.2369331995613664e-06,
"loss": 0.0964,
"step": 2120
},
{
"epoch": 1.704,
"grad_norm": 0.12534390954335137,
"learning_rate": 1.1739764866186309e-06,
"loss": 0.0896,
"step": 2130
},
{
"epoch": 1.712,
"grad_norm": 0.11609921777362982,
"learning_rate": 1.112564072230863e-06,
"loss": 0.0959,
"step": 2140
},
{
"epoch": 1.72,
"grad_norm": 0.10391980074321304,
"learning_rate": 1.0527067017923654e-06,
"loss": 0.0961,
"step": 2150
},
{
"epoch": 1.728,
"grad_norm": 0.1278354882865009,
"learning_rate": 9.944148486097793e-07,
"loss": 0.0976,
"step": 2160
},
{
"epoch": 1.736,
"grad_norm": 0.10681775464138107,
"learning_rate": 9.376987120695547e-07,
"loss": 0.0967,
"step": 2170
},
{
"epoch": 1.744,
"grad_norm": 0.12461590835029315,
"learning_rate": 8.825682158533555e-07,
"loss": 0.1019,
"step": 2180
},
{
"epoch": 1.752,
"grad_norm": 0.12925806225583442,
"learning_rate": 8.290330062017015e-07,
"loss": 0.0991,
"step": 2190
},
{
"epoch": 1.76,
"grad_norm": 0.12035123190303527,
"learning_rate": 7.771024502261526e-07,
"loss": 0.095,
"step": 2200
},
{
"epoch": 1.768,
"grad_norm": 0.11830596930835367,
"learning_rate": 7.267856342703461e-07,
"loss": 0.1003,
"step": 2210
},
{
"epoch": 1.776,
"grad_norm": 0.10309290288715313,
"learning_rate": 6.780913623201346e-07,
"loss": 0.0934,
"step": 2220
},
{
"epoch": 1.784,
"grad_norm": 0.11823660601228905,
"learning_rate": 6.310281544631547e-07,
"loss": 0.091,
"step": 2230
},
{
"epoch": 1.792,
"grad_norm": 0.1240524461220155,
"learning_rate": 5.856042453980526e-07,
"loss": 0.0933,
"step": 2240
},
{
"epoch": 1.8,
"grad_norm": 0.10954545218644868,
"learning_rate": 5.418275829936537e-07,
"loss": 0.095,
"step": 2250
},
{
"epoch": 1.808,
"grad_norm": 0.09984478082295495,
"learning_rate": 4.997058268983135e-07,
"loss": 0.0884,
"step": 2260
},
{
"epoch": 1.8159999999999998,
"grad_norm": 0.09980866667906611,
"learning_rate": 4.592463471997022e-07,
"loss": 0.091,
"step": 2270
},
{
"epoch": 1.8239999999999998,
"grad_norm": 0.11065649275917133,
"learning_rate": 4.204562231352516e-07,
"loss": 0.0996,
"step": 2280
},
{
"epoch": 1.8319999999999999,
"grad_norm": 0.10945247265836085,
"learning_rate": 3.83342241853496e-07,
"loss": 0.093,
"step": 2290
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.11965279820742956,
"learning_rate": 3.4791089722651437e-07,
"loss": 0.1022,
"step": 2300
},
{
"epoch": 1.8479999999999999,
"grad_norm": 0.1181705374127321,
"learning_rate": 3.1416838871368925e-07,
"loss": 0.1009,
"step": 2310
},
{
"epoch": 1.8559999999999999,
"grad_norm": 0.11160263850458792,
"learning_rate": 2.8212062027698995e-07,
"loss": 0.0963,
"step": 2320
},
{
"epoch": 1.8639999999999999,
"grad_norm": 0.13030757683951363,
"learning_rate": 2.5177319934793995e-07,
"loss": 0.0983,
"step": 2330
},
{
"epoch": 1.8719999999999999,
"grad_norm": 0.11874772797683317,
"learning_rate": 2.2313143584648423e-07,
"loss": 0.092,
"step": 2340
},
{
"epoch": 1.88,
"grad_norm": 0.11341961857438242,
"learning_rate": 1.9620034125190645e-07,
"loss": 0.092,
"step": 2350
},
{
"epoch": 1.888,
"grad_norm": 0.12018288004198623,
"learning_rate": 1.7098462772596302e-07,
"loss": 0.1013,
"step": 2360
},
{
"epoch": 1.896,
"grad_norm": 0.1076575691013852,
"learning_rate": 1.474887072883935e-07,
"loss": 0.0911,
"step": 2370
},
{
"epoch": 1.904,
"grad_norm": 0.11337036877085487,
"learning_rate": 1.2571669104494254e-07,
"loss": 0.0881,
"step": 2380
},
{
"epoch": 1.912,
"grad_norm": 0.1610883586396456,
"learning_rate": 1.0567238846803995e-07,
"loss": 0.0977,
"step": 2390
},
{
"epoch": 1.92,
"grad_norm": 0.11183827260464378,
"learning_rate": 8.735930673024806e-08,
"loss": 0.0869,
"step": 2400
},
{
"epoch": 1.928,
"grad_norm": 0.14085464266938202,
"learning_rate": 7.078065009060941e-08,
"loss": 0.0979,
"step": 2410
},
{
"epoch": 1.936,
"grad_norm": 0.12681513850389028,
"learning_rate": 5.5939319333998546e-08,
"loss": 0.0955,
"step": 2420
},
{
"epoch": 1.944,
"grad_norm": 0.12408511876527423,
"learning_rate": 4.2837911263562406e-08,
"loss": 0.0983,
"step": 2430
},
{
"epoch": 1.952,
"grad_norm": 0.12057693933072995,
"learning_rate": 3.147871824635717e-08,
"loss": 0.0978,
"step": 2440
},
{
"epoch": 1.96,
"grad_norm": 0.12061596653924865,
"learning_rate": 2.1863727812254653e-08,
"loss": 0.0941,
"step": 2450
},
{
"epoch": 1.968,
"grad_norm": 0.11057835512679115,
"learning_rate": 1.3994622306173766e-08,
"loss": 0.0977,
"step": 2460
},
{
"epoch": 1.976,
"grad_norm": 0.12585430211339457,
"learning_rate": 7.872778593728258e-09,
"loss": 0.0958,
"step": 2470
},
{
"epoch": 1.984,
"grad_norm": 0.11787146911875013,
"learning_rate": 3.499267820307184e-09,
"loss": 0.0919,
"step": 2480
},
{
"epoch": 1.992,
"grad_norm": 0.1087435338716272,
"learning_rate": 8.748552236603758e-10,
"loss": 0.0936,
"step": 2490
},
{
"epoch": 2.0,
"grad_norm": 0.11470541946837681,
"learning_rate": 0.0,
"loss": 0.0898,
"step": 2500
},
{
"epoch": 2.0,
"step": 2500,
"total_flos": 22262632857600.0,
"train_loss": 0.14060430166721344,
"train_runtime": 9775.4364,
"train_samples_per_second": 8.184,
"train_steps_per_second": 0.256
}
],
"logging_steps": 10,
"max_steps": 2500,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 22262632857600.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}