{
"best_metric": 3.3106162548065186,
"best_model_checkpoint": "./output/checkpoint-4800",
"epoch": 28.91566265060241,
"eval_steps": 150,
"global_step": 4800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.060240963855421686,
"grad_norm": 52.598548889160156,
"learning_rate": 1.25e-05,
"loss": 1.4587,
"step": 10
},
{
"epoch": 0.12048192771084337,
"grad_norm": 1.3145289421081543,
"learning_rate": 2.5e-05,
"loss": 0.1245,
"step": 20
},
{
"epoch": 0.18072289156626506,
"grad_norm": 47.875640869140625,
"learning_rate": 3.75e-05,
"loss": 0.0925,
"step": 30
},
{
"epoch": 0.24096385542168675,
"grad_norm": 25.39703369140625,
"learning_rate": 5e-05,
"loss": 0.1134,
"step": 40
},
{
"epoch": 0.30120481927710846,
"grad_norm": 22.82965660095215,
"learning_rate": 6.25e-05,
"loss": 0.1348,
"step": 50
},
{
"epoch": 0.3614457831325301,
"grad_norm": 0.446526437997818,
"learning_rate": 7.5e-05,
"loss": 0.5273,
"step": 60
},
{
"epoch": 0.42168674698795183,
"grad_norm": 50.92262649536133,
"learning_rate": 8.75e-05,
"loss": 0.2283,
"step": 70
},
{
"epoch": 0.4819277108433735,
"grad_norm": 65.35910034179688,
"learning_rate": 0.0001,
"loss": 0.3627,
"step": 80
},
{
"epoch": 0.5421686746987951,
"grad_norm": 8.19798755645752,
"learning_rate": 0.00011250000000000001,
"loss": 0.2596,
"step": 90
},
{
"epoch": 0.6024096385542169,
"grad_norm": 141.95716857910156,
"learning_rate": 0.000125,
"loss": 0.6644,
"step": 100
},
{
"epoch": 0.6626506024096386,
"grad_norm": 736.8707885742188,
"learning_rate": 0.00012499871543489787,
"loss": 0.9693,
"step": 110
},
{
"epoch": 0.7228915662650602,
"grad_norm": 249.3212432861328,
"learning_rate": 0.00012499486179239495,
"loss": 1.4457,
"step": 120
},
{
"epoch": 0.7831325301204819,
"grad_norm": 30.746965408325195,
"learning_rate": 0.00012498843923089938,
"loss": 1.4675,
"step": 130
},
{
"epoch": 0.8433734939759037,
"grad_norm": 41.87055587768555,
"learning_rate": 0.0001249794480144175,
"loss": 3.2459,
"step": 140
},
{
"epoch": 0.9036144578313253,
"grad_norm": 64.25176239013672,
"learning_rate": 0.000124967888512543,
"loss": 5.6311,
"step": 150
},
{
"epoch": 0.9036144578313253,
"eval_loss": 6.333486080169678,
"eval_runtime": 3.9268,
"eval_samples_per_second": 9.422,
"eval_steps_per_second": 9.422,
"step": 150
},
{
"epoch": 0.963855421686747,
"grad_norm": 20.342771530151367,
"learning_rate": 0.00012495376120044173,
"loss": 6.6599,
"step": 160
},
{
"epoch": 1.0240963855421688,
"grad_norm": 14.641746520996094,
"learning_rate": 0.00012493706665883217,
"loss": 5.9994,
"step": 170
},
{
"epoch": 1.0843373493975903,
"grad_norm": 10.879859924316406,
"learning_rate": 0.00012491780557396154,
"loss": 5.408,
"step": 180
},
{
"epoch": 1.144578313253012,
"grad_norm": 11.879966735839844,
"learning_rate": 0.00012489597873757756,
"loss": 5.4315,
"step": 190
},
{
"epoch": 1.2048192771084336,
"grad_norm": 14.315322875976562,
"learning_rate": 0.00012487158704689602,
"loss": 5.7639,
"step": 200
},
{
"epoch": 1.2650602409638554,
"grad_norm": 12.6843843460083,
"learning_rate": 0.0001248446315045638,
"loss": 5.4332,
"step": 210
},
{
"epoch": 1.3253012048192772,
"grad_norm": 14.997090339660645,
"learning_rate": 0.00012481511321861763,
"loss": 5.7137,
"step": 220
},
{
"epoch": 1.3855421686746987,
"grad_norm": 46041.421875,
"learning_rate": 0.00012478303340243864,
"loss": 7.4686,
"step": 230
},
{
"epoch": 1.4457831325301205,
"grad_norm": 116.35897064208984,
"learning_rate": 0.00012474839337470246,
"loss": 5.9303,
"step": 240
},
{
"epoch": 1.5060240963855422,
"grad_norm": 25.29050064086914,
"learning_rate": 0.0001247111945593249,
"loss": 5.6893,
"step": 250
},
{
"epoch": 1.5662650602409638,
"grad_norm": 30.405521392822266,
"learning_rate": 0.00012467143848540359,
"loss": 5.4675,
"step": 260
},
{
"epoch": 1.6265060240963856,
"grad_norm": 18.263336181640625,
"learning_rate": 0.000124629126787155,
"loss": 5.1431,
"step": 270
},
{
"epoch": 1.6867469879518073,
"grad_norm": 30.267963409423828,
"learning_rate": 0.00012458426120384738,
"loss": 5.3887,
"step": 280
},
{
"epoch": 1.7469879518072289,
"grad_norm": 16.499534606933594,
"learning_rate": 0.00012453684357972906,
"loss": 5.0634,
"step": 290
},
{
"epoch": 1.8072289156626506,
"grad_norm": 13.954059600830078,
"learning_rate": 0.00012448687586395289,
"loss": 5.243,
"step": 300
},
{
"epoch": 1.8072289156626506,
"eval_loss": 5.087627410888672,
"eval_runtime": 3.8181,
"eval_samples_per_second": 9.691,
"eval_steps_per_second": 9.691,
"step": 300
},
{
"epoch": 1.8674698795180724,
"grad_norm": 19.865482330322266,
"learning_rate": 0.00012443436011049593,
"loss": 4.7389,
"step": 310
},
{
"epoch": 1.927710843373494,
"grad_norm": 46.38461685180664,
"learning_rate": 0.0001243792984780751,
"loss": 4.8623,
"step": 320
},
{
"epoch": 1.9879518072289155,
"grad_norm": 21.573978424072266,
"learning_rate": 0.00012432169323005853,
"loss": 4.9039,
"step": 330
},
{
"epoch": 2.0481927710843375,
"grad_norm": 21.620838165283203,
"learning_rate": 0.00012426154673437223,
"loss": 4.863,
"step": 340
},
{
"epoch": 2.108433734939759,
"grad_norm": 64.13438415527344,
"learning_rate": 0.00012419886146340314,
"loss": 4.6726,
"step": 350
},
{
"epoch": 2.1686746987951806,
"grad_norm": 32.13075256347656,
"learning_rate": 0.0001241336399938972,
"loss": 4.6064,
"step": 360
},
{
"epoch": 2.2289156626506026,
"grad_norm": 62.58379364013672,
"learning_rate": 0.00012406588500685355,
"loss": 4.5968,
"step": 370
},
{
"epoch": 2.289156626506024,
"grad_norm": 2010.449462890625,
"learning_rate": 0.00012399559928741435,
"loss": 5.0049,
"step": 380
},
{
"epoch": 2.3493975903614457,
"grad_norm": 17.136829376220703,
"learning_rate": 0.00012392278572475023,
"loss": 5.0645,
"step": 390
},
{
"epoch": 2.4096385542168672,
"grad_norm": 17.000410079956055,
"learning_rate": 0.0001238474473119416,
"loss": 5.2628,
"step": 400
},
{
"epoch": 2.4698795180722892,
"grad_norm": 80.07215118408203,
"learning_rate": 0.00012376958714585545,
"loss": 4.8465,
"step": 410
},
{
"epoch": 2.5301204819277108,
"grad_norm": 77.65228271484375,
"learning_rate": 0.0001236892084270183,
"loss": 4.5762,
"step": 420
},
{
"epoch": 2.5903614457831328,
"grad_norm": 26.298023223876953,
"learning_rate": 0.00012360631445948448,
"loss": 4.6785,
"step": 430
},
{
"epoch": 2.6506024096385543,
"grad_norm": 44.22318649291992,
"learning_rate": 0.00012352090865070026,
"loss": 5.1179,
"step": 440
},
{
"epoch": 2.710843373493976,
"grad_norm": 70.8712387084961,
"learning_rate": 0.00012343299451136397,
"loss": 4.2638,
"step": 450
},
{
"epoch": 2.710843373493976,
"eval_loss": 4.652755260467529,
"eval_runtime": 3.8238,
"eval_samples_per_second": 9.676,
"eval_steps_per_second": 9.676,
"step": 450
},
{
"epoch": 2.7710843373493974,
"grad_norm": 155.15814208984375,
"learning_rate": 0.00012334257565528155,
"loss": 4.7236,
"step": 460
},
{
"epoch": 2.8313253012048194,
"grad_norm": 18.455474853515625,
"learning_rate": 0.000123249655799218,
"loss": 5.0244,
"step": 470
},
{
"epoch": 2.891566265060241,
"grad_norm": 26.79853057861328,
"learning_rate": 0.00012315423876274468,
"loss": 5.1311,
"step": 480
},
{
"epoch": 2.9518072289156625,
"grad_norm": 151.131591796875,
"learning_rate": 0.0001230563284680822,
"loss": 5.1015,
"step": 490
},
{
"epoch": 3.0120481927710845,
"grad_norm": 19.262746810913086,
"learning_rate": 0.00012295592893993935,
"loss": 4.6265,
"step": 500
},
{
"epoch": 3.072289156626506,
"grad_norm": 21.435794830322266,
"learning_rate": 0.00012285304430534745,
"loss": 4.8465,
"step": 510
},
{
"epoch": 3.1325301204819276,
"grad_norm": 122.01215362548828,
"learning_rate": 0.00012274767879349083,
"loss": 4.2186,
"step": 520
},
{
"epoch": 3.1927710843373496,
"grad_norm": 42.04435729980469,
"learning_rate": 0.00012263983673553306,
"loss": 4.9252,
"step": 530
},
{
"epoch": 3.253012048192771,
"grad_norm": 37.1818962097168,
"learning_rate": 0.0001225295225644387,
"loss": 4.5153,
"step": 540
},
{
"epoch": 3.3132530120481927,
"grad_norm": 40.17600631713867,
"learning_rate": 0.0001224167408147913,
"loss": 4.7539,
"step": 550
},
{
"epoch": 3.3734939759036147,
"grad_norm": 11.658145904541016,
"learning_rate": 0.0001223014961226068,
"loss": 4.4217,
"step": 560
},
{
"epoch": 3.433734939759036,
"grad_norm": 53.181190490722656,
"learning_rate": 0.00012218379322514317,
"loss": 4.1959,
"step": 570
},
{
"epoch": 3.4939759036144578,
"grad_norm": 27.87154197692871,
"learning_rate": 0.00012206363696070545,
"loss": 4.1385,
"step": 580
},
{
"epoch": 3.5542168674698793,
"grad_norm": 131.2779541015625,
"learning_rate": 0.0001219410322684471,
"loss": 3.9121,
"step": 590
},
{
"epoch": 3.6144578313253013,
"grad_norm": 88.18590545654297,
"learning_rate": 0.0001218159841881668,
"loss": 4.5732,
"step": 600
},
{
"epoch": 3.6144578313253013,
"eval_loss": 4.325500965118408,
"eval_runtime": 3.8207,
"eval_samples_per_second": 9.684,
"eval_steps_per_second": 9.684,
"step": 600
},
{
"epoch": 3.674698795180723,
"grad_norm": 137.2965087890625,
"learning_rate": 0.00012168849786010133,
"loss": 4.3792,
"step": 610
},
{
"epoch": 3.734939759036145,
"grad_norm": 247.1873779296875,
"learning_rate": 0.00012155857852471433,
"loss": 3.6815,
"step": 620
},
{
"epoch": 3.7951807228915664,
"grad_norm": 243.79896545410156,
"learning_rate": 0.0001214262315224808,
"loss": 4.3236,
"step": 630
},
{
"epoch": 3.855421686746988,
"grad_norm": 81.76631927490234,
"learning_rate": 0.00012129146229366766,
"loss": 4.2099,
"step": 640
},
{
"epoch": 3.9156626506024095,
"grad_norm": 83.27767944335938,
"learning_rate": 0.00012115427637811003,
"loss": 3.6707,
"step": 650
},
{
"epoch": 3.9759036144578315,
"grad_norm": 13.061168670654297,
"learning_rate": 0.00012101467941498357,
"loss": 3.9052,
"step": 660
},
{
"epoch": 4.036144578313253,
"grad_norm": 13.092235565185547,
"learning_rate": 0.0001208726771425727,
"loss": 4.3889,
"step": 670
},
{
"epoch": 4.096385542168675,
"grad_norm": 254.2332305908203,
"learning_rate": 0.00012072827539803463,
"loss": 4.1295,
"step": 680
},
{
"epoch": 4.156626506024097,
"grad_norm": 57.88543701171875,
"learning_rate": 0.00012058148011715949,
"loss": 3.8834,
"step": 690
},
{
"epoch": 4.216867469879518,
"grad_norm": 12.289281845092773,
"learning_rate": 0.00012043229733412636,
"loss": 4.0815,
"step": 700
},
{
"epoch": 4.27710843373494,
"grad_norm": 16.690227508544922,
"learning_rate": 0.0001202807331812551,
"loss": 4.2312,
"step": 710
},
{
"epoch": 4.337349397590361,
"grad_norm": 88.23700714111328,
"learning_rate": 0.00012012679388875441,
"loss": 4.0464,
"step": 720
},
{
"epoch": 4.397590361445783,
"grad_norm": 150.62051391601562,
"learning_rate": 0.00011997048578446568,
"loss": 4.5441,
"step": 730
},
{
"epoch": 4.457831325301205,
"grad_norm": 50.538490295410156,
"learning_rate": 0.00011981181529360282,
"loss": 3.5866,
"step": 740
},
{
"epoch": 4.518072289156627,
"grad_norm": 619.7337646484375,
"learning_rate": 0.00011965078893848828,
"loss": 4.3225,
"step": 750
},
{
"epoch": 4.518072289156627,
"eval_loss": 4.134085178375244,
"eval_runtime": 3.8365,
"eval_samples_per_second": 9.644,
"eval_steps_per_second": 9.644,
"step": 750
},
{
"epoch": 4.578313253012048,
"grad_norm": 13.040363311767578,
"learning_rate": 0.00011948741333828481,
"loss": 4.1669,
"step": 760
},
{
"epoch": 4.63855421686747,
"grad_norm": 26.328935623168945,
"learning_rate": 0.00011932169520872344,
"loss": 3.5026,
"step": 770
},
{
"epoch": 4.698795180722891,
"grad_norm": 12.123993873596191,
"learning_rate": 0.00011915364136182738,
"loss": 3.7511,
"step": 780
},
{
"epoch": 4.759036144578313,
"grad_norm": 34.736244201660156,
"learning_rate": 0.0001189832587056321,
"loss": 3.8989,
"step": 790
},
{
"epoch": 4.8192771084337345,
"grad_norm": 50.64377212524414,
"learning_rate": 0.00011881055424390119,
"loss": 3.9904,
"step": 800
},
{
"epoch": 4.879518072289157,
"grad_norm": 31.657766342163086,
"learning_rate": 0.00011863553507583869,
"loss": 3.8077,
"step": 810
},
{
"epoch": 4.9397590361445785,
"grad_norm": 16.779949188232422,
"learning_rate": 0.00011845820839579708,
"loss": 3.9674,
"step": 820
},
{
"epoch": 5.0,
"grad_norm": 14.840353965759277,
"learning_rate": 0.00011827858149298162,
"loss": 4.0888,
"step": 830
},
{
"epoch": 5.0602409638554215,
"grad_norm": 61.29264450073242,
"learning_rate": 0.00011809666175115075,
"loss": 3.8837,
"step": 840
},
{
"epoch": 5.120481927710843,
"grad_norm": 683.336181640625,
"learning_rate": 0.00011791245664831251,
"loss": 4.0266,
"step": 850
},
{
"epoch": 5.180722891566265,
"grad_norm": 144.3917694091797,
"learning_rate": 0.0001177259737564172,
"loss": 4.3238,
"step": 860
},
{
"epoch": 5.240963855421687,
"grad_norm": 16.853086471557617,
"learning_rate": 0.00011753722074104613,
"loss": 3.8228,
"step": 870
},
{
"epoch": 5.301204819277109,
"grad_norm": 21.043733596801758,
"learning_rate": 0.00011734620536109644,
"loss": 4.0891,
"step": 880
},
{
"epoch": 5.36144578313253,
"grad_norm": 27.56604766845703,
"learning_rate": 0.00011715293546846223,
"loss": 4.0359,
"step": 890
},
{
"epoch": 5.421686746987952,
"grad_norm": 162.16494750976562,
"learning_rate": 0.00011695741900771184,
"loss": 4.3176,
"step": 900
},
{
"epoch": 5.421686746987952,
"eval_loss": 3.8886878490448,
"eval_runtime": 3.8151,
"eval_samples_per_second": 9.698,
"eval_steps_per_second": 9.698,
"step": 900
},
{
"epoch": 5.481927710843373,
"grad_norm": 65.58177185058594,
"learning_rate": 0.00011675966401576116,
"loss": 3.6477,
"step": 910
},
{
"epoch": 5.542168674698795,
"grad_norm": 80.09622955322266,
"learning_rate": 0.00011655967862154335,
"loss": 3.9318,
"step": 920
},
{
"epoch": 5.602409638554217,
"grad_norm": 21.606958389282227,
"learning_rate": 0.0001163574710456747,
"loss": 4.1184,
"step": 930
},
{
"epoch": 5.662650602409639,
"grad_norm": 139.5963134765625,
"learning_rate": 0.00011615304960011663,
"loss": 3.7183,
"step": 940
},
{
"epoch": 5.72289156626506,
"grad_norm": 36.027828216552734,
"learning_rate": 0.00011594642268783415,
"loss": 3.6794,
"step": 950
},
{
"epoch": 5.783132530120482,
"grad_norm": 8.31830883026123,
"learning_rate": 0.00011573759880245027,
"loss": 3.7807,
"step": 960
},
{
"epoch": 5.843373493975903,
"grad_norm": 14.214261054992676,
"learning_rate": 0.00011552658652789703,
"loss": 4.1366,
"step": 970
},
{
"epoch": 5.903614457831325,
"grad_norm": 14.497884750366211,
"learning_rate": 0.00011531339453806258,
"loss": 3.6411,
"step": 980
},
{
"epoch": 5.9638554216867465,
"grad_norm": 16.32625389099121,
"learning_rate": 0.00011509803159643458,
"loss": 4.0479,
"step": 990
},
{
"epoch": 6.024096385542169,
"grad_norm": 238.30307006835938,
"learning_rate": 0.00011488050655574003,
"loss": 3.9169,
"step": 1000
},
{
"epoch": 6.0843373493975905,
"grad_norm": 12.584395408630371,
"learning_rate": 0.00011466082835758141,
"loss": 3.6927,
"step": 1010
},
{
"epoch": 6.144578313253012,
"grad_norm": 34.47623825073242,
"learning_rate": 0.000114439006032069,
"loss": 3.524,
"step": 1020
},
{
"epoch": 6.204819277108434,
"grad_norm": 14.770285606384277,
"learning_rate": 0.00011421504869744978,
"loss": 3.6258,
"step": 1030
},
{
"epoch": 6.265060240963855,
"grad_norm": 24.838224411010742,
"learning_rate": 0.0001139889655597326,
"loss": 3.7966,
"step": 1040
},
{
"epoch": 6.325301204819277,
"grad_norm": 18.04807472229004,
"learning_rate": 0.00011376076591230974,
"loss": 4.263,
"step": 1050
},
{
"epoch": 6.325301204819277,
"eval_loss": 4.747550010681152,
"eval_runtime": 3.8135,
"eval_samples_per_second": 9.702,
"eval_steps_per_second": 9.702,
"step": 1050
},
{
"epoch": 6.385542168674699,
"grad_norm": 472.16766357421875,
"learning_rate": 0.00011353045913557492,
"loss": 4.1538,
"step": 1060
},
{
"epoch": 6.445783132530121,
"grad_norm": 19.558197021484375,
"learning_rate": 0.00011329805469653768,
"loss": 4.3733,
"step": 1070
},
{
"epoch": 6.506024096385542,
"grad_norm": 87.86190795898438,
"learning_rate": 0.00011306356214843422,
"loss": 3.6097,
"step": 1080
},
{
"epoch": 6.566265060240964,
"grad_norm": 12.482193946838379,
"learning_rate": 0.00011282699113033477,
"loss": 4.1259,
"step": 1090
},
{
"epoch": 6.626506024096385,
"grad_norm": 24.125751495361328,
"learning_rate": 0.00011258835136674729,
"loss": 3.9089,
"step": 1100
},
{
"epoch": 6.686746987951807,
"grad_norm": 46.368099212646484,
"learning_rate": 0.00011234765266721778,
"loss": 3.6971,
"step": 1110
},
{
"epoch": 6.746987951807229,
"grad_norm": 51.86185836791992,
"learning_rate": 0.00011210490492592703,
"loss": 3.8875,
"step": 1120
},
{
"epoch": 6.807228915662651,
"grad_norm": 27.235157012939453,
"learning_rate": 0.0001118601181212839,
"loss": 4.3133,
"step": 1130
},
{
"epoch": 6.867469879518072,
"grad_norm": 296.5423278808594,
"learning_rate": 0.00011161330231551515,
"loss": 4.1481,
"step": 1140
},
{
"epoch": 6.927710843373494,
"grad_norm": 11.731728553771973,
"learning_rate": 0.00011136446765425187,
"loss": 4.192,
"step": 1150
},
{
"epoch": 6.9879518072289155,
"grad_norm": 15.431469917297363,
"learning_rate": 0.00011111362436611234,
"loss": 3.8919,
"step": 1160
},
{
"epoch": 7.048192771084337,
"grad_norm": 98.80924987792969,
"learning_rate": 0.00011086078276228167,
"loss": 3.7787,
"step": 1170
},
{
"epoch": 7.108433734939759,
"grad_norm": 101.24623107910156,
"learning_rate": 0.00011060595323608789,
"loss": 3.8452,
"step": 1180
},
{
"epoch": 7.168674698795181,
"grad_norm": 71.53185272216797,
"learning_rate": 0.00011034914626257467,
"loss": 3.874,
"step": 1190
},
{
"epoch": 7.228915662650603,
"grad_norm": 66.60515594482422,
"learning_rate": 0.0001100903723980709,
"loss": 3.8769,
"step": 1200
},
{
"epoch": 7.228915662650603,
"eval_loss": 3.916496992111206,
"eval_runtime": 3.8065,
"eval_samples_per_second": 9.72,
"eval_steps_per_second": 9.72,
"step": 1200
},
{
"epoch": 7.289156626506024,
"grad_norm": 22.101329803466797,
"learning_rate": 0.00010982964227975658,
"loss": 3.9201,
"step": 1210
},
{
"epoch": 7.349397590361446,
"grad_norm": 14.925955772399902,
"learning_rate": 0.00010956696662522569,
"loss": 3.602,
"step": 1220
},
{
"epoch": 7.409638554216867,
"grad_norm": 713.195556640625,
"learning_rate": 0.00010930235623204551,
"loss": 3.8217,
"step": 1230
},
{
"epoch": 7.469879518072289,
"grad_norm": 51.607173919677734,
"learning_rate": 0.00010903582197731294,
"loss": 4.1866,
"step": 1240
},
{
"epoch": 7.530120481927711,
"grad_norm": 10.283899307250977,
"learning_rate": 0.00010876737481720722,
"loss": 3.7481,
"step": 1250
},
{
"epoch": 7.590361445783133,
"grad_norm": 257.76556396484375,
"learning_rate": 0.0001084970257865397,
"loss": 3.8203,
"step": 1260
},
{
"epoch": 7.650602409638554,
"grad_norm": 14.792612075805664,
"learning_rate": 0.00010822478599830008,
"loss": 3.6593,
"step": 1270
},
{
"epoch": 7.710843373493976,
"grad_norm": 303.11767578125,
"learning_rate": 0.00010795066664319983,
"loss": 3.753,
"step": 1280
},
{
"epoch": 7.771084337349397,
"grad_norm": 32.58333206176758,
"learning_rate": 0.00010767467898921197,
"loss": 3.6544,
"step": 1290
},
{
"epoch": 7.831325301204819,
"grad_norm": 17.639245986938477,
"learning_rate": 0.00010739683438110797,
"loss": 3.5928,
"step": 1300
},
{
"epoch": 7.891566265060241,
"grad_norm": 10.270660400390625,
"learning_rate": 0.00010711714423999145,
"loss": 3.7059,
"step": 1310
},
{
"epoch": 7.951807228915663,
"grad_norm": 16.441017150878906,
"learning_rate": 0.00010683562006282861,
"loss": 3.957,
"step": 1320
},
{
"epoch": 8.012048192771084,
"grad_norm": 24.458555221557617,
"learning_rate": 0.00010655227342197574,
"loss": 4.0428,
"step": 1330
},
{
"epoch": 8.072289156626505,
"grad_norm": 9.778626441955566,
"learning_rate": 0.00010626711596470343,
"loss": 4.0835,
"step": 1340
},
{
"epoch": 8.132530120481928,
"grad_norm": 12.588565826416016,
"learning_rate": 0.0001059801594127179,
"loss": 3.523,
"step": 1350
},
{
"epoch": 8.132530120481928,
"eval_loss": 3.8830435276031494,
"eval_runtime": 3.8155,
"eval_samples_per_second": 9.697,
"eval_steps_per_second": 9.697,
"step": 1350
},
{
"epoch": 8.19277108433735,
"grad_norm": 16.958955764770508,
"learning_rate": 0.00010569141556167905,
"loss": 3.8287,
"step": 1360
},
{
"epoch": 8.25301204819277,
"grad_norm": 21.932573318481445,
"learning_rate": 0.00010540089628071566,
"loss": 3.7502,
"step": 1370
},
{
"epoch": 8.313253012048193,
"grad_norm": 21.345739364624023,
"learning_rate": 0.00010510861351193747,
"loss": 4.3312,
"step": 1380
},
{
"epoch": 8.373493975903614,
"grad_norm": 11.423644065856934,
"learning_rate": 0.00010481457926994435,
"loss": 4.3564,
"step": 1390
},
{
"epoch": 8.433734939759036,
"grad_norm": 13.70189094543457,
"learning_rate": 0.0001045188056413323,
"loss": 3.2986,
"step": 1400
},
{
"epoch": 8.493975903614459,
"grad_norm": 17.401643753051758,
"learning_rate": 0.00010422130478419676,
"loss": 3.9109,
"step": 1410
},
{
"epoch": 8.55421686746988,
"grad_norm": 484.1435546875,
"learning_rate": 0.00010392208892763269,
"loss": 3.717,
"step": 1420
},
{
"epoch": 8.614457831325302,
"grad_norm": 23.173097610473633,
"learning_rate": 0.00010362117037123204,
"loss": 3.4596,
"step": 1430
},
{
"epoch": 8.674698795180722,
"grad_norm": 417.0069274902344,
"learning_rate": 0.00010331856148457803,
"loss": 3.722,
"step": 1440
},
{
"epoch": 8.734939759036145,
"grad_norm": 55.5108528137207,
"learning_rate": 0.00010301427470673678,
"loss": 3.665,
"step": 1450
},
{
"epoch": 8.795180722891565,
"grad_norm": 18.918964385986328,
"learning_rate": 0.00010270832254574588,
"loss": 3.6538,
"step": 1460
},
{
"epoch": 8.855421686746988,
"grad_norm": 12.27521800994873,
"learning_rate": 0.00010240071757810036,
"loss": 3.8557,
"step": 1470
},
{
"epoch": 8.91566265060241,
"grad_norm": 17.036134719848633,
"learning_rate": 0.00010209147244823564,
"loss": 3.448,
"step": 1480
},
{
"epoch": 8.975903614457831,
"grad_norm": 9.00616455078125,
"learning_rate": 0.00010178059986800773,
"loss": 3.754,
"step": 1490
},
{
"epoch": 9.036144578313253,
"grad_norm": 8.838912010192871,
"learning_rate": 0.00010146811261617085,
"loss": 3.9134,
"step": 1500
},
{
"epoch": 9.036144578313253,
"eval_loss": 3.791945695877075,
"eval_runtime": 3.8181,
"eval_samples_per_second": 9.691,
"eval_steps_per_second": 9.691,
"step": 1500
},
{
"epoch": 9.096385542168674,
"grad_norm": 14.983063697814941,
"learning_rate": 0.00010115402353785197,
"loss": 3.8232,
"step": 1510
},
{
"epoch": 9.156626506024097,
"grad_norm": 26.87819480895996,
"learning_rate": 0.00010083834554402292,
"loss": 3.7914,
"step": 1520
},
{
"epoch": 9.216867469879517,
"grad_norm": 18.9931583404541,
"learning_rate": 0.00010052109161096958,
"loss": 3.7873,
"step": 1530
},
{
"epoch": 9.27710843373494,
"grad_norm": 27333.392578125,
"learning_rate": 0.00010020227477975852,
"loss": 3.6831,
"step": 1540
},
{
"epoch": 9.337349397590362,
"grad_norm": 13.019943237304688,
"learning_rate": 9.9881908155701e-05,
"loss": 4.0784,
"step": 1550
},
{
"epoch": 9.397590361445783,
"grad_norm": 11.35181999206543,
"learning_rate": 9.956000490781411e-05,
"loss": 3.7456,
"step": 1560
},
{
"epoch": 9.457831325301205,
"grad_norm": 28.795074462890625,
"learning_rate": 9.923657826827957e-05,
"loss": 3.6005,
"step": 1570
},
{
"epoch": 9.518072289156626,
"grad_norm": 18.929288864135742,
"learning_rate": 9.891164153189976e-05,
"loss": 3.8908,
"step": 1580
},
{
"epoch": 9.578313253012048,
"grad_norm": 236.947509765625,
"learning_rate": 9.858520805555123e-05,
"loss": 3.7814,
"step": 1590
},
{
"epoch": 9.638554216867469,
"grad_norm": 30.308324813842773,
"learning_rate": 9.825729125763561e-05,
"loss": 3.8457,
"step": 1600
},
{
"epoch": 9.698795180722891,
"grad_norm": 41.033992767333984,
"learning_rate": 9.792790461752813e-05,
"loss": 3.4012,
"step": 1610
},
{
"epoch": 9.759036144578314,
"grad_norm": 25.953258514404297,
"learning_rate": 9.759706167502343e-05,
"loss": 3.5834,
"step": 1620
},
{
"epoch": 9.819277108433734,
"grad_norm": 15.013717651367188,
"learning_rate": 9.726477602977905e-05,
"loss": 3.6479,
"step": 1630
},
{
"epoch": 9.879518072289157,
"grad_norm": 39.97005081176758,
"learning_rate": 9.69310613407564e-05,
"loss": 3.3718,
"step": 1640
},
{
"epoch": 9.939759036144578,
"grad_norm": 13.676387786865234,
"learning_rate": 9.659593132565929e-05,
"loss": 3.6831,
"step": 1650
},
{
"epoch": 9.939759036144578,
"eval_loss": 3.70894193649292,
"eval_runtime": 3.8103,
"eval_samples_per_second": 9.711,
"eval_steps_per_second": 9.711,
"step": 1650
},
{
"epoch": 10.0,
"grad_norm": 30.147296905517578,
"learning_rate": 9.625939976037002e-05,
"loss": 3.4134,
"step": 1660
},
{
"epoch": 10.060240963855422,
"grad_norm": 130.5504150390625,
"learning_rate": 9.59214804783831e-05,
"loss": 3.4339,
"step": 1670
},
{
"epoch": 10.120481927710843,
"grad_norm": 8.657685279846191,
"learning_rate": 9.558218737023671e-05,
"loss": 4.0876,
"step": 1680
},
{
"epoch": 10.180722891566266,
"grad_norm": 48.8166618347168,
"learning_rate": 9.524153438294159e-05,
"loss": 3.4347,
"step": 1690
},
{
"epoch": 10.240963855421686,
"grad_norm": 15.538662910461426,
"learning_rate": 9.489953551940783e-05,
"loss": 3.7002,
"step": 1700
},
{
"epoch": 10.301204819277109,
"grad_norm": 30.31039047241211,
"learning_rate": 9.455620483786914e-05,
"loss": 3.3832,
"step": 1710
},
{
"epoch": 10.36144578313253,
"grad_norm": 118.9944839477539,
"learning_rate": 9.421155645130514e-05,
"loss": 3.3828,
"step": 1720
},
{
"epoch": 10.421686746987952,
"grad_norm": 12.192248344421387,
"learning_rate": 9.38656045268611e-05,
"loss": 3.6482,
"step": 1730
},
{
"epoch": 10.481927710843374,
"grad_norm": 18.287357330322266,
"learning_rate": 9.351836328526563e-05,
"loss": 3.3905,
"step": 1740
},
{
"epoch": 10.542168674698795,
"grad_norm": 267.9325866699219,
"learning_rate": 9.316984700024612e-05,
"loss": 3.7533,
"step": 1750
},
{
"epoch": 10.602409638554217,
"grad_norm": 542.4744262695312,
"learning_rate": 9.2820069997942e-05,
"loss": 3.8873,
"step": 1760
},
{
"epoch": 10.662650602409638,
"grad_norm": 16.155866622924805,
"learning_rate": 9.246904665631588e-05,
"loss": 3.6396,
"step": 1770
},
{
"epoch": 10.72289156626506,
"grad_norm": 7662.2783203125,
"learning_rate": 9.211679140456242e-05,
"loss": 3.1209,
"step": 1780
},
{
"epoch": 10.783132530120483,
"grad_norm": 40.10587692260742,
"learning_rate": 9.176331872251536e-05,
"loss": 3.4137,
"step": 1790
},
{
"epoch": 10.843373493975903,
"grad_norm": 12.35087776184082,
"learning_rate": 9.140864314005222e-05,
"loss": 3.6861,
"step": 1800
},
{
"epoch": 10.843373493975903,
"eval_loss": 3.865417957305908,
"eval_runtime": 3.8175,
"eval_samples_per_second": 9.692,
"eval_steps_per_second": 9.692,
"step": 1800
},
{
"epoch": 10.903614457831326,
"grad_norm": 36.35682678222656,
"learning_rate": 9.105277923649698e-05,
"loss": 3.4116,
"step": 1810
},
{
"epoch": 10.963855421686747,
"grad_norm": 9.996143341064453,
"learning_rate": 9.06957416400209e-05,
"loss": 3.5382,
"step": 1820
},
{
"epoch": 11.024096385542169,
"grad_norm": 12.074798583984375,
"learning_rate": 9.03375450270412e-05,
"loss": 3.8299,
"step": 1830
},
{
"epoch": 11.08433734939759,
"grad_norm": 14.15644359588623,
"learning_rate": 8.997820412161764e-05,
"loss": 3.4951,
"step": 1840
},
{
"epoch": 11.144578313253012,
"grad_norm": 48.67642593383789,
"learning_rate": 8.961773369484738e-05,
"loss": 3.4306,
"step": 1850
},
{
"epoch": 11.204819277108435,
"grad_norm": 18.517230987548828,
"learning_rate": 8.925614856425786e-05,
"loss": 3.451,
"step": 1860
},
{
"epoch": 11.265060240963855,
"grad_norm": 11.237759590148926,
"learning_rate": 8.88934635931975e-05,
"loss": 3.4937,
"step": 1870
},
{
"epoch": 11.325301204819278,
"grad_norm": 14.1367826461792,
"learning_rate": 8.852969369022494e-05,
"loss": 3.6886,
"step": 1880
},
{
"epoch": 11.385542168674698,
"grad_norm": 9.110208511352539,
"learning_rate": 8.816485380849613e-05,
"loss": 3.3208,
"step": 1890
},
{
"epoch": 11.44578313253012,
"grad_norm": 18.82927703857422,
"learning_rate": 8.779895894514961e-05,
"loss": 3.499,
"step": 1900
},
{
"epoch": 11.506024096385541,
"grad_norm": 156.17605590820312,
"learning_rate": 8.743202414069011e-05,
"loss": 3.2002,
"step": 1910
},
{
"epoch": 11.566265060240964,
"grad_norm": 14.175848960876465,
"learning_rate": 8.706406447837023e-05,
"loss": 3.7181,
"step": 1920
},
{
"epoch": 11.626506024096386,
"grad_norm": 13.365341186523438,
"learning_rate": 8.669509508357052e-05,
"loss": 3.3462,
"step": 1930
},
{
"epoch": 11.686746987951807,
"grad_norm": 20.62955093383789,
"learning_rate": 8.632513112317761e-05,
"loss": 3.4835,
"step": 1940
},
{
"epoch": 11.74698795180723,
"grad_norm": 21.724872589111328,
"learning_rate": 8.59541878049609e-05,
"loss": 3.4933,
"step": 1950
},
{
"epoch": 11.74698795180723,
"eval_loss": 3.6030540466308594,
"eval_runtime": 3.8181,
"eval_samples_per_second": 9.691,
"eval_steps_per_second": 9.691,
"step": 1950
},
{
"epoch": 11.80722891566265,
"grad_norm": 14.012041091918945,
"learning_rate": 8.558228037694728e-05,
"loss": 3.035,
"step": 1960
},
{
"epoch": 11.867469879518072,
"grad_norm": 8.956927299499512,
"learning_rate": 8.520942412679447e-05,
"loss": 3.3299,
"step": 1970
},
{
"epoch": 11.927710843373493,
"grad_norm": 8.330857276916504,
"learning_rate": 8.483563438116257e-05,
"loss": 3.1257,
"step": 1980
},
{
"epoch": 11.987951807228916,
"grad_norm": 10.927474021911621,
"learning_rate": 8.446092650508393e-05,
"loss": 4.1444,
"step": 1990
},
{
"epoch": 12.048192771084338,
"grad_norm": 10.727124214172363,
"learning_rate": 8.408531590133172e-05,
"loss": 3.3155,
"step": 2000
},
{
"epoch": 12.108433734939759,
"grad_norm": 10.256277084350586,
"learning_rate": 8.370881800978673e-05,
"loss": 3.0151,
"step": 2010
},
{
"epoch": 12.168674698795181,
"grad_norm": 61.829872131347656,
"learning_rate": 8.333144830680262e-05,
"loss": 3.1291,
"step": 2020
},
{
"epoch": 12.228915662650602,
"grad_norm": 17.653553009033203,
"learning_rate": 8.29532223045698e-05,
"loss": 3.2804,
"step": 2030
},
{
"epoch": 12.289156626506024,
"grad_norm": 13.582658767700195,
"learning_rate": 8.257415555047785e-05,
"loss": 3.2628,
"step": 2040
},
{
"epoch": 12.349397590361447,
"grad_norm": 9.667671203613281,
"learning_rate": 8.21942636264763e-05,
"loss": 3.4144,
"step": 2050
},
{
"epoch": 12.409638554216867,
"grad_norm": 3135.18798828125,
"learning_rate": 8.181356214843422e-05,
"loss": 3.3164,
"step": 2060
},
{
"epoch": 12.46987951807229,
"grad_norm": 17.966447830200195,
"learning_rate": 8.143206676549826e-05,
"loss": 3.2951,
"step": 2070
},
{
"epoch": 12.53012048192771,
"grad_norm": 10.217341423034668,
"learning_rate": 8.10497931594494e-05,
"loss": 3.2733,
"step": 2080
},
{
"epoch": 12.590361445783133,
"grad_norm": 16.07942771911621,
"learning_rate": 8.066675704405836e-05,
"loss": 3.311,
"step": 2090
},
{
"epoch": 12.650602409638553,
"grad_norm": 771.7515869140625,
"learning_rate": 8.028297416443952e-05,
"loss": 3.3552,
"step": 2100
},
{
"epoch": 12.650602409638553,
"eval_loss": 3.570127010345459,
"eval_runtime": 3.7827,
"eval_samples_per_second": 9.781,
"eval_steps_per_second": 9.781,
"step": 2100
},
{
"epoch": 12.710843373493976,
"grad_norm": 14.326518058776855,
"learning_rate": 7.989846029640397e-05,
"loss": 3.6309,
"step": 2110
},
{
"epoch": 12.771084337349398,
"grad_norm": 19.646242141723633,
"learning_rate": 7.951323124581069e-05,
"loss": 3.0986,
"step": 2120
},
{
"epoch": 12.831325301204819,
"grad_norm": 49.34778594970703,
"learning_rate": 7.91273028479172e-05,
"loss": 3.486,
"step": 2130
},
{
"epoch": 12.891566265060241,
"grad_norm": 21.21525001525879,
"learning_rate": 7.874069096672831e-05,
"loss": 3.5709,
"step": 2140
},
{
"epoch": 12.951807228915662,
"grad_norm": 12.75536060333252,
"learning_rate": 7.83534114943442e-05,
"loss": 3.5097,
"step": 2150
},
{
"epoch": 13.012048192771084,
"grad_norm": 11.8561372756958,
"learning_rate": 7.796548035030715e-05,
"loss": 3.7147,
"step": 2160
},
{
"epoch": 13.072289156626505,
"grad_norm": 7.640058994293213,
"learning_rate": 7.757691348094703e-05,
"loss": 3.2838,
"step": 2170
},
{
"epoch": 13.132530120481928,
"grad_norm": 1143.57177734375,
"learning_rate": 7.718772685872595e-05,
"loss": 3.5913,
"step": 2180
},
{
"epoch": 13.19277108433735,
"grad_norm": 96.58565521240234,
"learning_rate": 7.679793648158159e-05,
"loss": 3.0851,
"step": 2190
},
{
"epoch": 13.25301204819277,
"grad_norm": 35.802677154541016,
"learning_rate": 7.640755837226965e-05,
"loss": 3.1956,
"step": 2200
},
{
"epoch": 13.313253012048193,
"grad_norm": 665.9241333007812,
"learning_rate": 7.601660857770522e-05,
"loss": 3.1286,
"step": 2210
},
{
"epoch": 13.373493975903614,
"grad_norm": 40.26634979248047,
"learning_rate": 7.562510316830308e-05,
"loss": 3.2252,
"step": 2220
},
{
"epoch": 13.433734939759036,
"grad_norm": 34.76996994018555,
"learning_rate": 7.523305823731723e-05,
"loss": 3.152,
"step": 2230
},
{
"epoch": 13.493975903614459,
"grad_norm": 308.2239990234375,
"learning_rate": 7.484048990017919e-05,
"loss": 3.2297,
"step": 2240
},
{
"epoch": 13.55421686746988,
"grad_norm": 39.217891693115234,
"learning_rate": 7.444741429383578e-05,
"loss": 3.4528,
"step": 2250
},
{
"epoch": 13.55421686746988,
"eval_loss": 3.5550057888031006,
"eval_runtime": 3.8001,
"eval_samples_per_second": 9.737,
"eval_steps_per_second": 9.737,
"step": 2250
},
{
"epoch": 13.614457831325302,
"grad_norm": 11.670994758605957,
"learning_rate": 7.405384757608555e-05,
"loss": 3.4062,
"step": 2260
},
{
"epoch": 13.674698795180722,
"grad_norm": 15.571825981140137,
"learning_rate": 7.36598059249148e-05,
"loss": 3.3256,
"step": 2270
},
{
"epoch": 13.734939759036145,
"grad_norm": 21.08329963684082,
"learning_rate": 7.326530553783243e-05,
"loss": 3.4009,
"step": 2280
},
{
"epoch": 13.795180722891565,
"grad_norm": 957.3087768554688,
"learning_rate": 7.287036263120425e-05,
"loss": 3.3748,
"step": 2290
},
{
"epoch": 13.855421686746988,
"grad_norm": 20.058975219726562,
"learning_rate": 7.247499343958621e-05,
"loss": 3.5959,
"step": 2300
},
{
"epoch": 13.91566265060241,
"grad_norm": 8.038925170898438,
"learning_rate": 7.207921421505724e-05,
"loss": 3.4845,
"step": 2310
},
{
"epoch": 13.975903614457831,
"grad_norm": 12.520662307739258,
"learning_rate": 7.168304122655113e-05,
"loss": 3.0425,
"step": 2320
},
{
"epoch": 14.036144578313253,
"grad_norm": 15.789307594299316,
"learning_rate": 7.128649075918768e-05,
"loss": 3.3566,
"step": 2330
},
{
"epoch": 14.096385542168674,
"grad_norm": 14.215753555297852,
"learning_rate": 7.088957911360347e-05,
"loss": 3.3938,
"step": 2340
},
{
"epoch": 14.156626506024097,
"grad_norm": 51.611045837402344,
"learning_rate": 7.049232260528163e-05,
"loss": 3.3809,
"step": 2350
},
{
"epoch": 14.216867469879517,
"grad_norm": 202.7542266845703,
"learning_rate": 7.009473756388128e-05,
"loss": 3.3729,
"step": 2360
},
{
"epoch": 14.27710843373494,
"grad_norm": 10.259781837463379,
"learning_rate": 6.969684033256622e-05,
"loss": 4.1525,
"step": 2370
},
{
"epoch": 14.337349397590362,
"grad_norm": 23.243528366088867,
"learning_rate": 6.92986472673332e-05,
"loss": 3.5824,
"step": 2380
},
{
"epoch": 14.397590361445783,
"grad_norm": 9.191352844238281,
"learning_rate": 6.890017473633946e-05,
"loss": 3.3591,
"step": 2390
},
{
"epoch": 14.457831325301205,
"grad_norm": 16.033109664916992,
"learning_rate": 6.850143911923011e-05,
"loss": 3.6075,
"step": 2400
},
{
"epoch": 14.457831325301205,
"eval_loss": 3.5646708011627197,
"eval_runtime": 3.7918,
"eval_samples_per_second": 9.758,
"eval_steps_per_second": 9.758,
"step": 2400
},
{
"epoch": 14.518072289156626,
"grad_norm": 10.289210319519043,
"learning_rate": 6.81024568064646e-05,
"loss": 3.2997,
"step": 2410
},
{
"epoch": 14.578313253012048,
"grad_norm": 10.703330039978027,
"learning_rate": 6.770324419864309e-05,
"loss": 3.1972,
"step": 2420
},
{
"epoch": 14.638554216867469,
"grad_norm": 277.9293212890625,
"learning_rate": 6.73038177058323e-05,
"loss": 3.6841,
"step": 2430
},
{
"epoch": 14.698795180722891,
"grad_norm": 108.26596069335938,
"learning_rate": 6.690419374689087e-05,
"loss": 3.3921,
"step": 2440
},
{
"epoch": 14.759036144578314,
"grad_norm": 201.3561248779297,
"learning_rate": 6.650438874879456e-05,
"loss": 3.4155,
"step": 2450
},
{
"epoch": 14.819277108433734,
"grad_norm": 68.6462631225586,
"learning_rate": 6.61044191459609e-05,
"loss": 3.2985,
"step": 2460
},
{
"epoch": 14.879518072289157,
"grad_norm": 68.2948989868164,
"learning_rate": 6.57043013795737e-05,
"loss": 3.0743,
"step": 2470
},
{
"epoch": 14.939759036144578,
"grad_norm": 47.222633361816406,
"learning_rate": 6.530405189690719e-05,
"loss": 3.1893,
"step": 2480
},
{
"epoch": 15.0,
"grad_norm": 15.256728172302246,
"learning_rate": 6.49036871506499e-05,
"loss": 3.3537,
"step": 2490
},
{
"epoch": 15.060240963855422,
"grad_norm": 18.81123161315918,
"learning_rate": 6.450322359822846e-05,
"loss": 3.3208,
"step": 2500
},
{
"epoch": 15.120481927710843,
"grad_norm": 11.176192283630371,
"learning_rate": 6.410267770113098e-05,
"loss": 3.1341,
"step": 2510
},
{
"epoch": 15.180722891566266,
"grad_norm": 39.81761932373047,
"learning_rate": 6.370206592423045e-05,
"loss": 2.8517,
"step": 2520
},
{
"epoch": 15.240963855421686,
"grad_norm": 11.802648544311523,
"learning_rate": 6.330140473510796e-05,
"loss": 3.1888,
"step": 2530
},
{
"epoch": 15.301204819277109,
"grad_norm": 51.57283401489258,
"learning_rate": 6.29007106033757e-05,
"loss": 3.4263,
"step": 2540
},
{
"epoch": 15.36144578313253,
"grad_norm": 27.341453552246094,
"learning_rate": 6.25e-05,
"loss": 3.5018,
"step": 2550
},
{
"epoch": 15.36144578313253,
"eval_loss": 3.581845283508301,
"eval_runtime": 3.7845,
"eval_samples_per_second": 9.777,
"eval_steps_per_second": 9.777,
"step": 2550
},
{
"epoch": 15.421686746987952,
"grad_norm": 34.628868103027344,
"learning_rate": 6.20992893966243e-05,
"loss": 3.4468,
"step": 2560
},
{
"epoch": 15.481927710843374,
"grad_norm": 51.63805389404297,
"learning_rate": 6.169859526489204e-05,
"loss": 3.2905,
"step": 2570
},
{
"epoch": 15.542168674698795,
"grad_norm": 1207.8072509765625,
"learning_rate": 6.129793407576955e-05,
"loss": 3.7135,
"step": 2580
},
{
"epoch": 15.602409638554217,
"grad_norm": 88.09565734863281,
"learning_rate": 6.089732229886904e-05,
"loss": 3.9768,
"step": 2590
},
{
"epoch": 15.662650602409638,
"grad_norm": 43.92035675048828,
"learning_rate": 6.049677640177155e-05,
"loss": 3.7418,
"step": 2600
},
{
"epoch": 15.72289156626506,
"grad_norm": 158.6419219970703,
"learning_rate": 6.00963128493501e-05,
"loss": 3.327,
"step": 2610
},
{
"epoch": 15.783132530120483,
"grad_norm": 271.9347229003906,
"learning_rate": 5.969594810309284e-05,
"loss": 3.4304,
"step": 2620
},
{
"epoch": 15.843373493975903,
"grad_norm": 83.00497436523438,
"learning_rate": 5.929569862042631e-05,
"loss": 3.822,
"step": 2630
},
{
"epoch": 15.903614457831326,
"grad_norm": 201.45443725585938,
"learning_rate": 5.889558085403911e-05,
"loss": 3.6166,
"step": 2640
},
{
"epoch": 15.963855421686747,
"grad_norm": 378.865478515625,
"learning_rate": 5.849561125120545e-05,
"loss": 3.4505,
"step": 2650
},
{
"epoch": 16.02409638554217,
"grad_norm": 24.07649040222168,
"learning_rate": 5.809580625310912e-05,
"loss": 3.3913,
"step": 2660
},
{
"epoch": 16.08433734939759,
"grad_norm": 261.75494384765625,
"learning_rate": 5.769618229416773e-05,
"loss": 3.1857,
"step": 2670
},
{
"epoch": 16.14457831325301,
"grad_norm": 362.3284912109375,
"learning_rate": 5.7296755801356926e-05,
"loss": 3.448,
"step": 2680
},
{
"epoch": 16.204819277108435,
"grad_norm": 675.8724975585938,
"learning_rate": 5.6897543193535414e-05,
"loss": 3.5349,
"step": 2690
},
{
"epoch": 16.265060240963855,
"grad_norm": 88.61731719970703,
"learning_rate": 5.649856088076989e-05,
"loss": 3.8733,
"step": 2700
},
{
"epoch": 16.265060240963855,
"eval_loss": 3.6278464794158936,
"eval_runtime": 3.8328,
"eval_samples_per_second": 9.653,
"eval_steps_per_second": 9.653,
"step": 2700
},
{
"epoch": 16.325301204819276,
"grad_norm": 96.55177307128906,
"learning_rate": 5.609982526366054e-05,
"loss": 3.2823,
"step": 2710
},
{
"epoch": 16.3855421686747,
"grad_norm": 44.386287689208984,
"learning_rate": 5.570135273266683e-05,
"loss": 3.2381,
"step": 2720
},
{
"epoch": 16.44578313253012,
"grad_norm": 57.28308868408203,
"learning_rate": 5.53031596674338e-05,
"loss": 3.1898,
"step": 2730
},
{
"epoch": 16.50602409638554,
"grad_norm": 5471.37109375,
"learning_rate": 5.490526243611873e-05,
"loss": 3.1008,
"step": 2740
},
{
"epoch": 16.566265060240966,
"grad_norm": 73.38491821289062,
"learning_rate": 5.450767739471837e-05,
"loss": 3.4751,
"step": 2750
},
{
"epoch": 16.626506024096386,
"grad_norm": 94.40169525146484,
"learning_rate": 5.411042088639655e-05,
"loss": 3.2401,
"step": 2760
},
{
"epoch": 16.686746987951807,
"grad_norm": 31.844371795654297,
"learning_rate": 5.371350924081234e-05,
"loss": 3.5455,
"step": 2770
},
{
"epoch": 16.746987951807228,
"grad_norm": 58.31970977783203,
"learning_rate": 5.331695877344888e-05,
"loss": 3.489,
"step": 2780
},
{
"epoch": 16.80722891566265,
"grad_norm": 47.033939361572266,
"learning_rate": 5.292078578494275e-05,
"loss": 3.007,
"step": 2790
},
{
"epoch": 16.867469879518072,
"grad_norm": 117.59281921386719,
"learning_rate": 5.2525006560413816e-05,
"loss": 3.6114,
"step": 2800
},
{
"epoch": 16.927710843373493,
"grad_norm": 160.5786590576172,
"learning_rate": 5.212963736879578e-05,
"loss": 3.3447,
"step": 2810
},
{
"epoch": 16.987951807228917,
"grad_norm": 375.9039611816406,
"learning_rate": 5.173469446216757e-05,
"loss": 3.5652,
"step": 2820
},
{
"epoch": 17.048192771084338,
"grad_norm": 368.878662109375,
"learning_rate": 5.134019407508521e-05,
"loss": 3.4137,
"step": 2830
},
{
"epoch": 17.10843373493976,
"grad_norm": 20.2479305267334,
"learning_rate": 5.0946152423914456e-05,
"loss": 3.4078,
"step": 2840
},
{
"epoch": 17.16867469879518,
"grad_norm": 300.1862487792969,
"learning_rate": 5.0552585706164246e-05,
"loss": 3.1655,
"step": 2850
},
{
"epoch": 17.16867469879518,
"eval_loss": 3.585510730743408,
"eval_runtime": 3.839,
"eval_samples_per_second": 9.638,
"eval_steps_per_second": 9.638,
"step": 2850
},
{
"epoch": 17.228915662650603,
"grad_norm": 170.2447052001953,
"learning_rate": 5.015951009982081e-05,
"loss": 3.5166,
"step": 2860
},
{
"epoch": 17.289156626506024,
"grad_norm": 696.30712890625,
"learning_rate": 4.976694176268278e-05,
"loss": 3.5968,
"step": 2870
},
{
"epoch": 17.349397590361445,
"grad_norm": 68.88721466064453,
"learning_rate": 4.937489683169692e-05,
"loss": 3.0614,
"step": 2880
},
{
"epoch": 17.40963855421687,
"grad_norm": 43.65079116821289,
"learning_rate": 4.8983391422294786e-05,
"loss": 3.2525,
"step": 2890
},
{
"epoch": 17.46987951807229,
"grad_norm": 19.629438400268555,
"learning_rate": 4.8592441627730355e-05,
"loss": 3.1851,
"step": 2900
},
{
"epoch": 17.53012048192771,
"grad_norm": 176.78993225097656,
"learning_rate": 4.820206351841842e-05,
"loss": 3.1973,
"step": 2910
},
{
"epoch": 17.59036144578313,
"grad_norm": 122.77618408203125,
"learning_rate": 4.781227314127405e-05,
"loss": 3.1958,
"step": 2920
},
{
"epoch": 17.650602409638555,
"grad_norm": 128.48361206054688,
"learning_rate": 4.7423086519052966e-05,
"loss": 3.2149,
"step": 2930
},
{
"epoch": 17.710843373493976,
"grad_norm": 757.3467407226562,
"learning_rate": 4.703451964969287e-05,
"loss": 3.136,
"step": 2940
},
{
"epoch": 17.771084337349397,
"grad_norm": 31.354238510131836,
"learning_rate": 4.66465885056558e-05,
"loss": 3.4783,
"step": 2950
},
{
"epoch": 17.83132530120482,
"grad_norm": 38.50337600708008,
"learning_rate": 4.62593090332717e-05,
"loss": 3.7484,
"step": 2960
},
{
"epoch": 17.89156626506024,
"grad_norm": 25.400075912475586,
"learning_rate": 4.587269715208281e-05,
"loss": 3.228,
"step": 2970
},
{
"epoch": 17.951807228915662,
"grad_norm": 33.36684036254883,
"learning_rate": 4.5486768754189305e-05,
"loss": 3.1605,
"step": 2980
},
{
"epoch": 18.012048192771083,
"grad_norm": 55.69667434692383,
"learning_rate": 4.510153970359606e-05,
"loss": 3.2666,
"step": 2990
},
{
"epoch": 18.072289156626507,
"grad_norm": 34.30318069458008,
"learning_rate": 4.4717025835560476e-05,
"loss": 3.7476,
"step": 3000
},
{
"epoch": 18.072289156626507,
"eval_loss": 3.4825525283813477,
"eval_runtime": 3.828,
"eval_samples_per_second": 9.666,
"eval_steps_per_second": 9.666,
"step": 3000
},
{
"epoch": 18.132530120481928,
"grad_norm": 50.15314865112305,
"learning_rate": 4.433324295594166e-05,
"loss": 3.2661,
"step": 3010
},
{
"epoch": 18.19277108433735,
"grad_norm": 246.28211975097656,
"learning_rate": 4.3950206840550585e-05,
"loss": 3.3131,
"step": 3020
},
{
"epoch": 18.253012048192772,
"grad_norm": 21.07671546936035,
"learning_rate": 4.3567933234501746e-05,
"loss": 3.6945,
"step": 3030
},
{
"epoch": 18.313253012048193,
"grad_norm": 45.0478401184082,
"learning_rate": 4.318643785156579e-05,
"loss": 3.2986,
"step": 3040
},
{
"epoch": 18.373493975903614,
"grad_norm": 26.682987213134766,
"learning_rate": 4.280573637352371e-05,
"loss": 3.2984,
"step": 3050
},
{
"epoch": 18.433734939759034,
"grad_norm": 38.422367095947266,
"learning_rate": 4.242584444952216e-05,
"loss": 3.2741,
"step": 3060
},
{
"epoch": 18.49397590361446,
"grad_norm": 36.31643295288086,
"learning_rate": 4.204677769543019e-05,
"loss": 3.4015,
"step": 3070
},
{
"epoch": 18.55421686746988,
"grad_norm": 103.95494842529297,
"learning_rate": 4.16685516931974e-05,
"loss": 3.4717,
"step": 3080
},
{
"epoch": 18.6144578313253,
"grad_norm": 2644.721435546875,
"learning_rate": 4.1291181990213286e-05,
"loss": 3.1046,
"step": 3090
},
{
"epoch": 18.674698795180724,
"grad_norm": 1700.5968017578125,
"learning_rate": 4.0914684098668286e-05,
"loss": 3.0716,
"step": 3100
},
{
"epoch": 18.734939759036145,
"grad_norm": 34.94669723510742,
"learning_rate": 4.053907349491608e-05,
"loss": 3.1178,
"step": 3110
},
{
"epoch": 18.795180722891565,
"grad_norm": 93.29631042480469,
"learning_rate": 4.016436561883746e-05,
"loss": 3.1197,
"step": 3120
},
{
"epoch": 18.855421686746986,
"grad_norm": 104.23704528808594,
"learning_rate": 3.979057587320554e-05,
"loss": 3.6272,
"step": 3130
},
{
"epoch": 18.91566265060241,
"grad_norm": 27.856496810913086,
"learning_rate": 3.941771962305274e-05,
"loss": 3.1739,
"step": 3140
},
{
"epoch": 18.97590361445783,
"grad_norm": 102.71600341796875,
"learning_rate": 3.9045812195039125e-05,
"loss": 3.2773,
"step": 3150
},
{
"epoch": 18.97590361445783,
"eval_loss": 3.497561454772949,
"eval_runtime": 3.8383,
"eval_samples_per_second": 9.64,
"eval_steps_per_second": 9.64,
"step": 3150
},
{
"epoch": 19.03614457831325,
"grad_norm": 8.53116226196289,
"learning_rate": 3.8674868876822395e-05,
"loss": 2.9576,
"step": 3160
},
{
"epoch": 19.096385542168676,
"grad_norm": 53.600685119628906,
"learning_rate": 3.83049049164295e-05,
"loss": 3.1658,
"step": 3170
},
{
"epoch": 19.156626506024097,
"grad_norm": 42.970123291015625,
"learning_rate": 3.793593552162978e-05,
"loss": 3.075,
"step": 3180
},
{
"epoch": 19.216867469879517,
"grad_norm": 11.737751960754395,
"learning_rate": 3.75679758593099e-05,
"loss": 3.3461,
"step": 3190
},
{
"epoch": 19.27710843373494,
"grad_norm": 36.907981872558594,
"learning_rate": 3.720104105485039e-05,
"loss": 3.2005,
"step": 3200
},
{
"epoch": 19.337349397590362,
"grad_norm": 155.0701141357422,
"learning_rate": 3.6835146191503885e-05,
"loss": 3.1341,
"step": 3210
},
{
"epoch": 19.397590361445783,
"grad_norm": 32.42080307006836,
"learning_rate": 3.647030630977508e-05,
"loss": 3.41,
"step": 3220
},
{
"epoch": 19.457831325301203,
"grad_norm": 61.9431266784668,
"learning_rate": 3.6106536406802524e-05,
"loss": 3.0682,
"step": 3230
},
{
"epoch": 19.518072289156628,
"grad_norm": 218.18533325195312,
"learning_rate": 3.5743851435742176e-05,
"loss": 3.3301,
"step": 3240
},
{
"epoch": 19.57831325301205,
"grad_norm": 34.84703826904297,
"learning_rate": 3.538226630515262e-05,
"loss": 3.1957,
"step": 3250
},
{
"epoch": 19.63855421686747,
"grad_norm": 75.88587188720703,
"learning_rate": 3.502179587838238e-05,
"loss": 3.1191,
"step": 3260
},
{
"epoch": 19.698795180722893,
"grad_norm": 133.42489624023438,
"learning_rate": 3.46624549729588e-05,
"loss": 3.3155,
"step": 3270
},
{
"epoch": 19.759036144578314,
"grad_norm": 13.925497055053711,
"learning_rate": 3.430425835997908e-05,
"loss": 2.8935,
"step": 3280
},
{
"epoch": 19.819277108433734,
"grad_norm": 31.686290740966797,
"learning_rate": 3.394722076350302e-05,
"loss": 3.5147,
"step": 3290
},
{
"epoch": 19.879518072289155,
"grad_norm": 26.684673309326172,
"learning_rate": 3.359135685994781e-05,
"loss": 3.3202,
"step": 3300
},
{
"epoch": 19.879518072289155,
"eval_loss": 3.471327543258667,
"eval_runtime": 3.787,
"eval_samples_per_second": 9.77,
"eval_steps_per_second": 9.77,
"step": 3300
},
{
"epoch": 19.93975903614458,
"grad_norm": 32.881187438964844,
"learning_rate": 3.3236681277484654e-05,
"loss": 2.8533,
"step": 3310
},
{
"epoch": 20.0,
"grad_norm": 11.029044151306152,
"learning_rate": 3.2883208595437584e-05,
"loss": 3.1859,
"step": 3320
},
{
"epoch": 20.06024096385542,
"grad_norm": 341.1537780761719,
"learning_rate": 3.2530953343684136e-05,
"loss": 3.1306,
"step": 3330
},
{
"epoch": 20.120481927710845,
"grad_norm": 20.780797958374023,
"learning_rate": 3.217993000205799e-05,
"loss": 3.2068,
"step": 3340
},
{
"epoch": 20.180722891566266,
"grad_norm": 5419.01123046875,
"learning_rate": 3.1830152999753903e-05,
"loss": 3.1233,
"step": 3350
},
{
"epoch": 20.240963855421686,
"grad_norm": 29.549694061279297,
"learning_rate": 3.148163671473439e-05,
"loss": 3.4924,
"step": 3360
},
{
"epoch": 20.301204819277107,
"grad_norm": 13.60914134979248,
"learning_rate": 3.113439547313892e-05,
"loss": 3.244,
"step": 3370
},
{
"epoch": 20.36144578313253,
"grad_norm": 34.05803680419922,
"learning_rate": 3.0788443548694874e-05,
"loss": 3.0187,
"step": 3380
},
{
"epoch": 20.42168674698795,
"grad_norm": 7.142261028289795,
"learning_rate": 3.0443795162130876e-05,
"loss": 3.1763,
"step": 3390
},
{
"epoch": 20.481927710843372,
"grad_norm": 10.070602416992188,
"learning_rate": 3.0100464480592185e-05,
"loss": 3.2928,
"step": 3400
},
{
"epoch": 20.542168674698797,
"grad_norm": 19.883346557617188,
"learning_rate": 2.9758465617058404e-05,
"loss": 2.9957,
"step": 3410
},
{
"epoch": 20.602409638554217,
"grad_norm": 14.350828170776367,
"learning_rate": 2.9417812629763285e-05,
"loss": 3.2999,
"step": 3420
},
{
"epoch": 20.662650602409638,
"grad_norm": 8.832179069519043,
"learning_rate": 2.9078519521616894e-05,
"loss": 3.115,
"step": 3430
},
{
"epoch": 20.72289156626506,
"grad_norm": 326.385498046875,
"learning_rate": 2.8740600239630002e-05,
"loss": 3.3029,
"step": 3440
},
{
"epoch": 20.783132530120483,
"grad_norm": 13.17289924621582,
"learning_rate": 2.8404068674340714e-05,
"loss": 3.0413,
"step": 3450
},
{
"epoch": 20.783132530120483,
"eval_loss": 3.4084925651550293,
"eval_runtime": 3.8274,
"eval_samples_per_second": 9.667,
"eval_steps_per_second": 9.667,
"step": 3450
},
{
"epoch": 20.843373493975903,
"grad_norm": 9.489799499511719,
"learning_rate": 2.80689386592436e-05,
"loss": 3.1958,
"step": 3460
},
{
"epoch": 20.903614457831324,
"grad_norm": 11.09453296661377,
"learning_rate": 2.7735223970220955e-05,
"loss": 3.0116,
"step": 3470
},
{
"epoch": 20.96385542168675,
"grad_norm": 285.0513916015625,
"learning_rate": 2.7402938324976576e-05,
"loss": 3.1415,
"step": 3480
},
{
"epoch": 21.02409638554217,
"grad_norm": 8.894499778747559,
"learning_rate": 2.70720953824719e-05,
"loss": 2.8106,
"step": 3490
},
{
"epoch": 21.08433734939759,
"grad_norm": 8.07825756072998,
"learning_rate": 2.674270874236441e-05,
"loss": 2.8643,
"step": 3500
},
{
"epoch": 21.14457831325301,
"grad_norm": 9.730963706970215,
"learning_rate": 2.64147919444488e-05,
"loss": 2.9905,
"step": 3510
},
{
"epoch": 21.204819277108435,
"grad_norm": 11.237829208374023,
"learning_rate": 2.6088358468100247e-05,
"loss": 3.6566,
"step": 3520
},
{
"epoch": 21.265060240963855,
"grad_norm": 10.315103530883789,
"learning_rate": 2.5763421731720435e-05,
"loss": 2.9594,
"step": 3530
},
{
"epoch": 21.325301204819276,
"grad_norm": 21.455280303955078,
"learning_rate": 2.5439995092185892e-05,
"loss": 3.1741,
"step": 3540
},
{
"epoch": 21.3855421686747,
"grad_norm": 275.41705322265625,
"learning_rate": 2.5118091844299e-05,
"loss": 3.0803,
"step": 3550
},
{
"epoch": 21.44578313253012,
"grad_norm": 12.709835052490234,
"learning_rate": 2.479772522024147e-05,
"loss": 2.9853,
"step": 3560
},
{
"epoch": 21.50602409638554,
"grad_norm": 27.6678466796875,
"learning_rate": 2.4478908389030427e-05,
"loss": 3.0706,
"step": 3570
},
{
"epoch": 21.566265060240966,
"grad_norm": 13.315126419067383,
"learning_rate": 2.41616544559771e-05,
"loss": 2.9522,
"step": 3580
},
{
"epoch": 21.626506024096386,
"grad_norm": 9.57907485961914,
"learning_rate": 2.3845976462148033e-05,
"loss": 3.0357,
"step": 3590
},
{
"epoch": 21.686746987951807,
"grad_norm": 8.764331817626953,
"learning_rate": 2.3531887383829157e-05,
"loss": 2.9487,
"step": 3600
},
{
"epoch": 21.686746987951807,
"eval_loss": 3.37250018119812,
"eval_runtime": 3.782,
"eval_samples_per_second": 9.783,
"eval_steps_per_second": 9.783,
"step": 3600
},
{
"epoch": 21.746987951807228,
"grad_norm": 11.81778621673584,
"learning_rate": 2.3219400131992273e-05,
"loss": 3.2796,
"step": 3610
},
{
"epoch": 21.80722891566265,
"grad_norm": 9.797389030456543,
"learning_rate": 2.2908527551764404e-05,
"loss": 3.2562,
"step": 3620
},
{
"epoch": 21.867469879518072,
"grad_norm": 10.367379188537598,
"learning_rate": 2.259928242189966e-05,
"loss": 2.996,
"step": 3630
},
{
"epoch": 21.927710843373493,
"grad_norm": 8.591730117797852,
"learning_rate": 2.2291677454254136e-05,
"loss": 3.1695,
"step": 3640
},
{
"epoch": 21.987951807228917,
"grad_norm": 84.82453155517578,
"learning_rate": 2.1985725293263237e-05,
"loss": 3.1198,
"step": 3650
},
{
"epoch": 22.048192771084338,
"grad_norm": 10.37941837310791,
"learning_rate": 2.1681438515421953e-05,
"loss": 3.0494,
"step": 3660
},
{
"epoch": 22.10843373493976,
"grad_norm": 167.1722869873047,
"learning_rate": 2.1378829628767965e-05,
"loss": 3.2033,
"step": 3670
},
{
"epoch": 22.16867469879518,
"grad_norm": 245.55747985839844,
"learning_rate": 2.1077911072367317e-05,
"loss": 3.3328,
"step": 3680
},
{
"epoch": 22.228915662650603,
"grad_norm": 12.856267929077148,
"learning_rate": 2.077869521580325e-05,
"loss": 2.7443,
"step": 3690
},
{
"epoch": 22.289156626506024,
"grad_norm": 8.544069290161133,
"learning_rate": 2.0481194358667695e-05,
"loss": 2.9283,
"step": 3700
},
{
"epoch": 22.349397590361445,
"grad_norm": 11.428658485412598,
"learning_rate": 2.018542073005567e-05,
"loss": 3.1091,
"step": 3710
},
{
"epoch": 22.40963855421687,
"grad_norm": 18.596616744995117,
"learning_rate": 1.9891386488062538e-05,
"loss": 2.944,
"step": 3720
},
{
"epoch": 22.46987951807229,
"grad_norm": 17.378009796142578,
"learning_rate": 1.959910371928436e-05,
"loss": 3.2485,
"step": 3730
},
{
"epoch": 22.53012048192771,
"grad_norm": 7.827014923095703,
"learning_rate": 1.930858443832096e-05,
"loss": 3.0034,
"step": 3740
},
{
"epoch": 22.59036144578313,
"grad_norm": 31.390344619750977,
"learning_rate": 1.90198405872821e-05,
"loss": 3.1377,
"step": 3750
},
{
"epoch": 22.59036144578313,
"eval_loss": 3.3621747493743896,
"eval_runtime": 3.7897,
"eval_samples_per_second": 9.763,
"eval_steps_per_second": 9.763,
"step": 3750
},
{
"epoch": 22.650602409638555,
"grad_norm": 19.31273651123047,
"learning_rate": 1.8732884035296582e-05,
"loss": 3.1508,
"step": 3760
},
{
"epoch": 22.710843373493976,
"grad_norm": 9.601163864135742,
"learning_rate": 1.844772657802428e-05,
"loss": 2.8004,
"step": 3770
},
{
"epoch": 22.771084337349397,
"grad_norm": 27.831907272338867,
"learning_rate": 1.8164379937171382e-05,
"loss": 2.8072,
"step": 3780
},
{
"epoch": 22.83132530120482,
"grad_norm": 15.658758163452148,
"learning_rate": 1.7882855760008547e-05,
"loss": 3.1902,
"step": 3790
},
{
"epoch": 22.89156626506024,
"grad_norm": 22.477460861206055,
"learning_rate": 1.760316561889203e-05,
"loss": 3.3437,
"step": 3800
},
{
"epoch": 22.951807228915662,
"grad_norm": 29.21653175354004,
"learning_rate": 1.7325321010788034e-05,
"loss": 3.0924,
"step": 3810
},
{
"epoch": 23.012048192771083,
"grad_norm": 19.53176498413086,
"learning_rate": 1.7049333356800167e-05,
"loss": 3.0771,
"step": 3820
},
{
"epoch": 23.072289156626507,
"grad_norm": 19.212852478027344,
"learning_rate": 1.6775214001699914e-05,
"loss": 2.7683,
"step": 3830
},
{
"epoch": 23.132530120481928,
"grad_norm": 34.5575065612793,
"learning_rate": 1.6502974213460316e-05,
"loss": 3.0162,
"step": 3840
},
{
"epoch": 23.19277108433735,
"grad_norm": 13.085418701171875,
"learning_rate": 1.623262518279279e-05,
"loss": 3.1395,
"step": 3850
},
{
"epoch": 23.253012048192772,
"grad_norm": 10.661376953125,
"learning_rate": 1.596417802268707e-05,
"loss": 3.0344,
"step": 3860
},
{
"epoch": 23.313253012048193,
"grad_norm": 15.066116333007812,
"learning_rate": 1.5697643767954488e-05,
"loss": 3.0326,
"step": 3870
},
{
"epoch": 23.373493975903614,
"grad_norm": 21.323190689086914,
"learning_rate": 1.543303337477432e-05,
"loss": 3.0845,
"step": 3880
},
{
"epoch": 23.433734939759034,
"grad_norm": 17.696609497070312,
"learning_rate": 1.517035772024343e-05,
"loss": 2.9169,
"step": 3890
},
{
"epoch": 23.49397590361446,
"grad_norm": 121.86737060546875,
"learning_rate": 1.49096276019291e-05,
"loss": 3.411,
"step": 3900
},
{
"epoch": 23.49397590361446,
"eval_loss": 3.3336081504821777,
"eval_runtime": 3.8199,
"eval_samples_per_second": 9.686,
"eval_steps_per_second": 9.686,
"step": 3900
},
{
"epoch": 23.55421686746988,
"grad_norm": 25.903724670410156,
"learning_rate": 1.4650853737425327e-05,
"loss": 2.6079,
"step": 3910
},
{
"epoch": 23.6144578313253,
"grad_norm": 303.8630065917969,
"learning_rate": 1.4394046763912122e-05,
"loss": 3.0013,
"step": 3920
},
{
"epoch": 23.674698795180724,
"grad_norm": 96.3546371459961,
"learning_rate": 1.413921723771832e-05,
"loss": 3.2609,
"step": 3930
},
{
"epoch": 23.734939759036145,
"grad_norm": 51.00669479370117,
"learning_rate": 1.3886375633887665e-05,
"loss": 3.2636,
"step": 3940
},
{
"epoch": 23.795180722891565,
"grad_norm": 21.779207229614258,
"learning_rate": 1.3635532345748137e-05,
"loss": 3.1889,
"step": 3950
},
{
"epoch": 23.855421686746986,
"grad_norm": 483.495361328125,
"learning_rate": 1.3386697684484853e-05,
"loss": 2.8758,
"step": 3960
},
{
"epoch": 23.91566265060241,
"grad_norm": 38.69334411621094,
"learning_rate": 1.3139881878716107e-05,
"loss": 2.7819,
"step": 3970
},
{
"epoch": 23.97590361445783,
"grad_norm": 40.966644287109375,
"learning_rate": 1.2895095074072986e-05,
"loss": 2.9545,
"step": 3980
},
{
"epoch": 24.03614457831325,
"grad_norm": 15.748128890991211,
"learning_rate": 1.2652347332782227e-05,
"loss": 2.9577,
"step": 3990
},
{
"epoch": 24.096385542168676,
"grad_norm": 89.11782836914062,
"learning_rate": 1.2411648633252719e-05,
"loss": 3.2177,
"step": 4000
},
{
"epoch": 24.156626506024097,
"grad_norm": 21.81198501586914,
"learning_rate": 1.2173008869665241e-05,
"loss": 2.9163,
"step": 4010
},
{
"epoch": 24.216867469879517,
"grad_norm": 11.634493827819824,
"learning_rate": 1.1936437851565791e-05,
"loss": 2.9567,
"step": 4020
},
{
"epoch": 24.27710843373494,
"grad_norm": 13.151474952697754,
"learning_rate": 1.1701945303462337e-05,
"loss": 2.9042,
"step": 4030
},
{
"epoch": 24.337349397590362,
"grad_norm": 9.143220901489258,
"learning_rate": 1.146954086442508e-05,
"loss": 3.1724,
"step": 4040
},
{
"epoch": 24.397590361445783,
"grad_norm": 10.673160552978516,
"learning_rate": 1.1239234087690252e-05,
"loss": 3.0989,
"step": 4050
},
{
"epoch": 24.397590361445783,
"eval_loss": 3.318880081176758,
"eval_runtime": 3.7938,
"eval_samples_per_second": 9.753,
"eval_steps_per_second": 9.753,
"step": 4050
},
{
"epoch": 24.457831325301203,
"grad_norm": 22.973133087158203,
"learning_rate": 1.1011034440267395e-05,
"loss": 3.1335,
"step": 4060
},
{
"epoch": 24.518072289156628,
"grad_norm": 57.02553939819336,
"learning_rate": 1.078495130255023e-05,
"loss": 2.6128,
"step": 4070
},
{
"epoch": 24.57831325301205,
"grad_norm": 109.61993408203125,
"learning_rate": 1.0560993967931004e-05,
"loss": 3.1313,
"step": 4080
},
{
"epoch": 24.63855421686747,
"grad_norm": 10.217292785644531,
"learning_rate": 1.0339171642418585e-05,
"loss": 3.0932,
"step": 4090
},
{
"epoch": 24.698795180722893,
"grad_norm": 39.34014892578125,
"learning_rate": 1.0119493444259963e-05,
"loss": 3.0317,
"step": 4100
},
{
"epoch": 24.759036144578314,
"grad_norm": 27.805980682373047,
"learning_rate": 9.901968403565428e-06,
"loss": 2.6487,
"step": 4110
},
{
"epoch": 24.819277108433734,
"grad_norm": 13.394316673278809,
"learning_rate": 9.686605461937441e-06,
"loss": 2.9028,
"step": 4120
},
{
"epoch": 24.879518072289155,
"grad_norm": 9.390585899353027,
"learning_rate": 9.473413472102982e-06,
"loss": 3.1307,
"step": 4130
},
{
"epoch": 24.93975903614458,
"grad_norm": 50.32940673828125,
"learning_rate": 9.262401197549744e-06,
"loss": 2.7928,
"step": 4140
},
{
"epoch": 25.0,
"grad_norm": 24.728273391723633,
"learning_rate": 9.05357731216587e-06,
"loss": 3.0874,
"step": 4150
},
{
"epoch": 25.06024096385542,
"grad_norm": 17.07667350769043,
"learning_rate": 8.846950399883368e-06,
"loss": 3.1649,
"step": 4160
},
{
"epoch": 25.120481927710845,
"grad_norm": 22.225461959838867,
"learning_rate": 8.64252895432531e-06,
"loss": 2.9153,
"step": 4170
},
{
"epoch": 25.180722891566266,
"grad_norm": 9.772006034851074,
"learning_rate": 8.440321378456656e-06,
"loss": 2.7644,
"step": 4180
},
{
"epoch": 25.240963855421686,
"grad_norm": 12.998729705810547,
"learning_rate": 8.240335984238844e-06,
"loss": 2.8009,
"step": 4190
},
{
"epoch": 25.301204819277107,
"grad_norm": 11.086814880371094,
"learning_rate": 8.042580992288163e-06,
"loss": 2.9233,
"step": 4200
},
{
"epoch": 25.301204819277107,
"eval_loss": 3.316469192504883,
"eval_runtime": 3.8267,
"eval_samples_per_second": 9.669,
"eval_steps_per_second": 9.669,
"step": 4200
},
{
"epoch": 25.36144578313253,
"grad_norm": 22.682485580444336,
"learning_rate": 7.847064531537774e-06,
"loss": 2.6035,
"step": 4210
},
{
"epoch": 25.42168674698795,
"grad_norm": 42.95654296875,
"learning_rate": 7.653794638903574e-06,
"loss": 3.1161,
"step": 4220
},
{
"epoch": 25.481927710843372,
"grad_norm": 55.98487091064453,
"learning_rate": 7.462779258953875e-06,
"loss": 2.7615,
"step": 4230
},
{
"epoch": 25.542168674698797,
"grad_norm": 10.97606372833252,
"learning_rate": 7.274026243582796e-06,
"loss": 3.0223,
"step": 4240
},
{
"epoch": 25.602409638554217,
"grad_norm": 37.118247985839844,
"learning_rate": 7.087543351687493e-06,
"loss": 2.9984,
"step": 4250
},
{
"epoch": 25.662650602409638,
"grad_norm": 20.084211349487305,
"learning_rate": 6.903338248849269e-06,
"loss": 3.1486,
"step": 4260
},
{
"epoch": 25.72289156626506,
"grad_norm": 14.11424446105957,
"learning_rate": 6.7214185070183925e-06,
"loss": 2.8441,
"step": 4270
},
{
"epoch": 25.783132530120483,
"grad_norm": 9.799378395080566,
"learning_rate": 6.541791604202936e-06,
"loss": 3.033,
"step": 4280
},
{
"epoch": 25.843373493975903,
"grad_norm": 34.30189514160156,
"learning_rate": 6.364464924161311e-06,
"loss": 3.1671,
"step": 4290
},
{
"epoch": 25.903614457831324,
"grad_norm": 213.951904296875,
"learning_rate": 6.1894457560988106e-06,
"loss": 2.9741,
"step": 4300
},
{
"epoch": 25.96385542168675,
"grad_norm": 186.6619415283203,
"learning_rate": 6.016741294367911e-06,
"loss": 3.1161,
"step": 4310
},
{
"epoch": 26.02409638554217,
"grad_norm": 12.259818077087402,
"learning_rate": 5.846358638172615e-06,
"loss": 2.8821,
"step": 4320
},
{
"epoch": 26.08433734939759,
"grad_norm": 11.736944198608398,
"learning_rate": 5.678304791276567e-06,
"loss": 2.7778,
"step": 4330
},
{
"epoch": 26.14457831325301,
"grad_norm": 59.023399353027344,
"learning_rate": 5.51258666171519e-06,
"loss": 3.1458,
"step": 4340
},
{
"epoch": 26.204819277108435,
"grad_norm": 11.082910537719727,
"learning_rate": 5.349211061511726e-06,
"loss": 2.8066,
"step": 4350
},
{
"epoch": 26.204819277108435,
"eval_loss": 3.3215532302856445,
"eval_runtime": 3.834,
"eval_samples_per_second": 9.65,
"eval_steps_per_second": 9.65,
"step": 4350
},
{
"epoch": 26.265060240963855,
"grad_norm": 9.732200622558594,
"learning_rate": 5.188184706397182e-06,
"loss": 3.1099,
"step": 4360
},
{
"epoch": 26.325301204819276,
"grad_norm": 52.13785171508789,
"learning_rate": 5.029514215534339e-06,
"loss": 2.9703,
"step": 4370
},
{
"epoch": 26.3855421686747,
"grad_norm": 19.796157836914062,
"learning_rate": 4.873206111245594e-06,
"loss": 3.1718,
"step": 4380
},
{
"epoch": 26.44578313253012,
"grad_norm": 224.9999237060547,
"learning_rate": 4.719266818744912e-06,
"loss": 3.0249,
"step": 4390
},
{
"epoch": 26.50602409638554,
"grad_norm": 9.841102600097656,
"learning_rate": 4.567702665873648e-06,
"loss": 3.0305,
"step": 4400
},
{
"epoch": 26.566265060240966,
"grad_norm": 14.756407737731934,
"learning_rate": 4.418519882840505e-06,
"loss": 2.7736,
"step": 4410
},
{
"epoch": 26.626506024096386,
"grad_norm": 25.35453987121582,
"learning_rate": 4.271724601965371e-06,
"loss": 2.5669,
"step": 4420
},
{
"epoch": 26.686746987951807,
"grad_norm": 272.5387268066406,
"learning_rate": 4.127322857427306e-06,
"loss": 3.1571,
"step": 4430
},
{
"epoch": 26.746987951807228,
"grad_norm": 46.450382232666016,
"learning_rate": 3.985320585016425e-06,
"loss": 2.7388,
"step": 4440
},
{
"epoch": 26.80722891566265,
"grad_norm": 20.076871871948242,
"learning_rate": 3.845723621889973e-06,
"loss": 3.16,
"step": 4450
},
{
"epoch": 26.867469879518072,
"grad_norm": 13.591471672058105,
"learning_rate": 3.7085377063323447e-06,
"loss": 2.9016,
"step": 4460
},
{
"epoch": 26.927710843373493,
"grad_norm": 18.94786834716797,
"learning_rate": 3.5737684775191887e-06,
"loss": 2.7776,
"step": 4470
},
{
"epoch": 26.987951807228917,
"grad_norm": 22.884519577026367,
"learning_rate": 3.441421475285679e-06,
"loss": 3.041,
"step": 4480
},
{
"epoch": 27.048192771084338,
"grad_norm": 11.475288391113281,
"learning_rate": 3.3115021398986768e-06,
"loss": 3.3,
"step": 4490
},
{
"epoch": 27.10843373493976,
"grad_norm": 95.50624084472656,
"learning_rate": 3.18401581183321e-06,
"loss": 2.9552,
"step": 4500
},
{
"epoch": 27.10843373493976,
"eval_loss": 3.3192508220672607,
"eval_runtime": 3.8297,
"eval_samples_per_second": 9.661,
"eval_steps_per_second": 9.661,
"step": 4500
},
{
"epoch": 27.16867469879518,
"grad_norm": 312.4958190917969,
"learning_rate": 3.0589677315529044e-06,
"loss": 2.4796,
"step": 4510
},
{
"epoch": 27.228915662650603,
"grad_norm": 35.864219665527344,
"learning_rate": 2.9363630392945513e-06,
"loss": 3.2925,
"step": 4520
},
{
"epoch": 27.289156626506024,
"grad_norm": 28.503623962402344,
"learning_rate": 2.816206774856854e-06,
"loss": 2.9739,
"step": 4530
},
{
"epoch": 27.349397590361445,
"grad_norm": 104.54164123535156,
"learning_rate": 2.6985038773932046e-06,
"loss": 3.0139,
"step": 4540
},
{
"epoch": 27.40963855421687,
"grad_norm": 28.280534744262695,
"learning_rate": 2.583259185208714e-06,
"loss": 2.952,
"step": 4550
},
{
"epoch": 27.46987951807229,
"grad_norm": 18.73903465270996,
"learning_rate": 2.4704774355612943e-06,
"loss": 3.0347,
"step": 4560
},
{
"epoch": 27.53012048192771,
"grad_norm": 32.26377868652344,
"learning_rate": 2.3601632644669536e-06,
"loss": 2.8445,
"step": 4570
},
{
"epoch": 27.59036144578313,
"grad_norm": 8.18028736114502,
"learning_rate": 2.2523212065091723e-06,
"loss": 2.8002,
"step": 4580
},
{
"epoch": 27.650602409638555,
"grad_norm": 31.256370544433594,
"learning_rate": 2.1469556946525706e-06,
"loss": 3.0538,
"step": 4590
},
{
"epoch": 27.710843373493976,
"grad_norm": 13.404227256774902,
"learning_rate": 2.0440710600606595e-06,
"loss": 2.8497,
"step": 4600
},
{
"epoch": 27.771084337349397,
"grad_norm": 48.90232849121094,
"learning_rate": 1.9436715319177956e-06,
"loss": 2.9533,
"step": 4610
},
{
"epoch": 27.83132530120482,
"grad_norm": 101.96350860595703,
"learning_rate": 1.8457612372553348e-06,
"loss": 2.9476,
"step": 4620
},
{
"epoch": 27.89156626506024,
"grad_norm": 73.58414459228516,
"learning_rate": 1.75034420078201e-06,
"loss": 3.0937,
"step": 4630
},
{
"epoch": 27.951807228915662,
"grad_norm": 28.492570877075195,
"learning_rate": 1.6574243447184597e-06,
"loss": 2.784,
"step": 4640
},
{
"epoch": 28.012048192771083,
"grad_norm": 10.421324729919434,
"learning_rate": 1.567005488636024e-06,
"loss": 2.9865,
"step": 4650
},
{
"epoch": 28.012048192771083,
"eval_loss": 3.310655355453491,
"eval_runtime": 3.8346,
"eval_samples_per_second": 9.649,
"eval_steps_per_second": 9.649,
"step": 4650
},
{
"epoch": 28.072289156626507,
"grad_norm": 9.195672035217285,
"learning_rate": 1.4790913492997438e-06,
"loss": 2.6247,
"step": 4660
},
{
"epoch": 28.132530120481928,
"grad_norm": 14.86363697052002,
"learning_rate": 1.3936855405155408e-06,
"loss": 2.8831,
"step": 4670
},
{
"epoch": 28.19277108433735,
"grad_norm": 11.328372955322266,
"learning_rate": 1.3107915729816954e-06,
"loss": 2.7178,
"step": 4680
},
{
"epoch": 28.253012048192772,
"grad_norm": 15.23580551147461,
"learning_rate": 1.230412854144547e-06,
"loss": 3.1991,
"step": 4690
},
{
"epoch": 28.313253012048193,
"grad_norm": 169.89739990234375,
"learning_rate": 1.15255268805841e-06,
"loss": 3.0329,
"step": 4700
},
{
"epoch": 28.373493975903614,
"grad_norm": 323.2249450683594,
"learning_rate": 1.0772142752497604e-06,
"loss": 3.1795,
"step": 4710
},
{
"epoch": 28.433734939759034,
"grad_norm": 24.42875099182129,
"learning_rate": 1.004400712585646e-06,
"loss": 2.8986,
"step": 4720
},
{
"epoch": 28.49397590361446,
"grad_norm": 14.24337387084961,
"learning_rate": 9.341149931464537e-07,
"loss": 2.7472,
"step": 4730
},
{
"epoch": 28.55421686746988,
"grad_norm": 90.30467224121094,
"learning_rate": 8.663600061028162e-07,
"loss": 2.9509,
"step": 4740
},
{
"epoch": 28.6144578313253,
"grad_norm": 32.362003326416016,
"learning_rate": 8.011385365968641e-07,
"loss": 3.0889,
"step": 4750
},
{
"epoch": 28.674698795180724,
"grad_norm": 20.82928466796875,
"learning_rate": 7.384532656277698e-07,
"loss": 3.0301,
"step": 4760
},
{
"epoch": 28.734939759036145,
"grad_norm": 12.928521156311035,
"learning_rate": 6.783067699414891e-07,
"loss": 2.8634,
"step": 4770
},
{
"epoch": 28.795180722891565,
"grad_norm": 14.081971168518066,
"learning_rate": 6.207015219248866e-07,
"loss": 2.6318,
"step": 4780
},
{
"epoch": 28.855421686746986,
"grad_norm": 13.051186561584473,
"learning_rate": 5.656398895040813e-07,
"loss": 2.7618,
"step": 4790
},
{
"epoch": 28.91566265060241,
"grad_norm": 221.75543212890625,
"learning_rate": 5.131241360471217e-07,
"loss": 3.0757,
"step": 4800
},
{
"epoch": 28.91566265060241,
"eval_loss": 3.3106162548065186,
"eval_runtime": 3.8397,
"eval_samples_per_second": 9.636,
"eval_steps_per_second": 9.636,
"step": 4800
}
],
"logging_steps": 10,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 31,
"save_steps": 150,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.5578958526488576e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}