{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 1000,
"global_step": 21900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00684931506849315,
"grad_norm": 323.0462646484375,
"learning_rate": 2.9943835616438356e-05,
"loss": 5.0367,
"step": 50
},
{
"epoch": 0.0136986301369863,
"grad_norm": 77.02570343017578,
"learning_rate": 2.987808219178082e-05,
"loss": 3.1287,
"step": 100
},
{
"epoch": 0.02054794520547945,
"grad_norm": 33.8292236328125,
"learning_rate": 2.980958904109589e-05,
"loss": 2.247,
"step": 150
},
{
"epoch": 0.0273972602739726,
"grad_norm": 45.22611618041992,
"learning_rate": 2.974109589041096e-05,
"loss": 1.8313,
"step": 200
},
{
"epoch": 0.03424657534246575,
"grad_norm": 40.95551681518555,
"learning_rate": 2.9672602739726026e-05,
"loss": 1.7307,
"step": 250
},
{
"epoch": 0.0410958904109589,
"grad_norm": 132.5420684814453,
"learning_rate": 2.9604109589041095e-05,
"loss": 1.71,
"step": 300
},
{
"epoch": 0.04794520547945205,
"grad_norm": 46.58742141723633,
"learning_rate": 2.9535616438356165e-05,
"loss": 1.5229,
"step": 350
},
{
"epoch": 0.0547945205479452,
"grad_norm": 60.270259857177734,
"learning_rate": 2.9467123287671234e-05,
"loss": 1.4451,
"step": 400
},
{
"epoch": 0.06164383561643835,
"grad_norm": 36.05481719970703,
"learning_rate": 2.93986301369863e-05,
"loss": 1.4609,
"step": 450
},
{
"epoch": 0.0684931506849315,
"grad_norm": 20.183435440063477,
"learning_rate": 2.933013698630137e-05,
"loss": 1.5096,
"step": 500
},
{
"epoch": 0.07534246575342465,
"grad_norm": 31.55844497680664,
"learning_rate": 2.926164383561644e-05,
"loss": 1.4921,
"step": 550
},
{
"epoch": 0.0821917808219178,
"grad_norm": 26.991056442260742,
"learning_rate": 2.919315068493151e-05,
"loss": 1.3397,
"step": 600
},
{
"epoch": 0.08904109589041095,
"grad_norm": 22.860877990722656,
"learning_rate": 2.9124657534246575e-05,
"loss": 1.2389,
"step": 650
},
{
"epoch": 0.0958904109589041,
"grad_norm": 34.633026123046875,
"learning_rate": 2.9056164383561644e-05,
"loss": 1.182,
"step": 700
},
{
"epoch": 0.10273972602739725,
"grad_norm": 29.118816375732422,
"learning_rate": 2.8987671232876714e-05,
"loss": 1.3518,
"step": 750
},
{
"epoch": 0.1095890410958904,
"grad_norm": 72.57406616210938,
"learning_rate": 2.8919178082191783e-05,
"loss": 1.3301,
"step": 800
},
{
"epoch": 0.11643835616438356,
"grad_norm": 19.362930297851562,
"learning_rate": 2.885068493150685e-05,
"loss": 1.2366,
"step": 850
},
{
"epoch": 0.1232876712328767,
"grad_norm": 34.462242126464844,
"learning_rate": 2.878219178082192e-05,
"loss": 1.3056,
"step": 900
},
{
"epoch": 0.13013698630136986,
"grad_norm": 20.275766372680664,
"learning_rate": 2.871369863013699e-05,
"loss": 1.2415,
"step": 950
},
{
"epoch": 0.136986301369863,
"grad_norm": 34.678558349609375,
"learning_rate": 2.8645205479452058e-05,
"loss": 1.2008,
"step": 1000
},
{
"epoch": 0.136986301369863,
"eval_exact_match": 74.98580889309366,
"eval_f1": 84.12743563305797,
"eval_runtime": 416.5856,
"eval_samples_per_second": 25.373,
"eval_steps_per_second": 1.587,
"step": 1000
},
{
"epoch": 0.14383561643835616,
"grad_norm": 27.335071563720703,
"learning_rate": 2.8576712328767124e-05,
"loss": 1.1301,
"step": 1050
},
{
"epoch": 0.1506849315068493,
"grad_norm": 36.93644714355469,
"learning_rate": 2.8508219178082194e-05,
"loss": 1.3069,
"step": 1100
},
{
"epoch": 0.15753424657534246,
"grad_norm": 53.08256149291992,
"learning_rate": 2.8439726027397263e-05,
"loss": 1.203,
"step": 1150
},
{
"epoch": 0.1643835616438356,
"grad_norm": 70.76780700683594,
"learning_rate": 2.8371232876712332e-05,
"loss": 1.2392,
"step": 1200
},
{
"epoch": 0.17123287671232876,
"grad_norm": 26.65420150756836,
"learning_rate": 2.8302739726027395e-05,
"loss": 1.1829,
"step": 1250
},
{
"epoch": 0.1780821917808219,
"grad_norm": 23.404756546020508,
"learning_rate": 2.8234246575342465e-05,
"loss": 1.2175,
"step": 1300
},
{
"epoch": 0.18493150684931506,
"grad_norm": 23.29783058166504,
"learning_rate": 2.8165753424657534e-05,
"loss": 1.1673,
"step": 1350
},
{
"epoch": 0.1917808219178082,
"grad_norm": 45.53365707397461,
"learning_rate": 2.8097260273972604e-05,
"loss": 1.1966,
"step": 1400
},
{
"epoch": 0.19863013698630136,
"grad_norm": 13.774870872497559,
"learning_rate": 2.802876712328767e-05,
"loss": 1.2635,
"step": 1450
},
{
"epoch": 0.2054794520547945,
"grad_norm": 14.898510932922363,
"learning_rate": 2.796027397260274e-05,
"loss": 1.1165,
"step": 1500
},
{
"epoch": 0.21232876712328766,
"grad_norm": 35.47676467895508,
"learning_rate": 2.789178082191781e-05,
"loss": 1.1877,
"step": 1550
},
{
"epoch": 0.2191780821917808,
"grad_norm": 28.7779598236084,
"learning_rate": 2.7823287671232878e-05,
"loss": 1.1162,
"step": 1600
},
{
"epoch": 0.22602739726027396,
"grad_norm": 21.763216018676758,
"learning_rate": 2.7754794520547944e-05,
"loss": 1.2048,
"step": 1650
},
{
"epoch": 0.2328767123287671,
"grad_norm": 32.003700256347656,
"learning_rate": 2.7686301369863014e-05,
"loss": 1.1233,
"step": 1700
},
{
"epoch": 0.23972602739726026,
"grad_norm": 31.449691772460938,
"learning_rate": 2.7617808219178083e-05,
"loss": 1.0509,
"step": 1750
},
{
"epoch": 0.2465753424657534,
"grad_norm": 18.793638229370117,
"learning_rate": 2.7549315068493153e-05,
"loss": 1.0995,
"step": 1800
},
{
"epoch": 0.2534246575342466,
"grad_norm": 12.931886672973633,
"learning_rate": 2.748082191780822e-05,
"loss": 1.037,
"step": 1850
},
{
"epoch": 0.2602739726027397,
"grad_norm": 14.888827323913574,
"learning_rate": 2.7412328767123288e-05,
"loss": 1.0856,
"step": 1900
},
{
"epoch": 0.2671232876712329,
"grad_norm": 42.177330017089844,
"learning_rate": 2.7343835616438358e-05,
"loss": 1.0143,
"step": 1950
},
{
"epoch": 0.273972602739726,
"grad_norm": 52.39680480957031,
"learning_rate": 2.7275342465753427e-05,
"loss": 0.9292,
"step": 2000
},
{
"epoch": 0.273972602739726,
"eval_exact_match": 78.05108798486282,
"eval_f1": 86.8708882051522,
"eval_runtime": 417.914,
"eval_samples_per_second": 25.292,
"eval_steps_per_second": 1.582,
"step": 2000
},
{
"epoch": 0.2808219178082192,
"grad_norm": 79.39705657958984,
"learning_rate": 2.7206849315068493e-05,
"loss": 1.1627,
"step": 2050
},
{
"epoch": 0.2876712328767123,
"grad_norm": 13.503669738769531,
"learning_rate": 2.7138356164383563e-05,
"loss": 0.9592,
"step": 2100
},
{
"epoch": 0.2945205479452055,
"grad_norm": 34.6471061706543,
"learning_rate": 2.7069863013698632e-05,
"loss": 0.9724,
"step": 2150
},
{
"epoch": 0.3013698630136986,
"grad_norm": 28.719350814819336,
"learning_rate": 2.70013698630137e-05,
"loss": 0.9852,
"step": 2200
},
{
"epoch": 0.3082191780821918,
"grad_norm": 55.4783935546875,
"learning_rate": 2.6932876712328768e-05,
"loss": 0.9469,
"step": 2250
},
{
"epoch": 0.3150684931506849,
"grad_norm": 24.332387924194336,
"learning_rate": 2.6864383561643837e-05,
"loss": 1.0859,
"step": 2300
},
{
"epoch": 0.3219178082191781,
"grad_norm": 16.438060760498047,
"learning_rate": 2.6795890410958907e-05,
"loss": 0.9121,
"step": 2350
},
{
"epoch": 0.3287671232876712,
"grad_norm": 19.229644775390625,
"learning_rate": 2.6727397260273976e-05,
"loss": 1.0109,
"step": 2400
},
{
"epoch": 0.3356164383561644,
"grad_norm": 22.6262149810791,
"learning_rate": 2.6658904109589042e-05,
"loss": 1.077,
"step": 2450
},
{
"epoch": 0.3424657534246575,
"grad_norm": 10.421500205993652,
"learning_rate": 2.659041095890411e-05,
"loss": 1.1242,
"step": 2500
},
{
"epoch": 0.3493150684931507,
"grad_norm": 44.75662612915039,
"learning_rate": 2.6521917808219178e-05,
"loss": 1.0445,
"step": 2550
},
{
"epoch": 0.3561643835616438,
"grad_norm": 10.374776840209961,
"learning_rate": 2.6453424657534247e-05,
"loss": 0.9927,
"step": 2600
},
{
"epoch": 0.363013698630137,
"grad_norm": 21.781341552734375,
"learning_rate": 2.6384931506849313e-05,
"loss": 0.9601,
"step": 2650
},
{
"epoch": 0.3698630136986301,
"grad_norm": 7.096819877624512,
"learning_rate": 2.6316438356164383e-05,
"loss": 1.0105,
"step": 2700
},
{
"epoch": 0.3767123287671233,
"grad_norm": 128.46046447753906,
"learning_rate": 2.6247945205479452e-05,
"loss": 0.9627,
"step": 2750
},
{
"epoch": 0.3835616438356164,
"grad_norm": 43.575111389160156,
"learning_rate": 2.6179452054794522e-05,
"loss": 1.0495,
"step": 2800
},
{
"epoch": 0.3904109589041096,
"grad_norm": 8.99010944366455,
"learning_rate": 2.6110958904109588e-05,
"loss": 0.9465,
"step": 2850
},
{
"epoch": 0.3972602739726027,
"grad_norm": 21.84307289123535,
"learning_rate": 2.6042465753424657e-05,
"loss": 0.9971,
"step": 2900
},
{
"epoch": 0.4041095890410959,
"grad_norm": 42.8900032043457,
"learning_rate": 2.5973972602739727e-05,
"loss": 0.907,
"step": 2950
},
{
"epoch": 0.410958904109589,
"grad_norm": 50.012393951416016,
"learning_rate": 2.5905479452054796e-05,
"loss": 0.9572,
"step": 3000
},
{
"epoch": 0.410958904109589,
"eval_exact_match": 80.07568590350047,
"eval_f1": 88.2866062038965,
"eval_runtime": 417.1017,
"eval_samples_per_second": 25.342,
"eval_steps_per_second": 1.585,
"step": 3000
},
{
"epoch": 0.4178082191780822,
"grad_norm": 33.76340103149414,
"learning_rate": 2.5836986301369862e-05,
"loss": 0.9699,
"step": 3050
},
{
"epoch": 0.4246575342465753,
"grad_norm": 14.410927772521973,
"learning_rate": 2.5768493150684932e-05,
"loss": 0.8769,
"step": 3100
},
{
"epoch": 0.4315068493150685,
"grad_norm": 13.709430694580078,
"learning_rate": 2.57e-05,
"loss": 0.9086,
"step": 3150
},
{
"epoch": 0.4383561643835616,
"grad_norm": 30.455875396728516,
"learning_rate": 2.563150684931507e-05,
"loss": 0.9569,
"step": 3200
},
{
"epoch": 0.4452054794520548,
"grad_norm": 10.021869659423828,
"learning_rate": 2.5563013698630137e-05,
"loss": 0.9403,
"step": 3250
},
{
"epoch": 0.4520547945205479,
"grad_norm": 16.0334415435791,
"learning_rate": 2.5494520547945206e-05,
"loss": 0.9413,
"step": 3300
},
{
"epoch": 0.4589041095890411,
"grad_norm": 17.812877655029297,
"learning_rate": 2.5426027397260276e-05,
"loss": 0.9739,
"step": 3350
},
{
"epoch": 0.4657534246575342,
"grad_norm": 13.129268646240234,
"learning_rate": 2.5357534246575345e-05,
"loss": 0.9456,
"step": 3400
},
{
"epoch": 0.4726027397260274,
"grad_norm": 37.476993560791016,
"learning_rate": 2.528904109589041e-05,
"loss": 0.9359,
"step": 3450
},
{
"epoch": 0.4794520547945205,
"grad_norm": 16.441556930541992,
"learning_rate": 2.522054794520548e-05,
"loss": 0.9096,
"step": 3500
},
{
"epoch": 0.4863013698630137,
"grad_norm": 11.985368728637695,
"learning_rate": 2.515342465753425e-05,
"loss": 0.9038,
"step": 3550
},
{
"epoch": 0.4931506849315068,
"grad_norm": 28.11038589477539,
"learning_rate": 2.5084931506849316e-05,
"loss": 0.8699,
"step": 3600
},
{
"epoch": 0.5,
"grad_norm": 26.2198543548584,
"learning_rate": 2.5016438356164385e-05,
"loss": 0.9071,
"step": 3650
},
{
"epoch": 0.5068493150684932,
"grad_norm": 21.6419620513916,
"learning_rate": 2.4947945205479455e-05,
"loss": 1.1274,
"step": 3700
},
{
"epoch": 0.5136986301369864,
"grad_norm": 27.075368881225586,
"learning_rate": 2.4879452054794524e-05,
"loss": 0.9475,
"step": 3750
},
{
"epoch": 0.5205479452054794,
"grad_norm": 25.394577026367188,
"learning_rate": 2.481095890410959e-05,
"loss": 0.9166,
"step": 3800
},
{
"epoch": 0.5273972602739726,
"grad_norm": 32.769256591796875,
"learning_rate": 2.474246575342466e-05,
"loss": 0.9656,
"step": 3850
},
{
"epoch": 0.5342465753424658,
"grad_norm": 9.152816772460938,
"learning_rate": 2.4673972602739726e-05,
"loss": 0.9538,
"step": 3900
},
{
"epoch": 0.541095890410959,
"grad_norm": 24.151294708251953,
"learning_rate": 2.4605479452054795e-05,
"loss": 0.9396,
"step": 3950
},
{
"epoch": 0.547945205479452,
"grad_norm": 10.290411949157715,
"learning_rate": 2.453698630136986e-05,
"loss": 0.971,
"step": 4000
},
{
"epoch": 0.547945205479452,
"eval_exact_match": 82.82876064333018,
"eval_f1": 90.23705695343135,
"eval_runtime": 418.5094,
"eval_samples_per_second": 25.256,
"eval_steps_per_second": 1.579,
"step": 4000
},
{
"epoch": 0.5547945205479452,
"grad_norm": 30.38792610168457,
"learning_rate": 2.446849315068493e-05,
"loss": 0.9951,
"step": 4050
},
{
"epoch": 0.5616438356164384,
"grad_norm": 12.29451847076416,
"learning_rate": 2.44e-05,
"loss": 0.9758,
"step": 4100
},
{
"epoch": 0.5684931506849316,
"grad_norm": 56.75043869018555,
"learning_rate": 2.433150684931507e-05,
"loss": 0.8756,
"step": 4150
},
{
"epoch": 0.5753424657534246,
"grad_norm": 26.024311065673828,
"learning_rate": 2.4263013698630136e-05,
"loss": 0.9083,
"step": 4200
},
{
"epoch": 0.5821917808219178,
"grad_norm": 14.479564666748047,
"learning_rate": 2.4194520547945205e-05,
"loss": 0.9357,
"step": 4250
},
{
"epoch": 0.589041095890411,
"grad_norm": 9.043543815612793,
"learning_rate": 2.4126027397260275e-05,
"loss": 0.9954,
"step": 4300
},
{
"epoch": 0.5958904109589042,
"grad_norm": 17.975173950195312,
"learning_rate": 2.4057534246575344e-05,
"loss": 0.8758,
"step": 4350
},
{
"epoch": 0.6027397260273972,
"grad_norm": 25.138750076293945,
"learning_rate": 2.398904109589041e-05,
"loss": 0.8641,
"step": 4400
},
{
"epoch": 0.6095890410958904,
"grad_norm": 10.003458976745605,
"learning_rate": 2.392054794520548e-05,
"loss": 1.0095,
"step": 4450
},
{
"epoch": 0.6164383561643836,
"grad_norm": 48.206233978271484,
"learning_rate": 2.385205479452055e-05,
"loss": 0.924,
"step": 4500
},
{
"epoch": 0.6232876712328768,
"grad_norm": 10.344615936279297,
"learning_rate": 2.378356164383562e-05,
"loss": 0.9114,
"step": 4550
},
{
"epoch": 0.6301369863013698,
"grad_norm": 15.59772777557373,
"learning_rate": 2.3715068493150685e-05,
"loss": 0.9313,
"step": 4600
},
{
"epoch": 0.636986301369863,
"grad_norm": 4.01880407333374,
"learning_rate": 2.3646575342465754e-05,
"loss": 0.9007,
"step": 4650
},
{
"epoch": 0.6438356164383562,
"grad_norm": 6.648110866546631,
"learning_rate": 2.3578082191780824e-05,
"loss": 0.9106,
"step": 4700
},
{
"epoch": 0.6506849315068494,
"grad_norm": 36.89479446411133,
"learning_rate": 2.3509589041095893e-05,
"loss": 0.8683,
"step": 4750
},
{
"epoch": 0.6575342465753424,
"grad_norm": 46.464141845703125,
"learning_rate": 2.344109589041096e-05,
"loss": 0.7399,
"step": 4800
},
{
"epoch": 0.6643835616438356,
"grad_norm": 24.97540283203125,
"learning_rate": 2.337260273972603e-05,
"loss": 0.9609,
"step": 4850
},
{
"epoch": 0.6712328767123288,
"grad_norm": 50.79806137084961,
"learning_rate": 2.3304109589041098e-05,
"loss": 0.8724,
"step": 4900
},
{
"epoch": 0.678082191780822,
"grad_norm": 24.82395362854004,
"learning_rate": 2.3235616438356168e-05,
"loss": 0.8565,
"step": 4950
},
{
"epoch": 0.684931506849315,
"grad_norm": 28.389963150024414,
"learning_rate": 2.3167123287671234e-05,
"loss": 0.8048,
"step": 5000
},
{
"epoch": 0.684931506849315,
"eval_exact_match": 84.12488174077578,
"eval_f1": 91.07595753183573,
"eval_runtime": 417.955,
"eval_samples_per_second": 25.29,
"eval_steps_per_second": 1.582,
"step": 5000
},
{
"epoch": 0.6917808219178082,
"grad_norm": 14.127419471740723,
"learning_rate": 2.3098630136986303e-05,
"loss": 0.7959,
"step": 5050
},
{
"epoch": 0.6986301369863014,
"grad_norm": 12.644275665283203,
"learning_rate": 2.3030136986301373e-05,
"loss": 0.9732,
"step": 5100
},
{
"epoch": 0.7054794520547946,
"grad_norm": 26.438093185424805,
"learning_rate": 2.296164383561644e-05,
"loss": 0.8267,
"step": 5150
},
{
"epoch": 0.7123287671232876,
"grad_norm": 14.37074089050293,
"learning_rate": 2.2893150684931505e-05,
"loss": 0.8899,
"step": 5200
},
{
"epoch": 0.7191780821917808,
"grad_norm": 23.79348373413086,
"learning_rate": 2.2824657534246574e-05,
"loss": 0.906,
"step": 5250
},
{
"epoch": 0.726027397260274,
"grad_norm": 12.331747055053711,
"learning_rate": 2.2756164383561644e-05,
"loss": 0.823,
"step": 5300
},
{
"epoch": 0.7328767123287672,
"grad_norm": 17.69827651977539,
"learning_rate": 2.2687671232876713e-05,
"loss": 0.8854,
"step": 5350
},
{
"epoch": 0.7397260273972602,
"grad_norm": 30.975631713867188,
"learning_rate": 2.261917808219178e-05,
"loss": 0.8866,
"step": 5400
},
{
"epoch": 0.7465753424657534,
"grad_norm": 8.740948677062988,
"learning_rate": 2.255068493150685e-05,
"loss": 0.843,
"step": 5450
},
{
"epoch": 0.7534246575342466,
"grad_norm": 25.42966651916504,
"learning_rate": 2.248219178082192e-05,
"loss": 0.937,
"step": 5500
},
{
"epoch": 0.7602739726027398,
"grad_norm": 15.364262580871582,
"learning_rate": 2.2413698630136988e-05,
"loss": 0.8485,
"step": 5550
},
{
"epoch": 0.7671232876712328,
"grad_norm": 13.131271362304688,
"learning_rate": 2.2345205479452054e-05,
"loss": 0.8018,
"step": 5600
},
{
"epoch": 0.773972602739726,
"grad_norm": 9.219653129577637,
"learning_rate": 2.2276712328767123e-05,
"loss": 0.8492,
"step": 5650
},
{
"epoch": 0.7808219178082192,
"grad_norm": 20.117149353027344,
"learning_rate": 2.2208219178082193e-05,
"loss": 0.9339,
"step": 5700
},
{
"epoch": 0.7876712328767124,
"grad_norm": 10.591473579406738,
"learning_rate": 2.2139726027397262e-05,
"loss": 0.8925,
"step": 5750
},
{
"epoch": 0.7945205479452054,
"grad_norm": 25.7907772064209,
"learning_rate": 2.207123287671233e-05,
"loss": 0.9092,
"step": 5800
},
{
"epoch": 0.8013698630136986,
"grad_norm": 12.926765441894531,
"learning_rate": 2.2002739726027398e-05,
"loss": 0.8824,
"step": 5850
},
{
"epoch": 0.8082191780821918,
"grad_norm": 15.865346908569336,
"learning_rate": 2.1934246575342467e-05,
"loss": 0.846,
"step": 5900
},
{
"epoch": 0.815068493150685,
"grad_norm": 20.416015625,
"learning_rate": 2.1865753424657537e-05,
"loss": 0.8126,
"step": 5950
},
{
"epoch": 0.821917808219178,
"grad_norm": 11.727408409118652,
"learning_rate": 2.1797260273972603e-05,
"loss": 0.8308,
"step": 6000
},
{
"epoch": 0.821917808219178,
"eval_exact_match": 84.57899716177862,
"eval_f1": 91.43611698741225,
"eval_runtime": 417.3274,
"eval_samples_per_second": 25.328,
"eval_steps_per_second": 1.584,
"step": 6000
},
{
"epoch": 0.8287671232876712,
"grad_norm": 25.75244903564453,
"learning_rate": 2.1728767123287672e-05,
"loss": 0.8027,
"step": 6050
},
{
"epoch": 0.8356164383561644,
"grad_norm": 10.4827880859375,
"learning_rate": 2.1660273972602742e-05,
"loss": 0.7853,
"step": 6100
},
{
"epoch": 0.8424657534246576,
"grad_norm": 20.461090087890625,
"learning_rate": 2.159178082191781e-05,
"loss": 0.9597,
"step": 6150
},
{
"epoch": 0.8493150684931506,
"grad_norm": 5.970315456390381,
"learning_rate": 2.1523287671232877e-05,
"loss": 0.8921,
"step": 6200
},
{
"epoch": 0.8561643835616438,
"grad_norm": 31.718955993652344,
"learning_rate": 2.1454794520547947e-05,
"loss": 0.7981,
"step": 6250
},
{
"epoch": 0.863013698630137,
"grad_norm": 7.239502906799316,
"learning_rate": 2.1386301369863016e-05,
"loss": 0.8386,
"step": 6300
},
{
"epoch": 0.8698630136986302,
"grad_norm": 8.74398422241211,
"learning_rate": 2.1317808219178086e-05,
"loss": 0.8466,
"step": 6350
},
{
"epoch": 0.8767123287671232,
"grad_norm": 26.66971206665039,
"learning_rate": 2.1249315068493152e-05,
"loss": 0.8054,
"step": 6400
},
{
"epoch": 0.8835616438356164,
"grad_norm": 30.80714988708496,
"learning_rate": 2.1180821917808218e-05,
"loss": 0.8719,
"step": 6450
},
{
"epoch": 0.8904109589041096,
"grad_norm": 10.245841026306152,
"learning_rate": 2.1112328767123287e-05,
"loss": 0.8324,
"step": 6500
},
{
"epoch": 0.8972602739726028,
"grad_norm": 11.401703834533691,
"learning_rate": 2.1043835616438357e-05,
"loss": 0.7636,
"step": 6550
},
{
"epoch": 0.9041095890410958,
"grad_norm": 10.791388511657715,
"learning_rate": 2.0975342465753423e-05,
"loss": 0.8163,
"step": 6600
},
{
"epoch": 0.910958904109589,
"grad_norm": 73.97101593017578,
"learning_rate": 2.0906849315068493e-05,
"loss": 0.8267,
"step": 6650
},
{
"epoch": 0.9178082191780822,
"grad_norm": 5.0418925285339355,
"learning_rate": 2.0838356164383562e-05,
"loss": 0.7787,
"step": 6700
},
{
"epoch": 0.9246575342465754,
"grad_norm": 16.25297737121582,
"learning_rate": 2.076986301369863e-05,
"loss": 0.8305,
"step": 6750
},
{
"epoch": 0.9315068493150684,
"grad_norm": 15.411580085754395,
"learning_rate": 2.0701369863013698e-05,
"loss": 0.7606,
"step": 6800
},
{
"epoch": 0.9383561643835616,
"grad_norm": 14.359014511108398,
"learning_rate": 2.0632876712328767e-05,
"loss": 0.8578,
"step": 6850
},
{
"epoch": 0.9452054794520548,
"grad_norm": 21.724008560180664,
"learning_rate": 2.0564383561643836e-05,
"loss": 0.7836,
"step": 6900
},
{
"epoch": 0.952054794520548,
"grad_norm": 16.269027709960938,
"learning_rate": 2.0495890410958906e-05,
"loss": 0.8681,
"step": 6950
},
{
"epoch": 0.958904109589041,
"grad_norm": 33.833797454833984,
"learning_rate": 2.0427397260273972e-05,
"loss": 0.8034,
"step": 7000
},
{
"epoch": 0.958904109589041,
"eval_exact_match": 84.64522232734153,
"eval_f1": 91.36486838251115,
"eval_runtime": 419.048,
"eval_samples_per_second": 25.224,
"eval_steps_per_second": 1.577,
"step": 7000
},
{
"epoch": 0.9657534246575342,
"grad_norm": 11.25650691986084,
"learning_rate": 2.035890410958904e-05,
"loss": 0.8451,
"step": 7050
},
{
"epoch": 0.9726027397260274,
"grad_norm": 42.16932678222656,
"learning_rate": 2.029041095890411e-05,
"loss": 0.8701,
"step": 7100
},
{
"epoch": 0.9794520547945206,
"grad_norm": 23.863306045532227,
"learning_rate": 2.022191780821918e-05,
"loss": 0.8651,
"step": 7150
},
{
"epoch": 0.9863013698630136,
"grad_norm": 40.55854034423828,
"learning_rate": 2.0153424657534247e-05,
"loss": 0.8481,
"step": 7200
},
{
"epoch": 0.9931506849315068,
"grad_norm": 14.687817573547363,
"learning_rate": 2.0084931506849316e-05,
"loss": 0.8462,
"step": 7250
},
{
"epoch": 1.0,
"grad_norm": 27.034555435180664,
"learning_rate": 2.0016438356164386e-05,
"loss": 0.7084,
"step": 7300
},
{
"epoch": 1.0068493150684932,
"grad_norm": 20.166122436523438,
"learning_rate": 1.9947945205479455e-05,
"loss": 0.6103,
"step": 7350
},
{
"epoch": 1.0136986301369864,
"grad_norm": 7.819751262664795,
"learning_rate": 1.987945205479452e-05,
"loss": 0.5915,
"step": 7400
},
{
"epoch": 1.0205479452054795,
"grad_norm": 7.091070175170898,
"learning_rate": 1.981095890410959e-05,
"loss": 0.6133,
"step": 7450
},
{
"epoch": 1.0273972602739727,
"grad_norm": 8.464035034179688,
"learning_rate": 1.974246575342466e-05,
"loss": 0.626,
"step": 7500
},
{
"epoch": 1.0342465753424657,
"grad_norm": 9.27121353149414,
"learning_rate": 1.9673972602739726e-05,
"loss": 0.6166,
"step": 7550
},
{
"epoch": 1.0410958904109588,
"grad_norm": 10.510052680969238,
"learning_rate": 1.9605479452054796e-05,
"loss": 0.5873,
"step": 7600
},
{
"epoch": 1.047945205479452,
"grad_norm": 12.773880958557129,
"learning_rate": 1.9536986301369865e-05,
"loss": 0.6074,
"step": 7650
},
{
"epoch": 1.0547945205479452,
"grad_norm": 5.2743425369262695,
"learning_rate": 1.9468493150684935e-05,
"loss": 0.5606,
"step": 7700
},
{
"epoch": 1.0616438356164384,
"grad_norm": 45.79352569580078,
"learning_rate": 1.9399999999999997e-05,
"loss": 0.6101,
"step": 7750
},
{
"epoch": 1.0684931506849316,
"grad_norm": 9.683128356933594,
"learning_rate": 1.9331506849315067e-05,
"loss": 0.521,
"step": 7800
},
{
"epoch": 1.0753424657534247,
"grad_norm": 15.017319679260254,
"learning_rate": 1.9263013698630136e-05,
"loss": 0.6256,
"step": 7850
},
{
"epoch": 1.0821917808219177,
"grad_norm": 17.09823226928711,
"learning_rate": 1.9194520547945206e-05,
"loss": 0.6204,
"step": 7900
},
{
"epoch": 1.0890410958904109,
"grad_norm": 16.04283905029297,
"learning_rate": 1.9126027397260272e-05,
"loss": 0.6075,
"step": 7950
},
{
"epoch": 1.095890410958904,
"grad_norm": 10.665413856506348,
"learning_rate": 1.905753424657534e-05,
"loss": 0.5418,
"step": 8000
},
{
"epoch": 1.095890410958904,
"eval_exact_match": 85.19394512771996,
"eval_f1": 92.0232735390316,
"eval_runtime": 417.4898,
"eval_samples_per_second": 25.318,
"eval_steps_per_second": 1.583,
"step": 8000
},
{
"epoch": 1.1027397260273972,
"grad_norm": 28.214811325073242,
"learning_rate": 1.898904109589041e-05,
"loss": 0.6014,
"step": 8050
},
{
"epoch": 1.1095890410958904,
"grad_norm": 34.26095199584961,
"learning_rate": 1.892054794520548e-05,
"loss": 0.6195,
"step": 8100
},
{
"epoch": 1.1164383561643836,
"grad_norm": 7.891120910644531,
"learning_rate": 1.8852054794520546e-05,
"loss": 0.723,
"step": 8150
},
{
"epoch": 1.1232876712328768,
"grad_norm": 21.476566314697266,
"learning_rate": 1.8783561643835616e-05,
"loss": 0.6577,
"step": 8200
},
{
"epoch": 1.13013698630137,
"grad_norm": 15.302453994750977,
"learning_rate": 1.8715068493150685e-05,
"loss": 0.5872,
"step": 8250
},
{
"epoch": 1.1369863013698631,
"grad_norm": 22.77974510192871,
"learning_rate": 1.8646575342465755e-05,
"loss": 0.6372,
"step": 8300
},
{
"epoch": 1.143835616438356,
"grad_norm": 7.8337297439575195,
"learning_rate": 1.857808219178082e-05,
"loss": 0.5487,
"step": 8350
},
{
"epoch": 1.1506849315068493,
"grad_norm": 26.61360740661621,
"learning_rate": 1.850958904109589e-05,
"loss": 0.5555,
"step": 8400
},
{
"epoch": 1.1575342465753424,
"grad_norm": 9.870406150817871,
"learning_rate": 1.844109589041096e-05,
"loss": 0.5728,
"step": 8450
},
{
"epoch": 1.1643835616438356,
"grad_norm": 8.367820739746094,
"learning_rate": 1.837260273972603e-05,
"loss": 0.6081,
"step": 8500
},
{
"epoch": 1.1712328767123288,
"grad_norm": 22.267404556274414,
"learning_rate": 1.8304109589041095e-05,
"loss": 0.6432,
"step": 8550
},
{
"epoch": 1.178082191780822,
"grad_norm": 20.5167236328125,
"learning_rate": 1.8235616438356165e-05,
"loss": 0.5952,
"step": 8600
},
{
"epoch": 1.1849315068493151,
"grad_norm": 13.30615520477295,
"learning_rate": 1.8167123287671234e-05,
"loss": 0.6364,
"step": 8650
},
{
"epoch": 1.191780821917808,
"grad_norm": 16.95148277282715,
"learning_rate": 1.8098630136986304e-05,
"loss": 0.676,
"step": 8700
},
{
"epoch": 1.1986301369863013,
"grad_norm": 19.168123245239258,
"learning_rate": 1.803013698630137e-05,
"loss": 0.5939,
"step": 8750
},
{
"epoch": 1.2054794520547945,
"grad_norm": 13.934683799743652,
"learning_rate": 1.796164383561644e-05,
"loss": 0.6304,
"step": 8800
},
{
"epoch": 1.2123287671232876,
"grad_norm": 16.010356903076172,
"learning_rate": 1.789315068493151e-05,
"loss": 0.7142,
"step": 8850
},
{
"epoch": 1.2191780821917808,
"grad_norm": 20.430280685424805,
"learning_rate": 1.7824657534246578e-05,
"loss": 0.6254,
"step": 8900
},
{
"epoch": 1.226027397260274,
"grad_norm": 44.09880828857422,
"learning_rate": 1.7756164383561644e-05,
"loss": 0.6566,
"step": 8950
},
{
"epoch": 1.2328767123287672,
"grad_norm": 41.73299026489258,
"learning_rate": 1.7687671232876714e-05,
"loss": 0.5479,
"step": 9000
},
{
"epoch": 1.2328767123287672,
"eval_exact_match": 85.38315988647115,
"eval_f1": 92.15019137693149,
"eval_runtime": 418.4718,
"eval_samples_per_second": 25.259,
"eval_steps_per_second": 1.58,
"step": 9000
},
{
"epoch": 1.2397260273972603,
"grad_norm": 20.86771011352539,
"learning_rate": 1.761917808219178e-05,
"loss": 0.5645,
"step": 9050
},
{
"epoch": 1.2465753424657535,
"grad_norm": 6.557628631591797,
"learning_rate": 1.755068493150685e-05,
"loss": 0.6265,
"step": 9100
},
{
"epoch": 1.2534246575342465,
"grad_norm": 6.776758193969727,
"learning_rate": 1.7482191780821915e-05,
"loss": 0.5482,
"step": 9150
},
{
"epoch": 1.2602739726027397,
"grad_norm": 20.538101196289062,
"learning_rate": 1.7413698630136985e-05,
"loss": 0.5793,
"step": 9200
},
{
"epoch": 1.2671232876712328,
"grad_norm": 19.532896041870117,
"learning_rate": 1.7345205479452054e-05,
"loss": 0.6236,
"step": 9250
},
{
"epoch": 1.273972602739726,
"grad_norm": 16.863801956176758,
"learning_rate": 1.7276712328767124e-05,
"loss": 0.5566,
"step": 9300
},
{
"epoch": 1.2808219178082192,
"grad_norm": 46.87477493286133,
"learning_rate": 1.720821917808219e-05,
"loss": 0.5785,
"step": 9350
},
{
"epoch": 1.2876712328767124,
"grad_norm": 10.397550582885742,
"learning_rate": 1.713972602739726e-05,
"loss": 0.7018,
"step": 9400
},
{
"epoch": 1.2945205479452055,
"grad_norm": 6.6365275382995605,
"learning_rate": 1.7072602739726028e-05,
"loss": 0.5387,
"step": 9450
},
{
"epoch": 1.3013698630136985,
"grad_norm": 24.18856430053711,
"learning_rate": 1.7004109589041094e-05,
"loss": 0.5884,
"step": 9500
},
{
"epoch": 1.308219178082192,
"grad_norm": 33.152278900146484,
"learning_rate": 1.6935616438356164e-05,
"loss": 0.6385,
"step": 9550
},
{
"epoch": 1.3150684931506849,
"grad_norm": 4.547526836395264,
"learning_rate": 1.6867123287671233e-05,
"loss": 0.5276,
"step": 9600
},
{
"epoch": 1.321917808219178,
"grad_norm": 34.75554656982422,
"learning_rate": 1.6798630136986303e-05,
"loss": 0.6225,
"step": 9650
},
{
"epoch": 1.3287671232876712,
"grad_norm": 15.248549461364746,
"learning_rate": 1.673013698630137e-05,
"loss": 0.5654,
"step": 9700
},
{
"epoch": 1.3356164383561644,
"grad_norm": 19.826614379882812,
"learning_rate": 1.6661643835616438e-05,
"loss": 0.6356,
"step": 9750
},
{
"epoch": 1.3424657534246576,
"grad_norm": 29.037817001342773,
"learning_rate": 1.6593150684931508e-05,
"loss": 0.5997,
"step": 9800
},
{
"epoch": 1.3493150684931507,
"grad_norm": 5.311439037322998,
"learning_rate": 1.6524657534246577e-05,
"loss": 0.5873,
"step": 9850
},
{
"epoch": 1.356164383561644,
"grad_norm": 44.800636291503906,
"learning_rate": 1.6456164383561643e-05,
"loss": 0.5232,
"step": 9900
},
{
"epoch": 1.3630136986301369,
"grad_norm": 19.861589431762695,
"learning_rate": 1.6387671232876713e-05,
"loss": 0.5877,
"step": 9950
},
{
"epoch": 1.36986301369863,
"grad_norm": 8.236838340759277,
"learning_rate": 1.6319178082191782e-05,
"loss": 0.5612,
"step": 10000
},
{
"epoch": 1.36986301369863,
"eval_exact_match": 84.95742667928099,
"eval_f1": 91.94903795865467,
"eval_runtime": 417.7906,
"eval_samples_per_second": 25.3,
"eval_steps_per_second": 1.582,
"step": 10000
},
{
"epoch": 1.3767123287671232,
"grad_norm": 5.004638671875,
"learning_rate": 1.625068493150685e-05,
"loss": 0.6488,
"step": 10050
},
{
"epoch": 1.3835616438356164,
"grad_norm": 6.322940349578857,
"learning_rate": 1.6182191780821918e-05,
"loss": 0.53,
"step": 10100
},
{
"epoch": 1.3904109589041096,
"grad_norm": 18.412700653076172,
"learning_rate": 1.6113698630136987e-05,
"loss": 0.6148,
"step": 10150
},
{
"epoch": 1.3972602739726028,
"grad_norm": 10.37777042388916,
"learning_rate": 1.6045205479452057e-05,
"loss": 0.5429,
"step": 10200
},
{
"epoch": 1.404109589041096,
"grad_norm": 14.783160209655762,
"learning_rate": 1.5976712328767126e-05,
"loss": 0.5469,
"step": 10250
},
{
"epoch": 1.410958904109589,
"grad_norm": 9.634529113769531,
"learning_rate": 1.5908219178082192e-05,
"loss": 0.5273,
"step": 10300
},
{
"epoch": 1.4178082191780823,
"grad_norm": 8.868270874023438,
"learning_rate": 1.5839726027397258e-05,
"loss": 0.6968,
"step": 10350
},
{
"epoch": 1.4246575342465753,
"grad_norm": 2.925807476043701,
"learning_rate": 1.5771232876712328e-05,
"loss": 0.5857,
"step": 10400
},
{
"epoch": 1.4315068493150684,
"grad_norm": 14.901018142700195,
"learning_rate": 1.5702739726027397e-05,
"loss": 0.5813,
"step": 10450
},
{
"epoch": 1.4383561643835616,
"grad_norm": 18.872957229614258,
"learning_rate": 1.5634246575342463e-05,
"loss": 0.6373,
"step": 10500
},
{
"epoch": 1.4452054794520548,
"grad_norm": 35.207847595214844,
"learning_rate": 1.5565753424657533e-05,
"loss": 0.5539,
"step": 10550
},
{
"epoch": 1.452054794520548,
"grad_norm": 11.02206802368164,
"learning_rate": 1.5497260273972602e-05,
"loss": 0.5957,
"step": 10600
},
{
"epoch": 1.4589041095890412,
"grad_norm": 25.235326766967773,
"learning_rate": 1.5428767123287672e-05,
"loss": 0.6574,
"step": 10650
},
{
"epoch": 1.4657534246575343,
"grad_norm": 32.09264373779297,
"learning_rate": 1.5360273972602738e-05,
"loss": 0.5925,
"step": 10700
},
{
"epoch": 1.4726027397260273,
"grad_norm": 19.04875373840332,
"learning_rate": 1.5291780821917807e-05,
"loss": 0.5259,
"step": 10750
},
{
"epoch": 1.4794520547945205,
"grad_norm": 16.185894012451172,
"learning_rate": 1.5223287671232877e-05,
"loss": 0.6109,
"step": 10800
},
{
"epoch": 1.4863013698630136,
"grad_norm": 17.464332580566406,
"learning_rate": 1.5154794520547946e-05,
"loss": 0.5961,
"step": 10850
},
{
"epoch": 1.4931506849315068,
"grad_norm": 8.539608001708984,
"learning_rate": 1.5086301369863012e-05,
"loss": 0.5867,
"step": 10900
},
{
"epoch": 1.5,
"grad_norm": 28.3868465423584,
"learning_rate": 1.5017808219178082e-05,
"loss": 0.6098,
"step": 10950
},
{
"epoch": 1.5068493150684932,
"grad_norm": 6.247623443603516,
"learning_rate": 1.4949315068493151e-05,
"loss": 0.6059,
"step": 11000
},
{
"epoch": 1.5068493150684932,
"eval_exact_match": 85.22232734153263,
"eval_f1": 92.06076201446909,
"eval_runtime": 416.847,
"eval_samples_per_second": 25.357,
"eval_steps_per_second": 1.586,
"step": 11000
},
{
"epoch": 1.5136986301369864,
"grad_norm": 22.93709373474121,
"learning_rate": 1.4880821917808219e-05,
"loss": 0.5952,
"step": 11050
},
{
"epoch": 1.5205479452054793,
"grad_norm": 40.23944091796875,
"learning_rate": 1.4812328767123289e-05,
"loss": 0.5395,
"step": 11100
},
{
"epoch": 1.5273972602739727,
"grad_norm": 18.1081485748291,
"learning_rate": 1.4743835616438356e-05,
"loss": 0.5989,
"step": 11150
},
{
"epoch": 1.5342465753424657,
"grad_norm": 34.16764831542969,
"learning_rate": 1.4675342465753426e-05,
"loss": 0.5738,
"step": 11200
},
{
"epoch": 1.541095890410959,
"grad_norm": 8.558218002319336,
"learning_rate": 1.4606849315068494e-05,
"loss": 0.5767,
"step": 11250
},
{
"epoch": 1.547945205479452,
"grad_norm": 50.398895263671875,
"learning_rate": 1.4538356164383563e-05,
"loss": 0.581,
"step": 11300
},
{
"epoch": 1.5547945205479452,
"grad_norm": 8.642769813537598,
"learning_rate": 1.4469863013698629e-05,
"loss": 0.6654,
"step": 11350
},
{
"epoch": 1.5616438356164384,
"grad_norm": 22.660131454467773,
"learning_rate": 1.4401369863013699e-05,
"loss": 0.6377,
"step": 11400
},
{
"epoch": 1.5684931506849316,
"grad_norm": 17.38098907470703,
"learning_rate": 1.4332876712328766e-05,
"loss": 0.5625,
"step": 11450
},
{
"epoch": 1.5753424657534247,
"grad_norm": 9.987977981567383,
"learning_rate": 1.4264383561643836e-05,
"loss": 0.5191,
"step": 11500
},
{
"epoch": 1.5821917808219177,
"grad_norm": 42.332763671875,
"learning_rate": 1.4195890410958904e-05,
"loss": 0.6211,
"step": 11550
},
{
"epoch": 1.589041095890411,
"grad_norm": 8.304152488708496,
"learning_rate": 1.4127397260273973e-05,
"loss": 0.6175,
"step": 11600
},
{
"epoch": 1.595890410958904,
"grad_norm": 142.1208038330078,
"learning_rate": 1.4058904109589041e-05,
"loss": 0.5902,
"step": 11650
},
{
"epoch": 1.6027397260273972,
"grad_norm": 13.38881778717041,
"learning_rate": 1.399041095890411e-05,
"loss": 0.5528,
"step": 11700
},
{
"epoch": 1.6095890410958904,
"grad_norm": 8.86255168914795,
"learning_rate": 1.3921917808219178e-05,
"loss": 0.6205,
"step": 11750
},
{
"epoch": 1.6164383561643836,
"grad_norm": 33.42393493652344,
"learning_rate": 1.3853424657534248e-05,
"loss": 0.6477,
"step": 11800
},
{
"epoch": 1.6232876712328768,
"grad_norm": 15.220393180847168,
"learning_rate": 1.3784931506849315e-05,
"loss": 0.5493,
"step": 11850
},
{
"epoch": 1.6301369863013697,
"grad_norm": 9.7684907913208,
"learning_rate": 1.3716438356164385e-05,
"loss": 0.5256,
"step": 11900
},
{
"epoch": 1.6369863013698631,
"grad_norm": 19.64045524597168,
"learning_rate": 1.3647945205479453e-05,
"loss": 0.5653,
"step": 11950
},
{
"epoch": 1.643835616438356,
"grad_norm": 28.879430770874023,
"learning_rate": 1.357945205479452e-05,
"loss": 0.5989,
"step": 12000
},
{
"epoch": 1.643835616438356,
"eval_exact_match": 85.12771996215704,
"eval_f1": 91.80306824412318,
"eval_runtime": 417.2891,
"eval_samples_per_second": 25.33,
"eval_steps_per_second": 1.584,
"step": 12000
},
{
"epoch": 1.6506849315068495,
"grad_norm": 7.393039226531982,
"learning_rate": 1.3510958904109588e-05,
"loss": 0.5271,
"step": 12050
},
{
"epoch": 1.6575342465753424,
"grad_norm": 10.611188888549805,
"learning_rate": 1.3442465753424658e-05,
"loss": 0.5939,
"step": 12100
},
{
"epoch": 1.6643835616438356,
"grad_norm": 9.510908126831055,
"learning_rate": 1.3373972602739725e-05,
"loss": 0.5574,
"step": 12150
},
{
"epoch": 1.6712328767123288,
"grad_norm": 15.351234436035156,
"learning_rate": 1.3305479452054795e-05,
"loss": 0.5861,
"step": 12200
},
{
"epoch": 1.678082191780822,
"grad_norm": 31.311676025390625,
"learning_rate": 1.3236986301369863e-05,
"loss": 0.5568,
"step": 12250
},
{
"epoch": 1.6849315068493151,
"grad_norm": 4.728596210479736,
"learning_rate": 1.3168493150684932e-05,
"loss": 0.5431,
"step": 12300
},
{
"epoch": 1.691780821917808,
"grad_norm": 21.880786895751953,
"learning_rate": 1.31e-05,
"loss": 0.5993,
"step": 12350
},
{
"epoch": 1.6986301369863015,
"grad_norm": 5.625259876251221,
"learning_rate": 1.303150684931507e-05,
"loss": 0.6345,
"step": 12400
},
{
"epoch": 1.7054794520547945,
"grad_norm": 12.03124713897705,
"learning_rate": 1.2963013698630137e-05,
"loss": 0.6195,
"step": 12450
},
{
"epoch": 1.7123287671232876,
"grad_norm": 11.920297622680664,
"learning_rate": 1.2894520547945207e-05,
"loss": 0.5523,
"step": 12500
},
{
"epoch": 1.7191780821917808,
"grad_norm": 12.449995994567871,
"learning_rate": 1.2826027397260274e-05,
"loss": 0.5442,
"step": 12550
},
{
"epoch": 1.726027397260274,
"grad_norm": 15.602882385253906,
"learning_rate": 1.2757534246575342e-05,
"loss": 0.4794,
"step": 12600
},
{
"epoch": 1.7328767123287672,
"grad_norm": 27.904523849487305,
"learning_rate": 1.268904109589041e-05,
"loss": 0.5184,
"step": 12650
},
{
"epoch": 1.7397260273972601,
"grad_norm": 6.819875717163086,
"learning_rate": 1.262054794520548e-05,
"loss": 0.4637,
"step": 12700
},
{
"epoch": 1.7465753424657535,
"grad_norm": 17.69037437438965,
"learning_rate": 1.2552054794520547e-05,
"loss": 0.5248,
"step": 12750
},
{
"epoch": 1.7534246575342465,
"grad_norm": 25.76197052001953,
"learning_rate": 1.2483561643835617e-05,
"loss": 0.6165,
"step": 12800
},
{
"epoch": 1.7602739726027399,
"grad_norm": 5.317371845245361,
"learning_rate": 1.2415068493150685e-05,
"loss": 0.5206,
"step": 12850
},
{
"epoch": 1.7671232876712328,
"grad_norm": 8.703845977783203,
"learning_rate": 1.2346575342465754e-05,
"loss": 0.5994,
"step": 12900
},
{
"epoch": 1.773972602739726,
"grad_norm": 16.243268966674805,
"learning_rate": 1.2278082191780822e-05,
"loss": 0.5416,
"step": 12950
},
{
"epoch": 1.7808219178082192,
"grad_norm": 15.478755950927734,
"learning_rate": 1.2209589041095891e-05,
"loss": 0.6375,
"step": 13000
},
{
"epoch": 1.7808219178082192,
"eval_exact_match": 85.65752128666036,
"eval_f1": 92.15631910787795,
"eval_runtime": 416.7008,
"eval_samples_per_second": 25.366,
"eval_steps_per_second": 1.586,
"step": 13000
},
{
"epoch": 1.7876712328767124,
"grad_norm": 14.403656005859375,
"learning_rate": 1.2141095890410959e-05,
"loss": 0.6117,
"step": 13050
},
{
"epoch": 1.7945205479452055,
"grad_norm": 22.657033920288086,
"learning_rate": 1.2072602739726028e-05,
"loss": 0.5757,
"step": 13100
},
{
"epoch": 1.8013698630136985,
"grad_norm": 13.059876441955566,
"learning_rate": 1.2004109589041096e-05,
"loss": 0.5765,
"step": 13150
},
{
"epoch": 1.808219178082192,
"grad_norm": 2.8631415367126465,
"learning_rate": 1.1935616438356166e-05,
"loss": 0.5688,
"step": 13200
},
{
"epoch": 1.8150684931506849,
"grad_norm": 8.850284576416016,
"learning_rate": 1.1867123287671232e-05,
"loss": 0.5713,
"step": 13250
},
{
"epoch": 1.821917808219178,
"grad_norm": 9.604997634887695,
"learning_rate": 1.1798630136986301e-05,
"loss": 0.5602,
"step": 13300
},
{
"epoch": 1.8287671232876712,
"grad_norm": 27.603858947753906,
"learning_rate": 1.1730136986301369e-05,
"loss": 0.6105,
"step": 13350
},
{
"epoch": 1.8356164383561644,
"grad_norm": 10.399881362915039,
"learning_rate": 1.1661643835616439e-05,
"loss": 0.5744,
"step": 13400
},
{
"epoch": 1.8424657534246576,
"grad_norm": 16.775104522705078,
"learning_rate": 1.1593150684931506e-05,
"loss": 0.5776,
"step": 13450
},
{
"epoch": 1.8493150684931505,
"grad_norm": 8.317610740661621,
"learning_rate": 1.1524657534246576e-05,
"loss": 0.6075,
"step": 13500
},
{
"epoch": 1.856164383561644,
"grad_norm": 18.899354934692383,
"learning_rate": 1.1456164383561644e-05,
"loss": 0.5967,
"step": 13550
},
{
"epoch": 1.8630136986301369,
"grad_norm": 10.251896858215332,
"learning_rate": 1.1387671232876713e-05,
"loss": 0.6121,
"step": 13600
},
{
"epoch": 1.8698630136986303,
"grad_norm": 24.907438278198242,
"learning_rate": 1.131917808219178e-05,
"loss": 0.6348,
"step": 13650
},
{
"epoch": 1.8767123287671232,
"grad_norm": 17.239213943481445,
"learning_rate": 1.125068493150685e-05,
"loss": 0.5835,
"step": 13700
},
{
"epoch": 1.8835616438356164,
"grad_norm": 14.36588191986084,
"learning_rate": 1.1182191780821918e-05,
"loss": 0.5681,
"step": 13750
},
{
"epoch": 1.8904109589041096,
"grad_norm": 10.424467086791992,
"learning_rate": 1.1113698630136988e-05,
"loss": 0.5334,
"step": 13800
},
{
"epoch": 1.8972602739726028,
"grad_norm": 11.122437477111816,
"learning_rate": 1.1045205479452055e-05,
"loss": 0.5594,
"step": 13850
},
{
"epoch": 1.904109589041096,
"grad_norm": 10.735795021057129,
"learning_rate": 1.0976712328767123e-05,
"loss": 0.5565,
"step": 13900
},
{
"epoch": 1.910958904109589,
"grad_norm": 10.91677474975586,
"learning_rate": 1.0908219178082191e-05,
"loss": 0.5945,
"step": 13950
},
{
"epoch": 1.9178082191780823,
"grad_norm": 7.375208377838135,
"learning_rate": 1.083972602739726e-05,
"loss": 0.536,
"step": 14000
},
{
"epoch": 1.9178082191780823,
"eval_exact_match": 86.10217596972564,
"eval_f1": 92.4099426929563,
"eval_runtime": 417.908,
"eval_samples_per_second": 25.293,
"eval_steps_per_second": 1.582,
"step": 14000
},
{
"epoch": 1.9246575342465753,
"grad_norm": 32.034141540527344,
"learning_rate": 1.0771232876712328e-05,
"loss": 0.5696,
"step": 14050
},
{
"epoch": 1.9315068493150684,
"grad_norm": 21.64228057861328,
"learning_rate": 1.0702739726027398e-05,
"loss": 0.6152,
"step": 14100
},
{
"epoch": 1.9383561643835616,
"grad_norm": 21.606149673461914,
"learning_rate": 1.0634246575342465e-05,
"loss": 0.5286,
"step": 14150
},
{
"epoch": 1.9452054794520548,
"grad_norm": 29.915918350219727,
"learning_rate": 1.0565753424657535e-05,
"loss": 0.5781,
"step": 14200
},
{
"epoch": 1.952054794520548,
"grad_norm": 17.88494873046875,
"learning_rate": 1.0497260273972603e-05,
"loss": 0.6066,
"step": 14250
},
{
"epoch": 1.958904109589041,
"grad_norm": 24.821956634521484,
"learning_rate": 1.0428767123287672e-05,
"loss": 0.5819,
"step": 14300
},
{
"epoch": 1.9657534246575343,
"grad_norm": 34.31120681762695,
"learning_rate": 1.036027397260274e-05,
"loss": 0.6531,
"step": 14350
},
{
"epoch": 1.9726027397260273,
"grad_norm": 13.344314575195312,
"learning_rate": 1.029178082191781e-05,
"loss": 0.5585,
"step": 14400
},
{
"epoch": 1.9794520547945207,
"grad_norm": 40.556358337402344,
"learning_rate": 1.0223287671232877e-05,
"loss": 0.5257,
"step": 14450
},
{
"epoch": 1.9863013698630136,
"grad_norm": 11.713874816894531,
"learning_rate": 1.0154794520547947e-05,
"loss": 0.6307,
"step": 14500
},
{
"epoch": 1.9931506849315068,
"grad_norm": 9.927220344543457,
"learning_rate": 1.0086301369863013e-05,
"loss": 0.5814,
"step": 14550
},
{
"epoch": 2.0,
"grad_norm": 14.175002098083496,
"learning_rate": 1.0017808219178082e-05,
"loss": 0.6428,
"step": 14600
},
{
"epoch": 2.006849315068493,
"grad_norm": 28.156503677368164,
"learning_rate": 9.94931506849315e-06,
"loss": 0.3428,
"step": 14650
},
{
"epoch": 2.0136986301369864,
"grad_norm": 12.531486511230469,
"learning_rate": 9.88082191780822e-06,
"loss": 0.3625,
"step": 14700
},
{
"epoch": 2.0205479452054793,
"grad_norm": 6.839471340179443,
"learning_rate": 9.812328767123287e-06,
"loss": 0.3675,
"step": 14750
},
{
"epoch": 2.0273972602739727,
"grad_norm": 9.022225379943848,
"learning_rate": 9.743835616438357e-06,
"loss": 0.3613,
"step": 14800
},
{
"epoch": 2.0342465753424657,
"grad_norm": 9.764174461364746,
"learning_rate": 9.675342465753424e-06,
"loss": 0.3353,
"step": 14850
},
{
"epoch": 2.041095890410959,
"grad_norm": 8.139081001281738,
"learning_rate": 9.606849315068494e-06,
"loss": 0.3749,
"step": 14900
},
{
"epoch": 2.047945205479452,
"grad_norm": 19.99053955078125,
"learning_rate": 9.538356164383562e-06,
"loss": 0.3434,
"step": 14950
},
{
"epoch": 2.0547945205479454,
"grad_norm": 4.129643440246582,
"learning_rate": 9.469863013698631e-06,
"loss": 0.3753,
"step": 15000
},
{
"epoch": 2.0547945205479454,
"eval_exact_match": 85.34531693472091,
"eval_f1": 92.12097175946987,
"eval_runtime": 417.8979,
"eval_samples_per_second": 25.293,
"eval_steps_per_second": 1.582,
"step": 15000
},
{
"epoch": 2.0616438356164384,
"grad_norm": 33.54549026489258,
"learning_rate": 9.401369863013699e-06,
"loss": 0.3592,
"step": 15050
},
{
"epoch": 2.0684931506849313,
"grad_norm": 24.34673500061035,
"learning_rate": 9.332876712328768e-06,
"loss": 0.3847,
"step": 15100
},
{
"epoch": 2.0753424657534247,
"grad_norm": 5.306540489196777,
"learning_rate": 9.264383561643836e-06,
"loss": 0.3157,
"step": 15150
},
{
"epoch": 2.0821917808219177,
"grad_norm": 5.992457866668701,
"learning_rate": 9.195890410958904e-06,
"loss": 0.3308,
"step": 15200
},
{
"epoch": 2.089041095890411,
"grad_norm": 15.749208450317383,
"learning_rate": 9.127397260273972e-06,
"loss": 0.3631,
"step": 15250
},
{
"epoch": 2.095890410958904,
"grad_norm": 10.97288703918457,
"learning_rate": 9.058904109589041e-06,
"loss": 0.3107,
"step": 15300
},
{
"epoch": 2.1027397260273974,
"grad_norm": 5.519676208496094,
"learning_rate": 8.990410958904109e-06,
"loss": 0.3225,
"step": 15350
},
{
"epoch": 2.1095890410958904,
"grad_norm": 24.270828247070312,
"learning_rate": 8.921917808219179e-06,
"loss": 0.3833,
"step": 15400
},
{
"epoch": 2.1164383561643834,
"grad_norm": 17.408357620239258,
"learning_rate": 8.854794520547946e-06,
"loss": 0.3484,
"step": 15450
},
{
"epoch": 2.1232876712328768,
"grad_norm": 19.13545036315918,
"learning_rate": 8.786301369863013e-06,
"loss": 0.3417,
"step": 15500
},
{
"epoch": 2.1301369863013697,
"grad_norm": 12.084640502929688,
"learning_rate": 8.717808219178083e-06,
"loss": 0.3475,
"step": 15550
},
{
"epoch": 2.136986301369863,
"grad_norm": 32.05975341796875,
"learning_rate": 8.64931506849315e-06,
"loss": 0.3406,
"step": 15600
},
{
"epoch": 2.143835616438356,
"grad_norm": 19.190528869628906,
"learning_rate": 8.58082191780822e-06,
"loss": 0.4047,
"step": 15650
},
{
"epoch": 2.1506849315068495,
"grad_norm": 2.928743600845337,
"learning_rate": 8.512328767123288e-06,
"loss": 0.3339,
"step": 15700
},
{
"epoch": 2.1575342465753424,
"grad_norm": 11.721294403076172,
"learning_rate": 8.443835616438357e-06,
"loss": 0.3314,
"step": 15750
},
{
"epoch": 2.1643835616438354,
"grad_norm": 8.114335060119629,
"learning_rate": 8.375342465753425e-06,
"loss": 0.3736,
"step": 15800
},
{
"epoch": 2.171232876712329,
"grad_norm": 14.555135726928711,
"learning_rate": 8.306849315068495e-06,
"loss": 0.3332,
"step": 15850
},
{
"epoch": 2.1780821917808217,
"grad_norm": 81.77395629882812,
"learning_rate": 8.23835616438356e-06,
"loss": 0.3049,
"step": 15900
},
{
"epoch": 2.184931506849315,
"grad_norm": 8.095141410827637,
"learning_rate": 8.16986301369863e-06,
"loss": 0.2637,
"step": 15950
},
{
"epoch": 2.191780821917808,
"grad_norm": 17.374752044677734,
"learning_rate": 8.101369863013698e-06,
"loss": 0.4439,
"step": 16000
},
{
"epoch": 2.191780821917808,
"eval_exact_match": 85.61021759697256,
"eval_f1": 92.16749429035703,
"eval_runtime": 416.4255,
"eval_samples_per_second": 25.383,
"eval_steps_per_second": 1.587,
"step": 16000
},
{
"epoch": 2.1986301369863015,
"grad_norm": 8.427570343017578,
"learning_rate": 8.032876712328767e-06,
"loss": 0.2902,
"step": 16050
},
{
"epoch": 2.2054794520547945,
"grad_norm": 6.139337539672852,
"learning_rate": 7.964383561643835e-06,
"loss": 0.3242,
"step": 16100
},
{
"epoch": 2.212328767123288,
"grad_norm": 26.255056381225586,
"learning_rate": 7.895890410958905e-06,
"loss": 0.3496,
"step": 16150
},
{
"epoch": 2.219178082191781,
"grad_norm": 16.986501693725586,
"learning_rate": 7.827397260273972e-06,
"loss": 0.3297,
"step": 16200
},
{
"epoch": 2.2260273972602738,
"grad_norm": 17.626201629638672,
"learning_rate": 7.758904109589042e-06,
"loss": 0.3733,
"step": 16250
},
{
"epoch": 2.232876712328767,
"grad_norm": 20.750221252441406,
"learning_rate": 7.69041095890411e-06,
"loss": 0.3316,
"step": 16300
},
{
"epoch": 2.23972602739726,
"grad_norm": 42.70435333251953,
"learning_rate": 7.621917808219179e-06,
"loss": 0.3721,
"step": 16350
},
{
"epoch": 2.2465753424657535,
"grad_norm": 11.971570014953613,
"learning_rate": 7.553424657534246e-06,
"loss": 0.3056,
"step": 16400
},
{
"epoch": 2.2534246575342465,
"grad_norm": 7.298882484436035,
"learning_rate": 7.484931506849315e-06,
"loss": 0.4077,
"step": 16450
},
{
"epoch": 2.26027397260274,
"grad_norm": 8.013861656188965,
"learning_rate": 7.416438356164383e-06,
"loss": 0.3488,
"step": 16500
},
{
"epoch": 2.267123287671233,
"grad_norm": 10.631738662719727,
"learning_rate": 7.347945205479452e-06,
"loss": 0.3567,
"step": 16550
},
{
"epoch": 2.2739726027397262,
"grad_norm": 47.24718475341797,
"learning_rate": 7.2794520547945206e-06,
"loss": 0.3319,
"step": 16600
},
{
"epoch": 2.280821917808219,
"grad_norm": 31.66643714904785,
"learning_rate": 7.210958904109589e-06,
"loss": 0.3768,
"step": 16650
},
{
"epoch": 2.287671232876712,
"grad_norm": 34.2202033996582,
"learning_rate": 7.142465753424657e-06,
"loss": 0.4115,
"step": 16700
},
{
"epoch": 2.2945205479452055,
"grad_norm": 35.246585845947266,
"learning_rate": 7.073972602739726e-06,
"loss": 0.3538,
"step": 16750
},
{
"epoch": 2.3013698630136985,
"grad_norm": 17.598068237304688,
"learning_rate": 7.005479452054794e-06,
"loss": 0.3189,
"step": 16800
},
{
"epoch": 2.308219178082192,
"grad_norm": 31.153356552124023,
"learning_rate": 6.936986301369863e-06,
"loss": 0.4082,
"step": 16850
},
{
"epoch": 2.315068493150685,
"grad_norm": 3.9330878257751465,
"learning_rate": 6.8684931506849315e-06,
"loss": 0.366,
"step": 16900
},
{
"epoch": 2.3219178082191783,
"grad_norm": 13.598389625549316,
"learning_rate": 6.8e-06,
"loss": 0.3723,
"step": 16950
},
{
"epoch": 2.328767123287671,
"grad_norm": 12.808009147644043,
"learning_rate": 6.731506849315069e-06,
"loss": 0.323,
"step": 17000
},
{
"epoch": 2.328767123287671,
"eval_exact_match": 85.30747398297068,
"eval_f1": 92.19853158333738,
"eval_runtime": 418.1668,
"eval_samples_per_second": 25.277,
"eval_steps_per_second": 1.581,
"step": 17000
},
{
"epoch": 2.3356164383561646,
"grad_norm": 7.125260353088379,
"learning_rate": 6.6630136986301365e-06,
"loss": 0.3353,
"step": 17050
},
{
"epoch": 2.3424657534246576,
"grad_norm": 11.679519653320312,
"learning_rate": 6.594520547945205e-06,
"loss": 0.3704,
"step": 17100
},
{
"epoch": 2.3493150684931505,
"grad_norm": 12.352055549621582,
"learning_rate": 6.526027397260274e-06,
"loss": 0.3419,
"step": 17150
},
{
"epoch": 2.356164383561644,
"grad_norm": 5.7046709060668945,
"learning_rate": 6.457534246575342e-06,
"loss": 0.3601,
"step": 17200
},
{
"epoch": 2.363013698630137,
"grad_norm": 6.679656505584717,
"learning_rate": 6.389041095890411e-06,
"loss": 0.3537,
"step": 17250
},
{
"epoch": 2.3698630136986303,
"grad_norm": 1.9317164421081543,
"learning_rate": 6.32054794520548e-06,
"loss": 0.3496,
"step": 17300
},
{
"epoch": 2.3767123287671232,
"grad_norm": 7.0557026863098145,
"learning_rate": 6.2520547945205474e-06,
"loss": 0.3469,
"step": 17350
},
{
"epoch": 2.383561643835616,
"grad_norm": 10.497695922851562,
"learning_rate": 6.183561643835616e-06,
"loss": 0.3477,
"step": 17400
},
{
"epoch": 2.3904109589041096,
"grad_norm": 7.504631996154785,
"learning_rate": 6.115068493150685e-06,
"loss": 0.3188,
"step": 17450
},
{
"epoch": 2.3972602739726026,
"grad_norm": 2.7814149856567383,
"learning_rate": 6.046575342465753e-06,
"loss": 0.3582,
"step": 17500
},
{
"epoch": 2.404109589041096,
"grad_norm": 4.986628532409668,
"learning_rate": 5.978082191780822e-06,
"loss": 0.3234,
"step": 17550
},
{
"epoch": 2.410958904109589,
"grad_norm": 10.343502044677734,
"learning_rate": 5.9095890410958906e-06,
"loss": 0.3475,
"step": 17600
},
{
"epoch": 2.4178082191780823,
"grad_norm": 19.3160400390625,
"learning_rate": 5.841095890410958e-06,
"loss": 0.3722,
"step": 17650
},
{
"epoch": 2.4246575342465753,
"grad_norm": 11.461263656616211,
"learning_rate": 5.772602739726027e-06,
"loss": 0.3842,
"step": 17700
},
{
"epoch": 2.4315068493150687,
"grad_norm": 9.144445419311523,
"learning_rate": 5.704109589041096e-06,
"loss": 0.3361,
"step": 17750
},
{
"epoch": 2.4383561643835616,
"grad_norm": 6.6573686599731445,
"learning_rate": 5.635616438356164e-06,
"loss": 0.3334,
"step": 17800
},
{
"epoch": 2.4452054794520546,
"grad_norm": 8.072036743164062,
"learning_rate": 5.567123287671233e-06,
"loss": 0.2792,
"step": 17850
},
{
"epoch": 2.452054794520548,
"grad_norm": 28.824522018432617,
"learning_rate": 5.4986301369863015e-06,
"loss": 0.3438,
"step": 17900
},
{
"epoch": 2.458904109589041,
"grad_norm": 13.252080917358398,
"learning_rate": 5.43013698630137e-06,
"loss": 0.3496,
"step": 17950
},
{
"epoch": 2.4657534246575343,
"grad_norm": 46.41205596923828,
"learning_rate": 5.361643835616438e-06,
"loss": 0.381,
"step": 18000
},
{
"epoch": 2.4657534246575343,
"eval_exact_match": 85.30747398297068,
"eval_f1": 92.19996078048693,
"eval_runtime": 416.6834,
"eval_samples_per_second": 25.367,
"eval_steps_per_second": 1.586,
"step": 18000
},
{
"epoch": 2.4726027397260273,
"grad_norm": 10.28970718383789,
"learning_rate": 5.2931506849315065e-06,
"loss": 0.396,
"step": 18050
},
{
"epoch": 2.4794520547945207,
"grad_norm": 15.0628080368042,
"learning_rate": 5.224657534246575e-06,
"loss": 0.3326,
"step": 18100
},
{
"epoch": 2.4863013698630136,
"grad_norm": 11.597463607788086,
"learning_rate": 5.156164383561644e-06,
"loss": 0.3538,
"step": 18150
},
{
"epoch": 2.493150684931507,
"grad_norm": 7.035673141479492,
"learning_rate": 5.087671232876712e-06,
"loss": 0.3049,
"step": 18200
},
{
"epoch": 2.5,
"grad_norm": 24.056955337524414,
"learning_rate": 5.019178082191781e-06,
"loss": 0.4231,
"step": 18250
},
{
"epoch": 2.506849315068493,
"grad_norm": 19.858354568481445,
"learning_rate": 4.950684931506849e-06,
"loss": 0.3143,
"step": 18300
},
{
"epoch": 2.5136986301369864,
"grad_norm": 10.258318901062012,
"learning_rate": 4.8821917808219174e-06,
"loss": 0.3242,
"step": 18350
},
{
"epoch": 2.5205479452054793,
"grad_norm": 14.595016479492188,
"learning_rate": 4.813698630136986e-06,
"loss": 0.3642,
"step": 18400
},
{
"epoch": 2.5273972602739727,
"grad_norm": 6.389834403991699,
"learning_rate": 4.745205479452055e-06,
"loss": 0.3086,
"step": 18450
},
{
"epoch": 2.5342465753424657,
"grad_norm": 5.80775260925293,
"learning_rate": 4.676712328767123e-06,
"loss": 0.3142,
"step": 18500
},
{
"epoch": 2.541095890410959,
"grad_norm": 4.96169376373291,
"learning_rate": 4.608219178082192e-06,
"loss": 0.3292,
"step": 18550
},
{
"epoch": 2.547945205479452,
"grad_norm": 22.449113845825195,
"learning_rate": 4.5397260273972606e-06,
"loss": 0.4852,
"step": 18600
},
{
"epoch": 2.5547945205479454,
"grad_norm": 23.349910736083984,
"learning_rate": 4.471232876712328e-06,
"loss": 0.3374,
"step": 18650
},
{
"epoch": 2.5616438356164384,
"grad_norm": 9.687122344970703,
"learning_rate": 4.402739726027397e-06,
"loss": 0.3257,
"step": 18700
},
{
"epoch": 2.5684931506849313,
"grad_norm": 8.347723007202148,
"learning_rate": 4.334246575342466e-06,
"loss": 0.2819,
"step": 18750
},
{
"epoch": 2.5753424657534247,
"grad_norm": 11.261799812316895,
"learning_rate": 4.265753424657534e-06,
"loss": 0.2929,
"step": 18800
},
{
"epoch": 2.5821917808219177,
"grad_norm": 7.757129192352295,
"learning_rate": 4.197260273972603e-06,
"loss": 0.2746,
"step": 18850
},
{
"epoch": 2.589041095890411,
"grad_norm": 13.809581756591797,
"learning_rate": 4.1287671232876715e-06,
"loss": 0.3915,
"step": 18900
},
{
"epoch": 2.595890410958904,
"grad_norm": 15.997642517089844,
"learning_rate": 4.060273972602739e-06,
"loss": 0.3181,
"step": 18950
},
{
"epoch": 2.602739726027397,
"grad_norm": 27.046567916870117,
"learning_rate": 3.991780821917808e-06,
"loss": 0.309,
"step": 19000
},
{
"epoch": 2.602739726027397,
"eval_exact_match": 85.57237464522233,
"eval_f1": 92.37303362559862,
"eval_runtime": 417.6236,
"eval_samples_per_second": 25.31,
"eval_steps_per_second": 1.583,
"step": 19000
},
{
"epoch": 2.6095890410958904,
"grad_norm": 12.608302116394043,
"learning_rate": 3.9232876712328765e-06,
"loss": 0.3295,
"step": 19050
},
{
"epoch": 2.616438356164384,
"grad_norm": 8.650862693786621,
"learning_rate": 3.854794520547945e-06,
"loss": 0.3119,
"step": 19100
},
{
"epoch": 2.6232876712328768,
"grad_norm": 8.674386978149414,
"learning_rate": 3.7863013698630138e-06,
"loss": 0.2546,
"step": 19150
},
{
"epoch": 2.6301369863013697,
"grad_norm": 5.919064998626709,
"learning_rate": 3.717808219178082e-06,
"loss": 0.3697,
"step": 19200
},
{
"epoch": 2.636986301369863,
"grad_norm": 7.016098499298096,
"learning_rate": 3.6493150684931506e-06,
"loss": 0.3302,
"step": 19250
},
{
"epoch": 2.643835616438356,
"grad_norm": 8.310173034667969,
"learning_rate": 3.5808219178082192e-06,
"loss": 0.349,
"step": 19300
},
{
"epoch": 2.6506849315068495,
"grad_norm": 8.628369331359863,
"learning_rate": 3.5123287671232874e-06,
"loss": 0.3424,
"step": 19350
},
{
"epoch": 2.6575342465753424,
"grad_norm": 13.100319862365723,
"learning_rate": 3.443835616438356e-06,
"loss": 0.3611,
"step": 19400
},
{
"epoch": 2.6643835616438354,
"grad_norm": 5.1128692626953125,
"learning_rate": 3.3753424657534247e-06,
"loss": 0.3575,
"step": 19450
},
{
"epoch": 2.671232876712329,
"grad_norm": 15.877631187438965,
"learning_rate": 3.306849315068493e-06,
"loss": 0.3791,
"step": 19500
},
{
"epoch": 2.678082191780822,
"grad_norm": 14.39353084564209,
"learning_rate": 3.2383561643835615e-06,
"loss": 0.2931,
"step": 19550
},
{
"epoch": 2.684931506849315,
"grad_norm": 6.899120807647705,
"learning_rate": 3.16986301369863e-06,
"loss": 0.3782,
"step": 19600
},
{
"epoch": 2.691780821917808,
"grad_norm": 10.824689865112305,
"learning_rate": 3.1013698630136988e-06,
"loss": 0.3049,
"step": 19650
},
{
"epoch": 2.6986301369863015,
"grad_norm": 18.822484970092773,
"learning_rate": 3.032876712328767e-06,
"loss": 0.4038,
"step": 19700
},
{
"epoch": 2.7054794520547945,
"grad_norm": 15.976180076599121,
"learning_rate": 2.9643835616438356e-06,
"loss": 0.2828,
"step": 19750
},
{
"epoch": 2.712328767123288,
"grad_norm": 27.864824295043945,
"learning_rate": 2.8958904109589042e-06,
"loss": 0.2974,
"step": 19800
},
{
"epoch": 2.719178082191781,
"grad_norm": 16.251916885375977,
"learning_rate": 2.8273972602739724e-06,
"loss": 0.3124,
"step": 19850
},
{
"epoch": 2.7260273972602738,
"grad_norm": 55.30453872680664,
"learning_rate": 2.758904109589041e-06,
"loss": 0.3211,
"step": 19900
},
{
"epoch": 2.732876712328767,
"grad_norm": 15.253798484802246,
"learning_rate": 2.6904109589041097e-06,
"loss": 0.3369,
"step": 19950
},
{
"epoch": 2.73972602739726,
"grad_norm": 67.18714904785156,
"learning_rate": 2.623287671232877e-06,
"loss": 0.376,
"step": 20000
},
{
"epoch": 2.73972602739726,
"eval_exact_match": 85.26963103122044,
"eval_f1": 92.09863360357713,
"eval_runtime": 417.6598,
"eval_samples_per_second": 25.308,
"eval_steps_per_second": 1.583,
"step": 20000
},
{
"epoch": 2.7465753424657535,
"grad_norm": 11.747102737426758,
"learning_rate": 2.5547945205479453e-06,
"loss": 0.3359,
"step": 20050
},
{
"epoch": 2.7534246575342465,
"grad_norm": 16.32077980041504,
"learning_rate": 2.486301369863014e-06,
"loss": 0.3529,
"step": 20100
},
{
"epoch": 2.76027397260274,
"grad_norm": 40.523311614990234,
"learning_rate": 2.4178082191780826e-06,
"loss": 0.3346,
"step": 20150
},
{
"epoch": 2.767123287671233,
"grad_norm": 16.648462295532227,
"learning_rate": 2.3493150684931508e-06,
"loss": 0.3167,
"step": 20200
},
{
"epoch": 2.7739726027397262,
"grad_norm": 13.732590675354004,
"learning_rate": 2.2808219178082194e-06,
"loss": 0.3431,
"step": 20250
},
{
"epoch": 2.780821917808219,
"grad_norm": 10.560003280639648,
"learning_rate": 2.2123287671232876e-06,
"loss": 0.3688,
"step": 20300
},
{
"epoch": 2.787671232876712,
"grad_norm": 6.171405792236328,
"learning_rate": 2.1438356164383562e-06,
"loss": 0.3314,
"step": 20350
},
{
"epoch": 2.7945205479452055,
"grad_norm": 11.62563419342041,
"learning_rate": 2.0753424657534244e-06,
"loss": 0.3371,
"step": 20400
},
{
"epoch": 2.8013698630136985,
"grad_norm": 12.698870658874512,
"learning_rate": 2.006849315068493e-06,
"loss": 0.3429,
"step": 20450
},
{
"epoch": 2.808219178082192,
"grad_norm": 12.162155151367188,
"learning_rate": 1.9383561643835617e-06,
"loss": 0.3383,
"step": 20500
},
{
"epoch": 2.815068493150685,
"grad_norm": 11.249317169189453,
"learning_rate": 1.8698630136986303e-06,
"loss": 0.3228,
"step": 20550
},
{
"epoch": 2.821917808219178,
"grad_norm": 15.184226989746094,
"learning_rate": 1.8013698630136987e-06,
"loss": 0.3299,
"step": 20600
},
{
"epoch": 2.828767123287671,
"grad_norm": 21.128114700317383,
"learning_rate": 1.7328767123287671e-06,
"loss": 0.2959,
"step": 20650
},
{
"epoch": 2.8356164383561646,
"grad_norm": 18.958660125732422,
"learning_rate": 1.6643835616438358e-06,
"loss": 0.2943,
"step": 20700
},
{
"epoch": 2.8424657534246576,
"grad_norm": 11.99002456665039,
"learning_rate": 1.5958904109589042e-06,
"loss": 0.3151,
"step": 20750
},
{
"epoch": 2.8493150684931505,
"grad_norm": 41.37422561645508,
"learning_rate": 1.5273972602739726e-06,
"loss": 0.3659,
"step": 20800
},
{
"epoch": 2.856164383561644,
"grad_norm": 63.95098876953125,
"learning_rate": 1.4589041095890412e-06,
"loss": 0.3366,
"step": 20850
},
{
"epoch": 2.863013698630137,
"grad_norm": 7.210549354553223,
"learning_rate": 1.3904109589041096e-06,
"loss": 0.3207,
"step": 20900
},
{
"epoch": 2.8698630136986303,
"grad_norm": 21.405946731567383,
"learning_rate": 1.3219178082191783e-06,
"loss": 0.314,
"step": 20950
},
{
"epoch": 2.8767123287671232,
"grad_norm": 40.97032165527344,
"learning_rate": 1.2534246575342467e-06,
"loss": 0.3598,
"step": 21000
},
{
"epoch": 2.8767123287671232,
"eval_exact_match": 85.58183538315988,
"eval_f1": 92.20238705674495,
"eval_runtime": 416.9361,
"eval_samples_per_second": 25.352,
"eval_steps_per_second": 1.585,
"step": 21000
},
{
"epoch": 2.883561643835616,
"grad_norm": 28.014732360839844,
"learning_rate": 1.184931506849315e-06,
"loss": 0.2415,
"step": 21050
},
{
"epoch": 2.8904109589041096,
"grad_norm": 28.053062438964844,
"learning_rate": 1.1164383561643837e-06,
"loss": 0.3468,
"step": 21100
},
{
"epoch": 2.897260273972603,
"grad_norm": 7.82937479019165,
"learning_rate": 1.0479452054794521e-06,
"loss": 0.3102,
"step": 21150
},
{
"epoch": 2.904109589041096,
"grad_norm": 9.58170223236084,
"learning_rate": 9.794520547945205e-07,
"loss": 0.3115,
"step": 21200
},
{
"epoch": 2.910958904109589,
"grad_norm": 15.694490432739258,
"learning_rate": 9.109589041095891e-07,
"loss": 0.2848,
"step": 21250
},
{
"epoch": 2.9178082191780823,
"grad_norm": 11.358589172363281,
"learning_rate": 8.424657534246575e-07,
"loss": 0.3353,
"step": 21300
},
{
"epoch": 2.9246575342465753,
"grad_norm": 42.85386276245117,
"learning_rate": 7.73972602739726e-07,
"loss": 0.271,
"step": 21350
},
{
"epoch": 2.9315068493150687,
"grad_norm": 4.216646194458008,
"learning_rate": 7.054794520547945e-07,
"loss": 0.352,
"step": 21400
},
{
"epoch": 2.9383561643835616,
"grad_norm": 18.55265235900879,
"learning_rate": 6.36986301369863e-07,
"loss": 0.3376,
"step": 21450
},
{
"epoch": 2.9452054794520546,
"grad_norm": 10.340827941894531,
"learning_rate": 5.684931506849316e-07,
"loss": 0.2902,
"step": 21500
},
{
"epoch": 2.952054794520548,
"grad_norm": 13.433025360107422,
"learning_rate": 5e-07,
"loss": 0.3152,
"step": 21550
},
{
"epoch": 2.958904109589041,
"grad_norm": 26.042661666870117,
"learning_rate": 4.315068493150685e-07,
"loss": 0.3431,
"step": 21600
},
{
"epoch": 2.9657534246575343,
"grad_norm": 19.630699157714844,
"learning_rate": 3.63013698630137e-07,
"loss": 0.296,
"step": 21650
},
{
"epoch": 2.9726027397260273,
"grad_norm": 25.595293045043945,
"learning_rate": 2.945205479452055e-07,
"loss": 0.352,
"step": 21700
},
{
"epoch": 2.9794520547945207,
"grad_norm": 11.616922378540039,
"learning_rate": 2.2602739726027398e-07,
"loss": 0.3358,
"step": 21750
},
{
"epoch": 2.9863013698630136,
"grad_norm": 5.852312088012695,
"learning_rate": 1.5753424657534248e-07,
"loss": 0.3017,
"step": 21800
},
{
"epoch": 2.993150684931507,
"grad_norm": 30.67576789855957,
"learning_rate": 8.904109589041097e-08,
"loss": 0.3171,
"step": 21850
},
{
"epoch": 3.0,
"grad_norm": 5.107680320739746,
"learning_rate": 2.0547945205479452e-08,
"loss": 0.3315,
"step": 21900
},
{
"epoch": 3.0,
"step": 21900,
"total_flos": 2.006738209660207e+18,
"train_loss": 0.6596583454358523,
"train_runtime": 64667.9553,
"train_samples_per_second": 4.064,
"train_steps_per_second": 0.339
}
],
"logging_steps": 50,
"max_steps": 21900,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 5000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.006738209660207e+18,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}