{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 5128,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0078003120124804995,
"grad_norm": 2.144841432571411,
"learning_rate": 0.0009989971027412525,
"loss": 1.1174,
"step": 10
},
{
"epoch": 0.015600624024960999,
"grad_norm": 1.0625462532043457,
"learning_rate": 0.0009978827724537553,
"loss": 0.5035,
"step": 20
},
{
"epoch": 0.0234009360374415,
"grad_norm": 1.619573950767517,
"learning_rate": 0.0009967684421662581,
"loss": 0.3841,
"step": 30
},
{
"epoch": 0.031201248049921998,
"grad_norm": 3.2681403160095215,
"learning_rate": 0.000995654111878761,
"loss": 0.4974,
"step": 40
},
{
"epoch": 0.0390015600624025,
"grad_norm": 1.5944786071777344,
"learning_rate": 0.0009945397815912637,
"loss": 0.4797,
"step": 50
},
{
"epoch": 0.046801872074883,
"grad_norm": 1.8234444856643677,
"learning_rate": 0.0009934254513037665,
"loss": 0.637,
"step": 60
},
{
"epoch": 0.054602184087363496,
"grad_norm": 3.790844440460205,
"learning_rate": 0.0009923111210162691,
"loss": 0.4479,
"step": 70
},
{
"epoch": 0.062402496099843996,
"grad_norm": 2.9351046085357666,
"learning_rate": 0.000991196790728772,
"loss": 0.4332,
"step": 80
},
{
"epoch": 0.07020280811232449,
"grad_norm": 2.2770025730133057,
"learning_rate": 0.0009900824604412747,
"loss": 0.4399,
"step": 90
},
{
"epoch": 0.078003120124805,
"grad_norm": 1.86591637134552,
"learning_rate": 0.0009889681301537775,
"loss": 0.407,
"step": 100
},
{
"epoch": 0.08580343213728549,
"grad_norm": 1.6851640939712524,
"learning_rate": 0.0009878537998662805,
"loss": 0.3862,
"step": 110
},
{
"epoch": 0.093603744149766,
"grad_norm": 2.5469818115234375,
"learning_rate": 0.0009867394695787831,
"loss": 0.4166,
"step": 120
},
{
"epoch": 0.10140405616224649,
"grad_norm": 1.8210259675979614,
"learning_rate": 0.000985625139291286,
"loss": 0.3785,
"step": 130
},
{
"epoch": 0.10920436817472699,
"grad_norm": 2.031057119369507,
"learning_rate": 0.0009845108090037887,
"loss": 0.4177,
"step": 140
},
{
"epoch": 0.11700468018720749,
"grad_norm": 1.6646612882614136,
"learning_rate": 0.0009833964787162915,
"loss": 0.402,
"step": 150
},
{
"epoch": 0.12480499219968799,
"grad_norm": 1.8680285215377808,
"learning_rate": 0.0009822821484287943,
"loss": 0.3282,
"step": 160
},
{
"epoch": 0.13260530421216848,
"grad_norm": 1.8039604425430298,
"learning_rate": 0.0009811678181412971,
"loss": 0.3536,
"step": 170
},
{
"epoch": 0.14040561622464898,
"grad_norm": 3.3018901348114014,
"learning_rate": 0.0009800534878538,
"loss": 0.4595,
"step": 180
},
{
"epoch": 0.1482059282371295,
"grad_norm": 3.684013843536377,
"learning_rate": 0.0009789391575663027,
"loss": 0.4288,
"step": 190
},
{
"epoch": 0.15600624024961,
"grad_norm": 1.4512592554092407,
"learning_rate": 0.0009778248272788055,
"loss": 0.5086,
"step": 200
},
{
"epoch": 0.16380655226209048,
"grad_norm": 2.3981761932373047,
"learning_rate": 0.0009767104969913081,
"loss": 0.4084,
"step": 210
},
{
"epoch": 0.17160686427457097,
"grad_norm": 3.7943010330200195,
"learning_rate": 0.000975596166703811,
"loss": 0.4524,
"step": 220
},
{
"epoch": 0.1794071762870515,
"grad_norm": 2.657606840133667,
"learning_rate": 0.0009744818364163138,
"loss": 0.3592,
"step": 230
},
{
"epoch": 0.187207488299532,
"grad_norm": 2.7629363536834717,
"learning_rate": 0.0009733675061288166,
"loss": 0.4263,
"step": 240
},
{
"epoch": 0.19500780031201248,
"grad_norm": 1.3749983310699463,
"learning_rate": 0.0009722531758413193,
"loss": 0.48,
"step": 250
},
{
"epoch": 0.20280811232449297,
"grad_norm": 2.648716449737549,
"learning_rate": 0.0009711388455538221,
"loss": 0.416,
"step": 260
},
{
"epoch": 0.21060842433697347,
"grad_norm": 1.5672308206558228,
"learning_rate": 0.0009700245152663249,
"loss": 0.4223,
"step": 270
},
{
"epoch": 0.21840873634945399,
"grad_norm": 2.618163585662842,
"learning_rate": 0.0009689101849788277,
"loss": 0.4172,
"step": 280
},
{
"epoch": 0.22620904836193448,
"grad_norm": 3.6365268230438232,
"learning_rate": 0.0009677958546913305,
"loss": 0.5501,
"step": 290
},
{
"epoch": 0.23400936037441497,
"grad_norm": 2.740039825439453,
"learning_rate": 0.0009666815244038332,
"loss": 0.3553,
"step": 300
},
{
"epoch": 0.24180967238689546,
"grad_norm": 3.406210422515869,
"learning_rate": 0.000965567194116336,
"loss": 0.3518,
"step": 310
},
{
"epoch": 0.24960998439937598,
"grad_norm": 1.4707075357437134,
"learning_rate": 0.000964452863828839,
"loss": 0.3452,
"step": 320
},
{
"epoch": 0.2574102964118565,
"grad_norm": 1.608324408531189,
"learning_rate": 0.0009633385335413417,
"loss": 0.4908,
"step": 330
},
{
"epoch": 0.26521060842433697,
"grad_norm": 4.090480327606201,
"learning_rate": 0.0009622242032538444,
"loss": 0.4597,
"step": 340
},
{
"epoch": 0.27301092043681746,
"grad_norm": 2.2214395999908447,
"learning_rate": 0.0009611098729663472,
"loss": 0.4552,
"step": 350
},
{
"epoch": 0.28081123244929795,
"grad_norm": 1.9134166240692139,
"learning_rate": 0.00095999554267885,
"loss": 0.3571,
"step": 360
},
{
"epoch": 0.28861154446177845,
"grad_norm": 1.8127851486206055,
"learning_rate": 0.0009588812123913528,
"loss": 0.3808,
"step": 370
},
{
"epoch": 0.296411856474259,
"grad_norm": 2.2262885570526123,
"learning_rate": 0.0009577668821038556,
"loss": 0.4099,
"step": 380
},
{
"epoch": 0.3042121684867395,
"grad_norm": 2.8041303157806396,
"learning_rate": 0.0009566525518163583,
"loss": 0.3988,
"step": 390
},
{
"epoch": 0.31201248049922,
"grad_norm": 6.797432899475098,
"learning_rate": 0.0009555382215288611,
"loss": 0.4728,
"step": 400
},
{
"epoch": 0.31981279251170047,
"grad_norm": 3.1861369609832764,
"learning_rate": 0.000954423891241364,
"loss": 0.3502,
"step": 410
},
{
"epoch": 0.32761310452418096,
"grad_norm": 2.9223642349243164,
"learning_rate": 0.0009533095609538667,
"loss": 0.4215,
"step": 420
},
{
"epoch": 0.33541341653666146,
"grad_norm": 6.848895072937012,
"learning_rate": 0.0009521952306663694,
"loss": 0.415,
"step": 430
},
{
"epoch": 0.34321372854914195,
"grad_norm": 9.054282188415527,
"learning_rate": 0.0009510809003788722,
"loss": 0.4667,
"step": 440
},
{
"epoch": 0.35101404056162244,
"grad_norm": 2.3005900382995605,
"learning_rate": 0.0009499665700913752,
"loss": 0.6565,
"step": 450
},
{
"epoch": 0.358814352574103,
"grad_norm": 2.9467573165893555,
"learning_rate": 0.000948852239803878,
"loss": 0.3837,
"step": 460
},
{
"epoch": 0.3666146645865835,
"grad_norm": 1.5977652072906494,
"learning_rate": 0.0009477379095163808,
"loss": 0.407,
"step": 470
},
{
"epoch": 0.374414976599064,
"grad_norm": 2.8274600505828857,
"learning_rate": 0.0009466235792288835,
"loss": 0.4501,
"step": 480
},
{
"epoch": 0.38221528861154447,
"grad_norm": 1.8566502332687378,
"learning_rate": 0.0009455092489413863,
"loss": 0.3146,
"step": 490
},
{
"epoch": 0.39001560062402496,
"grad_norm": 2.5871951580047607,
"learning_rate": 0.0009443949186538891,
"loss": 0.3335,
"step": 500
},
{
"epoch": 0.39781591263650545,
"grad_norm": 2.0552711486816406,
"learning_rate": 0.0009432805883663919,
"loss": 0.3698,
"step": 510
},
{
"epoch": 0.40561622464898595,
"grad_norm": 1.5244548320770264,
"learning_rate": 0.0009421662580788946,
"loss": 0.581,
"step": 520
},
{
"epoch": 0.41341653666146644,
"grad_norm": 1.5146633386611938,
"learning_rate": 0.0009410519277913974,
"loss": 0.3634,
"step": 530
},
{
"epoch": 0.42121684867394693,
"grad_norm": 2.394819736480713,
"learning_rate": 0.0009399375975039002,
"loss": 0.338,
"step": 540
},
{
"epoch": 0.4290171606864275,
"grad_norm": 2.3500325679779053,
"learning_rate": 0.000938823267216403,
"loss": 0.4025,
"step": 550
},
{
"epoch": 0.43681747269890797,
"grad_norm": 2.4186370372772217,
"learning_rate": 0.0009377089369289058,
"loss": 0.4095,
"step": 560
},
{
"epoch": 0.44461778471138846,
"grad_norm": 1.9770065546035767,
"learning_rate": 0.0009365946066414085,
"loss": 0.4275,
"step": 570
},
{
"epoch": 0.45241809672386896,
"grad_norm": 1.7679632902145386,
"learning_rate": 0.0009354802763539114,
"loss": 0.3949,
"step": 580
},
{
"epoch": 0.46021840873634945,
"grad_norm": 2.0794620513916016,
"learning_rate": 0.0009343659460664142,
"loss": 0.4208,
"step": 590
},
{
"epoch": 0.46801872074882994,
"grad_norm": 2.677424192428589,
"learning_rate": 0.000933251615778917,
"loss": 0.4089,
"step": 600
},
{
"epoch": 0.47581903276131043,
"grad_norm": 1.526112675666809,
"learning_rate": 0.0009321372854914197,
"loss": 0.4196,
"step": 610
},
{
"epoch": 0.4836193447737909,
"grad_norm": 1.8656370639801025,
"learning_rate": 0.0009310229552039225,
"loss": 0.3732,
"step": 620
},
{
"epoch": 0.4914196567862715,
"grad_norm": 3.3338847160339355,
"learning_rate": 0.0009299086249164253,
"loss": 0.4231,
"step": 630
},
{
"epoch": 0.49921996879875197,
"grad_norm": 2.1057350635528564,
"learning_rate": 0.0009287942946289281,
"loss": 0.3921,
"step": 640
},
{
"epoch": 0.5070202808112324,
"grad_norm": 1.544977068901062,
"learning_rate": 0.0009276799643414309,
"loss": 0.3748,
"step": 650
},
{
"epoch": 0.514820592823713,
"grad_norm": 3.4070258140563965,
"learning_rate": 0.0009265656340539336,
"loss": 0.4027,
"step": 660
},
{
"epoch": 0.5226209048361935,
"grad_norm": 5.5486931800842285,
"learning_rate": 0.0009254513037664364,
"loss": 0.4326,
"step": 670
},
{
"epoch": 0.5304212168486739,
"grad_norm": 3.6824769973754883,
"learning_rate": 0.0009243369734789392,
"loss": 0.4809,
"step": 680
},
{
"epoch": 0.5382215288611545,
"grad_norm": 6.3154778480529785,
"learning_rate": 0.000923222643191442,
"loss": 0.3588,
"step": 690
},
{
"epoch": 0.5460218408736349,
"grad_norm": 3.133465528488159,
"learning_rate": 0.0009221083129039447,
"loss": 0.4712,
"step": 700
},
{
"epoch": 0.5538221528861155,
"grad_norm": 4.222598552703857,
"learning_rate": 0.0009209939826164475,
"loss": 0.4135,
"step": 710
},
{
"epoch": 0.5616224648985959,
"grad_norm": 4.6125078201293945,
"learning_rate": 0.0009198796523289504,
"loss": 0.393,
"step": 720
},
{
"epoch": 0.5694227769110765,
"grad_norm": 6.543318748474121,
"learning_rate": 0.0009187653220414532,
"loss": 0.6138,
"step": 730
},
{
"epoch": 0.5772230889235569,
"grad_norm": 2.596463680267334,
"learning_rate": 0.000917650991753956,
"loss": 0.3694,
"step": 740
},
{
"epoch": 0.5850234009360374,
"grad_norm": 2.428490161895752,
"learning_rate": 0.0009165366614664587,
"loss": 0.4535,
"step": 750
},
{
"epoch": 0.592823712948518,
"grad_norm": 1.8790688514709473,
"learning_rate": 0.0009154223311789615,
"loss": 0.3986,
"step": 760
},
{
"epoch": 0.6006240249609984,
"grad_norm": 3.141587734222412,
"learning_rate": 0.0009143080008914643,
"loss": 0.3881,
"step": 770
},
{
"epoch": 0.608424336973479,
"grad_norm": 2.125810146331787,
"learning_rate": 0.0009131936706039671,
"loss": 0.4225,
"step": 780
},
{
"epoch": 0.6162246489859594,
"grad_norm": 2.532404661178589,
"learning_rate": 0.0009120793403164698,
"loss": 0.3423,
"step": 790
},
{
"epoch": 0.62402496099844,
"grad_norm": 3.6324350833892822,
"learning_rate": 0.0009109650100289726,
"loss": 0.4205,
"step": 800
},
{
"epoch": 0.6318252730109204,
"grad_norm": 1.4804415702819824,
"learning_rate": 0.0009098506797414754,
"loss": 0.4042,
"step": 810
},
{
"epoch": 0.6396255850234009,
"grad_norm": 1.5140562057495117,
"learning_rate": 0.0009087363494539782,
"loss": 0.3056,
"step": 820
},
{
"epoch": 0.6474258970358814,
"grad_norm": 2.470576047897339,
"learning_rate": 0.000907622019166481,
"loss": 0.391,
"step": 830
},
{
"epoch": 0.6552262090483619,
"grad_norm": 3.4496209621429443,
"learning_rate": 0.0009065076888789837,
"loss": 0.4163,
"step": 840
},
{
"epoch": 0.6630265210608425,
"grad_norm": 1.8823250532150269,
"learning_rate": 0.0009053933585914866,
"loss": 0.3877,
"step": 850
},
{
"epoch": 0.6708268330733229,
"grad_norm": 2.492297410964966,
"learning_rate": 0.0009042790283039894,
"loss": 0.3542,
"step": 860
},
{
"epoch": 0.6786271450858035,
"grad_norm": 3.977569341659546,
"learning_rate": 0.0009031646980164922,
"loss": 0.4168,
"step": 870
},
{
"epoch": 0.6864274570982839,
"grad_norm": 3.938462495803833,
"learning_rate": 0.0009020503677289949,
"loss": 0.4894,
"step": 880
},
{
"epoch": 0.6942277691107644,
"grad_norm": 1.3457701206207275,
"learning_rate": 0.0009009360374414977,
"loss": 0.4903,
"step": 890
},
{
"epoch": 0.7020280811232449,
"grad_norm": 6.3473124504089355,
"learning_rate": 0.0008998217071540005,
"loss": 0.4766,
"step": 900
},
{
"epoch": 0.7098283931357254,
"grad_norm": 3.145792245864868,
"learning_rate": 0.0008987073768665033,
"loss": 0.4119,
"step": 910
},
{
"epoch": 0.717628705148206,
"grad_norm": 1.809446930885315,
"learning_rate": 0.0008975930465790061,
"loss": 0.3991,
"step": 920
},
{
"epoch": 0.7254290171606864,
"grad_norm": 1.7960044145584106,
"learning_rate": 0.0008964787162915088,
"loss": 0.3095,
"step": 930
},
{
"epoch": 0.733229329173167,
"grad_norm": 2.9710285663604736,
"learning_rate": 0.0008953643860040116,
"loss": 0.5104,
"step": 940
},
{
"epoch": 0.7410296411856474,
"grad_norm": 2.460524797439575,
"learning_rate": 0.0008942500557165144,
"loss": 0.4332,
"step": 950
},
{
"epoch": 0.748829953198128,
"grad_norm": 1.6166704893112183,
"learning_rate": 0.0008931357254290172,
"loss": 0.3856,
"step": 960
},
{
"epoch": 0.7566302652106084,
"grad_norm": 1.747750163078308,
"learning_rate": 0.0008920213951415199,
"loss": 0.3973,
"step": 970
},
{
"epoch": 0.7644305772230889,
"grad_norm": 1.4469414949417114,
"learning_rate": 0.0008909070648540227,
"loss": 0.3344,
"step": 980
},
{
"epoch": 0.7722308892355694,
"grad_norm": 4.661273956298828,
"learning_rate": 0.0008897927345665256,
"loss": 0.6003,
"step": 990
},
{
"epoch": 0.7800312012480499,
"grad_norm": 3.6588950157165527,
"learning_rate": 0.0008886784042790284,
"loss": 0.473,
"step": 1000
},
{
"epoch": 0.7878315132605305,
"grad_norm": 2.208383798599243,
"learning_rate": 0.0008876755070202809,
"loss": 0.3899,
"step": 1010
},
{
"epoch": 0.7956318252730109,
"grad_norm": 2.3569576740264893,
"learning_rate": 0.0008865611767327836,
"loss": 0.3871,
"step": 1020
},
{
"epoch": 0.8034321372854915,
"grad_norm": 2.7071454524993896,
"learning_rate": 0.0008854468464452864,
"loss": 0.3149,
"step": 1030
},
{
"epoch": 0.8112324492979719,
"grad_norm": 2.8024532794952393,
"learning_rate": 0.0008843325161577892,
"loss": 0.3328,
"step": 1040
},
{
"epoch": 0.8190327613104524,
"grad_norm": 3.0969290733337402,
"learning_rate": 0.000883218185870292,
"loss": 0.3948,
"step": 1050
},
{
"epoch": 0.8268330733229329,
"grad_norm": 2.982484817504883,
"learning_rate": 0.0008821038555827947,
"loss": 0.4323,
"step": 1060
},
{
"epoch": 0.8346333853354134,
"grad_norm": 3.133814573287964,
"learning_rate": 0.0008809895252952975,
"loss": 0.4393,
"step": 1070
},
{
"epoch": 0.8424336973478939,
"grad_norm": 3.3123364448547363,
"learning_rate": 0.0008798751950078003,
"loss": 0.3244,
"step": 1080
},
{
"epoch": 0.8502340093603744,
"grad_norm": 2.308555841445923,
"learning_rate": 0.0008787608647203032,
"loss": 0.424,
"step": 1090
},
{
"epoch": 0.858034321372855,
"grad_norm": 3.654137134552002,
"learning_rate": 0.000877646534432806,
"loss": 0.3445,
"step": 1100
},
{
"epoch": 0.8658346333853354,
"grad_norm": 2.149843692779541,
"learning_rate": 0.0008765322041453087,
"loss": 0.3398,
"step": 1110
},
{
"epoch": 0.8736349453978159,
"grad_norm": 3.1334431171417236,
"learning_rate": 0.0008754178738578115,
"loss": 0.3333,
"step": 1120
},
{
"epoch": 0.8814352574102964,
"grad_norm": 2.2942090034484863,
"learning_rate": 0.0008743035435703143,
"loss": 0.4188,
"step": 1130
},
{
"epoch": 0.8892355694227769,
"grad_norm": 2.0195343494415283,
"learning_rate": 0.0008731892132828171,
"loss": 0.4047,
"step": 1140
},
{
"epoch": 0.8970358814352574,
"grad_norm": 2.3850839138031006,
"learning_rate": 0.0008720748829953198,
"loss": 0.3931,
"step": 1150
},
{
"epoch": 0.9048361934477379,
"grad_norm": 1.6200228929519653,
"learning_rate": 0.0008709605527078226,
"loss": 0.409,
"step": 1160
},
{
"epoch": 0.9126365054602185,
"grad_norm": 2.9001989364624023,
"learning_rate": 0.0008698462224203254,
"loss": 0.4289,
"step": 1170
},
{
"epoch": 0.9204368174726989,
"grad_norm": 1.52889883518219,
"learning_rate": 0.0008687318921328282,
"loss": 0.3575,
"step": 1180
},
{
"epoch": 0.9282371294851794,
"grad_norm": 2.359733819961548,
"learning_rate": 0.000867617561845331,
"loss": 0.3837,
"step": 1190
},
{
"epoch": 0.9360374414976599,
"grad_norm": 2.3807597160339355,
"learning_rate": 0.0008665032315578337,
"loss": 0.4206,
"step": 1200
},
{
"epoch": 0.9438377535101404,
"grad_norm": 1.8366179466247559,
"learning_rate": 0.0008653889012703365,
"loss": 0.3101,
"step": 1210
},
{
"epoch": 0.9516380655226209,
"grad_norm": 3.1048014163970947,
"learning_rate": 0.0008642745709828393,
"loss": 0.4104,
"step": 1220
},
{
"epoch": 0.9594383775351014,
"grad_norm": 1.5314342975616455,
"learning_rate": 0.0008631602406953422,
"loss": 0.3539,
"step": 1230
},
{
"epoch": 0.9672386895475819,
"grad_norm": 2.8501791954040527,
"learning_rate": 0.0008620459104078449,
"loss": 0.4104,
"step": 1240
},
{
"epoch": 0.9750390015600624,
"grad_norm": 5.708191394805908,
"learning_rate": 0.0008609315801203477,
"loss": 0.4038,
"step": 1250
},
{
"epoch": 0.982839313572543,
"grad_norm": 2.173867702484131,
"learning_rate": 0.0008598172498328505,
"loss": 0.338,
"step": 1260
},
{
"epoch": 0.9906396255850234,
"grad_norm": 2.7057418823242188,
"learning_rate": 0.0008587029195453533,
"loss": 0.3941,
"step": 1270
},
{
"epoch": 0.9984399375975039,
"grad_norm": 1.3492989540100098,
"learning_rate": 0.0008575885892578561,
"loss": 0.3696,
"step": 1280
},
{
"epoch": 1.0,
"eval_loss": 0.4044143855571747,
"eval_runtime": 635.8286,
"eval_samples_per_second": 0.865,
"eval_steps_per_second": 0.865,
"eval_wer": 26.665379416875844,
"step": 1282
},
{
"epoch": 1.0062402496099845,
"grad_norm": 2.5727291107177734,
"learning_rate": 0.0008564742589703588,
"loss": 0.3369,
"step": 1290
},
{
"epoch": 1.0140405616224648,
"grad_norm": 2.681490182876587,
"learning_rate": 0.0008553599286828616,
"loss": 0.2434,
"step": 1300
},
{
"epoch": 1.0218408736349454,
"grad_norm": 2.0296504497528076,
"learning_rate": 0.0008542455983953644,
"loss": 0.2122,
"step": 1310
},
{
"epoch": 1.029641185647426,
"grad_norm": 2.277512311935425,
"learning_rate": 0.0008531312681078672,
"loss": 0.3932,
"step": 1320
},
{
"epoch": 1.0374414976599065,
"grad_norm": 4.77215576171875,
"learning_rate": 0.0008520169378203699,
"loss": 0.2658,
"step": 1330
},
{
"epoch": 1.045241809672387,
"grad_norm": 1.4027091264724731,
"learning_rate": 0.0008509026075328727,
"loss": 0.3524,
"step": 1340
},
{
"epoch": 1.0530421216848673,
"grad_norm": 2.2849514484405518,
"learning_rate": 0.0008497882772453755,
"loss": 0.2705,
"step": 1350
},
{
"epoch": 1.0608424336973479,
"grad_norm": 1.2896777391433716,
"learning_rate": 0.0008486739469578784,
"loss": 0.264,
"step": 1360
},
{
"epoch": 1.0686427457098284,
"grad_norm": 2.4552128314971924,
"learning_rate": 0.0008475596166703812,
"loss": 0.2833,
"step": 1370
},
{
"epoch": 1.076443057722309,
"grad_norm": 2.044693946838379,
"learning_rate": 0.0008464452863828839,
"loss": 0.2116,
"step": 1380
},
{
"epoch": 1.0842433697347893,
"grad_norm": 1.3727463483810425,
"learning_rate": 0.0008453309560953867,
"loss": 0.2533,
"step": 1390
},
{
"epoch": 1.0920436817472698,
"grad_norm": 1.6917822360992432,
"learning_rate": 0.0008442166258078895,
"loss": 0.4259,
"step": 1400
},
{
"epoch": 1.0998439937597504,
"grad_norm": 2.198549747467041,
"learning_rate": 0.0008431022955203923,
"loss": 0.3161,
"step": 1410
},
{
"epoch": 1.107644305772231,
"grad_norm": 1.7467869520187378,
"learning_rate": 0.000841987965232895,
"loss": 0.2377,
"step": 1420
},
{
"epoch": 1.1154446177847115,
"grad_norm": 2.5347695350646973,
"learning_rate": 0.0008408736349453978,
"loss": 0.3258,
"step": 1430
},
{
"epoch": 1.1232449297971918,
"grad_norm": 1.9081774950027466,
"learning_rate": 0.0008397593046579006,
"loss": 0.2462,
"step": 1440
},
{
"epoch": 1.1310452418096724,
"grad_norm": 1.5889848470687866,
"learning_rate": 0.0008386449743704034,
"loss": 0.2404,
"step": 1450
},
{
"epoch": 1.138845553822153,
"grad_norm": 1.8944768905639648,
"learning_rate": 0.0008375306440829062,
"loss": 0.2707,
"step": 1460
},
{
"epoch": 1.1466458658346335,
"grad_norm": 2.5448453426361084,
"learning_rate": 0.0008364163137954089,
"loss": 0.3342,
"step": 1470
},
{
"epoch": 1.154446177847114,
"grad_norm": 2.0936005115509033,
"learning_rate": 0.0008353019835079117,
"loss": 0.385,
"step": 1480
},
{
"epoch": 1.1622464898595943,
"grad_norm": 2.614129066467285,
"learning_rate": 0.0008341876532204145,
"loss": 0.2817,
"step": 1490
},
{
"epoch": 1.1700468018720749,
"grad_norm": 1.6156001091003418,
"learning_rate": 0.0008330733229329174,
"loss": 0.3527,
"step": 1500
},
{
"epoch": 1.1778471138845554,
"grad_norm": 1.4294220209121704,
"learning_rate": 0.0008319589926454201,
"loss": 0.2469,
"step": 1510
},
{
"epoch": 1.185647425897036,
"grad_norm": 3.197176456451416,
"learning_rate": 0.0008308446623579229,
"loss": 0.2753,
"step": 1520
},
{
"epoch": 1.1934477379095163,
"grad_norm": 2.1629223823547363,
"learning_rate": 0.0008297303320704257,
"loss": 0.3951,
"step": 1530
},
{
"epoch": 1.2012480499219969,
"grad_norm": 2.9824419021606445,
"learning_rate": 0.0008286160017829285,
"loss": 0.3278,
"step": 1540
},
{
"epoch": 1.2090483619344774,
"grad_norm": 2.866138219833374,
"learning_rate": 0.0008275016714954313,
"loss": 0.4267,
"step": 1550
},
{
"epoch": 1.216848673946958,
"grad_norm": 2.36781644821167,
"learning_rate": 0.000826387341207934,
"loss": 0.3414,
"step": 1560
},
{
"epoch": 1.2246489859594383,
"grad_norm": 1.8305447101593018,
"learning_rate": 0.0008252730109204368,
"loss": 0.2925,
"step": 1570
},
{
"epoch": 1.2324492979719188,
"grad_norm": 1.9879776239395142,
"learning_rate": 0.0008241586806329396,
"loss": 0.357,
"step": 1580
},
{
"epoch": 1.2402496099843994,
"grad_norm": 2.183350086212158,
"learning_rate": 0.0008230443503454424,
"loss": 0.3409,
"step": 1590
},
{
"epoch": 1.24804992199688,
"grad_norm": 2.197072744369507,
"learning_rate": 0.0008219300200579451,
"loss": 0.33,
"step": 1600
},
{
"epoch": 1.2558502340093605,
"grad_norm": 3.2065696716308594,
"learning_rate": 0.0008208156897704479,
"loss": 0.2853,
"step": 1610
},
{
"epoch": 1.2636505460218408,
"grad_norm": 2.0581350326538086,
"learning_rate": 0.0008197013594829507,
"loss": 0.3647,
"step": 1620
},
{
"epoch": 1.2714508580343213,
"grad_norm": 3.149153232574463,
"learning_rate": 0.0008185870291954536,
"loss": 0.3921,
"step": 1630
},
{
"epoch": 1.2792511700468019,
"grad_norm": 2.5097105503082275,
"learning_rate": 0.0008174726989079563,
"loss": 0.3302,
"step": 1640
},
{
"epoch": 1.2870514820592824,
"grad_norm": 2.7537474632263184,
"learning_rate": 0.0008163583686204591,
"loss": 0.3286,
"step": 1650
},
{
"epoch": 1.294851794071763,
"grad_norm": 1.966965675354004,
"learning_rate": 0.0008152440383329619,
"loss": 0.2785,
"step": 1660
},
{
"epoch": 1.3026521060842433,
"grad_norm": 1.9159988164901733,
"learning_rate": 0.0008141297080454647,
"loss": 0.3299,
"step": 1670
},
{
"epoch": 1.3104524180967239,
"grad_norm": 2.2212252616882324,
"learning_rate": 0.0008130153777579675,
"loss": 0.2807,
"step": 1680
},
{
"epoch": 1.3182527301092044,
"grad_norm": 4.194318771362305,
"learning_rate": 0.0008119010474704702,
"loss": 0.3161,
"step": 1690
},
{
"epoch": 1.3260530421216847,
"grad_norm": 1.7189604043960571,
"learning_rate": 0.000810786717182973,
"loss": 0.3356,
"step": 1700
},
{
"epoch": 1.3338533541341655,
"grad_norm": 1.5196418762207031,
"learning_rate": 0.0008096723868954758,
"loss": 0.2616,
"step": 1710
},
{
"epoch": 1.3416536661466458,
"grad_norm": 1.497450351715088,
"learning_rate": 0.0008085580566079786,
"loss": 0.2946,
"step": 1720
},
{
"epoch": 1.3494539781591264,
"grad_norm": 1.74885892868042,
"learning_rate": 0.0008074437263204813,
"loss": 0.2801,
"step": 1730
},
{
"epoch": 1.357254290171607,
"grad_norm": 2.040701389312744,
"learning_rate": 0.0008063293960329841,
"loss": 0.3203,
"step": 1740
},
{
"epoch": 1.3650546021840873,
"grad_norm": 3.760457754135132,
"learning_rate": 0.0008052150657454869,
"loss": 0.4569,
"step": 1750
},
{
"epoch": 1.3728549141965678,
"grad_norm": 2.92971134185791,
"learning_rate": 0.0008041007354579897,
"loss": 0.3321,
"step": 1760
},
{
"epoch": 1.3806552262090483,
"grad_norm": 1.9461047649383545,
"learning_rate": 0.0008029864051704926,
"loss": 0.2696,
"step": 1770
},
{
"epoch": 1.388455538221529,
"grad_norm": 3.2626147270202637,
"learning_rate": 0.0008018720748829953,
"loss": 0.3322,
"step": 1780
},
{
"epoch": 1.3962558502340094,
"grad_norm": 2.1270642280578613,
"learning_rate": 0.0008007577445954981,
"loss": 0.2965,
"step": 1790
},
{
"epoch": 1.4040561622464898,
"grad_norm": 2.3174221515655518,
"learning_rate": 0.0007996434143080009,
"loss": 0.3425,
"step": 1800
},
{
"epoch": 1.4118564742589703,
"grad_norm": 2.5749576091766357,
"learning_rate": 0.0007985290840205037,
"loss": 0.3622,
"step": 1810
},
{
"epoch": 1.4196567862714509,
"grad_norm": 1.873813509941101,
"learning_rate": 0.0007974147537330064,
"loss": 0.2497,
"step": 1820
},
{
"epoch": 1.4274570982839314,
"grad_norm": 3.633928060531616,
"learning_rate": 0.0007963004234455092,
"loss": 0.4058,
"step": 1830
},
{
"epoch": 1.435257410296412,
"grad_norm": 2.356269598007202,
"learning_rate": 0.000795186093158012,
"loss": 0.2635,
"step": 1840
},
{
"epoch": 1.4430577223088923,
"grad_norm": 1.9108752012252808,
"learning_rate": 0.0007940717628705148,
"loss": 0.336,
"step": 1850
},
{
"epoch": 1.4508580343213728,
"grad_norm": 1.5505330562591553,
"learning_rate": 0.0007929574325830176,
"loss": 0.3493,
"step": 1860
},
{
"epoch": 1.4586583463338534,
"grad_norm": 1.9970422983169556,
"learning_rate": 0.0007918431022955203,
"loss": 0.273,
"step": 1870
},
{
"epoch": 1.466458658346334,
"grad_norm": 2.753758192062378,
"learning_rate": 0.0007907287720080231,
"loss": 0.2845,
"step": 1880
},
{
"epoch": 1.4742589703588145,
"grad_norm": NaN,
"learning_rate": 0.0007897258747492757,
"loss": 0.2544,
"step": 1890
},
{
"epoch": 1.4820592823712948,
"grad_norm": 3.0995099544525146,
"learning_rate": 0.0007886115444617785,
"loss": 0.2884,
"step": 1900
},
{
"epoch": 1.4898595943837754,
"grad_norm": 5.728559970855713,
"learning_rate": 0.0007874972141742812,
"loss": 0.2681,
"step": 1910
},
{
"epoch": 1.497659906396256,
"grad_norm": 1.492622971534729,
"learning_rate": 0.000786382883886784,
"loss": 0.2891,
"step": 1920
},
{
"epoch": 1.5054602184087362,
"grad_norm": 1.7419252395629883,
"learning_rate": 0.0007852685535992868,
"loss": 0.4089,
"step": 1930
},
{
"epoch": 1.513260530421217,
"grad_norm": 6.814690589904785,
"learning_rate": 0.0007841542233117896,
"loss": 0.3372,
"step": 1940
},
{
"epoch": 1.5210608424336973,
"grad_norm": 2.380725860595703,
"learning_rate": 0.0007830398930242924,
"loss": 0.3189,
"step": 1950
},
{
"epoch": 1.5288611544461779,
"grad_norm": 5.004116058349609,
"learning_rate": 0.0007819255627367951,
"loss": 0.3018,
"step": 1960
},
{
"epoch": 1.5366614664586584,
"grad_norm": 2.604365825653076,
"learning_rate": 0.0007808112324492979,
"loss": 0.3054,
"step": 1970
},
{
"epoch": 1.5444617784711387,
"grad_norm": 1.585584044456482,
"learning_rate": 0.0007796969021618007,
"loss": 0.3477,
"step": 1980
},
{
"epoch": 1.5522620904836193,
"grad_norm": 1.8678693771362305,
"learning_rate": 0.0007785825718743035,
"loss": 0.3577,
"step": 1990
},
{
"epoch": 1.5600624024960998,
"grad_norm": 1.654689073562622,
"learning_rate": 0.0007774682415868062,
"loss": 0.2625,
"step": 2000
},
{
"epoch": 1.5678627145085804,
"grad_norm": 2.108919858932495,
"learning_rate": 0.0007763539112993092,
"loss": 0.2497,
"step": 2010
},
{
"epoch": 1.575663026521061,
"grad_norm": 7.198604106903076,
"learning_rate": 0.000775239581011812,
"loss": 0.3382,
"step": 2020
},
{
"epoch": 1.5834633385335413,
"grad_norm": 2.2285892963409424,
"learning_rate": 0.0007741252507243148,
"loss": 0.2598,
"step": 2030
},
{
"epoch": 1.5912636505460218,
"grad_norm": 1.7743014097213745,
"learning_rate": 0.0007730109204368176,
"loss": 0.2757,
"step": 2040
},
{
"epoch": 1.5990639625585024,
"grad_norm": 1.7763789892196655,
"learning_rate": 0.0007718965901493203,
"loss": 0.2703,
"step": 2050
},
{
"epoch": 1.6068642745709827,
"grad_norm": 2.159956693649292,
"learning_rate": 0.000770782259861823,
"loss": 0.2824,
"step": 2060
},
{
"epoch": 1.6146645865834635,
"grad_norm": 1.4845560789108276,
"learning_rate": 0.0007696679295743259,
"loss": 0.2528,
"step": 2070
},
{
"epoch": 1.6224648985959438,
"grad_norm": 3.627887010574341,
"learning_rate": 0.0007685535992868287,
"loss": 0.3197,
"step": 2080
},
{
"epoch": 1.6302652106084243,
"grad_norm": 2.2174973487854004,
"learning_rate": 0.0007674392689993314,
"loss": 0.2994,
"step": 2090
},
{
"epoch": 1.6380655226209049,
"grad_norm": 2.5977325439453125,
"learning_rate": 0.0007663249387118342,
"loss": 0.2991,
"step": 2100
},
{
"epoch": 1.6458658346333852,
"grad_norm": 1.9066824913024902,
"learning_rate": 0.000765210608424337,
"loss": 0.2166,
"step": 2110
},
{
"epoch": 1.653666146645866,
"grad_norm": 1.7197297811508179,
"learning_rate": 0.0007640962781368398,
"loss": 0.2948,
"step": 2120
},
{
"epoch": 1.6614664586583463,
"grad_norm": 2.054304361343384,
"learning_rate": 0.0007629819478493426,
"loss": 0.3073,
"step": 2130
},
{
"epoch": 1.6692667706708268,
"grad_norm": 1.7934963703155518,
"learning_rate": 0.0007618676175618453,
"loss": 0.2667,
"step": 2140
},
{
"epoch": 1.6770670826833074,
"grad_norm": 2.5259838104248047,
"learning_rate": 0.0007607532872743482,
"loss": 0.3322,
"step": 2150
},
{
"epoch": 1.6848673946957877,
"grad_norm": 3.6354122161865234,
"learning_rate": 0.000759638956986851,
"loss": 0.3909,
"step": 2160
},
{
"epoch": 1.6926677067082685,
"grad_norm": 1.6722809076309204,
"learning_rate": 0.0007585246266993538,
"loss": 0.2987,
"step": 2170
},
{
"epoch": 1.7004680187207488,
"grad_norm": 4.3235015869140625,
"learning_rate": 0.0007574102964118565,
"loss": 0.3112,
"step": 2180
},
{
"epoch": 1.7082683307332294,
"grad_norm": 2.236316442489624,
"learning_rate": 0.0007562959661243593,
"loss": 0.3174,
"step": 2190
},
{
"epoch": 1.71606864274571,
"grad_norm": 21.32891273498535,
"learning_rate": 0.0007551816358368621,
"loss": 0.3696,
"step": 2200
},
{
"epoch": 1.7238689547581902,
"grad_norm": 2.4251410961151123,
"learning_rate": 0.0007540673055493649,
"loss": 0.3171,
"step": 2210
},
{
"epoch": 1.7316692667706708,
"grad_norm": 2.4152424335479736,
"learning_rate": 0.0007529529752618677,
"loss": 0.328,
"step": 2220
},
{
"epoch": 1.7394695787831513,
"grad_norm": 2.0988574028015137,
"learning_rate": 0.0007518386449743704,
"loss": 0.3061,
"step": 2230
},
{
"epoch": 1.7472698907956317,
"grad_norm": 4.469291687011719,
"learning_rate": 0.0007507243146868732,
"loss": 0.3315,
"step": 2240
},
{
"epoch": 1.7550702028081124,
"grad_norm": 2.4917778968811035,
"learning_rate": 0.000749609984399376,
"loss": 0.4296,
"step": 2250
},
{
"epoch": 1.7628705148205928,
"grad_norm": 3.073840379714966,
"learning_rate": 0.0007484956541118788,
"loss": 0.3685,
"step": 2260
},
{
"epoch": 1.7706708268330733,
"grad_norm": 2.205733299255371,
"learning_rate": 0.0007473813238243815,
"loss": 0.2691,
"step": 2270
},
{
"epoch": 1.7784711388455539,
"grad_norm": 2.3948941230773926,
"learning_rate": 0.0007462669935368844,
"loss": 0.3065,
"step": 2280
},
{
"epoch": 1.7862714508580342,
"grad_norm": 2.6060824394226074,
"learning_rate": 0.0007451526632493872,
"loss": 0.3354,
"step": 2290
},
{
"epoch": 1.794071762870515,
"grad_norm": 3.2586774826049805,
"learning_rate": 0.00074403833296189,
"loss": 0.261,
"step": 2300
},
{
"epoch": 1.8018720748829953,
"grad_norm": 1.6417285203933716,
"learning_rate": 0.0007429240026743928,
"loss": 0.3828,
"step": 2310
},
{
"epoch": 1.8096723868954758,
"grad_norm": 4.006927967071533,
"learning_rate": 0.0007418096723868955,
"loss": 0.4228,
"step": 2320
},
{
"epoch": 1.8174726989079564,
"grad_norm": 2.5880374908447266,
"learning_rate": 0.0007406953420993983,
"loss": 0.2766,
"step": 2330
},
{
"epoch": 1.8252730109204367,
"grad_norm": 1.602337121963501,
"learning_rate": 0.0007395810118119011,
"loss": 0.2794,
"step": 2340
},
{
"epoch": 1.8330733229329175,
"grad_norm": 5.932153224945068,
"learning_rate": 0.0007384666815244039,
"loss": 0.3471,
"step": 2350
},
{
"epoch": 1.8408736349453978,
"grad_norm": 4.076808452606201,
"learning_rate": 0.0007373523512369066,
"loss": 0.2649,
"step": 2360
},
{
"epoch": 1.8486739469578783,
"grad_norm": 4.666397571563721,
"learning_rate": 0.0007362380209494094,
"loss": 0.2751,
"step": 2370
},
{
"epoch": 1.856474258970359,
"grad_norm": 3.792745590209961,
"learning_rate": 0.0007351236906619122,
"loss": 0.2606,
"step": 2380
},
{
"epoch": 1.8642745709828392,
"grad_norm": 2.5275423526763916,
"learning_rate": 0.000734009360374415,
"loss": 0.3469,
"step": 2390
},
{
"epoch": 1.8720748829953198,
"grad_norm": 1.59649658203125,
"learning_rate": 0.0007328950300869178,
"loss": 0.2724,
"step": 2400
},
{
"epoch": 1.8798751950078003,
"grad_norm": 3.7428267002105713,
"learning_rate": 0.0007317806997994206,
"loss": 0.4426,
"step": 2410
},
{
"epoch": 1.8876755070202809,
"grad_norm": 3.7439956665039062,
"learning_rate": 0.0007306663695119234,
"loss": 0.2901,
"step": 2420
},
{
"epoch": 1.8954758190327614,
"grad_norm": 2.3777198791503906,
"learning_rate": 0.0007295520392244262,
"loss": 0.2837,
"step": 2430
},
{
"epoch": 1.9032761310452417,
"grad_norm": 2.7654988765716553,
"learning_rate": 0.000728437708936929,
"loss": 0.277,
"step": 2440
},
{
"epoch": 1.9110764430577223,
"grad_norm": 1.8758680820465088,
"learning_rate": 0.0007273233786494317,
"loss": 0.2706,
"step": 2450
},
{
"epoch": 1.9188767550702028,
"grad_norm": 2.8725340366363525,
"learning_rate": 0.0007262090483619345,
"loss": 0.2566,
"step": 2460
},
{
"epoch": 1.9266770670826832,
"grad_norm": 2.4021458625793457,
"learning_rate": 0.0007250947180744373,
"loss": 0.2645,
"step": 2470
},
{
"epoch": 1.934477379095164,
"grad_norm": 2.8407838344573975,
"learning_rate": 0.0007239803877869401,
"loss": 0.3152,
"step": 2480
},
{
"epoch": 1.9422776911076443,
"grad_norm": 3.606403112411499,
"learning_rate": 0.0007228660574994429,
"loss": 0.373,
"step": 2490
},
{
"epoch": 1.9500780031201248,
"grad_norm": 2.362473487854004,
"learning_rate": 0.0007217517272119456,
"loss": 0.4332,
"step": 2500
},
{
"epoch": 1.9578783151326054,
"grad_norm": 1.9711815118789673,
"learning_rate": 0.0007206373969244484,
"loss": 0.2606,
"step": 2510
},
{
"epoch": 1.9656786271450857,
"grad_norm": 2.683908224105835,
"learning_rate": 0.0007195230666369512,
"loss": 0.3582,
"step": 2520
},
{
"epoch": 1.9734789391575664,
"grad_norm": 2.5902493000030518,
"learning_rate": 0.000718408736349454,
"loss": 0.2694,
"step": 2530
},
{
"epoch": 1.9812792511700468,
"grad_norm": 3.92708420753479,
"learning_rate": 0.0007172944060619567,
"loss": 0.3083,
"step": 2540
},
{
"epoch": 1.9890795631825273,
"grad_norm": 2.6788370609283447,
"learning_rate": 0.0007161800757744596,
"loss": 0.2587,
"step": 2550
},
{
"epoch": 1.9968798751950079,
"grad_norm": 2.413313627243042,
"learning_rate": 0.0007150657454869624,
"loss": 0.2732,
"step": 2560
},
{
"epoch": 2.0,
"eval_loss": 0.3617618680000305,
"eval_runtime": 36952.264,
"eval_samples_per_second": 0.015,
"eval_steps_per_second": 0.015,
"eval_wer": 22.793975670978952,
"step": 2564
},
{
"epoch": 2.004680187207488,
"grad_norm": 1.572451114654541,
"learning_rate": 0.0007139514151994652,
"loss": 0.2147,
"step": 2570
},
{
"epoch": 2.012480499219969,
"grad_norm": 2.4759023189544678,
"learning_rate": 0.000712837084911968,
"loss": 0.2015,
"step": 2580
},
{
"epoch": 2.0202808112324493,
"grad_norm": 1.6903563737869263,
"learning_rate": 0.0007117227546244707,
"loss": 0.1703,
"step": 2590
},
{
"epoch": 2.0280811232449296,
"grad_norm": 3.494985580444336,
"learning_rate": 0.0007106084243369735,
"loss": 0.2063,
"step": 2600
},
{
"epoch": 2.0358814352574104,
"grad_norm": 1.5024439096450806,
"learning_rate": 0.0007094940940494763,
"loss": 0.1807,
"step": 2610
},
{
"epoch": 2.0436817472698907,
"grad_norm": 1.9423105716705322,
"learning_rate": 0.0007083797637619791,
"loss": 0.1953,
"step": 2620
},
{
"epoch": 2.0514820592823715,
"grad_norm": 0.8572360277175903,
"learning_rate": 0.0007072654334744818,
"loss": 0.3423,
"step": 2630
},
{
"epoch": 2.059282371294852,
"grad_norm": 2.574855327606201,
"learning_rate": 0.0007061511031869846,
"loss": 0.1916,
"step": 2640
},
{
"epoch": 2.067082683307332,
"grad_norm": 2.2941219806671143,
"learning_rate": 0.0007050367728994874,
"loss": 0.2168,
"step": 2650
},
{
"epoch": 2.074882995319813,
"grad_norm": 2.0448150634765625,
"learning_rate": 0.0007039224426119902,
"loss": 0.2412,
"step": 2660
},
{
"epoch": 2.0826833073322932,
"grad_norm": 1.8044381141662598,
"learning_rate": 0.000702808112324493,
"loss": 0.3261,
"step": 2670
},
{
"epoch": 2.090483619344774,
"grad_norm": 2.752220630645752,
"learning_rate": 0.0007016937820369958,
"loss": 0.2007,
"step": 2680
},
{
"epoch": 2.0982839313572543,
"grad_norm": 1.4598050117492676,
"learning_rate": 0.0007005794517494986,
"loss": 0.1856,
"step": 2690
},
{
"epoch": 2.1060842433697347,
"grad_norm": 3.578192710876465,
"learning_rate": 0.0006994651214620014,
"loss": 0.1991,
"step": 2700
},
{
"epoch": 2.1138845553822154,
"grad_norm": 2.6971054077148438,
"learning_rate": 0.0006983507911745042,
"loss": 0.2134,
"step": 2710
},
{
"epoch": 2.1216848673946958,
"grad_norm": 2.437596559524536,
"learning_rate": 0.0006972364608870069,
"loss": 0.228,
"step": 2720
},
{
"epoch": 2.129485179407176,
"grad_norm": 2.6254658699035645,
"learning_rate": 0.0006961221305995097,
"loss": 0.1671,
"step": 2730
},
{
"epoch": 2.137285491419657,
"grad_norm": 1.3765720129013062,
"learning_rate": 0.0006950078003120125,
"loss": 0.2391,
"step": 2740
},
{
"epoch": 2.145085803432137,
"grad_norm": 2.192396879196167,
"learning_rate": 0.0006938934700245153,
"loss": 0.2331,
"step": 2750
},
{
"epoch": 2.152886115444618,
"grad_norm": 1.4418809413909912,
"learning_rate": 0.0006927791397370181,
"loss": 0.2041,
"step": 2760
},
{
"epoch": 2.1606864274570983,
"grad_norm": 2.553459882736206,
"learning_rate": 0.0006916648094495208,
"loss": 0.2342,
"step": 2770
},
{
"epoch": 2.1684867394695786,
"grad_norm": 1.7199114561080933,
"learning_rate": 0.0006905504791620236,
"loss": 0.1841,
"step": 2780
},
{
"epoch": 2.1762870514820594,
"grad_norm": 2.2145979404449463,
"learning_rate": 0.0006894361488745264,
"loss": 0.1821,
"step": 2790
},
{
"epoch": 2.1840873634945397,
"grad_norm": 2.434779405593872,
"learning_rate": 0.0006883218185870292,
"loss": 0.2115,
"step": 2800
},
{
"epoch": 2.1918876755070205,
"grad_norm": 1.0518474578857422,
"learning_rate": 0.0006872074882995319,
"loss": 0.2186,
"step": 2810
},
{
"epoch": 2.199687987519501,
"grad_norm": 1.9856785535812378,
"learning_rate": 0.0006860931580120348,
"loss": 0.2448,
"step": 2820
},
{
"epoch": 2.207488299531981,
"grad_norm": 17.3148136138916,
"learning_rate": 0.0006849788277245376,
"loss": 0.2372,
"step": 2830
},
{
"epoch": 2.215288611544462,
"grad_norm": 3.4590959548950195,
"learning_rate": 0.0006838644974370404,
"loss": 0.3179,
"step": 2840
},
{
"epoch": 2.223088923556942,
"grad_norm": 2.127650260925293,
"learning_rate": 0.0006827501671495432,
"loss": 0.2445,
"step": 2850
},
{
"epoch": 2.230889235569423,
"grad_norm": 1.895729660987854,
"learning_rate": 0.0006816358368620459,
"loss": 0.1966,
"step": 2860
},
{
"epoch": 2.2386895475819033,
"grad_norm": 1.4693467617034912,
"learning_rate": 0.0006805215065745487,
"loss": 0.2351,
"step": 2870
},
{
"epoch": 2.2464898595943836,
"grad_norm": 1.500453233718872,
"learning_rate": 0.0006794071762870515,
"loss": 0.1946,
"step": 2880
},
{
"epoch": 2.2542901716068644,
"grad_norm": 2.8631374835968018,
"learning_rate": 0.0006782928459995543,
"loss": 0.2367,
"step": 2890
},
{
"epoch": 2.2620904836193447,
"grad_norm": 1.5281766653060913,
"learning_rate": 0.000677178515712057,
"loss": 0.1855,
"step": 2900
},
{
"epoch": 2.2698907956318255,
"grad_norm": 2.3027195930480957,
"learning_rate": 0.0006760641854245598,
"loss": 0.2117,
"step": 2910
},
{
"epoch": 2.277691107644306,
"grad_norm": 2.9962668418884277,
"learning_rate": 0.0006749498551370626,
"loss": 0.2441,
"step": 2920
},
{
"epoch": 2.285491419656786,
"grad_norm": 2.489192008972168,
"learning_rate": 0.0006738355248495654,
"loss": 0.2212,
"step": 2930
},
{
"epoch": 2.293291731669267,
"grad_norm": 1.366945743560791,
"learning_rate": 0.0006727211945620682,
"loss": 0.2003,
"step": 2940
},
{
"epoch": 2.3010920436817472,
"grad_norm": 1.4768056869506836,
"learning_rate": 0.000671606864274571,
"loss": 0.3107,
"step": 2950
},
{
"epoch": 2.308892355694228,
"grad_norm": 1.797979474067688,
"learning_rate": 0.0006704925339870738,
"loss": 0.1978,
"step": 2960
},
{
"epoch": 2.3166926677067083,
"grad_norm": 1.9439573287963867,
"learning_rate": 0.0006693782036995766,
"loss": 0.2306,
"step": 2970
},
{
"epoch": 2.3244929797191887,
"grad_norm": 1.582478642463684,
"learning_rate": 0.0006682638734120794,
"loss": 0.2079,
"step": 2980
},
{
"epoch": 2.3322932917316694,
"grad_norm": 2.1245157718658447,
"learning_rate": 0.0006671495431245821,
"loss": 0.2258,
"step": 2990
},
{
"epoch": 2.3400936037441498,
"grad_norm": 2.427675724029541,
"learning_rate": 0.0006660352128370849,
"loss": 0.2846,
"step": 3000
},
{
"epoch": 2.34789391575663,
"grad_norm": 0.9095527529716492,
"learning_rate": 0.0006649208825495877,
"loss": 0.1746,
"step": 3010
},
{
"epoch": 2.355694227769111,
"grad_norm": 2.0468294620513916,
"learning_rate": 0.0006638065522620905,
"loss": 0.2645,
"step": 3020
},
{
"epoch": 2.363494539781591,
"grad_norm": 1.5703836679458618,
"learning_rate": 0.0006626922219745933,
"loss": 0.1896,
"step": 3030
},
{
"epoch": 2.371294851794072,
"grad_norm": 2.37263822555542,
"learning_rate": 0.000661577891687096,
"loss": 0.232,
"step": 3040
},
{
"epoch": 2.3790951638065523,
"grad_norm": 1.6041433811187744,
"learning_rate": 0.0006604635613995988,
"loss": 0.2135,
"step": 3050
},
{
"epoch": 2.3868954758190326,
"grad_norm": 1.87883722782135,
"learning_rate": 0.0006593492311121016,
"loss": 0.1993,
"step": 3060
},
{
"epoch": 2.3946957878315134,
"grad_norm": 2.5502099990844727,
"learning_rate": 0.0006582349008246044,
"loss": 0.2468,
"step": 3070
},
{
"epoch": 2.4024960998439937,
"grad_norm": 2.681384801864624,
"learning_rate": 0.0006571205705371071,
"loss": 0.2364,
"step": 3080
},
{
"epoch": 2.410296411856474,
"grad_norm": 1.2032707929611206,
"learning_rate": 0.00065600624024961,
"loss": 0.2732,
"step": 3090
},
{
"epoch": 2.418096723868955,
"grad_norm": 1.553661584854126,
"learning_rate": 0.0006548919099621128,
"loss": 0.2214,
"step": 3100
},
{
"epoch": 2.425897035881435,
"grad_norm": 0.9736389517784119,
"learning_rate": 0.0006537775796746156,
"loss": 0.1778,
"step": 3110
},
{
"epoch": 2.433697347893916,
"grad_norm": 1.911352276802063,
"learning_rate": 0.0006526632493871184,
"loss": 0.2062,
"step": 3120
},
{
"epoch": 2.4414976599063962,
"grad_norm": 1.4338595867156982,
"learning_rate": 0.0006515489190996211,
"loss": 0.193,
"step": 3130
},
{
"epoch": 2.4492979719188765,
"grad_norm": 1.3027153015136719,
"learning_rate": 0.0006504345888121239,
"loss": 0.3175,
"step": 3140
},
{
"epoch": 2.4570982839313573,
"grad_norm": 2.262709140777588,
"learning_rate": 0.0006493202585246267,
"loss": 0.2874,
"step": 3150
},
{
"epoch": 2.4648985959438376,
"grad_norm": 1.36016845703125,
"learning_rate": 0.0006482059282371295,
"loss": 0.1603,
"step": 3160
},
{
"epoch": 2.4726989079563184,
"grad_norm": 3.441779613494873,
"learning_rate": 0.0006470915979496322,
"loss": 0.4564,
"step": 3170
},
{
"epoch": 2.4804992199687987,
"grad_norm": 1.8022549152374268,
"learning_rate": 0.000645977267662135,
"loss": 0.1928,
"step": 3180
},
{
"epoch": 2.488299531981279,
"grad_norm": 2.104497194290161,
"learning_rate": 0.0006448629373746378,
"loss": 0.2047,
"step": 3190
},
{
"epoch": 2.49609984399376,
"grad_norm": 2.5888454914093018,
"learning_rate": 0.0006437486070871406,
"loss": 0.2591,
"step": 3200
},
{
"epoch": 2.50390015600624,
"grad_norm": 1.3476183414459229,
"learning_rate": 0.0006426342767996434,
"loss": 0.2636,
"step": 3210
},
{
"epoch": 2.511700468018721,
"grad_norm": 2.888965606689453,
"learning_rate": 0.0006415199465121462,
"loss": 0.216,
"step": 3220
},
{
"epoch": 2.5195007800312013,
"grad_norm": 1.8190357685089111,
"learning_rate": 0.000640405616224649,
"loss": 0.3498,
"step": 3230
},
{
"epoch": 2.5273010920436816,
"grad_norm": 1.5132287740707397,
"learning_rate": 0.0006392912859371518,
"loss": 0.227,
"step": 3240
},
{
"epoch": 2.5351014040561624,
"grad_norm": 2.8061325550079346,
"learning_rate": 0.0006381769556496546,
"loss": 0.2089,
"step": 3250
},
{
"epoch": 2.5429017160686427,
"grad_norm": 2.2521767616271973,
"learning_rate": 0.0006370626253621573,
"loss": 0.2362,
"step": 3260
},
{
"epoch": 2.5507020280811235,
"grad_norm": 1.4635623693466187,
"learning_rate": 0.0006359482950746601,
"loss": 0.1901,
"step": 3270
},
{
"epoch": 2.5585023400936038,
"grad_norm": 1.9882755279541016,
"learning_rate": 0.0006348339647871629,
"loss": 0.2387,
"step": 3280
},
{
"epoch": 2.566302652106084,
"grad_norm": 6.737502098083496,
"learning_rate": 0.0006337196344996657,
"loss": 0.2278,
"step": 3290
},
{
"epoch": 2.574102964118565,
"grad_norm": 1.686926245689392,
"learning_rate": 0.0006326053042121685,
"loss": 0.3118,
"step": 3300
},
{
"epoch": 2.581903276131045,
"grad_norm": 1.436584234237671,
"learning_rate": 0.0006314909739246712,
"loss": 0.1787,
"step": 3310
},
{
"epoch": 2.589703588143526,
"grad_norm": 3.646476984024048,
"learning_rate": 0.000630376643637174,
"loss": 0.2165,
"step": 3320
},
{
"epoch": 2.5975039001560063,
"grad_norm": 1.8787113428115845,
"learning_rate": 0.0006292623133496768,
"loss": 0.1993,
"step": 3330
},
{
"epoch": 2.6053042121684866,
"grad_norm": 2.4485440254211426,
"learning_rate": 0.0006281479830621796,
"loss": 0.24,
"step": 3340
},
{
"epoch": 2.6131045241809674,
"grad_norm": 2.73197865486145,
"learning_rate": 0.0006270336527746824,
"loss": 0.228,
"step": 3350
},
{
"epoch": 2.6209048361934477,
"grad_norm": 3.2943315505981445,
"learning_rate": 0.0006259193224871852,
"loss": 0.2289,
"step": 3360
},
{
"epoch": 2.6287051482059285,
"grad_norm": 2.165308952331543,
"learning_rate": 0.000624804992199688,
"loss": 0.2435,
"step": 3370
},
{
"epoch": 2.636505460218409,
"grad_norm": 2.3766629695892334,
"learning_rate": 0.0006236906619121908,
"loss": 0.26,
"step": 3380
},
{
"epoch": 2.644305772230889,
"grad_norm": 1.1527057886123657,
"learning_rate": 0.0006225763316246936,
"loss": 0.2486,
"step": 3390
},
{
"epoch": 2.6521060842433695,
"grad_norm": 2.6304874420166016,
"learning_rate": 0.0006214620013371963,
"loss": 0.3827,
"step": 3400
},
{
"epoch": 2.6599063962558502,
"grad_norm": 1.5219537019729614,
"learning_rate": 0.0006203476710496991,
"loss": 0.2231,
"step": 3410
},
{
"epoch": 2.667706708268331,
"grad_norm": 1.9267528057098389,
"learning_rate": 0.0006192333407622019,
"loss": 0.2468,
"step": 3420
},
{
"epoch": 2.6755070202808113,
"grad_norm": 2.247861385345459,
"learning_rate": 0.0006181190104747047,
"loss": 0.2463,
"step": 3430
},
{
"epoch": 2.6833073322932917,
"grad_norm": 2.6133053302764893,
"learning_rate": 0.0006170046801872074,
"loss": 0.2636,
"step": 3440
},
{
"epoch": 2.691107644305772,
"grad_norm": 1.4675954580307007,
"learning_rate": 0.0006158903498997102,
"loss": 0.2141,
"step": 3450
},
{
"epoch": 2.6989079563182528,
"grad_norm": 3.3972527980804443,
"learning_rate": 0.000614776019612213,
"loss": 0.2102,
"step": 3460
},
{
"epoch": 2.706708268330733,
"grad_norm": 2.0020527839660645,
"learning_rate": 0.0006136616893247158,
"loss": 0.2618,
"step": 3470
},
{
"epoch": 2.714508580343214,
"grad_norm": 1.4799424409866333,
"learning_rate": 0.0006125473590372186,
"loss": 0.2644,
"step": 3480
},
{
"epoch": 2.722308892355694,
"grad_norm": 2.207921028137207,
"learning_rate": 0.0006114330287497214,
"loss": 0.2496,
"step": 3490
},
{
"epoch": 2.7301092043681745,
"grad_norm": 2.7361607551574707,
"learning_rate": 0.0006103186984622242,
"loss": 0.1917,
"step": 3500
},
{
"epoch": 2.7379095163806553,
"grad_norm": 2.6031532287597656,
"learning_rate": 0.000609204368174727,
"loss": 0.2358,
"step": 3510
},
{
"epoch": 2.7457098283931356,
"grad_norm": 1.442651629447937,
"learning_rate": 0.0006080900378872299,
"loss": 0.2929,
"step": 3520
},
{
"epoch": 2.7535101404056164,
"grad_norm": 2.853076457977295,
"learning_rate": 0.0006069757075997325,
"loss": 0.2287,
"step": 3530
},
{
"epoch": 2.7613104524180967,
"grad_norm": 1.827863335609436,
"learning_rate": 0.0006058613773122353,
"loss": 0.284,
"step": 3540
},
{
"epoch": 2.769110764430577,
"grad_norm": 1.29642915725708,
"learning_rate": 0.0006047470470247382,
"loss": 0.2156,
"step": 3550
},
{
"epoch": 2.776911076443058,
"grad_norm": 2.0758543014526367,
"learning_rate": 0.000603632716737241,
"loss": 0.4312,
"step": 3560
},
{
"epoch": 2.784711388455538,
"grad_norm": 2.1832942962646484,
"learning_rate": 0.0006025183864497438,
"loss": 0.22,
"step": 3570
},
{
"epoch": 2.792511700468019,
"grad_norm": 1.541040301322937,
"learning_rate": 0.0006014040561622464,
"loss": 0.2452,
"step": 3580
},
{
"epoch": 2.800312012480499,
"grad_norm": 1.5947539806365967,
"learning_rate": 0.0006002897258747492,
"loss": 0.2708,
"step": 3590
},
{
"epoch": 2.8081123244929795,
"grad_norm": 2.130390167236328,
"learning_rate": 0.000599175395587252,
"loss": 0.2194,
"step": 3600
},
{
"epoch": 2.8159126365054603,
"grad_norm": 2.382166862487793,
"learning_rate": 0.0005980610652997549,
"loss": 0.1931,
"step": 3610
},
{
"epoch": 2.8237129485179406,
"grad_norm": 4.421852111816406,
"learning_rate": 0.0005969467350122577,
"loss": 0.3081,
"step": 3620
},
{
"epoch": 2.8315132605304214,
"grad_norm": 1.1889768838882446,
"learning_rate": 0.0005958324047247605,
"loss": 0.2134,
"step": 3630
},
{
"epoch": 2.8393135725429017,
"grad_norm": 2.6204874515533447,
"learning_rate": 0.0005947180744372633,
"loss": 0.2128,
"step": 3640
},
{
"epoch": 2.847113884555382,
"grad_norm": 1.4705913066864014,
"learning_rate": 0.0005936037441497661,
"loss": 0.1894,
"step": 3650
},
{
"epoch": 2.854914196567863,
"grad_norm": 3.110135555267334,
"learning_rate": 0.0005924894138622688,
"loss": 0.2072,
"step": 3660
},
{
"epoch": 2.862714508580343,
"grad_norm": 1.3255491256713867,
"learning_rate": 0.0005913750835747716,
"loss": 0.2325,
"step": 3670
},
{
"epoch": 2.870514820592824,
"grad_norm": 2.2520713806152344,
"learning_rate": 0.0005902607532872744,
"loss": 0.2121,
"step": 3680
},
{
"epoch": 2.8783151326053042,
"grad_norm": 1.4630913734436035,
"learning_rate": 0.0005891464229997772,
"loss": 0.2281,
"step": 3690
},
{
"epoch": 2.8861154446177846,
"grad_norm": 2.0491292476654053,
"learning_rate": 0.00058803209271228,
"loss": 0.2227,
"step": 3700
},
{
"epoch": 2.8939157566302653,
"grad_norm": 6.133053302764893,
"learning_rate": 0.0005869177624247827,
"loss": 0.3106,
"step": 3710
},
{
"epoch": 2.9017160686427457,
"grad_norm": 2.3226382732391357,
"learning_rate": 0.0005858034321372855,
"loss": 0.2193,
"step": 3720
},
{
"epoch": 2.9095163806552264,
"grad_norm": 1.351330041885376,
"learning_rate": 0.0005846891018497883,
"loss": 0.1928,
"step": 3730
},
{
"epoch": 2.9173166926677068,
"grad_norm": 0.9889002442359924,
"learning_rate": 0.0005835747715622911,
"loss": 0.2395,
"step": 3740
},
{
"epoch": 2.925117004680187,
"grad_norm": 3.9231808185577393,
"learning_rate": 0.0005824604412747938,
"loss": 0.2288,
"step": 3750
},
{
"epoch": 2.932917316692668,
"grad_norm": 1.1299773454666138,
"learning_rate": 0.0005813461109872967,
"loss": 0.1996,
"step": 3760
},
{
"epoch": 2.940717628705148,
"grad_norm": 1.894411563873291,
"learning_rate": 0.0005802317806997995,
"loss": 0.2444,
"step": 3770
},
{
"epoch": 2.948517940717629,
"grad_norm": 3.100918769836426,
"learning_rate": 0.0005791174504123023,
"loss": 0.2723,
"step": 3780
},
{
"epoch": 2.9563182527301093,
"grad_norm": 2.921398639678955,
"learning_rate": 0.0005780031201248051,
"loss": 0.1963,
"step": 3790
},
{
"epoch": 2.9641185647425896,
"grad_norm": 2.898193120956421,
"learning_rate": 0.0005768887898373078,
"loss": 0.2262,
"step": 3800
},
{
"epoch": 2.97191887675507,
"grad_norm": 1.8510135412216187,
"learning_rate": 0.0005757744595498106,
"loss": 0.2253,
"step": 3810
},
{
"epoch": 2.9797191887675507,
"grad_norm": 2.716972827911377,
"learning_rate": 0.0005746601292623134,
"loss": 0.222,
"step": 3820
},
{
"epoch": 2.9875195007800315,
"grad_norm": 2.466111660003662,
"learning_rate": 0.0005735457989748162,
"loss": 0.2903,
"step": 3830
},
{
"epoch": 2.995319812792512,
"grad_norm": 2.409747838973999,
"learning_rate": 0.0005724314686873189,
"loss": 0.3088,
"step": 3840
},
{
"epoch": 3.0,
"eval_loss": 0.34428906440734863,
"eval_runtime": 557.5413,
"eval_samples_per_second": 0.986,
"eval_steps_per_second": 0.986,
"eval_wer": 24.454527901139215,
"step": 3846
},
{
"epoch": 3.003120124804992,
"grad_norm": 1.5137571096420288,
"learning_rate": 0.0005713171383998217,
"loss": 0.197,
"step": 3850
},
{
"epoch": 3.010920436817473,
"grad_norm": 0.7653738856315613,
"learning_rate": 0.0005702028081123245,
"loss": 0.1295,
"step": 3860
},
{
"epoch": 3.0187207488299532,
"grad_norm": 1.05947744846344,
"learning_rate": 0.0005690884778248273,
"loss": 0.1684,
"step": 3870
},
{
"epoch": 3.0265210608424336,
"grad_norm": 1.4757158756256104,
"learning_rate": 0.0005679741475373301,
"loss": 0.1379,
"step": 3880
},
{
"epoch": 3.0343213728549143,
"grad_norm": 1.789844274520874,
"learning_rate": 0.0005668598172498329,
"loss": 0.1711,
"step": 3890
},
{
"epoch": 3.0421216848673946,
"grad_norm": 1.6123414039611816,
"learning_rate": 0.0005657454869623357,
"loss": 0.1375,
"step": 3900
},
{
"epoch": 3.049921996879875,
"grad_norm": 1.1085964441299438,
"learning_rate": 0.0005646311566748385,
"loss": 0.1552,
"step": 3910
},
{
"epoch": 3.0577223088923557,
"grad_norm": 0.5497006773948669,
"learning_rate": 0.0005635168263873413,
"loss": 0.2185,
"step": 3920
},
{
"epoch": 3.065522620904836,
"grad_norm": 1.584539532661438,
"learning_rate": 0.000562402496099844,
"loss": 0.1398,
"step": 3930
},
{
"epoch": 3.073322932917317,
"grad_norm": 1.2666114568710327,
"learning_rate": 0.0005612881658123468,
"loss": 0.1372,
"step": 3940
},
{
"epoch": 3.081123244929797,
"grad_norm": 2.3011374473571777,
"learning_rate": 0.0005601738355248496,
"loss": 0.1354,
"step": 3950
},
{
"epoch": 3.0889235569422775,
"grad_norm": 1.05606210231781,
"learning_rate": 0.0005590595052373524,
"loss": 0.1158,
"step": 3960
},
{
"epoch": 3.0967238689547583,
"grad_norm": 1.0332001447677612,
"learning_rate": 0.0005579451749498552,
"loss": 0.1333,
"step": 3970
},
{
"epoch": 3.1045241809672386,
"grad_norm": 1.020912766456604,
"learning_rate": 0.0005568308446623579,
"loss": 0.1259,
"step": 3980
},
{
"epoch": 3.1123244929797194,
"grad_norm": 1.2610992193222046,
"learning_rate": 0.0005557165143748607,
"loss": 0.1391,
"step": 3990
},
{
"epoch": 3.1201248049921997,
"grad_norm": 1.1720095872879028,
"learning_rate": 0.0005546021840873635,
"loss": 0.188,
"step": 4000
},
{
"epoch": 3.12792511700468,
"grad_norm": 3.7483229637145996,
"learning_rate": 0.0005534878537998663,
"loss": 0.1198,
"step": 4010
},
{
"epoch": 3.135725429017161,
"grad_norm": 0.9843854308128357,
"learning_rate": 0.000552373523512369,
"loss": 0.1405,
"step": 4020
},
{
"epoch": 3.143525741029641,
"grad_norm": 2.162191152572632,
"learning_rate": 0.0005512591932248719,
"loss": 0.1268,
"step": 4030
},
{
"epoch": 3.151326053042122,
"grad_norm": 1.1201051473617554,
"learning_rate": 0.0005501448629373747,
"loss": 0.1354,
"step": 4040
},
{
"epoch": 3.159126365054602,
"grad_norm": 8.070413589477539,
"learning_rate": 0.0005490305326498775,
"loss": 0.1438,
"step": 4050
},
{
"epoch": 3.1669266770670825,
"grad_norm": 0.971108078956604,
"learning_rate": 0.0005479162023623803,
"loss": 0.1521,
"step": 4060
},
{
"epoch": 3.1747269890795633,
"grad_norm": 0.9375523924827576,
"learning_rate": 0.000546801872074883,
"loss": 0.1313,
"step": 4070
},
{
"epoch": 3.1825273010920436,
"grad_norm": 1.3351547718048096,
"learning_rate": 0.0005456875417873858,
"loss": 0.1626,
"step": 4080
},
{
"epoch": 3.1903276131045244,
"grad_norm": 2.073784589767456,
"learning_rate": 0.0005445732114998886,
"loss": 0.1435,
"step": 4090
},
{
"epoch": 3.1981279251170047,
"grad_norm": 1.691977620124817,
"learning_rate": 0.0005434588812123914,
"loss": 0.1742,
"step": 4100
},
{
"epoch": 3.205928237129485,
"grad_norm": 22.18892478942871,
"learning_rate": 0.0005423445509248941,
"loss": 0.1618,
"step": 4110
},
{
"epoch": 3.213728549141966,
"grad_norm": 1.7861751317977905,
"learning_rate": 0.0005412302206373969,
"loss": 0.1351,
"step": 4120
},
{
"epoch": 3.221528861154446,
"grad_norm": 1.1942836046218872,
"learning_rate": 0.0005401158903498997,
"loss": 0.1403,
"step": 4130
},
{
"epoch": 3.2293291731669265,
"grad_norm": 4.223365306854248,
"learning_rate": 0.0005390015600624025,
"loss": 0.1568,
"step": 4140
},
{
"epoch": 3.2371294851794072,
"grad_norm": 1.828805685043335,
"learning_rate": 0.0005378872297749053,
"loss": 0.1732,
"step": 4150
},
{
"epoch": 3.2449297971918876,
"grad_norm": 1.3445240259170532,
"learning_rate": 0.0005367728994874081,
"loss": 0.1609,
"step": 4160
},
{
"epoch": 3.2527301092043683,
"grad_norm": 1.8210989236831665,
"learning_rate": 0.0005356585691999109,
"loss": 0.1529,
"step": 4170
},
{
"epoch": 3.2605304212168487,
"grad_norm": 1.5427830219268799,
"learning_rate": 0.0005345442389124137,
"loss": 0.1245,
"step": 4180
},
{
"epoch": 3.268330733229329,
"grad_norm": 2.216970920562744,
"learning_rate": 0.0005334299086249165,
"loss": 0.1321,
"step": 4190
},
{
"epoch": 3.2761310452418098,
"grad_norm": 2.470158338546753,
"learning_rate": 0.0005323155783374192,
"loss": 0.1466,
"step": 4200
},
{
"epoch": 3.28393135725429,
"grad_norm": 1.8444827795028687,
"learning_rate": 0.000531201248049922,
"loss": 0.141,
"step": 4210
},
{
"epoch": 3.291731669266771,
"grad_norm": 1.771625280380249,
"learning_rate": 0.0005300869177624248,
"loss": 0.1443,
"step": 4220
},
{
"epoch": 3.299531981279251,
"grad_norm": 2.486222267150879,
"learning_rate": 0.0005289725874749276,
"loss": 0.1665,
"step": 4230
},
{
"epoch": 3.3073322932917315,
"grad_norm": 2.0185546875,
"learning_rate": 0.0005278582571874304,
"loss": 0.2224,
"step": 4240
},
{
"epoch": 3.3151326053042123,
"grad_norm": 1.810857892036438,
"learning_rate": 0.0005267439268999331,
"loss": 0.1384,
"step": 4250
},
{
"epoch": 3.3229329173166926,
"grad_norm": 0.9884628057479858,
"learning_rate": 0.0005256295966124359,
"loss": 0.1028,
"step": 4260
},
{
"epoch": 3.330733229329173,
"grad_norm": 0.9811989068984985,
"learning_rate": 0.0005245152663249387,
"loss": 0.1327,
"step": 4270
},
{
"epoch": 3.3385335413416537,
"grad_norm": 0.9268475770950317,
"learning_rate": 0.0005234009360374415,
"loss": 0.2011,
"step": 4280
},
{
"epoch": 3.346333853354134,
"grad_norm": 2.9813990592956543,
"learning_rate": 0.0005222866057499443,
"loss": 0.1729,
"step": 4290
},
{
"epoch": 3.354134165366615,
"grad_norm": 1.216805338859558,
"learning_rate": 0.0005211722754624471,
"loss": 0.148,
"step": 4300
},
{
"epoch": 3.361934477379095,
"grad_norm": 4.585384845733643,
"learning_rate": 0.0005200579451749499,
"loss": 0.28,
"step": 4310
},
{
"epoch": 3.3697347893915754,
"grad_norm": 1.331865906715393,
"learning_rate": 0.0005189436148874527,
"loss": 0.1757,
"step": 4320
},
{
"epoch": 3.377535101404056,
"grad_norm": 3.4767305850982666,
"learning_rate": 0.0005178292845999555,
"loss": 0.1955,
"step": 4330
},
{
"epoch": 3.3853354134165365,
"grad_norm": 1.2598906755447388,
"learning_rate": 0.0005167149543124582,
"loss": 0.1633,
"step": 4340
},
{
"epoch": 3.3931357254290173,
"grad_norm": 1.9768121242523193,
"learning_rate": 0.000515600624024961,
"loss": 0.1314,
"step": 4350
},
{
"epoch": 3.4009360374414976,
"grad_norm": 1.916244387626648,
"learning_rate": 0.0005144862937374638,
"loss": 0.1826,
"step": 4360
},
{
"epoch": 3.408736349453978,
"grad_norm": 1.159989595413208,
"learning_rate": 0.0005133719634499666,
"loss": 0.1655,
"step": 4370
},
{
"epoch": 3.4165366614664587,
"grad_norm": 2.1428520679473877,
"learning_rate": 0.0005122576331624693,
"loss": 0.1868,
"step": 4380
},
{
"epoch": 3.424336973478939,
"grad_norm": 1.3343071937561035,
"learning_rate": 0.0005111433028749721,
"loss": 0.1539,
"step": 4390
},
{
"epoch": 3.43213728549142,
"grad_norm": 2.33966326713562,
"learning_rate": 0.0005100289725874749,
"loss": 0.1613,
"step": 4400
},
{
"epoch": 3.4399375975039,
"grad_norm": 1.662204384803772,
"learning_rate": 0.0005089146422999777,
"loss": 0.1554,
"step": 4410
},
{
"epoch": 3.4477379095163805,
"grad_norm": 0.7760083675384521,
"learning_rate": 0.0005078003120124805,
"loss": 0.0962,
"step": 4420
},
{
"epoch": 3.4555382215288613,
"grad_norm": 1.860338807106018,
"learning_rate": 0.0005066859817249833,
"loss": 0.1757,
"step": 4430
},
{
"epoch": 3.4633385335413416,
"grad_norm": 0.6259675025939941,
"learning_rate": 0.0005055716514374861,
"loss": 0.176,
"step": 4440
},
{
"epoch": 3.4711388455538223,
"grad_norm": 2.819291591644287,
"learning_rate": 0.0005044573211499889,
"loss": 0.2447,
"step": 4450
},
{
"epoch": 3.4789391575663027,
"grad_norm": 1.125827431678772,
"learning_rate": 0.0005033429908624917,
"loss": 0.1265,
"step": 4460
},
{
"epoch": 3.486739469578783,
"grad_norm": 1.1929935216903687,
"learning_rate": 0.0005022286605749944,
"loss": 0.1428,
"step": 4470
},
{
"epoch": 3.4945397815912638,
"grad_norm": 1.2320295572280884,
"learning_rate": 0.0005011143302874972,
"loss": 0.1372,
"step": 4480
},
{
"epoch": 3.502340093603744,
"grad_norm": 1.1627320051193237,
"learning_rate": 0.0005,
"loss": 0.1143,
"step": 4490
},
{
"epoch": 3.510140405616225,
"grad_norm": 2.7850723266601562,
"learning_rate": 0.0004988856697125028,
"loss": 0.1431,
"step": 4500
},
{
"epoch": 3.517940717628705,
"grad_norm": 1.2920804023742676,
"learning_rate": 0.0004977713394250056,
"loss": 0.17,
"step": 4510
},
{
"epoch": 3.5257410296411855,
"grad_norm": 1.2807234525680542,
"learning_rate": 0.0004966570091375084,
"loss": 0.1379,
"step": 4520
},
{
"epoch": 3.5335413416536663,
"grad_norm": 1.2454224824905396,
"learning_rate": 0.0004955426788500111,
"loss": 0.1439,
"step": 4530
},
{
"epoch": 3.5413416536661466,
"grad_norm": 3.7264480590820312,
"learning_rate": 0.0004944283485625139,
"loss": 0.1691,
"step": 4540
},
{
"epoch": 3.5491419656786274,
"grad_norm": 1.653113842010498,
"learning_rate": 0.0004933140182750167,
"loss": 0.157,
"step": 4550
},
{
"epoch": 3.5569422776911077,
"grad_norm": 2.0576372146606445,
"learning_rate": 0.0004921996879875195,
"loss": 0.171,
"step": 4560
},
{
"epoch": 3.564742589703588,
"grad_norm": 1.499207615852356,
"learning_rate": 0.0004910853577000223,
"loss": 0.1297,
"step": 4570
},
{
"epoch": 3.572542901716069,
"grad_norm": 1.2240318059921265,
"learning_rate": 0.0004899710274125251,
"loss": 0.2274,
"step": 4580
},
{
"epoch": 3.580343213728549,
"grad_norm": 1.4562804698944092,
"learning_rate": 0.0004888566971250279,
"loss": 0.1884,
"step": 4590
},
{
"epoch": 3.58814352574103,
"grad_norm": 2.0862207412719727,
"learning_rate": 0.00048774236683753067,
"loss": 0.1323,
"step": 4600
},
{
"epoch": 3.5959438377535102,
"grad_norm": 1.8791660070419312,
"learning_rate": 0.00048662803655003347,
"loss": 0.1446,
"step": 4610
},
{
"epoch": 3.6037441497659906,
"grad_norm": 2.589106798171997,
"learning_rate": 0.0004855137062625362,
"loss": 0.1722,
"step": 4620
},
{
"epoch": 3.611544461778471,
"grad_norm": 1.3053058385849,
"learning_rate": 0.000484399375975039,
"loss": 0.1458,
"step": 4630
},
{
"epoch": 3.6193447737909517,
"grad_norm": 1.4898595809936523,
"learning_rate": 0.00048328504568754177,
"loss": 0.155,
"step": 4640
},
{
"epoch": 3.627145085803432,
"grad_norm": 1.9109569787979126,
"learning_rate": 0.00048217071540004457,
"loss": 0.1526,
"step": 4650
},
{
"epoch": 3.6349453978159127,
"grad_norm": 2.045905351638794,
"learning_rate": 0.00048105638511254737,
"loss": 0.1281,
"step": 4660
},
{
"epoch": 3.642745709828393,
"grad_norm": 2.0706069469451904,
"learning_rate": 0.0004799420548250502,
"loss": 0.2016,
"step": 4670
},
{
"epoch": 3.6505460218408734,
"grad_norm": 1.3242971897125244,
"learning_rate": 0.000478827724537553,
"loss": 0.1715,
"step": 4680
},
{
"epoch": 3.658346333853354,
"grad_norm": 1.2207064628601074,
"learning_rate": 0.0004777133942500557,
"loss": 0.1262,
"step": 4690
},
{
"epoch": 3.6661466458658345,
"grad_norm": 2.0521817207336426,
"learning_rate": 0.0004765990639625585,
"loss": 0.1929,
"step": 4700
},
{
"epoch": 3.6739469578783153,
"grad_norm": 1.009435772895813,
"learning_rate": 0.00047548473367506127,
"loss": 0.2248,
"step": 4710
},
{
"epoch": 3.6817472698907956,
"grad_norm": 1.105904221534729,
"learning_rate": 0.0004743704033875641,
"loss": 0.1554,
"step": 4720
},
{
"epoch": 3.689547581903276,
"grad_norm": 1.9131145477294922,
"learning_rate": 0.0004732560731000669,
"loss": 0.1435,
"step": 4730
},
{
"epoch": 3.6973478939157567,
"grad_norm": 4.082576751708984,
"learning_rate": 0.0004721417428125697,
"loss": 0.1539,
"step": 4740
},
{
"epoch": 3.705148205928237,
"grad_norm": 0.8368641138076782,
"learning_rate": 0.0004710274125250724,
"loss": 0.1296,
"step": 4750
},
{
"epoch": 3.712948517940718,
"grad_norm": 2.7137794494628906,
"learning_rate": 0.00046991308223757523,
"loss": 0.1557,
"step": 4760
},
{
"epoch": 3.720748829953198,
"grad_norm": 1.2786542177200317,
"learning_rate": 0.00046879875195007803,
"loss": 0.1529,
"step": 4770
},
{
"epoch": 3.7285491419656784,
"grad_norm": 2.214386463165283,
"learning_rate": 0.0004676844216625808,
"loss": 0.143,
"step": 4780
},
{
"epoch": 3.736349453978159,
"grad_norm": 1.2340818643569946,
"learning_rate": 0.0004665700913750836,
"loss": 0.1481,
"step": 4790
},
{
"epoch": 3.7441497659906395,
"grad_norm": 1.804880976676941,
"learning_rate": 0.0004654557610875864,
"loss": 0.1385,
"step": 4800
},
{
"epoch": 3.7519500780031203,
"grad_norm": 1.0058701038360596,
"learning_rate": 0.0004643414308000892,
"loss": 0.2446,
"step": 4810
},
{
"epoch": 3.7597503900156006,
"grad_norm": 2.6180830001831055,
"learning_rate": 0.00046322710051259193,
"loss": 0.18,
"step": 4820
},
{
"epoch": 3.767550702028081,
"grad_norm": 0.8924421072006226,
"learning_rate": 0.00046211277022509473,
"loss": 0.1533,
"step": 4830
},
{
"epoch": 3.7753510140405617,
"grad_norm": 1.5280059576034546,
"learning_rate": 0.0004609984399375975,
"loss": 0.1567,
"step": 4840
},
{
"epoch": 3.783151326053042,
"grad_norm": 2.1860077381134033,
"learning_rate": 0.0004598841096501003,
"loss": 0.1502,
"step": 4850
},
{
"epoch": 3.790951638065523,
"grad_norm": 1.2397303581237793,
"learning_rate": 0.00045876977936260314,
"loss": 0.163,
"step": 4860
},
{
"epoch": 3.798751950078003,
"grad_norm": 1.8644335269927979,
"learning_rate": 0.0004576554490751059,
"loss": 0.1843,
"step": 4870
},
{
"epoch": 3.8065522620904835,
"grad_norm": 4.985640525817871,
"learning_rate": 0.0004565411187876087,
"loss": 0.2699,
"step": 4880
},
{
"epoch": 3.8143525741029642,
"grad_norm": 1.4370672702789307,
"learning_rate": 0.00045542678850011144,
"loss": 0.1285,
"step": 4890
},
{
"epoch": 3.8221528861154446,
"grad_norm": 1.725677728652954,
"learning_rate": 0.00045431245821261424,
"loss": 0.1609,
"step": 4900
},
{
"epoch": 3.8299531981279253,
"grad_norm": 0.9739437699317932,
"learning_rate": 0.000453198127925117,
"loss": 0.1614,
"step": 4910
},
{
"epoch": 3.8377535101404057,
"grad_norm": 3.672434091567993,
"learning_rate": 0.0004520837976376198,
"loss": 0.1747,
"step": 4920
},
{
"epoch": 3.845553822152886,
"grad_norm": 1.3917264938354492,
"learning_rate": 0.0004509694673501226,
"loss": 0.159,
"step": 4930
},
{
"epoch": 3.8533541341653668,
"grad_norm": 2.578519105911255,
"learning_rate": 0.0004498551370626254,
"loss": 0.1394,
"step": 4940
},
{
"epoch": 3.861154446177847,
"grad_norm": 1.3072192668914795,
"learning_rate": 0.00044874080677512814,
"loss": 0.1167,
"step": 4950
},
{
"epoch": 3.868954758190328,
"grad_norm": 1.1134214401245117,
"learning_rate": 0.00044762647648763094,
"loss": 0.1445,
"step": 4960
},
{
"epoch": 3.876755070202808,
"grad_norm": 2.7288825511932373,
"learning_rate": 0.00044651214620013374,
"loss": 0.1693,
"step": 4970
},
{
"epoch": 3.8845553822152885,
"grad_norm": 1.1238129138946533,
"learning_rate": 0.0004453978159126365,
"loss": 0.1455,
"step": 4980
},
{
"epoch": 3.892355694227769,
"grad_norm": 1.7059211730957031,
"learning_rate": 0.0004442834856251393,
"loss": 0.1069,
"step": 4990
},
{
"epoch": 3.9001560062402496,
"grad_norm": 1.0301696062088013,
"learning_rate": 0.0004431691553376421,
"loss": 0.3035,
"step": 5000
},
{
"epoch": 3.9079563182527304,
"grad_norm": 1.744986891746521,
"learning_rate": 0.0004420548250501449,
"loss": 0.1249,
"step": 5010
},
{
"epoch": 3.9157566302652107,
"grad_norm": 0.8731406331062317,
"learning_rate": 0.00044094049476264764,
"loss": 0.1714,
"step": 5020
},
{
"epoch": 3.923556942277691,
"grad_norm": 1.1749571561813354,
"learning_rate": 0.00043982616447515045,
"loss": 0.264,
"step": 5030
},
{
"epoch": 3.9313572542901714,
"grad_norm": 2.552839756011963,
"learning_rate": 0.0004387118341876532,
"loss": 0.2517,
"step": 5040
},
{
"epoch": 3.939157566302652,
"grad_norm": 1.9722734689712524,
"learning_rate": 0.000437597503900156,
"loss": 0.1854,
"step": 5050
},
{
"epoch": 3.9469578783151325,
"grad_norm": 2.063805341720581,
"learning_rate": 0.0004364831736126588,
"loss": 0.1574,
"step": 5060
},
{
"epoch": 3.954758190327613,
"grad_norm": 4.838432312011719,
"learning_rate": 0.0004353688433251616,
"loss": 0.1939,
"step": 5070
},
{
"epoch": 3.9625585023400935,
"grad_norm": 1.9519829750061035,
"learning_rate": 0.0004342545130376644,
"loss": 0.1334,
"step": 5080
},
{
"epoch": 3.970358814352574,
"grad_norm": 1.3953014612197876,
"learning_rate": 0.00043314018275016715,
"loss": 0.1504,
"step": 5090
},
{
"epoch": 3.9781591263650546,
"grad_norm": 1.6272999048233032,
"learning_rate": 0.00043202585246266995,
"loss": 0.153,
"step": 5100
},
{
"epoch": 3.985959438377535,
"grad_norm": 1.610538125038147,
"learning_rate": 0.0004309115221751727,
"loss": 0.1727,
"step": 5110
},
{
"epoch": 3.9937597503900157,
"grad_norm": 1.6635684967041016,
"learning_rate": 0.0004297971918876755,
"loss": 0.1316,
"step": 5120
},
{
"epoch": 4.0,
"eval_loss": 0.31802308559417725,
"eval_runtime": 526.9062,
"eval_samples_per_second": 1.044,
"eval_steps_per_second": 1.044,
"eval_wer": 21.84784707472485,
"step": 5128
}
],
"logging_steps": 10,
"max_steps": 8974,
"num_input_tokens_seen": 0,
"num_train_epochs": 7,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.51799588339712e+18,
"train_batch_size": 3,
"trial_name": null,
"trial_params": null
}