{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 500, "global_step": 3846, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0078003120124804995, "grad_norm": 2.144841432571411, "learning_rate": 0.0009989971027412525, "loss": 1.1174, "step": 10 }, { "epoch": 0.015600624024960999, "grad_norm": 1.0625462532043457, "learning_rate": 0.0009978827724537553, "loss": 0.5035, "step": 20 }, { "epoch": 0.0234009360374415, "grad_norm": 1.619573950767517, "learning_rate": 0.0009967684421662581, "loss": 0.3841, "step": 30 }, { "epoch": 0.031201248049921998, "grad_norm": 3.2681403160095215, "learning_rate": 0.000995654111878761, "loss": 0.4974, "step": 40 }, { "epoch": 0.0390015600624025, "grad_norm": 1.5944786071777344, "learning_rate": 0.0009945397815912637, "loss": 0.4797, "step": 50 }, { "epoch": 0.046801872074883, "grad_norm": 1.8234444856643677, "learning_rate": 0.0009934254513037665, "loss": 0.637, "step": 60 }, { "epoch": 0.054602184087363496, "grad_norm": 3.790844440460205, "learning_rate": 0.0009923111210162691, "loss": 0.4479, "step": 70 }, { "epoch": 0.062402496099843996, "grad_norm": 2.9351046085357666, "learning_rate": 0.000991196790728772, "loss": 0.4332, "step": 80 }, { "epoch": 0.07020280811232449, "grad_norm": 2.2770025730133057, "learning_rate": 0.0009900824604412747, "loss": 0.4399, "step": 90 }, { "epoch": 0.078003120124805, "grad_norm": 1.86591637134552, "learning_rate": 0.0009889681301537775, "loss": 0.407, "step": 100 }, { "epoch": 0.08580343213728549, "grad_norm": 1.6851640939712524, "learning_rate": 0.0009878537998662805, "loss": 0.3862, "step": 110 }, { "epoch": 0.093603744149766, "grad_norm": 2.5469818115234375, "learning_rate": 0.0009867394695787831, "loss": 0.4166, "step": 120 }, { "epoch": 0.10140405616224649, "grad_norm": 1.8210259675979614, "learning_rate": 0.000985625139291286, "loss": 0.3785, "step": 130 }, { "epoch": 0.10920436817472699, 
"grad_norm": 2.031057119369507, "learning_rate": 0.0009845108090037887, "loss": 0.4177, "step": 140 }, { "epoch": 0.11700468018720749, "grad_norm": 1.6646612882614136, "learning_rate": 0.0009833964787162915, "loss": 0.402, "step": 150 }, { "epoch": 0.12480499219968799, "grad_norm": 1.8680285215377808, "learning_rate": 0.0009822821484287943, "loss": 0.3282, "step": 160 }, { "epoch": 0.13260530421216848, "grad_norm": 1.8039604425430298, "learning_rate": 0.0009811678181412971, "loss": 0.3536, "step": 170 }, { "epoch": 0.14040561622464898, "grad_norm": 3.3018901348114014, "learning_rate": 0.0009800534878538, "loss": 0.4595, "step": 180 }, { "epoch": 0.1482059282371295, "grad_norm": 3.684013843536377, "learning_rate": 0.0009789391575663027, "loss": 0.4288, "step": 190 }, { "epoch": 0.15600624024961, "grad_norm": 1.4512592554092407, "learning_rate": 0.0009778248272788055, "loss": 0.5086, "step": 200 }, { "epoch": 0.16380655226209048, "grad_norm": 2.3981761932373047, "learning_rate": 0.0009767104969913081, "loss": 0.4084, "step": 210 }, { "epoch": 0.17160686427457097, "grad_norm": 3.7943010330200195, "learning_rate": 0.000975596166703811, "loss": 0.4524, "step": 220 }, { "epoch": 0.1794071762870515, "grad_norm": 2.657606840133667, "learning_rate": 0.0009744818364163138, "loss": 0.3592, "step": 230 }, { "epoch": 0.187207488299532, "grad_norm": 2.7629363536834717, "learning_rate": 0.0009733675061288166, "loss": 0.4263, "step": 240 }, { "epoch": 0.19500780031201248, "grad_norm": 1.3749983310699463, "learning_rate": 0.0009722531758413193, "loss": 0.48, "step": 250 }, { "epoch": 0.20280811232449297, "grad_norm": 2.648716449737549, "learning_rate": 0.0009711388455538221, "loss": 0.416, "step": 260 }, { "epoch": 0.21060842433697347, "grad_norm": 1.5672308206558228, "learning_rate": 0.0009700245152663249, "loss": 0.4223, "step": 270 }, { "epoch": 0.21840873634945399, "grad_norm": 2.618163585662842, "learning_rate": 0.0009689101849788277, "loss": 0.4172, "step": 280 }, { "epoch": 
0.22620904836193448, "grad_norm": 3.6365268230438232, "learning_rate": 0.0009677958546913305, "loss": 0.5501, "step": 290 }, { "epoch": 0.23400936037441497, "grad_norm": 2.740039825439453, "learning_rate": 0.0009666815244038332, "loss": 0.3553, "step": 300 }, { "epoch": 0.24180967238689546, "grad_norm": 3.406210422515869, "learning_rate": 0.000965567194116336, "loss": 0.3518, "step": 310 }, { "epoch": 0.24960998439937598, "grad_norm": 1.4707075357437134, "learning_rate": 0.000964452863828839, "loss": 0.3452, "step": 320 }, { "epoch": 0.2574102964118565, "grad_norm": 1.608324408531189, "learning_rate": 0.0009633385335413417, "loss": 0.4908, "step": 330 }, { "epoch": 0.26521060842433697, "grad_norm": 4.090480327606201, "learning_rate": 0.0009622242032538444, "loss": 0.4597, "step": 340 }, { "epoch": 0.27301092043681746, "grad_norm": 2.2214395999908447, "learning_rate": 0.0009611098729663472, "loss": 0.4552, "step": 350 }, { "epoch": 0.28081123244929795, "grad_norm": 1.9134166240692139, "learning_rate": 0.00095999554267885, "loss": 0.3571, "step": 360 }, { "epoch": 0.28861154446177845, "grad_norm": 1.8127851486206055, "learning_rate": 0.0009588812123913528, "loss": 0.3808, "step": 370 }, { "epoch": 0.296411856474259, "grad_norm": 2.2262885570526123, "learning_rate": 0.0009577668821038556, "loss": 0.4099, "step": 380 }, { "epoch": 0.3042121684867395, "grad_norm": 2.8041303157806396, "learning_rate": 0.0009566525518163583, "loss": 0.3988, "step": 390 }, { "epoch": 0.31201248049922, "grad_norm": 6.797432899475098, "learning_rate": 0.0009555382215288611, "loss": 0.4728, "step": 400 }, { "epoch": 0.31981279251170047, "grad_norm": 3.1861369609832764, "learning_rate": 0.000954423891241364, "loss": 0.3502, "step": 410 }, { "epoch": 0.32761310452418096, "grad_norm": 2.9223642349243164, "learning_rate": 0.0009533095609538667, "loss": 0.4215, "step": 420 }, { "epoch": 0.33541341653666146, "grad_norm": 6.848895072937012, "learning_rate": 0.0009521952306663694, "loss": 0.415, 
"step": 430 }, { "epoch": 0.34321372854914195, "grad_norm": 9.054282188415527, "learning_rate": 0.0009510809003788722, "loss": 0.4667, "step": 440 }, { "epoch": 0.35101404056162244, "grad_norm": 2.3005900382995605, "learning_rate": 0.0009499665700913752, "loss": 0.6565, "step": 450 }, { "epoch": 0.358814352574103, "grad_norm": 2.9467573165893555, "learning_rate": 0.000948852239803878, "loss": 0.3837, "step": 460 }, { "epoch": 0.3666146645865835, "grad_norm": 1.5977652072906494, "learning_rate": 0.0009477379095163808, "loss": 0.407, "step": 470 }, { "epoch": 0.374414976599064, "grad_norm": 2.8274600505828857, "learning_rate": 0.0009466235792288835, "loss": 0.4501, "step": 480 }, { "epoch": 0.38221528861154447, "grad_norm": 1.8566502332687378, "learning_rate": 0.0009455092489413863, "loss": 0.3146, "step": 490 }, { "epoch": 0.39001560062402496, "grad_norm": 2.5871951580047607, "learning_rate": 0.0009443949186538891, "loss": 0.3335, "step": 500 }, { "epoch": 0.39781591263650545, "grad_norm": 2.0552711486816406, "learning_rate": 0.0009432805883663919, "loss": 0.3698, "step": 510 }, { "epoch": 0.40561622464898595, "grad_norm": 1.5244548320770264, "learning_rate": 0.0009421662580788946, "loss": 0.581, "step": 520 }, { "epoch": 0.41341653666146644, "grad_norm": 1.5146633386611938, "learning_rate": 0.0009410519277913974, "loss": 0.3634, "step": 530 }, { "epoch": 0.42121684867394693, "grad_norm": 2.394819736480713, "learning_rate": 0.0009399375975039002, "loss": 0.338, "step": 540 }, { "epoch": 0.4290171606864275, "grad_norm": 2.3500325679779053, "learning_rate": 0.000938823267216403, "loss": 0.4025, "step": 550 }, { "epoch": 0.43681747269890797, "grad_norm": 2.4186370372772217, "learning_rate": 0.0009377089369289058, "loss": 0.4095, "step": 560 }, { "epoch": 0.44461778471138846, "grad_norm": 1.9770065546035767, "learning_rate": 0.0009365946066414085, "loss": 0.4275, "step": 570 }, { "epoch": 0.45241809672386896, "grad_norm": 1.7679632902145386, "learning_rate": 
0.0009354802763539114, "loss": 0.3949, "step": 580 }, { "epoch": 0.46021840873634945, "grad_norm": 2.0794620513916016, "learning_rate": 0.0009343659460664142, "loss": 0.4208, "step": 590 }, { "epoch": 0.46801872074882994, "grad_norm": 2.677424192428589, "learning_rate": 0.000933251615778917, "loss": 0.4089, "step": 600 }, { "epoch": 0.47581903276131043, "grad_norm": 1.526112675666809, "learning_rate": 0.0009321372854914197, "loss": 0.4196, "step": 610 }, { "epoch": 0.4836193447737909, "grad_norm": 1.8656370639801025, "learning_rate": 0.0009310229552039225, "loss": 0.3732, "step": 620 }, { "epoch": 0.4914196567862715, "grad_norm": 3.3338847160339355, "learning_rate": 0.0009299086249164253, "loss": 0.4231, "step": 630 }, { "epoch": 0.49921996879875197, "grad_norm": 2.1057350635528564, "learning_rate": 0.0009287942946289281, "loss": 0.3921, "step": 640 }, { "epoch": 0.5070202808112324, "grad_norm": 1.544977068901062, "learning_rate": 0.0009276799643414309, "loss": 0.3748, "step": 650 }, { "epoch": 0.514820592823713, "grad_norm": 3.4070258140563965, "learning_rate": 0.0009265656340539336, "loss": 0.4027, "step": 660 }, { "epoch": 0.5226209048361935, "grad_norm": 5.5486931800842285, "learning_rate": 0.0009254513037664364, "loss": 0.4326, "step": 670 }, { "epoch": 0.5304212168486739, "grad_norm": 3.6824769973754883, "learning_rate": 0.0009243369734789392, "loss": 0.4809, "step": 680 }, { "epoch": 0.5382215288611545, "grad_norm": 6.3154778480529785, "learning_rate": 0.000923222643191442, "loss": 0.3588, "step": 690 }, { "epoch": 0.5460218408736349, "grad_norm": 3.133465528488159, "learning_rate": 0.0009221083129039447, "loss": 0.4712, "step": 700 }, { "epoch": 0.5538221528861155, "grad_norm": 4.222598552703857, "learning_rate": 0.0009209939826164475, "loss": 0.4135, "step": 710 }, { "epoch": 0.5616224648985959, "grad_norm": 4.6125078201293945, "learning_rate": 0.0009198796523289504, "loss": 0.393, "step": 720 }, { "epoch": 0.5694227769110765, "grad_norm": 
6.543318748474121, "learning_rate": 0.0009187653220414532, "loss": 0.6138, "step": 730 }, { "epoch": 0.5772230889235569, "grad_norm": 2.596463680267334, "learning_rate": 0.000917650991753956, "loss": 0.3694, "step": 740 }, { "epoch": 0.5850234009360374, "grad_norm": 2.428490161895752, "learning_rate": 0.0009165366614664587, "loss": 0.4535, "step": 750 }, { "epoch": 0.592823712948518, "grad_norm": 1.8790688514709473, "learning_rate": 0.0009154223311789615, "loss": 0.3986, "step": 760 }, { "epoch": 0.6006240249609984, "grad_norm": 3.141587734222412, "learning_rate": 0.0009143080008914643, "loss": 0.3881, "step": 770 }, { "epoch": 0.608424336973479, "grad_norm": 2.125810146331787, "learning_rate": 0.0009131936706039671, "loss": 0.4225, "step": 780 }, { "epoch": 0.6162246489859594, "grad_norm": 2.532404661178589, "learning_rate": 0.0009120793403164698, "loss": 0.3423, "step": 790 }, { "epoch": 0.62402496099844, "grad_norm": 3.6324350833892822, "learning_rate": 0.0009109650100289726, "loss": 0.4205, "step": 800 }, { "epoch": 0.6318252730109204, "grad_norm": 1.4804415702819824, "learning_rate": 0.0009098506797414754, "loss": 0.4042, "step": 810 }, { "epoch": 0.6396255850234009, "grad_norm": 1.5140562057495117, "learning_rate": 0.0009087363494539782, "loss": 0.3056, "step": 820 }, { "epoch": 0.6474258970358814, "grad_norm": 2.470576047897339, "learning_rate": 0.000907622019166481, "loss": 0.391, "step": 830 }, { "epoch": 0.6552262090483619, "grad_norm": 3.4496209621429443, "learning_rate": 0.0009065076888789837, "loss": 0.4163, "step": 840 }, { "epoch": 0.6630265210608425, "grad_norm": 1.8823250532150269, "learning_rate": 0.0009053933585914866, "loss": 0.3877, "step": 850 }, { "epoch": 0.6708268330733229, "grad_norm": 2.492297410964966, "learning_rate": 0.0009042790283039894, "loss": 0.3542, "step": 860 }, { "epoch": 0.6786271450858035, "grad_norm": 3.977569341659546, "learning_rate": 0.0009031646980164922, "loss": 0.4168, "step": 870 }, { "epoch": 0.6864274570982839, 
"grad_norm": 3.938462495803833, "learning_rate": 0.0009020503677289949, "loss": 0.4894, "step": 880 }, { "epoch": 0.6942277691107644, "grad_norm": 1.3457701206207275, "learning_rate": 0.0009009360374414977, "loss": 0.4903, "step": 890 }, { "epoch": 0.7020280811232449, "grad_norm": 6.3473124504089355, "learning_rate": 0.0008998217071540005, "loss": 0.4766, "step": 900 }, { "epoch": 0.7098283931357254, "grad_norm": 3.145792245864868, "learning_rate": 0.0008987073768665033, "loss": 0.4119, "step": 910 }, { "epoch": 0.717628705148206, "grad_norm": 1.809446930885315, "learning_rate": 0.0008975930465790061, "loss": 0.3991, "step": 920 }, { "epoch": 0.7254290171606864, "grad_norm": 1.7960044145584106, "learning_rate": 0.0008964787162915088, "loss": 0.3095, "step": 930 }, { "epoch": 0.733229329173167, "grad_norm": 2.9710285663604736, "learning_rate": 0.0008953643860040116, "loss": 0.5104, "step": 940 }, { "epoch": 0.7410296411856474, "grad_norm": 2.460524797439575, "learning_rate": 0.0008942500557165144, "loss": 0.4332, "step": 950 }, { "epoch": 0.748829953198128, "grad_norm": 1.6166704893112183, "learning_rate": 0.0008931357254290172, "loss": 0.3856, "step": 960 }, { "epoch": 0.7566302652106084, "grad_norm": 1.747750163078308, "learning_rate": 0.0008920213951415199, "loss": 0.3973, "step": 970 }, { "epoch": 0.7644305772230889, "grad_norm": 1.4469414949417114, "learning_rate": 0.0008909070648540227, "loss": 0.3344, "step": 980 }, { "epoch": 0.7722308892355694, "grad_norm": 4.661273956298828, "learning_rate": 0.0008897927345665256, "loss": 0.6003, "step": 990 }, { "epoch": 0.7800312012480499, "grad_norm": 3.6588950157165527, "learning_rate": 0.0008886784042790284, "loss": 0.473, "step": 1000 }, { "epoch": 0.7878315132605305, "grad_norm": 2.208383798599243, "learning_rate": 0.0008876755070202809, "loss": 0.3899, "step": 1010 }, { "epoch": 0.7956318252730109, "grad_norm": 2.3569576740264893, "learning_rate": 0.0008865611767327836, "loss": 0.3871, "step": 1020 }, { "epoch": 
0.8034321372854915, "grad_norm": 2.7071454524993896, "learning_rate": 0.0008854468464452864, "loss": 0.3149, "step": 1030 }, { "epoch": 0.8112324492979719, "grad_norm": 2.8024532794952393, "learning_rate": 0.0008843325161577892, "loss": 0.3328, "step": 1040 }, { "epoch": 0.8190327613104524, "grad_norm": 3.0969290733337402, "learning_rate": 0.000883218185870292, "loss": 0.3948, "step": 1050 }, { "epoch": 0.8268330733229329, "grad_norm": 2.982484817504883, "learning_rate": 0.0008821038555827947, "loss": 0.4323, "step": 1060 }, { "epoch": 0.8346333853354134, "grad_norm": 3.133814573287964, "learning_rate": 0.0008809895252952975, "loss": 0.4393, "step": 1070 }, { "epoch": 0.8424336973478939, "grad_norm": 3.3123364448547363, "learning_rate": 0.0008798751950078003, "loss": 0.3244, "step": 1080 }, { "epoch": 0.8502340093603744, "grad_norm": 2.308555841445923, "learning_rate": 0.0008787608647203032, "loss": 0.424, "step": 1090 }, { "epoch": 0.858034321372855, "grad_norm": 3.654137134552002, "learning_rate": 0.000877646534432806, "loss": 0.3445, "step": 1100 }, { "epoch": 0.8658346333853354, "grad_norm": 2.149843692779541, "learning_rate": 0.0008765322041453087, "loss": 0.3398, "step": 1110 }, { "epoch": 0.8736349453978159, "grad_norm": 3.1334431171417236, "learning_rate": 0.0008754178738578115, "loss": 0.3333, "step": 1120 }, { "epoch": 0.8814352574102964, "grad_norm": 2.2942090034484863, "learning_rate": 0.0008743035435703143, "loss": 0.4188, "step": 1130 }, { "epoch": 0.8892355694227769, "grad_norm": 2.0195343494415283, "learning_rate": 0.0008731892132828171, "loss": 0.4047, "step": 1140 }, { "epoch": 0.8970358814352574, "grad_norm": 2.3850839138031006, "learning_rate": 0.0008720748829953198, "loss": 0.3931, "step": 1150 }, { "epoch": 0.9048361934477379, "grad_norm": 1.6200228929519653, "learning_rate": 0.0008709605527078226, "loss": 0.409, "step": 1160 }, { "epoch": 0.9126365054602185, "grad_norm": 2.9001989364624023, "learning_rate": 0.0008698462224203254, "loss": 
0.4289, "step": 1170 }, { "epoch": 0.9204368174726989, "grad_norm": 1.52889883518219, "learning_rate": 0.0008687318921328282, "loss": 0.3575, "step": 1180 }, { "epoch": 0.9282371294851794, "grad_norm": 2.359733819961548, "learning_rate": 0.000867617561845331, "loss": 0.3837, "step": 1190 }, { "epoch": 0.9360374414976599, "grad_norm": 2.3807597160339355, "learning_rate": 0.0008665032315578337, "loss": 0.4206, "step": 1200 }, { "epoch": 0.9438377535101404, "grad_norm": 1.8366179466247559, "learning_rate": 0.0008653889012703365, "loss": 0.3101, "step": 1210 }, { "epoch": 0.9516380655226209, "grad_norm": 3.1048014163970947, "learning_rate": 0.0008642745709828393, "loss": 0.4104, "step": 1220 }, { "epoch": 0.9594383775351014, "grad_norm": 1.5314342975616455, "learning_rate": 0.0008631602406953422, "loss": 0.3539, "step": 1230 }, { "epoch": 0.9672386895475819, "grad_norm": 2.8501791954040527, "learning_rate": 0.0008620459104078449, "loss": 0.4104, "step": 1240 }, { "epoch": 0.9750390015600624, "grad_norm": 5.708191394805908, "learning_rate": 0.0008609315801203477, "loss": 0.4038, "step": 1250 }, { "epoch": 0.982839313572543, "grad_norm": 2.173867702484131, "learning_rate": 0.0008598172498328505, "loss": 0.338, "step": 1260 }, { "epoch": 0.9906396255850234, "grad_norm": 2.7057418823242188, "learning_rate": 0.0008587029195453533, "loss": 0.3941, "step": 1270 }, { "epoch": 0.9984399375975039, "grad_norm": 1.3492989540100098, "learning_rate": 0.0008575885892578561, "loss": 0.3696, "step": 1280 }, { "epoch": 1.0, "eval_loss": 0.4044143855571747, "eval_runtime": 635.8286, "eval_samples_per_second": 0.865, "eval_steps_per_second": 0.865, "eval_wer": 26.665379416875844, "step": 1282 }, { "epoch": 1.0062402496099845, "grad_norm": 2.5727291107177734, "learning_rate": 0.0008564742589703588, "loss": 0.3369, "step": 1290 }, { "epoch": 1.0140405616224648, "grad_norm": 2.681490182876587, "learning_rate": 0.0008553599286828616, "loss": 0.2434, "step": 1300 }, { "epoch": 
1.0218408736349454, "grad_norm": 2.0296504497528076, "learning_rate": 0.0008542455983953644, "loss": 0.2122, "step": 1310 }, { "epoch": 1.029641185647426, "grad_norm": 2.277512311935425, "learning_rate": 0.0008531312681078672, "loss": 0.3932, "step": 1320 }, { "epoch": 1.0374414976599065, "grad_norm": 4.77215576171875, "learning_rate": 0.0008520169378203699, "loss": 0.2658, "step": 1330 }, { "epoch": 1.045241809672387, "grad_norm": 1.4027091264724731, "learning_rate": 0.0008509026075328727, "loss": 0.3524, "step": 1340 }, { "epoch": 1.0530421216848673, "grad_norm": 2.2849514484405518, "learning_rate": 0.0008497882772453755, "loss": 0.2705, "step": 1350 }, { "epoch": 1.0608424336973479, "grad_norm": 1.2896777391433716, "learning_rate": 0.0008486739469578784, "loss": 0.264, "step": 1360 }, { "epoch": 1.0686427457098284, "grad_norm": 2.4552128314971924, "learning_rate": 0.0008475596166703812, "loss": 0.2833, "step": 1370 }, { "epoch": 1.076443057722309, "grad_norm": 2.044693946838379, "learning_rate": 0.0008464452863828839, "loss": 0.2116, "step": 1380 }, { "epoch": 1.0842433697347893, "grad_norm": 1.3727463483810425, "learning_rate": 0.0008453309560953867, "loss": 0.2533, "step": 1390 }, { "epoch": 1.0920436817472698, "grad_norm": 1.6917822360992432, "learning_rate": 0.0008442166258078895, "loss": 0.4259, "step": 1400 }, { "epoch": 1.0998439937597504, "grad_norm": 2.198549747467041, "learning_rate": 0.0008431022955203923, "loss": 0.3161, "step": 1410 }, { "epoch": 1.107644305772231, "grad_norm": 1.7467869520187378, "learning_rate": 0.000841987965232895, "loss": 0.2377, "step": 1420 }, { "epoch": 1.1154446177847115, "grad_norm": 2.5347695350646973, "learning_rate": 0.0008408736349453978, "loss": 0.3258, "step": 1430 }, { "epoch": 1.1232449297971918, "grad_norm": 1.9081774950027466, "learning_rate": 0.0008397593046579006, "loss": 0.2462, "step": 1440 }, { "epoch": 1.1310452418096724, "grad_norm": 1.5889848470687866, "learning_rate": 0.0008386449743704034, "loss": 
0.2404, "step": 1450 }, { "epoch": 1.138845553822153, "grad_norm": 1.8944768905639648, "learning_rate": 0.0008375306440829062, "loss": 0.2707, "step": 1460 }, { "epoch": 1.1466458658346335, "grad_norm": 2.5448453426361084, "learning_rate": 0.0008364163137954089, "loss": 0.3342, "step": 1470 }, { "epoch": 1.154446177847114, "grad_norm": 2.0936005115509033, "learning_rate": 0.0008353019835079117, "loss": 0.385, "step": 1480 }, { "epoch": 1.1622464898595943, "grad_norm": 2.614129066467285, "learning_rate": 0.0008341876532204145, "loss": 0.2817, "step": 1490 }, { "epoch": 1.1700468018720749, "grad_norm": 1.6156001091003418, "learning_rate": 0.0008330733229329174, "loss": 0.3527, "step": 1500 }, { "epoch": 1.1778471138845554, "grad_norm": 1.4294220209121704, "learning_rate": 0.0008319589926454201, "loss": 0.2469, "step": 1510 }, { "epoch": 1.185647425897036, "grad_norm": 3.197176456451416, "learning_rate": 0.0008308446623579229, "loss": 0.2753, "step": 1520 }, { "epoch": 1.1934477379095163, "grad_norm": 2.1629223823547363, "learning_rate": 0.0008297303320704257, "loss": 0.3951, "step": 1530 }, { "epoch": 1.2012480499219969, "grad_norm": 2.9824419021606445, "learning_rate": 0.0008286160017829285, "loss": 0.3278, "step": 1540 }, { "epoch": 1.2090483619344774, "grad_norm": 2.866138219833374, "learning_rate": 0.0008275016714954313, "loss": 0.4267, "step": 1550 }, { "epoch": 1.216848673946958, "grad_norm": 2.36781644821167, "learning_rate": 0.000826387341207934, "loss": 0.3414, "step": 1560 }, { "epoch": 1.2246489859594383, "grad_norm": 1.8305447101593018, "learning_rate": 0.0008252730109204368, "loss": 0.2925, "step": 1570 }, { "epoch": 1.2324492979719188, "grad_norm": 1.9879776239395142, "learning_rate": 0.0008241586806329396, "loss": 0.357, "step": 1580 }, { "epoch": 1.2402496099843994, "grad_norm": 2.183350086212158, "learning_rate": 0.0008230443503454424, "loss": 0.3409, "step": 1590 }, { "epoch": 1.24804992199688, "grad_norm": 2.197072744369507, "learning_rate": 
0.0008219300200579451, "loss": 0.33, "step": 1600 }, { "epoch": 1.2558502340093605, "grad_norm": 3.2065696716308594, "learning_rate": 0.0008208156897704479, "loss": 0.2853, "step": 1610 }, { "epoch": 1.2636505460218408, "grad_norm": 2.0581350326538086, "learning_rate": 0.0008197013594829507, "loss": 0.3647, "step": 1620 }, { "epoch": 1.2714508580343213, "grad_norm": 3.149153232574463, "learning_rate": 0.0008185870291954536, "loss": 0.3921, "step": 1630 }, { "epoch": 1.2792511700468019, "grad_norm": 2.5097105503082275, "learning_rate": 0.0008174726989079563, "loss": 0.3302, "step": 1640 }, { "epoch": 1.2870514820592824, "grad_norm": 2.7537474632263184, "learning_rate": 0.0008163583686204591, "loss": 0.3286, "step": 1650 }, { "epoch": 1.294851794071763, "grad_norm": 1.966965675354004, "learning_rate": 0.0008152440383329619, "loss": 0.2785, "step": 1660 }, { "epoch": 1.3026521060842433, "grad_norm": 1.9159988164901733, "learning_rate": 0.0008141297080454647, "loss": 0.3299, "step": 1670 }, { "epoch": 1.3104524180967239, "grad_norm": 2.2212252616882324, "learning_rate": 0.0008130153777579675, "loss": 0.2807, "step": 1680 }, { "epoch": 1.3182527301092044, "grad_norm": 4.194318771362305, "learning_rate": 0.0008119010474704702, "loss": 0.3161, "step": 1690 }, { "epoch": 1.3260530421216847, "grad_norm": 1.7189604043960571, "learning_rate": 0.000810786717182973, "loss": 0.3356, "step": 1700 }, { "epoch": 1.3338533541341655, "grad_norm": 1.5196418762207031, "learning_rate": 0.0008096723868954758, "loss": 0.2616, "step": 1710 }, { "epoch": 1.3416536661466458, "grad_norm": 1.497450351715088, "learning_rate": 0.0008085580566079786, "loss": 0.2946, "step": 1720 }, { "epoch": 1.3494539781591264, "grad_norm": 1.74885892868042, "learning_rate": 0.0008074437263204813, "loss": 0.2801, "step": 1730 }, { "epoch": 1.357254290171607, "grad_norm": 2.040701389312744, "learning_rate": 0.0008063293960329841, "loss": 0.3203, "step": 1740 }, { "epoch": 1.3650546021840873, "grad_norm": 
3.760457754135132, "learning_rate": 0.0008052150657454869, "loss": 0.4569, "step": 1750 }, { "epoch": 1.3728549141965678, "grad_norm": 2.92971134185791, "learning_rate": 0.0008041007354579897, "loss": 0.3321, "step": 1760 }, { "epoch": 1.3806552262090483, "grad_norm": 1.9461047649383545, "learning_rate": 0.0008029864051704926, "loss": 0.2696, "step": 1770 }, { "epoch": 1.388455538221529, "grad_norm": 3.2626147270202637, "learning_rate": 0.0008018720748829953, "loss": 0.3322, "step": 1780 }, { "epoch": 1.3962558502340094, "grad_norm": 2.1270642280578613, "learning_rate": 0.0008007577445954981, "loss": 0.2965, "step": 1790 }, { "epoch": 1.4040561622464898, "grad_norm": 2.3174221515655518, "learning_rate": 0.0007996434143080009, "loss": 0.3425, "step": 1800 }, { "epoch": 1.4118564742589703, "grad_norm": 2.5749576091766357, "learning_rate": 0.0007985290840205037, "loss": 0.3622, "step": 1810 }, { "epoch": 1.4196567862714509, "grad_norm": 1.873813509941101, "learning_rate": 0.0007974147537330064, "loss": 0.2497, "step": 1820 }, { "epoch": 1.4274570982839314, "grad_norm": 3.633928060531616, "learning_rate": 0.0007963004234455092, "loss": 0.4058, "step": 1830 }, { "epoch": 1.435257410296412, "grad_norm": 2.356269598007202, "learning_rate": 0.000795186093158012, "loss": 0.2635, "step": 1840 }, { "epoch": 1.4430577223088923, "grad_norm": 1.9108752012252808, "learning_rate": 0.0007940717628705148, "loss": 0.336, "step": 1850 }, { "epoch": 1.4508580343213728, "grad_norm": 1.5505330562591553, "learning_rate": 0.0007929574325830176, "loss": 0.3493, "step": 1860 }, { "epoch": 1.4586583463338534, "grad_norm": 1.9970422983169556, "learning_rate": 0.0007918431022955203, "loss": 0.273, "step": 1870 }, { "epoch": 1.466458658346334, "grad_norm": 2.753758192062378, "learning_rate": 0.0007907287720080231, "loss": 0.2845, "step": 1880 }, { "epoch": 1.4742589703588145, "grad_norm": null, "learning_rate": 0.0007897258747492757, "loss": 0.2544, "step": 1890 }, { "epoch": 1.4820592823712948, 
"grad_norm": 3.0995099544525146, "learning_rate": 0.0007886115444617785, "loss": 0.2884, "step": 1900 }, { "epoch": 1.4898595943837754, "grad_norm": 5.728559970855713, "learning_rate": 0.0007874972141742812, "loss": 0.2681, "step": 1910 }, { "epoch": 1.497659906396256, "grad_norm": 1.492622971534729, "learning_rate": 0.000786382883886784, "loss": 0.2891, "step": 1920 }, { "epoch": 1.5054602184087362, "grad_norm": 1.7419252395629883, "learning_rate": 0.0007852685535992868, "loss": 0.4089, "step": 1930 }, { "epoch": 1.513260530421217, "grad_norm": 6.814690589904785, "learning_rate": 0.0007841542233117896, "loss": 0.3372, "step": 1940 }, { "epoch": 1.5210608424336973, "grad_norm": 2.380725860595703, "learning_rate": 0.0007830398930242924, "loss": 0.3189, "step": 1950 }, { "epoch": 1.5288611544461779, "grad_norm": 5.004116058349609, "learning_rate": 0.0007819255627367951, "loss": 0.3018, "step": 1960 }, { "epoch": 1.5366614664586584, "grad_norm": 2.604365825653076, "learning_rate": 0.0007808112324492979, "loss": 0.3054, "step": 1970 }, { "epoch": 1.5444617784711387, "grad_norm": 1.585584044456482, "learning_rate": 0.0007796969021618007, "loss": 0.3477, "step": 1980 }, { "epoch": 1.5522620904836193, "grad_norm": 1.8678693771362305, "learning_rate": 0.0007785825718743035, "loss": 0.3577, "step": 1990 }, { "epoch": 1.5600624024960998, "grad_norm": 1.654689073562622, "learning_rate": 0.0007774682415868062, "loss": 0.2625, "step": 2000 }, { "epoch": 1.5678627145085804, "grad_norm": 2.108919858932495, "learning_rate": 0.0007763539112993092, "loss": 0.2497, "step": 2010 }, { "epoch": 1.575663026521061, "grad_norm": 7.198604106903076, "learning_rate": 0.000775239581011812, "loss": 0.3382, "step": 2020 }, { "epoch": 1.5834633385335413, "grad_norm": 2.2285892963409424, "learning_rate": 0.0007741252507243148, "loss": 0.2598, "step": 2030 }, { "epoch": 1.5912636505460218, "grad_norm": 1.7743014097213745, "learning_rate": 0.0007730109204368176, "loss": 0.2757, "step": 2040 }, { 
"epoch": 1.5990639625585024, "grad_norm": 1.7763789892196655, "learning_rate": 0.0007718965901493203, "loss": 0.2703, "step": 2050 }, { "epoch": 1.6068642745709827, "grad_norm": 2.159956693649292, "learning_rate": 0.000770782259861823, "loss": 0.2824, "step": 2060 }, { "epoch": 1.6146645865834635, "grad_norm": 1.4845560789108276, "learning_rate": 0.0007696679295743259, "loss": 0.2528, "step": 2070 }, { "epoch": 1.6224648985959438, "grad_norm": 3.627887010574341, "learning_rate": 0.0007685535992868287, "loss": 0.3197, "step": 2080 }, { "epoch": 1.6302652106084243, "grad_norm": 2.2174973487854004, "learning_rate": 0.0007674392689993314, "loss": 0.2994, "step": 2090 }, { "epoch": 1.6380655226209049, "grad_norm": 2.5977325439453125, "learning_rate": 0.0007663249387118342, "loss": 0.2991, "step": 2100 }, { "epoch": 1.6458658346333852, "grad_norm": 1.9066824913024902, "learning_rate": 0.000765210608424337, "loss": 0.2166, "step": 2110 }, { "epoch": 1.653666146645866, "grad_norm": 1.7197297811508179, "learning_rate": 0.0007640962781368398, "loss": 0.2948, "step": 2120 }, { "epoch": 1.6614664586583463, "grad_norm": 2.054304361343384, "learning_rate": 0.0007629819478493426, "loss": 0.3073, "step": 2130 }, { "epoch": 1.6692667706708268, "grad_norm": 1.7934963703155518, "learning_rate": 0.0007618676175618453, "loss": 0.2667, "step": 2140 }, { "epoch": 1.6770670826833074, "grad_norm": 2.5259838104248047, "learning_rate": 0.0007607532872743482, "loss": 0.3322, "step": 2150 }, { "epoch": 1.6848673946957877, "grad_norm": 3.6354122161865234, "learning_rate": 0.000759638956986851, "loss": 0.3909, "step": 2160 }, { "epoch": 1.6926677067082685, "grad_norm": 1.6722809076309204, "learning_rate": 0.0007585246266993538, "loss": 0.2987, "step": 2170 }, { "epoch": 1.7004680187207488, "grad_norm": 4.3235015869140625, "learning_rate": 0.0007574102964118565, "loss": 0.3112, "step": 2180 }, { "epoch": 1.7082683307332294, "grad_norm": 2.236316442489624, "learning_rate": 0.0007562959661243593, 
"loss": 0.3174, "step": 2190 }, { "epoch": 1.71606864274571, "grad_norm": 21.32891273498535, "learning_rate": 0.0007551816358368621, "loss": 0.3696, "step": 2200 }, { "epoch": 1.7238689547581902, "grad_norm": 2.4251410961151123, "learning_rate": 0.0007540673055493649, "loss": 0.3171, "step": 2210 }, { "epoch": 1.7316692667706708, "grad_norm": 2.4152424335479736, "learning_rate": 0.0007529529752618677, "loss": 0.328, "step": 2220 }, { "epoch": 1.7394695787831513, "grad_norm": 2.0988574028015137, "learning_rate": 0.0007518386449743704, "loss": 0.3061, "step": 2230 }, { "epoch": 1.7472698907956317, "grad_norm": 4.469291687011719, "learning_rate": 0.0007507243146868732, "loss": 0.3315, "step": 2240 }, { "epoch": 1.7550702028081124, "grad_norm": 2.4917778968811035, "learning_rate": 0.000749609984399376, "loss": 0.4296, "step": 2250 }, { "epoch": 1.7628705148205928, "grad_norm": 3.073840379714966, "learning_rate": 0.0007484956541118788, "loss": 0.3685, "step": 2260 }, { "epoch": 1.7706708268330733, "grad_norm": 2.205733299255371, "learning_rate": 0.0007473813238243815, "loss": 0.2691, "step": 2270 }, { "epoch": 1.7784711388455539, "grad_norm": 2.3948941230773926, "learning_rate": 0.0007462669935368844, "loss": 0.3065, "step": 2280 }, { "epoch": 1.7862714508580342, "grad_norm": 2.6060824394226074, "learning_rate": 0.0007451526632493872, "loss": 0.3354, "step": 2290 }, { "epoch": 1.794071762870515, "grad_norm": 3.2586774826049805, "learning_rate": 0.00074403833296189, "loss": 0.261, "step": 2300 }, { "epoch": 1.8018720748829953, "grad_norm": 1.6417285203933716, "learning_rate": 0.0007429240026743928, "loss": 0.3828, "step": 2310 }, { "epoch": 1.8096723868954758, "grad_norm": 4.006927967071533, "learning_rate": 0.0007418096723868955, "loss": 0.4228, "step": 2320 }, { "epoch": 1.8174726989079564, "grad_norm": 2.5880374908447266, "learning_rate": 0.0007406953420993983, "loss": 0.2766, "step": 2330 }, { "epoch": 1.8252730109204367, "grad_norm": 1.602337121963501, 
"learning_rate": 0.0007395810118119011, "loss": 0.2794, "step": 2340 }, { "epoch": 1.8330733229329175, "grad_norm": 5.932153224945068, "learning_rate": 0.0007384666815244039, "loss": 0.3471, "step": 2350 }, { "epoch": 1.8408736349453978, "grad_norm": 4.076808452606201, "learning_rate": 0.0007373523512369066, "loss": 0.2649, "step": 2360 }, { "epoch": 1.8486739469578783, "grad_norm": 4.666397571563721, "learning_rate": 0.0007362380209494094, "loss": 0.2751, "step": 2370 }, { "epoch": 1.856474258970359, "grad_norm": 3.792745590209961, "learning_rate": 0.0007351236906619122, "loss": 0.2606, "step": 2380 }, { "epoch": 1.8642745709828392, "grad_norm": 2.5275423526763916, "learning_rate": 0.000734009360374415, "loss": 0.3469, "step": 2390 }, { "epoch": 1.8720748829953198, "grad_norm": 1.59649658203125, "learning_rate": 0.0007328950300869178, "loss": 0.2724, "step": 2400 }, { "epoch": 1.8798751950078003, "grad_norm": 3.7428267002105713, "learning_rate": 0.0007317806997994206, "loss": 0.4426, "step": 2410 }, { "epoch": 1.8876755070202809, "grad_norm": 3.7439956665039062, "learning_rate": 0.0007306663695119234, "loss": 0.2901, "step": 2420 }, { "epoch": 1.8954758190327614, "grad_norm": 2.3777198791503906, "learning_rate": 0.0007295520392244262, "loss": 0.2837, "step": 2430 }, { "epoch": 1.9032761310452417, "grad_norm": 2.7654988765716553, "learning_rate": 0.000728437708936929, "loss": 0.277, "step": 2440 }, { "epoch": 1.9110764430577223, "grad_norm": 1.8758680820465088, "learning_rate": 0.0007273233786494317, "loss": 0.2706, "step": 2450 }, { "epoch": 1.9188767550702028, "grad_norm": 2.8725340366363525, "learning_rate": 0.0007262090483619345, "loss": 0.2566, "step": 2460 }, { "epoch": 1.9266770670826832, "grad_norm": 2.4021458625793457, "learning_rate": 0.0007250947180744373, "loss": 0.2645, "step": 2470 }, { "epoch": 1.934477379095164, "grad_norm": 2.8407838344573975, "learning_rate": 0.0007239803877869401, "loss": 0.3152, "step": 2480 }, { "epoch": 1.9422776911076443, 
"grad_norm": 3.606403112411499, "learning_rate": 0.0007228660574994429, "loss": 0.373, "step": 2490 }, { "epoch": 1.9500780031201248, "grad_norm": 2.362473487854004, "learning_rate": 0.0007217517272119456, "loss": 0.4332, "step": 2500 }, { "epoch": 1.9578783151326054, "grad_norm": 1.9711815118789673, "learning_rate": 0.0007206373969244484, "loss": 0.2606, "step": 2510 }, { "epoch": 1.9656786271450857, "grad_norm": 2.683908224105835, "learning_rate": 0.0007195230666369512, "loss": 0.3582, "step": 2520 }, { "epoch": 1.9734789391575664, "grad_norm": 2.5902493000030518, "learning_rate": 0.000718408736349454, "loss": 0.2694, "step": 2530 }, { "epoch": 1.9812792511700468, "grad_norm": 3.92708420753479, "learning_rate": 0.0007172944060619567, "loss": 0.3083, "step": 2540 }, { "epoch": 1.9890795631825273, "grad_norm": 2.6788370609283447, "learning_rate": 0.0007161800757744596, "loss": 0.2587, "step": 2550 }, { "epoch": 1.9968798751950079, "grad_norm": 2.413313627243042, "learning_rate": 0.0007150657454869624, "loss": 0.2732, "step": 2560 }, { "epoch": 2.0, "eval_loss": 0.3617618680000305, "eval_runtime": 36952.264, "eval_samples_per_second": 0.015, "eval_steps_per_second": 0.015, "eval_wer": 22.793975670978952, "step": 2564 }, { "epoch": 2.004680187207488, "grad_norm": 1.572451114654541, "learning_rate": 0.0007139514151994652, "loss": 0.2147, "step": 2570 }, { "epoch": 2.012480499219969, "grad_norm": 2.4759023189544678, "learning_rate": 0.000712837084911968, "loss": 0.2015, "step": 2580 }, { "epoch": 2.0202808112324493, "grad_norm": 1.6903563737869263, "learning_rate": 0.0007117227546244707, "loss": 0.1703, "step": 2590 }, { "epoch": 2.0280811232449296, "grad_norm": 3.494985580444336, "learning_rate": 0.0007106084243369735, "loss": 0.2063, "step": 2600 }, { "epoch": 2.0358814352574104, "grad_norm": 1.5024439096450806, "learning_rate": 0.0007094940940494763, "loss": 0.1807, "step": 2610 }, { "epoch": 2.0436817472698907, "grad_norm": 1.9423105716705322, "learning_rate": 
0.0007083797637619791, "loss": 0.1953, "step": 2620 }, { "epoch": 2.0514820592823715, "grad_norm": 0.8572360277175903, "learning_rate": 0.0007072654334744818, "loss": 0.3423, "step": 2630 }, { "epoch": 2.059282371294852, "grad_norm": 2.574855327606201, "learning_rate": 0.0007061511031869846, "loss": 0.1916, "step": 2640 }, { "epoch": 2.067082683307332, "grad_norm": 2.2941219806671143, "learning_rate": 0.0007050367728994874, "loss": 0.2168, "step": 2650 }, { "epoch": 2.074882995319813, "grad_norm": 2.0448150634765625, "learning_rate": 0.0007039224426119902, "loss": 0.2412, "step": 2660 }, { "epoch": 2.0826833073322932, "grad_norm": 1.8044381141662598, "learning_rate": 0.000702808112324493, "loss": 0.3261, "step": 2670 }, { "epoch": 2.090483619344774, "grad_norm": 2.752220630645752, "learning_rate": 0.0007016937820369958, "loss": 0.2007, "step": 2680 }, { "epoch": 2.0982839313572543, "grad_norm": 1.4598050117492676, "learning_rate": 0.0007005794517494986, "loss": 0.1856, "step": 2690 }, { "epoch": 2.1060842433697347, "grad_norm": 3.578192710876465, "learning_rate": 0.0006994651214620014, "loss": 0.1991, "step": 2700 }, { "epoch": 2.1138845553822154, "grad_norm": 2.6971054077148438, "learning_rate": 0.0006983507911745042, "loss": 0.2134, "step": 2710 }, { "epoch": 2.1216848673946958, "grad_norm": 2.437596559524536, "learning_rate": 0.0006972364608870069, "loss": 0.228, "step": 2720 }, { "epoch": 2.129485179407176, "grad_norm": 2.6254658699035645, "learning_rate": 0.0006961221305995097, "loss": 0.1671, "step": 2730 }, { "epoch": 2.137285491419657, "grad_norm": 1.3765720129013062, "learning_rate": 0.0006950078003120125, "loss": 0.2391, "step": 2740 }, { "epoch": 2.145085803432137, "grad_norm": 2.192396879196167, "learning_rate": 0.0006938934700245153, "loss": 0.2331, "step": 2750 }, { "epoch": 2.152886115444618, "grad_norm": 1.4418809413909912, "learning_rate": 0.0006927791397370181, "loss": 0.2041, "step": 2760 }, { "epoch": 2.1606864274570983, "grad_norm": 
2.553459882736206, "learning_rate": 0.0006916648094495208, "loss": 0.2342, "step": 2770 }, { "epoch": 2.1684867394695786, "grad_norm": 1.7199114561080933, "learning_rate": 0.0006905504791620236, "loss": 0.1841, "step": 2780 }, { "epoch": 2.1762870514820594, "grad_norm": 2.2145979404449463, "learning_rate": 0.0006894361488745264, "loss": 0.1821, "step": 2790 }, { "epoch": 2.1840873634945397, "grad_norm": 2.434779405593872, "learning_rate": 0.0006883218185870292, "loss": 0.2115, "step": 2800 }, { "epoch": 2.1918876755070205, "grad_norm": 1.0518474578857422, "learning_rate": 0.0006872074882995319, "loss": 0.2186, "step": 2810 }, { "epoch": 2.199687987519501, "grad_norm": 1.9856785535812378, "learning_rate": 0.0006860931580120348, "loss": 0.2448, "step": 2820 }, { "epoch": 2.207488299531981, "grad_norm": 17.3148136138916, "learning_rate": 0.0006849788277245376, "loss": 0.2372, "step": 2830 }, { "epoch": 2.215288611544462, "grad_norm": 3.4590959548950195, "learning_rate": 0.0006838644974370404, "loss": 0.3179, "step": 2840 }, { "epoch": 2.223088923556942, "grad_norm": 2.127650260925293, "learning_rate": 0.0006827501671495432, "loss": 0.2445, "step": 2850 }, { "epoch": 2.230889235569423, "grad_norm": 1.895729660987854, "learning_rate": 0.0006816358368620459, "loss": 0.1966, "step": 2860 }, { "epoch": 2.2386895475819033, "grad_norm": 1.4693467617034912, "learning_rate": 0.0006805215065745487, "loss": 0.2351, "step": 2870 }, { "epoch": 2.2464898595943836, "grad_norm": 1.500453233718872, "learning_rate": 0.0006794071762870515, "loss": 0.1946, "step": 2880 }, { "epoch": 2.2542901716068644, "grad_norm": 2.8631374835968018, "learning_rate": 0.0006782928459995543, "loss": 0.2367, "step": 2890 }, { "epoch": 2.2620904836193447, "grad_norm": 1.5281766653060913, "learning_rate": 0.000677178515712057, "loss": 0.1855, "step": 2900 }, { "epoch": 2.2698907956318255, "grad_norm": 2.3027195930480957, "learning_rate": 0.0006760641854245598, "loss": 0.2117, "step": 2910 }, { "epoch": 
2.277691107644306, "grad_norm": 2.9962668418884277, "learning_rate": 0.0006749498551370626, "loss": 0.2441, "step": 2920 }, { "epoch": 2.285491419656786, "grad_norm": 2.489192008972168, "learning_rate": 0.0006738355248495654, "loss": 0.2212, "step": 2930 }, { "epoch": 2.293291731669267, "grad_norm": 1.366945743560791, "learning_rate": 0.0006727211945620682, "loss": 0.2003, "step": 2940 }, { "epoch": 2.3010920436817472, "grad_norm": 1.4768056869506836, "learning_rate": 0.000671606864274571, "loss": 0.3107, "step": 2950 }, { "epoch": 2.308892355694228, "grad_norm": 1.797979474067688, "learning_rate": 0.0006704925339870738, "loss": 0.1978, "step": 2960 }, { "epoch": 2.3166926677067083, "grad_norm": 1.9439573287963867, "learning_rate": 0.0006693782036995766, "loss": 0.2306, "step": 2970 }, { "epoch": 2.3244929797191887, "grad_norm": 1.582478642463684, "learning_rate": 0.0006682638734120794, "loss": 0.2079, "step": 2980 }, { "epoch": 2.3322932917316694, "grad_norm": 2.1245157718658447, "learning_rate": 0.0006671495431245821, "loss": 0.2258, "step": 2990 }, { "epoch": 2.3400936037441498, "grad_norm": 2.427675724029541, "learning_rate": 0.0006660352128370849, "loss": 0.2846, "step": 3000 }, { "epoch": 2.34789391575663, "grad_norm": 0.9095527529716492, "learning_rate": 0.0006649208825495877, "loss": 0.1746, "step": 3010 }, { "epoch": 2.355694227769111, "grad_norm": 2.0468294620513916, "learning_rate": 0.0006638065522620905, "loss": 0.2645, "step": 3020 }, { "epoch": 2.363494539781591, "grad_norm": 1.5703836679458618, "learning_rate": 0.0006626922219745933, "loss": 0.1896, "step": 3030 }, { "epoch": 2.371294851794072, "grad_norm": 2.37263822555542, "learning_rate": 0.000661577891687096, "loss": 0.232, "step": 3040 }, { "epoch": 2.3790951638065523, "grad_norm": 1.6041433811187744, "learning_rate": 0.0006604635613995988, "loss": 0.2135, "step": 3050 }, { "epoch": 2.3868954758190326, "grad_norm": 1.87883722782135, "learning_rate": 0.0006593492311121016, "loss": 0.1993, "step": 
3060 }, { "epoch": 2.3946957878315134, "grad_norm": 2.5502099990844727, "learning_rate": 0.0006582349008246044, "loss": 0.2468, "step": 3070 }, { "epoch": 2.4024960998439937, "grad_norm": 2.681384801864624, "learning_rate": 0.0006571205705371071, "loss": 0.2364, "step": 3080 }, { "epoch": 2.410296411856474, "grad_norm": 1.2032707929611206, "learning_rate": 0.00065600624024961, "loss": 0.2732, "step": 3090 }, { "epoch": 2.418096723868955, "grad_norm": 1.553661584854126, "learning_rate": 0.0006548919099621128, "loss": 0.2214, "step": 3100 }, { "epoch": 2.425897035881435, "grad_norm": 0.9736389517784119, "learning_rate": 0.0006537775796746156, "loss": 0.1778, "step": 3110 }, { "epoch": 2.433697347893916, "grad_norm": 1.911352276802063, "learning_rate": 0.0006526632493871184, "loss": 0.2062, "step": 3120 }, { "epoch": 2.4414976599063962, "grad_norm": 1.4338595867156982, "learning_rate": 0.0006515489190996211, "loss": 0.193, "step": 3130 }, { "epoch": 2.4492979719188765, "grad_norm": 1.3027153015136719, "learning_rate": 0.0006504345888121239, "loss": 0.3175, "step": 3140 }, { "epoch": 2.4570982839313573, "grad_norm": 2.262709140777588, "learning_rate": 0.0006493202585246267, "loss": 0.2874, "step": 3150 }, { "epoch": 2.4648985959438376, "grad_norm": 1.36016845703125, "learning_rate": 0.0006482059282371295, "loss": 0.1603, "step": 3160 }, { "epoch": 2.4726989079563184, "grad_norm": 3.441779613494873, "learning_rate": 0.0006470915979496322, "loss": 0.4564, "step": 3170 }, { "epoch": 2.4804992199687987, "grad_norm": 1.8022549152374268, "learning_rate": 0.000645977267662135, "loss": 0.1928, "step": 3180 }, { "epoch": 2.488299531981279, "grad_norm": 2.104497194290161, "learning_rate": 0.0006448629373746378, "loss": 0.2047, "step": 3190 }, { "epoch": 2.49609984399376, "grad_norm": 2.5888454914093018, "learning_rate": 0.0006437486070871406, "loss": 0.2591, "step": 3200 }, { "epoch": 2.50390015600624, "grad_norm": 1.3476183414459229, "learning_rate": 0.0006426342767996434, 
"loss": 0.2636, "step": 3210 }, { "epoch": 2.511700468018721, "grad_norm": 2.888965606689453, "learning_rate": 0.0006415199465121462, "loss": 0.216, "step": 3220 }, { "epoch": 2.5195007800312013, "grad_norm": 1.8190357685089111, "learning_rate": 0.000640405616224649, "loss": 0.3498, "step": 3230 }, { "epoch": 2.5273010920436816, "grad_norm": 1.5132287740707397, "learning_rate": 0.0006392912859371518, "loss": 0.227, "step": 3240 }, { "epoch": 2.5351014040561624, "grad_norm": 2.8061325550079346, "learning_rate": 0.0006381769556496546, "loss": 0.2089, "step": 3250 }, { "epoch": 2.5429017160686427, "grad_norm": 2.2521767616271973, "learning_rate": 0.0006370626253621573, "loss": 0.2362, "step": 3260 }, { "epoch": 2.5507020280811235, "grad_norm": 1.4635623693466187, "learning_rate": 0.0006359482950746601, "loss": 0.1901, "step": 3270 }, { "epoch": 2.5585023400936038, "grad_norm": 1.9882755279541016, "learning_rate": 0.0006348339647871629, "loss": 0.2387, "step": 3280 }, { "epoch": 2.566302652106084, "grad_norm": 6.737502098083496, "learning_rate": 0.0006337196344996657, "loss": 0.2278, "step": 3290 }, { "epoch": 2.574102964118565, "grad_norm": 1.686926245689392, "learning_rate": 0.0006326053042121685, "loss": 0.3118, "step": 3300 }, { "epoch": 2.581903276131045, "grad_norm": 1.436584234237671, "learning_rate": 0.0006314909739246712, "loss": 0.1787, "step": 3310 }, { "epoch": 2.589703588143526, "grad_norm": 3.646476984024048, "learning_rate": 0.000630376643637174, "loss": 0.2165, "step": 3320 }, { "epoch": 2.5975039001560063, "grad_norm": 1.8787113428115845, "learning_rate": 0.0006292623133496768, "loss": 0.1993, "step": 3330 }, { "epoch": 2.6053042121684866, "grad_norm": 2.4485440254211426, "learning_rate": 0.0006281479830621796, "loss": 0.24, "step": 3340 }, { "epoch": 2.6131045241809674, "grad_norm": 2.73197865486145, "learning_rate": 0.0006270336527746824, "loss": 0.228, "step": 3350 }, { "epoch": 2.6209048361934477, "grad_norm": 3.2943315505981445, "learning_rate": 
0.0006259193224871852, "loss": 0.2289, "step": 3360 }, { "epoch": 2.6287051482059285, "grad_norm": 2.165308952331543, "learning_rate": 0.000624804992199688, "loss": 0.2435, "step": 3370 }, { "epoch": 2.636505460218409, "grad_norm": 2.3766629695892334, "learning_rate": 0.0006236906619121908, "loss": 0.26, "step": 3380 }, { "epoch": 2.644305772230889, "grad_norm": 1.1527057886123657, "learning_rate": 0.0006225763316246936, "loss": 0.2486, "step": 3390 }, { "epoch": 2.6521060842433695, "grad_norm": 2.6304874420166016, "learning_rate": 0.0006214620013371963, "loss": 0.3827, "step": 3400 }, { "epoch": 2.6599063962558502, "grad_norm": 1.5219537019729614, "learning_rate": 0.0006203476710496991, "loss": 0.2231, "step": 3410 }, { "epoch": 2.667706708268331, "grad_norm": 1.9267528057098389, "learning_rate": 0.0006192333407622019, "loss": 0.2468, "step": 3420 }, { "epoch": 2.6755070202808113, "grad_norm": 2.247861385345459, "learning_rate": 0.0006181190104747047, "loss": 0.2463, "step": 3430 }, { "epoch": 2.6833073322932917, "grad_norm": 2.6133053302764893, "learning_rate": 0.0006170046801872074, "loss": 0.2636, "step": 3440 }, { "epoch": 2.691107644305772, "grad_norm": 1.4675954580307007, "learning_rate": 0.0006158903498997102, "loss": 0.2141, "step": 3450 }, { "epoch": 2.6989079563182528, "grad_norm": 3.3972527980804443, "learning_rate": 0.000614776019612213, "loss": 0.2102, "step": 3460 }, { "epoch": 2.706708268330733, "grad_norm": 2.0020527839660645, "learning_rate": 0.0006136616893247158, "loss": 0.2618, "step": 3470 }, { "epoch": 2.714508580343214, "grad_norm": 1.4799424409866333, "learning_rate": 0.0006125473590372186, "loss": 0.2644, "step": 3480 }, { "epoch": 2.722308892355694, "grad_norm": 2.207921028137207, "learning_rate": 0.0006114330287497214, "loss": 0.2496, "step": 3490 }, { "epoch": 2.7301092043681745, "grad_norm": 2.7361607551574707, "learning_rate": 0.0006103186984622242, "loss": 0.1917, "step": 3500 }, { "epoch": 2.7379095163806553, "grad_norm": 
2.6031532287597656, "learning_rate": 0.000609204368174727, "loss": 0.2358, "step": 3510 }, { "epoch": 2.7457098283931356, "grad_norm": 1.442651629447937, "learning_rate": 0.0006080900378872299, "loss": 0.2929, "step": 3520 }, { "epoch": 2.7535101404056164, "grad_norm": 2.853076457977295, "learning_rate": 0.0006069757075997325, "loss": 0.2287, "step": 3530 }, { "epoch": 2.7613104524180967, "grad_norm": 1.827863335609436, "learning_rate": 0.0006058613773122353, "loss": 0.284, "step": 3540 }, { "epoch": 2.769110764430577, "grad_norm": 1.29642915725708, "learning_rate": 0.0006047470470247382, "loss": 0.2156, "step": 3550 }, { "epoch": 2.776911076443058, "grad_norm": 2.0758543014526367, "learning_rate": 0.000603632716737241, "loss": 0.4312, "step": 3560 }, { "epoch": 2.784711388455538, "grad_norm": 2.1832942962646484, "learning_rate": 0.0006025183864497438, "loss": 0.22, "step": 3570 }, { "epoch": 2.792511700468019, "grad_norm": 1.541040301322937, "learning_rate": 0.0006014040561622464, "loss": 0.2452, "step": 3580 }, { "epoch": 2.800312012480499, "grad_norm": 1.5947539806365967, "learning_rate": 0.0006002897258747492, "loss": 0.2708, "step": 3590 }, { "epoch": 2.8081123244929795, "grad_norm": 2.130390167236328, "learning_rate": 0.000599175395587252, "loss": 0.2194, "step": 3600 }, { "epoch": 2.8159126365054603, "grad_norm": 2.382166862487793, "learning_rate": 0.0005980610652997549, "loss": 0.1931, "step": 3610 }, { "epoch": 2.8237129485179406, "grad_norm": 4.421852111816406, "learning_rate": 0.0005969467350122577, "loss": 0.3081, "step": 3620 }, { "epoch": 2.8315132605304214, "grad_norm": 1.1889768838882446, "learning_rate": 0.0005958324047247605, "loss": 0.2134, "step": 3630 }, { "epoch": 2.8393135725429017, "grad_norm": 2.6204874515533447, "learning_rate": 0.0005947180744372633, "loss": 0.2128, "step": 3640 }, { "epoch": 2.847113884555382, "grad_norm": 1.4705913066864014, "learning_rate": 0.0005936037441497661, "loss": 0.1894, "step": 3650 }, { "epoch": 
2.854914196567863, "grad_norm": 3.110135555267334, "learning_rate": 0.0005924894138622688, "loss": 0.2072, "step": 3660 }, { "epoch": 2.862714508580343, "grad_norm": 1.3255491256713867, "learning_rate": 0.0005913750835747716, "loss": 0.2325, "step": 3670 }, { "epoch": 2.870514820592824, "grad_norm": 2.2520713806152344, "learning_rate": 0.0005902607532872744, "loss": 0.2121, "step": 3680 }, { "epoch": 2.8783151326053042, "grad_norm": 1.4630913734436035, "learning_rate": 0.0005891464229997772, "loss": 0.2281, "step": 3690 }, { "epoch": 2.8861154446177846, "grad_norm": 2.0491292476654053, "learning_rate": 0.00058803209271228, "loss": 0.2227, "step": 3700 }, { "epoch": 2.8939157566302653, "grad_norm": 6.133053302764893, "learning_rate": 0.0005869177624247827, "loss": 0.3106, "step": 3710 }, { "epoch": 2.9017160686427457, "grad_norm": 2.3226382732391357, "learning_rate": 0.0005858034321372855, "loss": 0.2193, "step": 3720 }, { "epoch": 2.9095163806552264, "grad_norm": 1.351330041885376, "learning_rate": 0.0005846891018497883, "loss": 0.1928, "step": 3730 }, { "epoch": 2.9173166926677068, "grad_norm": 0.9889002442359924, "learning_rate": 0.0005835747715622911, "loss": 0.2395, "step": 3740 }, { "epoch": 2.925117004680187, "grad_norm": 3.9231808185577393, "learning_rate": 0.0005824604412747938, "loss": 0.2288, "step": 3750 }, { "epoch": 2.932917316692668, "grad_norm": 1.1299773454666138, "learning_rate": 0.0005813461109872967, "loss": 0.1996, "step": 3760 }, { "epoch": 2.940717628705148, "grad_norm": 1.894411563873291, "learning_rate": 0.0005802317806997995, "loss": 0.2444, "step": 3770 }, { "epoch": 2.948517940717629, "grad_norm": 3.100918769836426, "learning_rate": 0.0005791174504123023, "loss": 0.2723, "step": 3780 }, { "epoch": 2.9563182527301093, "grad_norm": 2.921398639678955, "learning_rate": 0.0005780031201248051, "loss": 0.1963, "step": 3790 }, { "epoch": 2.9641185647425896, "grad_norm": 2.898193120956421, "learning_rate": 0.0005768887898373078, "loss": 0.2262, 
"step": 3800 }, { "epoch": 2.97191887675507, "grad_norm": 1.8510135412216187, "learning_rate": 0.0005757744595498106, "loss": 0.2253, "step": 3810 }, { "epoch": 2.9797191887675507, "grad_norm": 2.716972827911377, "learning_rate": 0.0005746601292623134, "loss": 0.222, "step": 3820 }, { "epoch": 2.9875195007800315, "grad_norm": 2.466111660003662, "learning_rate": 0.0005735457989748162, "loss": 0.2903, "step": 3830 }, { "epoch": 2.995319812792512, "grad_norm": 2.409747838973999, "learning_rate": 0.0005724314686873189, "loss": 0.3088, "step": 3840 }, { "epoch": 3.0, "eval_loss": 0.34428906440734863, "eval_runtime": 557.5413, "eval_samples_per_second": 0.986, "eval_steps_per_second": 0.986, "eval_wer": 24.454527901139215, "step": 3846 } ], "logging_steps": 10, "max_steps": 8974, "num_input_tokens_seen": 0, "num_train_epochs": 7, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3.38849691254784e+18, "train_batch_size": 3, "trial_name": null, "trial_params": null }