{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.9943246311010214, "eval_steps": 50, "global_step": 1320, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.022701475595913734, "grad_norm": 0.98377925157547, "learning_rate": 0.0004980988593155894, "loss": 2.041, "step": 10 }, { "epoch": 0.04540295119182747, "grad_norm": 0.9438253045082092, "learning_rate": 0.0004942965779467681, "loss": 0.5227, "step": 20 }, { "epoch": 0.0681044267877412, "grad_norm": 1.2742111682891846, "learning_rate": 0.0004904942965779467, "loss": 0.3237, "step": 30 }, { "epoch": 0.09080590238365494, "grad_norm": 1.0742741823196411, "learning_rate": 0.00048669201520912546, "loss": 0.2732, "step": 40 }, { "epoch": 0.11350737797956867, "grad_norm": 0.293316513299942, "learning_rate": 0.0004828897338403042, "loss": 0.2525, "step": 50 }, { "epoch": 0.11350737797956867, "eval_loss": 0.22574295103549957, "eval_runtime": 222.4911, "eval_samples_per_second": 0.899, "eval_steps_per_second": 0.449, "step": 50 }, { "epoch": 0.1362088535754824, "grad_norm": 0.6410325765609741, "learning_rate": 0.00047908745247148286, "loss": 0.2252, "step": 60 }, { "epoch": 0.15891032917139614, "grad_norm": 0.4331947863101959, "learning_rate": 0.0004752851711026616, "loss": 0.2028, "step": 70 }, { "epoch": 0.18161180476730987, "grad_norm": 0.2875344157218933, "learning_rate": 0.00047148288973384027, "loss": 0.1861, "step": 80 }, { "epoch": 0.2043132803632236, "grad_norm": 0.25494372844696045, "learning_rate": 0.000467680608365019, "loss": 0.1871, "step": 90 }, { "epoch": 0.22701475595913734, "grad_norm": 0.2936597168445587, "learning_rate": 0.00046387832699619773, "loss": 0.1805, "step": 100 }, { "epoch": 0.22701475595913734, "eval_loss": 0.17074081301689148, "eval_runtime": 217.5217, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 100 }, { "epoch": 0.24971623155505107, "grad_norm": 0.23551008105278015, "learning_rate": 0.0004600760456273764, "loss": 0.1642, "step": 110 }, { "epoch": 0.2724177071509648, "grad_norm": 0.21073344349861145, "learning_rate": 0.00045627376425855514, "loss": 0.1674, "step": 120 }, { "epoch": 0.29511918274687854, "grad_norm": 0.27475476264953613, "learning_rate": 0.0004524714828897338, "loss": 0.1586, "step": 130 }, { "epoch": 0.3178206583427923, "grad_norm": 0.19965380430221558, "learning_rate": 0.00044866920152091254, "loss": 0.1553, "step": 140 }, { "epoch": 0.340522133938706, "grad_norm": 0.2891227602958679, "learning_rate": 0.0004448669201520912, "loss": 0.1484, "step": 150 }, { "epoch": 0.340522133938706, "eval_loss": 0.14293429255485535, "eval_runtime": 217.6158, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 150 }, { "epoch": 0.36322360953461974, "grad_norm": 0.18177160620689392, "learning_rate": 0.00044106463878326995, "loss": 0.1403, "step": 160 }, { "epoch": 0.3859250851305335, "grad_norm": 0.1343546211719513, "learning_rate": 0.0004372623574144487, "loss": 0.1394, "step": 170 }, { "epoch": 0.4086265607264472, "grad_norm": 0.19174069166183472, "learning_rate": 0.00043346007604562736, "loss": 0.1329, "step": 180 }, { "epoch": 0.43132803632236094, "grad_norm": 0.2594343423843384, "learning_rate": 0.0004296577946768061, "loss": 0.1346, "step": 190 }, { "epoch": 0.4540295119182747, "grad_norm": 0.1463007926940918, "learning_rate": 0.00042585551330798476, "loss": 0.1366, "step": 200 }, { "epoch": 0.4540295119182747, "eval_loss": 0.13306422531604767, "eval_runtime": 217.7159, 
"eval_samples_per_second": 0.919, "eval_steps_per_second": 0.459, "step": 200 }, { "epoch": 0.4767309875141884, "grad_norm": 0.25085681676864624, "learning_rate": 0.0004220532319391635, "loss": 0.125, "step": 210 }, { "epoch": 0.49943246311010214, "grad_norm": 0.13558617234230042, "learning_rate": 0.0004182509505703422, "loss": 0.1354, "step": 220 }, { "epoch": 0.5221339387060159, "grad_norm": 0.1223764568567276, "learning_rate": 0.0004144486692015209, "loss": 0.13, "step": 230 }, { "epoch": 0.5448354143019296, "grad_norm": 0.21993857622146606, "learning_rate": 0.00041064638783269963, "loss": 0.1265, "step": 240 }, { "epoch": 0.5675368898978433, "grad_norm": 0.17128345370292664, "learning_rate": 0.0004068441064638783, "loss": 0.1248, "step": 250 }, { "epoch": 0.5675368898978433, "eval_loss": 0.1296500563621521, "eval_runtime": 217.6406, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.459, "step": 250 }, { "epoch": 0.5902383654937571, "grad_norm": 0.13287974894046783, "learning_rate": 0.00040304182509505703, "loss": 0.1346, "step": 260 }, { "epoch": 0.6129398410896708, "grad_norm": 0.1447504311800003, "learning_rate": 0.00039923954372623577, "loss": 0.1261, "step": 270 }, { "epoch": 0.6356413166855845, "grad_norm": 0.12065211683511734, "learning_rate": 0.00039543726235741444, "loss": 0.1184, "step": 280 }, { "epoch": 0.6583427922814983, "grad_norm": 0.11076737195253372, "learning_rate": 0.00039163498098859317, "loss": 0.1219, "step": 290 }, { "epoch": 0.681044267877412, "grad_norm": 0.09753841161727905, "learning_rate": 0.00038783269961977185, "loss": 0.1236, "step": 300 }, { "epoch": 0.681044267877412, "eval_loss": 0.12370797991752625, "eval_runtime": 217.5925, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 300 }, { "epoch": 0.7037457434733257, "grad_norm": 0.1080508679151535, "learning_rate": 0.0003840304182509506, "loss": 0.1232, "step": 310 }, { "epoch": 0.7264472190692395, "grad_norm": 0.11670809239149094, "learning_rate": 0.00038022813688212925, "loss": 0.116, "step": 320 }, { "epoch": 0.7491486946651532, "grad_norm": 0.10847495496273041, "learning_rate": 0.000376425855513308, "loss": 0.1225, "step": 330 }, { "epoch": 0.771850170261067, "grad_norm": 0.13137522339820862, "learning_rate": 0.0003726235741444867, "loss": 0.1183, "step": 340 }, { "epoch": 0.7945516458569807, "grad_norm": 0.3472500443458557, "learning_rate": 0.0003688212927756654, "loss": 0.1213, "step": 350 }, { "epoch": 0.7945516458569807, "eval_loss": 0.13061952590942383, "eval_runtime": 217.6112, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 350 }, { "epoch": 0.8172531214528944, "grad_norm": 1.0965760946273804, "learning_rate": 0.0003650190114068441, "loss": 0.1245, "step": 360 }, { "epoch": 0.8399545970488081, "grad_norm": 0.11599018424749374, "learning_rate": 0.0003612167300380228, "loss": 0.1181, "step": 370 }, { "epoch": 0.8626560726447219, "grad_norm": 0.1406373679637909, "learning_rate": 0.0003574144486692015, "loss": 0.1264, "step": 380 }, { "epoch": 0.8853575482406356, "grad_norm": 0.1464611142873764, "learning_rate": 0.00035361216730038026, "loss": 0.1262, "step": 390 }, { "epoch": 0.9080590238365494, "grad_norm": 0.11901883035898209, "learning_rate": 0.00034980988593155893, "loss": 0.1212, "step": 400 }, { "epoch": 0.9080590238365494, "eval_loss": 0.1218264177441597, "eval_runtime": 217.4651, "eval_samples_per_second": 0.92, "eval_steps_per_second": 0.46, "step": 400 }, { "epoch": 0.9307604994324631, "grad_norm": 0.17761249840259552, 
"learning_rate": 0.00034600760456273766, "loss": 0.1224, "step": 410 }, { "epoch": 0.9534619750283768, "grad_norm": 0.10262706875801086, "learning_rate": 0.00034220532319391634, "loss": 0.1218, "step": 420 }, { "epoch": 0.9761634506242906, "grad_norm": 0.12772826850414276, "learning_rate": 0.00033840304182509507, "loss": 0.1177, "step": 430 }, { "epoch": 0.9988649262202043, "grad_norm": 0.11388087272644043, "learning_rate": 0.00033460076045627375, "loss": 0.1202, "step": 440 }, { "epoch": 1.0204313280363224, "grad_norm": 0.12442510575056076, "learning_rate": 0.0003307984790874525, "loss": 0.1178, "step": 450 }, { "epoch": 1.0204313280363224, "eval_loss": 0.12656380236148834, "eval_runtime": 217.5337, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 450 }, { "epoch": 1.043132803632236, "grad_norm": 26971.419921875, "learning_rate": 0.0003269961977186312, "loss": 0.1155, "step": 460 }, { "epoch": 1.0658342792281499, "grad_norm": 0.09757928550243378, "learning_rate": 0.0003231939163498099, "loss": 0.126, "step": 470 }, { "epoch": 1.0885357548240635, "grad_norm": 0.11210393160581589, "learning_rate": 0.0003193916349809886, "loss": 0.1082, "step": 480 }, { "epoch": 1.1112372304199774, "grad_norm": 0.10138614475727081, "learning_rate": 0.0003155893536121673, "loss": 0.1124, "step": 490 }, { "epoch": 1.133938706015891, "grad_norm": 0.12984132766723633, "learning_rate": 0.000311787072243346, "loss": 0.1212, "step": 500 }, { "epoch": 1.133938706015891, "eval_loss": 0.11706481873989105, "eval_runtime": 217.5308, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 500 }, { "epoch": 1.1566401816118048, "grad_norm": 0.11328810453414917, "learning_rate": 0.00030798479087452475, "loss": 0.1144, "step": 510 }, { "epoch": 1.1793416572077184, "grad_norm": 0.1247556060552597, "learning_rate": 0.0003041825095057034, "loss": 0.1172, "step": 520 }, { "epoch": 1.2020431328036323, "grad_norm": 0.11804669350385666, "learning_rate": 0.00030038022813688215, "loss": 0.1204, "step": 530 }, { "epoch": 1.224744608399546, "grad_norm": 0.11827069520950317, "learning_rate": 0.00029657794676806083, "loss": 0.1188, "step": 540 }, { "epoch": 1.2474460839954598, "grad_norm": 0.1352234184741974, "learning_rate": 0.00029277566539923956, "loss": 0.1165, "step": 550 }, { "epoch": 1.2474460839954598, "eval_loss": 0.11637674272060394, "eval_runtime": 217.5994, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 550 }, { "epoch": 1.2701475595913734, "grad_norm": 0.12024690955877304, "learning_rate": 0.00028897338403041824, "loss": 0.1178, "step": 560 }, { "epoch": 1.2928490351872872, "grad_norm": 0.09398236870765686, "learning_rate": 0.00028517110266159697, "loss": 0.1164, "step": 570 }, { "epoch": 1.3155505107832008, "grad_norm": 0.09315728396177292, "learning_rate": 0.0002813688212927757, "loss": 0.1118, "step": 580 }, { "epoch": 1.3382519863791147, "grad_norm": 0.09556794911623001, "learning_rate": 0.0002775665399239544, "loss": 0.114, "step": 590 }, { "epoch": 1.3609534619750283, "grad_norm": 0.12721088528633118, "learning_rate": 0.0002737642585551331, "loss": 0.1206, "step": 600 }, { "epoch": 1.3609534619750283, "eval_loss": 0.11379682272672653, "eval_runtime": 217.4599, "eval_samples_per_second": 0.92, "eval_steps_per_second": 0.46, "step": 600 }, { "epoch": 1.3836549375709422, "grad_norm": 0.11218762397766113, "learning_rate": 0.0002699619771863118, "loss": 0.1143, "step": 610 }, { "epoch": 1.4063564131668558, "grad_norm": 0.1336316615343094, "learning_rate": 
0.0002661596958174905, "loss": 0.1157, "step": 620 }, { "epoch": 1.4290578887627696, "grad_norm": 0.1047380119562149, "learning_rate": 0.00026235741444866924, "loss": 0.1155, "step": 630 }, { "epoch": 1.4517593643586832, "grad_norm": 0.10472728312015533, "learning_rate": 0.0002585551330798479, "loss": 0.112, "step": 640 }, { "epoch": 1.474460839954597, "grad_norm": 0.11173685640096664, "learning_rate": 0.00025475285171102665, "loss": 0.116, "step": 650 }, { "epoch": 1.474460839954597, "eval_loss": 0.11390363425016403, "eval_runtime": 217.503, "eval_samples_per_second": 0.92, "eval_steps_per_second": 0.46, "step": 650 }, { "epoch": 1.4971623155505107, "grad_norm": 0.09606759250164032, "learning_rate": 0.0002509505703422053, "loss": 0.1106, "step": 660 }, { "epoch": 1.5198637911464246, "grad_norm": 0.10025890916585922, "learning_rate": 0.00024714828897338405, "loss": 0.1104, "step": 670 }, { "epoch": 1.5425652667423382, "grad_norm": 0.09173867851495743, "learning_rate": 0.00024334600760456273, "loss": 0.1138, "step": 680 }, { "epoch": 1.565266742338252, "grad_norm": 0.11808419227600098, "learning_rate": 0.00023954372623574143, "loss": 0.1176, "step": 690 }, { "epoch": 1.5879682179341659, "grad_norm": 0.10776414722204208, "learning_rate": 0.00023574144486692014, "loss": 0.1168, "step": 700 }, { "epoch": 1.5879682179341659, "eval_loss": 0.11171030253171921, "eval_runtime": 217.522, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 700 }, { "epoch": 1.6106696935300795, "grad_norm": 0.1097252145409584, "learning_rate": 0.00023193916349809887, "loss": 0.1094, "step": 710 }, { "epoch": 1.6333711691259931, "grad_norm": 0.10685060173273087, "learning_rate": 0.00022813688212927757, "loss": 0.1109, "step": 720 }, { "epoch": 1.656072644721907, "grad_norm": 0.13034620881080627, "learning_rate": 0.00022433460076045627, "loss": 0.1116, "step": 730 }, { "epoch": 1.6787741203178208, "grad_norm": 0.09243394434452057, "learning_rate": 0.00022053231939163497, "loss": 0.1181, "step": 740 }, { "epoch": 1.7014755959137344, "grad_norm": 0.09220673888921738, "learning_rate": 0.00021673003802281368, "loss": 0.1156, "step": 750 }, { "epoch": 1.7014755959137344, "eval_loss": 0.11321926862001419, "eval_runtime": 217.4623, "eval_samples_per_second": 0.92, "eval_steps_per_second": 0.46, "step": 750 }, { "epoch": 1.724177071509648, "grad_norm": 0.08847390860319138, "learning_rate": 0.00021292775665399238, "loss": 0.1142, "step": 760 }, { "epoch": 1.746878547105562, "grad_norm": 0.10436313599348068, "learning_rate": 0.0002091254752851711, "loss": 0.1116, "step": 770 }, { "epoch": 1.7695800227014757, "grad_norm": 0.10285446792840958, "learning_rate": 0.00020532319391634981, "loss": 0.1122, "step": 780 }, { "epoch": 1.7922814982973894, "grad_norm": 0.11135664582252502, "learning_rate": 0.00020152091254752852, "loss": 0.1121, "step": 790 }, { "epoch": 1.814982973893303, "grad_norm": 0.10696069896221161, "learning_rate": 0.00019771863117870722, "loss": 0.113, "step": 800 }, { "epoch": 1.814982973893303, "eval_loss": 0.10933104157447815, "eval_runtime": 217.663, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.459, "step": 800 }, { "epoch": 1.8376844494892168, "grad_norm": 0.10220722109079361, "learning_rate": 0.00019391634980988592, "loss": 0.1131, "step": 810 }, { "epoch": 1.8603859250851307, "grad_norm": 0.10076457262039185, "learning_rate": 0.00019011406844106463, "loss": 0.1105, "step": 820 }, { "epoch": 1.8830874006810443, "grad_norm": 0.09080884605646133, "learning_rate": 
0.00018631178707224336, "loss": 0.1135, "step": 830 }, { "epoch": 1.905788876276958, "grad_norm": 0.09699123352766037, "learning_rate": 0.00018250950570342206, "loss": 0.1128, "step": 840 }, { "epoch": 1.9284903518728718, "grad_norm": 0.13103429973125458, "learning_rate": 0.00017870722433460076, "loss": 0.1125, "step": 850 }, { "epoch": 1.9284903518728718, "eval_loss": 0.08514724671840668, "eval_runtime": 217.5471, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 850 }, { "epoch": 1.9511918274687856, "grad_norm": 0.10150181502103806, "learning_rate": 0.00017490494296577947, "loss": 0.1104, "step": 860 }, { "epoch": 1.9738933030646992, "grad_norm": 0.09728129953145981, "learning_rate": 0.00017110266159695817, "loss": 0.1117, "step": 870 }, { "epoch": 1.9965947786606129, "grad_norm": 0.11810111254453659, "learning_rate": 0.00016730038022813687, "loss": 0.1088, "step": 880 }, { "epoch": 2.018161180476731, "grad_norm": 0.12136723101139069, "learning_rate": 0.0001634980988593156, "loss": 0.1099, "step": 890 }, { "epoch": 2.040862656072645, "grad_norm": 0.09833360463380814, "learning_rate": 0.0001596958174904943, "loss": 0.104, "step": 900 }, { "epoch": 2.040862656072645, "eval_loss": 0.11464639753103256, "eval_runtime": 217.5705, "eval_samples_per_second": 0.919, "eval_steps_per_second": 0.46, "step": 900 }, { "epoch": 2.0635641316685582, "grad_norm": 0.1004079207777977, "learning_rate": 0.000155893536121673, "loss": 0.1045, "step": 910 }, { "epoch": 2.086265607264472, "grad_norm": 0.12761454284191132, "learning_rate": 0.0001520912547528517, "loss": 0.1091, "step": 920 }, { "epoch": 2.108967082860386, "grad_norm": 0.10577848553657532, "learning_rate": 0.00014828897338403042, "loss": 0.1095, "step": 930 }, { "epoch": 2.1316685584562998, "grad_norm": 0.10021686553955078, "learning_rate": 0.00014448669201520912, "loss": 0.1038, "step": 940 }, { "epoch": 2.1543700340522136, "grad_norm": 0.12449836730957031, "learning_rate": 0.00014068441064638785, "loss": 0.1114, "step": 950 }, { "epoch": 2.1543700340522136, "eval_loss": 0.11278806626796722, "eval_runtime": 217.472, "eval_samples_per_second": 0.92, "eval_steps_per_second": 0.46, "step": 950 }, { "epoch": 2.177071509648127, "grad_norm": 0.11041835695505142, "learning_rate": 0.00013688212927756655, "loss": 0.1115, "step": 960 }, { "epoch": 2.199772985244041, "grad_norm": 0.12451056391000748, "learning_rate": 0.00013307984790874526, "loss": 0.1067, "step": 970 }, { "epoch": 2.2224744608399547, "grad_norm": 0.11106307804584503, "learning_rate": 0.00012927756653992396, "loss": 0.107, "step": 980 }, { "epoch": 2.245175936435868, "grad_norm": 0.11006781458854675, "learning_rate": 0.00012547528517110266, "loss": 0.1041, "step": 990 }, { "epoch": 2.267877412031782, "grad_norm": 0.12557250261306763, "learning_rate": 0.00012167300380228136, "loss": 0.1106, "step": 1000 }, { "epoch": 2.267877412031782, "eval_loss": 0.111506387591362, "eval_runtime": 219.8537, "eval_samples_per_second": 0.91, "eval_steps_per_second": 0.455, "step": 1000 }, { "epoch": 2.290578887627696, "grad_norm": 0.10331263393163681, "learning_rate": 0.00011787072243346007, "loss": 0.1086, "step": 1010 }, { "epoch": 2.3132803632236096, "grad_norm": 0.19199371337890625, "learning_rate": 0.00011406844106463878, "loss": 0.1075, "step": 1020 }, { "epoch": 2.3359818388195235, "grad_norm": 0.1070881262421608, "learning_rate": 0.00011026615969581749, "loss": 0.0407, "step": 1030 }, { "epoch": 2.358683314415437, "grad_norm": 0.12608259916305542, "learning_rate": 
0.00010646387832699619, "loss": 0.11, "step": 1040 }, { "epoch": 2.3813847900113507, "grad_norm": 0.13995610177516937, "learning_rate": 0.00010266159695817491, "loss": 0.1019, "step": 1050 }, { "epoch": 2.3813847900113507, "eval_loss": 0.11113368719816208, "eval_runtime": 219.8288, "eval_samples_per_second": 0.91, "eval_steps_per_second": 0.455, "step": 1050 }, { "epoch": 2.4040862656072646, "grad_norm": 0.11652055382728577, "learning_rate": 9.885931558935361e-05, "loss": 0.1077, "step": 1060 }, { "epoch": 2.426787741203178, "grad_norm": 0.1517985761165619, "learning_rate": 9.505703422053231e-05, "loss": 0.1068, "step": 1070 }, { "epoch": 2.449489216799092, "grad_norm": 0.10748250037431717, "learning_rate": 9.125475285171103e-05, "loss": 0.1057, "step": 1080 }, { "epoch": 2.4721906923950057, "grad_norm": 0.11488188803195953, "learning_rate": 8.745247148288973e-05, "loss": 0.1075, "step": 1090 }, { "epoch": 2.4948921679909195, "grad_norm": 0.1252240389585495, "learning_rate": 8.365019011406844e-05, "loss": 0.1043, "step": 1100 }, { "epoch": 2.4948921679909195, "eval_loss": 0.11009979248046875, "eval_runtime": 219.9087, "eval_samples_per_second": 0.909, "eval_steps_per_second": 0.455, "step": 1100 }, { "epoch": 2.5175936435868334, "grad_norm": 0.13906443119049072, "learning_rate": 7.984790874524715e-05, "loss": 0.107, "step": 1110 }, { "epoch": 2.5402951191827468, "grad_norm": 0.1267954409122467, "learning_rate": 7.604562737642586e-05, "loss": 0.1071, "step": 1120 }, { "epoch": 2.5629965947786606, "grad_norm": 0.14272169768810272, "learning_rate": 7.224334600760456e-05, "loss": 0.1095, "step": 1130 }, { "epoch": 2.5856980703745744, "grad_norm": 0.14471879601478577, "learning_rate": 6.844106463878328e-05, "loss": 0.1094, "step": 1140 }, { "epoch": 2.608399545970488, "grad_norm": 0.14543314278125763, "learning_rate": 6.463878326996198e-05, "loss": 0.1045, "step": 1150 }, { "epoch": 2.608399545970488, "eval_loss": 0.10972020775079727, "eval_runtime": 219.8328, "eval_samples_per_second": 0.91, "eval_steps_per_second": 0.455, "step": 1150 }, { "epoch": 2.6311010215664017, "grad_norm": 3218.48046875, "learning_rate": 6.083650190114068e-05, "loss": 0.1031, "step": 1160 }, { "epoch": 2.6538024971623155, "grad_norm": 0.15402665734291077, "learning_rate": 5.703422053231939e-05, "loss": 0.1083, "step": 1170 }, { "epoch": 2.6765039727582294, "grad_norm": 0.12117785960435867, "learning_rate": 5.3231939163498095e-05, "loss": 0.1034, "step": 1180 }, { "epoch": 2.699205448354143, "grad_norm": 0.15971869230270386, "learning_rate": 4.9429657794676805e-05, "loss": 0.1018, "step": 1190 }, { "epoch": 2.7219069239500566, "grad_norm": 0.176614910364151, "learning_rate": 4.5627376425855515e-05, "loss": 0.1029, "step": 1200 }, { "epoch": 2.7219069239500566, "eval_loss": 0.10988742858171463, "eval_runtime": 219.8843, "eval_samples_per_second": 0.91, "eval_steps_per_second": 0.455, "step": 1200 }, { "epoch": 2.7446083995459705, "grad_norm": 0.13368096947669983, "learning_rate": 4.182509505703422e-05, "loss": 0.1022, "step": 1210 }, { "epoch": 2.7673098751418843, "grad_norm": 0.18552756309509277, "learning_rate": 3.802281368821293e-05, "loss": 0.1093, "step": 1220 }, { "epoch": 2.7900113507377977, "grad_norm": 0.12576653063297272, "learning_rate": 3.422053231939164e-05, "loss": 0.1089, "step": 1230 }, { "epoch": 2.8127128263337116, "grad_norm": 0.156230628490448, "learning_rate": 3.041825095057034e-05, "loss": 0.1005, "step": 1240 }, { "epoch": 2.8354143019296254, "grad_norm": 0.14209876954555511, "learning_rate": 
2.6615969581749048e-05, "loss": 0.1049, "step": 1250 }, { "epoch": 2.8354143019296254, "eval_loss": 0.10819864273071289, "eval_runtime": 219.473, "eval_samples_per_second": 0.911, "eval_steps_per_second": 0.456, "step": 1250 }, { "epoch": 2.8581157775255392, "grad_norm": 0.1460907757282257, "learning_rate": 2.2813688212927758e-05, "loss": 0.1066, "step": 1260 }, { "epoch": 2.880817253121453, "grad_norm": 0.12771661579608917, "learning_rate": 1.9011406844106464e-05, "loss": 0.1032, "step": 1270 }, { "epoch": 2.9035187287173665, "grad_norm": 0.16184042394161224, "learning_rate": 1.520912547528517e-05, "loss": 0.1015, "step": 1280 }, { "epoch": 2.9262202043132803, "grad_norm": 0.15833236277103424, "learning_rate": 1.1406844106463879e-05, "loss": 0.1054, "step": 1290 }, { "epoch": 2.948921679909194, "grad_norm": 0.1427476406097412, "learning_rate": 7.604562737642585e-06, "loss": 0.0987, "step": 1300 }, { "epoch": 2.948921679909194, "eval_loss": 0.10615785419940948, "eval_runtime": 219.4485, "eval_samples_per_second": 0.911, "eval_steps_per_second": 0.456, "step": 1300 }, { "epoch": 2.9716231555051076, "grad_norm": 0.17593209445476532, "learning_rate": 3.8022813688212926e-06, "loss": 0.1046, "step": 1310 }, { "epoch": 2.9943246311010214, "grad_norm": 0.15066806972026825, "learning_rate": 0.0, "loss": 0.1081, "step": 1320 } ], "logging_steps": 10, "max_steps": 1320, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 8.234027242780262e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }
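
The file above is the `trainer_state.json` that the Hugging Face `Trainer` writes next to each checkpoint: `log_history` holds one entry every `logging_steps` (train `loss`, `learning_rate`, `grad_norm`) and one every `eval_steps` (`eval_loss` plus eval throughput). Below is a minimal sketch, not part of the checkpoint itself, for reading the train and eval curves back out of this file; the path `checkpoint-1320/trainer_state.json` is illustrative and should point at wherever the Trainer saved this state.

```python
import json

# Assumed location of the state file shown above; adjust to your output_dir.
path = "checkpoint-1320/trainer_state.json"

with open(path) as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_points = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"train points: {len(train_points)}, eval points: {len(eval_points)}")
print("final train (step, loss):", train_points[-1])
print("final eval  (step, loss):", eval_points[-1])
```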