{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.02594033722438392,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00025940337224383917,
      "grad_norm": 13.49365234375,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.5584,
      "step": 10
    },
    {
      "epoch": 0.0005188067444876783,
      "grad_norm": 2.6006040573120117,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.7111,
      "step": 20
    },
    {
      "epoch": 0.0007782101167315176,
      "grad_norm": 4.435553073883057,
      "learning_rate": 4.8e-05,
      "loss": 0.3076,
      "step": 30
    },
    {
      "epoch": 0.0010376134889753567,
      "grad_norm": 1.1146177053451538,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.1826,
      "step": 40
    },
    {
      "epoch": 0.0012970168612191958,
      "grad_norm": 0.8707008957862854,
      "learning_rate": 8e-05,
      "loss": 0.1341,
      "step": 50
    },
    {
      "epoch": 0.0015564202334630351,
      "grad_norm": 1.7411352396011353,
      "learning_rate": 9.6e-05,
      "loss": 0.1144,
      "step": 60
    },
    {
      "epoch": 0.0018158236057068742,
      "grad_norm": 0.9691908955574036,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.0831,
      "step": 70
    },
    {
      "epoch": 0.0020752269779507134,
      "grad_norm": 1.0398532152175903,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.0801,
      "step": 80
    },
    {
      "epoch": 0.0023346303501945525,
      "grad_norm": 0.5252112746238708,
      "learning_rate": 0.000144,
      "loss": 0.081,
      "step": 90
    },
    {
      "epoch": 0.0025940337224383916,
      "grad_norm": 0.6552278995513916,
      "learning_rate": 0.00016,
      "loss": 0.0821,
      "step": 100
    },
    {
      "epoch": 0.0028534370946822307,
      "grad_norm": 0.6302046775817871,
      "learning_rate": 0.00017600000000000002,
      "loss": 0.0749,
      "step": 110
    },
    {
      "epoch": 0.0031128404669260703,
      "grad_norm": 0.7439587712287903,
      "learning_rate": 0.000192,
      "loss": 0.0647,
      "step": 120
    },
    {
      "epoch": 0.0033722438391699094,
      "grad_norm": 6.8584771156311035,
      "learning_rate": 0.0001999978128380225,
      "loss": 0.0726,
      "step": 130
    },
    {
      "epoch": 0.0036316472114137485,
      "grad_norm": 2.014613389968872,
      "learning_rate": 0.0001999803161162393,
      "loss": 0.0936,
      "step": 140
    },
    {
      "epoch": 0.0038910505836575876,
      "grad_norm": 1.3758050203323364,
      "learning_rate": 0.00019994532573409262,
      "loss": 0.1089,
      "step": 150
    },
    {
      "epoch": 0.004150453955901427,
      "grad_norm": 1.3027769327163696,
      "learning_rate": 0.00019989284781388617,
      "loss": 0.0772,
      "step": 160
    },
    {
      "epoch": 0.004409857328145266,
      "grad_norm": 1.021938443183899,
      "learning_rate": 0.00019982289153773646,
      "loss": 0.0695,
      "step": 170
    },
    {
      "epoch": 0.004669260700389105,
      "grad_norm": 0.8069638609886169,
      "learning_rate": 0.00019973546914596623,
      "loss": 0.0624,
      "step": 180
    },
    {
      "epoch": 0.004928664072632944,
      "grad_norm": 0.5198134779930115,
      "learning_rate": 0.00019963059593496268,
      "loss": 0.0481,
      "step": 190
    },
    {
      "epoch": 0.005188067444876783,
      "grad_norm": 0.39965012669563293,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.0493,
      "step": 200
    },
    {
      "epoch": 0.005447470817120622,
      "grad_norm": 0.8152652978897095,
      "learning_rate": 0.0001993685735045343,
      "loss": 0.0477,
      "step": 210
    },
    {
      "epoch": 0.005706874189364461,
      "grad_norm": 0.7083427309989929,
      "learning_rate": 0.0001992114701314478,
      "loss": 0.0501,
      "step": 220
    },
    {
      "epoch": 0.0059662775616083005,
      "grad_norm": 0.690096914768219,
      "learning_rate": 0.000199037007623783,
      "loss": 0.0509,
      "step": 230
    },
    {
      "epoch": 0.0062256809338521405,
      "grad_norm": 0.871699869632721,
      "learning_rate": 0.00019884521650742715,
      "loss": 0.0464,
      "step": 240
    },
    {
      "epoch": 0.00648508430609598,
      "grad_norm": 0.6445639133453369,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.0462,
      "step": 250
    },
    {
      "epoch": 0.006744487678339819,
      "grad_norm": 0.4920782148838043,
      "learning_rate": 0.0001984097857063434,
      "loss": 0.0425,
      "step": 260
    },
    {
      "epoch": 0.007003891050583658,
      "grad_norm": 0.8624788522720337,
      "learning_rate": 0.0001981662222093976,
      "loss": 0.0471,
      "step": 270
    },
    {
      "epoch": 0.007263294422827497,
      "grad_norm": 0.5704030990600586,
      "learning_rate": 0.00019790548246599447,
      "loss": 0.0571,
      "step": 280
    },
    {
      "epoch": 0.007522697795071336,
      "grad_norm": 0.569247841835022,
      "learning_rate": 0.00019762761209803927,
      "loss": 0.0531,
      "step": 290
    },
    {
      "epoch": 0.007782101167315175,
      "grad_norm": 0.7414583563804626,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.0455,
      "step": 300
    },
    {
      "epoch": 0.008041504539559013,
      "grad_norm": 0.42905348539352417,
      "learning_rate": 0.00019702067695440332,
      "loss": 0.0542,
      "step": 310
    },
    {
      "epoch": 0.008300907911802853,
      "grad_norm": 0.6132651567459106,
      "learning_rate": 0.00019669171837479873,
      "loss": 0.0439,
      "step": 320
    },
    {
      "epoch": 0.008560311284046693,
      "grad_norm": 0.44733384251594543,
      "learning_rate": 0.00019634584154421317,
      "loss": 0.0429,
      "step": 330
    },
    {
      "epoch": 0.008819714656290532,
      "grad_norm": 0.516659677028656,
      "learning_rate": 0.00019598310698107702,
      "loss": 0.0407,
      "step": 340
    },
    {
      "epoch": 0.009079118028534372,
      "grad_norm": 0.2340136468410492,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.036,
      "step": 350
    },
    {
      "epoch": 0.00933852140077821,
      "grad_norm": 0.6629723906517029,
      "learning_rate": 0.00019520732146784491,
      "loss": 0.041,
      "step": 360
    },
    {
      "epoch": 0.00959792477302205,
      "grad_norm": 0.3777199387550354,
      "learning_rate": 0.0001947944062577507,
      "loss": 0.0414,
      "step": 370
    },
    {
      "epoch": 0.009857328145265888,
      "grad_norm": 0.44298338890075684,
      "learning_rate": 0.00019436490477135878,
      "loss": 0.0371,
      "step": 380
    },
    {
      "epoch": 0.010116731517509728,
      "grad_norm": 0.4530090093612671,
      "learning_rate": 0.00019391889215899299,
      "loss": 0.037,
      "step": 390
    },
    {
      "epoch": 0.010376134889753566,
      "grad_norm": 0.36142367124557495,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.0387,
      "step": 400
    },
    {
      "epoch": 0.010635538261997406,
      "grad_norm": 0.36559927463531494,
      "learning_rate": 0.00019297764858882514,
      "loss": 0.0386,
      "step": 410
    },
    {
      "epoch": 0.010894941634241245,
      "grad_norm": 0.47660204768180847,
      "learning_rate": 0.00019248258232139388,
      "loss": 0.0316,
      "step": 420
    },
    {
      "epoch": 0.011154345006485085,
      "grad_norm": 0.2614981234073639,
      "learning_rate": 0.00019197133427991436,
      "loss": 0.0417,
      "step": 430
    },
    {
      "epoch": 0.011413748378728923,
      "grad_norm": 0.44811561703681946,
      "learning_rate": 0.00019144399391799043,
      "loss": 0.036,
      "step": 440
    },
    {
      "epoch": 0.011673151750972763,
      "grad_norm": 0.4213508367538452,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.0399,
      "step": 450
    },
    {
      "epoch": 0.011932555123216601,
      "grad_norm": 0.29146406054496765,
      "learning_rate": 0.0001903414081095315,
      "loss": 0.0376,
      "step": 460
    },
    {
      "epoch": 0.012191958495460441,
      "grad_norm": 0.2714293897151947,
      "learning_rate": 0.00018976635558358722,
      "loss": 0.0362,
      "step": 470
    },
    {
      "epoch": 0.012451361867704281,
      "grad_norm": 0.3767367899417877,
      "learning_rate": 0.00018917559654462474,
      "loss": 0.0364,
      "step": 480
    },
    {
      "epoch": 0.01271076523994812,
      "grad_norm": 0.36578992009162903,
      "learning_rate": 0.00018856923435837022,
      "loss": 0.0327,
      "step": 490
    },
    {
      "epoch": 0.01297016861219196,
      "grad_norm": 0.32976096868515015,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.04,
      "step": 500
    },
    {
      "epoch": 0.013229571984435798,
      "grad_norm": 0.4037308990955353,
      "learning_rate": 0.00018731012763882133,
      "loss": 0.0365,
      "step": 510
    },
    {
      "epoch": 0.013488975356679637,
      "grad_norm": 0.3599701225757599,
      "learning_rate": 0.00018665760341274505,
      "loss": 0.0337,
      "step": 520
    },
    {
      "epoch": 0.013748378728923476,
      "grad_norm": 0.6167535185813904,
      "learning_rate": 0.00018598991661526572,
      "loss": 0.0384,
      "step": 530
    },
    {
      "epoch": 0.014007782101167316,
      "grad_norm": 0.44012153148651123,
      "learning_rate": 0.00018530718407223974,
      "loss": 0.0291,
      "step": 540
    },
    {
      "epoch": 0.014267185473411154,
      "grad_norm": 0.5593993663787842,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.0328,
      "step": 550
    },
    {
      "epoch": 0.014526588845654994,
      "grad_norm": 0.3970232307910919,
      "learning_rate": 0.00018389706219492147,
      "loss": 0.0282,
      "step": 560
    },
    {
      "epoch": 0.014785992217898832,
      "grad_norm": 0.2354394942522049,
      "learning_rate": 0.00018316991959112716,
      "loss": 0.0317,
      "step": 570
    },
    {
      "epoch": 0.015045395590142672,
      "grad_norm": 0.35121485590934753,
      "learning_rate": 0.00018242822465961176,
      "loss": 0.0328,
      "step": 580
    },
    {
      "epoch": 0.01530479896238651,
      "grad_norm": 0.4026910066604614,
      "learning_rate": 0.00018167210717551224,
      "loss": 0.0346,
      "step": 590
    },
    {
      "epoch": 0.01556420233463035,
      "grad_norm": 0.36058488488197327,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.0356,
      "step": 600
    },
    {
      "epoch": 0.01582360570687419,
      "grad_norm": 0.43329063057899475,
      "learning_rate": 0.00018011713624460608,
      "loss": 0.039,
      "step": 610
    },
    {
      "epoch": 0.016083009079118027,
      "grad_norm": 0.5267623662948608,
      "learning_rate": 0.00017931855487268782,
      "loss": 0.034,
      "step": 620
    },
    {
      "epoch": 0.01634241245136187,
      "grad_norm": 0.4230927526950836,
      "learning_rate": 0.0001785060950503568,
      "loss": 0.031,
      "step": 630
    },
    {
      "epoch": 0.016601815823605707,
      "grad_norm": 0.3721458613872528,
      "learning_rate": 0.00017767989893455698,
      "loss": 0.0306,
      "step": 640
    },
    {
      "epoch": 0.016861219195849545,
      "grad_norm": 0.4158923029899597,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.0343,
      "step": 650
    },
    {
      "epoch": 0.017120622568093387,
      "grad_norm": 0.42805808782577515,
      "learning_rate": 0.00017598687844230088,
      "loss": 0.0348,
      "step": 660
    },
    {
      "epoch": 0.017380025940337225,
      "grad_norm": 0.2532889246940613,
      "learning_rate": 0.00017512035029540885,
      "loss": 0.0348,
      "step": 670
    },
    {
      "epoch": 0.017639429312581063,
      "grad_norm": 0.33485907316207886,
      "learning_rate": 0.000174240678262345,
      "loss": 0.0382,
      "step": 680
    },
    {
      "epoch": 0.0178988326848249,
      "grad_norm": 0.33811262249946594,
      "learning_rate": 0.000173348016260244,
      "loss": 0.0321,
      "step": 690
    },
    {
      "epoch": 0.018158236057068743,
      "grad_norm": 0.5319349765777588,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.0369,
      "step": 700
    },
    {
      "epoch": 0.01841763942931258,
      "grad_norm": 0.1967012882232666,
      "learning_rate": 0.00017152434935448256,
      "loss": 0.0392,
      "step": 710
    },
    {
      "epoch": 0.01867704280155642,
      "grad_norm": 0.491212397813797,
      "learning_rate": 0.0001705936635397259,
      "loss": 0.034,
      "step": 720
    },
    {
      "epoch": 0.018936446173800258,
      "grad_norm": 0.3200204074382782,
      "learning_rate": 0.00016965062587790823,
      "loss": 0.0349,
      "step": 730
    },
    {
      "epoch": 0.0191958495460441,
      "grad_norm": 0.29090484976768494,
      "learning_rate": 0.00016869540137331445,
      "loss": 0.0302,
      "step": 740
    },
    {
      "epoch": 0.019455252918287938,
      "grad_norm": 0.4633236825466156,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.0305,
      "step": 750
    },
    {
      "epoch": 0.019714656290531776,
      "grad_norm": 0.6030534505844116,
      "learning_rate": 0.00016674906248541726,
      "loss": 0.0331,
      "step": 760
    },
    {
      "epoch": 0.019974059662775615,
      "grad_norm": 0.28692400455474854,
      "learning_rate": 0.00016575828865506245,
      "loss": 0.0319,
      "step": 770
    },
    {
      "epoch": 0.020233463035019456,
      "grad_norm": 0.3799605667591095,
      "learning_rate": 0.0001647560090282419,
      "loss": 0.0316,
      "step": 780
    },
    {
      "epoch": 0.020492866407263294,
      "grad_norm": 0.529679536819458,
      "learning_rate": 0.000163742398974869,
      "loss": 0.0317,
      "step": 790
    },
    {
      "epoch": 0.020752269779507133,
      "grad_norm": 0.328056275844574,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.0332,
      "step": 800
    },
    {
      "epoch": 0.021011673151750974,
      "grad_norm": 0.3303374648094177,
      "learning_rate": 0.0001616818989495711,
      "loss": 0.0323,
      "step": 810
    },
    {
      "epoch": 0.021271076523994813,
      "grad_norm": 0.23507635295391083,
      "learning_rate": 0.00016063536950548826,
      "loss": 0.0338,
      "step": 820
    },
    {
      "epoch": 0.02153047989623865,
      "grad_norm": 0.3870477080345154,
      "learning_rate": 0.0001595782306274553,
      "loss": 0.0326,
      "step": 830
    },
    {
      "epoch": 0.02178988326848249,
      "grad_norm": 0.37890198826789856,
      "learning_rate": 0.00015851066728416618,
      "loss": 0.0309,
      "step": 840
    },
    {
      "epoch": 0.02204928664072633,
      "grad_norm": 0.48460447788238525,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.0351,
      "step": 850
    },
    {
      "epoch": 0.02230869001297017,
      "grad_norm": 0.38216733932495117,
      "learning_rate": 0.00015634501616380967,
      "loss": 0.0331,
      "step": 860
    },
    {
      "epoch": 0.022568093385214007,
      "grad_norm": 0.31668373942375183,
      "learning_rate": 0.00015524730731298134,
      "loss": 0.0306,
      "step": 870
    },
    {
      "epoch": 0.022827496757457846,
      "grad_norm": 0.5256275534629822,
      "learning_rate": 0.0001541399317830738,
      "loss": 0.0307,
      "step": 880
    },
    {
      "epoch": 0.023086900129701687,
      "grad_norm": 0.2295175939798355,
      "learning_rate": 0.0001530230833327405,
      "loss": 0.0297,
      "step": 890
    },
    {
      "epoch": 0.023346303501945526,
      "grad_norm": 0.35704338550567627,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.0358,
      "step": 900
    },
    {
      "epoch": 0.023605706874189364,
      "grad_norm": 0.31458932161331177,
      "learning_rate": 0.0001507617509586517,
      "loss": 0.0278,
      "step": 910
    },
    {
      "epoch": 0.023865110246433202,
      "grad_norm": 0.47554951906204224,
      "learning_rate": 0.00014961766270258422,
      "loss": 0.0332,
      "step": 920
    },
    {
      "epoch": 0.024124513618677044,
      "grad_norm": 0.29959654808044434,
      "learning_rate": 0.00014846489279223652,
      "loss": 0.0268,
      "step": 930
    },
    {
      "epoch": 0.024383916990920882,
      "grad_norm": 0.31856444478034973,
      "learning_rate": 0.0001473036429289641,
      "loss": 0.028,
      "step": 940
    },
    {
      "epoch": 0.02464332036316472,
      "grad_norm": 0.23568253219127655,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.0339,
      "step": 950
    },
    {
      "epoch": 0.024902723735408562,
      "grad_norm": 0.4032520055770874,
      "learning_rate": 0.00014495651753224705,
      "loss": 0.0291,
      "step": 960
    },
    {
      "epoch": 0.0251621271076524,
      "grad_norm": 0.36155375838279724,
      "learning_rate": 0.00014377105267778518,
      "loss": 0.0261,
      "step": 970
    },
    {
      "epoch": 0.02542153047989624,
      "grad_norm": 0.2985226511955261,
      "learning_rate": 0.00014257792915650728,
      "loss": 0.03,
      "step": 980
    },
    {
      "epoch": 0.025680933852140077,
      "grad_norm": 0.3420771658420563,
      "learning_rate": 0.00014137735573048233,
      "loss": 0.0318,
      "step": 990
    },
    {
      "epoch": 0.02594033722438392,
      "grad_norm": 0.19557547569274902,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.0287,
      "step": 1000
    }
  ],
  "logging_steps": 10,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.31459956342784e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}