{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 14.723716381418093,
  "eval_steps": 500,
  "global_step": 765,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.097799511002445,
      "grad_norm": 2.395451784133911,
      "learning_rate": 1.9997891995035914e-05,
      "loss": 0.8739,
      "num_input_tokens_seen": 163840,
      "step": 5
    },
    {
      "epoch": 0.19559902200489,
      "grad_norm": 1.3395614624023438,
      "learning_rate": 1.999156886888064e-05,
      "loss": 0.7957,
      "num_input_tokens_seen": 327680,
      "step": 10
    },
    {
      "epoch": 0.293398533007335,
      "grad_norm": 1.125549554824829,
      "learning_rate": 1.9981033287370443e-05,
      "loss": 0.7822,
      "num_input_tokens_seen": 491520,
      "step": 15
    },
    {
      "epoch": 0.39119804400978,
      "grad_norm": 1.1936776638031006,
      "learning_rate": 1.9966289692316944e-05,
      "loss": 0.7815,
      "num_input_tokens_seen": 655360,
      "step": 20
    },
    {
      "epoch": 0.4889975550122249,
      "grad_norm": 1.230684757232666,
      "learning_rate": 1.9947344299634464e-05,
      "loss": 0.7432,
      "num_input_tokens_seen": 819200,
      "step": 25
    },
    {
      "epoch": 0.58679706601467,
      "grad_norm": 1.0630358457565308,
      "learning_rate": 1.992420509671936e-05,
      "loss": 0.7389,
      "num_input_tokens_seen": 982368,
      "step": 30
    },
    {
      "epoch": 0.684596577017115,
      "grad_norm": 1.1900779008865356,
      "learning_rate": 1.9896881839082554e-05,
      "loss": 0.7654,
      "num_input_tokens_seen": 1146208,
      "step": 35
    },
    {
      "epoch": 0.78239608801956,
      "grad_norm": 1.0942089557647705,
      "learning_rate": 1.9865386046236597e-05,
      "loss": 0.7753,
      "num_input_tokens_seen": 1310048,
      "step": 40
    },
    {
      "epoch": 0.8801955990220048,
      "grad_norm": 1.0910882949829102,
      "learning_rate": 1.982973099683902e-05,
      "loss": 0.7256,
      "num_input_tokens_seen": 1473888,
      "step": 45
    },
    {
      "epoch": 0.9779951100244498,
      "grad_norm": 1.0902478694915771,
      "learning_rate": 1.9789931723094046e-05,
      "loss": 0.727,
      "num_input_tokens_seen": 1637728,
      "step": 50
    },
    {
      "epoch": 1.058679706601467,
      "grad_norm": 1.3777414560317993,
      "learning_rate": 1.9746005004415004e-05,
      "loss": 0.582,
      "num_input_tokens_seen": 1770848,
      "step": 55
    },
    {
      "epoch": 1.156479217603912,
      "grad_norm": 1.3062232732772827,
      "learning_rate": 1.9697969360350098e-05,
      "loss": 0.5628,
      "num_input_tokens_seen": 1934688,
      "step": 60
    },
    {
      "epoch": 1.254278728606357,
      "grad_norm": 1.239915132522583,
      "learning_rate": 1.9645845042774555e-05,
      "loss": 0.5561,
      "num_input_tokens_seen": 2098528,
      "step": 65
    },
    {
      "epoch": 1.352078239608802,
      "grad_norm": 1.1612083911895752,
      "learning_rate": 1.9589654027352412e-05,
      "loss": 0.5778,
      "num_input_tokens_seen": 2262368,
      "step": 70
    },
    {
      "epoch": 1.449877750611247,
      "grad_norm": 1.2168060541152954,
      "learning_rate": 1.9529420004271568e-05,
      "loss": 0.5207,
      "num_input_tokens_seen": 2426128,
      "step": 75
    },
    {
      "epoch": 1.5476772616136918,
      "grad_norm": 1.2806190252304077,
      "learning_rate": 1.9465168368255946e-05,
      "loss": 0.5452,
      "num_input_tokens_seen": 2589968,
      "step": 80
    },
    {
      "epoch": 1.6454767726161368,
      "grad_norm": 1.17153000831604,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.5379,
      "num_input_tokens_seen": 2753808,
      "step": 85
    },
    {
      "epoch": 1.7432762836185818,
      "grad_norm": 1.1548662185668945,
      "learning_rate": 1.932472229404356e-05,
      "loss": 0.5116,
      "num_input_tokens_seen": 2917648,
      "step": 90
    },
    {
      "epoch": 1.8410757946210268,
      "grad_norm": 1.3172167539596558,
      "learning_rate": 1.924858706805112e-05,
      "loss": 0.5532,
      "num_input_tokens_seen": 3081488,
      "step": 95
    },
    {
      "epoch": 1.9388753056234718,
      "grad_norm": 1.1930724382400513,
      "learning_rate": 1.9168552628568632e-05,
      "loss": 0.5659,
      "num_input_tokens_seen": 3245328,
      "step": 100
    },
    {
      "epoch": 2.019559902200489,
      "grad_norm": 1.7079554796218872,
      "learning_rate": 1.9084652718195237e-05,
      "loss": 0.6028,
      "num_input_tokens_seen": 3378448,
      "step": 105
    },
    {
      "epoch": 2.117359413202934,
      "grad_norm": 1.7393510341644287,
      "learning_rate": 1.8996922709216456e-05,
      "loss": 0.4228,
      "num_input_tokens_seen": 3542288,
      "step": 110
    },
    {
      "epoch": 2.215158924205379,
      "grad_norm": 1.2822431325912476,
      "learning_rate": 1.8905399588691165e-05,
      "loss": 0.3648,
      "num_input_tokens_seen": 3706128,
      "step": 115
    },
    {
      "epoch": 2.312958435207824,
      "grad_norm": 1.3198189735412598,
      "learning_rate": 1.8810121942857848e-05,
      "loss": 0.3822,
      "num_input_tokens_seen": 3869968,
      "step": 120
    },
    {
      "epoch": 2.410757946210269,
      "grad_norm": 1.2831072807312012,
      "learning_rate": 1.8711129940866577e-05,
      "loss": 0.3907,
      "num_input_tokens_seen": 4033808,
      "step": 125
    },
    {
      "epoch": 2.508557457212714,
      "grad_norm": 1.4272454977035522,
      "learning_rate": 1.860846531784368e-05,
      "loss": 0.3936,
      "num_input_tokens_seen": 4197648,
      "step": 130
    },
    {
      "epoch": 2.606356968215159,
      "grad_norm": 1.3846728801727295,
      "learning_rate": 1.8502171357296144e-05,
      "loss": 0.376,
      "num_input_tokens_seen": 4361488,
      "step": 135
    },
    {
      "epoch": 2.704156479217604,
      "grad_norm": 1.1824907064437866,
      "learning_rate": 1.839229287286327e-05,
      "loss": 0.3735,
      "num_input_tokens_seen": 4525328,
      "step": 140
    },
    {
      "epoch": 2.801955990220049,
      "grad_norm": 1.2617613077163696,
      "learning_rate": 1.827887618942318e-05,
      "loss": 0.4114,
      "num_input_tokens_seen": 4689168,
      "step": 145
    },
    {
      "epoch": 2.899755501222494,
      "grad_norm": 1.1593624353408813,
      "learning_rate": 1.816196912356222e-05,
      "loss": 0.3696,
      "num_input_tokens_seen": 4853008,
      "step": 150
    },
    {
      "epoch": 2.997555012224939,
      "grad_norm": 1.1450086832046509,
      "learning_rate": 1.8041620963415418e-05,
      "loss": 0.3888,
      "num_input_tokens_seen": 5016848,
      "step": 155
    },
    {
      "epoch": 3.078239608801956,
      "grad_norm": 1.4760794639587402,
      "learning_rate": 1.7917882447886585e-05,
      "loss": 0.2605,
      "num_input_tokens_seen": 5149968,
      "step": 160
    },
    {
      "epoch": 3.176039119804401,
      "grad_norm": 1.520880103111267,
      "learning_rate": 1.7790805745256703e-05,
      "loss": 0.2475,
      "num_input_tokens_seen": 5313808,
      "step": 165
    },
    {
      "epoch": 3.273838630806846,
      "grad_norm": 1.4768394231796265,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.2344,
      "num_input_tokens_seen": 5477648,
      "step": 170
    },
    {
      "epoch": 3.371638141809291,
      "grad_norm": 1.4958367347717285,
      "learning_rate": 1.7526853466145248e-05,
      "loss": 0.2665,
      "num_input_tokens_seen": 5641488,
      "step": 175
    },
    {
      "epoch": 3.469437652811736,
      "grad_norm": 1.4445388317108154,
      "learning_rate": 1.7390089172206594e-05,
      "loss": 0.2477,
      "num_input_tokens_seen": 5805328,
      "step": 180
    },
    {
      "epoch": 3.567237163814181,
      "grad_norm": 1.4173372983932495,
      "learning_rate": 1.725020920933593e-05,
      "loss": 0.2679,
      "num_input_tokens_seen": 5969168,
      "step": 185
    },
    {
      "epoch": 3.665036674816626,
      "grad_norm": 1.6111114025115967,
      "learning_rate": 1.710727255106447e-05,
      "loss": 0.234,
      "num_input_tokens_seen": 6133008,
      "step": 190
    },
    {
      "epoch": 3.762836185819071,
      "grad_norm": 1.4121896028518677,
      "learning_rate": 1.696133945962927e-05,
      "loss": 0.2587,
      "num_input_tokens_seen": 6296848,
      "step": 195
    },
    {
      "epoch": 3.860635696821516,
      "grad_norm": 1.3143867254257202,
      "learning_rate": 1.681247146056654e-05,
      "loss": 0.2606,
      "num_input_tokens_seen": 6460688,
      "step": 200
    },
    {
      "epoch": 3.958435207823961,
      "grad_norm": 1.2710639238357544,
      "learning_rate": 1.6660731316772503e-05,
      "loss": 0.2543,
      "num_input_tokens_seen": 6624528,
      "step": 205
    },
    {
      "epoch": 4.039119804400978,
      "grad_norm": 1.284611463546753,
      "learning_rate": 1.650618300204242e-05,
      "loss": 0.1856,
      "num_input_tokens_seen": 6757128,
      "step": 210
    },
    {
      "epoch": 4.136919315403423,
      "grad_norm": 1.6113184690475464,
      "learning_rate": 1.634889167409923e-05,
      "loss": 0.1462,
      "num_input_tokens_seen": 6920968,
      "step": 215
    },
    {
      "epoch": 4.234718826405868,
      "grad_norm": 1.230480432510376,
      "learning_rate": 1.6188923647122946e-05,
      "loss": 0.17,
      "num_input_tokens_seen": 7084808,
      "step": 220
    },
    {
      "epoch": 4.332518337408313,
      "grad_norm": 1.388247013092041,
      "learning_rate": 1.6026346363792565e-05,
      "loss": 0.1427,
      "num_input_tokens_seen": 7248648,
      "step": 225
    },
    {
      "epoch": 4.430317848410758,
      "grad_norm": 1.3794785737991333,
      "learning_rate": 1.5861228366852148e-05,
      "loss": 0.15,
      "num_input_tokens_seen": 7412488,
      "step": 230
    },
    {
      "epoch": 4.528117359413203,
      "grad_norm": 1.5547810792922974,
      "learning_rate": 1.5693639270213138e-05,
      "loss": 0.1603,
      "num_input_tokens_seen": 7576264,
      "step": 235
    },
    {
      "epoch": 4.625916870415648,
      "grad_norm": 1.49036705493927,
      "learning_rate": 1.552364972960506e-05,
      "loss": 0.1536,
      "num_input_tokens_seen": 7740104,
      "step": 240
    },
    {
      "epoch": 4.723716381418093,
      "grad_norm": 1.2922449111938477,
      "learning_rate": 1.5351331412787004e-05,
      "loss": 0.1415,
      "num_input_tokens_seen": 7903944,
      "step": 245
    },
    {
      "epoch": 4.821515892420538,
      "grad_norm": 1.5458725690841675,
      "learning_rate": 1.5176756969332428e-05,
      "loss": 0.159,
      "num_input_tokens_seen": 8067784,
      "step": 250
    },
    {
      "epoch": 4.919315403422983,
      "grad_norm": 1.3060288429260254,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.1699,
      "num_input_tokens_seen": 8231624,
      "step": 255
    },
    {
      "epoch": 5.0,
      "grad_norm": 4.3814568519592285,
      "learning_rate": 1.4821135025703491e-05,
      "loss": 0.1295,
      "num_input_tokens_seen": 8364744,
      "step": 260
    },
    {
      "epoch": 5.097799511002445,
      "grad_norm": 1.496863842010498,
      "learning_rate": 1.4640237456093636e-05,
      "loss": 0.1019,
      "num_input_tokens_seen": 8528584,
      "step": 265
    },
    {
      "epoch": 5.19559902200489,
      "grad_norm": 1.3016966581344604,
      "learning_rate": 1.4457383557765385e-05,
      "loss": 0.0916,
      "num_input_tokens_seen": 8692424,
      "step": 270
    },
    {
      "epoch": 5.293398533007335,
      "grad_norm": 1.3272321224212646,
      "learning_rate": 1.427265042210381e-05,
      "loss": 0.0935,
      "num_input_tokens_seen": 8856264,
      "step": 275
    },
    {
      "epoch": 5.39119804400978,
      "grad_norm": 1.2260195016860962,
      "learning_rate": 1.4086115932782316e-05,
      "loss": 0.0679,
      "num_input_tokens_seen": 9020104,
      "step": 280
    },
    {
      "epoch": 5.488997555012225,
      "grad_norm": 1.1941609382629395,
      "learning_rate": 1.3897858732926794e-05,
      "loss": 0.101,
      "num_input_tokens_seen": 9183944,
      "step": 285
    },
    {
      "epoch": 5.58679706601467,
      "grad_norm": 1.3348464965820312,
      "learning_rate": 1.3707958191959609e-05,
      "loss": 0.0802,
      "num_input_tokens_seen": 9347784,
      "step": 290
    },
    {
      "epoch": 5.684596577017115,
      "grad_norm": 1.3610597848892212,
      "learning_rate": 1.3516494372137368e-05,
      "loss": 0.0812,
      "num_input_tokens_seen": 9511624,
      "step": 295
    },
    {
      "epoch": 5.78239608801956,
      "grad_norm": 1.1276705265045166,
      "learning_rate": 1.3323547994796597e-05,
      "loss": 0.0812,
      "num_input_tokens_seen": 9675464,
      "step": 300
    },
    {
      "epoch": 5.880195599022005,
      "grad_norm": 1.4052499532699585,
      "learning_rate": 1.3129200406321545e-05,
      "loss": 0.0884,
      "num_input_tokens_seen": 9839304,
      "step": 305
    },
    {
      "epoch": 5.97799511002445,
      "grad_norm": 1.2434982061386108,
      "learning_rate": 1.2933533543848462e-05,
      "loss": 0.0906,
      "num_input_tokens_seen": 10003144,
      "step": 310
    },
    {
      "epoch": 6.058679706601467,
      "grad_norm": 1.19767427444458,
      "learning_rate": 1.2736629900720832e-05,
      "loss": 0.0616,
      "num_input_tokens_seen": 10136264,
      "step": 315
    },
    {
      "epoch": 6.156479217603912,
      "grad_norm": 1.2783230543136597,
      "learning_rate": 1.2538572491710079e-05,
      "loss": 0.0447,
      "num_input_tokens_seen": 10300104,
      "step": 320
    },
    {
      "epoch": 6.254278728606357,
      "grad_norm": 1.1590359210968018,
      "learning_rate": 1.2339444818016488e-05,
      "loss": 0.0528,
      "num_input_tokens_seen": 10463944,
      "step": 325
    },
    {
      "epoch": 6.352078239608802,
      "grad_norm": 1.2891935110092163,
      "learning_rate": 1.2139330832064975e-05,
      "loss": 0.0429,
      "num_input_tokens_seen": 10627784,
      "step": 330
    },
    {
      "epoch": 6.449877750611247,
      "grad_norm": 1.0882583856582642,
      "learning_rate": 1.1938314902110701e-05,
      "loss": 0.0442,
      "num_input_tokens_seen": 10791624,
      "step": 335
    },
    {
      "epoch": 6.547677261613692,
      "grad_norm": 1.1069742441177368,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.047,
      "num_input_tokens_seen": 10955464,
      "step": 340
    },
    {
      "epoch": 6.645476772616137,
      "grad_norm": 1.0986578464508057,
      "learning_rate": 1.1533916548786856e-05,
      "loss": 0.0437,
      "num_input_tokens_seen": 11119304,
      "step": 345
    },
    {
      "epoch": 6.743276283618582,
      "grad_norm": 1.1824274063110352,
      "learning_rate": 1.133070462016454e-05,
      "loss": 0.0466,
      "num_input_tokens_seen": 11282568,
      "step": 350
    },
    {
      "epoch": 6.841075794621027,
      "grad_norm": 1.197988510131836,
      "learning_rate": 1.1126931665153213e-05,
      "loss": 0.0498,
      "num_input_tokens_seen": 11446408,
      "step": 355
    },
    {
      "epoch": 6.938875305623472,
      "grad_norm": 1.1721147298812866,
      "learning_rate": 1.092268359463302e-05,
      "loss": 0.0409,
      "num_input_tokens_seen": 11610248,
      "step": 360
    },
    {
      "epoch": 7.019559902200489,
      "grad_norm": 0.9280807971954346,
      "learning_rate": 1.0718046519793276e-05,
      "loss": 0.0444,
      "num_input_tokens_seen": 11743368,
      "step": 365
    },
    {
      "epoch": 7.117359413202934,
      "grad_norm": 0.8121551871299744,
      "learning_rate": 1.0513106715827897e-05,
      "loss": 0.0212,
      "num_input_tokens_seen": 11906632,
      "step": 370
    },
    {
      "epoch": 7.215158924205379,
      "grad_norm": 0.974238932132721,
      "learning_rate": 1.0307950585561705e-05,
      "loss": 0.0275,
      "num_input_tokens_seen": 12070472,
      "step": 375
    },
    {
      "epoch": 7.312958435207824,
      "grad_norm": 0.8143863081932068,
      "learning_rate": 1.01026646230229e-05,
      "loss": 0.0234,
      "num_input_tokens_seen": 12234312,
      "step": 380
    },
    {
      "epoch": 7.410757946210269,
      "grad_norm": 0.9007371664047241,
      "learning_rate": 9.897335376977104e-06,
      "loss": 0.0227,
      "num_input_tokens_seen": 12398152,
      "step": 385
    },
    {
      "epoch": 7.508557457212714,
      "grad_norm": 0.8347233533859253,
      "learning_rate": 9.692049414438298e-06,
      "loss": 0.0264,
      "num_input_tokens_seen": 12561992,
      "step": 390
    },
    {
      "epoch": 7.606356968215159,
      "grad_norm": 1.0170607566833496,
      "learning_rate": 9.486893284172103e-06,
      "loss": 0.0251,
      "num_input_tokens_seen": 12725832,
      "step": 395
    },
    {
      "epoch": 7.704156479217604,
      "grad_norm": 0.8994267582893372,
      "learning_rate": 9.281953480206725e-06,
      "loss": 0.0237,
      "num_input_tokens_seen": 12889672,
      "step": 400
    },
    {
      "epoch": 7.801955990220049,
      "grad_norm": 1.0272514820098877,
      "learning_rate": 9.07731640536698e-06,
      "loss": 0.0228,
      "num_input_tokens_seen": 13053512,
      "step": 405
    },
    {
      "epoch": 7.899755501222494,
      "grad_norm": 0.8432409167289734,
      "learning_rate": 8.87306833484679e-06,
      "loss": 0.0238,
      "num_input_tokens_seen": 13217352,
      "step": 410
    },
    {
      "epoch": 7.997555012224939,
      "grad_norm": 0.9295158386230469,
      "learning_rate": 8.669295379835467e-06,
      "loss": 0.0222,
      "num_input_tokens_seen": 13381192,
      "step": 415
    },
    {
      "epoch": 8.078239608801956,
      "grad_norm": 0.6758769750595093,
      "learning_rate": 8.466083451213145e-06,
      "loss": 0.0109,
      "num_input_tokens_seen": 13514312,
      "step": 420
    },
    {
      "epoch": 8.1760391198044,
      "grad_norm": 0.5933005213737488,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.0118,
      "num_input_tokens_seen": 13677448,
      "step": 425
    },
    {
      "epoch": 8.273838630806846,
      "grad_norm": 0.7031515836715698,
      "learning_rate": 8.0616850978893e-06,
      "loss": 0.01,
      "num_input_tokens_seen": 13841288,
      "step": 430
    },
    {
      "epoch": 8.37163814180929,
      "grad_norm": 0.8007884621620178,
      "learning_rate": 7.860669167935028e-06,
      "loss": 0.0112,
      "num_input_tokens_seen": 14005128,
      "step": 435
    },
    {
      "epoch": 8.469437652811736,
      "grad_norm": 0.6427900195121765,
      "learning_rate": 7.660555181983517e-06,
      "loss": 0.0133,
      "num_input_tokens_seen": 14168968,
      "step": 440
    },
    {
      "epoch": 8.56723716381418,
      "grad_norm": 0.6638085246086121,
      "learning_rate": 7.461427508289922e-06,
      "loss": 0.0113,
      "num_input_tokens_seen": 14332808,
      "step": 445
    },
    {
      "epoch": 8.665036674816626,
      "grad_norm": 0.6730697751045227,
      "learning_rate": 7.263370099279173e-06,
      "loss": 0.0102,
      "num_input_tokens_seen": 14496648,
      "step": 450
    },
    {
      "epoch": 8.76283618581907,
      "grad_norm": 0.7171938419342041,
      "learning_rate": 7.066466456151541e-06,
      "loss": 0.01,
      "num_input_tokens_seen": 14660488,
      "step": 455
    },
    {
      "epoch": 8.860635696821516,
      "grad_norm": 0.7203698754310608,
      "learning_rate": 6.870799593678459e-06,
      "loss": 0.0116,
      "num_input_tokens_seen": 14824328,
      "step": 460
    },
    {
      "epoch": 8.95843520782396,
      "grad_norm": 0.7533681392669678,
      "learning_rate": 6.6764520052034054e-06,
      "loss": 0.0111,
      "num_input_tokens_seen": 14988168,
      "step": 465
    },
    {
      "epoch": 9.039119804400977,
      "grad_norm": 0.22926057875156403,
      "learning_rate": 6.483505627862632e-06,
      "loss": 0.0071,
      "num_input_tokens_seen": 15121288,
      "step": 470
    },
    {
      "epoch": 9.136919315403423,
      "grad_norm": 0.4467085301876068,
      "learning_rate": 6.292041808040393e-06,
      "loss": 0.0054,
      "num_input_tokens_seen": 15285128,
      "step": 475
    },
    {
      "epoch": 9.234718826405867,
      "grad_norm": 0.5730965733528137,
      "learning_rate": 6.102141267073207e-06,
      "loss": 0.0059,
      "num_input_tokens_seen": 15448968,
      "step": 480
    },
    {
      "epoch": 9.332518337408313,
      "grad_norm": 0.3801887333393097,
      "learning_rate": 5.913884067217686e-06,
      "loss": 0.0045,
      "num_input_tokens_seen": 15612552,
      "step": 485
    },
    {
      "epoch": 9.430317848410757,
      "grad_norm": 0.4900866448879242,
      "learning_rate": 5.727349577896194e-06,
      "loss": 0.004,
      "num_input_tokens_seen": 15776392,
      "step": 490
    },
    {
      "epoch": 9.528117359413203,
      "grad_norm": 0.3590303957462311,
      "learning_rate": 5.542616442234618e-06,
      "loss": 0.0042,
      "num_input_tokens_seen": 15940232,
      "step": 495
    },
    {
      "epoch": 9.625916870415647,
      "grad_norm": 0.36999812722206116,
      "learning_rate": 5.3597625439063685e-06,
      "loss": 0.0049,
      "num_input_tokens_seen": 16104072,
      "step": 500
    },
    {
      "epoch": 9.723716381418093,
      "grad_norm": 0.3589613735675812,
      "learning_rate": 5.178864974296511e-06,
      "loss": 0.0045,
      "num_input_tokens_seen": 16267912,
      "step": 505
    },
    {
      "epoch": 9.821515892420537,
      "grad_norm": 0.30231136083602905,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.004,
      "num_input_tokens_seen": 16431752,
      "step": 510
    },
    {
      "epoch": 9.919315403422983,
      "grad_norm": 0.34575000405311584,
      "learning_rate": 4.823243030667576e-06,
      "loss": 0.0042,
      "num_input_tokens_seen": 16595592,
      "step": 515
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.6714850068092346,
      "learning_rate": 4.648668587212998e-06,
      "loss": 0.0037,
      "num_input_tokens_seen": 16728712,
      "step": 520
    },
    {
      "epoch": 10.097799511002446,
      "grad_norm": 0.19611337780952454,
      "learning_rate": 4.476350270394942e-06,
      "loss": 0.0021,
      "num_input_tokens_seen": 16892552,
      "step": 525
    },
    {
      "epoch": 10.19559902200489,
      "grad_norm": 0.13641950488090515,
      "learning_rate": 4.306360729786867e-06,
      "loss": 0.002,
      "num_input_tokens_seen": 17056392,
      "step": 530
    },
    {
      "epoch": 10.293398533007334,
      "grad_norm": 0.16702738404273987,
      "learning_rate": 4.138771633147856e-06,
      "loss": 0.0023,
      "num_input_tokens_seen": 17220232,
      "step": 535
    },
    {
      "epoch": 10.39119804400978,
      "grad_norm": 0.2278226763010025,
      "learning_rate": 3.973653636207437e-06,
      "loss": 0.002,
      "num_input_tokens_seen": 17384072,
      "step": 540
    },
    {
      "epoch": 10.488997555012224,
      "grad_norm": 0.1814439296722412,
      "learning_rate": 3.8110763528770543e-06,
      "loss": 0.0021,
      "num_input_tokens_seen": 17547912,
      "step": 545
    },
    {
      "epoch": 10.58679706601467,
      "grad_norm": 0.2528248429298401,
      "learning_rate": 3.651108325900773e-06,
      "loss": 0.0025,
      "num_input_tokens_seen": 17711752,
      "step": 550
    },
    {
      "epoch": 10.684596577017114,
      "grad_norm": 0.165152445435524,
      "learning_rate": 3.493816997957582e-06,
      "loss": 0.0021,
      "num_input_tokens_seen": 17875592,
      "step": 555
    },
    {
      "epoch": 10.78239608801956,
      "grad_norm": 0.08041153103113174,
      "learning_rate": 3.339268683227499e-06,
      "loss": 0.0017,
      "num_input_tokens_seen": 18039432,
      "step": 560
    },
    {
      "epoch": 10.880195599022004,
      "grad_norm": 0.24567244946956635,
      "learning_rate": 3.1875285394334575e-06,
      "loss": 0.0018,
      "num_input_tokens_seen": 18203272,
      "step": 565
    },
    {
      "epoch": 10.97799511002445,
      "grad_norm": 0.21529506146907806,
      "learning_rate": 3.0386605403707347e-06,
      "loss": 0.0018,
      "num_input_tokens_seen": 18367112,
      "step": 570
    },
    {
      "epoch": 11.058679706601467,
      "grad_norm": 0.05531800910830498,
      "learning_rate": 2.8927274489355296e-06,
      "loss": 0.0014,
      "num_input_tokens_seen": 18500232,
      "step": 575
    },
    {
      "epoch": 11.156479217603913,
      "grad_norm": 0.04145563766360283,
      "learning_rate": 2.749790790664074e-06,
      "loss": 0.0013,
      "num_input_tokens_seen": 18662984,
      "step": 580
    },
    {
      "epoch": 11.254278728606357,
      "grad_norm": 0.07673907279968262,
      "learning_rate": 2.6099108277934105e-06,
      "loss": 0.0014,
      "num_input_tokens_seen": 18826824,
      "step": 585
    },
    {
      "epoch": 11.352078239608803,
      "grad_norm": 0.054488956928253174,
      "learning_rate": 2.4731465338547556e-06,
      "loss": 0.0013,
      "num_input_tokens_seen": 18990664,
      "step": 590
    },
    {
      "epoch": 11.449877750611247,
      "grad_norm": 0.11207219213247299,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.0013,
      "num_input_tokens_seen": 19154504,
      "step": 595
    },
    {
      "epoch": 11.547677261613693,
      "grad_norm": 0.062526635825634,
      "learning_rate": 2.209194254743295e-06,
      "loss": 0.0012,
      "num_input_tokens_seen": 19318344,
      "step": 600
    },
    {
      "epoch": 11.645476772616137,
      "grad_norm": 0.05114143341779709,
      "learning_rate": 2.0821175521134208e-06,
      "loss": 0.0013,
      "num_input_tokens_seen": 19482184,
      "step": 605
    },
    {
      "epoch": 11.743276283618583,
      "grad_norm": 0.05199455842375755,
      "learning_rate": 1.9583790365845823e-06,
      "loss": 0.0013,
      "num_input_tokens_seen": 19646024,
      "step": 610
    },
    {
      "epoch": 11.841075794621027,
      "grad_norm": 0.057281751185655594,
      "learning_rate": 1.8380308764377841e-06,
      "loss": 0.0014,
      "num_input_tokens_seen": 19809864,
      "step": 615
    },
    {
      "epoch": 11.938875305623473,
      "grad_norm": 0.04751597344875336,
      "learning_rate": 1.7211238105768213e-06,
      "loss": 0.0012,
      "num_input_tokens_seen": 19973704,
      "step": 620
    },
    {
      "epoch": 12.01955990220049,
      "grad_norm": 0.04285968840122223,
      "learning_rate": 1.607707127136734e-06,
      "loss": 0.0014,
      "num_input_tokens_seen": 20106824,
      "step": 625
    },
    {
      "epoch": 12.117359413202934,
      "grad_norm": 0.044368330389261246,
      "learning_rate": 1.4978286427038602e-06,
      "loss": 0.0011,
      "num_input_tokens_seen": 20270664,
      "step": 630
    },
    {
      "epoch": 12.21515892420538,
      "grad_norm": 0.04820827767252922,
      "learning_rate": 1.3915346821563235e-06,
      "loss": 0.0012,
      "num_input_tokens_seen": 20434504,
      "step": 635
    },
    {
      "epoch": 12.312958435207824,
      "grad_norm": 0.034025732427835464,
      "learning_rate": 1.2888700591334225e-06,
      "loss": 0.0011,
      "num_input_tokens_seen": 20598280,
      "step": 640
    },
    {
      "epoch": 12.41075794621027,
      "grad_norm": 0.03853330388665199,
      "learning_rate": 1.1898780571421554e-06,
      "loss": 0.0011,
      "num_input_tokens_seen": 20762120,
      "step": 645
    },
    {
      "epoch": 12.508557457212714,
      "grad_norm": 0.039888255298137665,
      "learning_rate": 1.0946004113088381e-06,
      "loss": 0.0012,
      "num_input_tokens_seen": 20925960,
      "step": 650
    },
    {
      "epoch": 12.60635696821516,
      "grad_norm": 0.04366978630423546,
      "learning_rate": 1.0030772907835484e-06,
      "loss": 0.001,
      "num_input_tokens_seen": 21089800,
      "step": 655
    },
    {
      "epoch": 12.704156479217604,
      "grad_norm": 0.03416445106267929,
      "learning_rate": 9.153472818047627e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 21253640,
      "step": 660
    },
    {
      "epoch": 12.80195599022005,
      "grad_norm": 0.04817335307598114,
      "learning_rate": 8.31447371431372e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 21417480,
      "step": 665
    },
    {
      "epoch": 12.899755501222494,
      "grad_norm": 0.03280309960246086,
      "learning_rate": 7.514129319488839e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 21581320,
      "step": 670
    },
    {
      "epoch": 12.99755501222494,
      "grad_norm": 0.04426475614309311,
      "learning_rate": 6.752777059564431e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 21745160,
      "step": 675
    },
    {
      "epoch": 13.078239608801956,
      "grad_norm": 0.03248964622616768,
      "learning_rate": 6.030737921409169e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 21877864,
      "step": 680
    },
    {
      "epoch": 13.1760391198044,
      "grad_norm": 0.0347786545753479,
      "learning_rate": 5.348316317440549e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 22041704,
      "step": 685
    },
    {
      "epoch": 13.273838630806846,
      "grad_norm": 0.04258348420262337,
      "learning_rate": 4.7057999572843516e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 22205544,
      "step": 690
    },
    {
      "epoch": 13.37163814180929,
      "grad_norm": 0.0364152230322361,
      "learning_rate": 4.103459726475889e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 22369384,
      "step": 695
    },
    {
      "epoch": 13.469437652811736,
      "grad_norm": 0.039297617971897125,
      "learning_rate": 3.541549572254488e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 22533224,
      "step": 700
    },
    {
      "epoch": 13.56723716381418,
      "grad_norm": 0.037363260984420776,
      "learning_rate": 3.020306396499062e-07,
      "loss": 0.0009,
      "num_input_tokens_seen": 22697064,
      "step": 705
    },
    {
      "epoch": 13.665036674816626,
      "grad_norm": 0.032989222556352615,
      "learning_rate": 2.539949955849985e-07,
      "loss": 0.0011,
      "num_input_tokens_seen": 22860904,
      "step": 710
    },
    {
      "epoch": 13.76283618581907,
      "grad_norm": 0.03354249894618988,
      "learning_rate": 2.1006827690595478e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 23024744,
      "step": 715
    },
    {
      "epoch": 13.860635696821516,
      "grad_norm": 0.03322712704539299,
      "learning_rate": 1.7026900316098217e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 23188584,
      "step": 720
    },
    {
      "epoch": 13.95843520782396,
      "grad_norm": 0.03775469958782196,
      "learning_rate": 1.3461395376340502e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 23352424,
      "step": 725
    },
    {
      "epoch": 14.039119804400977,
      "grad_norm": 0.039018385112285614,
      "learning_rate": 1.0311816091744698e-07,
      "loss": 0.001,
      "num_input_tokens_seen": 23485296,
      "step": 730
    },
    {
      "epoch": 14.136919315403423,
      "grad_norm": 0.03440910577774048,
      "learning_rate": 7.579490328064265e-08,
      "loss": 0.001,
      "num_input_tokens_seen": 23649136,
      "step": 735
    },
    {
      "epoch": 14.234718826405867,
      "grad_norm": 0.03226885199546814,
      "learning_rate": 5.265570036553813e-08,
      "loss": 0.001,
      "num_input_tokens_seen": 23812976,
      "step": 740
    },
    {
      "epoch": 14.332518337408313,
      "grad_norm": 0.03353099152445793,
      "learning_rate": 3.371030768305583e-08,
      "loss": 0.0009,
      "num_input_tokens_seen": 23976816,
      "step": 745
    },
    {
      "epoch": 14.430317848410757,
      "grad_norm": 0.04043235257267952,
      "learning_rate": 1.896671262955896e-08,
      "loss": 0.001,
      "num_input_tokens_seen": 24140656,
      "step": 750
    },
    {
      "epoch": 14.528117359413203,
      "grad_norm": 0.0331251285970211,
      "learning_rate": 8.431131119361891e-09,
      "loss": 0.001,
      "num_input_tokens_seen": 24304496,
      "step": 755
    },
    {
      "epoch": 14.625916870415647,
      "grad_norm": 0.03793644160032272,
      "learning_rate": 2.108004964086474e-09,
      "loss": 0.001,
      "num_input_tokens_seen": 24467760,
      "step": 760
    },
    {
      "epoch": 14.723716381418093,
      "grad_norm": 0.03213175758719444,
      "learning_rate": 0.0,
      "loss": 0.001,
      "num_input_tokens_seen": 24631600,
      "step": 765
    },
    {
      "epoch": 14.723716381418093,
      "num_input_tokens_seen": 24631600,
      "step": 765,
      "total_flos": 1.936547143200768e+17,
      "train_loss": 0.1555636612302776,
      "train_runtime": 3426.6896,
      "train_samples_per_second": 3.576,
      "train_steps_per_second": 0.223
    }
  ],
  "logging_steps": 5,
  "max_steps": 765,
  "num_input_tokens_seen": 24631600,
  "num_train_epochs": 15,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.936547143200768e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}