{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 1210,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08264462809917356,
      "grad_norm": 2.061657190322876,
      "learning_rate": 3.2786885245901635e-05,
      "loss": 0.8384,
      "step": 10
    },
    {
      "epoch": 0.1652892561983471,
      "grad_norm": 1.1209560632705688,
      "learning_rate": 6.557377049180327e-05,
      "loss": 0.3975,
      "step": 20
    },
    {
      "epoch": 0.24793388429752067,
      "grad_norm": 1.1699508428573608,
      "learning_rate": 9.836065573770493e-05,
      "loss": 0.216,
      "step": 30
    },
    {
      "epoch": 0.3305785123966942,
      "grad_norm": 0.6142070889472961,
      "learning_rate": 0.00013114754098360654,
      "loss": 0.1394,
      "step": 40
    },
    {
      "epoch": 0.4132231404958678,
      "grad_norm": 1.4109835624694824,
      "learning_rate": 0.0001639344262295082,
      "loss": 0.1186,
      "step": 50
    },
    {
      "epoch": 0.49586776859504134,
      "grad_norm": 0.6400930881500244,
      "learning_rate": 0.00019672131147540985,
      "loss": 0.1159,
      "step": 60
    },
    {
      "epoch": 0.5785123966942148,
      "grad_norm": 0.6176881790161133,
      "learning_rate": 0.00019996972439741538,
      "loss": 0.1029,
      "step": 70
    },
    {
      "epoch": 0.6611570247933884,
      "grad_norm": 0.7654891610145569,
      "learning_rate": 0.00019986509152903302,
      "loss": 0.0998,
      "step": 80
    },
    {
      "epoch": 0.743801652892562,
      "grad_norm": 0.8429086804389954,
      "learning_rate": 0.0001996858058202821,
      "loss": 0.0898,
      "step": 90
    },
    {
      "epoch": 0.8264462809917356,
      "grad_norm": 1.0951145887374878,
      "learning_rate": 0.00019943200129384444,
      "loss": 0.0762,
      "step": 100
    },
    {
      "epoch": 0.9090909090909091,
      "grad_norm": 0.7709806561470032,
      "learning_rate": 0.00019910386767796751,
      "loss": 0.0828,
      "step": 110
    },
    {
      "epoch": 0.9917355371900827,
      "grad_norm": 0.7118253707885742,
      "learning_rate": 0.0001987016502646355,
      "loss": 0.0742,
      "step": 120
    },
    {
      "epoch": 1.0743801652892562,
      "grad_norm": 0.37856435775756836,
      "learning_rate": 0.00019822564972620427,
      "loss": 0.0638,
      "step": 130
    },
    {
      "epoch": 1.1570247933884297,
      "grad_norm": 0.49986979365348816,
      "learning_rate": 0.00019767622189063812,
      "loss": 0.0623,
      "step": 140
    },
    {
      "epoch": 1.2396694214876034,
      "grad_norm": 0.6002529859542847,
      "learning_rate": 0.0001970537774755148,
      "loss": 0.0636,
      "step": 150
    },
    {
      "epoch": 1.322314049586777,
      "grad_norm": 0.8187810778617859,
      "learning_rate": 0.00019635878178099928,
      "loss": 0.0646,
      "step": 160
    },
    {
      "epoch": 1.4049586776859504,
      "grad_norm": 0.5900979042053223,
      "learning_rate": 0.00019559175434201458,
      "loss": 0.0539,
      "step": 170
    },
    {
      "epoch": 1.487603305785124,
      "grad_norm": 0.39900392293930054,
      "learning_rate": 0.00019475326853987026,
      "loss": 0.051,
      "step": 180
    },
    {
      "epoch": 1.5702479338842976,
      "grad_norm": 0.4218977689743042,
      "learning_rate": 0.0001938439511736388,
      "loss": 0.0523,
      "step": 190
    },
    {
      "epoch": 1.6528925619834711,
      "grad_norm": 0.3695419132709503,
      "learning_rate": 0.00019286448199159995,
      "loss": 0.0484,
      "step": 200
    },
    {
      "epoch": 1.7355371900826446,
      "grad_norm": 0.5481135845184326,
      "learning_rate": 0.0001918155931831042,
      "loss": 0.0491,
      "step": 210
    },
    {
      "epoch": 1.8181818181818183,
      "grad_norm": 0.6999943256378174,
      "learning_rate": 0.00019069806883123387,
      "loss": 0.0502,
      "step": 220
    },
    {
      "epoch": 1.9008264462809916,
      "grad_norm": 0.46525853872299194,
      "learning_rate": 0.0001895127443266722,
      "loss": 0.0496,
      "step": 230
    },
    {
      "epoch": 1.9834710743801653,
      "grad_norm": 0.35091671347618103,
      "learning_rate": 0.0001882605057432181,
      "loss": 0.0486,
      "step": 240
    },
    {
      "epoch": 2.0661157024793386,
      "grad_norm": 0.30202504992485046,
      "learning_rate": 0.00018694228917541313,
      "loss": 0.0439,
      "step": 250
    },
    {
      "epoch": 2.1487603305785123,
      "grad_norm": 0.3925149738788605,
      "learning_rate": 0.00018555908003877635,
      "loss": 0.0407,
      "step": 260
    },
    {
      "epoch": 2.231404958677686,
      "grad_norm": 0.5707463026046753,
      "learning_rate": 0.0001841119123331699,
      "loss": 0.0415,
      "step": 270
    },
    {
      "epoch": 2.3140495867768593,
      "grad_norm": 0.31396156549453735,
      "learning_rate": 0.000182601867869846,
      "loss": 0.0382,
      "step": 280
    },
    {
      "epoch": 2.396694214876033,
      "grad_norm": 0.4403229355812073,
      "learning_rate": 0.00018103007546275293,
      "loss": 0.0408,
      "step": 290
    },
    {
      "epoch": 2.479338842975207,
      "grad_norm": 0.5294861197471619,
      "learning_rate": 0.00017939771008470512,
      "loss": 0.043,
      "step": 300
    },
    {
      "epoch": 2.56198347107438,
      "grad_norm": 0.4009588956832886,
      "learning_rate": 0.00017770599198904763,
      "loss": 0.0472,
      "step": 310
    },
    {
      "epoch": 2.644628099173554,
      "grad_norm": 0.5458269119262695,
      "learning_rate": 0.00017595618579747173,
      "loss": 0.0376,
      "step": 320
    },
    {
      "epoch": 2.7272727272727275,
      "grad_norm": 0.270888090133667,
      "learning_rate": 0.0001741495995546634,
      "loss": 0.0354,
      "step": 330
    },
    {
      "epoch": 2.809917355371901,
      "grad_norm": 0.3030223548412323,
      "learning_rate": 0.00017228758375049185,
      "loss": 0.0427,
      "step": 340
    },
    {
      "epoch": 2.8925619834710745,
      "grad_norm": 0.31921663880348206,
      "learning_rate": 0.00017037153031046847,
      "loss": 0.0415,
      "step": 350
    },
    {
      "epoch": 2.975206611570248,
      "grad_norm": 0.2452174574136734,
      "learning_rate": 0.000168402871555231,
      "loss": 0.0418,
      "step": 360
    },
    {
      "epoch": 3.0578512396694215,
      "grad_norm": 0.25872424244880676,
      "learning_rate": 0.00016638307912983136,
      "loss": 0.0371,
      "step": 370
    },
    {
      "epoch": 3.1404958677685952,
      "grad_norm": 0.2113211750984192,
      "learning_rate": 0.00016431366290362625,
      "loss": 0.0311,
      "step": 380
    },
    {
      "epoch": 3.2231404958677685,
      "grad_norm": 0.34439799189567566,
      "learning_rate": 0.00016219616984159435,
      "loss": 0.0348,
      "step": 390
    },
    {
      "epoch": 3.3057851239669422,
      "grad_norm": 0.22683021426200867,
      "learning_rate": 0.00016003218284792298,
      "loss": 0.0335,
      "step": 400
    },
    {
      "epoch": 3.3884297520661155,
      "grad_norm": 0.3409021496772766,
      "learning_rate": 0.00015782331958272858,
      "loss": 0.037,
      "step": 410
    },
    {
      "epoch": 3.4710743801652892,
      "grad_norm": 0.27717071771621704,
      "learning_rate": 0.00015557123125279636,
      "loss": 0.0328,
      "step": 420
    },
    {
      "epoch": 3.553719008264463,
      "grad_norm": 0.37764936685562134,
      "learning_rate": 0.00015327760137724212,
      "loss": 0.0354,
      "step": 430
    },
    {
      "epoch": 3.6363636363636362,
      "grad_norm": 0.4590853452682495,
      "learning_rate": 0.00015094414452901958,
      "loss": 0.0351,
      "step": 440
    },
    {
      "epoch": 3.71900826446281,
      "grad_norm": 0.3909158408641815,
      "learning_rate": 0.00014857260505321407,
      "loss": 0.0377,
      "step": 450
    },
    {
      "epoch": 3.8016528925619832,
      "grad_norm": 0.3208233714103699,
      "learning_rate": 0.00014616475576308005,
      "loss": 0.0302,
      "step": 460
    },
    {
      "epoch": 3.884297520661157,
      "grad_norm": 0.4053466022014618,
      "learning_rate": 0.00014372239661479766,
      "loss": 0.0318,
      "step": 470
    },
    {
      "epoch": 3.9669421487603307,
      "grad_norm": 0.26150038838386536,
      "learning_rate": 0.00014124735336193934,
      "loss": 0.0339,
      "step": 480
    },
    {
      "epoch": 4.049586776859504,
      "grad_norm": 0.29400137066841125,
      "learning_rate": 0.0001387414761906516,
      "loss": 0.0286,
      "step": 490
    },
    {
      "epoch": 4.132231404958677,
      "grad_norm": 0.24488015472888947,
      "learning_rate": 0.00013620663833657245,
      "loss": 0.0296,
      "step": 500
    },
    {
      "epoch": 4.214876033057851,
      "grad_norm": 0.23769892752170563,
      "learning_rate": 0.00013364473468451888,
      "loss": 0.0288,
      "step": 510
    },
    {
      "epoch": 4.297520661157025,
      "grad_norm": 0.4920603334903717,
      "learning_rate": 0.00013105768035199034,
      "loss": 0.0329,
      "step": 520
    },
    {
      "epoch": 4.380165289256198,
      "grad_norm": 0.3728867173194885,
      "learning_rate": 0.00012844740925754792,
      "loss": 0.0285,
      "step": 530
    },
    {
      "epoch": 4.462809917355372,
      "grad_norm": 0.21319624781608582,
      "learning_rate": 0.0001258158726751388,
      "loss": 0.0298,
      "step": 540
    },
    {
      "epoch": 4.545454545454545,
      "grad_norm": 0.255109965801239,
      "learning_rate": 0.000123165037775447,
      "loss": 0.0266,
      "step": 550
    },
    {
      "epoch": 4.628099173553719,
      "grad_norm": 0.3256368339061737,
      "learning_rate": 0.00012049688615536063,
      "loss": 0.0314,
      "step": 560
    },
    {
      "epoch": 4.710743801652892,
      "grad_norm": 0.2564131021499634,
      "learning_rate": 0.00011781341235665528,
      "loss": 0.0358,
      "step": 570
    },
    {
      "epoch": 4.793388429752066,
      "grad_norm": 0.29052674770355225,
      "learning_rate": 0.00011511662237500032,
      "loss": 0.0281,
      "step": 580
    },
    {
      "epoch": 4.87603305785124,
      "grad_norm": 0.2417948693037033,
      "learning_rate": 0.00011240853216040312,
      "loss": 0.0302,
      "step": 590
    },
    {
      "epoch": 4.958677685950414,
      "grad_norm": 0.3323270082473755,
      "learning_rate": 0.00010969116611021218,
      "loss": 0.0294,
      "step": 600
    },
    {
      "epoch": 5.041322314049586,
      "grad_norm": 0.28201115131378174,
      "learning_rate": 0.00010696655555580524,
      "loss": 0.0272,
      "step": 610
    },
    {
      "epoch": 5.12396694214876,
      "grad_norm": 0.4427995979785919,
      "learning_rate": 0.0001042367372440943,
      "loss": 0.0259,
      "step": 620
    },
    {
      "epoch": 5.206611570247934,
      "grad_norm": 0.2434374839067459,
      "learning_rate": 0.00010150375181498202,
      "loss": 0.0292,
      "step": 630
    },
    {
      "epoch": 5.289256198347108,
      "grad_norm": 0.2886279821395874,
      "learning_rate": 9.87696422759082e-05,
      "loss": 0.03,
      "step": 640
    },
    {
      "epoch": 5.371900826446281,
      "grad_norm": 0.2933793067932129,
      "learning_rate": 9.603645247462633e-05,
      "loss": 0.0316,
      "step": 650
    },
    {
      "epoch": 5.454545454545454,
      "grad_norm": 0.363825261592865,
      "learning_rate": 9.330622557135186e-05,
      "loss": 0.0241,
      "step": 660
    },
    {
      "epoch": 5.537190082644628,
      "grad_norm": 0.29603031277656555,
      "learning_rate": 9.058100251142483e-05,
      "loss": 0.0265,
      "step": 670
    },
    {
      "epoch": 5.619834710743802,
      "grad_norm": 0.29677891731262207,
      "learning_rate": 8.786282049962761e-05,
      "loss": 0.0228,
      "step": 680
    },
    {
      "epoch": 5.702479338842975,
      "grad_norm": 0.296303391456604,
      "learning_rate": 8.515371147729943e-05,
      "loss": 0.0215,
      "step": 690
    },
    {
      "epoch": 5.785123966942149,
      "grad_norm": 0.29517602920532227,
      "learning_rate": 8.245570060338512e-05,
      "loss": 0.0235,
      "step": 700
    },
    {
      "epoch": 5.867768595041323,
      "grad_norm": 0.24108897149562836,
      "learning_rate": 7.977080474055404e-05,
      "loss": 0.0256,
      "step": 710
    },
    {
      "epoch": 5.950413223140496,
      "grad_norm": 0.23352974653244019,
      "learning_rate": 7.710103094752103e-05,
      "loss": 0.0276,
      "step": 720
    },
    {
      "epoch": 6.033057851239669,
      "grad_norm": 0.21120014786720276,
      "learning_rate": 7.44483749786957e-05,
      "loss": 0.0254,
      "step": 730
    },
    {
      "epoch": 6.115702479338843,
      "grad_norm": 0.2598652243614197,
      "learning_rate": 7.181481979228262e-05,
      "loss": 0.0264,
      "step": 740
    },
    {
      "epoch": 6.198347107438017,
      "grad_norm": 0.21868811547756195,
      "learning_rate": 6.920233406794672e-05,
      "loss": 0.0217,
      "step": 750
    },
    {
      "epoch": 6.2809917355371905,
      "grad_norm": 0.2242676317691803,
      "learning_rate": 6.661287073515275e-05,
      "loss": 0.0246,
      "step": 760
    },
    {
      "epoch": 6.363636363636363,
      "grad_norm": 0.19823093712329865,
      "learning_rate": 6.404836551327837e-05,
      "loss": 0.0223,
      "step": 770
    },
    {
      "epoch": 6.446280991735537,
      "grad_norm": 0.16410717368125916,
      "learning_rate": 6.151073546459248e-05,
      "loss": 0.0257,
      "step": 780
    },
    {
      "epoch": 6.528925619834711,
      "grad_norm": 0.17154166102409363,
      "learning_rate": 5.900187756118055e-05,
      "loss": 0.024,
      "step": 790
    },
    {
      "epoch": 6.6115702479338845,
      "grad_norm": 0.19076073169708252,
      "learning_rate": 5.652366726688782e-05,
      "loss": 0.0241,
      "step": 800
    },
    {
      "epoch": 6.694214876033058,
      "grad_norm": 0.1697314828634262,
      "learning_rate": 5.4077957135341115e-05,
      "loss": 0.0209,
      "step": 810
    },
    {
      "epoch": 6.776859504132231,
      "grad_norm": 0.14440375566482544,
      "learning_rate": 5.1666575425096396e-05,
      "loss": 0.0182,
      "step": 820
    },
    {
      "epoch": 6.859504132231405,
      "grad_norm": 0.2061329334974289,
      "learning_rate": 4.9291324732948376e-05,
      "loss": 0.0235,
      "step": 830
    },
    {
      "epoch": 6.9421487603305785,
      "grad_norm": 0.243088960647583,
      "learning_rate": 4.6953980646422535e-05,
      "loss": 0.0209,
      "step": 840
    },
    {
      "epoch": 7.024793388429752,
      "grad_norm": 0.22780433297157288,
      "learning_rate": 4.465629041645819e-05,
      "loss": 0.0199,
      "step": 850
    },
    {
      "epoch": 7.107438016528926,
      "grad_norm": 0.23546425998210907,
      "learning_rate": 4.239997165127384e-05,
      "loss": 0.02,
      "step": 860
    },
    {
      "epoch": 7.190082644628099,
      "grad_norm": 0.19151107966899872,
      "learning_rate": 4.0186711032391776e-05,
      "loss": 0.0228,
      "step": 870
    },
    {
      "epoch": 7.2727272727272725,
      "grad_norm": 0.1883740872144699,
      "learning_rate": 3.801816305378124e-05,
      "loss": 0.0226,
      "step": 880
    },
    {
      "epoch": 7.355371900826446,
      "grad_norm": 0.23011337220668793,
      "learning_rate": 3.5895948785063225e-05,
      "loss": 0.0228,
      "step": 890
    },
    {
      "epoch": 7.43801652892562,
      "grad_norm": 0.2320934236049652,
      "learning_rate": 3.382165465970123e-05,
      "loss": 0.0221,
      "step": 900
    },
    {
      "epoch": 7.520661157024794,
      "grad_norm": 0.12290257960557938,
      "learning_rate": 3.179683128908352e-05,
      "loss": 0.019,
      "step": 910
    },
    {
      "epoch": 7.6033057851239665,
      "grad_norm": 0.13940581679344177,
      "learning_rate": 2.9822992303384024e-05,
      "loss": 0.0173,
      "step": 920
    },
    {
      "epoch": 7.68595041322314,
      "grad_norm": 0.21917001903057098,
      "learning_rate": 2.7901613220067914e-05,
      "loss": 0.0196,
      "step": 930
    },
    {
      "epoch": 7.768595041322314,
      "grad_norm": 0.31197458505630493,
      "learning_rate": 2.6034130340887895e-05,
      "loss": 0.0186,
      "step": 940
    },
    {
      "epoch": 7.851239669421488,
      "grad_norm": 0.19257141649723053,
      "learning_rate": 2.4221939678195594e-05,
      "loss": 0.0194,
      "step": 950
    },
    {
      "epoch": 7.933884297520661,
      "grad_norm": 0.13493625819683075,
      "learning_rate": 2.2466395911370817e-05,
      "loss": 0.0204,
      "step": 960
    },
    {
      "epoch": 8.016528925619834,
      "grad_norm": 0.17625942826271057,
      "learning_rate": 2.07688113741488e-05,
      "loss": 0.0202,
      "step": 970
    },
    {
      "epoch": 8.099173553719009,
      "grad_norm": 0.17436304688453674,
      "learning_rate": 1.913045507360227e-05,
      "loss": 0.0179,
      "step": 980
    },
    {
      "epoch": 8.181818181818182,
      "grad_norm": 0.16752925515174866,
      "learning_rate": 1.7552551741511847e-05,
      "loss": 0.0184,
      "step": 990
    },
    {
      "epoch": 8.264462809917354,
      "grad_norm": 0.20424728095531464,
      "learning_rate": 1.6036280918833924e-05,
      "loss": 0.0204,
      "step": 1000
    },
    {
      "epoch": 8.347107438016529,
      "grad_norm": 0.18081198632717133,
      "learning_rate": 1.4582776073950143e-05,
      "loss": 0.0163,
      "step": 1010
    },
    {
      "epoch": 8.429752066115702,
      "grad_norm": 0.16358765959739685,
      "learning_rate": 1.3193123755358072e-05,
      "loss": 0.0229,
      "step": 1020
    },
    {
      "epoch": 8.512396694214877,
      "grad_norm": 0.27697688341140747,
      "learning_rate": 1.186836277943606e-05,
      "loss": 0.0189,
      "step": 1030
    },
    {
      "epoch": 8.59504132231405,
      "grad_norm": 0.15346001088619232,
      "learning_rate": 1.0609483453889746e-05,
      "loss": 0.0166,
      "step": 1040
    },
    {
      "epoch": 8.677685950413224,
      "grad_norm": 0.19226881861686707,
      "learning_rate": 9.41742683746042e-06,
      "loss": 0.0163,
      "step": 1050
    },
    {
      "epoch": 8.760330578512397,
      "grad_norm": 0.25044411420822144,
      "learning_rate": 8.293084036448895e-06,
      "loss": 0.0177,
      "step": 1060
    },
    {
      "epoch": 8.84297520661157,
      "grad_norm": 0.11807776987552643,
      "learning_rate": 7.237295538580791e-06,
      "loss": 0.0161,
      "step": 1070
    },
    {
      "epoch": 8.925619834710744,
      "grad_norm": 0.1679440587759018,
      "learning_rate": 6.250850584710799e-06,
      "loss": 0.0191,
      "step": 1080
    },
    {
      "epoch": 9.008264462809917,
      "grad_norm": 0.17056939005851746,
      "learning_rate": 5.334486578836118e-06,
      "loss": 0.0146,
      "step": 1090
    },
    {
      "epoch": 9.090909090909092,
      "grad_norm": 0.1600365787744522,
      "learning_rate": 4.488888536859714e-06,
      "loss": 0.0205,
      "step": 1100
    },
    {
      "epoch": 9.173553719008265,
      "grad_norm": 0.18527384102344513,
      "learning_rate": 3.7146885745157233e-06,
      "loss": 0.0127,
      "step": 1110
    },
    {
      "epoch": 9.256198347107437,
      "grad_norm": 0.11880697309970856,
      "learning_rate": 3.012465434839529e-06,
      "loss": 0.0168,
      "step": 1120
    },
    {
      "epoch": 9.338842975206612,
      "grad_norm": 0.1584666520357132,
      "learning_rate": 2.382744055536068e-06,
      "loss": 0.0161,
      "step": 1130
    },
    {
      "epoch": 9.421487603305785,
      "grad_norm": 0.18531374633312225,
      "learning_rate": 1.8259951765694728e-06,
      "loss": 0.0177,
      "step": 1140
    },
    {
      "epoch": 9.50413223140496,
      "grad_norm": 0.17779088020324707,
      "learning_rate": 1.3426349882676325e-06,
      "loss": 0.0157,
      "step": 1150
    },
    {
      "epoch": 9.586776859504132,
      "grad_norm": 0.1391703486442566,
      "learning_rate": 9.3302482020452e-07,
      "loss": 0.0167,
      "step": 1160
    },
    {
      "epoch": 9.669421487603305,
      "grad_norm": 0.1815948635339737,
      "learning_rate": 5.974708710930421e-07,
      "loss": 0.0191,
      "step": 1170
    },
    {
      "epoch": 9.75206611570248,
      "grad_norm": 0.2050154209136963,
      "learning_rate": 3.362239798901712e-07,
      "loss": 0.0188,
      "step": 1180
    },
    {
      "epoch": 9.834710743801653,
      "grad_norm": 0.1503370851278305,
      "learning_rate": 1.4947943828561574e-07,
      "loss": 0.0154,
      "step": 1190
    },
    {
      "epoch": 9.917355371900827,
      "grad_norm": 0.163643941283226,
      "learning_rate": 3.737684471400993e-08,
      "loss": 0.0187,
      "step": 1200
    },
    {
      "epoch": 10.0,
      "grad_norm": 0.12294866144657135,
      "learning_rate": 0.0,
      "loss": 0.014,
      "step": 1210
    },
    {
      "epoch": 10.0,
      "step": 1210,
      "total_flos": 1.7516975778360288e+17,
      "train_loss": 0.045855508757031656,
      "train_runtime": 1400.3465,
      "train_samples_per_second": 55.179,
      "train_steps_per_second": 0.864
    }
  ],
  "logging_steps": 10,
  "max_steps": 1210,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7516975778360288e+17,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}