{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.031221096094631143,
  "eval_steps": 334,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 3.1221096094631145e-05, "eval_loss": 2.5151805877685547, "eval_runtime": 12858.7571, "eval_samples_per_second": 2.214, "eval_steps_per_second": 2.214, "step": 1 },
    { "epoch": 0.0003122109609463114, "grad_norm": 1.6301891803741455, "learning_rate": 0.0002, "loss": 1.2603, "step": 10 },
    { "epoch": 0.0006244219218926228, "grad_norm": 2.2189297676086426, "learning_rate": 0.00019994965423831854, "loss": 0.8201, "step": 20 },
    { "epoch": 0.0009366328828389343, "grad_norm": 1.6551645994186401, "learning_rate": 0.00019979866764718843, "loss": 0.7913, "step": 30 },
    { "epoch": 0.0012488438437852456, "grad_norm": 4.437395095825195, "learning_rate": 0.00019954719225730847, "loss": 1.2831, "step": 40 },
    { "epoch": 0.001561054804731557, "grad_norm": 7.653807163238525, "learning_rate": 0.00019919548128307954, "loss": 1.6268, "step": 50 },
    { "epoch": 0.0018732657656778686, "grad_norm": 1.2926839590072632, "learning_rate": 0.00019874388886763944, "loss": 1.1779, "step": 60 },
    { "epoch": 0.00218547672662418, "grad_norm": 1.5756969451904297, "learning_rate": 0.00019819286972627066, "loss": 0.6204, "step": 70 },
    { "epoch": 0.002497687687570491, "grad_norm": 1.8183478116989136, "learning_rate": 0.00019754297868854073, "loss": 0.8666, "step": 80 },
    { "epoch": 0.0028098986485168027, "grad_norm": 4.023663520812988, "learning_rate": 0.00019679487013963564, "loss": 1.1012, "step": 90 },
    { "epoch": 0.003122109609463114, "grad_norm": 3.675027847290039, "learning_rate": 0.00019594929736144976, "loss": 1.3794, "step": 100 },
    { "epoch": 0.0034343205704094257, "grad_norm": 1.1626209020614624, "learning_rate": 0.00019500711177409454, "loss": 1.1787, "step": 110 },
    { "epoch": 0.003746531531355737, "grad_norm": 1.4152737855911255, "learning_rate": 0.00019396926207859084, "loss": 0.471, "step": 120 },
    { "epoch": 0.004058742492302049, "grad_norm": 2.0098798274993896, "learning_rate": 0.00019283679330160726, "loss": 0.9037, "step": 130 },
    { "epoch": 0.00437095345324836, "grad_norm": 2.845428943634033, "learning_rate": 0.00019161084574320696, "loss": 1.2114, "step": 140 },
    { "epoch": 0.004683164414194672, "grad_norm": 5.3034749031066895, "learning_rate": 0.00019029265382866214, "loss": 1.397, "step": 150 },
    { "epoch": 0.004995375375140982, "grad_norm": 1.5945558547973633, "learning_rate": 0.00018888354486549237, "loss": 1.145, "step": 160 },
    { "epoch": 0.005307586336087294, "grad_norm": 1.1813420057296753, "learning_rate": 0.00018738493770697852, "loss": 0.5603, "step": 170 },
    { "epoch": 0.005619797297033605, "grad_norm": 2.35960054397583, "learning_rate": 0.00018579834132349772, "loss": 0.8248, "step": 180 },
    { "epoch": 0.005932008257979917, "grad_norm": 2.443915605545044, "learning_rate": 0.00018412535328311814, "loss": 1.1141, "step": 190 },
    { "epoch": 0.006244219218926228, "grad_norm": 5.689703941345215, "learning_rate": 0.0001823676581429833, "loss": 1.2009, "step": 200 },
    { "epoch": 0.00655643017987254, "grad_norm": 1.4314906597137451, "learning_rate": 0.00018052702575310588, "loss": 1.1061, "step": 210 },
    { "epoch": 0.006868641140818851, "grad_norm": 0.6448104977607727, "learning_rate": 0.00017860530947427875, "loss": 0.4016, "step": 220 },
    { "epoch": 0.007180852101765163, "grad_norm": 2.0396196842193604, "learning_rate": 0.0001766044443118978, "loss": 0.8709, "step": 230 },
    { "epoch": 0.007493063062711474, "grad_norm": 2.5875227451324463, "learning_rate": 0.0001745264449675755, "loss": 1.1121, "step": 240 },
    { "epoch": 0.007805274023657786, "grad_norm": 3.9609525203704834, "learning_rate": 0.00017237340381050703, "loss": 1.251, "step": 250 },
    { "epoch": 0.008117484984604097, "grad_norm": 1.2032607793807983, "learning_rate": 0.00017014748877063214, "loss": 1.1823, "step": 260 },
    { "epoch": 0.008429695945550408, "grad_norm": 1.186848521232605, "learning_rate": 0.00016785094115571322, "loss": 0.6219, "step": 270 },
    { "epoch": 0.00874190690649672, "grad_norm": 1.85453462600708, "learning_rate": 0.00016548607339452853, "loss": 0.5809, "step": 280 },
    { "epoch": 0.009054117867443031, "grad_norm": 2.0443332195281982, "learning_rate": 0.00016305526670845226, "loss": 1.2146, "step": 290 },
    { "epoch": 0.009366328828389343, "grad_norm": 7.1448516845703125, "learning_rate": 0.00016056096871376667, "loss": 1.2524, "step": 300 },
    { "epoch": 0.009678539789335654, "grad_norm": 1.334848165512085, "learning_rate": 0.00015800569095711982, "loss": 1.1966, "step": 310 },
    { "epoch": 0.009990750750281965, "grad_norm": 0.4558267295360565, "learning_rate": 0.00015539200638661104, "loss": 0.589, "step": 320 },
    { "epoch": 0.010302961711228277, "grad_norm": 1.8344190120697021, "learning_rate": 0.00015272254676105025, "loss": 0.5806, "step": 330 },
    { "epoch": 0.010427846095606801, "eval_loss": 0.9566133618354797, "eval_runtime": 13592.0734, "eval_samples_per_second": 2.095, "eval_steps_per_second": 2.095, "step": 334 },
    { "epoch": 0.010615172672174588, "grad_norm": 1.9632649421691895, "learning_rate": 0.00015000000000000001, "loss": 1.0551, "step": 340 },
    { "epoch": 0.0109273836331209, "grad_norm": 4.136826992034912, "learning_rate": 0.0001472271074772683, "loss": 1.0784, "step": 350 },
    { "epoch": 0.01123959459406721, "grad_norm": 1.1779104471206665, "learning_rate": 0.00014440666126057744, "loss": 1.1613, "step": 360 },
    { "epoch": 0.011551805555013523, "grad_norm": 0.8325644731521606, "learning_rate": 0.00014154150130018866, "loss": 0.5119, "step": 370 },
    { "epoch": 0.011864016515959834, "grad_norm": 1.6711801290512085, "learning_rate": 0.00013863451256931287, "loss": 0.6156, "step": 380 },
    { "epoch": 0.012176227476906146, "grad_norm": 2.293975353240967, "learning_rate": 0.00013568862215918717, "loss": 1.0706, "step": 390 },
    { "epoch": 0.012488438437852457, "grad_norm": 2.2785656452178955, "learning_rate": 0.00013270679633174218, "loss": 1.2872, "step": 400 },
    { "epoch": 0.012800649398798769, "grad_norm": 1.2502048015594482, "learning_rate": 0.0001296920375328275, "loss": 1.0768, "step": 410 },
    { "epoch": 0.01311286035974508, "grad_norm": 0.7812928557395935, "learning_rate": 0.00012664738136900348, "loss": 0.5199, "step": 420 },
    { "epoch": 0.01342507132069139, "grad_norm": 2.0176918506622314, "learning_rate": 0.00012357589355094275, "loss": 0.8125, "step": 430 },
    { "epoch": 0.013737282281637703, "grad_norm": 2.014697313308716, "learning_rate": 0.00012048066680651908, "loss": 1.0261, "step": 440 },
    { "epoch": 0.014049493242584013, "grad_norm": 3.0161404609680176, "learning_rate": 0.00011736481776669306, "loss": 1.1352, "step": 450 },
    { "epoch": 0.014361704203530326, "grad_norm": 1.1186920404434204, "learning_rate": 0.00011423148382732853, "loss": 1.1374, "step": 460 },
    { "epoch": 0.014673915164476636, "grad_norm": 0.9820886850357056, "learning_rate": 0.00011108381999010111, "loss": 0.5135, "step": 470 },
    { "epoch": 0.014986126125422949, "grad_norm": 2.8473262786865234, "learning_rate": 0.00010792499568567884, "loss": 0.8812, "step": 480 },
    { "epoch": 0.01529833708636926, "grad_norm": 2.1481053829193115, "learning_rate": 0.00010475819158237425, "loss": 1.0178, "step": 490 },
    { "epoch": 0.015610548047315572, "grad_norm": 1.20015287399292, "learning_rate": 0.00010158659638348081, "loss": 1.0468, "step": 500 },
    { "epoch": 0.015922759008261882, "grad_norm": 1.3715260028839111, "learning_rate": 9.84134036165192e-05, "loss": 1.0121, "step": 510 },
    { "epoch": 0.016234969969208195, "grad_norm": 0.9032047390937805, "learning_rate": 9.524180841762577e-05, "loss": 0.5379, "step": 520 },
    { "epoch": 0.016547180930154507, "grad_norm": 1.6322988271713257, "learning_rate": 9.207500431432115e-05, "loss": 0.5695, "step": 530 },
    { "epoch": 0.016859391891100816, "grad_norm": 2.7707173824310303, "learning_rate": 8.891618000989891e-05, "loss": 0.9264, "step": 540 },
    { "epoch": 0.01717160285204713, "grad_norm": 2.905618667602539, "learning_rate": 8.57685161726715e-05, "loss": 1.0027, "step": 550 },
    { "epoch": 0.01748381381299344, "grad_norm": 1.0891023874282837, "learning_rate": 8.263518223330697e-05, "loss": 1.0168, "step": 560 },
    { "epoch": 0.01779602477393975, "grad_norm": 1.0451884269714355, "learning_rate": 7.951933319348095e-05, "loss": 0.3617, "step": 570 },
    { "epoch": 0.018108235734886062, "grad_norm": 1.5976777076721191, "learning_rate": 7.642410644905726e-05, "loss": 0.7168, "step": 580 },
    { "epoch": 0.018420446695832374, "grad_norm": 2.3960399627685547, "learning_rate": 7.335261863099651e-05, "loss": 0.9692, "step": 590 },
    { "epoch": 0.018732657656778687, "grad_norm": 2.764636516571045, "learning_rate": 7.030796246717255e-05, "loss": 1.0993, "step": 600 },
    { "epoch": 0.019044868617724996, "grad_norm": 1.0002310276031494, "learning_rate": 6.729320366825784e-05, "loss": 1.1104, "step": 610 },
    { "epoch": 0.019357079578671308, "grad_norm": 1.2536269426345825, "learning_rate": 6.431137784081282e-05, "loss": 0.5409, "step": 620 },
    { "epoch": 0.01966929053961762, "grad_norm": 1.9996048212051392, "learning_rate": 6.136548743068713e-05, "loss": 0.7138, "step": 630 },
    { "epoch": 0.01998150150056393, "grad_norm": 1.9154224395751953, "learning_rate": 5.845849869981137e-05, "loss": 0.9189, "step": 640 },
    { "epoch": 0.02029371246151024, "grad_norm": 0.8520050644874573, "learning_rate": 5.559333873942259e-05, "loss": 1.1057, "step": 650 },
    { "epoch": 0.020605923422456554, "grad_norm": 0.821565568447113, "learning_rate": 5.277289252273174e-05, "loss": 0.9571, "step": 660 },
    { "epoch": 0.020855692191213603, "eval_loss": 0.8139573931694031, "eval_runtime": 11568.1075, "eval_samples_per_second": 2.461, "eval_steps_per_second": 2.461, "step": 668 },
    { "epoch": 0.020918134383402866, "grad_norm": 1.3406473398208618, "learning_rate": 5.000000000000002e-05, "loss": 0.4458, "step": 670 },
    { "epoch": 0.021230345344349175, "grad_norm": 1.6482555866241455, "learning_rate": 4.727745323894976e-05, "loss": 0.8605, "step": 680 },
    { "epoch": 0.021542556305295488, "grad_norm": 2.3034768104553223, "learning_rate": 4.4607993613388976e-05, "loss": 0.8842, "step": 690 },
    { "epoch": 0.0218547672662418, "grad_norm": 2.2205710411071777, "learning_rate": 4.19943090428802e-05, "loss": 1.0044, "step": 700 },
    { "epoch": 0.022166978227188112, "grad_norm": 1.0776264667510986, "learning_rate": 3.943903128623335e-05, "loss": 0.9987, "step": 710 },
    { "epoch": 0.02247918918813442, "grad_norm": 0.7974869012832642, "learning_rate": 3.694473329154778e-05, "loss": 0.4341, "step": 720 },
    { "epoch": 0.022791400149080734, "grad_norm": 1.410180926322937, "learning_rate": 3.45139266054715e-05, "loss": 0.7361, "step": 730 },
    { "epoch": 0.023103611110027046, "grad_norm": 1.5520089864730835, "learning_rate": 3.21490588442868e-05, "loss": 0.8486, "step": 740 },
    { "epoch": 0.023415822070973355, "grad_norm": 4.628422260284424, "learning_rate": 2.9852511229367865e-05, "loss": 1.1191, "step": 750 },
    { "epoch": 0.023728033031919667, "grad_norm": 1.1060361862182617, "learning_rate": 2.7626596189492983e-05, "loss": 1.018, "step": 760 },
    { "epoch": 0.02404024399286598, "grad_norm": 1.108621597290039, "learning_rate": 2.5473555032424533e-05, "loss": 0.4265, "step": 770 },
    { "epoch": 0.024352454953812292, "grad_norm": 1.4980049133300781, "learning_rate": 2.339555568810221e-05, "loss": 0.6406, "step": 780 },
    { "epoch": 0.0246646659147586, "grad_norm": 1.4313726425170898, "learning_rate": 2.139469052572127e-05, "loss": 0.9413, "step": 790 },
    { "epoch": 0.024976876875704913, "grad_norm": 1.4710307121276855, "learning_rate": 1.947297424689414e-05, "loss": 1.0135, "step": 800 },
    { "epoch": 0.025289087836651226, "grad_norm": 0.8172721266746521, "learning_rate": 1.763234185701673e-05, "loss": 0.9491, "step": 810 },
    { "epoch": 0.025601298797597538, "grad_norm": 1.1474344730377197, "learning_rate": 1.587464671688187e-05, "loss": 0.5334, "step": 820 },
    { "epoch": 0.025913509758543847, "grad_norm": 1.4124549627304077, "learning_rate": 1.4201658676502294e-05, "loss": 0.7033, "step": 830 },
    { "epoch": 0.02622572071949016, "grad_norm": 1.3929728269577026, "learning_rate": 1.2615062293021507e-05, "loss": 0.852, "step": 840 },
    { "epoch": 0.026537931680436472, "grad_norm": 0.782926619052887, "learning_rate": 1.1116455134507664e-05, "loss": 1.0648, "step": 850 },
    { "epoch": 0.02685014264138278, "grad_norm": 1.1986958980560303, "learning_rate": 9.707346171337894e-06, "loss": 0.9374, "step": 860 },
    { "epoch": 0.027162353602329093, "grad_norm": 1.0474522113800049, "learning_rate": 8.38915425679304e-06, "loss": 0.5091, "step": 870 },
    { "epoch": 0.027474564563275405, "grad_norm": 1.569217562675476, "learning_rate": 7.163206698392744e-06, "loss": 0.6196, "step": 880 },
    { "epoch": 0.027786775524221718, "grad_norm": 2.387129306793213, "learning_rate": 6.030737921409169e-06, "loss": 0.8911, "step": 890 },
    { "epoch": 0.028098986485168027, "grad_norm": 1.0116970539093018, "learning_rate": 4.992888225905468e-06, "loss": 0.893, "step": 900 },
    { "epoch": 0.02841119744611434, "grad_norm": 1.056663155555725, "learning_rate": 4.050702638550275e-06, "loss": 0.9925, "step": 910 },
    { "epoch": 0.02872340840706065, "grad_norm": 1.0909359455108643, "learning_rate": 3.2051298603643753e-06, "loss": 0.634, "step": 920 },
    { "epoch": 0.029035619368006964, "grad_norm": 1.5931735038757324, "learning_rate": 2.4570213114592954e-06, "loss": 0.6653, "step": 930 },
    { "epoch": 0.029347830328953273, "grad_norm": 1.73000967502594, "learning_rate": 1.8071302737293295e-06, "loss": 0.7891, "step": 940 },
    { "epoch": 0.029660041289899585, "grad_norm": 0.5561469197273254, "learning_rate": 1.2561111323605712e-06, "loss": 0.9679, "step": 950 },
    { "epoch": 0.029972252250845897, "grad_norm": 1.1528609991073608, "learning_rate": 8.04518716920466e-07, "loss": 0.9362, "step": 960 },
    { "epoch": 0.030284463211792206, "grad_norm": 0.6554343104362488, "learning_rate": 4.5280774269154115e-07, "loss": 0.3348, "step": 970 },
    { "epoch": 0.03059667417273852, "grad_norm": 1.3619601726531982, "learning_rate": 2.0133235281156736e-07, "loss": 0.5258, "step": 980 },
    { "epoch": 0.03090888513368483, "grad_norm": 1.2936415672302246, "learning_rate": 5.0345761681491746e-08, "loss": 0.9126, "step": 990 },
    { "epoch": 0.031221096094631143, "grad_norm": 1.708081841468811, "learning_rate": 0.0, "loss": 1.0155, "step": 1000 }
  ],
  "logging_steps": 10,
  "max_steps": 1000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 167,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.641564097334477e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}