{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.9936305732484074, "eval_steps": 500, "global_step": 1059, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.028308563340410473, "grad_norm": 10.472460746765137, "learning_rate": 4.971671388101983e-05, "loss": 5.6874, "step": 10 }, { "epoch": 0.056617126680820945, "grad_norm": 1.0690819025039673, "learning_rate": 4.9244570349386214e-05, "loss": 1.622, "step": 20 }, { "epoch": 0.08492569002123142, "grad_norm": 0.6105159521102905, "learning_rate": 4.8772426817752596e-05, "loss": 2.0585, "step": 30 }, { "epoch": 0.11323425336164189, "grad_norm": 0.9013139009475708, "learning_rate": 4.830028328611898e-05, "loss": 1.3675, "step": 40 }, { "epoch": 0.14154281670205238, "grad_norm": 1.0703097581863403, "learning_rate": 4.7828139754485365e-05, "loss": 1.7004, "step": 50 }, { "epoch": 0.16985138004246284, "grad_norm": 0.715630829334259, "learning_rate": 4.7355996222851754e-05, "loss": 1.2608, "step": 60 }, { "epoch": 0.19815994338287332, "grad_norm": 0.6118766069412231, "learning_rate": 4.6883852691218135e-05, "loss": 1.3334, "step": 70 }, { "epoch": 0.22646850672328378, "grad_norm": 1.0044665336608887, "learning_rate": 4.641170915958452e-05, "loss": 1.3244, "step": 80 }, { "epoch": 0.25477707006369427, "grad_norm": 1.927817940711975, "learning_rate": 4.59395656279509e-05, "loss": 1.2348, "step": 90 }, { "epoch": 0.28308563340410475, "grad_norm": 1.5741430521011353, "learning_rate": 4.546742209631728e-05, "loss": 1.22, "step": 100 }, { "epoch": 0.31139419674451524, "grad_norm": 0.6977642774581909, "learning_rate": 4.499527856468367e-05, "loss": 1.0786, "step": 110 }, { "epoch": 0.33970276008492567, "grad_norm": 2.7766706943511963, "learning_rate": 4.452313503305005e-05, "loss": 1.1531, "step": 120 }, { "epoch": 0.36801132342533616, "grad_norm": 2.0790321826934814, "learning_rate": 4.405099150141643e-05, "loss": 1.0915, "step": 130 }, { "epoch": 0.39631988676574664, "grad_norm": 3.8764405250549316, "learning_rate": 4.357884796978281e-05, "loss": 1.1757, "step": 140 }, { "epoch": 0.42462845010615713, "grad_norm": 0.8142156600952148, "learning_rate": 4.31067044381492e-05, "loss": 1.0972, "step": 150 }, { "epoch": 0.45293701344656756, "grad_norm": 2.6679811477661133, "learning_rate": 4.263456090651558e-05, "loss": 1.1941, "step": 160 }, { "epoch": 0.48124557678697805, "grad_norm": 3.724242925643921, "learning_rate": 4.216241737488197e-05, "loss": 1.0199, "step": 170 }, { "epoch": 0.5095541401273885, "grad_norm": 0.9096172451972961, "learning_rate": 4.169027384324835e-05, "loss": 0.9365, "step": 180 }, { "epoch": 0.537862703467799, "grad_norm": 0.8280671834945679, "learning_rate": 4.1218130311614734e-05, "loss": 0.7999, "step": 190 }, { "epoch": 0.5661712668082095, "grad_norm": 0.7605137825012207, "learning_rate": 4.0745986779981115e-05, "loss": 0.9142, "step": 200 }, { "epoch": 0.5944798301486199, "grad_norm": 1.1094071865081787, "learning_rate": 4.02738432483475e-05, "loss": 0.9304, "step": 210 }, { "epoch": 0.6227883934890305, "grad_norm": 1.2188092470169067, "learning_rate": 3.980169971671388e-05, "loss": 1.0773, "step": 220 }, { "epoch": 0.6510969568294409, "grad_norm": 1.421103835105896, "learning_rate": 3.932955618508027e-05, "loss": 0.9702, "step": 230 }, { "epoch": 0.6794055201698513, "grad_norm": 0.6401333212852478, "learning_rate": 3.885741265344665e-05, "loss": 0.8283, "step": 240 }, { "epoch": 0.7077140835102619, "grad_norm": 1.2169103622436523, 
"learning_rate": 3.8385269121813036e-05, "loss": 0.7306, "step": 250 }, { "epoch": 0.7360226468506723, "grad_norm": 4.206311225891113, "learning_rate": 3.791312559017942e-05, "loss": 0.7564, "step": 260 }, { "epoch": 0.7643312101910829, "grad_norm": 0.7803528308868408, "learning_rate": 3.74409820585458e-05, "loss": 0.9409, "step": 270 }, { "epoch": 0.7926397735314933, "grad_norm": 1.2564524412155151, "learning_rate": 3.696883852691218e-05, "loss": 0.8487, "step": 280 }, { "epoch": 0.8209483368719037, "grad_norm": 1.0345911979675293, "learning_rate": 3.649669499527857e-05, "loss": 0.9284, "step": 290 }, { "epoch": 0.8492569002123143, "grad_norm": 2.6895930767059326, "learning_rate": 3.602455146364495e-05, "loss": 1.1654, "step": 300 }, { "epoch": 0.8775654635527247, "grad_norm": 1.2171903848648071, "learning_rate": 3.555240793201133e-05, "loss": 0.8589, "step": 310 }, { "epoch": 0.9058740268931351, "grad_norm": 2.6197869777679443, "learning_rate": 3.5080264400377714e-05, "loss": 0.8405, "step": 320 }, { "epoch": 0.9341825902335457, "grad_norm": 0.9666792750358582, "learning_rate": 3.4608120868744095e-05, "loss": 1.1513, "step": 330 }, { "epoch": 0.9624911535739561, "grad_norm": 0.9216092824935913, "learning_rate": 3.4135977337110484e-05, "loss": 0.8712, "step": 340 }, { "epoch": 0.9907997169143666, "grad_norm": 2.9435744285583496, "learning_rate": 3.366383380547687e-05, "loss": 0.9633, "step": 350 }, { "epoch": 1.0, "eval_loss": 0.920014500617981, "eval_runtime": 45.4818, "eval_samples_per_second": 15.545, "eval_steps_per_second": 1.957, "step": 354 }, { "epoch": 1.0169851380042463, "grad_norm": 0.7391465306282043, "learning_rate": 3.3191690273843253e-05, "loss": 0.6779, "step": 360 }, { "epoch": 1.0452937013446568, "grad_norm": 1.267843246459961, "learning_rate": 3.2719546742209635e-05, "loss": 0.9072, "step": 370 }, { "epoch": 1.0736022646850671, "grad_norm": 0.7193703651428223, "learning_rate": 3.2247403210576016e-05, "loss": 1.0308, "step": 380 }, { "epoch": 1.1019108280254777, "grad_norm": 3.1680991649627686, "learning_rate": 3.17752596789424e-05, "loss": 0.8272, "step": 390 }, { "epoch": 1.1302193913658882, "grad_norm": 1.0723551511764526, "learning_rate": 3.130311614730878e-05, "loss": 1.02, "step": 400 }, { "epoch": 1.1585279547062988, "grad_norm": 0.7101927995681763, "learning_rate": 3.083097261567517e-05, "loss": 0.942, "step": 410 }, { "epoch": 1.186836518046709, "grad_norm": 1.9213780164718628, "learning_rate": 3.0358829084041553e-05, "loss": 0.8884, "step": 420 }, { "epoch": 1.2151450813871196, "grad_norm": 4.313545227050781, "learning_rate": 2.9886685552407934e-05, "loss": 1.055, "step": 430 }, { "epoch": 1.2434536447275302, "grad_norm": 1.1699409484863281, "learning_rate": 2.9414542020774316e-05, "loss": 0.7015, "step": 440 }, { "epoch": 1.2717622080679405, "grad_norm": 0.7084372043609619, "learning_rate": 2.8942398489140697e-05, "loss": 0.8342, "step": 450 }, { "epoch": 1.300070771408351, "grad_norm": 1.1760873794555664, "learning_rate": 2.8470254957507082e-05, "loss": 0.7567, "step": 460 }, { "epoch": 1.3283793347487616, "grad_norm": 6.9120192527771, "learning_rate": 2.799811142587347e-05, "loss": 0.8156, "step": 470 }, { "epoch": 1.356687898089172, "grad_norm": 0.7138371467590332, "learning_rate": 2.7525967894239852e-05, "loss": 0.8024, "step": 480 }, { "epoch": 1.3849964614295824, "grad_norm": 0.6835730075836182, "learning_rate": 2.7053824362606233e-05, "loss": 0.7977, "step": 490 }, { "epoch": 1.413305024769993, "grad_norm": 6.436087608337402, "learning_rate": 
2.6581680830972615e-05, "loss": 0.8102, "step": 500 }, { "epoch": 1.4416135881104033, "grad_norm": 3.4291305541992188, "learning_rate": 2.6109537299339e-05, "loss": 0.7643, "step": 510 }, { "epoch": 1.4699221514508138, "grad_norm": 0.9250390529632568, "learning_rate": 2.563739376770538e-05, "loss": 0.8417, "step": 520 }, { "epoch": 1.4982307147912244, "grad_norm": 3.264242649078369, "learning_rate": 2.516525023607177e-05, "loss": 0.9988, "step": 530 }, { "epoch": 1.5265392781316347, "grad_norm": 1.8651660680770874, "learning_rate": 2.469310670443815e-05, "loss": 0.8652, "step": 540 }, { "epoch": 1.5548478414720452, "grad_norm": 5.576003074645996, "learning_rate": 2.4220963172804533e-05, "loss": 0.7481, "step": 550 }, { "epoch": 1.5831564048124558, "grad_norm": 4.8853302001953125, "learning_rate": 2.3748819641170918e-05, "loss": 0.7857, "step": 560 }, { "epoch": 1.611464968152866, "grad_norm": 2.0183866024017334, "learning_rate": 2.3276676109537303e-05, "loss": 0.607, "step": 570 }, { "epoch": 1.6397735314932769, "grad_norm": 3.133368730545044, "learning_rate": 2.2804532577903684e-05, "loss": 0.8706, "step": 580 }, { "epoch": 1.6680820948336872, "grad_norm": 2.440894365310669, "learning_rate": 2.2332389046270066e-05, "loss": 0.8035, "step": 590 }, { "epoch": 1.6963906581740975, "grad_norm": 3.0257763862609863, "learning_rate": 2.186024551463645e-05, "loss": 1.1186, "step": 600 }, { "epoch": 1.7246992215145083, "grad_norm": 2.741161823272705, "learning_rate": 2.1388101983002835e-05, "loss": 1.0302, "step": 610 }, { "epoch": 1.7530077848549186, "grad_norm": 3.0165324211120605, "learning_rate": 2.0915958451369217e-05, "loss": 1.0235, "step": 620 }, { "epoch": 1.7813163481953291, "grad_norm": 1.8402531147003174, "learning_rate": 2.0443814919735602e-05, "loss": 0.8455, "step": 630 }, { "epoch": 1.8096249115357397, "grad_norm": 1.0427038669586182, "learning_rate": 1.9971671388101983e-05, "loss": 0.7868, "step": 640 }, { "epoch": 1.83793347487615, "grad_norm": 4.955197334289551, "learning_rate": 1.9499527856468365e-05, "loss": 0.8728, "step": 650 }, { "epoch": 1.8662420382165605, "grad_norm": 1.1331288814544678, "learning_rate": 1.9027384324834753e-05, "loss": 0.8719, "step": 660 }, { "epoch": 1.894550601556971, "grad_norm": 1.395735263824463, "learning_rate": 1.8555240793201135e-05, "loss": 0.8224, "step": 670 }, { "epoch": 1.9228591648973814, "grad_norm": 1.110823154449463, "learning_rate": 1.8083097261567516e-05, "loss": 0.6859, "step": 680 }, { "epoch": 1.951167728237792, "grad_norm": 1.224063754081726, "learning_rate": 1.76109537299339e-05, "loss": 0.79, "step": 690 }, { "epoch": 1.9794762915782025, "grad_norm": 1.4636934995651245, "learning_rate": 1.7138810198300283e-05, "loss": 0.9543, "step": 700 }, { "epoch": 2.0, "eval_loss": 0.8783606290817261, "eval_runtime": 45.4943, "eval_samples_per_second": 15.54, "eval_steps_per_second": 1.956, "step": 708 }, { "epoch": 2.0056617126680822, "grad_norm": 3.3927221298217773, "learning_rate": 1.6666666666666667e-05, "loss": 1.0075, "step": 710 }, { "epoch": 2.0339702760084926, "grad_norm": 2.4237897396087646, "learning_rate": 1.6194523135033052e-05, "loss": 0.7245, "step": 720 }, { "epoch": 2.062278839348903, "grad_norm": 0.9672299027442932, "learning_rate": 1.5722379603399434e-05, "loss": 0.7631, "step": 730 }, { "epoch": 2.0905874026893136, "grad_norm": 2.908951997756958, "learning_rate": 1.5250236071765817e-05, "loss": 1.0795, "step": 740 }, { "epoch": 2.118895966029724, "grad_norm": 3.8468856811523438, "learning_rate": 1.4778092540132202e-05, 
"loss": 0.6312, "step": 750 }, { "epoch": 2.1472045293701343, "grad_norm": 1.352995753288269, "learning_rate": 1.4305949008498584e-05, "loss": 0.8062, "step": 760 }, { "epoch": 2.175513092710545, "grad_norm": 1.1038804054260254, "learning_rate": 1.3833805476864967e-05, "loss": 0.8566, "step": 770 }, { "epoch": 2.2038216560509554, "grad_norm": 1.2990654706954956, "learning_rate": 1.3361661945231352e-05, "loss": 0.7693, "step": 780 }, { "epoch": 2.2321302193913657, "grad_norm": 2.6625759601593018, "learning_rate": 1.2889518413597735e-05, "loss": 0.6126, "step": 790 }, { "epoch": 2.2604387827317765, "grad_norm": 5.676569938659668, "learning_rate": 1.2417374881964118e-05, "loss": 0.7988, "step": 800 }, { "epoch": 2.2887473460721868, "grad_norm": 2.953200340270996, "learning_rate": 1.1945231350330501e-05, "loss": 0.6122, "step": 810 }, { "epoch": 2.3170559094125975, "grad_norm": 2.253474473953247, "learning_rate": 1.1473087818696884e-05, "loss": 1.1378, "step": 820 }, { "epoch": 2.345364472753008, "grad_norm": 1.5058188438415527, "learning_rate": 1.1000944287063268e-05, "loss": 0.8095, "step": 830 }, { "epoch": 2.373673036093418, "grad_norm": 0.8082104325294495, "learning_rate": 1.0528800755429651e-05, "loss": 0.5857, "step": 840 }, { "epoch": 2.401981599433829, "grad_norm": 2.0527071952819824, "learning_rate": 1.0056657223796034e-05, "loss": 0.9159, "step": 850 }, { "epoch": 2.4302901627742393, "grad_norm": 4.757397651672363, "learning_rate": 9.584513692162419e-06, "loss": 0.7801, "step": 860 }, { "epoch": 2.4585987261146496, "grad_norm": 2.750612735748291, "learning_rate": 9.1123701605288e-06, "loss": 0.7435, "step": 870 }, { "epoch": 2.4869072894550603, "grad_norm": 0.7405831813812256, "learning_rate": 8.640226628895184e-06, "loss": 0.7491, "step": 880 }, { "epoch": 2.5152158527954707, "grad_norm": 3.5916926860809326, "learning_rate": 8.168083097261569e-06, "loss": 0.896, "step": 890 }, { "epoch": 2.543524416135881, "grad_norm": 2.436352252960205, "learning_rate": 7.69593956562795e-06, "loss": 0.7018, "step": 900 }, { "epoch": 2.5718329794762917, "grad_norm": 0.8077714443206787, "learning_rate": 7.223796033994334e-06, "loss": 1.1473, "step": 910 }, { "epoch": 2.600141542816702, "grad_norm": 2.01489520072937, "learning_rate": 6.751652502360718e-06, "loss": 0.8547, "step": 920 }, { "epoch": 2.6284501061571124, "grad_norm": 0.9167873859405518, "learning_rate": 6.279508970727101e-06, "loss": 0.9682, "step": 930 }, { "epoch": 2.656758669497523, "grad_norm": 0.8447218537330627, "learning_rate": 5.807365439093485e-06, "loss": 0.5817, "step": 940 }, { "epoch": 2.6850672328379335, "grad_norm": 1.5092198848724365, "learning_rate": 5.335221907459869e-06, "loss": 1.2028, "step": 950 }, { "epoch": 2.713375796178344, "grad_norm": 2.282589912414551, "learning_rate": 4.863078375826251e-06, "loss": 0.6571, "step": 960 }, { "epoch": 2.7416843595187546, "grad_norm": 1.5408430099487305, "learning_rate": 4.390934844192634e-06, "loss": 0.6459, "step": 970 }, { "epoch": 2.769992922859165, "grad_norm": 5.208029270172119, "learning_rate": 3.918791312559018e-06, "loss": 0.8532, "step": 980 }, { "epoch": 2.798301486199575, "grad_norm": 1.4328051805496216, "learning_rate": 3.4466477809254016e-06, "loss": 0.8735, "step": 990 }, { "epoch": 2.826610049539986, "grad_norm": 2.1999919414520264, "learning_rate": 2.9745042492917848e-06, "loss": 1.0141, "step": 1000 }, { "epoch": 2.8549186128803963, "grad_norm": 1.0183438062667847, "learning_rate": 2.502360717658168e-06, "loss": 0.7974, "step": 1010 }, { "epoch": 
2.8832271762208066, "grad_norm": 3.3981127738952637, "learning_rate": 2.0302171860245516e-06, "loss": 0.8471, "step": 1020 }, { "epoch": 2.9115357395612174, "grad_norm": 2.758704662322998, "learning_rate": 1.5580736543909348e-06, "loss": 0.8021, "step": 1030 }, { "epoch": 2.9398443029016277, "grad_norm": 0.8233171105384827, "learning_rate": 1.0859301227573182e-06, "loss": 0.7704, "step": 1040 }, { "epoch": 2.968152866242038, "grad_norm": 3.0792815685272217, "learning_rate": 6.137865911237016e-07, "loss": 0.6247, "step": 1050 }, { "epoch": 2.9936305732484074, "eval_loss": 0.861165463924408, "eval_runtime": 45.4972, "eval_samples_per_second": 15.539, "eval_steps_per_second": 1.956, "step": 1059 } ], "logging_steps": 10, "max_steps": 1059, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 1.95177370947158e+17, "train_batch_size": 2, "trial_name": null, "trial_params": null }
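
The state above records a 3-epoch run of 1,059 steps, with the held-out loss dropping from 0.92 after epoch 1 to 0.88 after epoch 2 and 0.86 at the end of training. A minimal sketch of how the "log_history" entries might be read back and plotted, assuming the file is saved as trainer_state.json next to this script and matplotlib is available (both the filename and the output path are illustrative, not part of the run itself):

import json

import matplotlib.pyplot as plt

# Load the Trainer state (the path is an assumption; point it at your checkpoint dir).
with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Per-logging-step entries carry a "loss" key; epoch-end entries carry "eval_loss".
train_points = [(h["step"], h["loss"]) for h in history if "loss" in h]
eval_points = [(h["step"], h["eval_loss"]) for h in history if "eval_loss" in h]

plt.plot(*zip(*train_points), label="train loss")
plt.plot(*zip(*eval_points), marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")  # illustrative output filename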