{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 5.0, "eval_steps": 1000.0, "global_step": 7215, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0693000693000693, "grad_norm": 84.25032806396484, "learning_rate": 9.900000000000002e-06, "loss": 5.6087, "step": 100 }, { "epoch": 0.1386001386001386, "grad_norm": 94.63692474365234, "learning_rate": 1.9900000000000003e-05, "loss": 3.348, "step": 200 }, { "epoch": 0.2079002079002079, "grad_norm": 3.8304085731506348, "learning_rate": 2.9900000000000002e-05, "loss": 1.6412, "step": 300 }, { "epoch": 0.2772002772002772, "grad_norm": 1.6271604299545288, "learning_rate": 3.99e-05, "loss": 1.0079, "step": 400 }, { "epoch": 0.3465003465003465, "grad_norm": 1.7938034534454346, "learning_rate": 4.99e-05, "loss": 0.8602, "step": 500 }, { "epoch": 0.4158004158004158, "grad_norm": 1.656532883644104, "learning_rate": 4.926284437825763e-05, "loss": 0.7916, "step": 600 }, { "epoch": 0.4851004851004851, "grad_norm": 1.0380038022994995, "learning_rate": 4.851824274013403e-05, "loss": 0.7504, "step": 700 }, { "epoch": 0.5544005544005544, "grad_norm": 1.5392476320266724, "learning_rate": 4.7773641102010426e-05, "loss": 0.716, "step": 800 }, { "epoch": 0.6237006237006237, "grad_norm": 1.010367512702942, "learning_rate": 4.702903946388682e-05, "loss": 0.6681, "step": 900 }, { "epoch": 0.693000693000693, "grad_norm": 1.0135914087295532, "learning_rate": 4.628443782576322e-05, "loss": 0.58, "step": 1000 }, { "epoch": 0.7623007623007623, "grad_norm": 1.3977186679840088, "learning_rate": 4.5539836187639616e-05, "loss": 0.4681, "step": 1100 }, { "epoch": 0.8316008316008316, "grad_norm": 1.3097604513168335, "learning_rate": 4.479523454951601e-05, "loss": 0.3936, "step": 1200 }, { "epoch": 0.9009009009009009, "grad_norm": 1.0134280920028687, "learning_rate": 4.405063291139241e-05, "loss": 0.3355, "step": 1300 }, { "epoch": 0.9702009702009702, "grad_norm": 1.1942052841186523, "learning_rate": 4.330603127326881e-05, "loss": 0.3031, "step": 1400 }, { "epoch": 1.0395010395010396, "grad_norm": 1.3656076192855835, "learning_rate": 4.25614296351452e-05, "loss": 0.2624, "step": 1500 }, { "epoch": 1.1088011088011087, "grad_norm": 1.570897102355957, "learning_rate": 4.1816827997021594e-05, "loss": 0.2349, "step": 1600 }, { "epoch": 1.178101178101178, "grad_norm": 1.404532551765442, "learning_rate": 4.107222635889799e-05, "loss": 0.2216, "step": 1700 }, { "epoch": 1.2474012474012475, "grad_norm": 0.8654778599739075, "learning_rate": 4.032762472077439e-05, "loss": 0.2005, "step": 1800 }, { "epoch": 1.3167013167013166, "grad_norm": 1.196643352508545, "learning_rate": 3.9583023082650785e-05, "loss": 0.1958, "step": 1900 }, { "epoch": 1.386001386001386, "grad_norm": 1.4473341703414917, "learning_rate": 3.883842144452718e-05, "loss": 0.1791, "step": 2000 }, { "epoch": 1.4553014553014554, "grad_norm": 0.9925177097320557, "learning_rate": 3.809381980640357e-05, "loss": 0.1721, "step": 2100 }, { "epoch": 1.5246015246015245, "grad_norm": 1.4818828105926514, "learning_rate": 3.734921816827997e-05, "loss": 0.1615, "step": 2200 }, { "epoch": 1.593901593901594, "grad_norm": 1.7907570600509644, "learning_rate": 3.6604616530156365e-05, "loss": 0.1438, "step": 2300 }, { "epoch": 1.6632016632016633, "grad_norm": 0.9886597394943237, "learning_rate": 3.586001489203276e-05, "loss": 0.1384, "step": 2400 }, { "epoch": 1.7325017325017324, "grad_norm": 1.0584090948104858, "learning_rate": 
3.5115413253909166e-05, "loss": 0.1393, "step": 2500 }, { "epoch": 1.8018018018018018, "grad_norm": 0.7380561828613281, "learning_rate": 3.4370811615785556e-05, "loss": 0.1257, "step": 2600 }, { "epoch": 1.871101871101871, "grad_norm": 0.8808689117431641, "learning_rate": 3.362620997766195e-05, "loss": 0.1198, "step": 2700 }, { "epoch": 1.9404019404019404, "grad_norm": 0.8636562824249268, "learning_rate": 3.288160833953835e-05, "loss": 0.1159, "step": 2800 }, { "epoch": 2.0097020097020097, "grad_norm": 0.7358367443084717, "learning_rate": 3.213700670141475e-05, "loss": 0.1038, "step": 2900 }, { "epoch": 2.079002079002079, "grad_norm": 0.7814437747001648, "learning_rate": 3.1392405063291144e-05, "loss": 0.105, "step": 3000 }, { "epoch": 2.1483021483021485, "grad_norm": 1.4671565294265747, "learning_rate": 3.064780342516754e-05, "loss": 0.0935, "step": 3100 }, { "epoch": 2.2176022176022174, "grad_norm": 0.721126139163971, "learning_rate": 2.9903201787043934e-05, "loss": 0.096, "step": 3200 }, { "epoch": 2.286902286902287, "grad_norm": 0.6991815567016602, "learning_rate": 2.9158600148920327e-05, "loss": 0.1034, "step": 3300 }, { "epoch": 2.356202356202356, "grad_norm": 0.9121260643005371, "learning_rate": 2.8413998510796724e-05, "loss": 0.0939, "step": 3400 }, { "epoch": 2.4255024255024256, "grad_norm": 1.1622234582901, "learning_rate": 2.766939687267312e-05, "loss": 0.092, "step": 3500 }, { "epoch": 2.494802494802495, "grad_norm": 0.7380202412605286, "learning_rate": 2.6924795234549515e-05, "loss": 0.084, "step": 3600 }, { "epoch": 2.564102564102564, "grad_norm": 1.413069248199463, "learning_rate": 2.618019359642591e-05, "loss": 0.0841, "step": 3700 }, { "epoch": 2.6334026334026333, "grad_norm": 0.6783232092857361, "learning_rate": 2.543559195830231e-05, "loss": 0.0831, "step": 3800 }, { "epoch": 2.7027027027027026, "grad_norm": 0.8206629157066345, "learning_rate": 2.4690990320178705e-05, "loss": 0.0853, "step": 3900 }, { "epoch": 2.772002772002772, "grad_norm": 0.8955414295196533, "learning_rate": 2.3946388682055102e-05, "loss": 0.0807, "step": 4000 }, { "epoch": 2.8413028413028414, "grad_norm": 0.5236178636550903, "learning_rate": 2.3201787043931496e-05, "loss": 0.0801, "step": 4100 }, { "epoch": 2.9106029106029108, "grad_norm": 0.6238868832588196, "learning_rate": 2.2457185405807893e-05, "loss": 0.0788, "step": 4200 }, { "epoch": 2.97990297990298, "grad_norm": 0.8797939419746399, "learning_rate": 2.171258376768429e-05, "loss": 0.0767, "step": 4300 }, { "epoch": 3.049203049203049, "grad_norm": 0.8538146018981934, "learning_rate": 2.0967982129560686e-05, "loss": 0.0758, "step": 4400 }, { "epoch": 3.1185031185031185, "grad_norm": 0.5005732774734497, "learning_rate": 2.0223380491437083e-05, "loss": 0.0685, "step": 4500 }, { "epoch": 3.187803187803188, "grad_norm": 0.6817125082015991, "learning_rate": 1.947877885331348e-05, "loss": 0.0702, "step": 4600 }, { "epoch": 3.257103257103257, "grad_norm": 0.5497564673423767, "learning_rate": 1.8734177215189874e-05, "loss": 0.0664, "step": 4700 }, { "epoch": 3.3264033264033266, "grad_norm": 0.5136599540710449, "learning_rate": 1.798957557706627e-05, "loss": 0.0624, "step": 4800 }, { "epoch": 3.3957033957033955, "grad_norm": 0.8245359659194946, "learning_rate": 1.7244973938942664e-05, "loss": 0.0637, "step": 4900 }, { "epoch": 3.465003465003465, "grad_norm": 0.6158192157745361, "learning_rate": 1.650037230081906e-05, "loss": 0.065, "step": 5000 }, { "epoch": 3.5343035343035343, "grad_norm": 0.9357655048370361, "learning_rate": 1.575577066269546e-05, 
"loss": 0.0683, "step": 5100 }, { "epoch": 3.6036036036036037, "grad_norm": 0.6707029938697815, "learning_rate": 1.5011169024571856e-05, "loss": 0.0635, "step": 5200 }, { "epoch": 3.672903672903673, "grad_norm": 0.6795034408569336, "learning_rate": 1.4266567386448252e-05, "loss": 0.0648, "step": 5300 }, { "epoch": 3.742203742203742, "grad_norm": 0.9226582050323486, "learning_rate": 1.3521965748324647e-05, "loss": 0.0613, "step": 5400 }, { "epoch": 3.8115038115038113, "grad_norm": 0.7824294567108154, "learning_rate": 1.2777364110201044e-05, "loss": 0.0652, "step": 5500 }, { "epoch": 3.8808038808038807, "grad_norm": 0.5446070432662964, "learning_rate": 1.203276247207744e-05, "loss": 0.0661, "step": 5600 }, { "epoch": 3.95010395010395, "grad_norm": 0.4837566614151001, "learning_rate": 1.1288160833953836e-05, "loss": 0.0603, "step": 5700 }, { "epoch": 4.0194040194040195, "grad_norm": 0.7145822048187256, "learning_rate": 1.054355919583023e-05, "loss": 0.0604, "step": 5800 }, { "epoch": 4.088704088704088, "grad_norm": 1.1573742628097534, "learning_rate": 9.798957557706628e-06, "loss": 0.0593, "step": 5900 }, { "epoch": 4.158004158004158, "grad_norm": 0.6243206262588501, "learning_rate": 9.054355919583025e-06, "loss": 0.0632, "step": 6000 }, { "epoch": 4.227304227304227, "grad_norm": 0.6134985089302063, "learning_rate": 8.30975428145942e-06, "loss": 0.0601, "step": 6100 }, { "epoch": 4.296604296604297, "grad_norm": 1.786774754524231, "learning_rate": 7.565152643335816e-06, "loss": 0.0541, "step": 6200 }, { "epoch": 4.365904365904366, "grad_norm": 0.9844805598258972, "learning_rate": 6.820551005212211e-06, "loss": 0.0569, "step": 6300 }, { "epoch": 4.435204435204435, "grad_norm": 0.7235813736915588, "learning_rate": 6.075949367088608e-06, "loss": 0.0586, "step": 6400 }, { "epoch": 4.504504504504505, "grad_norm": 0.6935411095619202, "learning_rate": 5.331347728965004e-06, "loss": 0.0602, "step": 6500 }, { "epoch": 4.573804573804574, "grad_norm": 0.6599699854850769, "learning_rate": 4.5867460908414e-06, "loss": 0.0589, "step": 6600 }, { "epoch": 4.643104643104643, "grad_norm": 0.7888091206550598, "learning_rate": 3.842144452717796e-06, "loss": 0.0543, "step": 6700 }, { "epoch": 4.712404712404712, "grad_norm": 0.4492560923099518, "learning_rate": 3.0975428145941924e-06, "loss": 0.0555, "step": 6800 }, { "epoch": 4.781704781704782, "grad_norm": 0.570002555847168, "learning_rate": 2.3529411764705885e-06, "loss": 0.0548, "step": 6900 }, { "epoch": 4.851004851004851, "grad_norm": 0.48206043243408203, "learning_rate": 1.6083395383469843e-06, "loss": 0.059, "step": 7000 }, { "epoch": 4.92030492030492, "grad_norm": 0.6836007237434387, "learning_rate": 8.637379002233806e-07, "loss": 0.054, "step": 7100 }, { "epoch": 4.98960498960499, "grad_norm": 0.8150354027748108, "learning_rate": 1.1913626209977663e-07, "loss": 0.0569, "step": 7200 }, { "epoch": 5.0, "step": 7215, "total_flos": 9204713354568960.0, "train_loss": 0.3190814718469843, "train_runtime": 955.3205, "train_samples_per_second": 120.792, "train_steps_per_second": 7.552 } ], "logging_steps": 100, "max_steps": 7215, "num_input_tokens_seen": 0, "num_train_epochs": 5, "save_steps": 1000, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 9204713354568960.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }