{ "best_metric": null, "best_model_checkpoint": null, "epoch": 1.0, "eval_steps": 500, "global_step": 268, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.018656716417910446, "grad_norm": 23.549664007081883, "learning_rate": 1.785714285714286e-05, "loss": 6.105, "step": 5 }, { "epoch": 0.03731343283582089, "grad_norm": 13.350189408858572, "learning_rate": 3.571428571428572e-05, "loss": 4.6199, "step": 10 }, { "epoch": 0.055970149253731345, "grad_norm": 9.366720060199096, "learning_rate": 4.999827900623038e-05, "loss": 3.3379, "step": 15 }, { "epoch": 0.07462686567164178, "grad_norm": 6.751610634894579, "learning_rate": 4.993807186343243e-05, "loss": 2.6927, "step": 20 }, { "epoch": 0.09328358208955224, "grad_norm": 6.488714411639678, "learning_rate": 4.979207812402531e-05, "loss": 2.0578, "step": 25 }, { "epoch": 0.11194029850746269, "grad_norm": 3.5421676034059377, "learning_rate": 4.956085596012407e-05, "loss": 1.5831, "step": 30 }, { "epoch": 0.13059701492537312, "grad_norm": 2.571834851270616, "learning_rate": 4.924528939432311e-05, "loss": 1.315, "step": 35 }, { "epoch": 0.14925373134328357, "grad_norm": 1.5963085542775126, "learning_rate": 4.884658491984735e-05, "loss": 1.1863, "step": 40 }, { "epoch": 0.16791044776119404, "grad_norm": 2.074329344904024, "learning_rate": 4.8366266887814235e-05, "loss": 1.1028, "step": 45 }, { "epoch": 0.1865671641791045, "grad_norm": 1.3297400462887774, "learning_rate": 4.780617167924209e-05, "loss": 1.04, "step": 50 }, { "epoch": 0.20522388059701493, "grad_norm": 1.7003158095532473, "learning_rate": 4.716844068408693e-05, "loss": 0.9969, "step": 55 }, { "epoch": 0.22388059701492538, "grad_norm": 1.3298211305507253, "learning_rate": 4.6455512114150546e-05, "loss": 0.9653, "step": 60 }, { "epoch": 0.24253731343283583, "grad_norm": 1.5188363324223721, "learning_rate": 4.5670111681161296e-05, "loss": 0.9325, "step": 65 }, { "epoch": 0.26119402985074625, "grad_norm": 1.455002995998784, "learning_rate": 4.481524217566783e-05, "loss": 0.8993, "step": 70 }, { "epoch": 0.2798507462686567, "grad_norm": 1.2015534696464851, "learning_rate": 4.3894171986588217e-05, "loss": 0.8828, "step": 75 }, { "epoch": 0.29850746268656714, "grad_norm": 2.042471802842113, "learning_rate": 4.29104226053073e-05, "loss": 0.873, "step": 80 }, { "epoch": 0.31716417910447764, "grad_norm": 1.6348429954668364, "learning_rate": 4.186775516209732e-05, "loss": 0.8589, "step": 85 }, { "epoch": 0.3358208955223881, "grad_norm": 1.2929068924137062, "learning_rate": 4.077015604633669e-05, "loss": 0.8499, "step": 90 }, { "epoch": 0.35447761194029853, "grad_norm": 1.2588102670148498, "learning_rate": 3.962182166550441e-05, "loss": 0.8296, "step": 95 }, { "epoch": 0.373134328358209, "grad_norm": 1.016824958444882, "learning_rate": 3.8427142401220634e-05, "loss": 0.8183, "step": 100 }, { "epoch": 0.3917910447761194, "grad_norm": 1.076029269402344, "learning_rate": 3.71906858236735e-05, "loss": 0.8175, "step": 105 }, { "epoch": 0.41044776119402987, "grad_norm": 0.9643240409515527, "learning_rate": 3.591717922860785e-05, "loss": 0.8024, "step": 110 }, { "epoch": 0.4291044776119403, "grad_norm": 1.0996417684236115, "learning_rate": 3.46114915636416e-05, "loss": 0.8028, "step": 115 }, { "epoch": 0.44776119402985076, "grad_norm": 0.6236520681960638, "learning_rate": 3.3278614813010034e-05, "loss": 0.7843, "step": 120 }, { "epoch": 0.4664179104477612, "grad_norm": 0.7490461585227262, "learning_rate": 3.1923644911909e-05, 
"loss": 0.7802, "step": 125 }, { "epoch": 0.48507462686567165, "grad_norm": 0.7753398709862848, "learning_rate": 3.0551762263406576e-05, "loss": 0.7718, "step": 130 }, { "epoch": 0.503731343283582, "grad_norm": 0.8263114166565009, "learning_rate": 2.9168211932412042e-05, "loss": 0.7694, "step": 135 }, { "epoch": 0.5223880597014925, "grad_norm": 0.5977157830288298, "learning_rate": 2.777828359242567e-05, "loss": 0.7638, "step": 140 }, { "epoch": 0.5410447761194029, "grad_norm": 0.6960446376964153, "learning_rate": 2.6387291301738377e-05, "loss": 0.7548, "step": 145 }, { "epoch": 0.5597014925373134, "grad_norm": 0.5170782529619016, "learning_rate": 2.50005531864019e-05, "loss": 0.7465, "step": 150 }, { "epoch": 0.5783582089552238, "grad_norm": 0.48159615421876123, "learning_rate": 2.362337110764688e-05, "loss": 0.7486, "step": 155 }, { "epoch": 0.5970149253731343, "grad_norm": 0.5432350714014745, "learning_rate": 2.226101039148557e-05, "loss": 0.741, "step": 160 }, { "epoch": 0.6156716417910447, "grad_norm": 0.4696819234168871, "learning_rate": 2.0918679697998252e-05, "loss": 0.74, "step": 165 }, { "epoch": 0.6343283582089553, "grad_norm": 0.46483548295455934, "learning_rate": 1.9601511107268255e-05, "loss": 0.7425, "step": 170 }, { "epoch": 0.6529850746268657, "grad_norm": 0.5530022968626128, "learning_rate": 1.8314540498102216e-05, "loss": 0.7338, "step": 175 }, { "epoch": 0.6716417910447762, "grad_norm": 0.4915552805413531, "learning_rate": 1.7062688294552992e-05, "loss": 0.7374, "step": 180 }, { "epoch": 0.6902985074626866, "grad_norm": 0.4452556265823543, "learning_rate": 1.5850740653856096e-05, "loss": 0.7268, "step": 185 }, { "epoch": 0.7089552238805971, "grad_norm": 0.46333067364783953, "learning_rate": 1.4683331167703218e-05, "loss": 0.7275, "step": 190 }, { "epoch": 0.7276119402985075, "grad_norm": 0.39997819847068644, "learning_rate": 1.356492314681356e-05, "loss": 0.7264, "step": 195 }, { "epoch": 0.746268656716418, "grad_norm": 0.405044297741591, "learning_rate": 1.2499792556533716e-05, "loss": 0.7238, "step": 200 }, { "epoch": 0.7649253731343284, "grad_norm": 0.42435372517376513, "learning_rate": 1.1492011668707753e-05, "loss": 0.7191, "step": 205 }, { "epoch": 0.7835820895522388, "grad_norm": 0.3991508216227003, "learning_rate": 1.0545433492320603e-05, "loss": 0.7192, "step": 210 }, { "epoch": 0.8022388059701493, "grad_norm": 0.4035129969039292, "learning_rate": 9.663677042440537e-06, "loss": 0.7175, "step": 215 }, { "epoch": 0.8208955223880597, "grad_norm": 0.3656087904784262, "learning_rate": 8.850113503781367e-06, "loss": 0.7129, "step": 220 }, { "epoch": 0.8395522388059702, "grad_norm": 0.3363321384740739, "learning_rate": 8.107853341784671e-06, "loss": 0.7197, "step": 225 }, { "epoch": 0.8582089552238806, "grad_norm": 1.564164016405774, "learning_rate": 7.439734410499752e-06, "loss": 0.716, "step": 230 }, { "epoch": 0.8768656716417911, "grad_norm": 0.38113827793929866, "learning_rate": 6.848311102728011e-06, "loss": 0.7114, "step": 235 }, { "epoch": 0.8955223880597015, "grad_norm": 0.35881991263382823, "learning_rate": 6.335844583913515e-06, "loss": 0.7117, "step": 240 }, { "epoch": 0.914179104477612, "grad_norm": 0.3389853140732799, "learning_rate": 5.904294147118193e-06, "loss": 0.7089, "step": 245 }, { "epoch": 0.9328358208955224, "grad_norm": 0.3017636686149359, "learning_rate": 5.555309722133842e-06, "loss": 0.7098, "step": 250 }, { "epoch": 0.9514925373134329, "grad_norm": 0.32612384040343395, "learning_rate": 5.290225567370509e-06, "loss": 0.7085, "step": 255 }, { 
"epoch": 0.9701492537313433, "grad_norm": 0.3215733932661547, "learning_rate": 5.110055168638854e-06, "loss": 0.7185, "step": 260 }, { "epoch": 0.9888059701492538, "grad_norm": 0.32235402928737444, "learning_rate": 5.0154873643297575e-06, "loss": 0.7153, "step": 265 }, { "epoch": 1.0, "step": 268, "total_flos": 590489283723264.0, "train_loss": 1.0968534403772496, "train_runtime": 7318.0016, "train_samples_per_second": 4.68, "train_steps_per_second": 0.037 } ], "logging_steps": 5, "max_steps": 268, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 590489283723264.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }