{ "best_metric": 1.1070915460586548, "best_model_checkpoint": "miner_id_24/checkpoint-50", "epoch": 2.061224489795918, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.04081632653061224, "grad_norm": 3.8167569637298584, "learning_rate": 1e-05, "loss": 5.0013, "step": 1 }, { "epoch": 0.04081632653061224, "eval_loss": 1.616941213607788, "eval_runtime": 1.9737, "eval_samples_per_second": 21.279, "eval_steps_per_second": 5.573, "step": 1 }, { "epoch": 0.08163265306122448, "grad_norm": 4.669461250305176, "learning_rate": 2e-05, "loss": 5.8704, "step": 2 }, { "epoch": 0.12244897959183673, "grad_norm": 5.1267523765563965, "learning_rate": 3e-05, "loss": 6.4629, "step": 3 }, { "epoch": 0.16326530612244897, "grad_norm": 5.208806991577148, "learning_rate": 4e-05, "loss": 6.5763, "step": 4 }, { "epoch": 0.20408163265306123, "grad_norm": 6.084200382232666, "learning_rate": 5e-05, "loss": 6.3859, "step": 5 }, { "epoch": 0.24489795918367346, "grad_norm": 6.192852973937988, "learning_rate": 6e-05, "loss": 6.8691, "step": 6 }, { "epoch": 0.2857142857142857, "grad_norm": 2.8081114292144775, "learning_rate": 7e-05, "loss": 4.8936, "step": 7 }, { "epoch": 0.32653061224489793, "grad_norm": 3.0847012996673584, "learning_rate": 8e-05, "loss": 4.5358, "step": 8 }, { "epoch": 0.3673469387755102, "grad_norm": 3.113804817199707, "learning_rate": 9e-05, "loss": 5.5366, "step": 9 }, { "epoch": 0.40816326530612246, "grad_norm": 3.7368416786193848, "learning_rate": 0.0001, "loss": 5.2371, "step": 10 }, { "epoch": 0.4489795918367347, "grad_norm": 4.704006671905518, "learning_rate": 9.993977281025862e-05, "loss": 5.5426, "step": 11 }, { "epoch": 0.4897959183673469, "grad_norm": 5.045379638671875, "learning_rate": 9.975923633360985e-05, "loss": 5.5486, "step": 12 }, { "epoch": 0.5306122448979592, "grad_norm": 2.9507429599761963, "learning_rate": 9.945882549823906e-05, "loss": 4.5517, "step": 13 }, { "epoch": 0.5714285714285714, "grad_norm": 3.5386080741882324, "learning_rate": 9.903926402016153e-05, "loss": 4.6742, "step": 14 }, { "epoch": 0.6122448979591837, "grad_norm": 3.318535804748535, "learning_rate": 9.850156265972721e-05, "loss": 4.5647, "step": 15 }, { "epoch": 0.6530612244897959, "grad_norm": 3.5165586471557617, "learning_rate": 9.784701678661045e-05, "loss": 4.1373, "step": 16 }, { "epoch": 0.6938775510204082, "grad_norm": 3.11450457572937, "learning_rate": 9.707720325915104e-05, "loss": 4.8453, "step": 17 }, { "epoch": 0.7346938775510204, "grad_norm": 3.800767421722412, "learning_rate": 9.619397662556435e-05, "loss": 5.2269, "step": 18 }, { "epoch": 0.7755102040816326, "grad_norm": 2.5894737243652344, "learning_rate": 9.519946465617218e-05, "loss": 4.6343, "step": 19 }, { "epoch": 0.8163265306122449, "grad_norm": 3.455152988433838, "learning_rate": 9.409606321741775e-05, "loss": 4.6455, "step": 20 }, { "epoch": 0.8571428571428571, "grad_norm": 3.879774808883667, "learning_rate": 9.288643050001361e-05, "loss": 4.2191, "step": 21 }, { "epoch": 0.8979591836734694, "grad_norm": 3.647123336791992, "learning_rate": 9.157348061512727e-05, "loss": 5.1176, "step": 22 }, { "epoch": 0.9387755102040817, "grad_norm": 3.1560988426208496, "learning_rate": 9.016037657403224e-05, "loss": 4.787, "step": 23 }, { "epoch": 0.9795918367346939, "grad_norm": 4.0848212242126465, "learning_rate": 8.865052266813685e-05, "loss": 5.1071, "step": 24 }, { "epoch": 1.030612244897959, "grad_norm": 2.5242435932159424, 
"learning_rate": 8.704755626774796e-05, "loss": 4.1258, "step": 25 }, { "epoch": 1.0714285714285714, "grad_norm": 2.305724620819092, "learning_rate": 8.535533905932738e-05, "loss": 3.9269, "step": 26 }, { "epoch": 1.1122448979591837, "grad_norm": 2.2410366535186768, "learning_rate": 8.357794774235092e-05, "loss": 4.1793, "step": 27 }, { "epoch": 1.153061224489796, "grad_norm": 3.6255550384521484, "learning_rate": 8.171966420818228e-05, "loss": 3.9643, "step": 28 }, { "epoch": 1.193877551020408, "grad_norm": 2.4370410442352295, "learning_rate": 7.978496522462167e-05, "loss": 3.9714, "step": 29 }, { "epoch": 1.2346938775510203, "grad_norm": 3.172468423843384, "learning_rate": 7.777851165098012e-05, "loss": 4.4107, "step": 30 }, { "epoch": 1.2755102040816326, "grad_norm": 2.33469820022583, "learning_rate": 7.570513720966108e-05, "loss": 4.1162, "step": 31 }, { "epoch": 1.316326530612245, "grad_norm": 2.3233158588409424, "learning_rate": 7.35698368412999e-05, "loss": 4.0241, "step": 32 }, { "epoch": 1.3571428571428572, "grad_norm": 2.4508869647979736, "learning_rate": 7.137775467151411e-05, "loss": 3.6321, "step": 33 }, { "epoch": 1.3979591836734695, "grad_norm": 2.6886332035064697, "learning_rate": 6.91341716182545e-05, "loss": 3.9451, "step": 34 }, { "epoch": 1.4387755102040816, "grad_norm": 2.929544687271118, "learning_rate": 6.6844492669611e-05, "loss": 4.2282, "step": 35 }, { "epoch": 1.4795918367346939, "grad_norm": 3.1234920024871826, "learning_rate": 6.451423386272312e-05, "loss": 4.4053, "step": 36 }, { "epoch": 1.5204081632653061, "grad_norm": 2.7144057750701904, "learning_rate": 6.21490089951632e-05, "loss": 3.887, "step": 37 }, { "epoch": 1.5612244897959182, "grad_norm": 2.749767541885376, "learning_rate": 5.9754516100806423e-05, "loss": 3.7678, "step": 38 }, { "epoch": 1.6020408163265305, "grad_norm": 2.5833752155303955, "learning_rate": 5.733652372276809e-05, "loss": 4.1636, "step": 39 }, { "epoch": 1.6428571428571428, "grad_norm": 2.9307188987731934, "learning_rate": 5.490085701647805e-05, "loss": 3.7071, "step": 40 }, { "epoch": 1.683673469387755, "grad_norm": 3.1807944774627686, "learning_rate": 5.245338371637091e-05, "loss": 3.6077, "step": 41 }, { "epoch": 1.7244897959183674, "grad_norm": 3.5064828395843506, "learning_rate": 5e-05, "loss": 3.6712, "step": 42 }, { "epoch": 1.7653061224489797, "grad_norm": 2.263582468032837, "learning_rate": 4.7546616283629105e-05, "loss": 3.4453, "step": 43 }, { "epoch": 1.806122448979592, "grad_norm": 2.1443774700164795, "learning_rate": 4.509914298352197e-05, "loss": 3.3618, "step": 44 }, { "epoch": 1.8469387755102042, "grad_norm": 2.293882131576538, "learning_rate": 4.2663476277231915e-05, "loss": 3.4901, "step": 45 }, { "epoch": 1.8877551020408163, "grad_norm": 2.586204767227173, "learning_rate": 4.0245483899193595e-05, "loss": 3.9754, "step": 46 }, { "epoch": 1.9285714285714286, "grad_norm": 2.9233639240264893, "learning_rate": 3.785099100483681e-05, "loss": 4.1341, "step": 47 }, { "epoch": 1.9693877551020407, "grad_norm": 3.0968315601348877, "learning_rate": 3.5485766137276894e-05, "loss": 4.0113, "step": 48 }, { "epoch": 2.020408163265306, "grad_norm": 2.734189748764038, "learning_rate": 3.3155507330389e-05, "loss": 3.6672, "step": 49 }, { "epoch": 2.061224489795918, "grad_norm": 2.0895090103149414, "learning_rate": 3.086582838174551e-05, "loss": 3.2272, "step": 50 }, { "epoch": 2.061224489795918, "eval_loss": 1.1070915460586548, "eval_runtime": 2.0195, "eval_samples_per_second": 20.797, "eval_steps_per_second": 5.447, "step": 50 } ], 
"logging_steps": 1, "max_steps": 74, "num_input_tokens_seen": 0, "num_train_epochs": 4, "save_steps": 50, "stateful_callbacks": { "EarlyStoppingCallback": { "args": { "early_stopping_patience": 5, "early_stopping_threshold": 0.0 }, "attributes": { "early_stopping_patience_counter": 0 } }, "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 3.7584000516096e+16, "train_batch_size": 8, "trial_name": null, "trial_params": null }