{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 0.24, "eval_steps": 500, "global_step": 30, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.008, "grad_norm": 15.428410530090332, "learning_rate": 0.0, "loss": 5.2835, "step": 1 }, { "epoch": 0.016, "grad_norm": 15.426681518554688, "learning_rate": 4e-05, "loss": 5.2835, "step": 2 }, { "epoch": 0.024, "grad_norm": 16.885461807250977, "learning_rate": 8e-05, "loss": 4.9305, "step": 3 }, { "epoch": 0.032, "grad_norm": 8.955594062805176, "learning_rate": 0.00012, "loss": 3.6748, "step": 4 }, { "epoch": 0.04, "grad_norm": 9.647289276123047, "learning_rate": 0.00016, "loss": 2.6065, "step": 5 }, { "epoch": 0.048, "grad_norm": 6.1404500007629395, "learning_rate": 0.0002, "loss": 1.7512, "step": 6 }, { "epoch": 0.056, "grad_norm": 9.074440956115723, "learning_rate": 0.000192, "loss": 0.8787, "step": 7 }, { "epoch": 0.064, "grad_norm": 4.00243616104126, "learning_rate": 0.00018400000000000003, "loss": 0.4512, "step": 8 }, { "epoch": 0.072, "grad_norm": 1.3253382444381714, "learning_rate": 0.00017600000000000002, "loss": 0.0879, "step": 9 }, { "epoch": 0.08, "grad_norm": 1.162848949432373, "learning_rate": 0.000168, "loss": 0.0525, "step": 10 }, { "epoch": 0.088, "grad_norm": 1.1460442543029785, "learning_rate": 0.00016, "loss": 0.0412, "step": 11 }, { "epoch": 0.096, "grad_norm": 0.5914936661720276, "learning_rate": 0.000152, "loss": 0.0232, "step": 12 }, { "epoch": 0.104, "grad_norm": 0.40345266461372375, "learning_rate": 0.000144, "loss": 0.0131, "step": 13 }, { "epoch": 0.112, "grad_norm": 0.5011171698570251, "learning_rate": 0.00013600000000000003, "loss": 0.008, "step": 14 }, { "epoch": 0.12, "grad_norm": 0.4873741865158081, "learning_rate": 0.00012800000000000002, "loss": 0.0059, "step": 15 }, { "epoch": 0.128, "grad_norm": 0.27999016642570496, "learning_rate": 0.00012, "loss": 0.0032, "step": 16 }, { "epoch": 0.136, "grad_norm": 0.16314102709293365, "learning_rate": 0.00011200000000000001, "loss": 0.0018, "step": 17 }, { "epoch": 0.144, "grad_norm": 0.10664301365613937, "learning_rate": 0.00010400000000000001, "loss": 0.0012, "step": 18 }, { "epoch": 0.152, "grad_norm": 0.04609872028231621, "learning_rate": 9.6e-05, "loss": 0.0007, "step": 19 }, { "epoch": 0.16, "grad_norm": 0.03019017167389393, "learning_rate": 8.800000000000001e-05, "loss": 0.0005, "step": 20 }, { "epoch": 0.168, "grad_norm": 0.01985774375498295, "learning_rate": 8e-05, "loss": 0.0004, "step": 21 }, { "epoch": 0.176, "grad_norm": 0.015222536399960518, "learning_rate": 7.2e-05, "loss": 0.0003, "step": 22 }, { "epoch": 0.184, "grad_norm": 0.012847527861595154, "learning_rate": 6.400000000000001e-05, "loss": 0.0003, "step": 23 }, { "epoch": 0.192, "grad_norm": 0.011019011028110981, "learning_rate": 5.6000000000000006e-05, "loss": 0.0002, "step": 24 }, { "epoch": 0.2, "grad_norm": 0.009830518625676632, "learning_rate": 4.8e-05, "loss": 0.0002, "step": 25 }, { "epoch": 0.208, "grad_norm": 0.008783610537648201, "learning_rate": 4e-05, "loss": 0.0002, "step": 26 }, { "epoch": 0.216, "grad_norm": 0.009143549017608166, "learning_rate": 3.2000000000000005e-05, "loss": 0.0002, "step": 27 }, { "epoch": 0.224, "grad_norm": 0.009110250510275364, "learning_rate": 2.4e-05, "loss": 0.0002, "step": 28 }, { "epoch": 0.232, "grad_norm": 0.00845172069966793, "learning_rate": 1.6000000000000003e-05, "loss": 0.0002, "step": 29 }, { "epoch": 0.24, "grad_norm": 0.008985675871372223, 
"learning_rate": 8.000000000000001e-06, "loss": 0.0002, "step": 30 } ], "logging_steps": 1, "max_steps": 30, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 525137523027840.0, "train_batch_size": 1, "trial_name": null, "trial_params": null }