{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.0, "eval_steps": 6, "global_step": 57, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.05263157894736842, "grad_norm": 211.82034301757812, "learning_rate": 1e-05, "loss": 0.5234, "step": 1 }, { "epoch": 0.10526315789473684, "grad_norm": 330236.46875, "learning_rate": 9.992134075089085e-06, "loss": 0.4781, "step": 2 }, { "epoch": 0.15789473684210525, "grad_norm": 3773.773681640625, "learning_rate": 9.968561049466214e-06, "loss": 0.5327, "step": 3 }, { "epoch": 0.21052631578947367, "grad_norm": 8766.265625, "learning_rate": 9.92935509259118e-06, "loss": 0.4726, "step": 4 }, { "epoch": 0.2631578947368421, "grad_norm": 633.0609130859375, "learning_rate": 9.874639560909118e-06, "loss": 0.4571, "step": 5 }, { "epoch": 0.3157894736842105, "grad_norm": 609995.625, "learning_rate": 9.804586609725499e-06, "loss": 0.5539, "step": 6 }, { "epoch": 0.3157894736842105, "eval_loss": 0.3873564898967743, "eval_runtime": 0.8662, "eval_samples_per_second": 2.309, "eval_steps_per_second": 1.154, "step": 6 }, { "epoch": 0.3684210526315789, "grad_norm": 578.82763671875, "learning_rate": 9.719416651541839e-06, "loss": 0.3949, "step": 7 }, { "epoch": 0.42105263157894735, "grad_norm": 227617.40625, "learning_rate": 9.619397662556434e-06, "loss": 0.5256, "step": 8 }, { "epoch": 0.47368421052631576, "grad_norm": 785.8093872070312, "learning_rate": 9.504844339512096e-06, "loss": 0.4265, "step": 9 }, { "epoch": 0.5263157894736842, "grad_norm": 354838.09375, "learning_rate": 9.376117109543769e-06, "loss": 0.6413, "step": 10 }, { "epoch": 0.5789473684210527, "grad_norm": 3835.27197265625, "learning_rate": 9.233620996141421e-06, "loss": 0.5344, "step": 11 }, { "epoch": 0.631578947368421, "grad_norm": 4633.39013671875, "learning_rate": 9.077804344796302e-06, "loss": 0.5069, "step": 12 }, { "epoch": 0.631578947368421, "eval_loss": 0.42546606063842773, "eval_runtime": 0.8681, "eval_samples_per_second": 2.304, "eval_steps_per_second": 1.152, "step": 12 }, { "epoch": 0.6842105263157895, "grad_norm": 2101.546142578125, "learning_rate": 8.90915741234015e-06, "loss": 0.5058, "step": 13 }, { "epoch": 0.7368421052631579, "grad_norm": 175295.453125, "learning_rate": 8.728210824415829e-06, "loss": 0.4587, "step": 14 }, { "epoch": 0.7894736842105263, "grad_norm": 3184.1103515625, "learning_rate": 8.535533905932739e-06, "loss": 0.614, "step": 15 }, { "epoch": 0.8421052631578947, "grad_norm": 496073.1875, "learning_rate": 8.331732889760021e-06, "loss": 0.644, "step": 16 }, { "epoch": 0.8947368421052632, "grad_norm": 11245.0107421875, "learning_rate": 8.117449009293668e-06, "loss": 0.5248, "step": 17 }, { "epoch": 0.9473684210526315, "grad_norm": 600.7078857421875, "learning_rate": 7.89335648089903e-06, "loss": 0.5634, "step": 18 }, { "epoch": 0.9473684210526315, "eval_loss": 0.47000423073768616, "eval_runtime": 0.8698, "eval_samples_per_second": 2.299, "eval_steps_per_second": 1.15, "step": 18 }, { "epoch": 1.0, "grad_norm": 1067464.375, "learning_rate": 7.660160382576683e-06, "loss": 0.655, "step": 19 }, { "epoch": 1.0526315789473684, "grad_norm": 7849.232421875, "learning_rate": 7.4185944355261996e-06, "loss": 0.5225, "step": 20 }, { "epoch": 1.1052631578947367, "grad_norm": 5074.62060546875, "learning_rate": 7.169418695587791e-06, "loss": 0.5764, "step": 21 }, { "epoch": 1.1578947368421053, "grad_norm": 755949.375, "learning_rate": 6.913417161825449e-06, "loss": 0.5878, "step": 22 }, { "epoch": 
1.2105263157894737, "grad_norm": 1128.499267578125, "learning_rate": 6.651395309775837e-06, "loss": 0.6382, "step": 23 }, { "epoch": 1.263157894736842, "grad_norm": 955.5560913085938, "learning_rate": 6.384177557124247e-06, "loss": 0.7207, "step": 24 }, { "epoch": 1.263157894736842, "eval_loss": 0.5061320066452026, "eval_runtime": 0.8698, "eval_samples_per_second": 2.299, "eval_steps_per_second": 1.15, "step": 24 }, { "epoch": 1.3157894736842106, "grad_norm": 10727177.0, "learning_rate": 6.112604669781572e-06, "loss": 0.5727, "step": 25 }, { "epoch": 1.368421052631579, "grad_norm": 835351.6875, "learning_rate": 5.837531116523683e-06, "loss": 0.6236, "step": 26 }, { "epoch": 1.4210526315789473, "grad_norm": 24849.080078125, "learning_rate": 5.559822380516539e-06, "loss": 0.5477, "step": 27 }, { "epoch": 1.4736842105263157, "grad_norm": 1144.915283203125, "learning_rate": 5.2803522361859596e-06, "loss": 0.7404, "step": 28 }, { "epoch": 1.526315789473684, "grad_norm": 43178.171875, "learning_rate": 5e-06, "loss": 0.5133, "step": 29 }, { "epoch": 1.5789473684210527, "grad_norm": 105640.3828125, "learning_rate": 4.719647763814041e-06, "loss": 0.6525, "step": 30 }, { "epoch": 1.5789473684210527, "eval_loss": 0.5270132422447205, "eval_runtime": 0.8703, "eval_samples_per_second": 2.298, "eval_steps_per_second": 1.149, "step": 30 }, { "epoch": 1.631578947368421, "grad_norm": 2033.227783203125, "learning_rate": 4.4401776194834615e-06, "loss": 0.538, "step": 31 }, { "epoch": 1.6842105263157894, "grad_norm": 679948.1875, "learning_rate": 4.162468883476319e-06, "loss": 0.6714, "step": 32 }, { "epoch": 1.736842105263158, "grad_norm": 220607.5, "learning_rate": 3.887395330218429e-06, "loss": 0.7117, "step": 33 }, { "epoch": 1.7894736842105263, "grad_norm": 13599.486328125, "learning_rate": 3.6158224428757538e-06, "loss": 0.5645, "step": 34 }, { "epoch": 1.8421052631578947, "grad_norm": 651100.1875, "learning_rate": 3.3486046902241663e-06, "loss": 0.716, "step": 35 }, { "epoch": 1.8947368421052633, "grad_norm": 575592.1875, "learning_rate": 3.0865828381745515e-06, "loss": 0.7435, "step": 36 }, { "epoch": 1.8947368421052633, "eval_loss": 0.5324785113334656, "eval_runtime": 0.8699, "eval_samples_per_second": 2.299, "eval_steps_per_second": 1.149, "step": 36 }, { "epoch": 1.9473684210526314, "grad_norm": 163141.171875, "learning_rate": 2.83058130441221e-06, "loss": 0.7309, "step": 37 }, { "epoch": 2.0, "grad_norm": 3034.2685546875, "learning_rate": 2.5814055644738013e-06, "loss": 0.5396, "step": 38 }, { "epoch": 2.0526315789473686, "grad_norm": 124152.5, "learning_rate": 2.339839617423318e-06, "loss": 0.6829, "step": 39 }, { "epoch": 2.1052631578947367, "grad_norm": 3723410.75, "learning_rate": 2.1066435191009717e-06, "loss": 0.5327, "step": 40 }, { "epoch": 2.1578947368421053, "grad_norm": 539427.625, "learning_rate": 1.8825509907063328e-06, "loss": 0.532, "step": 41 }, { "epoch": 2.2105263157894735, "grad_norm": 20275.404296875, "learning_rate": 1.6682671102399806e-06, "loss": 0.7488, "step": 42 }, { "epoch": 2.2105263157894735, "eval_loss": 0.5383380055427551, "eval_runtime": 0.8703, "eval_samples_per_second": 2.298, "eval_steps_per_second": 1.149, "step": 42 }, { "epoch": 2.263157894736842, "grad_norm": 505885.78125, "learning_rate": 1.4644660940672628e-06, "loss": 0.5959, "step": 43 }, { "epoch": 2.3157894736842106, "grad_norm": 74390.25, "learning_rate": 1.2717891755841722e-06, "loss": 0.5717, "step": 44 }, { "epoch": 2.3684210526315788, "grad_norm": 1850279.375, "learning_rate": 
1.0908425876598512e-06, "loss": 0.682, "step": 45 }, { "epoch": 2.4210526315789473, "grad_norm": 7982978.5, "learning_rate": 9.221956552036992e-07, "loss": 0.6275, "step": 46 }, { "epoch": 2.473684210526316, "grad_norm": 132265160.0, "learning_rate": 7.663790038585794e-07, "loss": 0.6225, "step": 47 }, { "epoch": 2.526315789473684, "grad_norm": 96502.90625, "learning_rate": 6.238828904562316e-07, "loss": 0.5786, "step": 48 }, { "epoch": 2.526315789473684, "eval_loss": 0.5181117057800293, "eval_runtime": 0.8701, "eval_samples_per_second": 2.299, "eval_steps_per_second": 1.149, "step": 48 }, { "epoch": 2.5789473684210527, "grad_norm": 697082.1875, "learning_rate": 4.951556604879049e-07, "loss": 0.6937, "step": 49 }, { "epoch": 2.6315789473684212, "grad_norm": 43987.10546875, "learning_rate": 3.8060233744356634e-07, "loss": 0.675, "step": 50 }, { "epoch": 2.6842105263157894, "grad_norm": 47614.27734375, "learning_rate": 2.8058334845816214e-07, "loss": 0.676, "step": 51 }, { "epoch": 2.736842105263158, "grad_norm": 79889032.0, "learning_rate": 1.9541339027450256e-07, "loss": 0.6332, "step": 52 }, { "epoch": 2.7894736842105265, "grad_norm": 1658783.125, "learning_rate": 1.253604390908819e-07, "loss": 0.6212, "step": 53 }, { "epoch": 2.8421052631578947, "grad_norm": 1998786.75, "learning_rate": 7.064490740882057e-08, "loss": 0.7325, "step": 54 }, { "epoch": 2.8421052631578947, "eval_loss": 0.5145716667175293, "eval_runtime": 0.871, "eval_samples_per_second": 2.296, "eval_steps_per_second": 1.148, "step": 54 }, { "epoch": 2.8947368421052633, "grad_norm": 58182.7734375, "learning_rate": 3.143895053378698e-08, "loss": 0.5281, "step": 55 }, { "epoch": 2.9473684210526314, "grad_norm": 854980864.0, "learning_rate": 7.865924910916977e-09, "loss": 0.7652, "step": 56 }, { "epoch": 3.0, "grad_norm": 4794.498046875, "learning_rate": 0.0, "loss": 0.66, "step": 57 }, { "epoch": 3.0, "step": 57, "total_flos": 8.713084645618483e+16, "train_loss": 0.597966728503244, "train_runtime": 375.3657, "train_samples_per_second": 1.183, "train_steps_per_second": 0.152 } ], "logging_steps": 1, "max_steps": 57, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 57, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 8.713084645618483e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }