{
  "best_metric": 1.809238076210022,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.18518518518518517,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003703703703703704,
      "grad_norm": 4.116030693054199,
      "learning_rate": 5e-05,
      "loss": 4.0241,
      "step": 1
    },
    {
      "epoch": 0.003703703703703704,
      "eval_loss": 4.521655082702637,
      "eval_runtime": 1.4574,
      "eval_samples_per_second": 34.309,
      "eval_steps_per_second": 8.92,
      "step": 1
    },
    {
      "epoch": 0.007407407407407408,
      "grad_norm": 4.121915340423584,
      "learning_rate": 0.0001,
      "loss": 4.2284,
      "step": 2
    },
    {
      "epoch": 0.011111111111111112,
      "grad_norm": 4.936906337738037,
      "learning_rate": 9.990365154573717e-05,
      "loss": 4.2735,
      "step": 3
    },
    {
      "epoch": 0.014814814814814815,
      "grad_norm": 2.855342388153076,
      "learning_rate": 9.961501876182148e-05,
      "loss": 3.5536,
      "step": 4
    },
    {
      "epoch": 0.018518518518518517,
      "grad_norm": 2.5260751247406006,
      "learning_rate": 9.913533761814537e-05,
      "loss": 3.3432,
      "step": 5
    },
    {
      "epoch": 0.022222222222222223,
      "grad_norm": 2.948049783706665,
      "learning_rate": 9.846666218300807e-05,
      "loss": 2.8829,
      "step": 6
    },
    {
      "epoch": 0.025925925925925925,
      "grad_norm": 3.049560546875,
      "learning_rate": 9.761185582727977e-05,
      "loss": 2.7401,
      "step": 7
    },
    {
      "epoch": 0.02962962962962963,
      "grad_norm": 2.5307116508483887,
      "learning_rate": 9.657457896300791e-05,
      "loss": 2.5649,
      "step": 8
    },
    {
      "epoch": 0.03333333333333333,
      "grad_norm": 3.314162492752075,
      "learning_rate": 9.535927336897098e-05,
      "loss": 2.5844,
      "step": 9
    },
    {
      "epoch": 0.037037037037037035,
      "grad_norm": 2.8759920597076416,
      "learning_rate": 9.397114317029975e-05,
      "loss": 2.3834,
      "step": 10
    },
    {
      "epoch": 0.040740740740740744,
      "grad_norm": 1.3312515020370483,
      "learning_rate": 9.241613255361455e-05,
      "loss": 2.2634,
      "step": 11
    },
    {
      "epoch": 0.044444444444444446,
      "grad_norm": 2.3702969551086426,
      "learning_rate": 9.070090031310558e-05,
      "loss": 2.0426,
      "step": 12
    },
    {
      "epoch": 0.04814814814814815,
      "grad_norm": 2.423916816711426,
      "learning_rate": 8.883279133655399e-05,
      "loss": 2.2698,
      "step": 13
    },
    {
      "epoch": 0.05185185185185185,
      "grad_norm": 1.5132427215576172,
      "learning_rate": 8.681980515339464e-05,
      "loss": 2.3085,
      "step": 14
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 1.1343423128128052,
      "learning_rate": 8.467056167950311e-05,
      "loss": 2.2665,
      "step": 15
    },
    {
      "epoch": 0.05925925925925926,
      "grad_norm": 1.2370033264160156,
      "learning_rate": 8.239426430539243e-05,
      "loss": 2.0012,
      "step": 16
    },
    {
      "epoch": 0.06296296296296296,
      "grad_norm": 1.340045690536499,
      "learning_rate": 8.000066048588211e-05,
      "loss": 2.1011,
      "step": 17
    },
    {
      "epoch": 0.06666666666666667,
      "grad_norm": 1.0047345161437988,
      "learning_rate": 7.75e-05,
      "loss": 2.2134,
      "step": 18
    },
    {
      "epoch": 0.07037037037037037,
      "grad_norm": 0.9375550150871277,
      "learning_rate": 7.490299105985507e-05,
      "loss": 2.0775,
      "step": 19
    },
    {
      "epoch": 0.07407407407407407,
      "grad_norm": 1.221209168434143,
      "learning_rate": 7.222075445642904e-05,
      "loss": 2.0637,
      "step": 20
    },
    {
      "epoch": 0.07777777777777778,
      "grad_norm": 0.9997118711471558,
      "learning_rate": 6.946477593864228e-05,
      "loss": 2.0458,
      "step": 21
    },
    {
      "epoch": 0.08148148148148149,
      "grad_norm": 0.9523096084594727,
      "learning_rate": 6.664685702961344e-05,
      "loss": 2.0679,
      "step": 22
    },
    {
      "epoch": 0.08518518518518518,
      "grad_norm": 0.9293127655982971,
      "learning_rate": 6.377906449072578e-05,
      "loss": 1.9178,
      "step": 23
    },
    {
      "epoch": 0.08888888888888889,
      "grad_norm": 0.7721121907234192,
      "learning_rate": 6.087367864990233e-05,
      "loss": 2.0166,
      "step": 24
    },
    {
      "epoch": 0.09259259259259259,
      "grad_norm": 1.129605770111084,
      "learning_rate": 5.794314081535644e-05,
      "loss": 1.8354,
      "step": 25
    },
    {
      "epoch": 0.09259259259259259,
      "eval_loss": 1.8420437574386597,
      "eval_runtime": 0.7521,
      "eval_samples_per_second": 66.48,
      "eval_steps_per_second": 17.285,
      "step": 25
    },
    {
      "epoch": 0.0962962962962963,
      "grad_norm": 1.1217167377471924,
      "learning_rate": 5.500000000000001e-05,
      "loss": 2.1233,
      "step": 26
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.8602536916732788,
      "learning_rate": 5.205685918464356e-05,
      "loss": 2.1363,
      "step": 27
    },
    {
      "epoch": 0.1037037037037037,
      "grad_norm": 0.8962283134460449,
      "learning_rate": 4.912632135009769e-05,
      "loss": 2.174,
      "step": 28
    },
    {
      "epoch": 0.10740740740740741,
      "grad_norm": 0.8276489973068237,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 2.0971,
      "step": 29
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.8067771196365356,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 2.0047,
      "step": 30
    },
    {
      "epoch": 0.11481481481481481,
      "grad_norm": 0.7078531980514526,
      "learning_rate": 4.053522406135775e-05,
      "loss": 2.1274,
      "step": 31
    },
    {
      "epoch": 0.11851851851851852,
      "grad_norm": 1.049298644065857,
      "learning_rate": 3.777924554357096e-05,
      "loss": 1.9293,
      "step": 32
    },
    {
      "epoch": 0.12222222222222222,
      "grad_norm": 0.8572384715080261,
      "learning_rate": 3.509700894014496e-05,
      "loss": 1.8519,
      "step": 33
    },
    {
      "epoch": 0.1259259259259259,
      "grad_norm": 0.7951518297195435,
      "learning_rate": 3.250000000000001e-05,
      "loss": 1.9323,
      "step": 34
    },
    {
      "epoch": 0.12962962962962962,
      "grad_norm": 0.7671780586242676,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 1.8821,
      "step": 35
    },
    {
      "epoch": 0.13333333333333333,
      "grad_norm": 0.7220433354377747,
      "learning_rate": 2.760573569460757e-05,
      "loss": 1.8686,
      "step": 36
    },
    {
      "epoch": 0.13703703703703704,
      "grad_norm": 0.8542306423187256,
      "learning_rate": 2.53294383204969e-05,
      "loss": 1.8169,
      "step": 37
    },
    {
      "epoch": 0.14074074074074075,
      "grad_norm": 0.8364683985710144,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 2.012,
      "step": 38
    },
    {
      "epoch": 0.14444444444444443,
      "grad_norm": 0.7114874124526978,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 2.0428,
      "step": 39
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 0.740842878818512,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 2.0602,
      "step": 40
    },
    {
      "epoch": 0.15185185185185185,
      "grad_norm": 0.8619032502174377,
      "learning_rate": 1.758386744638546e-05,
      "loss": 2.2465,
      "step": 41
    },
    {
      "epoch": 0.15555555555555556,
      "grad_norm": 0.7360240817070007,
      "learning_rate": 1.602885682970026e-05,
      "loss": 2.0157,
      "step": 42
    },
    {
      "epoch": 0.15925925925925927,
      "grad_norm": 0.6555927991867065,
      "learning_rate": 1.464072663102903e-05,
      "loss": 1.8632,
      "step": 43
    },
    {
      "epoch": 0.16296296296296298,
      "grad_norm": 0.7459237575531006,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 2.0581,
      "step": 44
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 0.845435619354248,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 2.1435,
      "step": 45
    },
    {
      "epoch": 0.17037037037037037,
      "grad_norm": 0.7518559098243713,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 2.0249,
      "step": 46
    },
    {
      "epoch": 0.17407407407407408,
      "grad_norm": 0.6932052373886108,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 1.9508,
      "step": 47
    },
    {
      "epoch": 0.17777777777777778,
      "grad_norm": 0.6623319387435913,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 1.8047,
      "step": 48
    },
    {
      "epoch": 0.1814814814814815,
      "grad_norm": 0.7716500759124756,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 1.9208,
      "step": 49
    },
    {
      "epoch": 0.18518518518518517,
      "grad_norm": 0.8495790362358093,
      "learning_rate": 1e-05,
      "loss": 1.764,
      "step": 50
    },
    {
      "epoch": 0.18518518518518517,
      "eval_loss": 1.809238076210022,
      "eval_runtime": 0.7465,
      "eval_samples_per_second": 66.983,
      "eval_steps_per_second": 17.416,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.059536353886208e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}