{
"best_metric": 10.364799499511719,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 0.11341083073433512,
"eval_steps": 25,
"global_step": 50,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0022682166146867026,
"grad_norm": 0.1346822828054428,
"learning_rate": 5e-05,
"loss": 10.3842,
"step": 1
},
{
"epoch": 0.0022682166146867026,
"eval_loss": 10.386616706848145,
"eval_runtime": 3.0546,
"eval_samples_per_second": 972.312,
"eval_steps_per_second": 121.784,
"step": 1
},
{
"epoch": 0.004536433229373405,
"grad_norm": 0.13732023537158966,
"learning_rate": 0.0001,
"loss": 10.3876,
"step": 2
},
{
"epoch": 0.006804649844060108,
"grad_norm": 0.14120781421661377,
"learning_rate": 9.989294616193017e-05,
"loss": 10.3875,
"step": 3
},
{
"epoch": 0.00907286645874681,
"grad_norm": 0.1419186294078827,
"learning_rate": 9.957224306869053e-05,
"loss": 10.3854,
"step": 4
},
{
"epoch": 0.011341083073433513,
"grad_norm": 0.14743481576442719,
"learning_rate": 9.903926402016153e-05,
"loss": 10.3863,
"step": 5
},
{
"epoch": 0.013609299688120215,
"grad_norm": 0.1470358669757843,
"learning_rate": 9.829629131445342e-05,
"loss": 10.3842,
"step": 6
},
{
"epoch": 0.01587751630280692,
"grad_norm": 0.1487945318222046,
"learning_rate": 9.73465064747553e-05,
"loss": 10.3833,
"step": 7
},
{
"epoch": 0.01814573291749362,
"grad_norm": 0.13862915337085724,
"learning_rate": 9.619397662556435e-05,
"loss": 10.3862,
"step": 8
},
{
"epoch": 0.020413949532180325,
"grad_norm": 0.14534485340118408,
"learning_rate": 9.484363707663442e-05,
"loss": 10.385,
"step": 9
},
{
"epoch": 0.022682166146867026,
"grad_norm": 0.14452332258224487,
"learning_rate": 9.330127018922194e-05,
"loss": 10.384,
"step": 10
},
{
"epoch": 0.02495038276155373,
"grad_norm": 0.13165196776390076,
"learning_rate": 9.157348061512727e-05,
"loss": 10.3834,
"step": 11
},
{
"epoch": 0.02721859937624043,
"grad_norm": 0.14375360310077667,
"learning_rate": 8.966766701456177e-05,
"loss": 10.3852,
"step": 12
},
{
"epoch": 0.029486815990927135,
"grad_norm": 0.16815604269504547,
"learning_rate": 8.759199037394887e-05,
"loss": 10.3775,
"step": 13
},
{
"epoch": 0.03175503260561384,
"grad_norm": 0.16887976229190826,
"learning_rate": 8.535533905932738e-05,
"loss": 10.3798,
"step": 14
},
{
"epoch": 0.03402324922030054,
"grad_norm": 0.17802490293979645,
"learning_rate": 8.296729075500344e-05,
"loss": 10.3792,
"step": 15
},
{
"epoch": 0.03629146583498724,
"grad_norm": 0.1856260597705841,
"learning_rate": 8.043807145043604e-05,
"loss": 10.3768,
"step": 16
},
{
"epoch": 0.03855968244967394,
"grad_norm": 0.1794416904449463,
"learning_rate": 7.777851165098012e-05,
"loss": 10.3793,
"step": 17
},
{
"epoch": 0.04082789906436065,
"grad_norm": 0.17807826399803162,
"learning_rate": 7.500000000000001e-05,
"loss": 10.3762,
"step": 18
},
{
"epoch": 0.04309611567904735,
"grad_norm": 0.182838574051857,
"learning_rate": 7.211443451095007e-05,
"loss": 10.3777,
"step": 19
},
{
"epoch": 0.04536433229373405,
"grad_norm": 0.18508730828762054,
"learning_rate": 6.91341716182545e-05,
"loss": 10.3753,
"step": 20
},
{
"epoch": 0.04763254890842075,
"grad_norm": 0.19818346202373505,
"learning_rate": 6.607197326515808e-05,
"loss": 10.3759,
"step": 21
},
{
"epoch": 0.04990076552310746,
"grad_norm": 0.17286446690559387,
"learning_rate": 6.294095225512603e-05,
"loss": 10.375,
"step": 22
},
{
"epoch": 0.05216898213779416,
"grad_norm": 0.17338807880878448,
"learning_rate": 5.9754516100806423e-05,
"loss": 10.3757,
"step": 23
},
{
"epoch": 0.05443719875248086,
"grad_norm": 0.18380731344223022,
"learning_rate": 5.6526309611002594e-05,
"loss": 10.3751,
"step": 24
},
{
"epoch": 0.05670541536716756,
"grad_norm": 0.19529420137405396,
"learning_rate": 5.327015646150716e-05,
"loss": 10.3706,
"step": 25
},
{
"epoch": 0.05670541536716756,
"eval_loss": 10.371033668518066,
"eval_runtime": 3.0382,
"eval_samples_per_second": 977.536,
"eval_steps_per_second": 122.439,
"step": 25
},
{
"epoch": 0.05897363198185427,
"grad_norm": 0.20476947724819183,
"learning_rate": 5e-05,
"loss": 10.3699,
"step": 26
},
{
"epoch": 0.06124184859654097,
"grad_norm": 0.23112817108631134,
"learning_rate": 4.6729843538492847e-05,
"loss": 10.3707,
"step": 27
},
{
"epoch": 0.06351006521122768,
"grad_norm": 0.23180252313613892,
"learning_rate": 4.347369038899744e-05,
"loss": 10.3711,
"step": 28
},
{
"epoch": 0.06577828182591437,
"grad_norm": 0.22128894925117493,
"learning_rate": 4.0245483899193595e-05,
"loss": 10.3708,
"step": 29
},
{
"epoch": 0.06804649844060108,
"grad_norm": 0.2330835610628128,
"learning_rate": 3.705904774487396e-05,
"loss": 10.3695,
"step": 30
},
{
"epoch": 0.07031471505528777,
"grad_norm": 0.2294759452342987,
"learning_rate": 3.392802673484193e-05,
"loss": 10.3689,
"step": 31
},
{
"epoch": 0.07258293166997448,
"grad_norm": 0.23074816167354584,
"learning_rate": 3.086582838174551e-05,
"loss": 10.3672,
"step": 32
},
{
"epoch": 0.07485114828466119,
"grad_norm": 0.2303849160671234,
"learning_rate": 2.7885565489049946e-05,
"loss": 10.3683,
"step": 33
},
{
"epoch": 0.07711936489934788,
"grad_norm": 0.22095413506031036,
"learning_rate": 2.500000000000001e-05,
"loss": 10.3697,
"step": 34
},
{
"epoch": 0.07938758151403459,
"grad_norm": 0.2163832187652588,
"learning_rate": 2.2221488349019903e-05,
"loss": 10.3694,
"step": 35
},
{
"epoch": 0.0816557981287213,
"grad_norm": 0.20412799715995789,
"learning_rate": 1.9561928549563968e-05,
"loss": 10.3699,
"step": 36
},
{
"epoch": 0.08392401474340799,
"grad_norm": 0.21430176496505737,
"learning_rate": 1.703270924499656e-05,
"loss": 10.3699,
"step": 37
},
{
"epoch": 0.0861922313580947,
"grad_norm": 0.240758016705513,
"learning_rate": 1.4644660940672627e-05,
"loss": 10.3665,
"step": 38
},
{
"epoch": 0.0884604479727814,
"grad_norm": 0.24734526872634888,
"learning_rate": 1.2408009626051137e-05,
"loss": 10.3648,
"step": 39
},
{
"epoch": 0.0907286645874681,
"grad_norm": 0.25556033849716187,
"learning_rate": 1.0332332985438248e-05,
"loss": 10.3649,
"step": 40
},
{
"epoch": 0.09299688120215481,
"grad_norm": 0.25022992491722107,
"learning_rate": 8.426519384872733e-06,
"loss": 10.3659,
"step": 41
},
{
"epoch": 0.0952650978168415,
"grad_norm": 0.2543710768222809,
"learning_rate": 6.698729810778065e-06,
"loss": 10.3659,
"step": 42
},
{
"epoch": 0.09753331443152821,
"grad_norm": 0.2404075562953949,
"learning_rate": 5.156362923365588e-06,
"loss": 10.3647,
"step": 43
},
{
"epoch": 0.09980153104621492,
"grad_norm": 0.24763478338718414,
"learning_rate": 3.8060233744356633e-06,
"loss": 10.365,
"step": 44
},
{
"epoch": 0.10206974766090161,
"grad_norm": 0.2456260770559311,
"learning_rate": 2.653493525244721e-06,
"loss": 10.3668,
"step": 45
},
{
"epoch": 0.10433796427558832,
"grad_norm": 0.23241014778614044,
"learning_rate": 1.70370868554659e-06,
"loss": 10.3673,
"step": 46
},
{
"epoch": 0.10660618089027502,
"grad_norm": 0.2225053906440735,
"learning_rate": 9.607359798384785e-07,
"loss": 10.3674,
"step": 47
},
{
"epoch": 0.10887439750496172,
"grad_norm": 0.21233010292053223,
"learning_rate": 4.277569313094809e-07,
"loss": 10.3674,
"step": 48
},
{
"epoch": 0.11114261411964843,
"grad_norm": 0.21536342799663544,
"learning_rate": 1.0705383806982606e-07,
"loss": 10.369,
"step": 49
},
{
"epoch": 0.11341083073433512,
"grad_norm": 0.2400255799293518,
"learning_rate": 0.0,
"loss": 10.3672,
"step": 50
},
{
"epoch": 0.11341083073433512,
"eval_loss": 10.364799499511719,
"eval_runtime": 3.032,
"eval_samples_per_second": 979.564,
"eval_steps_per_second": 122.693,
"step": 50
}
],
"logging_steps": 1,
"max_steps": 50,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 42781424615424.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}