|
{ |
|
  "best_metric": null,
|
"best_model_checkpoint": "miner_id_24/checkpoint-25", |
|
"epoch": 1.2121212121212122, |
|
"eval_steps": 25, |
|
"global_step": 50, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.024242424242424242, |
|
"grad_norm": 27.580669403076172, |
|
"learning_rate": 0.0001, |
|
"loss": 7.2765, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.024242424242424242, |
|
      "eval_loss": null,
|
"eval_runtime": 2.228, |
|
"eval_samples_per_second": 8.079, |
|
"eval_steps_per_second": 4.04, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.048484848484848485, |
|
"grad_norm": 23.396574020385742, |
|
"learning_rate": 0.0002, |
|
"loss": 7.3094, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.07272727272727272, |
|
"grad_norm": 27.01788902282715, |
|
"learning_rate": 0.00019978589232386035, |
|
"loss": 9.0047, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.09696969696969697, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00019914448613738106, |
|
"loss": 0.0, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.12121212121212122, |
|
"grad_norm": 35.79210662841797, |
|
"learning_rate": 0.00019807852804032305, |
|
"loss": 5.509, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.14545454545454545, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00019659258262890683, |
|
"loss": 0.0, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.1696969696969697, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.0001946930129495106, |
|
"loss": 0.0, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.19393939393939394, |
|
"grad_norm": 25.79961395263672, |
|
"learning_rate": 0.0001923879532511287, |
|
"loss": 5.2964, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.21818181818181817, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00018968727415326884, |
|
"loss": 0.0, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.24242424242424243, |
|
"grad_norm": 29.951295852661133, |
|
"learning_rate": 0.00018660254037844388, |
|
"loss": 2.8452, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.26666666666666666, |
|
"grad_norm": 32.658363342285156, |
|
"learning_rate": 0.00018314696123025454, |
|
"loss": 3.1162, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.2909090909090909, |
|
"grad_norm": 17.688011169433594, |
|
"learning_rate": 0.00017933533402912354, |
|
"loss": 2.3919, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.3151515151515151, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00017518398074789775, |
|
"loss": 0.0, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.3393939393939394, |
|
"grad_norm": 7.906186580657959, |
|
"learning_rate": 0.00017071067811865476, |
|
"loss": 1.345, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.36363636363636365, |
|
"grad_norm": 7.411261081695557, |
|
"learning_rate": 0.00016593458151000688, |
|
"loss": 0.9667, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.3878787878787879, |
|
"grad_norm": 7.091380596160889, |
|
"learning_rate": 0.00016087614290087208, |
|
"loss": 0.6503, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.4121212121212121, |
|
"grad_norm": 9.68917465209961, |
|
"learning_rate": 0.00015555702330196023, |
|
"loss": 0.956, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.43636363636363634, |
|
"grad_norm": 8.627195358276367, |
|
"learning_rate": 0.00015000000000000001, |
|
"loss": 0.7758, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.46060606060606063, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00014422886902190014, |
|
"loss": 0.0, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.48484848484848486, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.000138268343236509, |
|
"loss": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.509090909090909, |
|
"grad_norm": 10.164412498474121, |
|
"learning_rate": 0.00013214394653031616, |
|
"loss": 0.9879, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.5333333333333333, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00012588190451025207, |
|
"loss": 0.0, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.5575757575757576, |
|
"grad_norm": 8.16072940826416, |
|
"learning_rate": 0.00011950903220161285, |
|
"loss": 0.4841, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.5818181818181818, |
|
"grad_norm": 13.351353645324707, |
|
"learning_rate": 0.00011305261922200519, |
|
"loss": 0.9616, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.6060606060606061, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.00010654031292301432, |
|
"loss": 0.0, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.6060606060606061, |
|
      "eval_loss": null,
|
"eval_runtime": 2.2355, |
|
"eval_samples_per_second": 8.052, |
|
"eval_steps_per_second": 4.026, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.6303030303030303, |
|
"grad_norm": 0.0, |
|
"learning_rate": 0.0001, |
|
"loss": 0.0, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.6545454545454545, |
|
"grad_norm": 13.499174118041992, |
|
"learning_rate": 9.345968707698569e-05, |
|
"loss": 1.0393, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.6787878787878788, |
|
"grad_norm": 6.368555068969727, |
|
"learning_rate": 8.694738077799488e-05, |
|
"loss": 0.3086, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.703030303030303, |
|
"grad_norm": 13.0841064453125, |
|
"learning_rate": 8.049096779838719e-05, |
|
"loss": 0.9051, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.7272727272727273, |
|
"grad_norm": 13.383321762084961, |
|
"learning_rate": 7.411809548974792e-05, |
|
"loss": 1.1452, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.7515151515151515, |
|
"grad_norm": 0.0, |
|
"learning_rate": 6.785605346968386e-05, |
|
"loss": 0.0, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.7757575757575758, |
|
"grad_norm": 0.0, |
|
"learning_rate": 6.173165676349103e-05, |
|
"loss": 0.0, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.8, |
|
"grad_norm": 8.7849702835083, |
|
"learning_rate": 5.577113097809989e-05, |
|
"loss": 0.8124, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.8242424242424242, |
|
"grad_norm": 16.79686737060547, |
|
"learning_rate": 5.000000000000002e-05, |
|
"loss": 0.5349, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.8484848484848485, |
|
"grad_norm": 0.0, |
|
"learning_rate": 4.444297669803981e-05, |
|
"loss": 0.0, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.8727272727272727, |
|
"grad_norm": 14.058218002319336, |
|
"learning_rate": 3.9123857099127936e-05, |
|
"loss": 0.8506, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.896969696969697, |
|
"grad_norm": 11.131260871887207, |
|
"learning_rate": 3.406541848999312e-05, |
|
"loss": 1.0009, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.9212121212121213, |
|
"grad_norm": 16.038894653320312, |
|
"learning_rate": 2.9289321881345254e-05, |
|
"loss": 0.6914, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.9454545454545454, |
|
"grad_norm": 15.095179557800293, |
|
"learning_rate": 2.4816019252102273e-05, |
|
"loss": 0.6247, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.9696969696969697, |
|
"grad_norm": 11.539749145507812, |
|
"learning_rate": 2.0664665970876496e-05, |
|
"loss": 0.8336, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.9939393939393939, |
|
"grad_norm": 13.128904342651367, |
|
"learning_rate": 1.6853038769745467e-05, |
|
"loss": 1.1426, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 1.018181818181818, |
|
"grad_norm": 10.853378295898438, |
|
"learning_rate": 1.339745962155613e-05, |
|
"loss": 6.4015, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 1.0424242424242425, |
|
"grad_norm": 9.659259796142578, |
|
"learning_rate": 1.0312725846731175e-05, |
|
"loss": 0.4573, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 1.0666666666666667, |
|
"grad_norm": 0.0, |
|
"learning_rate": 7.612046748871327e-06, |
|
"loss": 0.0, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 1.0909090909090908, |
|
"grad_norm": 0.0, |
|
"learning_rate": 5.306987050489442e-06, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 1.1151515151515152, |
|
"grad_norm": 4.810526371002197, |
|
"learning_rate": 3.40741737109318e-06, |
|
"loss": 1.6521, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 1.1393939393939394, |
|
"grad_norm": 0.0, |
|
"learning_rate": 1.921471959676957e-06, |
|
"loss": 0.0, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 1.1636363636363636, |
|
"grad_norm": 13.763459205627441, |
|
"learning_rate": 8.555138626189618e-07, |
|
"loss": 0.7686, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 1.187878787878788, |
|
"grad_norm": 10.437994003295898, |
|
"learning_rate": 2.141076761396521e-07, |
|
"loss": 0.7031, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 1.2121212121212122, |
|
"grad_norm": 7.7976579666137695, |
|
"learning_rate": 0.0, |
|
"loss": 0.4385, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 1.2121212121212122, |
|
      "eval_loss": null,
|
"eval_runtime": 2.2391, |
|
"eval_samples_per_second": 8.039, |
|
"eval_steps_per_second": 4.019, |
|
"step": 50 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 50, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 2, |
|
"save_steps": 25, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 1, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 1 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 1.30626883682304e+16, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|