{
  "best_metric": 3.224152088165283,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.08503401360544217,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0017006802721088435,
      "grad_norm": 0.24977613985538483,
      "learning_rate": 1e-05,
      "loss": 2.8692,
      "step": 1
    },
    {
      "epoch": 0.0017006802721088435,
      "eval_loss": 3.276529550552368,
      "eval_runtime": 37.2906,
      "eval_samples_per_second": 26.548,
      "eval_steps_per_second": 6.65,
      "step": 1
    },
    {
      "epoch": 0.003401360544217687,
      "grad_norm": 0.24531881511211395,
      "learning_rate": 2e-05,
      "loss": 3.0437,
      "step": 2
    },
    {
      "epoch": 0.00510204081632653,
      "grad_norm": 0.25416088104248047,
      "learning_rate": 3e-05,
      "loss": 3.0804,
      "step": 3
    },
    {
      "epoch": 0.006802721088435374,
      "grad_norm": 0.24987466633319855,
      "learning_rate": 4e-05,
      "loss": 3.0,
      "step": 4
    },
    {
      "epoch": 0.008503401360544218,
      "grad_norm": 0.2428084909915924,
      "learning_rate": 5e-05,
      "loss": 2.9788,
      "step": 5
    },
    {
      "epoch": 0.01020408163265306,
      "grad_norm": 0.24101322889328003,
      "learning_rate": 6e-05,
      "loss": 2.8507,
      "step": 6
    },
    {
      "epoch": 0.011904761904761904,
      "grad_norm": 0.2492375373840332,
      "learning_rate": 7e-05,
      "loss": 3.0046,
      "step": 7
    },
    {
      "epoch": 0.013605442176870748,
      "grad_norm": 0.2558836340904236,
      "learning_rate": 8e-05,
      "loss": 3.0009,
      "step": 8
    },
    {
      "epoch": 0.015306122448979591,
      "grad_norm": 0.2605135142803192,
      "learning_rate": 9e-05,
      "loss": 3.0991,
      "step": 9
    },
    {
      "epoch": 0.017006802721088437,
      "grad_norm": 0.24015140533447266,
      "learning_rate": 0.0001,
      "loss": 2.8682,
      "step": 10
    },
    {
      "epoch": 0.01870748299319728,
      "grad_norm": 0.23625339567661285,
      "learning_rate": 9.999316524962345e-05,
      "loss": 2.9808,
      "step": 11
    },
    {
      "epoch": 0.02040816326530612,
      "grad_norm": 0.2354581356048584,
      "learning_rate": 9.997266286704631e-05,
      "loss": 3.0725,
      "step": 12
    },
    {
      "epoch": 0.022108843537414966,
      "grad_norm": 0.23762401938438416,
      "learning_rate": 9.993849845741524e-05,
      "loss": 3.191,
      "step": 13
    },
    {
      "epoch": 0.023809523809523808,
      "grad_norm": 0.22220423817634583,
      "learning_rate": 9.989068136093873e-05,
      "loss": 2.852,
      "step": 14
    },
    {
      "epoch": 0.025510204081632654,
      "grad_norm": 0.2376808524131775,
      "learning_rate": 9.98292246503335e-05,
      "loss": 2.9857,
      "step": 15
    },
    {
      "epoch": 0.027210884353741496,
      "grad_norm": 0.2597469091415405,
      "learning_rate": 9.975414512725057e-05,
      "loss": 3.2452,
      "step": 16
    },
    {
      "epoch": 0.02891156462585034,
      "grad_norm": 0.25804516673088074,
      "learning_rate": 9.966546331768191e-05,
      "loss": 3.2068,
      "step": 17
    },
    {
      "epoch": 0.030612244897959183,
      "grad_norm": 0.2507534325122833,
      "learning_rate": 9.956320346634876e-05,
      "loss": 3.0956,
      "step": 18
    },
    {
      "epoch": 0.03231292517006803,
      "grad_norm": 0.2603234648704529,
      "learning_rate": 9.944739353007344e-05,
      "loss": 3.1878,
      "step": 19
    },
    {
      "epoch": 0.034013605442176874,
      "grad_norm": 0.2637031078338623,
      "learning_rate": 9.931806517013612e-05,
      "loss": 3.2389,
      "step": 20
    },
    {
      "epoch": 0.03571428571428571,
      "grad_norm": 0.2582753896713257,
      "learning_rate": 9.917525374361912e-05,
      "loss": 3.0519,
      "step": 21
    },
    {
      "epoch": 0.03741496598639456,
      "grad_norm": 0.2699371576309204,
      "learning_rate": 9.901899829374047e-05,
      "loss": 3.1635,
      "step": 22
    },
    {
      "epoch": 0.0391156462585034,
      "grad_norm": 0.2730785310268402,
      "learning_rate": 9.884934153917997e-05,
      "loss": 3.1656,
      "step": 23
    },
    {
      "epoch": 0.04081632653061224,
      "grad_norm": 0.2866193652153015,
      "learning_rate": 9.86663298624003e-05,
      "loss": 3.22,
      "step": 24
    },
    {
      "epoch": 0.04251700680272109,
      "grad_norm": 0.28086575865745544,
      "learning_rate": 9.847001329696653e-05,
      "loss": 2.948,
      "step": 25
    },
    {
      "epoch": 0.04421768707482993,
      "grad_norm": 0.2861677408218384,
      "learning_rate": 9.826044551386744e-05,
      "loss": 3.2173,
      "step": 26
    },
    {
      "epoch": 0.04591836734693878,
      "grad_norm": 0.2805924713611603,
      "learning_rate": 9.803768380684242e-05,
      "loss": 3.0343,
      "step": 27
    },
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 0.3162534832954407,
      "learning_rate": 9.780178907671789e-05,
      "loss": 3.313,
      "step": 28
    },
    {
      "epoch": 0.04931972789115646,
      "grad_norm": 0.3136957287788391,
      "learning_rate": 9.755282581475769e-05,
      "loss": 3.244,
      "step": 29
    },
    {
      "epoch": 0.05102040816326531,
      "grad_norm": 0.31942903995513916,
      "learning_rate": 9.729086208503174e-05,
      "loss": 3.2875,
      "step": 30
    },
    {
      "epoch": 0.05272108843537415,
      "grad_norm": 0.33962124586105347,
      "learning_rate": 9.701596950580806e-05,
      "loss": 3.3639,
      "step": 31
    },
    {
      "epoch": 0.05442176870748299,
      "grad_norm": 0.3294471502304077,
      "learning_rate": 9.672822322997305e-05,
      "loss": 3.3009,
      "step": 32
    },
    {
      "epoch": 0.05612244897959184,
      "grad_norm": 0.3405294418334961,
      "learning_rate": 9.642770192448536e-05,
      "loss": 3.3337,
      "step": 33
    },
    {
      "epoch": 0.05782312925170068,
      "grad_norm": 0.33866071701049805,
      "learning_rate": 9.611448774886924e-05,
      "loss": 3.3841,
      "step": 34
    },
    {
      "epoch": 0.05952380952380952,
      "grad_norm": 0.40185490250587463,
      "learning_rate": 9.578866633275288e-05,
      "loss": 3.3815,
      "step": 35
    },
    {
      "epoch": 0.061224489795918366,
      "grad_norm": 0.351744145154953,
      "learning_rate": 9.545032675245813e-05,
      "loss": 3.2629,
      "step": 36
    },
    {
      "epoch": 0.06292517006802721,
      "grad_norm": 0.36549699306488037,
      "learning_rate": 9.509956150664796e-05,
      "loss": 3.3077,
      "step": 37
    },
    {
      "epoch": 0.06462585034013606,
      "grad_norm": 0.3804144263267517,
      "learning_rate": 9.473646649103818e-05,
      "loss": 3.2124,
      "step": 38
    },
    {
      "epoch": 0.0663265306122449,
      "grad_norm": 0.3786104917526245,
      "learning_rate": 9.43611409721806e-05,
      "loss": 3.3519,
      "step": 39
    },
    {
      "epoch": 0.06802721088435375,
      "grad_norm": 0.3824675381183624,
      "learning_rate": 9.397368756032445e-05,
      "loss": 3.344,
      "step": 40
    },
    {
      "epoch": 0.06972789115646258,
      "grad_norm": 0.40192821621894836,
      "learning_rate": 9.357421218136386e-05,
      "loss": 3.197,
      "step": 41
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.3984317183494568,
      "learning_rate": 9.316282404787871e-05,
      "loss": 3.1668,
      "step": 42
    },
    {
      "epoch": 0.07312925170068027,
      "grad_norm": 0.4219413697719574,
      "learning_rate": 9.273963562927695e-05,
      "loss": 3.3366,
      "step": 43
    },
    {
      "epoch": 0.07482993197278912,
      "grad_norm": 0.5630561709403992,
      "learning_rate": 9.230476262104677e-05,
      "loss": 3.2803,
      "step": 44
    },
    {
      "epoch": 0.07653061224489796,
      "grad_norm": 0.5532132387161255,
      "learning_rate": 9.185832391312644e-05,
      "loss": 3.2889,
      "step": 45
    },
    {
      "epoch": 0.0782312925170068,
      "grad_norm": 0.46265870332717896,
      "learning_rate": 9.140044155740101e-05,
      "loss": 3.2862,
      "step": 46
    },
    {
      "epoch": 0.07993197278911565,
      "grad_norm": 0.5097538828849792,
      "learning_rate": 9.093124073433463e-05,
      "loss": 3.4773,
      "step": 47
    },
    {
      "epoch": 0.08163265306122448,
      "grad_norm": 0.6026643514633179,
      "learning_rate": 9.045084971874738e-05,
      "loss": 3.3722,
      "step": 48
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 0.5687397718429565,
      "learning_rate": 8.995939984474624e-05,
      "loss": 3.5381,
      "step": 49
    },
    {
      "epoch": 0.08503401360544217,
      "grad_norm": 0.596480667591095,
      "learning_rate": 8.945702546981969e-05,
      "loss": 3.6079,
      "step": 50
    },
    {
      "epoch": 0.08503401360544217,
      "eval_loss": 3.224152088165283,
      "eval_runtime": 37.6501,
      "eval_samples_per_second": 26.295,
      "eval_steps_per_second": 6.587,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.755333512921088e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}