{
  "best_metric": 3.2546098232269287,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
  "epoch": 3.0651162790697675,
  "eval_steps": 25,
  "global_step": 41,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07441860465116279,
      "grad_norm": 2.020779609680176,
      "learning_rate": 5e-05,
      "loss": 4.1328,
      "step": 1
    },
    {
      "epoch": 0.07441860465116279,
      "eval_loss": 4.533937931060791,
      "eval_runtime": 1.7261,
      "eval_samples_per_second": 52.72,
      "eval_steps_per_second": 6.952,
      "step": 1
    },
    {
      "epoch": 0.14883720930232558,
      "grad_norm": 2.7467870712280273,
      "learning_rate": 0.0001,
      "loss": 4.4258,
      "step": 2
    },
    {
      "epoch": 0.22325581395348837,
      "grad_norm": 2.7919416427612305,
      "learning_rate": 9.983786540671051e-05,
      "loss": 4.792,
      "step": 3
    },
    {
      "epoch": 0.29767441860465116,
      "grad_norm": 1.701142430305481,
      "learning_rate": 9.935251313189564e-05,
      "loss": 3.9521,
      "step": 4
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 1.7278472185134888,
      "learning_rate": 9.85470908713026e-05,
      "loss": 4.0736,
      "step": 5
    },
    {
      "epoch": 0.44651162790697674,
      "grad_norm": 1.9098764657974243,
      "learning_rate": 9.742682209735727e-05,
      "loss": 4.1726,
      "step": 6
    },
    {
      "epoch": 0.5209302325581395,
      "grad_norm": 1.3584764003753662,
      "learning_rate": 9.599897218294122e-05,
      "loss": 3.8425,
      "step": 7
    },
    {
      "epoch": 0.5953488372093023,
      "grad_norm": 1.167802095413208,
      "learning_rate": 9.42728012826605e-05,
      "loss": 3.6163,
      "step": 8
    },
    {
      "epoch": 0.6697674418604651,
      "grad_norm": 1.3454344272613525,
      "learning_rate": 9.225950427718975e-05,
      "loss": 3.883,
      "step": 9
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 1.2075648307800293,
      "learning_rate": 8.997213817017507e-05,
      "loss": 3.8244,
      "step": 10
    },
    {
      "epoch": 0.8186046511627907,
      "grad_norm": 1.0436967611312866,
      "learning_rate": 8.742553740855506e-05,
      "loss": 3.5138,
      "step": 11
    },
    {
      "epoch": 0.8930232558139535,
      "grad_norm": 1.1808282136917114,
      "learning_rate": 8.463621767547998e-05,
      "loss": 3.5784,
      "step": 12
    },
    {
      "epoch": 0.9674418604651163,
      "grad_norm": 1.4680527448654175,
      "learning_rate": 8.162226877976887e-05,
      "loss": 3.7168,
      "step": 13
    },
    {
      "epoch": 1.0465116279069768,
      "grad_norm": 1.8288421630859375,
      "learning_rate": 7.840323733655778e-05,
      "loss": 5.6196,
      "step": 14
    },
    {
      "epoch": 1.1209302325581396,
      "grad_norm": 3.125596284866333,
      "learning_rate": 7.500000000000001e-05,
      "loss": 3.1786,
      "step": 15
    },
    {
      "epoch": 1.1953488372093024,
      "grad_norm": 1.9149749279022217,
      "learning_rate": 7.143462807015271e-05,
      "loss": 3.3919,
      "step": 16
    },
    {
      "epoch": 1.2697674418604652,
      "grad_norm": 1.4515650272369385,
      "learning_rate": 6.773024435212678e-05,
      "loss": 3.0889,
      "step": 17
    },
    {
      "epoch": 1.344186046511628,
      "grad_norm": 1.6954460144042969,
      "learning_rate": 6.391087319582264e-05,
      "loss": 3.311,
      "step": 18
    },
    {
      "epoch": 1.4186046511627908,
      "grad_norm": 1.7925573587417603,
      "learning_rate": 6.0001284688802226e-05,
      "loss": 3.3055,
      "step": 19
    },
    {
      "epoch": 1.4930232558139536,
      "grad_norm": 1.7305032014846802,
      "learning_rate": 5.602683401276615e-05,
      "loss": 2.6646,
      "step": 20
    },
    {
      "epoch": 1.5674418604651161,
      "grad_norm": 1.7365140914916992,
      "learning_rate": 5.201329700547076e-05,
      "loss": 3.674,
      "step": 21
    },
    {
      "epoch": 1.6418604651162791,
      "grad_norm": 1.725070834159851,
      "learning_rate": 4.798670299452926e-05,
      "loss": 3.1755,
      "step": 22
    },
    {
      "epoch": 1.7162790697674417,
      "grad_norm": 1.5631095170974731,
      "learning_rate": 4.397316598723385e-05,
      "loss": 2.7024,
      "step": 23
    },
    {
      "epoch": 1.7906976744186047,
      "grad_norm": 15.420148849487305,
      "learning_rate": 3.9998715311197785e-05,
      "loss": 3.6947,
      "step": 24
    },
    {
      "epoch": 1.8651162790697673,
      "grad_norm": 1.6800768375396729,
      "learning_rate": 3.608912680417737e-05,
      "loss": 3.1303,
      "step": 25
    },
    {
      "epoch": 1.8651162790697673,
      "eval_loss": 3.2546098232269287,
      "eval_runtime": 1.7251,
      "eval_samples_per_second": 52.752,
      "eval_steps_per_second": 6.956,
      "step": 25
    },
    {
      "epoch": 1.9395348837209303,
      "grad_norm": 2.245436191558838,
      "learning_rate": 3.226975564787322e-05,
      "loss": 3.2327,
      "step": 26
    },
    {
      "epoch": 2.0186046511627906,
      "grad_norm": 2.354249954223633,
      "learning_rate": 2.8565371929847284e-05,
      "loss": 5.0536,
      "step": 27
    },
    {
      "epoch": 2.0930232558139537,
      "grad_norm": 1.1819117069244385,
      "learning_rate": 2.500000000000001e-05,
      "loss": 3.0264,
      "step": 28
    },
    {
      "epoch": 2.167441860465116,
      "grad_norm": 1.337063193321228,
      "learning_rate": 2.1596762663442218e-05,
      "loss": 3.1343,
      "step": 29
    },
    {
      "epoch": 2.2418604651162792,
      "grad_norm": 1.1551506519317627,
      "learning_rate": 1.837773122023114e-05,
      "loss": 2.6179,
      "step": 30
    },
    {
      "epoch": 2.316279069767442,
      "grad_norm": 1.2832993268966675,
      "learning_rate": 1.536378232452003e-05,
      "loss": 3.5282,
      "step": 31
    },
    {
      "epoch": 2.390697674418605,
      "grad_norm": 1.2016886472702026,
      "learning_rate": 1.257446259144494e-05,
      "loss": 3.0694,
      "step": 32
    },
    {
      "epoch": 2.4651162790697674,
      "grad_norm": 1.090936541557312,
      "learning_rate": 1.0027861829824952e-05,
      "loss": 2.6476,
      "step": 33
    },
    {
      "epoch": 2.5395348837209304,
      "grad_norm": 1.5232198238372803,
      "learning_rate": 7.740495722810271e-06,
      "loss": 3.3762,
      "step": 34
    },
    {
      "epoch": 2.613953488372093,
      "grad_norm": 1.120077133178711,
      "learning_rate": 5.727198717339511e-06,
      "loss": 2.9806,
      "step": 35
    },
    {
      "epoch": 2.688372093023256,
      "grad_norm": 1.069722056388855,
      "learning_rate": 4.001027817058789e-06,
      "loss": 2.9037,
      "step": 36
    },
    {
      "epoch": 2.7627906976744185,
      "grad_norm": 1.2132554054260254,
      "learning_rate": 2.573177902642726e-06,
      "loss": 3.2992,
      "step": 37
    },
    {
      "epoch": 2.8372093023255816,
      "grad_norm": 1.175353765487671,
      "learning_rate": 1.4529091286973995e-06,
      "loss": 2.9901,
      "step": 38
    },
    {
      "epoch": 2.911627906976744,
      "grad_norm": 1.4980123043060303,
      "learning_rate": 6.474868681043578e-07,
      "loss": 3.1414,
      "step": 39
    },
    {
      "epoch": 2.986046511627907,
      "grad_norm": 2.204864501953125,
      "learning_rate": 1.6213459328950352e-07,
      "loss": 4.5125,
      "step": 40
    },
    {
      "epoch": 3.0651162790697675,
      "grad_norm": 1.5250720977783203,
      "learning_rate": 0.0,
      "loss": 3.4608,
      "step": 41
    }
  ],
  "logging_steps": 1,
  "max_steps": 41,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.827986893480919e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}