{
  "best_metric": 0.6597008109092712,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.019807863721897592,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00039615727443795186,
      "grad_norm": 15.38939380645752,
      "learning_rate": 2e-05,
      "loss": 2.6599,
      "step": 1
    },
    {
      "epoch": 0.00039615727443795186,
      "eval_loss": 1.200274109840393,
      "eval_runtime": 154.9972,
      "eval_samples_per_second": 27.433,
      "eval_steps_per_second": 3.432,
      "step": 1
    },
    {
      "epoch": 0.0007923145488759037,
      "grad_norm": 16.734960556030273,
      "learning_rate": 4e-05,
      "loss": 3.396,
      "step": 2
    },
    {
      "epoch": 0.0011884718233138556,
      "grad_norm": 15.702166557312012,
      "learning_rate": 6e-05,
      "loss": 2.8498,
      "step": 3
    },
    {
      "epoch": 0.0015846290977518075,
      "grad_norm": 13.420672416687012,
      "learning_rate": 8e-05,
      "loss": 2.7706,
      "step": 4
    },
    {
      "epoch": 0.0019807863721897595,
      "grad_norm": 9.73044490814209,
      "learning_rate": 0.0001,
      "loss": 2.5057,
      "step": 5
    },
    {
      "epoch": 0.0023769436466277113,
      "grad_norm": 11.664520263671875,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.7975,
      "step": 6
    },
    {
      "epoch": 0.002773100921065663,
      "grad_norm": 8.877344131469727,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.5462,
      "step": 7
    },
    {
      "epoch": 0.003169258195503615,
      "grad_norm": 9.045130729675293,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.6158,
      "step": 8
    },
    {
      "epoch": 0.0035654154699415667,
      "grad_norm": 9.858509063720703,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.8669,
      "step": 9
    },
    {
      "epoch": 0.003961572744379519,
      "grad_norm": 8.872286796569824,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.516,
      "step": 10
    },
    {
      "epoch": 0.004357730018817471,
      "grad_norm": 9.686171531677246,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.5248,
      "step": 11
    },
    {
      "epoch": 0.004753887293255423,
      "grad_norm": 10.663348197937012,
      "learning_rate": 9.414737964294636e-05,
      "loss": 3.0213,
      "step": 12
    },
    {
      "epoch": 0.005150044567693374,
      "grad_norm": 9.221182823181152,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.4556,
      "step": 13
    },
    {
      "epoch": 0.005546201842131326,
      "grad_norm": 9.011594772338867,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.2169,
      "step": 14
    },
    {
      "epoch": 0.005942359116569278,
      "grad_norm": 9.134177207946777,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.6884,
      "step": 15
    },
    {
      "epoch": 0.00633851639100723,
      "grad_norm": 8.743730545043945,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.4097,
      "step": 16
    },
    {
      "epoch": 0.006734673665445182,
      "grad_norm": 9.52989673614502,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.7904,
      "step": 17
    },
    {
      "epoch": 0.0071308309398831335,
      "grad_norm": 9.70661735534668,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.4424,
      "step": 18
    },
    {
      "epoch": 0.007526988214321085,
      "grad_norm": 10.09250259399414,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.436,
      "step": 19
    },
    {
      "epoch": 0.007923145488759038,
      "grad_norm": 8.261049270629883,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.318,
      "step": 20
    },
    {
      "epoch": 0.008319302763196989,
      "grad_norm": 9.031270027160645,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.4178,
      "step": 21
    },
    {
      "epoch": 0.008715460037634942,
      "grad_norm": 12.21824836730957,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.7133,
      "step": 22
    },
    {
      "epoch": 0.009111617312072893,
      "grad_norm": 10.730268478393555,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.6344,
      "step": 23
    },
    {
      "epoch": 0.009507774586510845,
      "grad_norm": 9.152085304260254,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.3463,
      "step": 24
    },
    {
      "epoch": 0.009903931860948796,
      "grad_norm": 9.404061317443848,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.3261,
      "step": 25
    },
    {
      "epoch": 0.009903931860948796,
      "eval_loss": 0.6671332120895386,
      "eval_runtime": 156.6633,
      "eval_samples_per_second": 27.141,
      "eval_steps_per_second": 3.396,
      "step": 25
    },
    {
      "epoch": 0.010300089135386749,
      "grad_norm": 12.529690742492676,
      "learning_rate": 5.522642316338268e-05,
      "loss": 2.8625,
      "step": 26
    },
    {
      "epoch": 0.0106962464098247,
      "grad_norm": 10.027824401855469,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.5588,
      "step": 27
    },
    {
      "epoch": 0.011092403684262652,
      "grad_norm": 10.082047462463379,
      "learning_rate": 4.825502516487497e-05,
      "loss": 2.4771,
      "step": 28
    },
    {
      "epoch": 0.011488560958700603,
      "grad_norm": 11.15814208984375,
      "learning_rate": 4.477357683661734e-05,
      "loss": 2.4113,
      "step": 29
    },
    {
      "epoch": 0.011884718233138556,
      "grad_norm": 10.759804725646973,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.8421,
      "step": 30
    },
    {
      "epoch": 0.012280875507576509,
      "grad_norm": 10.599135398864746,
      "learning_rate": 3.790390522001662e-05,
      "loss": 2.6885,
      "step": 31
    },
    {
      "epoch": 0.01267703278201446,
      "grad_norm": 11.816875457763672,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 3.0024,
      "step": 32
    },
    {
      "epoch": 0.013073190056452412,
      "grad_norm": 10.234508514404297,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.0345,
      "step": 33
    },
    {
      "epoch": 0.013469347330890363,
      "grad_norm": 12.437875747680664,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 2.7898,
      "step": 34
    },
    {
      "epoch": 0.013865504605328316,
      "grad_norm": 10.455293655395508,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.2859,
      "step": 35
    },
    {
      "epoch": 0.014261661879766267,
      "grad_norm": 10.511425971984863,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.2994,
      "step": 36
    },
    {
      "epoch": 0.01465781915420422,
      "grad_norm": 11.496809959411621,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 2.7418,
      "step": 37
    },
    {
      "epoch": 0.01505397642864217,
      "grad_norm": 13.628725051879883,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 3.0457,
      "step": 38
    },
    {
      "epoch": 0.015450133703080123,
      "grad_norm": 13.12936782836914,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 2.5216,
      "step": 39
    },
    {
      "epoch": 0.015846290977518076,
      "grad_norm": 10.766741752624512,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 2.3881,
      "step": 40
    },
    {
      "epoch": 0.016242448251956025,
      "grad_norm": 15.045173645019531,
      "learning_rate": 9.549150281252633e-06,
      "loss": 3.1368,
      "step": 41
    },
    {
      "epoch": 0.016638605526393978,
      "grad_norm": 10.670278549194336,
      "learning_rate": 7.597595192178702e-06,
      "loss": 2.4484,
      "step": 42
    },
    {
      "epoch": 0.01703476280083193,
      "grad_norm": 14.797165870666504,
      "learning_rate": 5.852620357053651e-06,
      "loss": 3.4172,
      "step": 43
    },
    {
      "epoch": 0.017430920075269883,
      "grad_norm": 12.900824546813965,
      "learning_rate": 4.322727117869951e-06,
      "loss": 2.7403,
      "step": 44
    },
    {
      "epoch": 0.017827077349707832,
      "grad_norm": 12.451089859008789,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 3.1842,
      "step": 45
    },
    {
      "epoch": 0.018223234624145785,
      "grad_norm": 14.523232460021973,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 3.4595,
      "step": 46
    },
    {
      "epoch": 0.018619391898583738,
      "grad_norm": 12.919535636901855,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 3.0023,
      "step": 47
    },
    {
      "epoch": 0.01901554917302169,
      "grad_norm": 13.515146255493164,
      "learning_rate": 4.865965629214819e-07,
      "loss": 2.8616,
      "step": 48
    },
    {
      "epoch": 0.019411706447459643,
      "grad_norm": 13.035895347595215,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 2.3172,
      "step": 49
    },
    {
      "epoch": 0.019807863721897592,
      "grad_norm": 19.37661361694336,
      "learning_rate": 0.0,
      "loss": 3.5126,
      "step": 50
    },
    {
      "epoch": 0.019807863721897592,
      "eval_loss": 0.6597008109092712,
      "eval_runtime": 156.6523,
      "eval_samples_per_second": 27.143,
      "eval_steps_per_second": 3.396,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.5953824975814656e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}