{
  "best_metric": 0.6969178318977356,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.4383561643835616,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008767123287671232,
      "grad_norm": 20.05573272705078,
      "learning_rate": 5e-05,
      "loss": 6.9854,
      "step": 1
    },
    {
      "epoch": 0.008767123287671232,
      "eval_loss": 7.201192855834961,
      "eval_runtime": 23.5294,
      "eval_samples_per_second": 32.683,
      "eval_steps_per_second": 4.123,
      "step": 1
    },
    {
      "epoch": 0.017534246575342465,
      "grad_norm": 17.615022659301758,
      "learning_rate": 0.0001,
      "loss": 6.9208,
      "step": 2
    },
    {
      "epoch": 0.0263013698630137,
      "grad_norm": 14.698570251464844,
      "learning_rate": 9.989294616193017e-05,
      "loss": 5.9672,
      "step": 3
    },
    {
      "epoch": 0.03506849315068493,
      "grad_norm": 14.905780792236328,
      "learning_rate": 9.957224306869053e-05,
      "loss": 3.5246,
      "step": 4
    },
    {
      "epoch": 0.043835616438356165,
      "grad_norm": 13.3781099319458,
      "learning_rate": 9.903926402016153e-05,
      "loss": 2.0829,
      "step": 5
    },
    {
      "epoch": 0.0526027397260274,
      "grad_norm": 4.5874924659729,
      "learning_rate": 9.829629131445342e-05,
      "loss": 0.9954,
      "step": 6
    },
    {
      "epoch": 0.06136986301369863,
      "grad_norm": 3.978563070297241,
      "learning_rate": 9.73465064747553e-05,
      "loss": 0.8675,
      "step": 7
    },
    {
      "epoch": 0.07013698630136986,
      "grad_norm": 2.2998287677764893,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.7723,
      "step": 8
    },
    {
      "epoch": 0.0789041095890411,
      "grad_norm": 3.281687021255493,
      "learning_rate": 9.484363707663442e-05,
      "loss": 0.7537,
      "step": 9
    },
    {
      "epoch": 0.08767123287671233,
      "grad_norm": 1.4720951318740845,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.7145,
      "step": 10
    },
    {
      "epoch": 0.09643835616438357,
      "grad_norm": 1.1521276235580444,
      "learning_rate": 9.157348061512727e-05,
      "loss": 0.7025,
      "step": 11
    },
    {
      "epoch": 0.1052054794520548,
      "grad_norm": 1.9535162448883057,
      "learning_rate": 8.966766701456177e-05,
      "loss": 0.6964,
      "step": 12
    },
    {
      "epoch": 0.11397260273972602,
      "grad_norm": 1.1660391092300415,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.6992,
      "step": 13
    },
    {
      "epoch": 0.12273972602739726,
      "grad_norm": 2.2504096031188965,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.7409,
      "step": 14
    },
    {
      "epoch": 0.13150684931506848,
      "grad_norm": 1.188321828842163,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.709,
      "step": 15
    },
    {
      "epoch": 0.14027397260273972,
      "grad_norm": 0.5811285972595215,
      "learning_rate": 8.043807145043604e-05,
      "loss": 0.6904,
      "step": 16
    },
    {
      "epoch": 0.14904109589041095,
      "grad_norm": 1.6429232358932495,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.707,
      "step": 17
    },
    {
      "epoch": 0.1578082191780822,
      "grad_norm": 2.918581247329712,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.7792,
      "step": 18
    },
    {
      "epoch": 0.16657534246575342,
      "grad_norm": 0.7487760782241821,
      "learning_rate": 7.211443451095007e-05,
      "loss": 0.6948,
      "step": 19
    },
    {
      "epoch": 0.17534246575342466,
      "grad_norm": 1.080714225769043,
      "learning_rate": 6.91341716182545e-05,
      "loss": 0.7048,
      "step": 20
    },
    {
      "epoch": 0.1841095890410959,
      "grad_norm": 0.9710987210273743,
      "learning_rate": 6.607197326515808e-05,
      "loss": 0.7044,
      "step": 21
    },
    {
      "epoch": 0.19287671232876713,
      "grad_norm": 0.5480700135231018,
      "learning_rate": 6.294095225512603e-05,
      "loss": 0.7005,
      "step": 22
    },
    {
      "epoch": 0.20164383561643837,
      "grad_norm": 0.9117023348808289,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 0.7057,
      "step": 23
    },
    {
      "epoch": 0.2104109589041096,
      "grad_norm": 0.7569409012794495,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 0.7037,
      "step": 24
    },
    {
      "epoch": 0.2191780821917808,
      "grad_norm": 0.3170976936817169,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.6972,
      "step": 25
    },
    {
      "epoch": 0.2191780821917808,
      "eval_loss": 0.7007076144218445,
      "eval_runtime": 23.4954,
      "eval_samples_per_second": 32.73,
      "eval_steps_per_second": 4.128,
      "step": 25
    },
    {
      "epoch": 0.22794520547945205,
      "grad_norm": 1.066445231437683,
      "learning_rate": 5e-05,
      "loss": 0.7141,
      "step": 26
    },
    {
      "epoch": 0.23671232876712328,
      "grad_norm": 0.6127803325653076,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.6957,
      "step": 27
    },
    {
      "epoch": 0.24547945205479452,
      "grad_norm": 0.7028255462646484,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.6946,
      "step": 28
    },
    {
      "epoch": 0.25424657534246575,
      "grad_norm": 0.7202122211456299,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.6816,
      "step": 29
    },
    {
      "epoch": 0.26301369863013696,
      "grad_norm": 0.2293580323457718,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.6956,
      "step": 30
    },
    {
      "epoch": 0.2717808219178082,
      "grad_norm": 1.1750402450561523,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.7183,
      "step": 31
    },
    {
      "epoch": 0.28054794520547943,
      "grad_norm": 0.39852091670036316,
      "learning_rate": 3.086582838174551e-05,
      "loss": 0.7003,
      "step": 32
    },
    {
      "epoch": 0.2893150684931507,
      "grad_norm": 0.5287603735923767,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 0.6979,
      "step": 33
    },
    {
      "epoch": 0.2980821917808219,
      "grad_norm": 0.6953926086425781,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.7014,
      "step": 34
    },
    {
      "epoch": 0.30684931506849317,
      "grad_norm": 0.5524169206619263,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.6915,
      "step": 35
    },
    {
      "epoch": 0.3156164383561644,
      "grad_norm": 0.7208291888237,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 0.6864,
      "step": 36
    },
    {
      "epoch": 0.32438356164383564,
      "grad_norm": 0.28683096170425415,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.6927,
      "step": 37
    },
    {
      "epoch": 0.33315068493150685,
      "grad_norm": 0.3897854685783386,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.6978,
      "step": 38
    },
    {
      "epoch": 0.3419178082191781,
      "grad_norm": 0.2978964149951935,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.6946,
      "step": 39
    },
    {
      "epoch": 0.3506849315068493,
      "grad_norm": 0.4156556725502014,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.691,
      "step": 40
    },
    {
      "epoch": 0.3594520547945205,
      "grad_norm": 0.15310971438884735,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.6926,
      "step": 41
    },
    {
      "epoch": 0.3682191780821918,
      "grad_norm": 0.3335914611816406,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.6923,
      "step": 42
    },
    {
      "epoch": 0.376986301369863,
      "grad_norm": 0.14917869865894318,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.6942,
      "step": 43
    },
    {
      "epoch": 0.38575342465753426,
      "grad_norm": 0.7024045586585999,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.6965,
      "step": 44
    },
    {
      "epoch": 0.39452054794520547,
      "grad_norm": 0.7428992390632629,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.7004,
      "step": 45
    },
    {
      "epoch": 0.40328767123287673,
      "grad_norm": 0.44110044836997986,
      "learning_rate": 1.70370868554659e-06,
      "loss": 0.6889,
      "step": 46
    },
    {
      "epoch": 0.41205479452054794,
      "grad_norm": 0.3936268091201782,
      "learning_rate": 9.607359798384785e-07,
      "loss": 0.6905,
      "step": 47
    },
    {
      "epoch": 0.4208219178082192,
      "grad_norm": 0.5188143253326416,
      "learning_rate": 4.277569313094809e-07,
      "loss": 0.6922,
      "step": 48
    },
    {
      "epoch": 0.4295890410958904,
      "grad_norm": 0.46438923478126526,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.6922,
      "step": 49
    },
    {
      "epoch": 0.4383561643835616,
      "grad_norm": 0.21112455427646637,
      "learning_rate": 0.0,
      "loss": 0.6954,
      "step": 50
    },
    {
      "epoch": 0.4383561643835616,
      "eval_loss": 0.6969178318977356,
      "eval_runtime": 23.5116,
      "eval_samples_per_second": 32.707,
      "eval_steps_per_second": 4.126,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.9736786926882e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}