{
  "best_metric": 1.5392982959747314,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0942951438000943,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001885902876001886,
      "grad_norm": 1.6719341278076172,
      "learning_rate": 1e-05,
      "loss": 1.4085,
      "step": 1
    },
    {
      "epoch": 0.001885902876001886,
      "eval_loss": 1.798437476158142,
      "eval_runtime": 15.88,
      "eval_samples_per_second": 56.234,
      "eval_steps_per_second": 14.106,
      "step": 1
    },
    {
      "epoch": 0.003771805752003772,
      "grad_norm": 1.6418969631195068,
      "learning_rate": 2e-05,
      "loss": 1.4482,
      "step": 2
    },
    {
      "epoch": 0.005657708628005658,
      "grad_norm": 1.6673892736434937,
      "learning_rate": 3e-05,
      "loss": 1.5452,
      "step": 3
    },
    {
      "epoch": 0.007543611504007544,
      "grad_norm": 1.5872431993484497,
      "learning_rate": 4e-05,
      "loss": 1.5037,
      "step": 4
    },
    {
      "epoch": 0.00942951438000943,
      "grad_norm": 1.4873931407928467,
      "learning_rate": 5e-05,
      "loss": 1.362,
      "step": 5
    },
    {
      "epoch": 0.011315417256011316,
      "grad_norm": 1.3264328241348267,
      "learning_rate": 6e-05,
      "loss": 1.385,
      "step": 6
    },
    {
      "epoch": 0.013201320132013201,
      "grad_norm": 1.127667784690857,
      "learning_rate": 7e-05,
      "loss": 1.3468,
      "step": 7
    },
    {
      "epoch": 0.015087223008015087,
      "grad_norm": 1.2904081344604492,
      "learning_rate": 8e-05,
      "loss": 1.3893,
      "step": 8
    },
    {
      "epoch": 0.016973125884016973,
      "grad_norm": 1.4509556293487549,
      "learning_rate": 9e-05,
      "loss": 1.5524,
      "step": 9
    },
    {
      "epoch": 0.01885902876001886,
      "grad_norm": 1.1893857717514038,
      "learning_rate": 0.0001,
      "loss": 1.5371,
      "step": 10
    },
    {
      "epoch": 0.020744931636020744,
      "grad_norm": 1.0708231925964355,
      "learning_rate": 9.999316524962345e-05,
      "loss": 1.2872,
      "step": 11
    },
    {
      "epoch": 0.02263083451202263,
      "grad_norm": 1.1538314819335938,
      "learning_rate": 9.997266286704631e-05,
      "loss": 1.291,
      "step": 12
    },
    {
      "epoch": 0.024516737388024516,
      "grad_norm": 1.117232084274292,
      "learning_rate": 9.993849845741524e-05,
      "loss": 1.4771,
      "step": 13
    },
    {
      "epoch": 0.026402640264026403,
      "grad_norm": 1.038404941558838,
      "learning_rate": 9.989068136093873e-05,
      "loss": 1.4339,
      "step": 14
    },
    {
      "epoch": 0.028288543140028287,
      "grad_norm": 1.0554722547531128,
      "learning_rate": 9.98292246503335e-05,
      "loss": 1.2713,
      "step": 15
    },
    {
      "epoch": 0.030174446016030174,
      "grad_norm": 0.9968124628067017,
      "learning_rate": 9.975414512725057e-05,
      "loss": 1.4743,
      "step": 16
    },
    {
      "epoch": 0.03206034889203206,
      "grad_norm": 1.0715779066085815,
      "learning_rate": 9.966546331768191e-05,
      "loss": 1.5222,
      "step": 17
    },
    {
      "epoch": 0.033946251768033946,
      "grad_norm": 1.1179016828536987,
      "learning_rate": 9.956320346634876e-05,
      "loss": 1.4405,
      "step": 18
    },
    {
      "epoch": 0.03583215464403583,
      "grad_norm": 0.9977557063102722,
      "learning_rate": 9.944739353007344e-05,
      "loss": 1.3601,
      "step": 19
    },
    {
      "epoch": 0.03771805752003772,
      "grad_norm": 1.0840213298797607,
      "learning_rate": 9.931806517013612e-05,
      "loss": 1.7028,
      "step": 20
    },
    {
      "epoch": 0.039603960396039604,
      "grad_norm": 1.0020934343338013,
      "learning_rate": 9.917525374361912e-05,
      "loss": 1.5547,
      "step": 21
    },
    {
      "epoch": 0.04148986327204149,
      "grad_norm": 1.0338664054870605,
      "learning_rate": 9.901899829374047e-05,
      "loss": 1.4547,
      "step": 22
    },
    {
      "epoch": 0.04337576614804337,
      "grad_norm": 1.02623450756073,
      "learning_rate": 9.884934153917997e-05,
      "loss": 1.5793,
      "step": 23
    },
    {
      "epoch": 0.04526166902404526,
      "grad_norm": 0.9653357863426208,
      "learning_rate": 9.86663298624003e-05,
      "loss": 1.4715,
      "step": 24
    },
    {
      "epoch": 0.04714757190004715,
      "grad_norm": 1.058281421661377,
      "learning_rate": 9.847001329696653e-05,
      "loss": 1.5606,
      "step": 25
    },
    {
      "epoch": 0.04903347477604903,
      "grad_norm": 1.0581485033035278,
      "learning_rate": 9.826044551386744e-05,
      "loss": 1.3818,
      "step": 26
    },
    {
      "epoch": 0.05091937765205092,
      "grad_norm": 1.0404139757156372,
      "learning_rate": 9.803768380684242e-05,
      "loss": 1.4896,
      "step": 27
    },
    {
      "epoch": 0.052805280528052806,
      "grad_norm": 0.9869558215141296,
      "learning_rate": 9.780178907671789e-05,
      "loss": 1.6065,
      "step": 28
    },
    {
      "epoch": 0.05469118340405469,
      "grad_norm": 1.03351628780365,
      "learning_rate": 9.755282581475769e-05,
      "loss": 1.4879,
      "step": 29
    },
    {
      "epoch": 0.056577086280056574,
      "grad_norm": 1.1283892393112183,
      "learning_rate": 9.729086208503174e-05,
      "loss": 1.6899,
      "step": 30
    },
    {
      "epoch": 0.058462989156058465,
      "grad_norm": 1.1303669214248657,
      "learning_rate": 9.701596950580806e-05,
      "loss": 1.7194,
      "step": 31
    },
    {
      "epoch": 0.06034889203206035,
      "grad_norm": 1.136953592300415,
      "learning_rate": 9.672822322997305e-05,
      "loss": 1.5471,
      "step": 32
    },
    {
      "epoch": 0.06223479490806223,
      "grad_norm": 1.0339196920394897,
      "learning_rate": 9.642770192448536e-05,
      "loss": 1.6283,
      "step": 33
    },
    {
      "epoch": 0.06412069778406412,
      "grad_norm": 1.0422121286392212,
      "learning_rate": 9.611448774886924e-05,
      "loss": 1.6243,
      "step": 34
    },
    {
      "epoch": 0.066006600660066,
      "grad_norm": 1.0634968280792236,
      "learning_rate": 9.578866633275288e-05,
      "loss": 1.6214,
      "step": 35
    },
    {
      "epoch": 0.06789250353606789,
      "grad_norm": 1.0582144260406494,
      "learning_rate": 9.545032675245813e-05,
      "loss": 1.4956,
      "step": 36
    },
    {
      "epoch": 0.06977840641206978,
      "grad_norm": 1.1187388896942139,
      "learning_rate": 9.509956150664796e-05,
      "loss": 1.5546,
      "step": 37
    },
    {
      "epoch": 0.07166430928807166,
      "grad_norm": 1.0846741199493408,
      "learning_rate": 9.473646649103818e-05,
      "loss": 1.6097,
      "step": 38
    },
    {
      "epoch": 0.07355021216407355,
      "grad_norm": 1.1303223371505737,
      "learning_rate": 9.43611409721806e-05,
      "loss": 1.6785,
      "step": 39
    },
    {
      "epoch": 0.07543611504007544,
      "grad_norm": 1.1018422842025757,
      "learning_rate": 9.397368756032445e-05,
      "loss": 1.4387,
      "step": 40
    },
    {
      "epoch": 0.07732201791607732,
      "grad_norm": 1.0702595710754395,
      "learning_rate": 9.357421218136386e-05,
      "loss": 1.5708,
      "step": 41
    },
    {
      "epoch": 0.07920792079207921,
      "grad_norm": 1.133355975151062,
      "learning_rate": 9.316282404787871e-05,
      "loss": 1.5197,
      "step": 42
    },
    {
      "epoch": 0.0810938236680811,
      "grad_norm": 1.1223176717758179,
      "learning_rate": 9.273963562927695e-05,
      "loss": 1.5539,
      "step": 43
    },
    {
      "epoch": 0.08297972654408298,
      "grad_norm": 1.097373127937317,
      "learning_rate": 9.230476262104677e-05,
      "loss": 1.4639,
      "step": 44
    },
    {
      "epoch": 0.08486562942008487,
      "grad_norm": 1.259489893913269,
      "learning_rate": 9.185832391312644e-05,
      "loss": 1.6197,
      "step": 45
    },
    {
      "epoch": 0.08675153229608674,
      "grad_norm": 1.1635847091674805,
      "learning_rate": 9.140044155740101e-05,
      "loss": 1.412,
      "step": 46
    },
    {
      "epoch": 0.08863743517208864,
      "grad_norm": 1.3465157747268677,
      "learning_rate": 9.093124073433463e-05,
      "loss": 1.6082,
      "step": 47
    },
    {
      "epoch": 0.09052333804809053,
      "grad_norm": 1.2739461660385132,
      "learning_rate": 9.045084971874738e-05,
      "loss": 1.5323,
      "step": 48
    },
    {
      "epoch": 0.0924092409240924,
      "grad_norm": 1.239130973815918,
      "learning_rate": 8.995939984474624e-05,
      "loss": 1.4232,
      "step": 49
    },
    {
      "epoch": 0.0942951438000943,
      "grad_norm": 1.4111778736114502,
      "learning_rate": 8.945702546981969e-05,
      "loss": 1.4941,
      "step": 50
    },
    {
      "epoch": 0.0942951438000943,
      "eval_loss": 1.5392982959747314,
      "eval_runtime": 15.8606,
      "eval_samples_per_second": 56.303,
      "eval_steps_per_second": 14.123,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3883562421977088.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}