{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.1648125,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 3.224301815032959,
      "learning_rate": 8.000000000000001e-06,
      "loss": 3.0473,
      "step": 100
    },
    {
      "epoch": 0.08,
      "grad_norm": 3.7534642219543457,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 2.9068,
      "step": 200
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.2999300956726074,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 2.9097,
      "step": 300
    },
    {
      "epoch": 0.16,
      "grad_norm": 3.1935625076293945,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 2.9126,
      "step": 400
    },
    {
      "epoch": 1.0329625,
      "grad_norm": 3.9796504974365234,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 2.8695,
      "step": 500
    },
    {
      "epoch": 1.0729625,
      "grad_norm": 3.6588051319122314,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 2.6282,
      "step": 600
    },
    {
      "epoch": 1.1129625,
      "grad_norm": 3.0422592163085938,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 2.4448,
      "step": 700
    },
    {
      "epoch": 1.1529625000000001,
      "grad_norm": 3.506638526916504,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 2.3841,
      "step": 800
    },
    {
      "epoch": 2.025925,
      "grad_norm": 3.232518434524536,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 2.4055,
      "step": 900
    },
    {
      "epoch": 2.065925,
      "grad_norm": 3.5629770755767822,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 2.1635,
      "step": 1000
    },
    {
      "epoch": 2.105925,
      "grad_norm": 4.052393913269043,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 1.91,
      "step": 1100
    },
    {
      "epoch": 2.145925,
      "grad_norm": 4.011883735656738,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 1.8443,
      "step": 1200
    },
    {
      "epoch": 3.0188875,
      "grad_norm": 4.183358192443848,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 1.9117,
      "step": 1300
    },
    {
      "epoch": 3.0588875,
      "grad_norm": 4.792853355407715,
      "learning_rate": 9.651005032974994e-06,
      "loss": 1.6355,
      "step": 1400
    },
    {
      "epoch": 3.0988875,
      "grad_norm": 5.4336256980896,
      "learning_rate": 8.263518223330698e-06,
      "loss": 1.4449,
      "step": 1500
    },
    {
      "epoch": 3.1388875,
      "grad_norm": 4.874321937561035,
      "learning_rate": 6.909830056250527e-06,
      "loss": 1.3494,
      "step": 1600
    },
    {
      "epoch": 4.01185,
      "grad_norm": 5.182672500610352,
      "learning_rate": 5.616288532109225e-06,
      "loss": 1.3964,
      "step": 1700
    },
    {
      "epoch": 4.05185,
      "grad_norm": 6.411541938781738,
      "learning_rate": 4.408070965292534e-06,
      "loss": 1.252,
      "step": 1800
    },
    {
      "epoch": 4.09185,
      "grad_norm": 6.957055568695068,
      "learning_rate": 3.308693936411421e-06,
      "loss": 1.0674,
      "step": 1900
    },
    {
      "epoch": 4.13185,
      "grad_norm": 6.159031391143799,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.9957,
      "step": 2000
    },
    {
      "epoch": 5.0048125,
      "grad_norm": 5.721092224121094,
      "learning_rate": 1.5195190384357405e-06,
      "loss": 1.0377,
      "step": 2100
    },
    {
      "epoch": 5.0448125,
      "grad_norm": 7.575408458709717,
      "learning_rate": 8.645454235739903e-07,
      "loss": 1.0448,
      "step": 2200
    },
    {
      "epoch": 5.0848125,
      "grad_norm": 6.4172844886779785,
      "learning_rate": 3.8738304061681107e-07,
      "loss": 0.8786,
      "step": 2300
    },
    {
      "epoch": 5.1248125,
      "grad_norm": 8.13640022277832,
      "learning_rate": 9.731931258429638e-08,
      "loss": 0.8682,
      "step": 2400
    },
    {
      "epoch": 5.1648125,
      "grad_norm": 7.7202839851379395,
      "learning_rate": 0.0,
      "loss": 0.9273,
      "step": 2500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 2500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.3916038848512e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}