{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9879518072289155,
  "eval_steps": 500,
  "global_step": 186,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1606425702811245,
      "grad_norm": 55.777050153768585,
      "learning_rate": 5.263157894736842e-06,
      "loss": 3.9145,
      "step": 10
    },
    {
      "epoch": 0.321285140562249,
      "grad_norm": 6.574653822635258,
      "learning_rate": 9.999115304121459e-06,
      "loss": 0.3472,
      "step": 20
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 4.244131917524834,
      "learning_rate": 9.893330096656576e-06,
      "loss": 0.2294,
      "step": 30
    },
    {
      "epoch": 0.642570281124498,
      "grad_norm": 3.324081116501074,
      "learning_rate": 9.614885510995047e-06,
      "loss": 0.2041,
      "step": 40
    },
    {
      "epoch": 0.8032128514056225,
      "grad_norm": 3.1892405513679645,
      "learning_rate": 9.173606363381218e-06,
      "loss": 0.1989,
      "step": 50
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 2.4324918223476892,
      "learning_rate": 8.585063028536015e-06,
      "loss": 0.2037,
      "step": 60
    },
    {
      "epoch": 1.1244979919678715,
      "grad_norm": 2.16192012501172,
      "learning_rate": 7.870022044630569e-06,
      "loss": 0.1804,
      "step": 70
    },
    {
      "epoch": 1.285140562248996,
      "grad_norm": 46.57778152030111,
      "learning_rate": 7.053713373516538e-06,
      "loss": 0.1612,
      "step": 80
    },
    {
      "epoch": 1.4457831325301205,
      "grad_norm": 2.3080019184835585,
      "learning_rate": 6.164940170670266e-06,
      "loss": 0.1608,
      "step": 90
    },
    {
      "epoch": 1.606425702811245,
      "grad_norm": 2.4581039241773364,
      "learning_rate": 5.235062476295488e-06,
      "loss": 0.1577,
      "step": 100
    },
    {
      "epoch": 1.7670682730923695,
      "grad_norm": 2.2847917000132414,
      "learning_rate": 4.29689068767551e-06,
      "loss": 0.1529,
      "step": 110
    },
    {
      "epoch": 1.927710843373494,
      "grad_norm": 1.7632703410669297,
      "learning_rate": 3.3835278562017405e-06,
      "loss": 0.1576,
      "step": 120
    },
    {
      "epoch": 2.0883534136546187,
      "grad_norm": 1.161830645663707,
      "learning_rate": 2.5272016582081236e-06,
      "loss": 0.1296,
      "step": 130
    },
    {
      "epoch": 2.248995983935743,
      "grad_norm": 1.8411600296685922,
      "learning_rate": 1.7581272530970666e-06,
      "loss": 0.1004,
      "step": 140
    },
    {
      "epoch": 2.4096385542168672,
      "grad_norm": 2.21901118340881,
      "learning_rate": 1.103441152395588e-06,
      "loss": 0.0891,
      "step": 150
    },
    {
      "epoch": 2.570281124497992,
      "grad_norm": 1.888493064382548,
      "learning_rate": 5.862437177854629e-07,
      "loss": 0.0949,
      "step": 160
    },
    {
      "epoch": 2.7309236947791167,
      "grad_norm": 1.3428837781254024,
      "learning_rate": 2.2478407321721295e-07,
      "loss": 0.0925,
      "step": 170
    },
    {
      "epoch": 2.891566265060241,
      "grad_norm": 1.7122983109076155,
      "learning_rate": 3.181619118841517e-08,
      "loss": 0.0886,
      "step": 180
    }
  ],
  "logging_steps": 10,
  "max_steps": 186,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4435300122624.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}