{
  "best_metric": 1.8831839561462402,
  "best_model_checkpoint": "./output/checkpoint-150",
  "epoch": 0.016123831022250887,
  "eval_steps": 150,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001074922068150059,
      "grad_norm": 43.13871765136719,
      "learning_rate": 4.125e-06,
      "loss": 2.2922,
      "step": 10
    },
    {
      "epoch": 0.002149844136300118,
      "grad_norm": 26.323883056640625,
      "learning_rate": 8.25e-06,
      "loss": 2.1328,
      "step": 20
    },
    {
      "epoch": 0.0032247662044501773,
      "grad_norm": 17.629505157470703,
      "learning_rate": 1.2375e-05,
      "loss": 2.036,
      "step": 30
    },
    {
      "epoch": 0.004299688272600236,
      "grad_norm": 10.648378372192383,
      "learning_rate": 1.65e-05,
      "loss": 1.851,
      "step": 40
    },
    {
      "epoch": 0.005374610340750295,
      "grad_norm": 17.73092269897461,
      "learning_rate": 2.0625e-05,
      "loss": 1.8746,
      "step": 50
    },
    {
      "epoch": 0.0064495324089003546,
      "grad_norm": 6.716779708862305,
      "learning_rate": 2.475e-05,
      "loss": 1.8512,
      "step": 60
    },
    {
      "epoch": 0.007524454477050414,
      "grad_norm": 15.877828598022461,
      "learning_rate": 2.8874999999999997e-05,
      "loss": 1.9416,
      "step": 70
    },
    {
      "epoch": 0.008599376545200472,
      "grad_norm": 14.675684928894043,
      "learning_rate": 3.3e-05,
      "loss": 1.8581,
      "step": 80
    },
    {
      "epoch": 0.009674298613350531,
      "grad_norm": 11.489137649536133,
      "learning_rate": 3.7125e-05,
      "loss": 1.9017,
      "step": 90
    },
    {
      "epoch": 0.01074922068150059,
      "grad_norm": 7.483497619628906,
      "learning_rate": 4.125e-05,
      "loss": 1.9335,
      "step": 100
    },
    {
      "epoch": 0.01182414274965065,
      "grad_norm": 9.6410551071167,
      "learning_rate": 4.12495760935163e-05,
      "loss": 1.7841,
      "step": 110
    },
    {
      "epoch": 0.012899064817800709,
      "grad_norm": 7.8748979568481445,
      "learning_rate": 4.1248304391490334e-05,
      "loss": 1.8529,
      "step": 120
    },
    {
      "epoch": 0.013973986885950768,
      "grad_norm": 9.455327987670898,
      "learning_rate": 4.1246184946196796e-05,
      "loss": 1.9366,
      "step": 130
    },
    {
      "epoch": 0.015048908954100828,
      "grad_norm": 8.64035701751709,
      "learning_rate": 4.124321784475777e-05,
      "loss": 1.8501,
      "step": 140
    },
    {
      "epoch": 0.016123831022250887,
      "grad_norm": 13.220332145690918,
      "learning_rate": 4.123940320913919e-05,
      "loss": 1.9095,
      "step": 150
    },
    {
      "epoch": 0.016123831022250887,
      "eval_loss": 1.8831839561462402,
      "eval_runtime": 61.5136,
      "eval_samples_per_second": 8.128,
      "eval_steps_per_second": 8.128,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 5000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 150,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.941968917351629e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}