{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.11026905649785476,
  "eval_steps": 500,
  "global_step": 5500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01002445968162316,
      "grad_norm": 0.1908789426088333,
      "learning_rate": 2.969926620955131e-05,
      "loss": 0.0305,
      "step": 500
    },
    {
      "epoch": 0.02004891936324632,
      "grad_norm": 0.2333366870880127,
      "learning_rate": 2.939853241910261e-05,
      "loss": 0.027,
      "step": 1000
    },
    {
      "epoch": 0.03007337904486948,
      "grad_norm": 0.18207737803459167,
      "learning_rate": 2.9097798628653917e-05,
      "loss": 0.026,
      "step": 1500
    },
    {
      "epoch": 0.04009783872649264,
      "grad_norm": 0.13153837621212006,
      "learning_rate": 2.879706483820522e-05,
      "loss": 0.0253,
      "step": 2000
    },
    {
      "epoch": 0.0501222984081158,
      "grad_norm": 0.20580655336380005,
      "learning_rate": 2.849633104775653e-05,
      "loss": 0.0249,
      "step": 2500
    },
    {
      "epoch": 0.06014675808973896,
      "grad_norm": 0.16466622054576874,
      "learning_rate": 2.8196198724888727e-05,
      "loss": 0.0239,
      "step": 3000
    },
    {
      "epoch": 0.07017121777136212,
      "grad_norm": 0.1590748131275177,
      "learning_rate": 2.7895464934440035e-05,
      "loss": 0.0239,
      "step": 3500
    },
    {
      "epoch": 0.08019567745298528,
      "grad_norm": 0.14150911569595337,
      "learning_rate": 2.759473114399134e-05,
      "loss": 0.0236,
      "step": 4000
    },
    {
      "epoch": 0.09022013713460844,
      "grad_norm": 0.09914161264896393,
      "learning_rate": 2.7293997353542647e-05,
      "loss": 0.0236,
      "step": 4500
    },
    {
      "epoch": 0.1002445968162316,
      "grad_norm": 0.16175945103168488,
      "learning_rate": 2.6993263563093947e-05,
      "loss": 0.0232,
      "step": 5000
    },
    {
      "epoch": 0.11026905649785476,
      "grad_norm": 0.14557033777236938,
      "learning_rate": 2.6692529772645255e-05,
      "loss": 0.0229,
      "step": 5500
    }
  ],
  "logging_steps": 500,
  "max_steps": 49878,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}