{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 7500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.2,
"grad_norm": 5.575812339782715,
"learning_rate": 1.9781476007338058e-05,
"loss": 0.9324,
"step": 500
},
{
"epoch": 0.4,
"grad_norm": 4.583652973175049,
"learning_rate": 1.913545457642601e-05,
"loss": 0.9619,
"step": 1000
},
{
"epoch": 0.6,
"grad_norm": 3.9456448554992676,
"learning_rate": 1.8090169943749477e-05,
"loss": 0.979,
"step": 1500
},
{
"epoch": 0.8,
"grad_norm": 5.700883865356445,
"learning_rate": 1.6691306063588583e-05,
"loss": 0.9591,
"step": 2000
},
{
"epoch": 1.0,
"grad_norm": 3.7766757011413574,
"learning_rate": 1.5000000000000002e-05,
"loss": 0.9381,
"step": 2500
},
{
"epoch": 1.0,
"eval_loss": 0.937586784362793,
"eval_runtime": 43.9207,
"eval_samples_per_second": 45.537,
"eval_steps_per_second": 11.384,
"step": 2500
},
{
"epoch": 1.2,
"grad_norm": 4.477228164672852,
"learning_rate": 1.3090169943749475e-05,
"loss": 0.4559,
"step": 3000
},
{
"epoch": 1.4,
"grad_norm": 4.878340721130371,
"learning_rate": 1.1045284632676535e-05,
"loss": 0.4521,
"step": 3500
},
{
"epoch": 1.6,
"grad_norm": 3.9909627437591553,
"learning_rate": 8.954715367323468e-06,
"loss": 0.4468,
"step": 4000
},
{
"epoch": 1.8,
"grad_norm": 2.667670726776123,
"learning_rate": 6.909830056250527e-06,
"loss": 0.4321,
"step": 4500
},
{
"epoch": 2.0,
"grad_norm": 4.369731903076172,
"learning_rate": 5.000000000000003e-06,
"loss": 0.4124,
"step": 5000
},
{
"epoch": 2.0,
"eval_loss": 1.0478131771087646,
"eval_runtime": 41.0932,
"eval_samples_per_second": 48.67,
"eval_steps_per_second": 12.167,
"step": 5000
},
{
"epoch": 2.2,
"grad_norm": 2.2593352794647217,
"learning_rate": 3.308693936411421e-06,
"loss": 0.1657,
"step": 5500
},
{
"epoch": 2.4,
"grad_norm": 2.3517982959747314,
"learning_rate": 1.9098300562505266e-06,
"loss": 0.1616,
"step": 6000
},
{
"epoch": 2.6,
"grad_norm": 2.2167694568634033,
"learning_rate": 8.645454235739903e-07,
"loss": 0.153,
"step": 6500
},
{
"epoch": 2.8,
"grad_norm": 2.2378008365631104,
"learning_rate": 2.1852399266194312e-07,
"loss": 0.1521,
"step": 7000
},
{
"epoch": 3.0,
"grad_norm": 1.863773226737976,
"learning_rate": 0.0,
"loss": 0.1515,
"step": 7500
},
{
"epoch": 3.0,
"eval_loss": 1.4480849504470825,
"eval_runtime": 41.4492,
"eval_samples_per_second": 48.252,
"eval_steps_per_second": 12.063,
"step": 7500
},
{
"epoch": 3.0,
"step": 7500,
"total_flos": 2.3924465601190298e+17,
"train_loss": 0.5169153635660807,
"train_runtime": 5042.7608,
"train_samples_per_second": 5.949,
"train_steps_per_second": 1.487
}
],
"logging_steps": 500,
"max_steps": 7500,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.3924465601190298e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}