{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.896551724137931,
"eval_steps": 500,
"global_step": 21,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13793103448275862,
"grad_norm": 14.019169682273084,
"learning_rate": 6.666666666666666e-07,
"loss": 1.507,
"step": 1
},
{
"epoch": 0.27586206896551724,
"grad_norm": 14.71228068046855,
"learning_rate": 1.3333333333333332e-06,
"loss": 1.4883,
"step": 2
},
{
"epoch": 0.41379310344827586,
"grad_norm": 14.342453028916033,
"learning_rate": 2e-06,
"loss": 1.4746,
"step": 3
},
{
"epoch": 0.5517241379310345,
"grad_norm": 11.865842851033273,
"learning_rate": 1.984807753012208e-06,
"loss": 1.4302,
"step": 4
},
{
"epoch": 0.6896551724137931,
"grad_norm": 9.644001731592224,
"learning_rate": 1.9396926207859082e-06,
"loss": 1.2784,
"step": 5
},
{
"epoch": 0.8275862068965517,
"grad_norm": 9.22710365972269,
"learning_rate": 1.8660254037844386e-06,
"loss": 1.2302,
"step": 6
},
{
"epoch": 0.9655172413793104,
"grad_norm": 8.622558679008812,
"learning_rate": 1.766044443118978e-06,
"loss": 1.1959,
"step": 7
},
{
"epoch": 1.103448275862069,
"grad_norm": 9.106960231342002,
"learning_rate": 1.6427876096865393e-06,
"loss": 1.0703,
"step": 8
},
{
"epoch": 1.2413793103448276,
"grad_norm": 8.29490409484742,
"learning_rate": 1.5e-06,
"loss": 1.042,
"step": 9
},
{
"epoch": 1.3793103448275863,
"grad_norm": 6.446031536810181,
"learning_rate": 1.3420201433256689e-06,
"loss": 0.9649,
"step": 10
},
{
"epoch": 1.5172413793103448,
"grad_norm": 5.764192061204997,
"learning_rate": 1.1736481776669305e-06,
"loss": 0.869,
"step": 11
},
{
"epoch": 1.6551724137931034,
"grad_norm": 5.6349278706228345,
"learning_rate": 1e-06,
"loss": 0.9707,
"step": 12
},
{
"epoch": 1.793103448275862,
"grad_norm": 4.868404389323644,
"learning_rate": 8.263518223330696e-07,
"loss": 0.9837,
"step": 13
},
{
"epoch": 1.9310344827586206,
"grad_norm": 4.44321806590041,
"learning_rate": 6.579798566743313e-07,
"loss": 0.9148,
"step": 14
},
{
"epoch": 2.0689655172413794,
"grad_norm": 4.338533070335014,
"learning_rate": 5.000000000000002e-07,
"loss": 0.7761,
"step": 15
},
{
"epoch": 2.206896551724138,
"grad_norm": 4.45792051740593,
"learning_rate": 3.5721239031346063e-07,
"loss": 0.8401,
"step": 16
},
{
"epoch": 2.344827586206897,
"grad_norm": 4.512087188956715,
"learning_rate": 2.339555568810221e-07,
"loss": 0.7472,
"step": 17
},
{
"epoch": 2.4827586206896552,
"grad_norm": 4.223326488483186,
"learning_rate": 1.3397459621556128e-07,
"loss": 0.8066,
"step": 18
},
{
"epoch": 2.6206896551724137,
"grad_norm": 4.252642416848704,
"learning_rate": 6.030737921409168e-08,
"loss": 0.8122,
"step": 19
},
{
"epoch": 2.7586206896551726,
"grad_norm": 4.1503783715330105,
"learning_rate": 1.519224698779198e-08,
"loss": 0.8134,
"step": 20
},
{
"epoch": 2.896551724137931,
"grad_norm": 3.741071114000594,
"learning_rate": 0.0,
"loss": 0.7617,
"step": 21
},
{
"epoch": 2.896551724137931,
"step": 21,
"total_flos": 1395378216960.0,
"train_loss": 1.0465347681726729,
"train_runtime": 5265.2341,
"train_samples_per_second": 0.132,
"train_steps_per_second": 0.004
}
],
"logging_steps": 1,
"max_steps": 21,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1395378216960.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}