{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.888888888888889,
"eval_steps": 500,
"global_step": 42,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.25396825396825395,
"grad_norm": 22.339479446411133,
"learning_rate": 9.523809523809524e-05,
"loss": 72.1042,
"step": 2
},
{
"epoch": 0.5079365079365079,
"grad_norm": 23.86441421508789,
"learning_rate": 9.047619047619048e-05,
"loss": 70.3059,
"step": 4
},
{
"epoch": 0.7619047619047619,
"grad_norm": 27.62677001953125,
"learning_rate": 8.571428571428571e-05,
"loss": 66.5863,
"step": 6
},
{
"epoch": 1.126984126984127,
"grad_norm": 57.27507781982422,
"learning_rate": 8.095238095238096e-05,
"loss": 82.8912,
"step": 8
},
{
"epoch": 1.380952380952381,
"grad_norm": 38.90316390991211,
"learning_rate": 7.619047619047618e-05,
"loss": 57.8037,
"step": 10
},
{
"epoch": 1.6349206349206349,
"grad_norm": 28.1763916015625,
"learning_rate": 7.142857142857143e-05,
"loss": 52.8797,
"step": 12
},
{
"epoch": 1.8888888888888888,
"grad_norm": 30.55934715270996,
"learning_rate": 6.666666666666667e-05,
"loss": 52.8374,
"step": 14
},
{
"epoch": 2.253968253968254,
"grad_norm": 26.450092315673828,
"learning_rate": 6.19047619047619e-05,
"loss": 69.0564,
"step": 16
},
{
"epoch": 2.507936507936508,
"grad_norm": 34.066776275634766,
"learning_rate": 5.714285714285714e-05,
"loss": 46.8763,
"step": 18
},
{
"epoch": 2.761904761904762,
"grad_norm": 24.209619522094727,
"learning_rate": 5.2380952380952384e-05,
"loss": 46.5825,
"step": 20
},
{
"epoch": 3.126984126984127,
"grad_norm": 35.07560729980469,
"learning_rate": 4.761904761904762e-05,
"loss": 62.4426,
"step": 22
},
{
"epoch": 3.380952380952381,
"grad_norm": 14.254531860351562,
"learning_rate": 4.2857142857142856e-05,
"loss": 43.6682,
"step": 24
},
{
"epoch": 3.634920634920635,
"grad_norm": 10.83181095123291,
"learning_rate": 3.809523809523809e-05,
"loss": 44.4259,
"step": 26
},
{
"epoch": 3.888888888888889,
"grad_norm": 15.05122184753418,
"learning_rate": 3.3333333333333335e-05,
"loss": 43.1349,
"step": 28
},
{
"epoch": 4.253968253968254,
"grad_norm": 10.117740631103516,
"learning_rate": 2.857142857142857e-05,
"loss": 58.7522,
"step": 30
},
{
"epoch": 4.507936507936508,
"grad_norm": 12.624483108520508,
"learning_rate": 2.380952380952381e-05,
"loss": 41.6464,
"step": 32
},
{
"epoch": 4.761904761904762,
"grad_norm": 8.7578125,
"learning_rate": 1.9047619047619046e-05,
"loss": 41.2899,
"step": 34
},
{
"epoch": 5.1269841269841265,
"grad_norm": 14.106217384338379,
"learning_rate": 1.4285714285714285e-05,
"loss": 58.7284,
"step": 36
},
{
"epoch": 5.380952380952381,
"grad_norm": 11.060308456420898,
"learning_rate": 9.523809523809523e-06,
"loss": 39.9459,
"step": 38
},
{
"epoch": 5.634920634920634,
"grad_norm": 11.781099319458008,
"learning_rate": 4.7619047619047615e-06,
"loss": 43.8549,
"step": 40
},
{
"epoch": 5.888888888888889,
"grad_norm": 8.562162399291992,
"learning_rate": 0.0,
"loss": 42.2842,
"step": 42
},
{
"epoch": 5.888888888888889,
"step": 42,
"total_flos": 674107298249544.0,
"train_loss": 54.19509578886486,
"train_runtime": 1597.8044,
"train_samples_per_second": 0.473,
"train_steps_per_second": 0.026
}
],
"logging_steps": 2,
"max_steps": 42,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 674107298249544.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}