{
"best_global_step": 1500,
"best_metric": 0.33901429176330566,
"best_model_checkpoint": "./stt/checkpoint-1500",
"epoch": 1.854140914709518,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12360939431396786,
"grad_norm": 10.66793155670166,
"learning_rate": 9.999173944549022e-06,
"loss": 1.6423,
"step": 100
},
{
"epoch": 0.24721878862793573,
"grad_norm": 8.940858840942383,
"learning_rate": 9.991680624320292e-06,
"loss": 0.5818,
"step": 200
},
{
"epoch": 0.37082818294190356,
"grad_norm": 7.630367755889893,
"learning_rate": 9.976393387344834e-06,
"loss": 0.4997,
"step": 300
},
{
"epoch": 0.49443757725587145,
"grad_norm": 8.54523754119873,
"learning_rate": 9.953336102828482e-06,
"loss": 0.4622,
"step": 400
},
{
"epoch": 0.6180469715698393,
"grad_norm": 8.749971389770508,
"learning_rate": 9.922544771983945e-06,
"loss": 0.4135,
"step": 500
},
{
"epoch": 0.6180469715698393,
"eval_loss": 0.4068942070007324,
"eval_runtime": 1305.4198,
"eval_samples_per_second": 2.477,
"eval_steps_per_second": 0.156,
"eval_wer": 21.631878557874764,
"eval_wer_ortho": 29.911516425399192,
"step": 500
},
{
"epoch": 0.7416563658838071,
"grad_norm": 7.349922180175781,
"learning_rate": 9.884067471819184e-06,
"loss": 0.3882,
"step": 600
},
{
"epoch": 0.865265760197775,
"grad_norm": 8.007939338684082,
"learning_rate": 9.837964280070878e-06,
"loss": 0.3613,
"step": 700
},
{
"epoch": 0.9888751545117429,
"grad_norm": 7.204431056976318,
"learning_rate": 9.784307181400159e-06,
"loss": 0.3561,
"step": 800
},
{
"epoch": 1.1124845488257107,
"grad_norm": 6.045726776123047,
"learning_rate": 9.723179954997127e-06,
"loss": 0.2156,
"step": 900
},
{
"epoch": 1.2360939431396787,
"grad_norm": 5.328393459320068,
"learning_rate": 9.654678043769566e-06,
"loss": 0.2036,
"step": 1000
},
{
"epoch": 1.2360939431396787,
"eval_loss": 0.35839423537254333,
"eval_runtime": 1301.6256,
"eval_samples_per_second": 2.484,
"eval_steps_per_second": 0.156,
"eval_wer": 18.355245323936025,
"eval_wer_ortho": 25.873817676373868,
"step": 1000
},
{
"epoch": 1.3597033374536465,
"grad_norm": 6.864717960357666,
"learning_rate": 9.578908405320196e-06,
"loss": 0.2019,
"step": 1100
},
{
"epoch": 1.4833127317676142,
"grad_norm": 5.546298027038574,
"learning_rate": 9.495989344945056e-06,
"loss": 0.211,
"step": 1200
},
{
"epoch": 1.6069221260815822,
"grad_norm": 4.569760322570801,
"learning_rate": 9.406050330913839e-06,
"loss": 0.196,
"step": 1300
},
{
"epoch": 1.73053152039555,
"grad_norm": 6.44041109085083,
"learning_rate": 9.309231792320548e-06,
"loss": 0.2003,
"step": 1400
},
{
"epoch": 1.854140914709518,
"grad_norm": 6.337347507476807,
"learning_rate": 9.205684899820146e-06,
"loss": 0.1899,
"step": 1500
},
{
"epoch": 1.854140914709518,
"eval_loss": 0.33901429176330566,
"eval_runtime": 1299.601,
"eval_samples_per_second": 2.488,
"eval_steps_per_second": 0.156,
"eval_wer": 16.481431282190297,
"eval_wer_ortho": 24.09397565854155,
"step": 1500
}
],
"logging_steps": 100,
"max_steps": 8000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.448022931177472e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}