csikasote committed

Commit bd058a5 · verified · 1 Parent(s): 5c6d489

End of training
README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
  base_model: openai/whisper-medium
  tags:
  - generated_from_trainer
+ datasets:
+ - bemgen
  metrics:
  - wer
  model-index:
  - name: whisper-medium-bemgen-100f50m-model
-   results: []
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: bemgen
+       type: bemgen
+     metrics:
+     - name: Wer
+       type: wer
+       value: 0.38982326111744586
  ---
  
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
  
  # whisper-medium-bemgen-100f50m-model
  
- This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+ This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the bemgen dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.4367
- - Wer: 0.3405
+ - Loss: 0.4354
+ - Wer: 0.3898
  
  ## Model description
  
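The updated card above documents a Whisper fine-tune for Bemba automatic speech recognition. A minimal usage sketch follows; the Hub repo id and the audio file name are assumptions for illustration, not values stated in the diff:

```python
# Hypothetical usage sketch; the repo id and audio path below are assumptions.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-medium-bemgen-100f50m-model",  # assumed Hub repo id
    chunk_length_s=30,  # Whisper decodes audio in 30-second windows
)

result = asr("bemba_clip.wav")  # hypothetical 16 kHz mono recording
print(result["text"])
```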
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 2.6391548365797295,
+   "eval_loss": 0.4354272186756134,
+   "eval_runtime": 430.7377,
+   "eval_samples": 758,
+   "eval_samples_per_second": 1.76,
+   "eval_steps_per_second": 0.88,
+   "eval_wer": 0.38982326111744586,
+   "total_flos": 1.631539005161472e+19,
+   "train_loss": 1.9525959782600404,
+   "train_runtime": 8300.7195,
+   "train_samples": 6057,
+   "train_samples_per_second": 4.819,
+   "train_steps_per_second": 0.602
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.6391548365797295,
+   "eval_loss": 0.4354272186756134,
+   "eval_runtime": 430.7377,
+   "eval_samples": 758,
+   "eval_samples_per_second": 1.76,
+   "eval_steps_per_second": 0.88,
+   "eval_wer": 0.38982326111744586
+ }
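The `eval_wer` value above is a word error rate expressed as a fraction (0.3898, about 38.98%). A small sketch of how such a score is conventionally computed with the `evaluate` library; the transcripts are invented placeholders, not samples from the bemgen evaluation set:

```python
# Illustrative WER computation; the strings are placeholders, not real data.
import evaluate

wer_metric = evaluate.load("wer")

references = ["this is a reference transcript", "another reference sentence"]
predictions = ["this is a reference transcript", "another referense sentence"]

score = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {score:.4f}")  # fraction of substituted, inserted, and deleted words
```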
runs/Feb09_22-40-00_srvrocgpu011.uct.ac.za/events.out.tfevents.1739142666.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a22e27e8bad18174cd13f00f691383c6768d40f2f07181c7c9d08436633f30dc
+ size 40
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 2.6391548365797295,
+   "total_flos": 1.631539005161472e+19,
+   "train_loss": 1.9525959782600404,
+   "train_runtime": 8300.7195,
+   "train_samples": 6057,
+   "train_samples_per_second": 4.819,
+   "train_steps_per_second": 0.602
+ }
trainer_state.json ADDED
@@ -0,0 +1,701 @@
+ {
+   "best_metric": 0.4354272186756134,
+   "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-bemgen-100f50m-model/checkpoint-1400",
+   "epoch": 2.6391548365797295,
+   "eval_steps": 200,
+   "global_step": 2000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03301419610432486,
+       "grad_norm": 117.47059631347656,
+       "learning_rate": 3.8e-07,
+       "loss": 10.9949,
+       "step": 25
+     },
+     {
+       "epoch": 0.06602839220864971,
+       "grad_norm": 102.7651138305664,
+       "learning_rate": 8.8e-07,
+       "loss": 9.4482,
+       "step": 50
+     },
+     {
+       "epoch": 0.09904258831297458,
+       "grad_norm": 64.81011199951172,
+       "learning_rate": 1.3800000000000001e-06,
+       "loss": 7.2641,
+       "step": 75
+     },
+     {
+       "epoch": 0.13205678441729943,
+       "grad_norm": 81.74808502197266,
+       "learning_rate": 1.8800000000000002e-06,
+       "loss": 5.5236,
+       "step": 100
+     },
+     {
+       "epoch": 0.1650709805216243,
+       "grad_norm": 93.43115234375,
+       "learning_rate": 2.38e-06,
+       "loss": 4.7369,
+       "step": 125
+     },
+     {
+       "epoch": 0.19808517662594916,
+       "grad_norm": 89.20719909667969,
+       "learning_rate": 2.88e-06,
+       "loss": 3.8232,
+       "step": 150
+     },
+     {
+       "epoch": 0.231099372730274,
+       "grad_norm": 64.15678405761719,
+       "learning_rate": 3.3800000000000007e-06,
+       "loss": 3.806,
+       "step": 175
+     },
+     {
+       "epoch": 0.26411356883459886,
+       "grad_norm": 64.91326904296875,
+       "learning_rate": 3.88e-06,
+       "loss": 3.1512,
+       "step": 200
+     },
+     {
+       "epoch": 0.26411356883459886,
+       "eval_loss": 0.8282185792922974,
+       "eval_runtime": 423.1335,
+       "eval_samples_per_second": 1.791,
+       "eval_steps_per_second": 0.896,
+       "eval_wer": 0.6533637400228051,
+       "step": 200
+     },
+     {
+       "epoch": 0.29712776493892373,
+       "grad_norm": 75.13513946533203,
+       "learning_rate": 4.38e-06,
+       "loss": 3.3773,
+       "step": 225
+     },
+     {
+       "epoch": 0.3301419610432486,
+       "grad_norm": 62.38654708862305,
+       "learning_rate": 4.880000000000001e-06,
+       "loss": 3.3841,
+       "step": 250
+     },
+     {
+       "epoch": 0.36315615714757343,
+       "grad_norm": 49.35546875,
+       "learning_rate": 5.380000000000001e-06,
+       "loss": 2.8367,
+       "step": 275
+     },
+     {
+       "epoch": 0.3961703532518983,
+       "grad_norm": 77.38804626464844,
+       "learning_rate": 5.8800000000000005e-06,
+       "loss": 2.9174,
+       "step": 300
+     },
+     {
+       "epoch": 0.4291845493562232,
+       "grad_norm": 78.50695037841797,
+       "learning_rate": 6.380000000000001e-06,
+       "loss": 2.6359,
+       "step": 325
+     },
+     {
+       "epoch": 0.462198745460548,
+       "grad_norm": 55.40019607543945,
+       "learning_rate": 6.88e-06,
+       "loss": 2.4471,
+       "step": 350
+     },
+     {
+       "epoch": 0.4952129415648729,
+       "grad_norm": 55.860809326171875,
+       "learning_rate": 7.3800000000000005e-06,
+       "loss": 2.6426,
+       "step": 375
+     },
+     {
+       "epoch": 0.5282271376691977,
+       "grad_norm": 51.24520492553711,
+       "learning_rate": 7.88e-06,
+       "loss": 2.4008,
+       "step": 400
+     },
+     {
+       "epoch": 0.5282271376691977,
+       "eval_loss": 0.6202582716941833,
+       "eval_runtime": 422.1868,
+       "eval_samples_per_second": 1.795,
+       "eval_steps_per_second": 0.898,
+       "eval_wer": 0.5096921322690992,
+       "step": 400
+     },
+     {
+       "epoch": 0.5612413337735226,
+       "grad_norm": 70.62708282470703,
+       "learning_rate": 8.380000000000001e-06,
+       "loss": 2.4166,
+       "step": 425
+     },
+     {
+       "epoch": 0.5942555298778475,
+       "grad_norm": 54.51411437988281,
+       "learning_rate": 8.880000000000001e-06,
+       "loss": 2.7424,
+       "step": 450
+     },
+     {
+       "epoch": 0.6272697259821723,
+       "grad_norm": 51.03986740112305,
+       "learning_rate": 9.38e-06,
+       "loss": 2.2441,
+       "step": 475
+     },
+     {
+       "epoch": 0.6602839220864972,
+       "grad_norm": 64.73434448242188,
+       "learning_rate": 9.88e-06,
+       "loss": 2.1702,
+       "step": 500
+     },
+     {
+       "epoch": 0.6932981181908221,
+       "grad_norm": 58.62995147705078,
+       "learning_rate": 9.957777777777779e-06,
+       "loss": 2.3904,
+       "step": 525
+     },
+     {
+       "epoch": 0.7263123142951469,
+       "grad_norm": 61.52521514892578,
+       "learning_rate": 9.902222222222223e-06,
+       "loss": 2.2456,
+       "step": 550
+     },
+     {
+       "epoch": 0.7593265103994717,
+       "grad_norm": 45.40699768066406,
+       "learning_rate": 9.846666666666668e-06,
+       "loss": 2.3398,
+       "step": 575
+     },
+     {
+       "epoch": 0.7923407065037966,
+       "grad_norm": 52.031795501708984,
+       "learning_rate": 9.791111111111112e-06,
+       "loss": 2.2459,
+       "step": 600
+     },
+     {
+       "epoch": 0.7923407065037966,
+       "eval_loss": 0.5506373643875122,
+       "eval_runtime": 424.3675,
+       "eval_samples_per_second": 1.786,
+       "eval_steps_per_second": 0.893,
+       "eval_wer": 0.4643671607753706,
+       "step": 600
+     },
+     {
+       "epoch": 0.8253549026081215,
+       "grad_norm": 51.51828384399414,
+       "learning_rate": 9.735555555555556e-06,
+       "loss": 2.1322,
+       "step": 625
+     },
+     {
+       "epoch": 0.8583690987124464,
+       "grad_norm": 47.97977066040039,
+       "learning_rate": 9.68e-06,
+       "loss": 2.3046,
+       "step": 650
+     },
+     {
+       "epoch": 0.8913832948167713,
+       "grad_norm": 47.63246154785156,
+       "learning_rate": 9.624444444444445e-06,
+       "loss": 2.0285,
+       "step": 675
+     },
+     {
+       "epoch": 0.924397490921096,
+       "grad_norm": 36.78068161010742,
+       "learning_rate": 9.56888888888889e-06,
+       "loss": 1.9158,
+       "step": 700
+     },
+     {
+       "epoch": 0.9574116870254209,
+       "grad_norm": 57.10271453857422,
+       "learning_rate": 9.513333333333334e-06,
+       "loss": 2.034,
+       "step": 725
+     },
+     {
+       "epoch": 0.9904258831297458,
+       "grad_norm": 35.37094497680664,
+       "learning_rate": 9.457777777777778e-06,
+       "loss": 1.92,
+       "step": 750
+     },
+     {
+       "epoch": 1.0224496533509408,
+       "grad_norm": 59.17182159423828,
+       "learning_rate": 9.402222222222222e-06,
+       "loss": 1.5202,
+       "step": 775
+     },
+     {
+       "epoch": 1.0554638494552657,
+       "grad_norm": 48.809059143066406,
+       "learning_rate": 9.346666666666666e-06,
+       "loss": 1.3319,
+       "step": 800
+     },
+     {
+       "epoch": 1.0554638494552657,
+       "eval_loss": 0.5028851628303528,
+       "eval_runtime": 423.0433,
+       "eval_samples_per_second": 1.792,
+       "eval_steps_per_second": 0.896,
+       "eval_wer": 0.4016533637400228,
+       "step": 800
+     },
+     {
+       "epoch": 1.0884780455595906,
+       "grad_norm": 59.115562438964844,
+       "learning_rate": 9.291111111111112e-06,
+       "loss": 1.2718,
+       "step": 825
+     },
+     {
+       "epoch": 1.1214922416639155,
+       "grad_norm": 22.62116050720215,
+       "learning_rate": 9.235555555555556e-06,
+       "loss": 1.4598,
+       "step": 850
+     },
+     {
+       "epoch": 1.1545064377682404,
+       "grad_norm": 37.58869934082031,
+       "learning_rate": 9.180000000000002e-06,
+       "loss": 1.33,
+       "step": 875
+     },
+     {
+       "epoch": 1.1875206338725652,
+       "grad_norm": 41.18476867675781,
+       "learning_rate": 9.124444444444444e-06,
+       "loss": 1.4802,
+       "step": 900
+     },
+     {
+       "epoch": 1.2205348299768901,
+       "grad_norm": 34.091915130615234,
+       "learning_rate": 9.06888888888889e-06,
+       "loss": 1.325,
+       "step": 925
+     },
+     {
+       "epoch": 1.253549026081215,
+       "grad_norm": 43.94548416137695,
+       "learning_rate": 9.013333333333334e-06,
+       "loss": 1.3758,
+       "step": 950
+     },
+     {
+       "epoch": 1.2865632221855399,
+       "grad_norm": 50.9454231262207,
+       "learning_rate": 8.957777777777778e-06,
+       "loss": 1.4197,
+       "step": 975
+     },
+     {
+       "epoch": 1.3195774182898647,
+       "grad_norm": 39.400634765625,
+       "learning_rate": 8.902222222222224e-06,
+       "loss": 1.5588,
+       "step": 1000
+     },
+     {
+       "epoch": 1.3195774182898647,
+       "eval_loss": 0.4675042927265167,
+       "eval_runtime": 426.8283,
+       "eval_samples_per_second": 1.776,
+       "eval_steps_per_second": 0.888,
+       "eval_wer": 0.3896807297605473,
+       "step": 1000
+     },
+     {
+       "epoch": 1.3525916143941896,
+       "grad_norm": 64.05392456054688,
+       "learning_rate": 8.846666666666668e-06,
+       "loss": 1.4914,
+       "step": 1025
+     },
+     {
+       "epoch": 1.3856058104985143,
+       "grad_norm": 38.36800765991211,
+       "learning_rate": 8.791111111111112e-06,
+       "loss": 1.3193,
+       "step": 1050
+     },
+     {
+       "epoch": 1.4186200066028392,
+       "grad_norm": 56.90019226074219,
+       "learning_rate": 8.735555555555556e-06,
+       "loss": 1.3192,
+       "step": 1075
+     },
+     {
+       "epoch": 1.451634202707164,
+       "grad_norm": 41.730682373046875,
+       "learning_rate": 8.68e-06,
+       "loss": 1.4852,
+       "step": 1100
+     },
+     {
+       "epoch": 1.484648398811489,
+       "grad_norm": 37.495849609375,
+       "learning_rate": 8.624444444444446e-06,
+       "loss": 1.244,
+       "step": 1125
+     },
+     {
+       "epoch": 1.5176625949158138,
+       "grad_norm": 34.61660385131836,
+       "learning_rate": 8.56888888888889e-06,
+       "loss": 1.3055,
+       "step": 1150
+     },
+     {
+       "epoch": 1.5506767910201387,
+       "grad_norm": 32.87508773803711,
+       "learning_rate": 8.513333333333335e-06,
+       "loss": 1.4434,
+       "step": 1175
+     },
+     {
+       "epoch": 1.5836909871244635,
+       "grad_norm": 43.57270812988281,
+       "learning_rate": 8.457777777777778e-06,
+       "loss": 1.2908,
+       "step": 1200
+     },
+     {
+       "epoch": 1.5836909871244635,
+       "eval_loss": 0.45341047644615173,
+       "eval_runtime": 422.1999,
+       "eval_samples_per_second": 1.795,
+       "eval_steps_per_second": 0.898,
+       "eval_wer": 0.3727194982896237,
+       "step": 1200
+     },
+     {
+       "epoch": 1.6167051832287884,
+       "grad_norm": 46.83863830566406,
+       "learning_rate": 8.402222222222223e-06,
+       "loss": 1.3848,
+       "step": 1225
+     },
+     {
+       "epoch": 1.649719379333113,
+       "grad_norm": 46.93707275390625,
+       "learning_rate": 8.346666666666668e-06,
+       "loss": 1.2536,
+       "step": 1250
+     },
+     {
+       "epoch": 1.682733575437438,
+       "grad_norm": 44.47553253173828,
+       "learning_rate": 8.291111111111112e-06,
+       "loss": 1.3358,
+       "step": 1275
+     },
+     {
+       "epoch": 1.7157477715417628,
+       "grad_norm": 35.15113830566406,
+       "learning_rate": 8.235555555555557e-06,
+       "loss": 1.1214,
+       "step": 1300
+     },
+     {
+       "epoch": 1.7487619676460877,
+       "grad_norm": 27.538331985473633,
+       "learning_rate": 8.18e-06,
+       "loss": 1.097,
+       "step": 1325
+     },
+     {
+       "epoch": 1.7817761637504126,
+       "grad_norm": 35.05278015136719,
+       "learning_rate": 8.124444444444445e-06,
+       "loss": 1.1746,
+       "step": 1350
+     },
+     {
+       "epoch": 1.8147903598547375,
+       "grad_norm": 50.673095703125,
+       "learning_rate": 8.06888888888889e-06,
+       "loss": 1.397,
+       "step": 1375
+     },
+     {
+       "epoch": 1.8478045559590623,
+       "grad_norm": 38.40850067138672,
+       "learning_rate": 8.013333333333333e-06,
+       "loss": 1.4258,
+       "step": 1400
+     },
+     {
+       "epoch": 1.8478045559590623,
+       "eval_loss": 0.4354272186756134,
+       "eval_runtime": 431.6561,
+       "eval_samples_per_second": 1.756,
+       "eval_steps_per_second": 0.878,
+       "eval_wer": 0.38982326111744586,
+       "step": 1400
+     },
+     {
+       "epoch": 1.8808187520633872,
+       "grad_norm": 27.05866050720215,
+       "learning_rate": 7.957777777777779e-06,
+       "loss": 1.4042,
+       "step": 1425
+     },
+     {
+       "epoch": 1.913832948167712,
+       "grad_norm": 36.17206573486328,
+       "learning_rate": 7.902222222222223e-06,
+       "loss": 1.0702,
+       "step": 1450
+     },
+     {
+       "epoch": 1.946847144272037,
+       "grad_norm": 40.80569076538086,
+       "learning_rate": 7.846666666666667e-06,
+       "loss": 1.2552,
+       "step": 1475
+     },
+     {
+       "epoch": 1.9798613403763619,
+       "grad_norm": 34.74424362182617,
+       "learning_rate": 7.791111111111111e-06,
+       "loss": 1.349,
+       "step": 1500
+     },
+     {
+       "epoch": 2.011885110597557,
+       "grad_norm": 18.330013275146484,
+       "learning_rate": 7.735555555555557e-06,
+       "loss": 1.0107,
+       "step": 1525
+     },
+     {
+       "epoch": 2.0448993067018817,
+       "grad_norm": 20.316329956054688,
+       "learning_rate": 7.680000000000001e-06,
+       "loss": 0.6744,
+       "step": 1550
+     },
+     {
+       "epoch": 2.0779135028062066,
+       "grad_norm": 56.3316764831543,
+       "learning_rate": 7.624444444444445e-06,
+       "loss": 0.6948,
+       "step": 1575
+     },
+     {
+       "epoch": 2.1109276989105314,
+       "grad_norm": 28.286052703857422,
+       "learning_rate": 7.56888888888889e-06,
+       "loss": 0.6383,
+       "step": 1600
+     },
+     {
+       "epoch": 2.1109276989105314,
+       "eval_loss": 0.44802892208099365,
+       "eval_runtime": 429.0328,
+       "eval_samples_per_second": 1.767,
+       "eval_steps_per_second": 0.883,
+       "eval_wer": 0.3600342075256556,
+       "step": 1600
+     },
+     {
+       "epoch": 2.1439418950148563,
+       "grad_norm": 33.71493148803711,
+       "learning_rate": 7.513333333333334e-06,
+       "loss": 0.5558,
+       "step": 1625
+     },
+     {
+       "epoch": 2.176956091119181,
+       "grad_norm": 29.385910034179688,
+       "learning_rate": 7.457777777777778e-06,
+       "loss": 0.6777,
+       "step": 1650
+     },
+     {
+       "epoch": 2.209970287223506,
+       "grad_norm": 20.755077362060547,
+       "learning_rate": 7.402222222222223e-06,
+       "loss": 0.6332,
+       "step": 1675
+     },
+     {
+       "epoch": 2.242984483327831,
+       "grad_norm": 37.17219543457031,
+       "learning_rate": 7.346666666666668e-06,
+       "loss": 0.6242,
+       "step": 1700
+     },
+     {
+       "epoch": 2.275998679432156,
+       "grad_norm": 37.963409423828125,
+       "learning_rate": 7.291111111111112e-06,
+       "loss": 0.7322,
+       "step": 1725
+     },
+     {
+       "epoch": 2.3090128755364807,
+       "grad_norm": 25.863115310668945,
+       "learning_rate": 7.235555555555556e-06,
+       "loss": 0.697,
+       "step": 1750
+     },
+     {
+       "epoch": 2.3420270716408056,
+       "grad_norm": 25.60062026977539,
+       "learning_rate": 7.180000000000001e-06,
+       "loss": 0.6266,
+       "step": 1775
+     },
+     {
+       "epoch": 2.3750412677451305,
+       "grad_norm": 20.695463180541992,
+       "learning_rate": 7.124444444444445e-06,
+       "loss": 0.6079,
+       "step": 1800
+     },
+     {
+       "epoch": 2.3750412677451305,
+       "eval_loss": 0.4444292187690735,
+       "eval_runtime": 421.917,
+       "eval_samples_per_second": 1.797,
+       "eval_steps_per_second": 0.898,
+       "eval_wer": 0.3482041049030787,
+       "step": 1800
+     },
+     {
+       "epoch": 2.4080554638494553,
+       "grad_norm": 26.363622665405273,
+       "learning_rate": 7.06888888888889e-06,
+       "loss": 0.663,
+       "step": 1825
+     },
+     {
+       "epoch": 2.4410696599537802,
+       "grad_norm": 31.212791442871094,
+       "learning_rate": 7.0133333333333345e-06,
+       "loss": 0.655,
+       "step": 1850
+     },
+     {
+       "epoch": 2.474083856058105,
+       "grad_norm": 39.38019561767578,
+       "learning_rate": 6.9577777777777785e-06,
+       "loss": 0.7364,
+       "step": 1875
+     },
+     {
+       "epoch": 2.50709805216243,
+       "grad_norm": 26.713680267333984,
+       "learning_rate": 6.902222222222223e-06,
+       "loss": 0.647,
+       "step": 1900
+     },
+     {
+       "epoch": 2.540112248266755,
+       "grad_norm": 24.708005905151367,
+       "learning_rate": 6.846666666666667e-06,
+       "loss": 0.6818,
+       "step": 1925
+     },
+     {
+       "epoch": 2.5731264443710797,
+       "grad_norm": 22.732967376708984,
+       "learning_rate": 6.7911111111111115e-06,
+       "loss": 0.6711,
+       "step": 1950
+     },
+     {
+       "epoch": 2.606140640475404,
+       "grad_norm": 23.73015022277832,
+       "learning_rate": 6.735555555555556e-06,
+       "loss": 0.6492,
+       "step": 1975
+     },
+     {
+       "epoch": 2.6391548365797295,
+       "grad_norm": 30.428665161132812,
+       "learning_rate": 6.680000000000001e-06,
+       "loss": 0.5709,
+       "step": 2000
+     },
+     {
+       "epoch": 2.6391548365797295,
+       "eval_loss": 0.4367344081401825,
+       "eval_runtime": 423.1943,
+       "eval_samples_per_second": 1.791,
+       "eval_steps_per_second": 0.896,
+       "eval_wer": 0.3405074116305587,
+       "step": 2000
+     },
+     {
+       "epoch": 2.6391548365797295,
+       "step": 2000,
+       "total_flos": 1.631539005161472e+19,
+       "train_loss": 1.9525959782600404,
+       "train_runtime": 8300.7195,
+       "train_samples_per_second": 4.819,
+       "train_steps_per_second": 0.602
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 5000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 7,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 3
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.631539005161472e+19,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
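The state file above records the schedule actually used: logging every 25 steps, evaluation and checkpointing every 200 steps, a 5000-step budget, a per-device batch size of 2, and early stopping with patience 3 (the best eval loss of 0.4354 was reached at step 1400, and training halted at step 2000 after three evaluations without improvement). A hedged sketch of training arguments consistent with those values follows; anything not recorded in the state file (output path, learning rate schedule, generation settings) is an assumption, since the training script is not part of this commit:

```python
# Reconstruction sketch of the configuration implied by trainer_state.json.
# Values marked "assumed" are not recorded in the state file.
from transformers import Seq2SeqTrainingArguments, EarlyStoppingCallback

training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-medium-bemgen-100f50m-model",  # assumed local path
    per_device_train_batch_size=2,      # "train_batch_size": 2
    max_steps=5000,                     # "max_steps": 5000
    logging_steps=25,                   # "logging_steps": 25
    eval_strategy="steps",
    eval_steps=200,                     # "eval_steps": 200
    save_steps=200,                     # "save_steps": 200
    load_best_model_at_end=True,        # needed to track "best_model_checkpoint"
    metric_for_best_model="eval_loss",  # "best_metric" matches the lowest eval_loss
    greater_is_better=False,
    predict_with_generate=True,         # assumed, usual for WER evaluation of Whisper
)

# Matches "early_stopping_patience": 3; with evaluation every 200 steps and no
# improvement after step 1400, this stops training at step 2000 as logged above.
early_stopping = EarlyStoppingCallback(early_stopping_patience=3)

# These would be passed to a Seq2SeqTrainer together with the Whisper model,
# data collator, and the prepared bemgen train/eval splits (not shown here).
```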