csikasote committed
Commit ca6af2f · verified · 1 Parent(s): 7e849c3

End of training

README.md CHANGED
@@ -3,6 +3,9 @@ library_name: transformers
 license: cc-by-nc-4.0
 base_model: mms-meta/mms-zeroshot-300m
 tags:
+ - automatic-speech-recognition
+ - BembaSpeech
+ - mms
 - generated_from_trainer
 metrics:
 - wer
@@ -16,10 +19,10 @@ should probably proofread and complete it, then remove this comment. -->

 # mms-zeroshot-300m-bembaspeech-model

- This model is a fine-tuned version of [mms-meta/mms-zeroshot-300m](https://huggingface.co/mms-meta/mms-zeroshot-300m) on an unknown dataset.
+ This model is a fine-tuned version of [mms-meta/mms-zeroshot-300m](https://huggingface.co/mms-meta/mms-zeroshot-300m) on the BEMBASPEECH - BEM dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.2050
- - Wer: 0.3964
+ - Loss: 0.2038
+ - Wer: 0.4007

 ## Model description

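The added `automatic-speech-recognition` tag means the checkpoint can be driven directly through the `transformers` ASR pipeline. A minimal inference sketch, assuming the model lives at `csikasote/mms-zeroshot-300m-bembaspeech-model` (inferred from the checkpoint path further down, not stated in this commit) and a 16 kHz mono recording:

```python
# Minimal ASR usage sketch; the repo id and audio file name are assumptions.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/mms-zeroshot-300m-bembaspeech-model",  # assumed repo id
)

# Accepts a path to an audio file (decoded/resampled internally) or a raw array.
print(asr("bemba_sample.wav")["text"])
```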
adapter.bem.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93d7a20e47045ab4c6aedf559296976f43295040ca31afe23bdcf6a3955bb349
+ size 3586804
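`adapter.bem.safetensors` is the language adapter written by the MMS-style fine-tuning run (`bem` = Bemba). If the repository keeps the standard MMS adapter layout, the adapter can also be selected explicitly instead of going through the pipeline; a sketch under that assumption (repo id and audio path are again placeholders):

```python
import torch
import librosa
from transformers import AutoProcessor, Wav2Vec2ForCTC

model_id = "csikasote/mms-zeroshot-300m-bembaspeech-model"  # assumed repo id

processor = AutoProcessor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# Point tokenizer and model at the Bemba ("bem") adapter. These calls exist for
# MMS checkpoints in recent transformers releases and assume the repo ships the
# per-language vocab/adapter files they expect.
processor.tokenizer.set_target_lang("bem")
model.load_adapter("bem")

speech, _ = librosa.load("bemba_sample.wav", sr=16_000)  # placeholder audio file
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1))[0])
```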
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 3.6542515811665495,
+   "eval_loss": 0.20377855002880096,
+   "eval_runtime": 82.3088,
+   "eval_samples": 1492,
+   "eval_samples_per_second": 18.127,
+   "eval_steps_per_second": 2.272,
+   "eval_wer": 0.40072670646249675,
+   "total_flos": 8.128487028902814e+18,
+   "train_loss": 0.7083046766427847,
+   "train_runtime": 5849.6566,
+   "train_samples": 11377,
+   "train_samples_per_second": 58.347,
+   "train_steps_per_second": 7.298
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.6542515811665495,
+   "eval_loss": 0.20377855002880096,
+   "eval_runtime": 82.3088,
+   "eval_samples": 1492,
+   "eval_samples_per_second": 18.127,
+   "eval_steps_per_second": 2.272,
+   "eval_wer": 0.40072670646249675
+ }
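`eval_wer` is the word error rate over the 1492 held-out samples. The same metric can be recomputed from model transcripts and reference texts with the `evaluate` library (backed by `jiwer`); a small sketch with made-up strings standing in for real Bemba transcripts:

```python
import evaluate  # pip install evaluate jiwer

wer_metric = evaluate.load("wer")

# Placeholder predictions/references purely to show the call signature; the
# number in eval_results.json comes from the real evaluation split.
predictions = ["ifintu fisuma sana", "umwana aleisa"]
references = ["ifintu fisuma sana", "umwana aleisa mailo"]

print(wer_metric.compute(predictions=predictions, references=references))
```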
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 3.6542515811665495,
+   "total_flos": 8.128487028902814e+18,
+   "train_loss": 0.7083046766427847,
+   "train_runtime": 5849.6566,
+   "train_samples": 11377,
+   "train_samples_per_second": 58.347,
+   "train_steps_per_second": 7.298
+ }
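Note that the throughput figures appear to be computed against the planned run length (30 epochs, 42690 optimizer steps, as recorded in trainer_state.json below) rather than the 5200 steps actually executed before early stopping; the arithmetic is consistent:

```python
# Consistency check using values from train_results.json / trainer_state.json.
train_samples = 11377
planned_epochs = 30
max_steps = 42690
train_runtime = 5849.6566  # seconds

print(train_samples * planned_epochs / train_runtime)  # ~58.35 (reported 58.347)
print(max_steps / train_runtime)                       # ~7.30  (reported 7.298)
```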
trainer_state.json ADDED
@@ -0,0 +1,355 @@
+ {
+   "best_metric": 0.20377855002880096,
+   "best_model_checkpoint": "/scratch/skscla001/results/mms-zeroshot-300m-bembaspeech-model/checkpoint-4600",
+   "epoch": 3.6542515811665495,
+   "eval_steps": 200,
+   "global_step": 5200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.14054813773717498,
+       "eval_loss": 2.2802751064300537,
+       "eval_runtime": 82.1473,
+       "eval_samples_per_second": 18.162,
+       "eval_steps_per_second": 2.276,
+       "eval_wer": 1.0,
+       "step": 200
+     },
+     {
+       "epoch": 0.28109627547434995,
+       "eval_loss": 0.2944619655609131,
+       "eval_runtime": 82.5798,
+       "eval_samples_per_second": 18.067,
+       "eval_steps_per_second": 2.264,
+       "eval_wer": 0.5100787265334371,
+       "step": 400
+     },
+     {
+       "epoch": 0.35137034434293746,
+       "grad_norm": 0.8652539849281311,
+       "learning_rate": 0.00029720356891289033,
+       "loss": 2.741,
+       "step": 500
+     },
+     {
+       "epoch": 0.42164441321152496,
+       "eval_loss": 0.2641391158103943,
+       "eval_runtime": 81.2987,
+       "eval_samples_per_second": 18.352,
+       "eval_steps_per_second": 2.3,
+       "eval_wer": 0.4834328229085561,
+       "step": 600
+     },
+     {
+       "epoch": 0.5621925509486999,
+       "eval_loss": 0.26114964485168457,
+       "eval_runtime": 81.2645,
+       "eval_samples_per_second": 18.36,
+       "eval_steps_per_second": 2.301,
+       "eval_wer": 0.47434899212734666,
+       "step": 800
+     },
+     {
+       "epoch": 0.7027406886858749,
+       "grad_norm": 1.3384147882461548,
+       "learning_rate": 0.00029368161540267664,
+       "loss": 0.5962,
+       "step": 1000
+     },
+     {
+       "epoch": 0.7027406886858749,
+       "eval_loss": 0.2608044445514679,
+       "eval_runtime": 81.9453,
+       "eval_samples_per_second": 18.207,
+       "eval_steps_per_second": 2.282,
+       "eval_wer": 0.4830867722121291,
+       "step": 1000
+     },
+     {
+       "epoch": 0.8432888264230499,
+       "eval_loss": 0.24766217172145844,
+       "eval_runtime": 81.5485,
+       "eval_samples_per_second": 18.296,
+       "eval_steps_per_second": 2.293,
+       "eval_wer": 0.4562678432390345,
+       "step": 1200
+     },
+     {
+       "epoch": 0.9838369641602249,
+       "eval_loss": 0.24071051180362701,
+       "eval_runtime": 81.0412,
+       "eval_samples_per_second": 18.41,
+       "eval_steps_per_second": 2.307,
+       "eval_wer": 0.4567004066095683,
+       "step": 1400
+     },
+     {
+       "epoch": 1.0541110330288124,
+       "grad_norm": 0.5781381130218506,
+       "learning_rate": 0.000290159661892463,
+       "loss": 0.536,
+       "step": 1500
+     },
+     {
+       "epoch": 1.1243851018973998,
+       "eval_loss": 0.23427943885326385,
+       "eval_runtime": 82.3276,
+       "eval_samples_per_second": 18.123,
+       "eval_steps_per_second": 2.271,
+       "eval_wer": 0.44121463794445887,
+       "step": 1600
+     },
+     {
+       "epoch": 1.264933239634575,
+       "eval_loss": 0.23070427775382996,
+       "eval_runtime": 82.28,
+       "eval_samples_per_second": 18.133,
+       "eval_steps_per_second": 2.273,
+       "eval_wer": 0.44225279003373996,
+       "step": 1800
+     },
+     {
+       "epoch": 1.4054813773717498,
+       "grad_norm": 0.5301165580749512,
+       "learning_rate": 0.0002866377083822493,
+       "loss": 0.5221,
+       "step": 2000
+     },
+     {
+       "epoch": 1.4054813773717498,
+       "eval_loss": 0.22523300349712372,
+       "eval_runtime": 81.337,
+       "eval_samples_per_second": 18.343,
+       "eval_steps_per_second": 2.299,
+       "eval_wer": 0.4348127000605589,
+       "step": 2000
+     },
+     {
+       "epoch": 1.5460295151089247,
+       "eval_loss": 0.22277304530143738,
+       "eval_runtime": 81.5619,
+       "eval_samples_per_second": 18.293,
+       "eval_steps_per_second": 2.293,
+       "eval_wer": 0.4325633705337832,
+       "step": 2200
+     },
+     {
+       "epoch": 1.6865776528460998,
+       "eval_loss": 0.21623817086219788,
+       "eval_runtime": 81.5646,
+       "eval_samples_per_second": 18.292,
+       "eval_steps_per_second": 2.293,
+       "eval_wer": 0.4252963059088156,
+       "step": 2400
+     },
+     {
+       "epoch": 1.7568517217146873,
+       "grad_norm": 0.5294317007064819,
+       "learning_rate": 0.00028311575487203566,
+       "loss": 0.5027,
+       "step": 2500
+     },
+     {
+       "epoch": 1.8271257905832747,
+       "eval_loss": 0.22000491619110107,
+       "eval_runtime": 81.31,
+       "eval_samples_per_second": 18.35,
+       "eval_steps_per_second": 2.3,
+       "eval_wer": 0.4188078553508089,
+       "step": 2600
+     },
+     {
+       "epoch": 1.9676739283204498,
+       "eval_loss": 0.2131248265504837,
+       "eval_runtime": 81.3644,
+       "eval_samples_per_second": 18.337,
+       "eval_steps_per_second": 2.298,
+       "eval_wer": 0.4142226836231508,
+       "step": 2800
+     },
+     {
+       "epoch": 2.1082220660576247,
+       "grad_norm": 0.3984699547290802,
+       "learning_rate": 0.000279593801361822,
+       "loss": 0.4818,
+       "step": 3000
+     },
+     {
+       "epoch": 2.1082220660576247,
+       "eval_loss": 0.22807464003562927,
+       "eval_runtime": 81.2983,
+       "eval_samples_per_second": 18.352,
+       "eval_steps_per_second": 2.3,
+       "eval_wer": 0.42806471148023184,
+       "step": 3000
+     },
+     {
+       "epoch": 2.2487702037947996,
+       "eval_loss": 0.21783553063869476,
+       "eval_runtime": 81.2747,
+       "eval_samples_per_second": 18.357,
+       "eval_steps_per_second": 2.301,
+       "eval_wer": 0.41465524699368456,
+       "step": 3200
+     },
+     {
+       "epoch": 2.3893183415319745,
+       "eval_loss": 0.2122805118560791,
+       "eval_runtime": 81.1791,
+       "eval_samples_per_second": 18.379,
+       "eval_steps_per_second": 2.304,
+       "eval_wer": 0.41552037373475215,
+       "step": 3400
+     },
+     {
+       "epoch": 2.459592410400562,
+       "grad_norm": 0.6205651760101318,
+       "learning_rate": 0.00027607184785160833,
+       "loss": 0.4619,
+       "step": 3500
+     },
+     {
+       "epoch": 2.52986647926915,
+       "eval_loss": 0.21417662501335144,
+       "eval_runtime": 81.5176,
+       "eval_samples_per_second": 18.303,
+       "eval_steps_per_second": 2.294,
+       "eval_wer": 0.40790725841335757,
+       "step": 3600
+     },
+     {
+       "epoch": 2.6704146170063248,
+       "eval_loss": 0.21559232473373413,
+       "eval_runtime": 81.1016,
+       "eval_samples_per_second": 18.397,
+       "eval_steps_per_second": 2.306,
+       "eval_wer": 0.40107275715892376,
+       "step": 3800
+     },
+     {
+       "epoch": 2.8109627547434997,
+       "grad_norm": 0.5996519923210144,
+       "learning_rate": 0.0002725498943413947,
+       "loss": 0.464,
+       "step": 4000
+     },
+     {
+       "epoch": 2.8109627547434997,
+       "eval_loss": 0.2071918100118637,
+       "eval_runtime": 82.6744,
+       "eval_samples_per_second": 18.047,
+       "eval_steps_per_second": 2.262,
+       "eval_wer": 0.40089973181071026,
+       "step": 4000
+     },
+     {
+       "epoch": 2.9515108924806746,
+       "eval_loss": 0.21284320950508118,
+       "eval_runtime": 81.7324,
+       "eval_samples_per_second": 18.255,
+       "eval_steps_per_second": 2.288,
+       "eval_wer": 0.401332295181244,
+       "step": 4200
+     },
+     {
+       "epoch": 3.0920590302178494,
+       "eval_loss": 0.20559127628803253,
+       "eval_runtime": 81.3555,
+       "eval_samples_per_second": 18.339,
+       "eval_steps_per_second": 2.299,
+       "eval_wer": 0.3982178389134008,
+       "step": 4400
+     },
+     {
+       "epoch": 3.162333099086437,
+       "grad_norm": 0.6319628357887268,
+       "learning_rate": 0.000269027940831181,
+       "loss": 0.4464,
+       "step": 4500
+     },
+     {
+       "epoch": 3.232607167955025,
+       "eval_loss": 0.20377855002880096,
+       "eval_runtime": 82.1978,
+       "eval_samples_per_second": 18.151,
+       "eval_steps_per_second": 2.275,
+       "eval_wer": 0.40072670646249675,
+       "step": 4600
+     },
+     {
+       "epoch": 3.3731553056921997,
+       "eval_loss": 0.20886844396591187,
+       "eval_runtime": 81.607,
+       "eval_samples_per_second": 18.283,
+       "eval_steps_per_second": 2.291,
+       "eval_wer": 0.39873691495804137,
+       "step": 4800
+     },
+     {
+       "epoch": 3.5137034434293746,
+       "grad_norm": 0.6382594704627991,
+       "learning_rate": 0.0002655059873209673,
+       "loss": 0.4418,
+       "step": 5000
+     },
+     {
+       "epoch": 3.5137034434293746,
+       "eval_loss": 0.20426709949970245,
+       "eval_runtime": 81.6377,
+       "eval_samples_per_second": 18.276,
+       "eval_steps_per_second": 2.291,
+       "eval_wer": 0.40089973181071026,
+       "step": 5000
+     },
+     {
+       "epoch": 3.6542515811665495,
+       "eval_loss": 0.2049737125635147,
+       "eval_runtime": 81.6534,
+       "eval_samples_per_second": 18.272,
+       "eval_steps_per_second": 2.29,
+       "eval_wer": 0.3964010727571589,
+       "step": 5200
+     },
+     {
+       "epoch": 3.6542515811665495,
+       "step": 5200,
+       "total_flos": 8.128487028902814e+18,
+       "train_loss": 0.7083046766427847,
+       "train_runtime": 5849.6566,
+       "train_samples_per_second": 58.347,
+       "train_steps_per_second": 7.298
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 42690,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 30,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 3
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8.128487028902814e+18,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
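The trainer state pins down most of the run configuration: evaluation and checkpointing every 200 steps, logging every 500, batch size 8, a 30-epoch budget cut short by early stopping (patience 3 on the eval loss), and the best checkpoint at step 4600. A hedged reconstruction of the corresponding `TrainingArguments`; the learning rate is an assumption inferred from the logged schedule, and the model, datasets, collator, and metric function are not part of this commit:

```python
from transformers import TrainingArguments, EarlyStoppingCallback

# Settings read off trainer_state.json; learning_rate is an assumption
# (the logged schedule starts near 2.97e-4, consistent with roughly 3e-4
# decaying linearly over the planned 42690 steps).
training_args = TrainingArguments(
    output_dir="mms-zeroshot-300m-bembaspeech-model",
    per_device_train_batch_size=8,   # "train_batch_size": 8
    num_train_epochs=30,             # "num_train_epochs": 30
    learning_rate=3e-4,              # assumed
    eval_strategy="steps",
    eval_steps=200,                  # "eval_steps": 200
    save_steps=200,                  # "save_steps": 200
    logging_steps=500,               # "logging_steps": 500
    load_best_model_at_end=True,     # best checkpoint tracked at step 4600
    metric_for_best_model="loss",    # best_metric matches eval_loss
    greater_is_better=False,
)

# Matches the EarlyStoppingCallback args recorded under "stateful_callbacks".
early_stopping = EarlyStoppingCallback(
    early_stopping_patience=3,
    early_stopping_threshold=0.0,
)

# These two objects would then be passed to Trainer(...) together with the
# fine-tuned Wav2Vec2ForCTC model, the BembaSpeech splits, a CTC padding
# collator, and a WER compute_metrics function (none of which are in this commit).
```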