csikasote committed
Commit 0352d3a · verified · 1 Parent(s): 91c9023

End of training

README.md CHANGED
@@ -2,6 +2,9 @@
  license: cc-by-nc-4.0
  base_model: facebook/mms-1b-all
  tags:
+ - automatic-speech-recognition
+ - BembaSpeech
+ - mms
  - generated_from_trainer
  metrics:
  - wer
@@ -16,7 +19,7 @@ should probably proofread and complete it, then remove this comment. -->
  [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/cicasote/huggingface/runs/x8tbh9an)
  # mms-1b-bem-male-sv
 
- This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on an unknown dataset.
+ This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) on the BEMBASPEECH - BEM dataset.
  It achieves the following results on the evaluation set:
  - Loss: 0.1409
  - Wer: 0.3498
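
The updated card does not include a usage snippet, so here is a minimal inference sketch. It assumes the checkpoint is published as `csikasote/mms-1b-bem-male-sv` (inferred from the committer and the model name above, not confirmed by the diff) and that the fine-tuned weights load directly with `from_pretrained`; MMS checkpoints expect 16 kHz mono audio.

```python
# Hedged sketch: repo id is inferred from the commit author and model name,
# not confirmed by this commit.
import torch
import librosa
from transformers import AutoProcessor, Wav2Vec2ForCTC

repo_id = "csikasote/mms-1b-bem-male-sv"  # assumption
processor = AutoProcessor.from_pretrained(repo_id)
model = Wav2Vec2ForCTC.from_pretrained(repo_id)

# MMS models expect 16 kHz mono input.
speech, _ = librosa.load("example.wav", sr=16_000, mono=True)
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(ids)[0])
```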
adapter.default.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07f038f7297a4616de47c1ef4e3dbd758e40cebca64631682da8e4d280ef415f
+ size 8798532
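
The adapter weights live in Git LFS; the three lines above are only the pointer (content hash and byte size). A sketch of fetching the real file through `huggingface_hub` and checking it against the pointer follows; the repo id is again an assumption.

```python
# Sketch: download the LFS-backed adapter file and verify it against the
# pointer's sha256/size shown above. Repo id is an assumption.
import hashlib
from pathlib import Path
from huggingface_hub import hf_hub_download

path = Path(hf_hub_download(
    repo_id="csikasote/mms-1b-bem-male-sv",  # assumption
    filename="adapter.default.safetensors",
))
blob = path.read_bytes()
print("size ok:  ", len(blob) == 8798532)
print("sha256 ok:",
      hashlib.sha256(blob).hexdigest()
      == "07f038f7297a4616de47c1ef4e3dbd758e40cebca64631682da8e4d280ef415f")
```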
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "epoch": 5.0,
+   "eval_loss": 0.1408591866493225,
+   "eval_runtime": 127.4295,
+   "eval_samples": 1017,
+   "eval_samples_per_second": 7.981,
+   "eval_steps_per_second": 1.004,
+   "eval_wer": 0.3498427672955975,
+   "total_flos": 2.179329538942206e+19,
+   "train_loss": 0.6123242174173547,
+   "train_runtime": 13424.2744,
+   "train_samples": 7327,
+   "train_samples_per_second": 2.729,
+   "train_steps_per_second": 0.341
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.0,
+   "eval_loss": 0.1408591866493225,
+   "eval_runtime": 127.4295,
+   "eval_samples": 1017,
+   "eval_samples_per_second": 7.981,
+   "eval_steps_per_second": 1.004,
+   "eval_wer": 0.3498427672955975
+ }
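
The `eval_wer` above is a word error rate. A minimal sketch of computing such a score with the `evaluate` library is shown below; the exact text normalization applied during this evaluation is not visible in the commit, so treat it as illustrative only.

```python
# Illustrative only: the normalization used for the reported eval_wer is
# not shown in this commit; strings below are hypothetical placeholders.
import evaluate

wer_metric = evaluate.load("wer")
references = ["reference transcript one", "reference transcript two"]
predictions = ["reference transcript one", "reference transcript too"]
print(wer_metric.compute(predictions=predictions, references=references))
```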
runs/Aug31_23-08-11_cd623d704fb0/events.out.tfevents.1725159486.cd623d704fb0.8015.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:440228a6e6849eab25efa6b3ff916e210cec11cd054610e70f8a4a0a467ee336
+ size 406
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "epoch": 5.0,
+   "total_flos": 2.179329538942206e+19,
+   "train_loss": 0.6123242174173547,
+   "train_runtime": 13424.2744,
+   "train_samples": 7327,
+   "train_samples_per_second": 2.729,
+   "train_steps_per_second": 0.341
+ }
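
The throughput figures are internally consistent: samples/second is roughly `train_samples * epochs / train_runtime`, and steps/second is roughly `total_steps / train_runtime` (4,580 steps per trainer_state.json below). A quick check:

```python
# Quick consistency check of the reported throughput figures.
train_samples, epochs = 7327, 5.0
train_runtime = 13424.2744
total_steps = 4580  # from trainer_state.json below

print(round(train_samples * epochs / train_runtime, 3))  # ~2.729 samples/s
print(round(total_steps / train_runtime, 3))             # ~0.341 steps/s
```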
trainer_state.json ADDED
@@ -0,0 +1,303 @@
+ {
+   "best_metric": 0.1408591866493225,
+   "best_model_checkpoint": "./mms-1b-bem-male-sv/checkpoint-4400",
+   "epoch": 5.0,
+   "eval_steps": 200,
+   "global_step": 4580,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2183406113537118,
+       "eval_loss": 0.1927431970834732,
+       "eval_runtime": 127.6058,
+       "eval_samples_per_second": 7.97,
+       "eval_steps_per_second": 1.003,
+       "eval_wer": 0.4257075471698113,
+       "step": 200
+     },
+     {
+       "epoch": 0.4366812227074236,
+       "eval_loss": 0.1712573915719986,
+       "eval_runtime": 126.4823,
+       "eval_samples_per_second": 8.041,
+       "eval_steps_per_second": 1.012,
+       "eval_wer": 0.3884958071278826,
+       "step": 400
+     },
+     {
+       "epoch": 0.5458515283842795,
+       "grad_norm": 1.9703088998794556,
+       "learning_rate": 0.000992,
+       "loss": 2.0358,
+       "step": 500
+     },
+     {
+       "epoch": 0.6550218340611353,
+       "eval_loss": 0.17603513598442078,
+       "eval_runtime": 127.0067,
+       "eval_samples_per_second": 8.007,
+       "eval_steps_per_second": 1.008,
+       "eval_wer": 0.39072327044025157,
+       "step": 600
+     },
+     {
+       "epoch": 0.8733624454148472,
+       "eval_loss": 0.18193688988685608,
+       "eval_runtime": 126.7253,
+       "eval_samples_per_second": 8.025,
+       "eval_steps_per_second": 1.01,
+       "eval_wer": 0.4143081761006289,
+       "step": 800
+     },
+     {
+       "epoch": 1.091703056768559,
+       "grad_norm": 4.094836235046387,
+       "learning_rate": 0.0008784313725490196,
+       "loss": 0.519,
+       "step": 1000
+     },
+     {
+       "epoch": 1.091703056768559,
+       "eval_loss": 0.16109386086463928,
+       "eval_runtime": 127.0612,
+       "eval_samples_per_second": 8.004,
+       "eval_steps_per_second": 1.007,
+       "eval_wer": 0.38692348008385746,
+       "step": 1000
+     },
+     {
+       "epoch": 1.3100436681222707,
+       "eval_loss": 0.15501286089420319,
+       "eval_runtime": 126.7082,
+       "eval_samples_per_second": 8.026,
+       "eval_steps_per_second": 1.01,
+       "eval_wer": 0.3735587002096436,
+       "step": 1200
+     },
+     {
+       "epoch": 1.5283842794759825,
+       "eval_loss": 0.15383152663707733,
+       "eval_runtime": 127.2176,
+       "eval_samples_per_second": 7.994,
+       "eval_steps_per_second": 1.006,
+       "eval_wer": 0.3770964360587002,
+       "step": 1400
+     },
+     {
+       "epoch": 1.6375545851528384,
+       "grad_norm": 1.7256165742874146,
+       "learning_rate": 0.0007558823529411764,
+       "loss": 0.4764,
+       "step": 1500
+     },
+     {
+       "epoch": 1.7467248908296944,
+       "eval_loss": 0.1743510365486145,
+       "eval_runtime": 127.1222,
+       "eval_samples_per_second": 8.0,
+       "eval_steps_per_second": 1.007,
+       "eval_wer": 0.417583857442348,
+       "step": 1600
+     },
+     {
+       "epoch": 1.965065502183406,
+       "eval_loss": 0.15977127850055695,
+       "eval_runtime": 127.3519,
+       "eval_samples_per_second": 7.986,
+       "eval_steps_per_second": 1.005,
+       "eval_wer": 0.38836477987421386,
+       "step": 1800
+     },
+     {
+       "epoch": 2.183406113537118,
+       "grad_norm": 2.758544683456421,
+       "learning_rate": 0.0006333333333333333,
+       "loss": 0.4501,
+       "step": 2000
+     },
+     {
+       "epoch": 2.183406113537118,
+       "eval_loss": 0.15066786110401154,
+       "eval_runtime": 127.104,
+       "eval_samples_per_second": 8.001,
+       "eval_steps_per_second": 1.007,
+       "eval_wer": 0.3577044025157233,
+       "step": 2000
+     },
+     {
+       "epoch": 2.4017467248908297,
+       "eval_loss": 0.15350954234600067,
+       "eval_runtime": 127.7067,
+       "eval_samples_per_second": 7.964,
+       "eval_steps_per_second": 1.002,
+       "eval_wer": 0.37631027253668764,
+       "step": 2200
+     },
+     {
+       "epoch": 2.6200873362445414,
+       "eval_loss": 0.15018954873085022,
+       "eval_runtime": 127.3689,
+       "eval_samples_per_second": 7.985,
+       "eval_steps_per_second": 1.005,
+       "eval_wer": 0.36491090146750527,
+       "step": 2400
+     },
+     {
+       "epoch": 2.7292576419213974,
+       "grad_norm": 0.7292295694351196,
+       "learning_rate": 0.0005107843137254902,
+       "loss": 0.4422,
+       "step": 2500
+     },
+     {
+       "epoch": 2.8384279475982535,
+       "eval_loss": 0.14573481678962708,
+       "eval_runtime": 127.0532,
+       "eval_samples_per_second": 8.005,
+       "eval_steps_per_second": 1.007,
+       "eval_wer": 0.35023584905660377,
+       "step": 2600
+     },
+     {
+       "epoch": 3.056768558951965,
+       "eval_loss": 0.1484854817390442,
+       "eval_runtime": 128.0576,
+       "eval_samples_per_second": 7.942,
+       "eval_steps_per_second": 1.0,
+       "eval_wer": 0.3579664570230608,
+       "step": 2800
+     },
+     {
+       "epoch": 3.2751091703056767,
+       "grad_norm": 0.6407122015953064,
+       "learning_rate": 0.0003884803921568628,
+       "loss": 0.4217,
+       "step": 3000
+     },
+     {
+       "epoch": 3.2751091703056767,
+       "eval_loss": 0.148036390542984,
+       "eval_runtime": 128.302,
+       "eval_samples_per_second": 7.927,
+       "eval_steps_per_second": 0.998,
+       "eval_wer": 0.3546907756813417,
+       "step": 3000
+     },
+     {
+       "epoch": 3.493449781659389,
+       "eval_loss": 0.14975149929523468,
+       "eval_runtime": 127.0597,
+       "eval_samples_per_second": 8.004,
+       "eval_steps_per_second": 1.007,
+       "eval_wer": 0.3666142557651992,
+       "step": 3200
+     },
+     {
+       "epoch": 3.7117903930131004,
+       "eval_loss": 0.14578010141849518,
+       "eval_runtime": 127.8692,
+       "eval_samples_per_second": 7.953,
+       "eval_steps_per_second": 1.001,
+       "eval_wer": 0.3494496855345912,
+       "step": 3400
+     },
+     {
+       "epoch": 3.8209606986899565,
+       "grad_norm": 1.9009268283843994,
+       "learning_rate": 0.0002659313725490196,
+       "loss": 0.4144,
+       "step": 3500
+     },
+     {
+       "epoch": 3.930131004366812,
+       "eval_loss": 0.1427353024482727,
+       "eval_runtime": 127.8119,
+       "eval_samples_per_second": 7.957,
+       "eval_steps_per_second": 1.001,
+       "eval_wer": 0.35744234800838576,
+       "step": 3600
+     },
+     {
+       "epoch": 4.148471615720524,
+       "eval_loss": 0.14451348781585693,
+       "eval_runtime": 127.6381,
+       "eval_samples_per_second": 7.968,
+       "eval_steps_per_second": 1.003,
+       "eval_wer": 0.3594077568134172,
+       "step": 3800
+     },
+     {
+       "epoch": 4.366812227074236,
+       "grad_norm": 1.5231894254684448,
+       "learning_rate": 0.00014338235294117645,
+       "loss": 0.3926,
+       "step": 4000
+     },
+     {
+       "epoch": 4.366812227074236,
+       "eval_loss": 0.14618775248527527,
+       "eval_runtime": 127.9874,
+       "eval_samples_per_second": 7.946,
+       "eval_steps_per_second": 1.0,
+       "eval_wer": 0.3666142557651992,
+       "step": 4000
+     },
+     {
+       "epoch": 4.585152838427947,
+       "eval_loss": 0.14320309460163116,
+       "eval_runtime": 128.1452,
+       "eval_samples_per_second": 7.936,
+       "eval_steps_per_second": 0.999,
+       "eval_wer": 0.3527253668763103,
+       "step": 4200
+     },
+     {
+       "epoch": 4.8034934497816595,
+       "eval_loss": 0.1408591866493225,
+       "eval_runtime": 127.7676,
+       "eval_samples_per_second": 7.96,
+       "eval_steps_per_second": 1.002,
+       "eval_wer": 0.3498427672955975,
+       "step": 4400
+     },
+     {
+       "epoch": 4.9126637554585155,
+       "grad_norm": 6.477123737335205,
+       "learning_rate": 2.0833333333333333e-05,
+       "loss": 0.3928,
+       "step": 4500
+     },
+     {
+       "epoch": 5.0,
+       "step": 4580,
+       "total_flos": 2.179329538942206e+19,
+       "train_loss": 0.6123242174173547,
+       "train_runtime": 13424.2744,
+       "train_samples_per_second": 2.729,
+       "train_steps_per_second": 0.341
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 4580,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 2.179329538942206e+19,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": null
+ }
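
trainer_state.json is plain JSON, so the evaluation curve and the best checkpoint can be pulled straight out of `log_history`. A small sketch, assuming the file has been downloaded locally:

```python
# Sketch: read the eval-WER curve and best checkpoint from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]
for step, wer in evals:
    print(f"step {step:>4}: WER {wer:.4f}")

print("best checkpoint:", state["best_model_checkpoint"])  # ./mms-1b-bem-male-sv/checkpoint-4400
print("best eval loss: ", state["best_metric"])            # ~0.1409
```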