mikr committed
Commit f2bbe6a · 1 Parent(s): c13915e

Training in progress, step 2000

pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e1ff147209cb0ce1c1f06c05eb82fd888b17784eb2e7a0089e6814aaa89eb54
+ oid sha256:4ac2d1857992248df444ef4ce8e1af45a08d723b4efede467861e84936c9c1a6
  size 3055754841
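
The pointer above only records the new checkpoint's SHA-256 and byte size; the weights themselves live in LFS storage. Below is a minimal sketch (not part of this commit) for checking a locally downloaded pytorch_model.bin against the new oid, assuming the file sits in the current directory:

# Sketch: verify a downloaded checkpoint against the Git LFS pointer's oid.
# The local path "pytorch_model.bin" is an assumption, not taken from the repo layout.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Stream the file so the ~3 GB checkpoint never sits fully in memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "4ac2d1857992248df444ef4ce8e1af45a08d723b4efede467861e84936c9c1a6"
assert sha256_of("pytorch_model.bin") == expected, "checkpoint does not match LFS pointer"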
run.log CHANGED
@@ -324,3 +324,44 @@ Dataset common_voice_11_0 downloaded and prepared to /root/.cache/huggingface/da
  {'loss': 0.0199, 'learning_rate': 9.002222222222223e-06, 'epoch': 4.03}
  {'loss': 0.0097, 'learning_rate': 8.946666666666669e-06, 'epoch': 4.13}
  {'loss': 0.0105, 'learning_rate': 8.891111111111111e-06, 'epoch': 4.24}
+ {'eval_loss': 0.1972593367099762, 'eval_wer': 12.613027384394393, 'eval_runtime': 8895.0887, 'eval_samples_per_second': 0.867, 'eval_steps_per_second': 0.027, 'epoch': 4.24}
+ {'loss': 0.0112, 'learning_rate': 8.835555555555557e-06, 'epoch': 4.34}
+ {'loss': 0.0108, 'learning_rate': 8.78e-06, 'epoch': 4.45}
+ {'loss': 0.0116, 'learning_rate': 8.724444444444445e-06, 'epoch': 4.56}
+ {'loss': 0.0114, 'learning_rate': 8.66888888888889e-06, 'epoch': 4.66}
+ {'loss': 0.011, 'learning_rate': 8.613333333333333e-06, 'epoch': 4.77}
+ {'loss': 0.0096, 'learning_rate': 8.557777777777778e-06, 'epoch': 4.87}
+ {'loss': 0.0126, 'learning_rate': 8.502222222222223e-06, 'epoch': 4.98}
+ {'loss': 0.0078, 'learning_rate': 8.446666666666668e-06, 'epoch': 5.08}
+ {'loss': 0.006, 'learning_rate': 8.391111111111112e-06, 'epoch': 5.19}
+ {'loss': 0.0053, 'learning_rate': 8.335555555555556e-06, 'epoch': 5.3}
+ {'loss': 0.0067, 'learning_rate': 8.28e-06, 'epoch': 5.4}
+ {'loss': 0.0055, 'learning_rate': 8.224444444444444e-06, 'epoch': 5.51}
+ {'loss': 0.0059, 'learning_rate': 8.16888888888889e-06, 'epoch': 5.61}
+ {'loss': 0.0061, 'learning_rate': 8.113333333333334e-06, 'epoch': 5.72}
+ {'loss': 0.0057, 'learning_rate': 8.057777777777778e-06, 'epoch': 5.83}
+ {'loss': 0.0071, 'learning_rate': 8.002222222222222e-06, 'epoch': 5.93}
+ {'loss': 0.0061, 'learning_rate': 7.946666666666666e-06, 'epoch': 6.04}
+ {'loss': 0.0047, 'learning_rate': 7.891111111111112e-06, 'epoch': 6.14}
+ {'loss': 0.004, 'learning_rate': 7.835555555555556e-06, 'epoch': 6.25}
+ {'loss': 0.005, 'learning_rate': 7.78e-06, 'epoch': 6.36}
+ {'loss': 0.0049, 'learning_rate': 7.724444444444446e-06, 'epoch': 6.46}
+ {'loss': 0.0036, 'learning_rate': 7.66888888888889e-06, 'epoch': 6.57}
+ {'loss': 0.0035, 'learning_rate': 7.613333333333334e-06, 'epoch': 6.67}
+ {'loss': 0.0046, 'learning_rate': 7.557777777777779e-06, 'epoch': 6.78}
+ {'loss': 0.0045, 'learning_rate': 7.502222222222223e-06, 'epoch': 6.89}
+ {'loss': 0.0044, 'learning_rate': 7.446666666666668e-06, 'epoch': 6.99}
+ {'loss': 0.0022, 'learning_rate': 7.3911111111111125e-06, 'epoch': 7.1}
+ {'loss': 0.0019, 'learning_rate': 7.335555555555556e-06, 'epoch': 7.2}
+ {'loss': 0.0019, 'learning_rate': 7.280000000000001e-06, 'epoch': 7.31}
+ {'loss': 0.0024, 'learning_rate': 7.224444444444445e-06, 'epoch': 7.42}
+ {'loss': 0.0022, 'learning_rate': 7.1688888888888895e-06, 'epoch': 7.52}
+ {'loss': 0.0029, 'learning_rate': 7.113333333333334e-06, 'epoch': 7.63}
+ {'loss': 0.0026, 'learning_rate': 7.057777777777778e-06, 'epoch': 7.73}
+ {'loss': 0.0033, 'learning_rate': 7.0022222222222225e-06, 'epoch': 7.84}
+ {'loss': 0.0026, 'learning_rate': 6.946666666666667e-06, 'epoch': 7.94}
+ {'loss': 0.0025, 'learning_rate': 6.891111111111111e-06, 'epoch': 8.05}
+ {'loss': 0.0019, 'learning_rate': 6.835555555555556e-06, 'epoch': 8.16}
+ {'loss': 0.0022, 'learning_rate': 6.780000000000001e-06, 'epoch': 8.26}
+ {'loss': 0.0015, 'learning_rate': 6.724444444444444e-06, 'epoch': 8.37}
+ {'loss': 0.0016, 'learning_rate': 6.668888888888889e-06, 'epoch': 8.47}
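
The lines added to run.log are Trainer-style metric dictionaries printed one per line. Below is a minimal sketch (not part of this commit) that parses them back into Python objects, assuming every metric line is a literal dict exactly as shown above:

# Sketch: read train/eval metrics back out of run.log.
# Assumes metric lines are literal Python dicts; other log lines are skipped.
import ast

def read_metrics(path="run.log"):
    records = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if line.startswith("{") and line.endswith("}"):
                try:
                    records.append(ast.literal_eval(line))
                except (ValueError, SyntaxError):
                    continue  # not a metric dict, e.g. dataset download messages
    return records

metrics = read_metrics()
train_loss = [(m["epoch"], m["loss"]) for m in metrics if "loss" in m]
eval_wer = [(m["epoch"], m["eval_wer"]) for m in metrics if "eval_wer" in m]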
runs/Dec11_16-15-08_4b942bf2873e/events.out.tfevents.1670781880.4b942bf2873e.1511212.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a525d3193bea56df74ab7699601d99aaa71cb2944c621ef29d2229d8dc1d94d4
- size 10761
+ oid sha256:f8c603312c18f4403556e986c0e06a62ed0ea2bff8e94df3984dbb6a150c0a4d
+ size 17359
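
The grown tfevents file carries the same training curves in TensorBoard's binary event format. Below is a minimal sketch (not part of this commit) for inspecting it locally with TensorBoard's event loader; the scalar tag name used at the end is an assumption based on typical Trainer logging, not confirmed by this repo:

# Sketch: list and read scalar series from the updated event file.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator(
    "runs/Dec11_16-15-08_4b942bf2873e/events.out.tfevents.1670781880.4b942bf2873e.1511212.0"
)
acc.Reload()
print(acc.Tags()["scalars"])            # show which scalar tags were actually logged
for event in acc.Scalars("train/loss"):  # "train/loss" is an assumed tag name
    print(event.step, event.value)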