phongdtd committed on
Commit 9fe2df1 · 1 Parent(s): 018c60b

[Update] Update

Files changed (2)
  1. custom_common_voice.py +7 -6
  2. dataset_infos.json +1 -1
custom_common_voice.py CHANGED
@@ -21,16 +21,17 @@ import pandas as pd
 import re
 
 
-_DATA_URL = "https://drive.google.com/uc?export=download&id=15WLhuiIl7q-zM_VFol8zw4tXMoyKTOJz"
+_DATA_URL = "https://drive.google.com/uc?export=download&id=1AF2jRixUgi-LzDsEm3CZOcSigHv81AVi"
 _PROMPTS_URLS = {
-    "train": "https://drive.google.com/uc?export=download&id=1-EGkB5axUEUw8W8zUHuNsEmbqMOfZDqz",
-    "test": "https://drive.google.com/uc?export=download&id=1-9Ahfqkn_DD3bteH06F8EThUbjmcWI7k",
-    "val": "https://drive.google.com/uc?export=download&id=1V8M437ncD6ogE-e56OipMPuuFqLgEt5g",
+    "train": "https://drive.google.com/uc?export=download&id=1WPgxJBsbu6isHe8UVnY6U2ub7523eC1q",
+    "test": "https://drive.google.com/uc?export=download&id=1N-FjPwooTL5o4hmJkFY_E-TPwh5UEZ68",
+    "validation": "https://drive.google.com/uc?export=download&id=1p5OJjTwtfgwKER9SltNHUajwRt7aTmbK"
 }
 
 _DESCRIPTION = """\
 Common Voice is Mozilla's initiative to help teach machines how real people speak.
-The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices and languages.
+The dataset currently consists of 7,335 validated hours of speech in 60 languages, but we’re always adding more voices
+and languages.
 """
 
 _LANGUAGES = {
@@ -137,7 +138,7 @@ class CustomCommonVoice(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "tsv_files": tsv_files["val"],
+                    "tsv_files": tsv_files["validation"],
                     "audio_files": dl_manager.iter_archive(archive),
                     "path_to_clips": path_to_clips,
                 },
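Context for the second hunk: in a datasets.GeneratorBasedBuilder loading script, dl_manager.download(_PROMPTS_URLS) returns a dict with the same keys as _PROMPTS_URLS, so renaming "val" to "validation" in the URL dict requires the matching change in the gen_kwargs lookup. A minimal sketch of how such a _split_generators method is typically wired up, assuming this structure (the method body, the path_to_clips value, and the TRAIN generator shown here are illustrative, not the repository's exact code):

    def _split_generators(self, dl_manager):
        # The downloaded dict keeps the keys of _PROMPTS_URLS, so any index
        # into tsv_files must use the same key ("train", "test", "validation").
        tsv_files = dl_manager.download(_PROMPTS_URLS)
        # Download the audio archive once; iter_archive streams its members
        # per split without extracting everything up front.
        archive = dl_manager.download(_DATA_URL)
        path_to_clips = "clips"  # assumed folder name inside the archive
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "tsv_files": tsv_files["train"],
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    # renamed key: tsv_files["val"] -> tsv_files["validation"]
                    "tsv_files": tsv_files["validation"],
                    "audio_files": dl_manager.iter_archive(archive),
                    "path_to_clips": path_to_clips,
                },
            ),
        ]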
dataset_infos.json CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1c1212eec1244b86866c405471bd61fdf16ce43e8e52b3a1c3d6ed60278542e0
+oid sha256:6474db188c1ee3b46ac983c0c6febbe0598953626075303f567210caf11cde2c
 size 1659