Datasets:
LIUM
/

Sanchit Gandhi committed on
Commit
44fd86d
·
2 Parent(s): 3162781 3e78884

Merge branch 'main' of https://huggingface.co/datasets/LIUM/tedlium into main

Browse files
Files changed (1) hide show
  1. tedlium.py +76 -331
tedlium.py CHANGED
@@ -11,13 +11,9 @@
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
-
15
- """TED-LIUM speech recognition dataset."""
16
 
17
  import os
18
- import re
19
- from collections import defaultdict
20
- from io import BytesIO
21
  from pathlib import Path
22
 
23
  import numpy as np
@@ -27,358 +23,107 @@ import datasets
27
  from datasets.tasks import AutomaticSpeechRecognition
28
 
29
 
30
- _DL_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/"
31
-
32
- _LICENSE = "licensed under Creative Commons BY-NC-ND 3.0 (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en)"
33
-
34
-
35
- class TedliumReleaseConfig(datasets.BuilderConfig):
36
- """BuilderConfig for a release of the TED-LIUM dataset."""
37
-
38
- def __init__(self, *, url, download_urls, split_paths, citation, **kwargs):
39
- super(TedliumReleaseConfig, self).__init__(version=datasets.Version("1.0.1"), **kwargs)
40
- self.url = url
41
- self.download_urls = download_urls
42
- # List of split, path pairs containing the relative path within the
43
- # extracted tarball to the data for each split.
44
- self.split_paths = split_paths
45
- self.citation = citation
46
-
47
-
48
- def _make_builder_configs():
49
- """Creates builder configs for all supported Tedlium dataset releases."""
50
- release1 = TedliumReleaseConfig(
51
- name="release1",
52
- description="""\
53
- The TED-LIUM corpus is English-language TED talks, with transcriptions,
54
- sampled at 16kHz. It contains about 118 hours of speech.
55
-
56
- This is the TED-LIUM corpus release 1,
57
- licensed under Creative Commons BY-NC-ND 3.0
58
- (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
59
- """,
60
- citation="""\
61
- @inproceedings{rousseau2012tedlium,
62
- title={TED-LIUM: an Automatic Speech Recognition dedicated corpus},
63
- author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
64
- booktitle={Conference on Language Resources and Evaluation (LREC)},
65
- pages={125--129},
66
- year={2012}
67
- }
68
- """,
69
- url="https://www.openslr.org/7/",
70
- download_urls={
71
- "train": [_DL_URL + os.path.join("TEDLIUM_release1", "train.tar.gz")],
72
- "validation": [_DL_URL + os.path.join("TEDLIUM_release1", "dev.tar.gz")],
73
- "test": [_DL_URL + os.path.join("TEDLIUM_release1", "test.tar.gz")],
74
- },
75
- split_paths=[
76
- (datasets.Split.TRAIN, "train"),
77
- (datasets.Split.VALIDATION, "dev"),
78
- (datasets.Split.TEST, "test"),
79
- ],
80
- )
81
-
82
- release2 = TedliumReleaseConfig(
83
- name="release2",
84
- description="""\
85
- This is the TED-LIUM corpus release 2,
86
- licensed under Creative Commons BY-NC-ND 3.0
87
- (http://creativecommons.org/licenses/by-nc-nd/3.0/deed.en).
88
-
89
- All talks and text are property of TED Conferences LLC.
90
-
91
- The TED-LIUM corpus was made from audio talks and their transcriptions
92
- available on the TED website. We have prepared and filtered these data
93
- in order to train acoustic models to participate to the International
94
- Workshop on Spoken Language Translation 2011 (the LIUM English/French
95
- SLT system reached the first rank in the SLT task).
96
-
97
- Contains 1495 talks and transcripts.
98
- """,
99
- citation="""\
100
- @inproceedings{rousseau2014tedlium2,
101
- title={Enhancing the {TED-LIUM} Corpus with Selected Data for Language Modeling and More {TED} Talks},
102
- author={Rousseau, Anthony and Del{\\'e}glise, Paul and Est{\\`e}ve, Yannick},
103
- booktitle={Conference on Language Resources and Evaluation (LREC)},
104
- year={2014}
105
- }
106
- """,
107
- url="https://www.openslr.org/19/",
108
- download_urls={
109
- "train": [_DL_URL + os.path.join("TEDLIUM_release2", "train.tar.gz")],
110
- "validation": [_DL_URL + os.path.join("TEDLIUM_release2", "dev.tar.gz")],
111
- "test": [_DL_URL + os.path.join("TEDLIUM_release2", "test.tar.gz")],
112
- },
113
- split_paths=[
114
- (datasets.Split.TRAIN, "train"),
115
- (datasets.Split.VALIDATION, "dev"),
116
- (datasets.Split.TEST, "test"),
117
- ],
118
- )
119
-
120
- release3 = TedliumReleaseConfig(
121
- name="release3",
122
- description="""\
123
- This is the TED-LIUM corpus release 3, licensed under Creative Commons
124
- BY-NC-ND 3.0. This is the 'legacy' version of the corpus, in which the dev and test datasets are the same as in
125
- TED-LIUM 2 (and TED-LIUM 1).
126
-
127
- All talks and text are property of TED Conferences LLC.
128
 
129
- This new TED-LIUM release was made through a collaboration between the
130
- Ubiqus company and the LIUM (University of Le Mans, France)
131
 
132
- Contents:
 
 
133
 
134
- - 2351 audio talks in NIST sphere format (SPH), including talks from
135
- TED-LIUM 2: be careful, same talks but not same audio files (only
136
- these audio file must be used with the TED-LIUM 3 STM files)
137
- - 452 hours of audio
138
- - 2351 aligned automatic transcripts in STM format
139
- - TEDLIUM 2 dev and test data: 19 TED talks in SPH format with
140
- corresponding manual transcriptions.
141
- - Dictionary with pronunciations (159848 entries), same file as the one
142
- included in TED-LIUM 2
143
- - Selected monolingual data for language modeling from WMT12 publicly
144
- available corpora: these files come from the TED-LIUM 2 release, but
145
- have been modified to get a tokenization more relevant for English
146
- language
147
 
148
- """,
149
- citation="""\
150
- @inproceedings{hernandez2018tedlium3,
151
- title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
152
- author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
153
- booktitle={International Conference on Speech and Computer},
154
- pages={198--208},
155
- year={2018},
156
- organization={Springer}
157
- }
158
- """,
159
- url="https://www.openslr.org/51/",
160
- download_urls={
161
- "train": [
162
- _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_1.tar.gz"),
163
- _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_2.tar.gz"),
164
- _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_3.tar.gz"),
165
- _DL_URL + os.path.join("TEDLIUM_release3", "legacy", "train_4.tar.gz"),
166
- ],
167
- "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "dev.tar.gz")],
168
- "test": [_DL_URL + os.path.join("TEDLIUM_release3", "legacy", "test.tar.gz")],
169
- },
170
- split_paths=[
171
- (datasets.Split.TRAIN, "train"),
172
- (datasets.Split.VALIDATION, "dev"),
173
- (datasets.Split.TEST, "test"),
174
- ],
175
- )
176
 
177
- release3_speaker_adaptation = TedliumReleaseConfig(
178
- name="release3-speaker-adaptation",
179
- description="""\
180
- This is the TED-LIUM corpus release 3, licensed under Creative Commons
181
- BY-NC-ND 3.0. This is the 'speaker adaptation' version of the corpus, specially designed for experiments on
182
- speaker adaptation.
183
 
184
- All talks and text are property of TED Conferences LLC.
185
 
186
- This new TED-LIUM release was made through a collaboration between the
187
- Ubiqus company and the LIUM (University of Le Mans, France)
188
- """,
189
- citation="""\
190
- @inproceedings{hernandez2018tedlium3,
191
- title={TED-LIUM 3: twice as much data and corpus repartition for experiments on speaker adaptation},
192
- author={Hernandez, Fran{\\c{c}}ois and Nguyen, Vincent and Ghannay, Sahar and Tomashenko, Natalia and Est{\\`e}ve, Yannick},
193
- booktitle={International Conference on Speech and Computer},
194
- pages={198--208},
195
- year={2018},
196
- organization={Springer}
197
- }
198
- """,
199
- url="https://www.openslr.org/51/",
200
- download_urls={
201
- "train": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "train.tar.gz")],
202
- "validation": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "dev.tar.gz")],
203
- "test": [_DL_URL + os.path.join("TEDLIUM_release3", "speaker-adaptation", "test.tar.gz")],
204
- },
205
- split_paths=[
206
- (datasets.Split.TRAIN, "train"),
207
- (datasets.Split.VALIDATION, "dev"),
208
- (datasets.Split.TEST, "test"),
209
- ],
210
- )
211
-
212
- return [release1, release2, release3, release3_speaker_adaptation]
213
-
214
-
215
- class TedLium(datasets.GeneratorBasedBuilder):
216
- """The TED-LIUM corpus is English-language TED talks, with transcriptions, sampled at 16kHz. It contains about 118 hours of speech."""
217
 
218
  VERSION = datasets.Version("1.1.0")
219
 
220
- BUILDER_CONFIGS = _make_builder_configs()
221
 
222
  def _info(self):
223
  features = datasets.Features(
224
  {
225
- "audio": datasets.features.Audio(sampling_rate=16_000),
226
  "text": datasets.Value("string"),
227
- "speaker_id": datasets.Value("string"),
228
- "gender": datasets.features.ClassLabel(names=["unknown", "female", "male"]),
229
  "file": datasets.Value("string"),
230
  "id": datasets.Value("string"),
231
  }
232
  )
233
  return datasets.DatasetInfo(
234
- description=self.config.description,
235
  features=features,
236
  supervised_keys=("audio", "text"),
237
- homepage=self.config.url,
238
  license=_LICENSE,
239
- citation=self.config.citation,
240
  task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
241
  )
242
 
243
  def _split_generators(self, dl_manager):
244
- archive_path = dl_manager.download(self.config.download_urls)
245
- # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
246
- local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
247
- splits = []
248
- for split, path in self.config.split_paths:
249
- kwargs = {
250
- "filepath": [dl_manager.iter_archive(sharded_path) for sharded_path in archive_path[split]],
251
- "local_extracted_archive": local_extracted_archive.get(split),
252
- "split_path": path,
253
- }
254
- splits.append(datasets.SplitGenerator(name=split, gen_kwargs=kwargs))
255
- return splits
256
-
257
- def _generate_examples(self, filepath, local_extracted_archive, split_path):
258
- """Generate examples from a TED-LIUM stm file."""
259
- if local_extracted_archive:
260
- for local_archive in local_extracted_archive:
261
- # The stm directory houses the speaker and transcription information in .stm format
262
- stm_dir = os.path.join(local_archive, split_path, "stm")
263
- # The sph directory houses the audio files in .sph format
264
- sph_dir = os.path.join(local_archive, split_path, "sph")
265
- stm_files = [os.path.join(stm_dir, f) for f in os.listdir(stm_dir) if f.endswith(".stm")]
266
- for file in stm_files:
267
- # the .sph speaker file almost always has the same file name as the .stm file
268
- speaker_file = Path(file).stem
269
- audio_file = os.path.join(sph_dir, speaker_file + ".sph")
270
- segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
271
- with open(file) as f:
272
- for line in f:
273
- line = line.strip()
274
- fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
275
- transcript = _maybe_trim_suffix(transcript)
276
- if speaker_file != fn:
277
- # handle the case where the stm file does not have the same file name as the transcript
278
- speaker_file = fn
279
- audio_file = os.path.join(sph_dir, speaker_file + ".sph")
280
- segment, sampling_rate = sf.read(audio_file, dtype=np.int16)
281
- samples = _extract_audio_segment(segment, int(channel), float(start), float(end))
282
- key = "-".join([speaker, start, end, label])
283
- example = {
284
- "audio": {"path": audio_file, "array": samples, "sampling_rate": sampling_rate},
285
- "text": transcript,
286
- "speaker_id": speaker,
287
- "gender": _parse_gender(label),
288
- "file": audio_file,
289
- "id": key,
290
- }
291
- yield key, example
292
-
293
- else:
294
- audio_data = {}
295
- transcripts = defaultdict(list)
296
- for file in filepath:
297
- for path, f in file:
298
- if path.endswith(".sph"):
299
- # get the speaker id
300
- fn = path.split("/")[-1].strip(".sph")
301
- # read the audio data from raw byte form and add key-value pair to dict
302
- audio_data[fn] = sf.read(BytesIO(f.read()), dtype=np.int16)
303
- elif path.endswith(".stm"):
304
- for line in f:
305
- if line:
306
- line = line.decode("utf-8").strip()
307
- fn, channel, speaker, start, end, label, transcript = line.split(" ", 6)
308
- transcript = _maybe_trim_suffix(transcript)
309
- audio_file = path.replace("stm", "sph")
310
- key = "-".join([speaker, start, end, label])
311
- # append metadata information to the dict of transcripts for the associated speaker
312
- transcripts[fn].append(
313
- {
314
- "text": transcript,
315
- "speaker_id": speaker,
316
- "gender": _parse_gender(label),
317
- "file": audio_file,
318
- "id": key,
319
- "start": start,
320
- "end": end,
321
- "channel": channel,
322
- "fn": fn,
323
- }
324
- )
325
-
326
- if audio_data and audio_data.keys() == transcripts.keys():
327
- for fn, speaker in transcripts.items():
328
- for transcript in speaker:
329
- segment, sampling_rate = audio_data[transcript["fn"]]
330
- samples = _extract_audio_segment(
331
- segment,
332
- int(transcript["channel"]),
333
- float(transcript["start"]),
334
- float(transcript["end"]),
335
- )
336
- audio = {"path": transcript["file"], "array": samples, "sampling_rate": sampling_rate}
337
- key = transcript["id"]
338
- yield key, {
339
- "audio": audio,
340
- "text": transcript["text"],
341
- "speaker_id": transcript["speaker_id"],
342
- "gender": transcript["gender"],
343
- "file": transcript["file"],
344
- "id": transcript["id"],
345
- }
346
- audio_data = {}
347
- transcripts = defaultdict(list)
348
-
349
-
350
- def _maybe_trim_suffix(transcript):
351
- # stm files for the TEDLIUM release 1 train split contain a key (enclosed in
352
- # parens) at the end.
353
- splits = transcript.rsplit(" ", 1)
354
- transcript = splits[0]
355
- if len(splits) > 1:
356
- suffix = splits[-1]
357
- if not suffix.startswith("("):
358
- transcript += " " + suffix
359
- return transcript
360
-
361
-
362
- def _extract_audio_segment(segment, channel, start_sec, end_sec):
363
  """Extracts segment of audio samples (as an ndarray) from the given segment."""
364
- # The dataset only contains mono audio.
365
- assert channel == 1
366
- start_ms = int(start_sec * 1000)
367
- end_ms = int(end_sec * 1000)
368
- samples = segment[start_ms:end_ms]
369
  return samples
370
-
371
-
372
- def _parse_gender(label_str):
373
- """Parse gender string from STM "<label>" field."""
374
- gender = re.split(",|_", label_str)[-1][:-1]
375
- # Fix inconsistencies in the data.
376
- if not gender:
377
- gender = -1 # Missing label.
378
- elif gender == "<NA": # In TEDLIUM release 3 training data.
379
- gender = -1 # Missing label.
380
- elif gender == "F":
381
- gender = "female"
382
- elif gender == "M":
383
- gender = "male"
384
- return gender
 
11
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
  # See the License for the specific language governing permissions and
13
  # limitations under the License.
14
+ """The Switchboard conversational dataset for automatic speech recognition."""
 
15
 
16
  import os
 
 
 
17
  from pathlib import Path
18
 
19
  import numpy as np
 
23
  from datasets.tasks import AutomaticSpeechRecognition
24
 
25
 
26
# BibTeX citation for the source corpus (Godfrey et al., ICASSP 1992).
_CITATION = """\
@INPROCEEDINGS{225858,
author={Godfrey, J.J. and Holliman, E.C. and McDaniel, J.},
booktitle={[Proceedings] ICASSP-92: 1992 IEEE International Conference on Acoustics, Speech, and Signal Processing},
title={SWITCHBOARD: telephone speech corpus for research and development},
year={1992},
volume={1},
number={},
pages={517-520 vol.1},
doi={10.1109/ICASSP.1992.225858}}
"""

# Human-readable corpus summary, surfaced through DatasetInfo/description.
_DESCRIPTION = """\
Switchboard is a collection of about 2,400 two-sided telephone conversations among 543 speakers (302 male, 241 female) from all areas of the United States. A computer-driven robot operator system handled the calls, giving the caller appropriate recorded prompts, selecting and dialing another person (the callee) to take part in a conversation, introducing a topic for discussion and recording the speech from the two subjects into separate channels until the conversation was finished. About 70 topics were provided, of which about 50 were used frequently. Selection of topics and callees was constrained so that: (1) no two speakers would converse together more than once and (2) no one spoke more than once on a given topic.
"""

# LDC catalog page for the corpus.
_HOMEPAGE = "https://catalog.ldc.upenn.edu/LDC97S62"

# LDC non-members license agreement (the corpus is not freely redistributable).
_LICENSE = "https://catalog.ldc.upenn.edu/license/ldc-non-members-agreement.pdf"

# Replace with the URL of your download link
_URL = "https://huggingface.co/datasets/speech-seq2seq/switchboard/resolve/main/switchboard.tar.gz"
 
 
 
 
49
 
 
50
 
51
class Switchboard(datasets.GeneratorBasedBuilder):
    """The Switchboard corpus is a collection of about 2,400 two-sided telephone conversations."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [datasets.BuilderConfig(name="switchboard", version=VERSION, description=_DESCRIPTION)]

    def _info(self):
        """Return dataset metadata: features, citation, homepage and the ASR task template."""
        features = datasets.Features(
            {
                # Telephone speech — 8 kHz sampling rate.
                "audio": datasets.features.Audio(sampling_rate=8_000),
                "text": datasets.Value("string"),
                "file": datasets.Value("string"),
                "id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=("audio", "text"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; currently exposes a single train split."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir,  # later: change this to train/dev/test (e.g. os.path.join(data_dir, "train"))
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Generate examples from a Switchboard filepath.

        Walks the extracted archive, pairs each ``*-trans.text`` transcript
        with its ``.sph`` audio file via the 4-character conversation code
        embedded in both file names, and yields one example per transcript
        line.
        """
        txt_files = []
        sph_codes = []  # 4-char conversation codes, parallel to sph_paths
        sph_paths = []

        for dirpath, _dirnames, filenames in os.walk(filepath):
            for name in filenames:
                if name.endswith("-trans.text"):
                    txt_files.append(os.path.join(dirpath, name))
                elif name.endswith(".sph"):
                    # chars [3:7] of the .sph file name are the conversation code
                    # (assumes names like "sw02001.sph" -- TODO confirm against archive)
                    sph_codes.append(name[3:7])
                    sph_paths.append(os.path.join(dirpath, name))

        for txt_file in txt_files:
            # transcript stems carry the code at chars [2:6] (e.g. "sw2001A-...")
            current_code = Path(txt_file).stem[2:6]
            idx = sph_codes.index(current_code)
            segment, sampling_rate = sf.read(sph_paths[idx], dtype=np.int16)
            with open(txt_file) as f:
                for line in f:
                    line = line.strip()
                    utterance_id, start, end, transcript = line.split(" ", 3)
                    if current_code != utterance_id[2:6]:
                        # Transcript line references a different conversation than the
                        # audio currently loaded: switch to the matching .sph file.
                        # (The original compared the full stem against the 4-char code,
                        # which never matched and re-read the audio on every line.)
                        current_code = utterance_id[2:6]
                        idx = sph_codes.index(current_code)
                        segment, sampling_rate = sf.read(sph_paths[idx], dtype=np.int16)
                    samples = _extract_audio_segment(segment, sampling_rate, float(start), float(end))
                    key = "-".join([utterance_id, start, end])
                    example = {
                        # NOTE(review): "path"/"file" point at the transcript, not the
                        # .sph audio -- possibly intended to be sph_paths[idx]; kept
                        # as-is to preserve behavior, confirm with the dataset owner.
                        "audio": {"path": txt_file, "array": samples, "sampling_rate": sampling_rate},
                        "text": transcript,
                        "file": txt_file,
                        "id": key,
                    }
                    yield key, example
123
+
124
+ def _extract_audio_segment(segment, sampling_rate, start_sec, end_sec):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
  """Extracts segment of audio samples (as an ndarray) from the given segment."""
126
+ start_sample = int(start_sec * sampling_rate)
127
+ end_sample = min(int(end_sec * sampling_rate), segment.shape[0])
128
+ samples = segment[start_sample:end_sample]
 
 
129
  return samples