albertvillanova committed
Commit aa96d9a
1 Parent(s): 8bf96aa

Revert Convert dataset to Parquet (#6)


- Revert "Convert dataset to Parquet (#3)" (dd561d612217712e3e032309995fb72415cce0a4)

README.md CHANGED
@@ -9,6 +9,8 @@ license:
 - unlicense
 multilinguality:
 - monolingual
+paperswithcode_id: ljspeech
+pretty_name: LJ Speech
 size_categories:
 - 10K<n<100K
 source_datasets:
@@ -18,10 +20,21 @@ task_categories:
 - text-to-speech
 - text-to-audio
 task_ids: []
-paperswithcode_id: ljspeech
-pretty_name: LJ Speech
+train-eval-index:
+- config: main
+  task: automatic-speech-recognition
+  task_id: speech_recognition
+  splits:
+    train_split: train
+  col_mapping:
+    file: path
+    text: text
+  metrics:
+  - type: wer
+    name: WER
+  - type: cer
+    name: CER
 dataset_info:
-  config_name: main
   features:
   - name: id
     dtype: string
@@ -35,32 +48,13 @@ dataset_info:
     dtype: string
   - name: normalized_text
     dtype: string
+  config_name: main
   splits:
   - name: train
-    num_bytes: 3860187268.0
+    num_bytes: 4667022
     num_examples: 13100
-  download_size: 3786217548
-  dataset_size: 3860187268.0
-configs:
-- config_name: main
-  data_files:
-  - split: train
-    path: main/train-*
-  default: true
-train-eval-index:
-- config: main
-  task: automatic-speech-recognition
-  task_id: speech_recognition
-  splits:
-    train_split: train
-  col_mapping:
-    file: path
-    text: text
-  metrics:
-  - type: wer
-    name: WER
-  - type: cer
-    name: CER
+  download_size: 2748572632
+  dataset_size: 4667022
 ---
 
 # Dataset Card for lj_speech
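With this revert, the card metadata again declares the `main` config, the 13,100-example train split, and the WER/CER `train-eval-index`, while the Parquet-era `configs` mapping is dropped and the dataset is served by the restored loading script. A minimal, illustrative sketch of loading it after the revert (not part of this commit; on recent `datasets` releases, script-backed datasets may additionally need `trust_remote_code=True`):

```python
# Illustrative only: load the script-based dataset restored by this revert.
from datasets import load_dataset

ds = load_dataset("lj_speech", "main", split="train")

print(ds.num_rows)            # 13100, matching the restored num_examples
print(ds.info.download_size)  # download size reported by the script-built info
```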
lj_speech.py ADDED
@@ -0,0 +1,116 @@
+# coding=utf-8
+# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+"""LJ automatic speech recognition dataset."""
+
+
+import csv
+import os
+
+import datasets
+from datasets.tasks import AutomaticSpeechRecognition
+
+
+_CITATION = """\
+@misc{ljspeech17,
+  author       = {Keith Ito and Linda Johnson},
+  title        = {The LJ Speech Dataset},
+  howpublished = {\\url{https://keithito.com/LJ-Speech-Dataset/}},
+  year         = 2017
+}
+"""
+
+_DESCRIPTION = """\
+This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker reading
+passages from 7 non-fiction books in English. A transcription is provided for each clip. Clips vary in length
+from 1 to 10 seconds and have a total length of approximately 24 hours.
+
+Note that in order to limit the required storage for preparing this dataset, the audio
+is stored in the .wav format and is not converted to a float32 array. To convert the audio
+file to a float32 array, please make use of the `.map()` function as follows:
+
+
+```python
+import soundfile as sf
+
+def map_to_array(batch):
+    speech_array, _ = sf.read(batch["file"])
+    batch["speech"] = speech_array
+    return batch
+
+dataset = dataset.map(map_to_array, remove_columns=["file"])
+```
+"""
+
+_URL = "https://keithito.com/LJ-Speech-Dataset/"
+_DL_URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
+
+
+class LJSpeech(datasets.GeneratorBasedBuilder):
+    """LJ Speech dataset."""
+
+    VERSION = datasets.Version("1.1.0")
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(name="main", version=VERSION, description="The full LJ Speech dataset"),
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "id": datasets.Value("string"),
+                    "audio": datasets.Audio(sampling_rate=22050),
+                    "file": datasets.Value("string"),
+                    "text": datasets.Value("string"),
+                    "normalized_text": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=("file", "text"),
+            homepage=_URL,
+            citation=_CITATION,
+            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+        )
+
+    def _split_generators(self, dl_manager):
+        root_path = dl_manager.download_and_extract(_DL_URL)
+        root_path = os.path.join(root_path, "LJSpeech-1.1")
+        wav_path = os.path.join(root_path, "wavs")
+        csv_path = os.path.join(root_path, "metadata.csv")
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"wav_path": wav_path, "csv_path": csv_path}
+            ),
+        ]
+
+    def _generate_examples(self, wav_path, csv_path):
+        """Generate examples from an LJ Speech archive_path."""
+
+        with open(csv_path, encoding="utf-8") as csv_file:
+            csv_reader = csv.reader(csv_file, delimiter="|", quotechar=None, skipinitialspace=True)
+            for row in csv_reader:
+                uid, text, norm_text = row
+                filename = f"{uid}.wav"
+                example = {
+                    "id": uid,
+                    "file": os.path.join(wav_path, filename),
+                    "audio": os.path.join(wav_path, filename),
+                    "text": text,
+                    "normalized_text": norm_text,
+                }
+                yield uid, example
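The restored `_generate_examples` reads `metadata.csv` as a pipe-delimited file with quoting disabled (`quotechar=None`), yielding one example per clip with its raw and normalized transcriptions. A small standalone sketch of that parsing, using an invented sample row:

```python
# Illustrative sketch of the metadata.csv parsing used in _generate_examples above;
# the sample row is made up for demonstration only.
import csv
import io

sample_row = "LJ001-0001|Raw transcription, with punctuation.|Normalized transcription, with punctuation."

reader = csv.reader(io.StringIO(sample_row), delimiter="|", quotechar=None, skipinitialspace=True)
for uid, text, norm_text in reader:
    print(uid)        # -> LJ001-0001
    print(text)       # value of the `text` column
    print(norm_text)  # value of the `normalized_text` column
```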
main/train-00000-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:18c8a13f9a8975a1c22546d931ee30b7852f091f2aed75e766ee3289b396d6d8
3
- size 479478819
 
 
 
 
main/train-00001-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:715f47e2a99950bc1abb74453b5c03411586fe1b652b06d2b0ed0f651a09f0aa
3
- size 476725772
 
 
 
 
main/train-00002-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e8db7cc332abc8068d1df51f0b61e48308b2f14397cbc15ffdd7bc21e46c456a
3
- size 474225836
 
 
 
 
main/train-00003-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:87a586ebc0f9509a54ada3cc0fc7377ec7b0641218f4b91dd2b2dc5ca9d47283
3
- size 467178572
 
 
 
 
main/train-00004-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:5c92c6986e34b627b13a1f0d38e096ef5fcc0296bcc1e4ba40f4453b313e0307
3
- size 464994923
 
 
 
 
main/train-00005-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8b5031b3e260cd461f8997d16674cde131668511988769da3501ece26d9f063b
3
- size 458327927
 
 
 
 
main/train-00006-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:7e48a0ae5d4015b9b2d823201b80bfc43ddbb4c9c970275152f8be3c07208ca9
3
- size 486514835
 
 
 
 
main/train-00007-of-00008.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e0174c75c39e24b9d6699e1030671472fd7c00c1657de088a0bb4190b2664e72
3
- size 478770864
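The deleted Parquet shards were tracked with Git LFS, so only three-line pointer stubs (spec version, sha256 oid, byte size) are removed from the git history. A small illustrative parser for that pointer format, fed the first deleted shard's stub; the helper itself is not part of the repository:

```python
# Parse a Git LFS pointer file like the deleted shard stubs above (illustrative only).
pointer_text = """\
version https://git-lfs.github.com/spec/v1
oid sha256:18c8a13f9a8975a1c22546d931ee30b7852f091f2aed75e766ee3289b396d6d8
size 479478819
"""

def parse_lfs_pointer(text: str) -> dict:
    """Split each 'key value' line of a Git LFS pointer into a dict entry."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    fields["size"] = int(fields["size"])  # byte size of the real object stored in LFS
    return fields

print(parse_lfs_pointer(pointer_text))
# {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:18c8...', 'size': 479478819}
```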