Datasets: ccmusic-database/chest_falsetto
admin committed
Commit · 480b4b6
1 Parent(s): 15232bd
2 arrows
Browse files
- README.md +102 -5
- chest_falsetto.py +0 -152
- default/dataset_dict.json +1 -0
- default/test/data-00000-of-00001.arrow +3 -0
- default/test/dataset_info.json +91 -0
- default/test/state.json +13 -0
- default/train/data-00000-of-00001.arrow +3 -0
- default/train/dataset_info.json +91 -0
- default/train/state.json +13 -0
- default/validation/data-00000-of-00001.arrow +3 -0
- default/validation/dataset_info.json +91 -0
- default/validation/state.json +13 -0
- eval/dataset_dict.json +1 -0
- eval/test/data-00000-of-00001.arrow +3 -0
- eval/test/dataset_info.json +89 -0
- eval/test/state.json +13 -0
- eval/train/data-00000-of-00001.arrow +3 -0
- eval/train/dataset_info.json +89 -0
- eval/train/state.json +13 -0
- eval/validation/data-00000-of-00001.arrow +3 -0
- eval/validation/dataset_info.json +89 -0
- eval/validation/state.json +13 -0
README.md CHANGED
@@ -11,7 +11,104 @@ tags:
 pretty_name: Chest voice and Falsetto Dataset
 size_categories:
 - 1K<n<10K
-
+dataset_info:
+- config_name: default
+  features:
+  - name: audio
+    dtype:
+      audio:
+        sampling_rate: 22050
+  - name: mel
+    dtype: image
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': m_chest
+          '1': f_chest
+          '2': m_falsetto
+          '3': f_falsetto
+  - name: gender
+    dtype:
+      class_label:
+        names:
+          '0': female
+          '1': male
+  - name: singing_method
+    dtype:
+      class_label:
+        names:
+          '0': falsetto
+          '1': chest
+  splits:
+  - name: train
+    num_bytes: 293944
+    num_examples: 767
+  - name: validation
+    num_bytes: 98112
+    num_examples: 256
+  - name: test
+    num_bytes: 98494
+    num_examples: 257
+  download_size: 41000619
+  dataset_size: 490550
+- config_name: eval
+  features:
+  - name: mel
+    dtype: image
+  - name: cqt
+    dtype: image
+  - name: chroma
+    dtype: image
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': m_chest
+          '1': f_chest
+          '2': m_falsetto
+          '3': f_falsetto
+  - name: gender
+    dtype:
+      class_label:
+        names:
+          '0': female
+          '1': male
+  - name: singing_method
+    dtype:
+      class_label:
+        names:
+          '0': falsetto
+          '1': chest
+  splits:
+  - name: train
+    num_bytes: 447819
+    num_examples: 767
+  - name: validation
+    num_bytes: 149472
+    num_examples: 256
+  - name: test
+    num_bytes: 150054
+    num_examples: 257
+  download_size: 81547911
+  dataset_size: 747345
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: default/train/data-*.arrow
+  - split: validation
+    path: default/validation/data-*.arrow
+  - split: test
+    path: default/test/data-*.arrow
+- config_name: eval
+  data_files:
+  - split: train
+    path: eval/train/data-*.arrow
+  - split: validation
+    path: eval/validation/data-*.arrow
+  - split: test
+    path: eval/test/data-*.arrow
 ---
 
 # Dataset Card for Chest voice and Falsetto Dataset
@@ -115,10 +212,13 @@ for item in ds["test"]:
 
 ## Maintenance
 ```bash
-git clone [email protected]:datasets/ccmusic-database/chest_falsetto
+GIT_LFS_SKIP_SMUDGE=1 git clone [email protected]:datasets/ccmusic-database/chest_falsetto
 cd chest_falsetto
 ```
 
+## Mirror
+<https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto>
+
 ## Dataset Description
 ### Dataset Summary
 For the pre-processed version, the audio clip was cut into 0.25-second segments and then transformed into Mel, CQT and Chroma spectrograms in .jpg format, resulting in 8,974 files. The chest/falsetto label for each file is given as one of the four classes: m chest, m falsetto, f chest, and f falsetto. The spectrogram, the chest/falsetto label and the gender label are combined into one data entry, with the first three columns representing the Mel, CQT and Chroma. The fourth and fifth columns are the chest/falsetto label and gender label, respectively. Additionally, the integrated dataset provides the function to shuffle and split the dataset into training, validation, and test sets in an 8:1:1 ratio. This dataset can be used for singing-related tasks such as singing gender classification or chest and falsetto voice classification.
@@ -158,9 +258,6 @@ Only for chest and falsetto voices
 Recordings are cut into slices that are too short;
 The CQT spectrum column has the problem of spectrum leakage, but because the original audio slice is too short, only 0.5s, it cannot effectively avoid this problem.
 
-## Mirror
-<https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto>
-
 ## Additional Information
 ### Dataset Curators
 Zijin Li
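The new `configs` block maps each split of the `default` and `eval` configs to the pre-built Arrow shards committed below, so the card's data can now be loaded without the deleted loader script. A minimal sketch of what loading looks like after this commit, assuming the public Hub repo id `ccmusic-database/chest_falsetto` (this snippet is illustrative, not taken from the card):

```python
from datasets import load_dataset

# "default" carries raw audio + mel + labels; "eval" carries mel/cqt/chroma images + labels.
ds = load_dataset("ccmusic-database/chest_falsetto", name="eval")

print(ds)                               # DatasetDict with train/validation/test splits
item = ds["test"][0]
print(item["label"], item["gender"])    # integer class ids
print(ds["test"].features["label"].int2str(item["label"]))  # e.g. "m_chest"
```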
chest_falsetto.py DELETED
@@ -1,152 +0,0 @@
import os
import random
import datasets
from datasets.tasks import ImageClassification

_NAMES = {
    "all": ["m_chest", "f_chest", "m_falsetto", "f_falsetto"],
    "gender": ["female", "male"],
    "singing_method": ["falsetto", "chest"],
}

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{os.path.basename(__file__)[:-3]}"

_DOMAIN = f"{_HOMEPAGE}/resolve/master/data"

_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "eval": f"{_DOMAIN}/eval.zip",
}


class chest_falsetto(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=(
                datasets.Features(
                    {
                        "audio": datasets.Audio(sampling_rate=22050),
                        "mel": datasets.Image(),
                        "label": datasets.features.ClassLabel(names=_NAMES["all"]),
                        "gender": datasets.features.ClassLabel(names=_NAMES["gender"]),
                        "singing_method": datasets.features.ClassLabel(
                            names=_NAMES["singing_method"]
                        ),
                    }
                )
                if self.config.name == "default"
                else datasets.Features(
                    {
                        "mel": datasets.Image(),
                        "cqt": datasets.Image(),
                        "chroma": datasets.Image(),
                        "label": datasets.features.ClassLabel(names=_NAMES["all"]),
                        "gender": datasets.features.ClassLabel(names=_NAMES["gender"]),
                        "singing_method": datasets.features.ClassLabel(
                            names=_NAMES["singing_method"]
                        ),
                    }
                )
            ),
            supervised_keys=("mel", "label"),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="mel",
                    label_column="label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        dataset = []
        if self.config.name == "default":
            files = {}
            audio_files = dl_manager.download_and_extract(_URLS["audio"])
            mel_files = dl_manager.download_and_extract(_URLS["mel"])
            for fpath in dl_manager.iter_files([audio_files]):
                fname: str = os.path.basename(fpath)
                if fname.endswith(".wav"):
                    item_id = fname.split(".")[0]
                    files[item_id] = {"audio": fpath}

            for fpath in dl_manager.iter_files([mel_files]):
                fname = os.path.basename(fpath)
                if fname.endswith(".jpg"):
                    item_id = fname.split(".")[0]
                    files[item_id]["mel"] = fpath

            dataset = list(files.values())

        else:
            data_files = dl_manager.download_and_extract(_URLS["eval"])
            for fpath in dl_manager.iter_files([data_files]):
                if "mel" in fpath and os.path.basename(fpath).endswith(".jpg"):
                    dataset.append(fpath)

        categories = {}
        for name in _NAMES["all"]:
            categories[name] = []

        for data in dataset:
            fpath = data["audio"] if self.config.name == "default" else data
            filename: str = os.path.basename(fpath)[:-4]
            label = "_".join(filename.split("_")[1:3])
            categories[label].append(data)

        testset, validset, trainset = [], [], []
        for cls in categories:
            random.shuffle(categories[cls])
            count = len(categories[cls])
            p60 = int(count * 0.6)
            p80 = int(count * 0.8)
            trainset += categories[cls][:p60]
            validset += categories[cls][p60:p80]
            testset += categories[cls][p80:]

        random.shuffle(trainset)
        random.shuffle(validset)
        random.shuffle(testset)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": trainset}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"files": validset}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": testset}
            ),
        ]

    def _generate_examples(self, files):
        if self.config.name == "default":
            for i, fpath in enumerate(files):
                file_name = os.path.basename(fpath["audio"])
                sex = file_name.split("_")[1]
                method = file_name.split("_")[2].split(".")[0]
                yield i, {
                    "audio": fpath["audio"],
                    "mel": fpath["mel"],
                    "label": f"{sex}_{method}",
                    "gender": "male" if sex == "m" else "female",
                    "singing_method": method,
                }

        else:
            for i, fpath in enumerate(files):
                file_name: str = os.path.basename(fpath)
                sex = file_name.split("_")[1]
                method = file_name.split("_")[2]
                yield i, {
                    "mel": fpath,
                    "cqt": fpath.replace("mel", "cqt"),
                    "chroma": fpath.replace("mel", "chroma"),
                    "label": f"{sex}_{method}",
                    "gender": "male" if sex == "m" else "female",
                    "singing_method": method,
                }
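Note that `_split_generators` above re-shuffles each class with an unseeded `random.shuffle` on every load, so its 6:2:2 split came out differently from run to run; committing fixed Arrow splits (below) removes that nondeterminism. For reference, a seeded, self-contained sketch of the same per-class split logic (illustrative only, not code from this repo):

```python
import random
from collections import defaultdict

def stratified_split(items, label_of, seed=42, ratios=(0.6, 0.2, 0.2)):
    """Shuffle within each class, then cut 60/20/20 into train/valid/test."""
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for item in items:
        by_class[label_of(item)].append(item)

    train, valid, test = [], [], []
    for cls_items in by_class.values():
        rng.shuffle(cls_items)
        n = len(cls_items)
        p60, p80 = int(n * ratios[0]), int(n * (ratios[0] + ratios[1]))
        train += cls_items[:p60]
        valid += cls_items[p60:p80]
        test += cls_items[p80:]
    for split in (train, valid, test):
        rng.shuffle(split)
    return train, valid, test
```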
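The directory layout added below (`dataset_dict.json` plus, per split, `data-*.arrow`, `dataset_info.json` and `state.json`) is what `DatasetDict.save_to_disk` writes, so the splits were most likely serialized once and committed. A toy sketch of producing and re-reading that layout (stand-in data; the real commit serialized the 767/256/257 chest_falsetto examples the same way):

```python
from datasets import Dataset, DatasetDict, load_from_disk

dd = DatasetDict({
    "train": Dataset.from_dict({"label": [0, 1, 2]}),
    "validation": Dataset.from_dict({"label": [3]}),
    "test": Dataset.from_dict({"label": [0]}),
})

# Writes default/dataset_dict.json and, for each split,
# data-00000-of-00001.arrow, dataset_info.json and state.json.
dd.save_to_disk("default")

reloaded = load_from_disk("default")  # round-trips the exact same examples
print(reloaded)
```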
default/dataset_dict.json ADDED
@@ -0,0 +1 @@
{"splits": ["train", "validation", "test"]}
default/test/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:911be4c58dc2b5e622c22afcbb6b474879f4d12205bf46655b34a9f25e1c65d2
size 8900808
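What the repo actually stores for each `data-*.arrow` file is the Git LFS pointer above (spec line, sha256 oid, byte size); the Arrow payload is fetched by Git LFS on checkout or with `git lfs pull`. Once materialized, the shard is a plain Arrow IPC stream that can be inspected directly; a hedged sketch, assuming the clone from the Maintenance section:

```python
import pyarrow as pa

# datasets writes its .arrow shards in the Arrow IPC *stream* format,
# so open_stream (not open_file) is the appropriate reader.
with pa.memory_map("default/test/data-00000-of-00001.arrow", "r") as source:
    table = pa.ipc.open_stream(source).read_all()

print(table.num_rows)      # 257 examples in the default/test split
print(table.schema.names)  # audio, mel, label, gender, singing_method
```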
default/test/dataset_info.json ADDED
@@ -0,0 +1,91 @@
{
  "builder_name": "chest_falsetto",
  "citation": "",
  "config_name": "default",
  "dataset_name": "chest_falsetto",
  "dataset_size": 490550,
  "description": "",
  "download_checksums": {
    "https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto/resolve/master/data/audio.zip": {
      "num_bytes": 39256131,
      "checksum": null
    },
    "https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto/resolve/master/data/mel.zip": {
      "num_bytes": 1744488,
      "checksum": null
    }
  },
  "download_size": 41000619,
  "features": {
    "audio": {
      "sampling_rate": 22050,
      "_type": "Audio"
    },
    "mel": {
      "_type": "Image"
    },
    "label": {
      "names": [
        "m_chest",
        "f_chest",
        "m_falsetto",
        "f_falsetto"
      ],
      "_type": "ClassLabel"
    },
    "gender": {
      "names": [
        "female",
        "male"
      ],
      "_type": "ClassLabel"
    },
    "singing_method": {
      "names": [
        "falsetto",
        "chest"
      ],
      "_type": "ClassLabel"
    }
  },
  "homepage": "https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto",
  "license": "CC-BY-NC-ND",
  "size_in_bytes": 41491169,
  "splits": {
    "train": {
      "name": "train",
      "num_bytes": 293944,
      "num_examples": 767,
      "dataset_name": "chest_falsetto"
    },
    "validation": {
      "name": "validation",
      "num_bytes": 98112,
      "num_examples": 256,
      "dataset_name": "chest_falsetto"
    },
    "test": {
      "name": "test",
      "num_bytes": 98494,
      "num_examples": 257,
      "dataset_name": "chest_falsetto"
    }
  },
  "supervised_keys": {
    "input": "mel",
    "output": "label"
  },
  "task_templates": [
    {
      "task": "image-classification",
      "image_column": "mel",
      "label_column": "label"
    }
  ],
  "version": {
    "version_str": "0.0.0",
    "major": 0,
    "minor": 0,
    "patch": 0
  }
}
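The `features` block in this `dataset_info.json` is the serialized form of the `datasets.Features` the old loader declared, and it round-trips back into Python objects, which is a convenient way to read the class names without touching the Arrow data. A small sketch, assuming the file sits at the path shown above in a local clone:

```python
import json
from datasets import Features

with open("default/test/dataset_info.json") as f:
    info = json.load(f)

features = Features.from_dict(info["features"])
print(features["label"].names)          # ['m_chest', 'f_chest', 'm_falsetto', 'f_falsetto']
print(features["audio"].sampling_rate)  # 22050
```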
default/test/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "e341c366fbee68fd",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "test"
}
default/train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9196ac9f2e24ac4a5aea2caba9d0c0ce4be2fb0956326bc2abb9d883e05a56a0
size 26427464
default/train/dataset_info.json ADDED
@@ -0,0 +1,91 @@
(contents identical to default/test/dataset_info.json above)
default/train/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "efb52741ceeab85f",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "train"
}
default/validation/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b346abf5d6d43f9d102a57d37f49681da9a651041224f98c6360ca1121139b96
size 8944192
default/validation/dataset_info.json ADDED
@@ -0,0 +1,91 @@
(contents identical to default/test/dataset_info.json above)
default/validation/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "637e96f82567678e",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "validation"
}
eval/dataset_dict.json ADDED
@@ -0,0 +1 @@
{"splits": ["train", "validation", "test"]}
eval/test/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d4a4c4ccf9a00540c3f49a57aa542f7d539fb1a72e47fd061948217338057ded
size 19057216
eval/test/dataset_info.json ADDED
@@ -0,0 +1,89 @@
{
  "builder_name": "chest_falsetto",
  "citation": "",
  "config_name": "eval",
  "dataset_name": "chest_falsetto",
  "dataset_size": 747345,
  "description": "",
  "download_checksums": {
    "https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto/resolve/master/data/eval.zip": {
      "num_bytes": 81547911,
      "checksum": null
    }
  },
  "download_size": 81547911,
  "features": {
    "mel": {
      "_type": "Image"
    },
    "cqt": {
      "_type": "Image"
    },
    "chroma": {
      "_type": "Image"
    },
    "label": {
      "names": [
        "m_chest",
        "f_chest",
        "m_falsetto",
        "f_falsetto"
      ],
      "_type": "ClassLabel"
    },
    "gender": {
      "names": [
        "female",
        "male"
      ],
      "_type": "ClassLabel"
    },
    "singing_method": {
      "names": [
        "falsetto",
        "chest"
      ],
      "_type": "ClassLabel"
    }
  },
  "homepage": "https://www.modelscope.cn/datasets/ccmusic-database/chest_falsetto",
  "license": "CC-BY-NC-ND",
  "size_in_bytes": 82295256,
  "splits": {
    "train": {
      "name": "train",
      "num_bytes": 447819,
      "num_examples": 767,
      "dataset_name": "chest_falsetto"
    },
    "validation": {
      "name": "validation",
      "num_bytes": 149472,
      "num_examples": 256,
      "dataset_name": "chest_falsetto"
    },
    "test": {
      "name": "test",
      "num_bytes": 150054,
      "num_examples": 257,
      "dataset_name": "chest_falsetto"
    }
  },
  "supervised_keys": {
    "input": "mel",
    "output": "label"
  },
  "task_templates": [
    {
      "task": "image-classification",
      "image_column": "mel",
      "label_column": "label"
    }
  ],
  "version": {
    "version_str": "0.0.0",
    "major": 0,
    "minor": 0,
    "patch": 0
  }
}
eval/test/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "4ea8e3b0252813a0",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "test"
}
eval/train/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e7d1ff9ee2342fe6062e5553748f105f2ab65fd886fa350dca37182de520c5d
size 56935728
eval/train/dataset_info.json ADDED
@@ -0,0 +1,89 @@
(contents identical to eval/test/dataset_info.json above)
eval/train/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "48eac2b21742c90e",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "train"
}
eval/validation/data-00000-of-00001.arrow ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:39c2116f03b407a313dfba5d434e596cfe7c0f7efead1c798d8f583813a9ddab
size 18998632
eval/validation/dataset_info.json ADDED
@@ -0,0 +1,89 @@
(contents identical to eval/test/dataset_info.json above)
eval/validation/state.json ADDED
@@ -0,0 +1,13 @@
{
  "_data_files": [
    {
      "filename": "data-00000-of-00001.arrow"
    }
  ],
  "_fingerprint": "45401860ede1618c",
  "_format_columns": null,
  "_format_kwargs": {},
  "_format_type": null,
  "_output_all_columns": false,
  "_split": "validation"
}