monetjoe committed
Commit 8ebc1a5
1 Parent(s): 243f5d4

Upload pianos.py

Files changed (1):
  1. pianos.py +256 -270
pianos.py CHANGED
@@ -1,270 +1,256 @@
-import os
-import random
-import datasets
-from datasets.tasks import ImageClassification, AudioClassification
-
-
-_NAMES = [
-    "PearlRiver",
-    "YoungChang",
-    "Steinway-T",
-    "Hsinghai",
-    "Kawai",
-    "Steinway",
-    "Kawai-G",
-    "Yamaha",
-]
-
-_DBNAME = os.path.basename(__file__).split(".")[0]
-
-_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"
-
-_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"
-
-_CITATION = """\
-@dataset{zhaorui_liu_2021_5676893,
-  author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Zijin Li},
-  title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
-  month = {mar},
-  year = {2024},
-  publisher = {HuggingFace},
-  version = {1.2},
-  url = {https://huggingface.co/ccmusic-database}
-}
-"""
-
-_DESCRIPTION = """\
-Piano-Sound-Quality is a dataset of piano sound. It consists of 8 kinds of pianos including PearlRiver, YoungChang, Steinway-T, Hsinghai, Kawai, Steinway, Kawai-G, Yamaha(recorded by Shaohua Ji with SONY PCM-D100). Data was annotated by students from the China Conservatory of Music (CCMUSIC) in Beijing and collected by Monan Zhou.
-"""
-
-_PITCHES = {
-    "009": "A2",
-    "010": "A2#/B2b",
-    "011": "B2",
-    "100": "C1",
-    "101": "C1#/D1b",
-    "102": "D1",
-    "103": "D1#/E1b",
-    "104": "E1",
-    "105": "F1",
-    "106": "F1#/G1b",
-    "107": "G1",
-    "108": "G1#/A1b",
-    "109": "A1",
-    "110": "A1#/B1b",
-    "111": "B1",
-    "200": "C",
-    "201": "C#/Db",
-    "202": "D",
-    "203": "D#/Eb",
-    "204": "E",
-    "205": "F",
-    "206": "F#/Gb",
-    "207": "G",
-    "208": "G#/Ab",
-    "209": "A",
-    "210": "A#/Bb",
-    "211": "B",
-    "300": "c",
-    "301": "c#/db",
-    "302": "d",
-    "303": "d#/eb",
-    "304": "e",
-    "305": "f",
-    "306": "f#/gb",
-    "307": "g",
-    "308": "g#/ab",
-    "309": "a",
-    "310": "a#/bb",
-    "311": "b",
-    "400": "c1",
-    "401": "c1#/d1b",
-    "402": "d1",
-    "403": "d1#/e1b",
-    "404": "e1",
-    "405": "f1",
-    "406": "f1#/g1b",
-    "407": "g1",
-    "408": "g1#/a1b",
-    "409": "a1",
-    "410": "a1#/b1b",
-    "411": "b1",
-    "500": "c2",
-    "501": "c2#/d2b",
-    "502": "d2",
-    "503": "d2#/e2b",
-    "504": "e2",
-    "505": "f2",
-    "506": "f2#/g2b",
-    "507": "g2",
-    "508": "g2#/a2b",
-    "509": "a2",
-    "510": "a2#/b2b",
-    "511": "b2",
-    "600": "c3",
-    "601": "c3#/d3b",
-    "602": "d3",
-    "603": "d3#/e3b",
-    "604": "e3",
-    "605": "f3",
-    "606": "f3#/g3b",
-    "607": "g3",
-    "608": "g3#/a3b",
-    "609": "a3",
-    "610": "a3#/b3b",
-    "611": "b3",
-    "700": "c4",
-    "701": "c4#/d4b",
-    "702": "d4",
-    "703": "d4#/e4b",
-    "704": "e4",
-    "705": "f4",
-    "706": "f4#/g4b",
-    "707": "g4",
-    "708": "g4#/a4b",
-    "709": "a4",
-    "710": "a4#/b4b",
-    "711": "b4",
-    "800": "c5",
-}
-
-_URLS = {
-    "audio": f"{_DOMAIN}/audio.zip",
-    "mel": f"{_DOMAIN}/mel.zip",
-    "eval": f"{_DOMAIN}/eval.zip",
-}
-
-
-class pianos_Config(datasets.BuilderConfig):
-    def __init__(self, features, supervised_keys, task_templates, **kwargs):
-        super(pianos_Config, self).__init__(version=datasets.Version("0.1.2"), **kwargs)
-        self.features = features
-        self.supervised_keys = supervised_keys
-        self.task_templates = task_templates
-
-
-class pianos(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("0.1.2")
-    BUILDER_CONFIGS = [
-        pianos_Config(
-            name="eval",
-            features=datasets.Features(
-                {
-                    "mel": datasets.Image(),
-                    "label": datasets.features.ClassLabel(names=_NAMES),
-                    "pitch": datasets.features.ClassLabel(
-                        names=list(_PITCHES.values())
-                    ),
-                }
-            ),
-            supervised_keys=("mel", "label"),
-            task_templates=[
-                ImageClassification(
-                    task="image-classification",
-                    image_column="mel",
-                    label_column="label",
-                )
-            ],
-        ),
-        pianos_Config(
-            name="default",
-            features=datasets.Features(
-                {
-                    "audio": datasets.Audio(sampling_rate=22050),
-                    "mel": datasets.Image(),
-                    "label": datasets.features.ClassLabel(names=_NAMES),
-                    "pitch": datasets.features.ClassLabel(
-                        names=list(_PITCHES.values())
-                    ),
-                }
-            ),
-            supervised_keys=("audio", "label"),
-            task_templates=[
-                AudioClassification(
-                    task="audio-classification",
-                    audio_column="audio",
-                    label_column="label",
-                )
-            ],
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=self.config.features,
-            homepage=_HOMEPAGE,
-            license="mit",
-            citation=_CITATION,
-            supervised_keys=self.config.supervised_keys,
-            task_templates=self.config.task_templates,
-        )
-
-    def _split_generators(self, dl_manager):
-        dataset = []
-        if self.config.name == "eval":
-            data_files = dl_manager.download_and_extract(_URLS["eval"])
-            for path in dl_manager.iter_files([data_files]):
-                fname = os.path.basename(path)
-                if fname.endswith(".jpg"):
-                    dataset.append(
-                        {
-                            "mel": path,
-                            "label": os.path.basename(os.path.dirname(path)),
-                            "pitch": _PITCHES[fname.split("_")[0]],
-                        }
-                    )
-        else:
-            subset = {}
-            audio_files = dl_manager.download_and_extract(_URLS["audio"])
-            for path in dl_manager.iter_files([audio_files]):
-                fname = os.path.basename(path)
-                if fname.endswith(".wav"):
-                    subset[fname.split(".")[0]] = {
-                        "audio": path,
-                        "label": os.path.basename(os.path.dirname(path)),
-                        "pitch": _PITCHES[fname[1:4]],
-                    }
-
-            mel_files = dl_manager.download_and_extract(_URLS["mel"])
-            for path in dl_manager.iter_files([mel_files]):
-                fname = os.path.basename(path)
-                if fname.endswith(".jpg"):
-                    subset[fname.split(".")[0]]["mel"] = path
-
-            dataset = list(subset.values())
-
-        random.shuffle(dataset)
-        count = len(dataset)
-        p80 = int(0.8 * count)
-        p90 = int(0.9 * count)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"files": dataset[:p80]}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"files": dataset[p80:p90]}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST, gen_kwargs={"files": dataset[p90:]}
-            ),
-        ]
-
-    def _generate_examples(self, files):
-        if self.config.name == "eval":
-            for i, path in enumerate(files):
-                yield i, {
-                    "mel": path["mel"],
-                    "label": path["label"],
-                    "pitch": path["pitch"],
-                }
-
-        else:
-            for i, path in enumerate(files):
-                yield i, {
-                    "audio": path["audio"],
-                    "mel": path["mel"],
-                    "label": path["label"],
-                    "pitch": path["pitch"],
-                }
 
+import os
+import random
+import datasets
+from datasets.tasks import ImageClassification
+
+
+_NAMES = {
+    "PearlRiver": [2.33, 2.53, 2.37, 2.41],
+    "YoungChang": [2.53, 2.63, 2.97, 2.71],
+    "Steinway-T": [3.6, 3.63, 3.67, 3.63],
+    "Hsinghai": [3.4, 3.27, 3.2, 3.29],
+    "Kawai": [3.17, 2.5, 2.93, 2.87],
+    "Steinway": [4.23, 3.67, 4, 3.97],
+    "Kawai-G": [3.37, 2.97, 3.07, 3.14],
+    "Yamaha": [3.23, 3.03, 3.17, 3.14],
+}
+
+_DBNAME = os.path.basename(__file__).split(".")[0]
+
+_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"
+
+_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"
+
+_CITATION = """\
+@dataset{zhaorui_liu_2021_5676893,
+  author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
+  title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
+  month = {mar},
+  year = {2024},
+  publisher = {HuggingFace},
+  version = {1.2},
+  url = {https://huggingface.co/ccmusic-database}
+}
+"""
+
+_DESCRIPTION = """\
+Piano-Sound-Quality is a dataset of piano sound. It consists of 8 kinds of pianos including PearlRiver, YoungChang, Steinway-T, Hsinghai, Kawai, Steinway, Kawai-G, Yamaha(recorded by Shaohua Ji with SONY PCM-D100). Data was annotated by students from the China Conservatory of Music (CCMUSIC) in Beijing and collected by Monan Zhou.
+"""
+
+_PITCHES = {
+    "009": "A2",
+    "010": "A2#/B2b",
+    "011": "B2",
+    "100": "C1",
+    "101": "C1#/D1b",
+    "102": "D1",
+    "103": "D1#/E1b",
+    "104": "E1",
+    "105": "F1",
+    "106": "F1#/G1b",
+    "107": "G1",
+    "108": "G1#/A1b",
+    "109": "A1",
+    "110": "A1#/B1b",
+    "111": "B1",
+    "200": "C",
+    "201": "C#/Db",
+    "202": "D",
+    "203": "D#/Eb",
+    "204": "E",
+    "205": "F",
+    "206": "F#/Gb",
+    "207": "G",
+    "208": "G#/Ab",
+    "209": "A",
+    "210": "A#/Bb",
+    "211": "B",
+    "300": "c",
+    "301": "c#/db",
+    "302": "d",
+    "303": "d#/eb",
+    "304": "e",
+    "305": "f",
+    "306": "f#/gb",
+    "307": "g",
+    "308": "g#/ab",
+    "309": "a",
+    "310": "a#/bb",
+    "311": "b",
+    "400": "c1",
+    "401": "c1#/d1b",
+    "402": "d1",
+    "403": "d1#/e1b",
+    "404": "e1",
+    "405": "f1",
+    "406": "f1#/g1b",
+    "407": "g1",
+    "408": "g1#/a1b",
+    "409": "a1",
+    "410": "a1#/b1b",
+    "411": "b1",
+    "500": "c2",
+    "501": "c2#/d2b",
+    "502": "d2",
+    "503": "d2#/e2b",
+    "504": "e2",
+    "505": "f2",
+    "506": "f2#/g2b",
+    "507": "g2",
+    "508": "g2#/a2b",
+    "509": "a2",
+    "510": "a2#/b2b",
+    "511": "b2",
+    "600": "c3",
+    "601": "c3#/d3b",
+    "602": "d3",
+    "603": "d3#/e3b",
+    "604": "e3",
+    "605": "f3",
+    "606": "f3#/g3b",
+    "607": "g3",
+    "608": "g3#/a3b",
+    "609": "a3",
+    "610": "a3#/b3b",
+    "611": "b3",
+    "700": "c4",
+    "701": "c4#/d4b",
+    "702": "d4",
+    "703": "d4#/e4b",
+    "704": "e4",
+    "705": "f4",
+    "706": "f4#/g4b",
+    "707": "g4",
+    "708": "g4#/a4b",
+    "709": "a4",
+    "710": "a4#/b4b",
+    "711": "b4",
+    "800": "c5",
+}
+
+_URLS = {
+    "audio": f"{_DOMAIN}/audio.zip",
+    "mel": f"{_DOMAIN}/mel.zip",
+    "eval": f"{_DOMAIN}/eval.zip",
+}
+
+
+class pianos(datasets.GeneratorBasedBuilder):
+    # BUILDER_CONFIGS = [
+    #     datasets.BuilderConfig(name="default"),
+    #     datasets.BuilderConfig(name="eval"),
+    # ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=(
+                datasets.Features(
+                    {
+                        "audio": datasets.Audio(sampling_rate=22050),
+                        "mel": datasets.Image(),
+                        "label": datasets.features.ClassLabel(
+                            names=list(_NAMES.keys())
+                        ),
+                        "pitch": datasets.features.ClassLabel(
+                            names=list(_PITCHES.values())
+                        ),
+                        "bass_score": datasets.Value("float32"),
+                        "mid_score": datasets.Value("float32"),
+                        "treble_score": datasets.Value("float32"),
+                        "avg_score": datasets.Value("float32"),
+                    }
+                )
+                if self.config.name == "default"
+                else datasets.Features(
+                    {
+                        "mel": datasets.Image(),
+                        "label": datasets.features.ClassLabel(
+                            names=list(_NAMES.keys())
+                        ),
+                        "pitch": datasets.features.ClassLabel(
+                            names=list(_PITCHES.values())
+                        ),
+                        "bass_score": datasets.Value("float32"),
+                        "mid_score": datasets.Value("float32"),
+                        "treble_score": datasets.Value("float32"),
+                        "avg_score": datasets.Value("float32"),
+                    }
+                )
+            ),
+            homepage=_HOMEPAGE,
+            license="CC-BY-NC-ND",
+            version="1.2.0",
+            citation=_CITATION,
+            supervised_keys=("mel", "label"),
+            task_templates=ImageClassification(
+                image_column="mel",
+                label_column="label",
+            ),
+        )
+
+    def _split_generators(self, dl_manager):
+        dataset = []
+        if self.config.name == "default":
+            subset = {}
+            audio_files = dl_manager.download_and_extract(_URLS["audio"])
+            for path in dl_manager.iter_files([audio_files]):
+                fname = os.path.basename(path)
+                if fname.endswith(".wav"):
+                    lebal = os.path.basename(os.path.dirname(path))
+                    subset[fname.split(".")[0]] = {
+                        "audio": path,
+                        "label": lebal,
+                        "pitch": _PITCHES[fname[1:4]],
+                        "bass_score": _NAMES[lebal][0],
+                        "mid_score": _NAMES[lebal][1],
+                        "treble_score": _NAMES[lebal][2],
+                        "avg_score": _NAMES[lebal][3],
+                    }
+
+            mel_files = dl_manager.download_and_extract(_URLS["mel"])
+            for path in dl_manager.iter_files([mel_files]):
+                fname = os.path.basename(path)
+                if fname.endswith(".jpg"):
+                    subset[fname.split(".")[0]]["mel"] = path
+
+            dataset = list(subset.values())
+
+        else:
+            data_files = dl_manager.download_and_extract(_URLS["eval"])
+            for path in dl_manager.iter_files([data_files]):
+                fname: str = os.path.basename(path)
+                if fname.endswith(".jpg"):
+                    lebal = os.path.basename(os.path.dirname(path))
+                    dataset.append(
+                        {
+                            "mel": path,
+                            "label": lebal,
+                            "pitch": _PITCHES[fname.split("_")[0]],
+                            "bass_score": _NAMES[lebal][0],
+                            "mid_score": _NAMES[lebal][1],
+                            "treble_score": _NAMES[lebal][2],
+                            "avg_score": _NAMES[lebal][3],
+                        }
+                    )
+
+        random.shuffle(dataset)
+        count = len(dataset)
+        p80 = int(0.8 * count)
+        p90 = int(0.9 * count)
+
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN, gen_kwargs={"files": dataset[:p80]}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION, gen_kwargs={"files": dataset[p80:p90]}
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST, gen_kwargs={"files": dataset[p90:]}
+            ),
+        ]
+
+    def _generate_examples(self, files):
+        for i, path in enumerate(files):
+            yield i, path
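
For reference, a minimal sketch of how the uploaded builder might be loaded through the datasets library. The repo id "ccmusic-database/pianos" is an assumption inferred from the citation URL and _DBNAME, not stated in this commit; adjust it to wherever pianos.py is actually hosted. Recent datasets releases also require trust_remote_code=True for script-based datasets.

from datasets import load_dataset

# Assumed repo id. The script defines two configs:
# "default" (audio + mel + quality scores) and "eval" (mel + scores only).
ds = load_dataset("ccmusic-database/pianos", name="default", trust_remote_code=True)
sample = ds["train"][0]
print(sample["label"], sample["pitch"], sample["avg_score"])

# The lighter "eval" config skips the .wav archive and uses only the mel spectrograms.
ds_eval = load_dataset("ccmusic-database/pianos", name="eval", trust_remote_code=True)

Note that because _split_generators shuffles before slicing 80/10/10, the train/validation/test membership is re-randomized on each fresh download rather than being fixed.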