|
import os |
|
import random |
|
import datasets |
|
from datasets.tasks import ImageClassification, AudioClassification |
|
|
|
|
|
# The 8 piano brands/models; list order defines the "label" ClassLabel indices.
_NAMES = [
    "PearlRiver",
    "YoungChang",
    "Steinway-T",
    "Hsinghai",
    "Kawai",
    "Steinway",
    "Kawai-G",
    "Yamaha",
]

# Dataset name derived from this script's filename (Hugging Face loading-script
# convention: the script is named after the dataset repository).
# NOTE(review): `os.path.splitext` would be more robust if the filename ever
# contained extra dots — confirm before changing.
_DBNAME = os.path.basename(__file__).split(".")[0]

# ModelScope homepage for this dataset.
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"

# Base URL for raw-file downloads from the ModelScope repo (master revision,
# files under the "data" directory).
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"

# BibTeX citation for the CCMusic database this dataset belongs to.
_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Zijin Li},
title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
month = {mar},
year = {2024},
publisher = {HuggingFace},
version = {1.2},
url = {https://huggingface.co/ccmusic-database}
}
"""

# Human-readable dataset summary shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
Piano-Sound-Quality is a dataset of piano sound. It consists of 8 kinds of pianos including PearlRiver, YoungChang, Steinway-T, Hsinghai, Kawai, Steinway, Kawai-G, Yamaha(recorded by Shaohua Ji with SONY PCM-D100). Data was annotated by students from the China Conservatory of Music (CCMUSIC) in Beijing and collected by Monan Zhou.
"""
|
|
|
# Maps the 3-digit pitch code embedded in the data filenames to a pitch-name
# string. The first digit selects an octave group and the last two digits index
# the chromatic scale within it (00 = C ... 11 = B); group 0 holds only the
# three lowest keys. Insertion order defines the "pitch" ClassLabel indices.
_PITCHES = {
    # codes 0xx: lowest keys (A2 / A2#-B2b / B2 only)
    "009": "A2",
    "010": "A2#/B2b",
    "011": "B2",
    # codes 1xx: C1–B1 (uppercase spelling)
    "100": "C1",
    "101": "C1#/D1b",
    "102": "D1",
    "103": "D1#/E1b",
    "104": "E1",
    "105": "F1",
    "106": "F1#/G1b",
    "107": "G1",
    "108": "G1#/A1b",
    "109": "A1",
    "110": "A1#/B1b",
    "111": "B1",
    # codes 2xx: C–B (uppercase, no octave suffix)
    "200": "C",
    "201": "C#/Db",
    "202": "D",
    "203": "D#/Eb",
    "204": "E",
    "205": "F",
    "206": "F#/Gb",
    "207": "G",
    "208": "G#/Ab",
    "209": "A",
    "210": "A#/Bb",
    "211": "B",
    # codes 3xx: c–b (lowercase, no octave suffix)
    "300": "c",
    "301": "c#/db",
    "302": "d",
    "303": "d#/eb",
    "304": "e",
    "305": "f",
    "306": "f#/gb",
    "307": "g",
    "308": "g#/ab",
    "309": "a",
    "310": "a#/bb",
    "311": "b",
    # codes 4xx: c1–b1
    "400": "c1",
    "401": "c1#/d1b",
    "402": "d1",
    "403": "d1#/e1b",
    "404": "e1",
    "405": "f1",
    "406": "f1#/g1b",
    "407": "g1",
    "408": "g1#/a1b",
    "409": "a1",
    "410": "a1#/b1b",
    "411": "b1",
    # codes 5xx: c2–b2
    "500": "c2",
    "501": "c2#/d2b",
    "502": "d2",
    "503": "d2#/e2b",
    "504": "e2",
    "505": "f2",
    "506": "f2#/g2b",
    "507": "g2",
    "508": "g2#/a2b",
    "509": "a2",
    "510": "a2#/b2b",
    "511": "b2",
    # codes 6xx: c3–b3
    "600": "c3",
    "601": "c3#/d3b",
    "602": "d3",
    "603": "d3#/e3b",
    "604": "e3",
    "605": "f3",
    "606": "f3#/g3b",
    "607": "g3",
    "608": "g3#/a3b",
    "609": "a3",
    "610": "a3#/b3b",
    "611": "b3",
    # codes 7xx: c4–b4
    "700": "c4",
    "701": "c4#/d4b",
    "702": "d4",
    "703": "d4#/e4b",
    "704": "e4",
    "705": "f4",
    "706": "f4#/g4b",
    "707": "g4",
    "708": "g4#/a4b",
    "709": "a4",
    "710": "a4#/b4b",
    "711": "b4",
    # code 800: highest key (c5) — 3 + 7*12 + 1 = 88 piano keys in total
    "800": "c5",
}
|
|
|
# Download URLs for the three archives in the ModelScope repo: raw audio clips,
# pre-rendered mel-spectrogram images, and the eval-only image set.
_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "eval": f"{_DOMAIN}/eval.zip",
}
|
|
|
|
|
class pianos_Config(datasets.BuilderConfig):
    """BuilderConfig that additionally carries the per-subset feature schema,
    the supervised (input, label) column pair, and the task templates."""

    def __init__(self, features, supervised_keys, task_templates, **kwargs):
        # Pin the config version; everything else flows through to the base class.
        super().__init__(version=datasets.Version("0.1.2"), **kwargs)
        self.task_templates = task_templates
        self.supervised_keys = supervised_keys
        self.features = features
|
|
|
|
|
class pianos(datasets.GeneratorBasedBuilder):
    """Builder for the Piano-Sound-Quality dataset (8 piano types).

    Configs:
        default: audio clip (22 050 Hz) + mel-spectrogram image + piano
            label + pitch label.
        eval: mel-spectrogram image + piano label + pitch label only.

    The downloaded records are shuffled with a fixed seed and partitioned
    80/10/10 into train/validation/test.
    """

    VERSION = datasets.Version("0.1.2")

    BUILDER_CONFIGS = [
        pianos_Config(
            name="eval",
            features=datasets.Features(
                {
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                    "pitch": datasets.features.ClassLabel(
                        names=list(_PITCHES.values())
                    ),
                }
            ),
            supervised_keys=("mel", "label"),
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="mel",
                    label_column="label",
                )
            ],
        ),
        pianos_Config(
            name="default",
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=22050),
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES),
                    "pitch": datasets.features.ClassLabel(
                        names=list(_PITCHES.values())
                    ),
                }
            ),
            supervised_keys=("audio", "label"),
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="label",
                )
            ],
        ),
    ]

    def _info(self):
        """Return DatasetInfo; schema/keys/templates come from the active config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            supervised_keys=self.config.supervised_keys,
            task_templates=self.config.task_templates,
        )

    def _collect_eval(self, dl_manager):
        """Gather {mel, label, pitch} records from the eval image archive."""
        records = []
        eval_dir = dl_manager.download_and_extract(_URLS["eval"])
        for path in dl_manager.iter_files([eval_dir]):
            fname = os.path.basename(path)
            if not fname.endswith(".jpg"):
                continue
            records.append(
                {
                    "mel": path,
                    # the parent directory name is the piano label
                    "label": os.path.basename(os.path.dirname(path)),
                    # filenames look like "<pitch-code>_...jpg" — the leading
                    # 3-digit code indexes _PITCHES (assumed; verify on data)
                    "pitch": _PITCHES[fname.split("_")[0]],
                }
            )
        return records

    def _collect_default(self, dl_manager):
        """Gather {audio, mel, label, pitch} records by joining the audio and
        mel archives on the file stem."""
        subset = {}
        audio_dir = dl_manager.download_and_extract(_URLS["audio"])
        for path in dl_manager.iter_files([audio_dir]):
            fname = os.path.basename(path)
            if not fname.endswith(".wav"):
                continue
            subset[fname.split(".")[0]] = {
                "audio": path,
                "label": os.path.basename(os.path.dirname(path)),
                # characters 1-3 of the wav filename carry the pitch code
                "pitch": _PITCHES[fname[1:4]],
            }

        mel_dir = dl_manager.download_and_extract(_URLS["mel"])
        for path in dl_manager.iter_files([mel_dir]):
            fname = os.path.basename(path)
            if fname.endswith(".jpg"):
                stem = fname.split(".")[0]
                # guard: a spectrogram with no matching wav previously raised
                # KeyError here
                if stem in subset:
                    subset[stem]["mel"] = path

        # Drop wav records that never received a spectrogram:
        # _generate_examples reads item["mel"] unconditionally and would
        # raise KeyError on them.
        return [item for item in subset.values() if "mel" in item]

    def _split_generators(self, dl_manager):
        """Download the archives for the active config and emit an 80/10/10
        train/validation/test partition."""
        if self.config.name == "eval":
            dataset = self._collect_eval(dl_manager)
        else:
            dataset = self._collect_default(dl_manager)

        # Fixed-seed shuffle: the original used the unseeded global RNG, so
        # split membership changed on every load of the dataset.
        random.Random(42).shuffle(dataset)
        count = len(dataset)
        p80 = int(0.8 * count)
        p90 = int(0.9 * count)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": dataset[:p80]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"files": dataset[p80:p90]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": dataset[p90:]}
            ),
        ]

    def _generate_examples(self, files):
        """Yield (index, example) pairs; the 'audio' column is present only
        for the default config."""
        with_audio = self.config.name != "eval"
        for i, item in enumerate(files):
            example = {
                "mel": item["mel"],
                "label": item["label"],
                "pitch": item["pitch"],
            }
            if with_audio:
                example["audio"] = item["audio"]
            yield i, example
|
|