import os
import random
import datasets
from datasets.tasks import ImageClassification

# First-level labels: classic vs. non-classic.
_NAMES_1 = {
    1: "Classic",
    2: "Non_classic",
}

# Second-level labels: genres.
_NAMES_2 = {
    3: "Symphony",
    4: "Opera",
    5: "Solo",
    6: "Chamber",
    7: "Pop",
    8: "Dance_and_house",
    9: "Indie",
    10: "Soul_or_r_and_b",
    11: "Rock",
}

# Third-level labels: sub-genres (0 = no third-level label).
_NAMES_3 = {
    0: "None",
    12: "Pop_vocal_ballad",
    13: "Adult_contemporary",
    14: "Teen_pop",
    15: "Contemporary_dance_pop",
    16: "Dance_pop",
    17: "Classic_indie_pop",
    18: "Chamber_cabaret_and_art_pop",
    19: "Adult_alternative_rock",
    20: "Uplifting_anthemic_rock",
    21: "Soft_rock",
    22: "Acoustic_pop",
}

_HOMEPAGE = f"https://huggingface.co/datasets/ccmusic-database/{os.path.basename(__file__).split('.')[0]}"

_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author    = {Zhaorui Liu, Monan Zhou, Shenyang Xu, Zhaowen Wang, Wei Li and Zijin Li},
  title     = {CCMUSIC DATABASE: A Music Data Sharing Platform for Computational Musicology Research},
  month     = {nov},
  year      = {2021},
  publisher = {Zenodo},
  version   = {1.1},
  doi       = {10.5281/zenodo.5676893},
  url       = {https://doi.org/10.5281/zenodo.5676893}
}
"""

_DESCRIPTION = """\
This database contains about 1700 musical pieces (.mp3 format) with lengths of 270-300s that are divided into 17 genres in total.
"""

_URL = _HOMEPAGE + "/resolve/main/data/mel.zip"


class music_genre(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "mel": datasets.Image(),
                    "fst_level_label": datasets.features.ClassLabel(names=list(_NAMES_1.values())),
                    "sec_level_label": datasets.features.ClassLabel(names=list(_NAMES_2.values())),
                    "thr_level_label": datasets.features.ClassLabel(names=list(_NAMES_3.values())),
                }
            ),
            supervised_keys=("mel", "sec_level_label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="mel",
                    label_column="sec_level_label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        # Download and extract mel.zip, then collect every mel-spectrogram image.
        data_files = dl_manager.download_and_extract(_URL)
        files = dl_manager.iter_files([data_files])
        dataset = []
        for path in files:
            if os.path.basename(path).endswith(".jpg"):
                dataset.append(path)

        # Shuffle once, then split 80% / 10% / 10% into train / validation / test.
        random.shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dataset[:p80]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": dataset[p80:p90]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": dataset[p90:]},
            ),
        ]

    def _calc_label(self, path, depth, substr="/mel/"):
        """Extract the numeric label at the given depth from a path like
        .../mel/<fst>_<name>/<sec>_<name>/<thr>_<name>/xxx.jpg."""
        mel = substr
        dirpath = os.path.dirname(path)
        substr_index = dirpath.find(mel)
        if substr_index < 0:
            # Fall back to Windows-style path separators.
            mel = "\\mel\\"
            substr_index = dirpath.find(mel)

        labstr = dirpath[substr_index + len(mel):]
        labs = labstr.split("/")
        if len(labs) < 2:
            labs = labstr.split("\\")

        # Paths shallower than the requested depth carry no label at that level
        # (e.g. pieces without a third-level sub-genre), which maps to 0 ("None").
        if depth > len(labs):
            return 0

        return int(labs[depth - 1].split("_")[0])

    def _generate_examples(self, files):
        for i, path in enumerate(files):
            yield i, {
                "mel": path,
                "fst_level_label": _NAMES_1[self._calc_label(path, 1)],
                "sec_level_label": _NAMES_2[self._calc_label(path, 2)],
                "thr_level_label": _NAMES_3[self._calc_label(path, 3)],
            }
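

# Usage sketch (not part of the original loader): the builder above is normally
# consumed through `datasets.load_dataset`. The repo id
# "ccmusic-database/music_genre" is an assumption inferred from _HOMEPAGE;
# adjust it if the Hub repository is named differently. Recent `datasets`
# releases may also require trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads mel.zip and builds the 80/10/10 train/validation/test splits.
    ds = load_dataset("ccmusic-database/music_genre")
    sample = ds["train"][0]
    # "mel" decodes to a PIL image; the three labels are ClassLabel integer ids.
    print(sample["mel"], sample["fst_level_label"], sample["sec_level_label"], sample["thr_level_label"])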