import os
import socket
import random
import datasets
from datasets.tasks import ImageClassification

# First-level genre labels (numeric id prefix -> class name)
_NAMES_1 = {
    1: "Classic",
    2: "Non_classic"
}

# Second-level genre labels
_NAMES_2 = {
    3: "Symphony",
    4: "Opera",
    5: "Solo",
    6: "Chamber",
    7: "Pop",
    8: "Dance_and_house",
    9: "Indie",
    10: "Soul_or_r_and_b",
    11: "Rock"
}

# Third-level genre labels
_NAMES_3 = {
    3: "Symphony",
    4: "Opera",
    5: "Solo",
    6: "Chamber",
    12: "Pop_vocal_ballad",
    13: "Adult_contemporary",
    14: "Teen_pop",
    15: "Contemporary_dance_pop",
    16: "Dance_pop",
    17: "Classic_indie_pop",
    18: "Chamber_cabaret_and_art_pop",
    10: "Soul_or_r_and_b",
    19: "Adult_alternative_rock",
    20: "Uplifting_anthemic_rock",
    21: "Soft_rock",
    22: "Acoustic_pop"
}

_DBNAME = os.path.basename(__file__).split('.')[0]

_HOMEPAGE = f"https://huggingface.co/datasets/ccmusic-database/{_DBNAME}"

_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author    = {Zhaorui Liu, Monan Zhou, Shenyang Xu, Yuan Wang, Zhaowen Wang, Wei Li and Zijin Li},
  title     = {CCMUSIC DATABASE: A Music Data Sharing Platform for Computational Musicology Research},
  month     = {nov},
  year      = {2021},
  publisher = {Zenodo},
  version   = {1.1},
  doi       = {10.5281/zenodo.5676893},
  url       = {https://doi.org/10.5281/zenodo.5676893}
}
"""

_DESCRIPTION = """\
This database contains about 1700 musical pieces (in .mp3 format), each 270-300 s long, divided into 17 genres in total.
"""


class music_genre(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "mel": datasets.Image(),
                    "cqt": datasets.Image(),
                    "chroma": datasets.Image(),
                    "fst_level_label": datasets.features.ClassLabel(names=list(_NAMES_1.values())),
                    "sec_level_label": datasets.features.ClassLabel(names=list(_NAMES_2.values())),
                    "thr_level_label": datasets.features.ClassLabel(names=list(_NAMES_3.values()))
                }
            ),
            supervised_keys=("mel", "sec_level_label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="mel",
                    label_column="sec_level_label",
                )
            ]
        )

    def _cdn_url(self, ip='127.0.0.1', port=80):
        try:
            # Prefer a local mirror if one is reachable (easy for local test)
            with socket.create_connection((ip, port), timeout=5):
                return f'http://{ip}/{_DBNAME}/data/genre_data.zip'
        except (socket.timeout, socket.error):
            # Fall back to the Hugging Face Hub
            return f"{_HOMEPAGE}/resolve/main/data/genre_data.zip"

    def _split_generators(self, dl_manager):
        data_files = dl_manager.download_and_extract(self._cdn_url())
        files = dl_manager.iter_files([data_files])
        dataset = []
        for path in files:
            # Index mel-spectrogram images; derive the matching CQT and chroma paths
            if os.path.basename(path).endswith(".jpg") and 'mel' in path:
                dataset.append({
                    'mel': path,
                    'cqt': path.replace('\\mel\\', '\\cqt\\').replace('/mel/', '/cqt/'),
                    'chroma': path.replace('\\mel\\', '\\chroma\\').replace('/mel/', '/chroma/')
                })

        random.shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)

        # 80% / 10% / 10% train / validation / test split
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dataset[:p80]
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dataset[p80:p90]
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dataset[p90:]
                },
            ),
        ]

    def _calc_label(self, path, depth, substr='/mel/'):
        # The directory layout below the spectrogram folder encodes the label
        # hierarchy, with each directory name starting with its numeric label id.
        spect = substr
        dirpath = os.path.dirname(path)
        substr_index = dirpath.find(spect)
        if substr_index < 0:
            # Retry with Windows-style path separators
            spect = spect.replace('/', '\\')
            substr_index = dirpath.find(spect)

        labstr = dirpath[substr_index + len(spect):]
        labs = labstr.split('/')
        if len(labs) < 2:
            labs = labstr.split('\\')

        if depth <= len(labs):
            return int(labs[depth - 1].split('_')[0])
        else:
            # Fall back to the deepest available level
            return int(labs[-1].split('_')[0])

    def _generate_examples(self, files):
        for i, path in enumerate(files):
            yield i, {
                "mel": path['mel'],
                "cqt": path['cqt'],
                "chroma": path['chroma'],
                "fst_level_label": _NAMES_1[self._calc_label(path['mel'], 1)],
                "sec_level_label": _NAMES_2[self._calc_label(path['mel'], 2)],
                "thr_level_label": _NAMES_3[self._calc_label(path['mel'], 3)]
            }
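

# Usage sketch (assumption: this script is published on the Hugging Face Hub as
# "ccmusic-database/music_genre"; the repo id below is illustrative and may differ):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ccmusic-database/music_genre")
#     sample = ds["train"][0]
#     # "sec_level_label" is a ClassLabel, so this prints an integer index into
#     # the second-level genre names listed in _NAMES_2.
#     print(sample["sec_level_label"])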