Datasets:
Size: 10K<n<100K
License: mit
# Standard library.
import os
import random

# Third-party: HuggingFace `datasets` (ImageClassification task template lives
# in datasets.tasks in the datasets 2.x line).
import datasets
from datasets.tasks import ImageClassification
_NAMES = [ | |
"None", | |
"Classic", | |
"Non_classic", | |
"Symphony", | |
"Opera", | |
"Solo", | |
"Chamber", | |
"Pop", | |
"Dance_and_house", | |
"Indie", | |
"Soul_or_r_and_b", | |
"Rock", | |
"Pop_vocal_ballad", | |
"Adult_contemporary", | |
"Teen_pop", | |
"Contemporary_dance_pop", | |
"Dance_pop", | |
"Classic_indie_pop", | |
"Chamber_cabaret_and_art_pop", | |
"Adult_alternative_rock", | |
"Uplifting_anthemic_rock", | |
"Soft_rock", | |
"Acoustic_pop" | |
] | |
_HOMEPAGE = f"https://huggingface.co/datasets/ccmusic-database/{os.path.basename(__file__).split('.')[0]}" | |
_CITATION = """\ | |
@dataset{zhaorui_liu_2021_5676893, | |
author = {Zhaorui Liu, Monan Zhou, Shenyang Xu and Zijin Li}, | |
title = {{Music Data Sharing Platform for Computational Musicology Research (CCMUSIC DATASET)}}, | |
month = nov, | |
year = 2021, | |
publisher = {Zenodo}, | |
version = {1.1}, | |
doi = {10.5281/zenodo.5676893}, | |
url = {https://doi.org/10.5281/zenodo.5676893} | |
} | |
""" | |
_DESCRIPTION = """\ | |
This database contains about 1700 musical pieces (.mp3 format) | |
with lengths of 270-300s that are divided into 17 genres in total. | |
""" | |
_URL = _HOMEPAGE + "/resolve/main/data/mel.zip" | |
class music_genre(datasets.GeneratorBasedBuilder):
    """Mel-spectrogram music-genre dataset with a three-level genre label hierarchy."""

    def _info(self):
        """Declare the schema: one spectrogram image, three hierarchical genre labels, a duration."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    # All three hierarchy levels share the _NAMES vocabulary.
                    "fst_level_label": datasets.features.ClassLabel(names=_NAMES),
                    "sec_level_label": datasets.features.ClassLabel(names=_NAMES),
                    "thr_level_label": datasets.features.ClassLabel(names=_NAMES),
                    "duration": datasets.Value("float64"),
                }
            ),
            supervised_keys=("image", "fst_level_label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="image",
                    label_column="fst_level_label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download/extract the spectrogram archive and split it 80/10/10 into train/valid/test."""
        data_files = dl_manager.download_and_extract(_URL)
        # Keep only the spectrogram images out of the extracted archive.
        dataset = [
            path
            for path in dl_manager.iter_files([data_files])
            if path.endswith(".jpg")
        ]
        # NOTE(review): the shuffle is unseeded, so the train/valid/test splits
        # differ on every run; seed `random` (or sort `dataset` first) if
        # reproducible splits are required.
        random.shuffle(dataset)
        p80 = int(len(dataset) * 0.8)
        p90 = int(len(dataset) * 0.9)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dataset[:p80]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": dataset[p80:p90]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": dataset[p90:]},
            ),
        ]

    def _calc_label(self, path, depth, substr='/mel/'):
        """Decode the class index at hierarchy level `depth` (1-based) from a file path.

        Directory names below the `mel/` folder are expected to begin with a
        numeric index into _NAMES followed by an underscore
        (e.g. ``.../mel/7_Pop/16_Dance_pop/...``). When the path has fewer
        levels than `depth`, the deepest available level is used instead.
        """
        anchor = substr
        dirpath = os.path.dirname(path)
        start = dirpath.find(anchor)
        if start < 0:
            # Fall back to Windows-style separators.
            anchor = '\\mel\\'
            start = dirpath.find(anchor)
        labstr = dirpath[start + len(anchor):]
        levels = labstr.split('/')
        if len(levels) < 2:
            levels = labstr.split('\\')
        # Clamp to the deepest level actually present in the path.
        chosen = levels[depth - 1] if depth <= len(levels) else levels[-1]
        return int(chosen.split('_')[0])

    def _generate_examples(self, files):
        """Yield one example per image; labels are decoded from the directory tree."""
        for key, path in enumerate(files):
            # Filename convention: `<name>_<duration-seconds>.jpg`; splitext is
            # used (instead of splitting on the '.jp' substring) to strip the
            # extension robustly before parsing the duration.
            duration_str = os.path.splitext(os.path.basename(path).split('_')[1])[0]
            yield key, {
                "image": path,
                "fst_level_label": _NAMES[self._calc_label(path, 1)],
                "sec_level_label": _NAMES[self._calc_label(path, 2)],
                "thr_level_label": _NAMES[self._calc_label(path, 3)],
                "duration": float(duration_str),
            }