Datasets:
Size: 10K<n<100K
License: CC-BY-NC-ND
import os
import random

import datasets
from datasets.tasks import ImageClassification
# First-level genre labels (2 classes)
_NAMES_1 = {
    1: "Classic",
    2: "Non_classic",
}

# Second-level genre labels (9 classes)
_NAMES_2 = {
    3: "Symphony",
    4: "Opera",
    5: "Solo",
    6: "Chamber",
    7: "Pop",
    8: "Dance_and_house",
    9: "Indie",
    10: "Soul_or_RnB",
    11: "Rock",
}

# Third-level genre labels (16 classes)
_NAMES_3 = {
    3: "Symphony",
    4: "Opera",
    5: "Solo",
    6: "Chamber",
    12: "Pop_vocal_ballad",
    13: "Adult_contemporary",
    14: "Teen_pop",
    15: "Contemporary_dance_pop",
    16: "Dance_pop",
    17: "Classic_indie_pop",
    18: "Chamber_cabaret_and_art_pop",
    10: "Soul_or_RnB",
    19: "Adult_alternative_rock",
    20: "Uplifting_anthemic_rock",
    21: "Soft_rock",
    22: "Acoustic_pop",
}
_DBNAME = os.path.basename(__file__).split(".")[0]
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"
_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author    = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
  title     = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
  month     = {mar},
  year      = {2024},
  publisher = {HuggingFace},
  version   = {1.2},
  url       = {https://huggingface.co/ccmusic-database}
}
"""
_DESCRIPTION = """\
The raw dataset comprises approximately 1,700 musical pieces in .mp3 format sourced from NetEase Cloud Music. The pieces range from 270 to 300 seconds in length and are all sampled at 48,000 Hz. Because the website providing the audio already supplies style labels for the downloaded music, no additional annotators were involved; validation was performed during the downloading process. The pieces are categorized into a total of 16 genres.
For the pre-processed version, each audio file is cut into 11.4-second segments, yielding 36,375 files, which are then transformed into Mel, CQT, and Chroma spectrograms. Each data entry has six columns: the first three hold the Mel, CQT, and Chroma spectrogram slices in .jpg format, respectively, while the last three contain the labels for the three levels. The first level comprises two categories, the second level nine categories, and the third level 16 categories. The entire dataset is shuffled and split into training, validation, and test sets in a ratio of 8:1:1. This dataset can be used for genre classification.
"""
_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "eval": f"{_DOMAIN}/eval.zip",
}
class music_genre(datasets.GeneratorBasedBuilder):
    # "default" pairs raw audio with its Mel spectrogram; "eval" serves the
    # pre-processed Mel/CQT/Chroma spectrogram slices
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default"),
        datasets.BuilderConfig(name="eval"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=(
                datasets.Features(
                    {
                        "audio": datasets.Audio(sampling_rate=22050),
                        "mel": datasets.Image(),
                        "fst_level_label": datasets.features.ClassLabel(
                            names=list(_NAMES_1.values())
                        ),
                        "sec_level_label": datasets.features.ClassLabel(
                            names=list(_NAMES_2.values())
                        ),
                        "thr_level_label": datasets.features.ClassLabel(
                            names=list(_NAMES_3.values())
                        ),
                    }
                )
                if self.config.name == "default"
                else datasets.Features(
                    {
                        "mel": datasets.Image(),
                        "cqt": datasets.Image(),
                        "chroma": datasets.Image(),
                        "fst_level_label": datasets.features.ClassLabel(
                            names=list(_NAMES_1.values())
                        ),
                        "sec_level_label": datasets.features.ClassLabel(
                            names=list(_NAMES_2.values())
                        ),
                        "thr_level_label": datasets.features.ClassLabel(
                            names=list(_NAMES_3.values())
                        ),
                    }
                )
            ),
            supervised_keys=("mel", "sec_level_label"),
            homepage=_HOMEPAGE,
            license="CC-BY-NC-ND",
            version="1.2.0",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="mel",
                    label_column="sec_level_label",
                )
            ],
        )
    def _split_generators(self, dl_manager):
        dataset = []
        if self.config.name == "default":
            # Pair each .mp3 with its Mel spectrogram .jpg via the shared basename
            files = {}
            audio_files = dl_manager.download_and_extract(_URLS["audio"])
            mel_files = dl_manager.download_and_extract(_URLS["mel"])
            for path in dl_manager.iter_files([audio_files]):
                fname: str = os.path.basename(path)
                if fname.endswith(".mp3"):
                    files[fname.split(".mp")[0]] = {"audio": path}
            for path in dl_manager.iter_files([mel_files]):
                fname = os.path.basename(path)
                if fname.endswith(".jpg"):
                    files[fname.split(".jp")[0]]["mel"] = path
            dataset = list(files.values())
        else:
            # The eval archive stores parallel mel/, cqt/ and chroma/ directory trees,
            # so the CQT and Chroma paths are derived from the Mel path
            data_files = dl_manager.download_and_extract(_URLS["eval"])
            for path in dl_manager.iter_files([data_files]):
                if os.path.basename(path).endswith(".jpg") and "mel" in path:
                    dataset.append(
                        {
                            "mel": path,
                            "cqt": path.replace("\\mel\\", "\\cqt\\").replace(
                                "/mel/", "/cqt/"
                            ),
                            "chroma": path.replace("\\mel\\", "\\chroma\\").replace(
                                "/mel/", "/chroma/"
                            ),
                        }
                    )

        # Shuffle (unseeded, so splits vary between runs), then split 8:1:1
        random.shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dataset[:p80]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": dataset[p80:p90]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": dataset[p90:]},
            ),
        ]
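
    # Split-size example for the pre-processed "eval" subset (36,375 slices, per the
    # description above): p80 = int(36375 * 0.8) = 29,100 and
    # p90 = int(36375 * 0.9) = 32,737, giving 29,100 / 3,637 / 3,638 examples
    # for train / validation / test.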
    def _calc_label(self, path, depth, substr="/mel/"):
        # Directory names below the spectrogram root are expected to start with a
        # numeric label id (e.g. "1_Classic"); return the id at the requested depth
        spect = substr
        dirpath: str = os.path.dirname(path)
        substr_index = dirpath.find(spect)
        if substr_index < 0:
            # Fall back to Windows-style path separators
            spect = spect.replace("/", "\\")
            substr_index = dirpath.find(spect)
        labstr = dirpath[substr_index + len(spect) :]
        labs = labstr.split("/")
        if len(labs) < 2:
            labs = labstr.split("\\")
        if depth <= len(labs):
            return int(labs[depth - 1].split("_")[0])
        else:
            return int(labs[-1].split("_")[0])
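
    # Worked example for _calc_label (the path below is hypothetical; the actual
    # directory names inside the archives may differ):
    #   path = ".../mel/1_Classic/3_Symphony/3_Symphony/0001.jpg"
    #   depth=1 -> 1 ("Classic"), depth=2 -> 3 ("Symphony"), depth=3 -> 3 ("Symphony")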
    def _generate_examples(self, files):
        if self.config.name == "default":
            for i, path in enumerate(files):
                yield i, {
                    "audio": path["audio"],
                    "mel": path["mel"],
                    "fst_level_label": _NAMES_1[self._calc_label(path["mel"], 1)],
                    "sec_level_label": _NAMES_2[self._calc_label(path["mel"], 2)],
                    "thr_level_label": _NAMES_3[self._calc_label(path["mel"], 3)],
                }
        else:
            for i, path in enumerate(files):
                yield i, {
                    "mel": path["mel"],
                    "cqt": path["cqt"],
                    "chroma": path["chroma"],
                    "fst_level_label": _NAMES_1[self._calc_label(path["mel"], 1)],
                    "sec_level_label": _NAMES_2[self._calc_label(path["mel"], 2)],
                    "thr_level_label": _NAMES_3[self._calc_label(path["mel"], 3)],
                }
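

# Minimal usage sketch (not part of the builder above). It assumes this script is
# loaded locally with the `datasets` library; depending on the installed version,
# `trust_remote_code=True` may be required, and very recent releases have dropped
# script-based loading entirely.
if __name__ == "__main__":
    # Load the pre-processed spectrogram subset ("eval" config), training split
    ds = datasets.load_dataset(__file__, name="eval", split="train")
    sample = ds[0]
    print(sample["mel"])              # PIL image of a Mel spectrogram slice
    print(sample["sec_level_label"])  # integer id of the second-level genre label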