Datasets:
Size: 10K<n<100K
License: mit
import os | |
import random | |
import datasets | |
import pandas as pd | |
from datasets.tasks import ImageClassification | |
# Once upload a new piano brand, please register its name here | |
_NAMES = [ | |
"0_none", | |
"1_classic", | |
"2_non_classic", | |
"3_symphony", | |
"4_opera", | |
"5_solo", | |
"6_chamber", | |
"7_pop", | |
"8_dance_and_house", | |
"9_indie", | |
"10_soul_or_r_and_b", | |
"11_rock" | |
] | |
_DBNAME = os.path.basename(__file__).split('.')[0] | |
_HOMEPAGE = "https://huggingface.co/datasets/ccmusic-database/" + _DBNAME | |
_CITATION = """\ | |
@dataset{zhaorui_liu_2021_5676893, | |
author = {Zhaorui Liu and Zijin Li}, | |
title = {{Music Data Sharing Platform for Computational Musicology Research (CCMUSIC DATASET)}}, | |
month = nov, | |
year = 2021, | |
publisher = {Zenodo}, | |
version = {1.1}, | |
doi = {10.5281/zenodo.5676893}, | |
url = {https://doi.org/10.5281/zenodo.5676893} | |
} | |
""" | |
_DESCRIPTION = """\ | |
This database contains about 1700 musical pieces (.mp3 format, downloaded from NetEase Cloud Music) | |
with lengths of 270-300s that are divided into 17 genres in total. | |
""" | |
_URL = _HOMEPAGE + "/resolve/main/data/dataset.zip" | |
_CSV = _HOMEPAGE + "/resolve/main/data/labels.csv" | |
class music_genre(datasets.GeneratorBasedBuilder):
    """Builder for the ccmusic-database music-genre dataset.

    Each example pairs a spectrogram image (.png, named "<id>.png") with a
    duration string and three levels of genre labels taken from labels.csv,
    whose rows are indexed by that integer file id.
    """

    def _info(self):
        """Describe features, supervision keys, and the classification task."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "duration": datasets.Value("string"),
                    "fst_level_label": datasets.features.ClassLabel(names=_NAMES),
                    "sec_level_label": datasets.features.ClassLabel(names=_NAMES),
                    "thr_level_label": datasets.features.ClassLabel(names=_NAMES),
                }
            ),
            supervised_keys=("image", "fst_level_label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="image",
                    label_column="fst_level_label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download the data and split it 80/10/10 into train/val/test.

        NOTE(review): the shuffle is unseeded, so splits differ between
        runs — confirm whether reproducible splits are wanted here.
        """
        data_files = dl_manager.download_and_extract(_URL)
        labels = dl_manager.download(_CSV)
        # Keep only image files so the 80/90 cut points are computed over
        # real examples, not over stray non-.png files in the archive.
        dataset = [
            path
            for path in dl_manager.iter_files([data_files])
            if path.endswith(".png")
        ]
        random.shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": dataset[:p80], "labels": labels},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": dataset[p80:p90], "labels": labels},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": dataset[p90:], "labels": labels},
            ),
        ]

    def _generate_examples(self, files, labels):
        """Yield (key, example) pairs for one split.

        Bug fix: the original indexed the label table positionally with
        ``label.iloc[i]``, where ``i`` was the enumeration position within
        the *shuffled* file list — so every image received the label row of
        an unrelated file. Labels are keyed by the integer id encoded in the
        filename ("<id>.png"), so the row is looked up by that id instead.
        """
        label = pd.read_csv(labels, index_col='id')
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            if not file_name.endswith(".png"):
                continue
            # Filenames are "<id>.png"; the id is the labels.csv index.
            data_id = int(os.path.splitext(file_name)[0])
            row = label.loc[data_id]
            yield i, {
                "image": path,
                "duration": row['duration'],
                "fst_level_label": _NAMES[row['fst_level_label']],
                "sec_level_label": _NAMES[row['sec_level_label']],
                "thr_level_label": _NAMES[row['thr_level_label']],
            }