import os
import random

import datasets
import pandas as pd
from datasets.tasks import AudioClassification

# The 12 pitch classes; used as the label set for both "System" and "Tonic".
_SYSTEM_TONIC = [
    "C", "#C/bD", "D", "#D/bE", "E", "F",
    "#F/bG", "G", "#G/bA", "A", "#A/bB", "B",
]

# The five Chinese national pentatonic mode patterns.
_PATTERN = ["Gong", "Shang", "Jue", "Zhi", "Yu"]

# Scale-type categories: pentatonic plus the 6- and 7-tone variants.
_TYPE = [
    "Pentatonic",
    "Hexatonic_Qingjue",
    "Hexatonic_Biangong",
    "Heptatonic_Yayue",
    "Heptatonic_Qingyue",
    "Heptatonic_Yanyue",
]

# Repo URL is derived from this script's file name (HF dataset-script convention).
_HOMEPAGE = f"https://huggingface.co/datasets/ccmusic-database/{os.path.basename(__file__).split('.')[0]}"

_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author    = {Zhaorui Liu, Monan Zhou, Shenyang Xu, Yuan Wang, Zhaowen Wang, Wei Li and Zijin Li},
  title     = {CCMUSIC DATABASE: A Music Data Sharing Platform for Computational Musicology Research},
  month     = {nov},
  year      = {2021},
  publisher = {Zenodo},
  version   = {1.1},
  doi       = {10.5281/zenodo.5676893},
  url       = {https://doi.org/10.5281/zenodo.5676893}
}
"""

_DESCRIPTION = """\
Based on the working idea of combining manual labeling with computer in the construction of World Music Database, this database collects and labels the audio of five modes (including five tones, six tones and seven tones) of "Gong, Shang, Jue, Zhi and Yu". At the same time, it makes a detailed analysis of the judgment of Chinese national pentatonic modes, and finds application scenarios and technical models, which can provide raw data for the analysis and retrieval of Chinese national music characteristics.
"""

_URLS = {
    "audio": f"{_HOMEPAGE}/resolve/main/data/audio.zip",
    "label": f"{_HOMEPAGE}/resolve/main/data/label.csv",
}


class CNPM(datasets.GeneratorBasedBuilder):
    """Builder for the CCMusic Chinese National Pentatonic Mode dataset.

    Audio is 44.1 kHz wav; per-file labels come from a GBK-encoded CSV
    keyed by file name, with integer codes indexing into the constant
    label lists above.
    """

    def _info(self):
        """Describe the dataset features, supervised keys and metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=44_100),
                    "System": datasets.features.ClassLabel(names=_SYSTEM_TONIC),
                    "Tonic": datasets.features.ClassLabel(names=_SYSTEM_TONIC),
                    "Pattern": datasets.features.ClassLabel(names=_PATTERN),
                    "Type": datasets.features.ClassLabel(names=_TYPE),
                    "Mode_Name": datasets.Value("string"),
                    "Length": datasets.Value("string"),
                }
            ),
            supervised_keys=("audio", "Type"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="Type",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        """Download audio + label CSV and expose one shuffled TRAIN split."""
        audio_dir = dl_manager.download_and_extract(_URLS["audio"])
        label_file = dl_manager.download(_URLS["label"])
        # Label CSV is GBK-encoded and indexed by the audio file name.
        labels = pd.read_csv(label_file, index_col="File_Name", encoding="gbk")
        wav_files = [
            fpath
            for fpath in dl_manager.iter_files([audio_dir])
            if os.path.basename(fpath).endswith(".wav")
        ]
        # NOTE(review): unseeded shuffle -> example order differs between runs.
        random.shuffle(wav_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": wav_files, "labels": labels},
            )
        ]

    def _val_of_key(self, labels: pd.DataFrame, key: str, col: str):
        """Return labels[col] for row `key`, or '' when row/column is missing."""
        try:
            # Single-label .loc lookup (same KeyError semantics as the
            # chained labels.loc[key][col], without the intermediate Series).
            return labels.loc[key, col]
        except KeyError:
            return ""

    def _generate_examples(self, files, labels):
        """Yield (key, example) pairs for each labeled wav file.

        Fix: the original crashed with ValueError (int('')) when a wav
        file had no row in the label CSV, because _val_of_key returns ''
        for missing keys; such unlabeled files are now skipped.
        """
        for i, path in enumerate(files):
            fname = os.path.basename(path)
            if fname not in labels.index:
                continue  # no annotations for this recording
            yield i, {
                "audio": path,
                "System": _SYSTEM_TONIC[int(self._val_of_key(labels, fname, "System"))],
                "Tonic": _SYSTEM_TONIC[int(self._val_of_key(labels, fname, "Tonic"))],
                "Pattern": _PATTERN[int(self._val_of_key(labels, fname, "Pattern"))],
                "Type": _TYPE[int(self._val_of_key(labels, fname, "Type"))],
                "Mode_Name": self._val_of_key(labels, fname, "Mode_Name"),
                "Length": self._val_of_key(labels, fname, "Length"),
            }