import os
import random
import hashlib
import datasets
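# Note: `datasets.tasks` (task templates) was removed from newer releases of the
# `datasets` library; this script assumes a version that still provides it.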
from datasets.tasks import AudioClassification

# The dataset name is derived from this script's filename
_DBNAME = os.path.basename(__file__).split(".")[0]

_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"

# English class label -> Chinese technique name (surfaced in the `cname` column)
_NAMES = {
    "vibrato": "颤音",
    "upward_portamento": "上滑音",
    "downward_portamento": "下滑音",
    "returning_portamento": "回滑音",
    "glissando": "刮奏, 花指",
    "tremolo": "摇指",
    "harmonics": "泛音",
    "plucks": "勾, 打, 抹, 托, ...",
}

_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author       = {Monan Zhou and Shenyang Xu and Zhaorui Liu and Zhaowen Wang and Feng Yu and Wei Li and Baoqiang Han},
  title        = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
  month        = {mar},
  year         = {2024},
  publisher    = {HuggingFace},
  version      = {1.2},
  url          = {https://huggingface.co/ccmusic-database}
}
"""

_DESCRIPTION = """\
The raw dataset comprises 2,824 audio clips showcasing various guzheng playing techniques. Specifically, 2,328 clips were sourced from virtual sound banks, while 496 clips were performed by a skilled professional guzheng artist. These recordings encompass a comprehensive range of tones inherent to the guzheng instrument. Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng, the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).

Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach. In contrast to utilizing platform-specific automated splitting mechanisms, we directly employ the pre-split data for subsequent integration steps.
"""

_URLS = {"audio": f"{_DOMAIN}/audio.zip", "mel": f"{_DOMAIN}/mel.zip"}


class GZ_IsoTech(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=22050),
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=list(_NAMES.keys())),
                    "cname": datasets.Value("string"),
                }
            ),
            supervised_keys=("audio", "label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="label",
                )
            ],
        )

    def _str2md5(self, original_string):
        """
        Calculate and return the MD5 hash of a given string.
        Parameters:
        original_string (str): The original string for which the MD5 hash is to be computed.
        Returns:
        str: The hexadecimal representation of the MD5 hash.
        """
        # Create an md5 object
        md5_obj = hashlib.md5()
        # Update the md5 object with the original string encoded as bytes
        md5_obj.update(original_string.encode("utf-8"))
        # Retrieve the hexadecimal representation of the MD5 hash
        md5_hash = md5_obj.hexdigest()
        return md5_hash

    def _split_generators(self, dl_manager):
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        mel_files = dl_manager.download_and_extract(_URLS["mel"])
        train_files, test_files = {}, {}

        # Audio clips and mel-spectrogram images share the same split/class/filename layout,
        # so an MD5 of "split/class/stem" serves as the key for pairing them across archives.
        for path in dl_manager.iter_files([audio_files]):
            fname = os.path.basename(path)
            dirname = os.path.dirname(path)
            splt = os.path.basename(os.path.dirname(dirname))
            if fname.endswith(".wav"):
                cls = f"{splt}/{os.path.basename(dirname)}/"
                item_id = self._str2md5(cls + os.path.splitext(fname)[0])
                if splt == "train":
                    train_files[item_id] = {"audio": path}
                else:
                    test_files[item_id] = {"audio": path}

        for path in dl_manager.iter_files([mel_files]):
            fname = os.path.basename(path)
            dirname = os.path.dirname(path)
            splt = os.path.basename(os.path.dirname(dirname))
            if fname.endswith(".jpg"):
                cls = f"{splt}/{os.path.basename(dirname)}/"
                # Assumes every mel image has a matching .wav indexed in the loop above
                item_id = self._str2md5(cls + os.path.splitext(fname)[0])
                if splt == "train":
                    train_files[item_id]["mel"] = path
                else:
                    test_files[item_id]["mel"] = path

        trainset = list(train_files.values())
        testset = list(test_files.values())
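        # Shuffle within each split so examples are not ordered by technique class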
        random.shuffle(trainset)
        random.shuffle(testset)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": trainset},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": testset},
            ),
        ]

    def _generate_examples(self, files):
        for i, path in enumerate(files):
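            # The label is the name of the class directory containing the audio file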
            pt = os.path.basename(os.path.dirname(path["audio"]))
            yield i, {
                "audio": path["audio"],
                "mel": path["mel"],
                "label": pt,
                "cname": _NAMES[pt],
            }
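

# A minimal usage sketch (not part of the original script; assumptions noted below):
# running this file directly loads the builder defined above via `datasets.load_dataset`
# and prints the resulting splits and class names. It requires network access to the data
# host, and depending on your `datasets` version, loading a local script may also need
# `trust_remote_code=True`.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__)  # load this script as a local dataset builder
    print(ds)  # expected: a DatasetDict with "train" and "test" splits (roughly 4:1)
    print(ds["train"].features["label"].names)  # the eight playing-technique classes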