"""HuggingFace datasets loading script for the ccmusic chest_falsetto dataset."""
import os
import random

import datasets

# Task templates (datasets.tasks) were removed in datasets 3.0,
# so this script targets datasets 2.x.
from datasets.tasks import ImageClassification

_NAMES = {
    "all": ["m_chest", "f_chest", "m_falsetto", "f_falsetto"],
    "gender": ["female", "male"],
    "singing_method": ["falsetto", "chest"],
}

_DBNAME = os.path.basename(__file__).split(".")[0]

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"

_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"

_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author       = {Monan Zhou and Shenyang Xu and Zhaorui Liu and Zhaowen Wang and Feng Yu and Wei Li and Zijin Li},
  title        = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
  month        = {mar},
  year         = {2024},
  publisher    = {HuggingFace},
  version      = {1.2},
  url          = {https://huggingface.co/ccmusic-database}
}
"""

_DESCRIPTION = """\
The raw dataset comprises 1,280 monophonic singing audio files in .wav format (sampled at 44,100 Hz), consisting of chest and falsetto voices performed, recorded, and annotated by students majoring in Vocal Music at the China Conservatory of Music. The chest voice is tagged as chest and the falsetto voice as falsetto. Additionally, the dataset includes the Mel spectrogram, Mel-frequency cepstral coefficients (MFCC), and spectral characteristics of each audio segment, amounting to 5,120 CSV files in total.
The original dataset did not differentiate between male and female voices, an omission that is critical for accurately identifying chest and falsetto vocal techniques. To address this, we conducted a meticulous manual review and added gender annotations to the dataset. Besides the original content, the preprocessed version used during the evaluation (detailed in Section IV) is also provided. This two-version approach is likewise applied to the two subsequent classification datasets that have not yet been evaluated: the Music Genre Dataset and the Bel Canto & Chinese Folk Singing Dataset.

For the pre-processed version, each audio file was clipped into 0.25-second segments and transformed into Mel, CQT, and Chroma spectrograms in .jpg format, resulting in 8,974 files. The chest/falsetto label for each file is one of four classes: m_chest, m_falsetto, f_chest, and f_falsetto. The spectrograms, the chest/falsetto label, and the gender label are combined into one data entry, with the first three columns holding the Mel, CQT, and Chroma spectrograms; the fourth and fifth columns are the chest/falsetto label and the gender label, respectively. Additionally, the integrated dataset provides a function to shuffle and split the data into training, validation, and test sets in an 8:1:1 ratio. This dataset can be used for singing-related tasks such as singing gender classification or chest and falsetto voice classification.
"""

_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "eval": f"{_DOMAIN}/eval.zip",
}


class chest_falsetto_Config(datasets.BuilderConfig):
    """BuilderConfig that carries the feature schema for each configuration."""

    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("1.2.0"), **kwargs)
        self.features = features


class chest_falsetto(datasets.GeneratorBasedBuilder):
    """Two configs: 'default' (audio + Mel spectrogram) and 'eval' (Mel/CQT/Chroma images)."""

    VERSION = datasets.Version("1.2.0")
    BUILDER_CONFIGS = [
        chest_falsetto_Config(
            name="eval",
            features=datasets.Features(
                {
                    "mel": datasets.Image(),
                    "cqt": datasets.Image(),
                    "chroma": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES["all"]),
                    "gender": datasets.features.ClassLabel(names=_NAMES["gender"]),
                    "singing_method": datasets.features.ClassLabel(
                        names=_NAMES["singing_method"]
                    ),
                }
            ),
        ),
        chest_falsetto_Config(
            name="default",
            features=datasets.Features(
                {
                    "audio": datasets.Audio(sampling_rate=22050),
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES["all"]),
                    "gender": datasets.features.ClassLabel(names=_NAMES["gender"]),
                    "singing_method": datasets.features.ClassLabel(
                        names=_NAMES["singing_method"]
                    ),
                }
            ),
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            features=self.config.features,
            supervised_keys=("mel", "label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                ImageClassification(
                    task="image-classification",
                    image_column="mel",
                    label_column="label",
                )
            ],
        )

    def _split_generators(self, dl_manager):
        dataset = []

        if self.config.name == "eval":
            # Pre-processed version: collect only the Mel spectrogram paths;
            # the matching CQT/Chroma paths are derived in _generate_examples.
            data_files = dl_manager.download_and_extract(_URLS["eval"])
            for path in dl_manager.iter_files([data_files]):
                if "mel" in path and os.path.basename(path).endswith(".jpg"):
                    dataset.append(path)

        else:
            # Default version: pair each .wav recording with its Mel
            # spectrogram by the shared file stem.
            files = {}
            audio_files = dl_manager.download_and_extract(_URLS["audio"])
            mel_files = dl_manager.download_and_extract(_URLS["mel"])
            for path in dl_manager.iter_files([audio_files]):
                fname = os.path.basename(path)
                if fname.endswith(".wav"):
                    item_id = fname.split(".")[0]
                    files[item_id] = {"audio": path}

            for path in dl_manager.iter_files([mel_files]):
                fname = os.path.basename(path)
                if fname.endswith(".jpg"):
                    item_id = fname.split(".")[0]
                    # Skip spectrograms that have no matching audio file
                    # instead of raising a KeyError.
                    if item_id in files:
                        files[item_id]["mel"] = path

            dataset = list(files.values())

        # Shuffle, then cut into train/validation/test splits at an 8:1:1 ratio.
        random.shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": dataset[:p80]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"files": dataset[p80:p90]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": dataset[p90:]}
            ),
        ]

    def _generate_examples(self, files):
        if self.config.name == "eval":
            for i, fpath in enumerate(files):
                # The second and third underscore-separated fields of the file
                # name are parsed as gender (m/f) and singing method
                # (chest/falsetto).
                file_name = os.path.basename(fpath)
                sex = file_name.split("_")[1]
                method = file_name.split("_")[2]
                yield i, {
                    "mel": fpath,
                    # Replacing "mel" with "cqt"/"chroma" in the path points at
                    # the corresponding spectrogram images.
                    "cqt": fpath.replace("mel", "cqt"),
                    "chroma": fpath.replace("mel", "chroma"),
                    "label": f"{sex}_{method}",
                    "gender": "male" if sex == "m" else "female",
                    "singing_method": method,
                }

        else:
            for i, fpath in enumerate(files):
                # For the default config the method is the last field before
                # the extension, so the extension is stripped as well.
                file_name = os.path.basename(fpath["audio"])
                sex = file_name.split("_")[1]
                method = file_name.split("_")[2].split(".")[0]
                yield i, {
                    "audio": fpath["audio"],
                    "mel": fpath["mel"],
                    "label": f"{sex}_{method}",
                    "gender": "male" if sex == "m" else "female",
                    "singing_method": method,
                }
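

# A minimal usage sketch: this block only runs when the file is executed
# directly, never when the datasets library imports it as a loading script.
# Assumption: the script is stored locally (e.g. as chest_falsetto.py); recent
# datasets releases may additionally require trust_remote_code=True.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, name="default")
    print(ds)  # DatasetDict with train / validation / test splits (8:1:1)
    sample = ds["train"][0]
    print(sample["label"], sample["gender"], sample["singing_method"])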