File size: 6,359 Bytes
bf74925
00a729a
 
 
bf74925
 
00a729a
bf74925
00a729a
bf74925
00a729a
bf74925
 
 
00a729a
 
 
 
 
 
 
bf74925
 
 
 
00a729a
bf74925
00a729a
 
bf74925
00a729a
 
 
 
 
bf74925
 
00a729a
bf74925
 
 
 
00a729a
 
 
 
 
 
 
 
 
bf74925
 
00a729a
bf74925
 
 
00a729a
bf74925
 
00a729a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bf74925
00a729a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bf74925
 
 
00a729a
 
 
 
 
 
 
 
bf74925
 
 
 
00a729a
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import os
import csv
import random
import hashlib
import datasets

# Dataset name is derived from this script's own filename; HuggingFace/ModelScope
# convention is that the loading script is named after the dataset repo.
_DBNAME = os.path.basename(__file__).split(".")[0]

_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"

# Base URL of the ModelScope repo's `data` directory; the download links in
# `_URLS` below are built on top of this.
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"

# BibTeX citation surfaced through `datasets.DatasetInfo`.
_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
  author       = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
  title        = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
  month        = {mar},
  year         = {2024},
  publisher    = {HuggingFace},
  version      = {1.2},
  url          = {https://huggingface.co/ccmusic-database}
}
"""

# Human-readable dataset card text surfaced through `datasets.DatasetInfo`.
_DESCRIPTION = """\
The raw dataset comprises 300 pop songs in .mp3 format, sourced from the NetEase music, accompanied by a structure annotation file for each song in .txt format. The annotator for music structure is a professional musician and teacher from the China Conservatory of Music. For the statistics of the dataset, there are 208 Chinese songs, 87 English songs, three Korean songs and two Japanese songs. The song structures are labeled as follows: intro, re-intro, verse, chorus, pre-chorus, post-chorus, bridge, interlude and ending. Fig. 7 shows the frequency of each segment label that appears in the set. The labels chorus and verse are the two most prevalent segment labels in the dataset and they are the most common segment in Western popular music. Among them, the number of “Postchorus” tags is the least, with only two present.

Unlike the above three datasets for classification, this one has not undergone pre-processing such as spectrogram transform. Thus we provide the original content only. The integrated version of the dataset is organized based on audio files, with each item structured into three columns: The first column contains the audio of the song in .mp3 format, sampled at 44,100 Hz. The second column consists of lists indicating the time points that mark the boundaries of different song sections, while the third column contains lists corresponding to the labels of the song structures listed in the second column. Strictly speaking, the first column represents the data, while the subsequent two columns represent the label.
"""

# One downloadable zip archive per modality; all three share the same file
# stems so they can be joined per song in `_split_generators`.
_URLS = {
    "audio": f"{_DOMAIN}/audio.zip",
    "mel": f"{_DOMAIN}/mel.zip",
    "label": f"{_DOMAIN}/label.zip",
}


class song_structure(datasets.GeneratorBasedBuilder):
    """Builder for the ccmusic pop-song structure dataset.

    Each example joins three files that share a filename stem across the
    audio/mel/label archives: the song's .mp3 audio, its mel-spectrogram
    .jpg, and a sequence of (onset_time, offset_time, structure) segment
    annotations parsed from a tab-separated .txt file. Items are shuffled
    deterministically and split 80/10/10 into train/validation/test.
    """

    def _info(self):
        """Declare the example schema, homepage, license and citation."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    # Audio is decoded/resampled to 22050 Hz on access
                    # (the source .mp3 files are 44.1 kHz per the card).
                    "audio": datasets.Audio(sampling_rate=22050),
                    "mel": datasets.Image(),
                    "label": datasets.Sequence(
                        feature={
                            "onset_time": datasets.Value("uint32"),
                            "offset_time": datasets.Value("uint32"),
                            "structure": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=("audio", "label"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
        )

    def _parse_txt_label(self, txt_file):
        """Parse a tab-separated annotation file into segment dicts.

        Parameters:
        txt_file (str): Path to a .txt file whose rows are
            ``onset<TAB>offset<TAB>structure``.
        Returns:
        list[dict]: One dict per well-formed row; rows that do not have
            exactly three fields are silently skipped (best-effort parse).
        """
        label = []
        with open(txt_file, mode="r", encoding="utf-8") as file:
            for row in csv.reader(file, delimiter="\t"):
                if len(row) == 3:
                    label.append(
                        {
                            "onset_time": int(row[0]),
                            "offset_time": int(row[1]),
                            "structure": str(row[2]),
                        }
                    )

        return label

    def _str2md5(self, original_string):
        """Return the hexadecimal MD5 digest of ``original_string`` (UTF-8).

        Parameters:
        original_string (str): The string to hash.
        Returns:
        str: 32-character lowercase hex digest.
        """
        return hashlib.md5(original_string.encode("utf-8")).hexdigest()

    def _split_generators(self, dl_manager):
        """Download the three archives, join files by song id, split 80/10/10."""
        audio_files = dl_manager.download_and_extract(_URLS["audio"])
        mel_files = dl_manager.download_and_extract(_URLS["mel"])
        txt_files = dl_manager.download_and_extract(_URLS["label"])
        files = {}

        for path in dl_manager.iter_files([audio_files]):
            fname: str = os.path.basename(path)
            if fname.endswith(".mp3"):
                # Items are keyed by the MD5 of the filename stem so the three
                # archives can be joined despite differing extensions.
                # os.path.splitext (not str.split) so stems that merely contain
                # ".mp"/".jp"/".tx" are not truncated.
                item_id = self._str2md5(os.path.splitext(fname)[0])
                files[item_id] = {"audio": path}

        for path in dl_manager.iter_files([mel_files]):
            fname: str = os.path.basename(path)
            if fname.endswith(".jpg"):
                item_id = self._str2md5(os.path.splitext(fname)[0])
                # Ignore orphan spectrograms with no matching audio file
                # (direct indexing would raise KeyError here).
                if item_id in files:
                    files[item_id]["mel"] = path

        for path in dl_manager.iter_files([txt_files]):
            fname: str = os.path.basename(path)
            if fname.endswith(".txt"):
                item_id = self._str2md5(os.path.splitext(fname)[0])
                # Ignore orphan annotation files with no matching audio file.
                if item_id in files:
                    files[item_id]["label"] = self._parse_txt_label(path)

        dataset = list(files.values())
        # Seeded, instance-local shuffle: split membership is reproducible
        # across runs/machines and the global `random` state is untouched.
        random.Random(42).shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"files": dataset[:p80]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION, gen_kwargs={"files": dataset[p80:p90]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"files": dataset[p90:]}
            ),
        ]

    def _generate_examples(self, files):
        """Yield (index, example) pairs for one split's list of item dicts."""
        for i, item in enumerate(files):
            yield i, {
                "audio": item["audio"],
                "mel": item["mel"],
                "label": item["label"],
            }