monetjoe committed on
Commit 19622af
1 Parent(s): c08d38f

Upload GZ_IsoTech.py

Files changed (1)
  1. GZ_IsoTech.py +131 -139
GZ_IsoTech.py CHANGED
@@ -1,139 +1,131 @@
- import os
- import random
- import hashlib
- import datasets
- from datasets.tasks import AudioClassification
-
- _DBNAME = os.path.basename(__file__).split(".")[0]
-
- _DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"
-
- _HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"
-
- _NAMES = {
-     "vibrato": "颤音",
-     "upward_portamento": "上滑音",
-     "downward_portamento": "下滑音",
-     "returning_portamento": "回滑音",
-     "glissando": "刮奏, 花指",
-     "tremolo": "摇指",
-     "harmonics": "泛音",
-     "plucks": "勾, 打, 抹, 托, ...",
- }
-
- _CITATION = """\
- @dataset{zhaorui_liu_2021_5676893,
- author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
- title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
- month = {mar},
- year = {2024},
- publisher = {HuggingFace},
- version = {1.2},
- url = {https://huggingface.co/ccmusic-database}
- }
- """
-
- _DESCRIPTION = """\
- The raw dataset comprises 2,824 audio clips showcasing various guzheng playing techniques. Specifically, 2,328 clips were sourced from virtual sound banks, while 496 clips were performed by a skilled professional guzheng artist. These recordings encompass a comprehensive range of tones inherent to the guzheng instrument. Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng, the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).
-
- Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach. In contrast to utilizing platform-specific automated splitting mechanisms, we directly employ the pre-split data for subsequent integration steps.
- """
-
- _URLS = {"audio": f"{_DOMAIN}/audio.zip", "mel": f"{_DOMAIN}/mel.zip"}
-
-
- class GZ_IsoTech(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             features=datasets.Features(
-                 {
-                     "audio": datasets.Audio(sampling_rate=22050),
-                     "mel": datasets.Image(),
-                     "label": datasets.features.ClassLabel(names=list(_NAMES.keys())),
-                     "cname": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=("audio", "label"),
-             homepage=_HOMEPAGE,
-             license="mit",
-             citation=_CITATION,
-             description=_DESCRIPTION,
-             task_templates=[
-                 AudioClassification(
-                     task="audio-classification",
-                     audio_column="audio",
-                     label_column="label",
-                 )
-             ],
-         )
-
-     def _str2md5(self, original_string):
-         """
-         Calculate and return the MD5 hash of a given string.
-         Parameters:
-             original_string (str): The original string for which the MD5 hash is to be computed.
-         Returns:
-             str: The hexadecimal representation of the MD5 hash.
-         """
-         # Create an md5 object
-         md5_obj = hashlib.md5()
-         # Update the md5 object with the original string encoded as bytes
-         md5_obj.update(original_string.encode("utf-8"))
-         # Retrieve the hexadecimal representation of the MD5 hash
-         md5_hash = md5_obj.hexdigest()
-         return md5_hash
-
-     def _split_generators(self, dl_manager):
-         audio_files = dl_manager.download_and_extract(_URLS["audio"])
-         mel_files = dl_manager.download_and_extract(_URLS["mel"])
-         train_files, test_files = {}, {}
-
-         for path in dl_manager.iter_files([audio_files]):
-             fname = os.path.basename(path)
-             dirname = os.path.dirname(path)
-             splt = os.path.basename(os.path.dirname(dirname))
-             if fname.endswith(".wav"):
-                 cls = f"{splt}/{os.path.basename(dirname)}/"
-                 item_id = self._str2md5(cls + fname.split(".wa")[0])
-                 if splt == "train":
-                     train_files[item_id] = {"audio": path}
-                 else:
-                     test_files[item_id] = {"audio": path}
-
-         for path in dl_manager.iter_files([mel_files]):
-             fname = os.path.basename(path)
-             dirname = os.path.dirname(path)
-             splt = os.path.basename(os.path.dirname(dirname))
-             if fname.endswith(".jpg"):
-                 cls = f"{splt}/{os.path.basename(dirname)}/"
-                 item_id = self._str2md5(cls + fname.split(".jp")[0])
-                 if splt == "train":
-                     train_files[item_id]["mel"] = path
-                 else:
-                     test_files[item_id]["mel"] = path
-
-         trainset = list(train_files.values())
-         testset = list(test_files.values())
-         random.shuffle(trainset)
-         random.shuffle(testset)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"files": trainset},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"files": testset},
-             ),
-         ]
-
-     def _generate_examples(self, files):
-         for i, path in enumerate(files):
-             pt = os.path.basename(os.path.dirname(path["audio"]))
-             yield i, {
-                 "audio": path["audio"],
-                 "mel": path["mel"],
-                 "label": pt,
-                 "cname": _NAMES[pt],
-             }
 
+ import os
+ import random
+ import hashlib
+ import datasets
+ from datasets.tasks import AudioClassification
+
+ _DBNAME = os.path.basename(__file__).split(".")[0]
+
+ _DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"
+
+ _HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"
+
+ _NAMES = {
+     "vibrato": "颤音",
+     "upward_portamento": "上滑音",
+     "downward_portamento": "下滑音",
+     "returning_portamento": "回滑音",
+     "glissando": "刮奏, 花指",
+     "tremolo": "摇指",
+     "harmonics": "泛音",
+     "plucks": "勾, 打, 抹, 托, ...",
+ }
+
+ _CITATION = """\
+ @dataset{zhaorui_liu_2021_5676893,
+ author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
+ title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
+ month = {mar},
+ year = {2024},
+ publisher = {HuggingFace},
+ version = {1.2},
+ url = {https://huggingface.co/ccmusic-database}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The raw dataset comprises 2,824 audio clips showcasing various guzheng playing techniques. Specifically, 2,328 clips were sourced from virtual sound banks, while 496 clips were performed by a skilled professional guzheng artist. These recordings encompass a comprehensive range of tones inherent to the guzheng instrument. Categorization of the clips is based on the diverse playing techniques characteristic of the guzheng, the clips are divided into eight categories: Vibrato (chanyin), Upward Portamento (shanghuayin), Downward Portamento (xiahuayin), Returning Portamento (huihuayin), Glissando (guazou, huazhi), Tremolo (yaozhi), Harmonic (fanyin), Plucks (gou, da, mo, tuo…).
+
+ Due to the pre-existing split in the raw dataset, wherein the data has been partitioned approximately in a 4:1 ratio for training and testing sets, we uphold the original data division approach. In contrast to utilizing platform-specific automated splitting mechanisms, we directly employ the pre-split data for subsequent integration steps.
+ """
+
+ _URLS = {
+     "audio": f"{_DOMAIN}/audio.zip",
+     "mel": f"{_DOMAIN}/mel.zip",
+ }
+
+
+ class GZ_IsoTech(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "audio": datasets.Audio(sampling_rate=22050),
+                     "mel": datasets.Image(),
+                     "label": datasets.features.ClassLabel(names=list(_NAMES.keys())),
+                     "cname": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("audio", "label"),
+             homepage=_HOMEPAGE,
+             license="CC-BY-NC-ND",
+             version="1.2.0",
+             citation=_CITATION,
+             description=_DESCRIPTION,
+             task_templates=[
+                 AudioClassification(
+                     task="audio-classification",
+                     audio_column="audio",
+                     label_column="label",
+                 )
+             ],
+         )
+
+     def _str2md5(self, original_string: str):
+         md5_obj = hashlib.md5()
+         md5_obj.update(original_string.encode("utf-8"))
+         return md5_obj.hexdigest()
+
+     def _split_generators(self, dl_manager):
+         audio_files = dl_manager.download_and_extract(_URLS["audio"])
+         mel_files = dl_manager.download_and_extract(_URLS["mel"])
+         train_files, test_files = {}, {}
+         for path in dl_manager.iter_files([audio_files]):
+             fname: str = os.path.basename(path)
+             dirname = os.path.dirname(path)
+             splt = os.path.basename(os.path.dirname(dirname))
+             if fname.endswith(".wav"):
+                 cls = f"{splt}/{os.path.basename(dirname)}/"
+                 item_id = self._str2md5(cls + fname.split(".wa")[0])
+                 if splt == "train":
+                     train_files[item_id] = {"audio": path}
+                 else:
+                     test_files[item_id] = {"audio": path}
+
+         for path in dl_manager.iter_files([mel_files]):
+             fname = os.path.basename(path)
+             dirname = os.path.dirname(path)
+             splt = os.path.basename(os.path.dirname(dirname))
+             if fname.endswith(".jpg"):
+                 cls = f"{splt}/{os.path.basename(dirname)}/"
+                 item_id = self._str2md5(cls + fname.split(".jp")[0])
+                 if splt == "train":
+                     train_files[item_id]["mel"] = path
+                 else:
+                     test_files[item_id]["mel"] = path
+
+         trainset = list(train_files.values())
+         testset = list(test_files.values())
+         random.shuffle(trainset)
+         random.shuffle(testset)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"files": trainset},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"files": testset},
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         for i, path in enumerate(files):
+             pt = os.path.basename(os.path.dirname(path["audio"]))
+             yield i, {
+                 "audio": path["audio"],
+                 "mel": path["mel"],
+                 "label": pt,
+                 "cname": _NAMES[pt],
+             }