monetjoe committed
Commit 3333ddf
1 Parent(s): 0047cd5

Upload acapella.py

Files changed (1)
  acapella.py +141 -139
acapella.py CHANGED
@@ -1,139 +1,141 @@
- import os
- import datasets
- import pandas as pd
- from datasets.tasks import AudioClassification
-
-
- _NAMES = {
-     "songs": ["song" + str(i) for i in range(1, 7)],
-     "singers": ["singer" + str(i) for i in range(1, 23)],
- }
-
- _DBNAME = os.path.basename(__file__).split(".")[0]
-
- _DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"
-
- _HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"
-
- _CITATION = """\
- @dataset{zhaorui_liu_2021_5676893,
-   author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Zijin Li},
-   title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
-   month = {mar},
-   year = {2024},
-   publisher = {HuggingFace},
-   version = {1.2},
-   url = {https://huggingface.co/ccmusic-database}
- }
- """
-
- _DESCRIPTION = """\
- This raw dataset comprises six Mandarin pop song segments performed by 22 singers, resulting in a total of 132 audio clips. Each segment includes both a verse and a chorus. Four judges from the China Conservatory of Music assess the singing across nine dimensions: pitch, rhythm, vocal range, timbre, pronunciation, vibrato, dynamics, breath control, and overall performance, using a 10-point scale. The evaluations are recorded in an Excel spreadsheet in .xls format.
-
- Due to the original dataset comprising separate files for audio recordings and evaluation sheets, which hindered efficient data retrieval, we have consolidated the raw vocal recordings with their corresponding assessments. The dataset is divided into six segments, each representing a different song, resulting in a total of six divisions. Each segment contains 22 entries, with each entry detailing the vocal recording of an individual singer sampled at 44,100 Hz, the singer's ID, and evaluations across the nine dimensions previously mentioned. Consequently, each entry encompasses 11 columns of data. This dataset is well-suited for tasks such as vocal analysis and regression-based singing voice rating. For instance, as previously stated, the final column of each entry denotes the overall performance score, allowing the audio to be utilized as data and this score to serve as the label for regression analysis.
- """
-
- _URLS = {"audio": f"{_DOMAIN}/audio.zip", "mel": f"{_DOMAIN}/mel.zip"}
-
-
- class acapella(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             features=datasets.Features(
-                 {
-                     "audio": datasets.Audio(sampling_rate=22050),
-                     "mel": datasets.Image(),
-                     "singer_id": datasets.features.ClassLabel(names=_NAMES["singers"]),
-                     "pitch": datasets.Value("float64"),
-                     "rhythm": datasets.Value("float64"),
-                     "vocal_range": datasets.Value("float64"),
-                     "timbre": datasets.Value("float64"),
-                     "pronunciation": datasets.Value("float64"),
-                     "vibrato": datasets.Value("float64"),
-                     "dynamic": datasets.Value("float64"),
-                     "breath_control": datasets.Value("float64"),
-                     "overall_performance": datasets.Value("float64"),
-                 }
-             ),
-             supervised_keys=("audio", "singer_id"),
-             homepage=_HOMEPAGE,
-             license="mit",
-             citation=_CITATION,
-             description=_DESCRIPTION,
-             task_templates=[
-                 AudioClassification(
-                     task="audio-classification",
-                     audio_column="audio",
-                     label_column="singer_id",
-                 )
-             ],
-         )
-
-     def _split_generators(self, dl_manager):
-         songs = {}
-         for index in _NAMES["songs"]:
-             csv_files = dl_manager.download(f"{_DOMAIN}/{index}.csv")
-             song_eval = pd.read_csv(csv_files, index_col="singer_id")
-             scores = []
-             for id in range(22):
-                 scores.append(
-                     {
-                         "pitch": song_eval.iloc[id]["pitch"],
-                         "rhythm": song_eval.iloc[id]["rhythm"],
-                         "vocal_range": song_eval.iloc[id]["vocal_range"],
-                         "timbre": song_eval.iloc[id]["timbre"],
-                         "pronunciation": song_eval.iloc[id]["pronunciation"],
-                         "vibrato": song_eval.iloc[id]["vibrato"],
-                         "dynamic": song_eval.iloc[id]["dynamic"],
-                         "breath_control": song_eval.iloc[id]["breath_control"],
-                         "overall_performance": song_eval.iloc[id][
-                             "overall_performance"
-                         ],
-                     }
-                 )
-
-             songs[index] = scores
-
-         audio_files = dl_manager.download_and_extract(_URLS["audio"])
-         for path in dl_manager.iter_files([audio_files]):
-             fname = os.path.basename(path)
-             if fname.endswith(".wav"):
-                 song_id = os.path.basename(os.path.dirname(path))
-                 singer_id = int(fname.split("(")[1].split(")")[0]) - 1
-                 songs[song_id][singer_id]["audio"] = path
-
-         mel_files = dl_manager.download_and_extract(_URLS["mel"])
-         for path in dl_manager.iter_files([mel_files]):
-             fname = os.path.basename(path)
-             if fname.endswith(".jpg"):
-                 song_id = os.path.basename(os.path.dirname(path))
-                 singer_id = int(fname.split("(")[1].split(")")[0]) - 1
-                 songs[song_id][singer_id]["mel"] = path
-
-         split_generator = []
-         for key in songs.keys():
-             split_generator.append(
-                 datasets.SplitGenerator(
-                     name=key,
-                     gen_kwargs={"files": songs[key]},
-                 )
-             )
-
-         return split_generator
-
-     def _generate_examples(self, files):
-         for i, path in enumerate(files):
-             yield i, {
-                 "audio": path["audio"],
-                 "mel": path["mel"],
-                 "singer_id": i,
-                 "pitch": path["pitch"],
-                 "rhythm": path["rhythm"],
-                 "vocal_range": path["vocal_range"],
-                 "timbre": path["timbre"],
-                 "pronunciation": path["pronunciation"],
-                 "vibrato": path["vibrato"],
-                 "dynamic": path["dynamic"],
-                 "breath_control": path["breath_control"],
-                 "overall_performance": path["overall_performance"],
-             }
 
 
 
+ import os
+ import datasets
+ import pandas as pd
+ from datasets.tasks import AudioClassification
+
+
+ _NAMES = {
+     "songs": [f"song{i}" for i in range(1, 7)],
+     "singers": [f"singer{i}" for i in range(1, 23)],
+ }
+
+ _DBNAME = os.path.basename(__file__).split(".")[0]
+
+ _DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic-database/{_DBNAME}/repo?Revision=master&FilePath=data"
+
+ _HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic-database/{_DBNAME}"
+
+ _CITATION = """\
+ @dataset{zhaorui_liu_2021_5676893,
+   author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
+   title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
+   month = {mar},
+   year = {2024},
+   publisher = {HuggingFace},
+   version = {1.2},
+   url = {https://huggingface.co/ccmusic-database}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This raw dataset comprises six Mandarin pop song segments performed by 22 singers, resulting in a total of 132 audio clips. Each segment includes both a verse and a chorus. Four judges from the China Conservatory of Music assess the singing across nine dimensions: pitch, rhythm, vocal range, timbre, pronunciation, vibrato, dynamics, breath control, and overall performance, using a 10-point scale. The evaluations are recorded in an Excel spreadsheet in .xls format.
+
+ Due to the original dataset comprising separate files for audio recordings and evaluation sheets, which hindered efficient data retrieval, we have consolidated the raw vocal recordings with their corresponding assessments. The dataset is divided into six segments, each representing a different song, resulting in a total of six divisions. Each segment contains 22 entries, with each entry detailing the vocal recording of an individual singer sampled at 44,100 Hz, the singer's ID, and evaluations across the nine dimensions previously mentioned. Consequently, each entry encompasses 11 columns of data. This dataset is well-suited for tasks such as vocal analysis and regression-based singing voice rating. For instance, as previously stated, the final column of each entry denotes the overall performance score, allowing the audio to be utilized as data and this score to serve as the label for regression analysis.
+ """
+
+ _URLS = {
+     "audio": f"{_DOMAIN}/audio.zip",
+     "mel": f"{_DOMAIN}/mel.zip",
+ }
+
+
+ class acapella(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "audio": datasets.Audio(sampling_rate=22050),
+                     "mel": datasets.Image(),
+                     "singer_id": datasets.features.ClassLabel(names=_NAMES["singers"]),
+                     "pitch": datasets.Value("float32"),
+                     "rhythm": datasets.Value("float32"),
+                     "vocal_range": datasets.Value("float32"),
+                     "timbre": datasets.Value("float32"),
+                     "pronunciation": datasets.Value("float32"),
+                     "vibrato": datasets.Value("float32"),
+                     "dynamic": datasets.Value("float32"),
+                     "breath_control": datasets.Value("float32"),
+                     "overall_performance": datasets.Value("float32"),
+                 }
+             ),
+             supervised_keys=("audio", "singer_id"),
+             homepage=_HOMEPAGE,
+             license="CC-BY-NC-ND",
+             version="1.2.0",
+             citation=_CITATION,
+             description=_DESCRIPTION,
+             task_templates=[
+                 AudioClassification(
+                     task="audio-classification",
+                     audio_column="audio",
+                     label_column="singer_id",
+                 )
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         songs = {}
+         for index in _NAMES["songs"]:
+             csv_files = dl_manager.download(f"{_DOMAIN}/{index}.csv")
+             song_eval = pd.read_csv(csv_files, index_col="singer_id")
+             scores = []
+             for i in range(22):
+                 scores.append(
+                     {
+                         "pitch": song_eval.iloc[i]["pitch"],
+                         "rhythm": song_eval.iloc[i]["rhythm"],
+                         "vocal_range": song_eval.iloc[i]["vocal_range"],
+                         "timbre": song_eval.iloc[i]["timbre"],
+                         "pronunciation": song_eval.iloc[i]["pronunciation"],
+                         "vibrato": song_eval.iloc[i]["vibrato"],
+                         "dynamic": song_eval.iloc[i]["dynamic"],
+                         "breath_control": song_eval.iloc[i]["breath_control"],
+                         "overall_performance": song_eval.iloc[i]["overall_performance"],
+                     }
+                 )
+
+             songs[index] = scores
+
+         audio_files = dl_manager.download_and_extract(_URLS["audio"])
+         for path in dl_manager.iter_files([audio_files]):
+             fname: str = os.path.basename(path)
+             if fname.endswith(".wav"):
+                 song_id = os.path.basename(os.path.dirname(path))
+                 singer_id = int(fname.split("(")[1].split(")")[0]) - 1
+                 songs[song_id][singer_id]["audio"] = path
+
+         mel_files = dl_manager.download_and_extract(_URLS["mel"])
+         for path in dl_manager.iter_files([mel_files]):
+             fname = os.path.basename(path)
+             if fname.endswith(".jpg"):
+                 song_id = os.path.basename(os.path.dirname(path))
+                 singer_id = int(fname.split("(")[1].split(")")[0]) - 1
+                 songs[song_id][singer_id]["mel"] = path
+
+         split_generator = []
+         for key in songs.keys():
+             split_generator.append(
+                 datasets.SplitGenerator(
+                     name=key,
+                     gen_kwargs={"files": songs[key]},
+                 )
+             )
+
+         return split_generator
+
+     def _generate_examples(self, files):
+         for i, path in enumerate(files):
+             yield i, {
+                 "audio": path["audio"],
+                 "mel": path["mel"],
+                 "singer_id": i,
+                 "pitch": path["pitch"],
+                 "rhythm": path["rhythm"],
+                 "vocal_range": path["vocal_range"],
+                 "timbre": path["timbre"],
+                 "pronunciation": path["pronunciation"],
+                 "vibrato": path["vibrato"],
+                 "dynamic": path["dynamic"],
+                 "breath_control": path["breath_control"],
+                 "overall_performance": path["overall_performance"],
+             }
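
For reference, below is a minimal usage sketch, not part of the commit. The repo id ccmusic-database/acapella is an assumption inferred from the homepage and citation URLs in the script, and the split names "song1" through "song6" follow _split_generators, which builds one split per song.

    # Minimal usage sketch (assumption: the script is hosted as
    # ccmusic-database/acapella on the HuggingFace Hub).
    from datasets import load_dataset

    # One split per song segment: "song1" ... "song6".
    ds = load_dataset("ccmusic-database/acapella", split="song1", trust_remote_code=True)

    row = ds[0]
    print(row["singer_id"])            # ClassLabel index for the singer (0-21)
    print(row["overall_performance"])  # 10-point judge score

    # Regression pairing described in _DESCRIPTION: decoded audio as input,
    # overall_performance as the target.
    X = [r["audio"]["array"] for r in ds]
    y = [r["overall_performance"] for r in ds]

Note that singer order within each split is fixed by the parenthesized number in each filename: per _split_generators, a file named like xxx(3).wav is assigned singer index 2.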