# acapella.py — HuggingFace dataset loading script for ccmusic/acapella
# (stray file-viewer header text removed: it was page chrome, not valid Python)
import os
import datasets
import pandas as pd
from datasets.tasks import AudioClassification
_NAMES = {
"songs": ["song" + str(i) for i in range(1, 7)],
"singers": ["singer" + str(i) for i in range(1, 23)],
}
_DBNAME = os.path.basename(__file__).split(".")[0]
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"
_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Zijin Li},
title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
month = {mar},
year = {2024},
publisher = {HuggingFace},
version = {1.2},
url = {https://huggingface.co/ccmusic-database}
}
"""
_DESCRIPTION = """\
This raw dataset comprises six Mandarin pop song segments performed by 22 singers, resulting in a total of 132 audio clips. Each segment includes both a verse and a chorus. Four judges from the China Conservatory of Music assess the singing across nine dimensions: pitch, rhythm, vocal range, timbre, pronunciation, vibrato, dynamics, breath control, and overall performance, using a 10-point scale. The evaluations are recorded in an Excel spreadsheet in .xls format.
Due to the original dataset comprising separate files for audio recordings and evaluation sheets, which hindered efficient data retrieval, we have consolidated the raw vocal recordings with their corresponding assessments. The dataset is divided into six segments, each representing a different song, resulting in a total of six divisions. Each segment contains 22 entries, with each entry detailing the vocal recording of an individual singer sampled at 44,100 Hz, the singer's ID, and evaluations across the nine dimensions previously mentioned. Consequently, each entry encompasses 11 columns of data. This dataset is well-suited for tasks such as vocal analysis and regression-based singing voice rating. For instance, as previously stated, the final column of each entry denotes the overall performance score, allowing the audio to be utilized as data and this score to serve as the label for regression analysis.
"""
_URLS = {"audio": f"{_DOMAIN}/audio.zip", "mel": f"{_DOMAIN}/mel.zip"}
class acapella(datasets.GeneratorBasedBuilder):
    """Builder for the ccmusic `acapella` singing-evaluation dataset.

    Each example pairs one singer's vocal recording (and its mel
    spectrogram) with the singer's ID and nine expert-rated scores.
    The dataset exposes one split per song segment.
    """

    # Score columns shared by the per-song CSV sheets and the example
    # features; kept in one place so the two never drift apart.
    _SCORE_COLS = (
        "pitch",
        "rhythm",
        "vocal_range",
        "timbre",
        "pronunciation",
        "vibrato",
        "dynamic",
        "breath_control",
        "overall_performance",
    )

    def _info(self):
        """Declare the example features and dataset-level metadata."""
        features = {
            # NOTE(review): _DESCRIPTION states recordings are sampled at
            # 44,100 Hz, but 22050 here makes `datasets` resample on access.
            # Presumably intentional downsampling — confirm before changing.
            "audio": datasets.Audio(sampling_rate=22050),
            "mel": datasets.Image(),
            "singer_id": datasets.features.ClassLabel(names=_NAMES["singers"]),
        }
        # All nine judge-assigned scores are stored as float64 values.
        features.update({col: datasets.Value("float64") for col in self._SCORE_COLS})
        return datasets.DatasetInfo(
            features=datasets.Features(features),
            supervised_keys=("audio", "singer_id"),
            homepage=_HOMEPAGE,
            license="mit",
            citation=_CITATION,
            description=_DESCRIPTION,
            task_templates=[
                AudioClassification(
                    task="audio-classification",
                    audio_column="audio",
                    label_column="singer_id",
                )
            ],
        )

    def _attach_files(self, dl_manager, url, suffix, key, songs):
        """Download and extract one archive, then attach each matching
        file path to its (song, singer) entry in `songs` under `key`.

        The parent directory of each file names the song; the filename
        embeds the 1-based singer number in parentheses, e.g. "(3)".
        """
        root = dl_manager.download_and_extract(url)
        for path in dl_manager.iter_files([root]):
            fname = os.path.basename(path)
            if not fname.endswith(suffix):
                continue
            song_id = os.path.basename(os.path.dirname(path))
            singer_idx = int(fname.split("(")[1].split(")")[0]) - 1
            songs[song_id][singer_idx][key] = path

    def _split_generators(self, dl_manager):
        """Build one SplitGenerator per song.

        For every song, read its CSV score sheet into a list of per-singer
        score dicts, then attach the audio (.wav) and mel-spectrogram
        (.jpg) file paths to each entry.
        """
        # Derive the singer count from _NAMES instead of hard-coding 22,
        # so the label pool and the sheet rows stay consistent.
        singer_count = len(_NAMES["singers"])
        songs = {}
        for song in _NAMES["songs"]:
            csv_file = dl_manager.download(f"{_DOMAIN}/{song}.csv")
            sheet = pd.read_csv(csv_file, index_col="singer_id")
            songs[song] = [
                {col: sheet.iloc[row][col] for col in self._SCORE_COLS}
                for row in range(singer_count)
            ]
        self._attach_files(dl_manager, _URLS["audio"], ".wav", "audio", songs)
        self._attach_files(dl_manager, _URLS["mel"], ".jpg", "mel", songs)
        return [
            datasets.SplitGenerator(name=song, gen_kwargs={"files": entries})
            for song, entries in songs.items()
        ]

    def _generate_examples(self, files):
        """Yield (key, example) pairs for one split.

        `files` is the per-song entry list built in `_split_generators`;
        the list index doubles as the 0-based singer ID.
        """
        for idx, entry in enumerate(files):
            yield idx, {**entry, "singer_id": idx}