import os
import random
import datasets
from datasets.tasks import ImageClassification  # deprecated upstream; needs a datasets 2.x release
_NAMES = {
"all": ["m_chest", "f_chest", "m_falsetto", "f_falsetto"],
"gender": ["female", "male"],
"singing_method": ["falsetto", "chest"],
}
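# For example, the combined class "m_chest" corresponds to gender "male" and
# singing_method "chest"; _generate_examples below derives all three labels
# from the same underscore-separated filename tokens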
_DBNAME = os.path.basename(__file__).split(".")[0]
_HOMEPAGE = f"https://www.modelscope.cn/datasets/ccmusic/{_DBNAME}"
_DOMAIN = f"https://www.modelscope.cn/api/v1/datasets/ccmusic/{_DBNAME}/repo?Revision=master&FilePath=data"
_CITATION = """\
@dataset{zhaorui_liu_2021_5676893,
    author    = {Monan Zhou and Shenyang Xu and Zhaorui Liu and Zhaowen Wang and Feng Yu and Wei Li and Zijin Li},
    title     = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
    month     = {mar},
    year      = {2024},
    publisher = {HuggingFace},
    version   = {1.2},
    url       = {https://huggingface.co/ccmusic-database}
}
"""
_DESCRIPTION = """\
The raw dataset comprises 1,280 monophonic singing audio files in .wav format (44,100 Hz sample rate), consisting of chest and falsetto voices performed, recorded, and annotated by students majoring in Vocal Music at the China Conservatory of Music. The chest voice is tagged as chest and the falsetto voice is tagged as falsetto. Additionally, the dataset includes the Mel spectrogram, Mel-frequency cepstral coefficients (MFCC), and spectral characteristics of each audio segment, resulting in a total of 5,120 CSV files.
The original dataset did not differentiate between male and female voices, an omission that is critical for accurately identifying chest and falsetto vocal techniques. To address this, we conducted a meticulous manual review and added gender annotations to the dataset. Besides the original content, we also provide the preprocessed version used during the evaluation, which is detailed in Section IV. The same two-version approach is applied to the two subsequent classification datasets that have not yet been evaluated: the Music Genre Dataset and the Bel Canto & Chinese Folk Singing Dataset.
For the preprocessed version, each audio clip was sliced into 0.25-second segments and then transformed into Mel, CQT, and Chroma spectrograms in .jpg format, resulting in 8,974 files. The chest/falsetto label of each file is one of four classes: m_chest, m_falsetto, f_chest, and f_falsetto. Each data entry combines the spectrograms with the two labels: the first three columns hold the Mel, CQT, and Chroma spectrograms, while the fourth and fifth columns hold the chest/falsetto label and the gender label, respectively. Additionally, the integrated dataset provides a function to shuffle and split the data into training, validation, and test sets in an 8:1:1 ratio. This dataset can be used for singing-related tasks such as singing gender classification or chest and falsetto voice classification.
"""
_URLS = {
"audio": f"{_DOMAIN}/audio.zip",
"mel": f"{_DOMAIN}/mel.zip",
"eval": f"{_DOMAIN}/eval.zip",
}
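# A minimal usage sketch (hedged, not part of the loading script: the repo id
# "ccmusic-database/chest_falsetto" is inferred from the citation URL above,
# and script-based loading requires a datasets 2.x release):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ccmusic-database/chest_falsetto", name="default")
#     example = ds["train"][0]
#     # example["audio"]["array"]  -> waveform decoded at 22,050 Hz
#     # example["mel"]             -> PIL image of the Mel spectrogram
#     # example["label"]           -> class index into _NAMES["all"]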
class chest_falsetto_Config(datasets.BuilderConfig):
    def __init__(self, features, **kwargs):
        super().__init__(version=datasets.Version("1.2.0"), **kwargs)
        self.features = features
class chest_falsetto(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.2.0")
BUILDER_CONFIGS = [
chest_falsetto_Config(
name="eval",
features=datasets.Features(
{
"mel": datasets.Image(),
"cqt": datasets.Image(),
"chroma": datasets.Image(),
"label": datasets.features.ClassLabel(names=_NAMES["all"]),
"gender": datasets.features.ClassLabel(names=_NAMES["gender"]),
"singing_method": datasets.features.ClassLabel(
names=_NAMES["singing_method"]
),
}
),
),
        chest_falsetto_Config(
            name="default",
            features=datasets.Features(
                {
                    # 44.1 kHz wavs are resampled to 22,050 Hz on decode
                    "audio": datasets.Audio(sampling_rate=22050),
                    "mel": datasets.Image(),
                    "label": datasets.features.ClassLabel(names=_NAMES["all"]),
                    "gender": datasets.features.ClassLabel(names=_NAMES["gender"]),
                    "singing_method": datasets.features.ClassLabel(
                        names=_NAMES["singing_method"]
                    ),
                }
            ),
        ),
]
def _info(self):
return datasets.DatasetInfo(
features=self.config.features,
supervised_keys=("mel", "label"),
homepage=_HOMEPAGE,
license="mit",
citation=_CITATION,
description=_DESCRIPTION,
task_templates=[
ImageClassification(
task="image-classification",
image_column="mel",
label_column="label",
)
],
)
def _split_generators(self, dl_manager):
dataset = []
        if self.config.name == "eval":
            data_files = dl_manager.download_and_extract(_URLS["eval"])
            # Collect only the Mel spectrogram images; the cqt/chroma paths
            # are derived from them later in _generate_examples
            for path in dl_manager.iter_files([data_files]):
                if "mel" in path and os.path.basename(path).endswith(".jpg"):
                    dataset.append(path)
        else:
            files = {}
            audio_files = dl_manager.download_and_extract(_URLS["audio"])
            mel_files = dl_manager.download_and_extract(_URLS["mel"])
            # Index the .wav files by basename (without extension)
            for path in dl_manager.iter_files([audio_files]):
                fname = os.path.basename(path)
                if fname.endswith(".wav"):
                    item_id = fname.split(".")[0]
                    files[item_id] = {"audio": path}
            # Attach each Mel spectrogram to its matching audio file; the
            # guard skips spectrograms without a .wav to avoid a KeyError
            for path in dl_manager.iter_files([mel_files]):
                fname = os.path.basename(path)
                if fname.endswith(".jpg"):
                    item_id = fname.split(".")[0]
                    if item_id in files:
                        files[item_id]["mel"] = path
            # Keep only complete audio + mel pairs
            dataset = [item for item in files.values() if "mel" in item]
        # Shuffle once, then cut 8:1:1 into train/validation/test; note the
        # shuffle is unseeded, so the split differs between preparations
        random.shuffle(dataset)
        data_count = len(dataset)
        p80 = int(data_count * 0.8)
        p90 = int(data_count * 0.9)
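        # e.g. the default config's 1,280 audio/mel pairs give p80 = 1024 and
        # p90 = 1152, i.e. a 1,024 / 128 / 128 train/validation/test split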
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"files": dataset[:p80]}
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION, gen_kwargs={"files": dataset[p80:p90]}
),
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"files": dataset[p90:]}
),
]
    def _generate_examples(self, files):
        if self.config.name == "eval":
            for i, fpath in enumerate(files):
                file_name = os.path.basename(fpath)
                # The 2nd and 3rd "_"-separated tokens of the basename encode
                # sex ("m"/"f") and singing method ("chest"/"falsetto")
                sex = file_name.split("_")[1]
                method = file_name.split("_")[2].split(".")[0]  # tolerate a trailing extension
                yield i, {
                    "mel": fpath,
                    # cqt/chroma images mirror the mel paths in sibling folders
                    "cqt": fpath.replace("mel", "cqt"),
                    "chroma": fpath.replace("mel", "chroma"),
                    "label": f"{sex}_{method}",
                    "gender": "male" if sex == "m" else "female",
                    "singing_method": method,
                }
        else:
            for i, fpath in enumerate(files):
                file_name = os.path.basename(fpath["audio"])
                sex = file_name.split("_")[1]
                # Strip the file extension from the method token
                method = file_name.split("_")[2].split(".")[0]
                yield i, {
                    "audio": fpath["audio"],
                    "mel": fpath["mel"],
                    "label": f"{sex}_{method}",
                    "gender": "male" if sex == "m" else "female",
                    "singing_method": method,
                }