jam-alt / loader.py
Version 1.1.0 (commit db8d621)

Modalities: Audio, Text
Formats: parquet
Size: < 1K
Libraries: Datasets, pandas
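This is the dataset's loading script; it is normally executed indirectly through the datasets library rather than run on its own. A minimal usage sketch, assuming the dataset is hosted under the repository id "audioshake/jam-alt" (the config names "all", "en", "fr", "de", "es" come from the script below):

from datasets import load_dataset

# Load every language (the default "all" config); Jam-ALT ships a single "test" split.
# Recent releases of the datasets library require trust_remote_code=True for
# datasets that are built by a loading script such as this one.
jam_alt = load_dataset("audioshake/jam-alt", trust_remote_code=True)["test"]

# Restrict to a single language by passing its config name.
jam_alt_fr = load_dataset("audioshake/jam-alt", "fr", trust_remote_code=True)["test"]

The loading script itself follows.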
"""HuggingFace loading script for the JamALT dataset."""
import csv
from dataclasses import dataclass
import json
import os
from pathlib import Path
from typing import Optional
import datasets
_VERSION = "1.1.0"
_CITATION = """\
@misc{cifka-2023-jam-alt,
    author = {Ond\v{r}ej C\'ifka and
              Constantinos Dimitriou and
              {Cheng-i} Wang and
              Hendrik Schreiber and
              Luke Miner and
              Fabian-Robert St\"oter},
    title = {{Jam-ALT}: A Formatting-Aware Lyrics Transcription Benchmark},
    eprint = {arXiv:2311.13987},
    year = 2023
}
@inproceedings{durand-2023-contrastive,
    author = {Durand, Simon and Stoller, Daniel and Ewert, Sebastian},
    booktitle = {2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
    title = {Contrastive Learning-Based Audio to Lyrics Alignment for Multiple Languages},
    year = {2023},
    pages = {1-5},
    address = {Rhodes Island, Greece},
    doi = {10.1109/ICASSP49357.2023.10096725}
}
"""
_DESCRIPTION = """\
Jam-ALT: A formatting-aware lyrics transcription benchmark.
"""
_HOMEPAGE = "https://audioshake.github.io/jam-alt"
_METADATA_FILENAME = "metadata.csv"
_LANGUAGE_NAME_TO_CODE = {
    "English": "en",
    "French": "fr",
    "German": "de",
    "Spanish": "es",
}

@dataclass
class JamAltBuilderConfig(datasets.BuilderConfig):
    """Configuration options for the Jam-ALT builder."""

    # Language code to restrict the dataset to; None means all languages.
    language: Optional[str] = None
    # Whether to include the audio feature at all.
    with_audio: bool = True
    # Audio decoding options forwarded to `datasets.Audio`.
    decode_audio: bool = True
    sampling_rate: Optional[int] = None
    mono: bool = True

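# These config fields map to keyword arguments of `datasets.load_dataset`, which
# forwards unrecognized keyword arguments to the builder config. A sketch, again
# under the assumption that the dataset lives at "audioshake/jam-alt" on the Hub:
#
#     lyrics_only = datasets.load_dataset(
#         "audioshake/jam-alt", "es", with_audio=False, trust_remote_code=True
#     )
#     resampled = datasets.load_dataset(
#         "audioshake/jam-alt", sampling_rate=22_050, mono=True, trust_remote_code=True
#     )
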
class JamAltDataset(datasets.GeneratorBasedBuilder):
    """Jam-ALT: A formatting-aware lyrics transcription benchmark."""

    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIG_CLASS = JamAltBuilderConfig

    # One config per language, plus an "all" config covering every language.
    BUILDER_CONFIGS = [JamAltBuilderConfig("all")] + [
        JamAltBuilderConfig(lang, language=lang)
        for lang in _LANGUAGE_NAME_TO_CODE.values()
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        # Text-only features; the audio feature is added only when requested.
        feat_dict = {
            "name": datasets.Value("string"),
            "text": datasets.Value("string"),
            "language": datasets.Value("string"),
            "license_type": datasets.Value("string"),
        }
        if self.config.with_audio:
            feat_dict["audio"] = datasets.Audio(
                decode=self.config.decode_audio,
                sampling_rate=self.config.sampling_rate,
                mono=self.config.mono,
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(feat_dict),
            supervised_keys=("audio", "text") if "audio" in feat_dict else None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

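    # For reference, a generated record has this shape (values are illustrative,
    # not taken from the dataset):
    #
    #     {
    #         "name": "<file name without extension>",
    #         "text": "<formatted lyrics>",
    #         "language": "en",
    #         "license_type": "<license identifier>",
    #         "audio": ...,  # present only when with_audio=True
    #     }
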
    def _split_generators(self, dl_manager):
        # Download the metadata CSV and keep the rows matching the requested language.
        metadata_path = dl_manager.download(_METADATA_FILENAME)
        audio_paths, text_paths, metadata = [], [], []
        with open(metadata_path, encoding="utf-8") as f:
            for row in csv.DictReader(f):
                if (
                    self.config.language is None
                    or _LANGUAGE_NAME_TO_CODE[row["Language"]] == self.config.language
                ):
                    audio_paths.append("audio/" + row["Filepath"])
                    text_paths.append(
                        "lyrics/" + os.path.splitext(row["Filepath"])[0] + ".txt"
                    )
                    metadata.append(row)
        # Lyrics are always downloaded; audio only if the config asks for it.
        text_paths = dl_manager.download(text_paths)
        audio_paths = (
            dl_manager.download(audio_paths) if self.config.with_audio else None
        )
        # Everything goes into a single test split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs=dict(
                    text_paths=text_paths,
                    audio_paths=audio_paths,
                    metadata=metadata,
                ),
            ),
        ]

    def _generate_examples(self, text_paths, audio_paths, metadata):
        if audio_paths is None:
            # Audio was not downloaded (with_audio=False); pad with placeholders.
            audio_paths = [None] * len(text_paths)
        for text_path, audio_path, meta in zip(text_paths, audio_paths, metadata):
            # Use the file name without its extension as the example key.
            name = os.path.splitext(meta["Filepath"])[0]
            with open(text_path, encoding="utf-8") as text_f:
                record = {
                    "name": name,
                    "text": text_f.read(),
                    "language": _LANGUAGE_NAME_TO_CODE[meta["Language"]],
                    "license_type": meta["LicenseType"],
                }
            if audio_path is not None:
                record["audio"] = audio_path
            yield name, record
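
# A minimal local smoke test, assuming the script is placed next to metadata.csv
# and the audio/ and lyrics/ directories (as in the dataset repository):
#
#     if __name__ == "__main__":
#         ds = datasets.load_dataset(__file__, "en", trust_remote_code=True)
#         print(ds["test"][0]["name"])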