Datasets:
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: expert-generated
Source Datasets: original
ArXiv:
License:
from typing import List

import os
import csv
import ast
import gzip

import datasets
from datasets.utils.logging import get_logger

logger = get_logger(__name__)

_URL = "https://asappresearch.github.io/slue-toolkit/"
_DL_URL = "https://public-dataset-model-store.awsdev.asapp.com/users/sshon/public/slue/"
_DL_URLS = {
    "slue-voxpopuli": _DL_URL + "slue-voxpopuli_v0.2_blind.zip",
    "slue-voxceleb": _DL_URL + "slue-voxceleb_v0.2_blind.zip",
}
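# Note: per the archive names above, these are the v0.2 "blind" releases, i.e. the
# test split ships without gold labels (see the *_test_blind.tsv file used below).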
_LICENSE = """
=======================================================
The license of this script

MIT License

Copyright (c) 2022 ASAPP Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

=======================================================
SLUE-VoxPopuli Dataset
The SLUE-VoxPopuli dataset contains a subset of the VoxPopuli dataset, and this subset remains under the original CC0 license. See also the European Parliament's legal notice (https://www.europarl.europa.eu/legal-notice/en/).
Additionally, we provide named entity annotations (the normalized_ner and raw_ner columns in the .tsv files), which are released under the same CC0 license.

=======================================================
SLUE-VoxCeleb Dataset

The SLUE-VoxCeleb dataset contains a subset of the OXFORD VoxCeleb dataset, and this subset remains under the same Creative Commons Attribution 4.0 International license reproduced below. Additionally, we provide transcriptions, sentiment annotations, and timestamps (start, end) that follow the same license as the OXFORD VoxCeleb dataset.
=======================================================
OXFORD VGG VoxCeleb Dataset

VoxCeleb1 contains over 100,000 utterances for 1,251 celebrities, extracted from videos uploaded to YouTube.
VoxCeleb2 contains over a million utterances for 6,112 celebrities, extracted from videos uploaded to YouTube.
The speakers span a wide range of different ethnicities, accents, professions and ages.

We provide YouTube URLs, associated face detections, and timestamps, as
well as cropped audio segments and cropped face videos from the
dataset. The copyright of both the original and cropped versions
of the videos remains with the original owners.

The data is covered under a Creative Commons
Attribution 4.0 International license (Please read the
license terms here. https://creativecommons.org/licenses/by/4.0/).

Downloading this dataset implies agreement to follow the same
conditions for any modification and/or
re-distribution of the dataset in any form.

Additionally any entity using this dataset agrees to the following conditions:

THIS DATASET IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Please cite [1,2] below if you make use of the dataset.

[1] J. S. Chung, A. Nagrani, A. Zisserman
VoxCeleb2: Deep Speaker Recognition
INTERSPEECH, 2018.

[2] A. Nagrani, J. S. Chung, A. Zisserman
VoxCeleb: a large-scale speaker identification dataset
INTERSPEECH, 2017.

=======================================================
"""
_CITATION = """\
@inproceedings{shon2022slue,
  title={Slue: New benchmark tasks for spoken language understanding evaluation on natural speech},
  author={Shon, Suwon and Pasad, Ankita and Wu, Felix and Brusco, Pablo and Artzi, Yoav and Livescu, Karen and Han, Kyu J},
  booktitle={ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages={7927--7931},
  year={2022},
  organization={IEEE}
}
"""
_DESCRIPTION = """\
Spoken Language Understanding Evaluation (SLUE) benchmark. There are two subsets: (i) SLUE-VoxPopuli, which has ASR and NER tasks, and (ii) SLUE-VoxCeleb, which has ASR and SA tasks.
"""
raw_to_combined_tag_map = {
    "DATE": "WHEN",
    "TIME": "WHEN",
    "CARDINAL": "QUANT",
    "ORDINAL": "QUANT",
    "QUANTITY": "QUANT",
    "MONEY": "QUANT",
    "PERCENT": "QUANT",
    "GPE": "PLACE",
    "LOC": "PLACE",
    "NORP": "NORP",
    "ORG": "ORG",
    "LAW": "LAW",
    "PERSON": "PERSON",
    "FAC": "DISCARD",
    "EVENT": "DISCARD",
    "WORK_OF_ART": "DISCARD",
    "PRODUCT": "DISCARD",
    "LANGUAGE": "DISCARD",
}
def parse_ner_label(label, combined=False):
    label = ast.literal_eval(label)
    if label is None:
        return []
    if combined:
        return [
            {"type": raw_to_combined_tag_map[t], "start": s, "length": l}
            for t, s, l in label
            if raw_to_combined_tag_map[t] != "DISCARD"
        ]
    else:
        return [{"type": t, "start": s, "length": l} for t, s, l in label]
class SLUEConfig(datasets.BuilderConfig):
    """BuilderConfig for SLUE."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(SLUEConfig, self).__init__(
            version=datasets.Version("2.4.0", ""), **kwargs
        )
class SLUE(datasets.GeneratorBasedBuilder):
    """SLUE dataset."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "voxpopuli"
    BUILDER_CONFIGS = [
        SLUEConfig(
            name="voxpopuli",
            description="SLUE VoxPopuli set, which includes ASR and Named Entity Recognition tasks.",
        ),
        SLUEConfig(
            name="voxceleb",
            description="SLUE VoxCeleb set, which includes ASR and Sentiment Analysis tasks.",
        ),
    ]
    def _info(self):
        features = {
            "id": datasets.Value("string"),
            "audio": datasets.Audio(sampling_rate=16_000),
            "speaker_id": datasets.Value("string"),
            "normalized_text": datasets.Value("string"),
        }
        if self.config.name == "voxpopuli":
            features.update(
                {
                    "raw_text": datasets.Value("string"),
                    "raw_ner": datasets.Sequence(
                        {
                            "type": datasets.Value("string"),
                            "start": datasets.Value("int32"),
                            "length": datasets.Value("int32"),
                        }
                    ),
                    "normalized_ner": datasets.Sequence(
                        {
                            "type": datasets.Value("string"),
                            "start": datasets.Value("int32"),
                            "length": datasets.Value("int32"),
                        }
                    ),
                    "raw_combined_ner": datasets.Sequence(
                        {
                            "type": datasets.Value("string"),
                            "start": datasets.Value("int32"),
                            "length": datasets.Value("int32"),
                        }
                    ),
                    "normalized_combined_ner": datasets.Sequence(
                        {
                            "type": datasets.Value("string"),
                            "start": datasets.Value("int32"),
                            "length": datasets.Value("int32"),
                        }
                    ),
                }
            )
        elif self.config.name == "voxceleb":
            features.update(
                {
                    "sentiment": datasets.Value("string"),
                    "start_second": datasets.Value("float64"),
                    "end_second": datasets.Value("float64"),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            # No ("file", "text") pair exists in the features above, so no supervised keys.
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )
    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        config_name = f"slue-{self.config.name}"
        dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name])
        data_dir = os.path.join(dl_dir, config_name)
        logger.info(f"data_dir = {data_dir}")
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir or "", f"{config_name}_fine-tune.tsv"
                    ),
                    "data_dir": data_dir,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir or "", f"{config_name}_dev.tsv"),
                    "data_dir": data_dir,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir or "", f"{config_name}_test_blind.tsv"
                    ),
                    "data_dir": data_dir,
                },
            ),
        ]
        return splits
    def _generate_examples(self, filepath, data_dir):
        logger.info(f"generating examples from = {filepath}")
        with open(filepath) as f:
            reader = csv.DictReader(f, delimiter="\t")
            for idx, row in enumerate(reader):
                if self.config.name == "voxpopuli":
                    audio_file = os.path.join(
                        data_dir, row["split"], row["id"] + ".ogg"
                    )
                    example = {
                        "id": row["id"],
                        "audio": audio_file,
                        "speaker_id": row["speaker_id"],
                        "raw_text": row["raw_text"],
                        "normalized_text": row["normalized_text"],
                        "raw_ner": parse_ner_label(row.get("raw_ner", "[]")),
                        "normalized_ner": parse_ner_label(
                            row.get("normalized_ner", "[]")
                        ),
                        "raw_combined_ner": parse_ner_label(
                            row.get("raw_ner", "[]"), combined=True
                        ),
                        "normalized_combined_ner": parse_ner_label(
                            row.get("normalized_ner", "[]"), combined=True
                        ),
                    }
                elif self.config.name == "voxceleb":
                    audio_file = os.path.join(
                        data_dir, row["split"] + "_raw", row["id"] + ".flac"
                    )
                    example = {
                        "id": row["id"],
                        "audio": audio_file,
                        "speaker_id": row["speaker_id"],
                        "normalized_text": row.get("normalized_text", ""),
                        "sentiment": row.get("sentiment", ""),
                        "start_second": row["start_second"],
                        "end_second": row["end_second"],
                    }
                yield idx, example
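# Usage sketch (illustrative, assuming this script is saved locally as slue.py; recent
# versions of `datasets` may additionally require trust_remote_code=True):
#   from datasets import load_dataset
#   voxpopuli = load_dataset("slue.py", "voxpopuli", split="validation")
#   print(voxpopuli[0]["normalized_text"], voxpopuli[0]["normalized_combined_ner"])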