evi / evi.py
# coding=utf-8
# Copyright 2022 The PolyAI and HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" EVI Dataset"""

import csv
import json
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{Spithourakis2022evi,
author = {Georgios P. Spithourakis and Ivan Vuli\'{c} and Micha\l{} Lis and I\~{n}igo Casanueva and Pawe\l{} Budzianowski},
title = {{EVI}: Multilingual Spoken Dialogue Tasks and Dataset for Knowledge-Based Enrolment, Verification, and Identification},
year = {2022},
note = {Data available at https://github.com/PolyAI-LDN/evi-paper},
url = {https://arxiv.org/abs/2204.13496},
booktitle = {Findings of NAACL (publication pending)}
}
""" # noqa
_ALL_CONFIGS = sorted([
    "en-GB", "fr-FR", "pl-PL"
])

_DESCRIPTION = "EVI is a dataset for enrolment, identification, and verification"  # noqa

_HOMEPAGE_URL = "https://arxiv.org/abs/2204.13496"

_AUDIO_DATA_URL = "https://poly-public-data.s3.eu-west-2.amazonaws.com/evi-paper/audios.zip"  # noqa

_VERSION = datasets.Version("0.0.5", "")


class EviConfig(datasets.BuilderConfig):
    """BuilderConfig for EVI"""

    def __init__(
        self, name, version, description, homepage, audio_data_url
    ):
        super().__init__(
            name=name,
            version=version,
            description=description,
        )
        self.homepage = homepage
        self.audio_data_url = audio_data_url


def _build_config(name):
    return EviConfig(
        name=name,
        version=_VERSION,
        description=_DESCRIPTION,
        homepage=_HOMEPAGE_URL,
        audio_data_url=_AUDIO_DATA_URL,
    )


class Evi(datasets.GeneratorBasedBuilder):
    DEFAULT_WRITER_BATCH_SIZE = 1000
    BUILDER_CONFIGS = [
        _build_config(name) for name in _ALL_CONFIGS
    ]

    def _info(self):
        task_templates = None
        langs = _ALL_CONFIGS
        features = datasets.Features(
            {
                # dialogue and speaker metadata
                "lang_id": datasets.ClassLabel(names=langs),
                "dialogue_id": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "turn_id": datasets.Value("int32"),
                # profile targeted by enrolment, verification, and identification
                "target_profile_id": datasets.Value("string"),
                # ASR output for the turn
                "asr_transcription": datasets.Value("string"),
                "asr_nbest": datasets.Sequence(datasets.Value("string")),
                # audio recording of the turn
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=8_000),
            }
        )
        return datasets.DatasetInfo(
            version=self.config.version,
            description=self.config.description,
            homepage=self.config.homepage,
            license="CC-BY-4.0",
            citation=_CITATION,
            features=features,
            supervised_keys=None,
            task_templates=task_templates,
        )

    def _split_generators(self, dl_manager):
        langs = [self.config.name]
        # audio_path = dl_manager.download_and_extract(
        #     self.config.audio_data_url
        # )
        # With empty base paths, the transcription CSVs and per-language audio
        # folders are resolved relative to the current working directory.
        audio_path = ""
        text_path = ""
        lang2text_path = {
            _lang: os.path.join(
                text_path,
                f"dialogues.{_lang.split('-')[0]}.csv"
            )
            for _lang in langs
        }
        lang2audio_path = {
            _lang: os.path.join(
                audio_path,
                f"{_lang.split('-')[0]}"
            )
            for _lang in langs
        }
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_paths": lang2audio_path,
                    "text_paths": lang2text_path,
                },
            )
        ]
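
    # Expected CSV columns, as read below: dialogue_id, turn_num, speaker_id,
    # scenario_id, transcription, and nbest (a JSON-encoded list of ASR
    # hypotheses). Each turn's audio, when present, is looked up at
    # <audio dir for the language>/<dialogue_id>/<turn_num>.wav.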
    def _generate_examples(self, audio_paths, text_paths):
        key = 0
        for lang in text_paths.keys():
            text_path = text_paths[lang]
            audio_path = audio_paths[lang]
            with open(text_path, encoding="utf-8") as fin:
                reader = csv.DictReader(
                    fin, delimiter=",", skipinitialspace=True
                )
                for dictrow in reader:
                    dialogue_id = dictrow["dialogue_id"]
                    turn_id = dictrow["turn_num"]
                    file_path = os.path.join(
                        audio_path,
                        dialogue_id,
                        f'{turn_id}.wav'
                    )
                    if not os.path.isfile(file_path):
                        file_path = None
                    example = {
                        "lang_id": _ALL_CONFIGS.index(lang),
                        "dialogue_id": dialogue_id,
                        "speaker_id": dictrow["speaker_id"],
                        "turn_id": turn_id,
                        "target_profile_id": dictrow["scenario_id"],
                        "asr_transcription": dictrow["transcription"],
                        "asr_nbest": json.loads(dictrow["nbest"]),
                        "path": file_path,
                        "audio": file_path,
                    }
                    yield key, example
                    key += 1
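

# ---------------------------------------------------------------------------
# Usage sketch. This is not part of the loading script itself; it only shows
# how the builder is typically exercised. The Hub repository id "PolyAI/evi"
# and the need for `trust_remote_code` are assumptions about how the script is
# hosted and loaded, not something this file guarantees.
if __name__ == "__main__":
    # Load the English test split and inspect one turn.
    evi_en = datasets.load_dataset(
        "PolyAI/evi",            # assumed Hub repository id
        "en-GB",                 # one of _ALL_CONFIGS
        split="test",
        trust_remote_code=True,  # needed for script-based datasets in recent `datasets`
    )
    first_turn = evi_en[0]
    print(first_turn["dialogue_id"], first_turn["asr_transcription"])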