import csv
import os

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{pudo23_interspeech,
  author={Mikołaj Pudo and Mateusz Wosik and Adam Cieślak and Justyna Krzywdziak and Bożena Łukasiak and Artur Janicki},
  title={{MOCKS} 1.0: Multilingual Open Custom Keyword Spotting Testset},
  year={2023},
  booktitle={Proc. Interspeech 2023},
}
"""

_DESCRIPTION = """\
Multilingual Open Custom Keyword Spotting Testset (MOCKS) is a comprehensive
audio testset for evaluating and benchmarking Open-Vocabulary Keyword Spotting
(OV-KWS) models.
"""


_BASE_URL = "https://huggingface.co/datasets/voiceintelligenceresearch/MOCKS/tree/main"

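# Relative paths, resolved against the dataset repository by dl_manager; each
# template is filled with (language, subset, "offline"/"online"), e.g.
# "en/LS-clean/test/offline/data.tar.gz".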
_DL_URLS_TEMPLATE = {
    "data": "%s/%s/test/%s/data.tar.gz",
    "transcription": "%s/%s/test/data_%s_transcription.tsv",
    "positive": "%s/%s/test/%s/all.pair.positive.tsv",
    "similar": "%s/%s/test/%s/all.pair.similar.tsv",
    "different": "%s/%s/test/%s/all.pair.different.tsv",
    "positive_subset": "%s/%s/test/%s/subset.pair.positive.tsv",
    "similar_subset": "%s/%s/test/%s/subset.pair.similar.tsv",
    "different_subset": "%s/%s/test/%s/subset.pair.different.tsv",
}
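# Covered (language, source corpus) combinations; LS = LibriSpeech
# (clean/other test sets), MCV = Mozilla Common Voice.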
_MOCKS_SETS = [
    "en.LS-clean",
    "en.LS-other",
    "en.MCV",
    "de.MCV",
    "es.MCV",
    "fr.MCV",
    "it.MCV",
]

_MOCKS_SUFFIXES = [
    "",
    ".positive",
    ".similar",
    ".different",
    ".subset",
    ".positive_subset",
    ".similar_subset",
    ".different_subset",
]


class Mocks(datasets.GeneratorBasedBuilder):
    """MOCKS dataset.

    Config names are "<language>.<source>" plus an optional suffix selecting a
    single pair type and/or the curated subset, e.g. "en.LS-clean",
    "de.MCV.similar" or "en.MCV.positive_subset".
    """

    DEFAULT_CONFIG_NAME = "en.LS-clean"

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=subset + suffix, description=subset + suffix)
        for subset in _MOCKS_SETS
        for suffix in _MOCKS_SUFFIXES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "keyword_id": datasets.Value("string"),
                    "keyword_transcription": datasets.Value("string"),
                    "test_id": datasets.Value("string"),
                    "test_transcription": datasets.Value("string"),
                    "test_audio": datasets.Audio(sampling_rate=16000),
                    "label": datasets.Value("bool"),
                }
            ),
            homepage=_BASE_URL,
            citation=_CITATION,
        )
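
    # Every config yields two test splits, "offline" and "online". Test audio,
    # test transcriptions and pair lists follow the split, while keyword
    # transcriptions are always taken from the offline TSV.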
    def _split_generators(self, dl_manager):
        logger.info("split_generator")
        name_split = self.config.name.split(".")
        subset_lang = name_split[0]
        subset_name = name_split[1]

        # "<lang>.<source>" loads all pair types, "<lang>.<source>.subset" the
        # curated subsets of all pair types, and any other third component
        # exactly one pair list.
        if len(name_split) == 2:
            pairs_types = ["positive", "similar", "different"]
        elif name_split[2] == "subset":
            pairs_types = ["positive_subset", "similar_subset", "different_subset"]
        else:
            pairs_types = [name_split[2]]

        offline_archive_path = dl_manager.download({
            k: v % (subset_lang, subset_name, "offline")
            for k, v in _DL_URLS_TEMPLATE.items()
        })
        online_archive_path = dl_manager.download({
            k: v % (subset_lang, subset_name, "online")
            for k, v in _DL_URLS_TEMPLATE.items()
        })

        split_offline = [
            datasets.SplitGenerator(
                name="offline",
                gen_kwargs={
                    "audio_files": dl_manager.iter_archive(offline_archive_path["data"]),
                    "transcription_keyword": offline_archive_path["transcription"],
                    "transcription_test": offline_archive_path["transcription"],
                    "pairs": [offline_archive_path[pair_type] for pair_type in pairs_types],
                },
            )
        ]

        split_online = [
            datasets.SplitGenerator(
                name="online",
                gen_kwargs={
                    "audio_files": dl_manager.iter_archive(online_archive_path["data"]),
                    # Keywords always come from the offline recordings.
                    "transcription_keyword": offline_archive_path["transcription"],
                    "transcription_test": online_archive_path["transcription"],
                    "pairs": [online_archive_path[pair_type] for pair_type in pairs_types],
                },
            )
        ]

        return split_offline + split_online
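
    # Transcription TSVs have a header row; the first column holds an audio
    # path and the second its transcription. The map built here is keyed by
    # the basename of the audio path.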
    def _read_transcription(self, transcription_path):
        transcription_metadata = {}

        with open(transcription_path, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t")
            next(reader, None)  # skip the header row

            for row in reader:
                _, audio_id = os.path.split(row[0])
                transcription = row[1]
                transcription_metadata[audio_id] = {
                    "audio_id": audio_id,
                    "transcription": transcription,
                }

        return transcription_metadata
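
    # Pair TSVs have a header row; each data row holds a keyword audio path,
    # a test audio path and, in its last column, the binary match label.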
    def _generate_examples(self, audio_files, transcription_keyword, transcription_test, pairs):
        transcription_keyword_metadata = self._read_transcription(transcription_keyword)
        transcription_test_metadata = self._read_transcription(transcription_test)

        # Group pairs by test utterance so each archive member is read once.
        pair_metadata = {}
        for pair in pairs:
            with open(pair, encoding="utf-8") as f:
                reader = csv.reader(f, delimiter="\t")
                next(reader, None)  # skip the header row

                for row in reader:
                    _, keyword_id = os.path.split(row[0])
                    _, test_id = os.path.split(row[1])

                    if keyword_id not in transcription_keyword_metadata:
                        logger.error("No transcription for keyword %s", keyword_id)
                        continue
                    if test_id not in transcription_test_metadata:
                        logger.error("No transcription for test case %s", test_id)
                        continue

                    if test_id not in pair_metadata:
                        pair_metadata[test_id] = []

                    pair_metadata[test_id].append([keyword_id, int(row[-1])])

        id_ = 0
        for test_path, test_f in audio_files:
            _, test_id = os.path.split(test_path)
            if test_id in pair_metadata:
                test_audio = {"bytes": test_f.read()}
                for keyword_id, label in pair_metadata[test_id]:
                    yield id_, {
                        "keyword_id": keyword_id,
                        "keyword_transcription": transcription_keyword_metadata[keyword_id]["transcription"],
                        "test_id": test_id,
                        "test_transcription": transcription_test_metadata[test_id]["transcription"],
                        "test_audio": test_audio,
                        "label": label,
                    }
                    id_ += 1