# coding=utf-8
# Copyright 2022 Vojtěch Drábek
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Vystadial 2016 Czech automatic speech recognition dataset."""
import itertools
import os

import datasets
from datasets.tasks import AutomaticSpeechRecognition

_CITATION = """\
@misc{11234/1-1740,
    title = {Vystadial 2016 – Czech data},
    author = {Pl{\'a}tek, Ond{\v r}ej and Du{\v s}ek, Ond{\v r}ej and Jur{\v c}{\'{\i}}{\v c}ek, Filip},
    url = {http://hdl.handle.net/11234/1-1740},
    note = {{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University},
    copyright = {Creative Commons - Attribution-{ShareAlike} 4.0 International ({CC} {BY}-{SA} 4.0)},
    year = {2016}
}
"""
_DESCRIPTION = """\
This is the Czech data collected during the `VYSTADIAL` project. It is an extension of the Czech part of the 'Vystadial 2013' data release. The dataset comprises telephone conversations in Czech, developed for training acoustic models for automatic speech recognition in spoken dialogue systems.
"""
_URL = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-1740"
_DL_URL = "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-1740/data_voip_cs_2016.tar.gz"


class Vystadial2016ASRConfig(datasets.BuilderConfig):
    """BuilderConfig for Vystadial 2016."""

    def __init__(self, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        super(Vystadial2016ASRConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Vystadial2016ASR(datasets.GeneratorBasedBuilder):
    """Vystadial 2016 dataset."""

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        Vystadial2016ASRConfig(name="all", description="All samples."),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_URL,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        archive_path = dl_manager.download(_DL_URL)
        # (Optional) In non-streaming mode, we can extract the archive locally to have actual local audio files:
        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else {}
        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={
                    "files": dl_manager.iter_archive(archive_path),
                    "local_extracted_archive": local_extracted_archive,
                },
            ),
        ]

    def _generate_examples(self, files, local_extracted_archive):
        """Generate examples from a Vystadial 2016 archive."""
        key = 0
        samples = {}
        transcripts = {}
        # Order-independent XOR fingerprints of the utterance ids seen so far;
        # they match only once every buffered .wav has its .trn transcript and vice versa.
        id_transcripts = b""
        id_samples = b""
        for path, f in files:
            if path.endswith(".wav"):
                id_ = path.split("/")[-1][:-4]
                id_samples = bytes(a ^ b for a, b in itertools.zip_longest(id_.encode("utf-8"), id_samples, fillvalue=0))
                audio_data = f.read()
                audio_file = f"{id_}.wav"
                audio_file = (
                    os.path.join(local_extracted_archive, audio_file)
                    if local_extracted_archive
                    else audio_file
                )
                samples[id_] = {"audio": audio_file, "bytes": audio_data, "file": path}
            elif path.endswith(".trn"):
                # Transcripts are stored as `<id>.wav.trn`, so strip the 8-character suffix.
                id_ = path.split("/")[-1][:-8]
                id_transcripts = bytes(a ^ b for a, b in itertools.zip_longest(id_.encode("utf-8"), id_transcripts, fillvalue=0))
                lines = f.readlines()
                if not lines:
                    continue
                line = lines[0].decode("utf-8").strip()
                transcripts[id_] = line
            # Flush the buffers once they cover the same set of utterance ids.
            if samples and len(samples) == len(transcripts) and id_samples == id_transcripts:
                for id_, sample in samples.items():
                    audio = {"path": sample["audio"], "bytes": sample["bytes"]}
                    yield key, {"audio": audio, "file": sample["audio"], "text": transcripts[id_]}
                    key += 1
                samples = {}
                transcripts = {}
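

# A minimal usage sketch, assuming this script is saved locally as `vystadial2016_asr.py`
# and the LINDAT download URL above is reachable. The `__main__` guard keeps it from
# running when `datasets` imports this module as a loading script.
if __name__ == "__main__":
    from datasets import load_dataset

    # Streaming iterates over the .tar.gz directly instead of extracting it first.
    dataset = load_dataset("./vystadial2016_asr.py", "all", split="train", streaming=True)
    first_example = next(iter(dataset))
    print(first_example["file"], first_example["text"])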