# coding=utf-8
# Copyright 2023 Mikel Penagarikano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Albayzin automatic speech recognition dataset.
"""
import os
from pathlib import Path

import datasets
from datasets.tasks import AutomaticSpeechRecognition

_CITATION = """\
@inproceedings{conf/interspeech/MorenoPBLLMN93,
title = {Albayzin speech database: design of the phonetic corpus.},
author = {Moreno, Asunción and Poch, Dolors and Bonafonte, Antonio and Lleida, Eduardo and Llisterri, Joaquim and Mariño, José B. and Nadeu, Climent},
booktitle = {EUROSPEECH},
crossref = {conf/interspeech/1993},
ee = {http://www.isca-speech.org/archive/eurospeech_1993/e93_0175.html},
publisher = {ISCA},
url = {https://dblp.uni-trier.de/rec/conf/interspeech/MorenoPBLLMN93.html},
year = 1993
}
"""
_DESCRIPTION = """\
Albayzín, an Spanish Speech Recognition Database.
Albayzin is a spoken database for Spanish designed for speech recognition purposes.
It is composed by utterances from a set of phonetically balanced sentences, containing
a balanced set of males and females of ages from 18 to 55.
"""
_HOMEPAGE = "http://catalogue.elra.info/en-us/repository/browse/ELRA-S0089/"


class AlbayzinConfig(datasets.BuilderConfig):
    """BuilderConfig for Albayzin."""

    def __init__(self, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        super(AlbayzinConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Albayzin(datasets.GeneratorBasedBuilder):
    """Albayzin dataset."""

    BUILDER_CONFIGS = [AlbayzinConfig(name="default", description="'Default' configuration.")]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "id": datasets.Value("string"),
                }
            ),
            supervised_keys=("file", "text"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager):
        if dl_manager.manual_dir is None:
            raise ValueError(
                "Make sure you insert a manual dir via `datasets.load_dataset('Albayzin', data_dir=...)` "
                "that points to the Albayzin dataset directory."
            )
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via "
                "`datasets.load_dataset('Albayzin', data_dir=...)` that points to the Albayzin dataset directory."
            )
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"split": "train", "data_dir": data_dir}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"split": "test", "data_dir": data_dir}),
        ]
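
    # Expected layout under data_dir (inferred from the paths used in the two
    # methods below; not verified against the original distribution):
    #   texto/FA.TXT and texto/FT.TXT    train / test transcripts
    #   wav/CF/SUB_APRE/**/*.wav         train audio
    #   wav/CF/SUB_PRUE/**/*.wav         test audio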

    def _load_transcripts(self, split, data_dir):
        # The transcript files are plain ASCII with escape sequences for
        # accented characters, e.g. 'a --> á and ~n --> ñ.
        replacements = {
            "'a": "á", "'e": "é", "'i": "í", "'o": "ó", "'u": "ú",
            "'A": "Á", "'E": "É", "'I": "Í", "'O": "Ó", "'U": "Ú",
            "~n": "ñ", "'~N": "Ñ", "~u": "ü",
        }
        split2file = {"train": "FA.TXT", "test": "FT.TXT"}
        path = Path(data_dir, "texto", split2file[split])
        # ASCII text, with CRLF/CR line terminators (handled by universal newlines).
        with path.open(encoding="ascii") as f:
            text = {}
            for line in f:
                for k, v in replacements.items():
                    line = line.replace(k, v)
                words = line.split()
                # Skip lines too short to hold "id - transcript".
                if len(words) < 3:
                    continue
                # Format: text_id_number - transcript
                text[int(words[0])] = " ".join(words[2:])
        return text
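
    # Illustration (hypothetical line, not taken from the corpus): a raw entry
    # such as
    #     0001 - la m'usica est'a aqu'i
    # would be stored as text[1] == "la música está aquí" after the
    # replacements above.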

    def _generate_examples(self, split, data_dir):
        """Generate examples from the Albayzin data directory."""
        # Iterate the audio files and pair each one with its transcript.
        split2dir = {"train": "SUB_APRE", "test": "SUB_PRUE"}
        wav_paths = sorted(Path(data_dir, "wav/CF", split2dir[split]).glob("**/*.wav"))
        transcripts = self._load_transcripts(split, data_dir)
        for key, wav_path in enumerate(wav_paths):
            id_ = wav_path.stem
            # The last four characters of the file stem are the transcript id number.
            transcript = transcripts[int(id_[-4:])]
            example = {
                "file": str(wav_path),
                "audio": str(wav_path),
                "text": transcript,
                "id": id_,
            }
            yield key, example
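

# Minimal local smoke test: a sketch, not part of the loading script itself.
# It assumes the corpus has been obtained from ELRA and extracted, and the
# path below is a hypothetical placeholder. `datasets.load_dataset` accepts
# the path of a local loading script such as this file.
if __name__ == "__main__":
    ds = datasets.load_dataset(__file__, data_dir="/path/to/Albayzin")  # hypothetical path
    print(ds)
    sample = ds["train"][0]
    print(sample["id"], sample["text"])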