common_voice_11_0_eues / common_voice_11_0_eues.py
# coding=utf-8
# Copyright 2023 Mikel Penagarikano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Albayzin automatic speech recognition dataset.
"""
import re
from random import shuffle

import datasets

_CITATION = """\
"""
_DESCRIPTION = """\
"""
_HOMEPAGE = ""

class CommonVoiceEUESConfig(datasets.BuilderConfig):
    """BuilderConfig for Common Voice Mixed."""

    def __init__(self, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        super(CommonVoiceEUESConfig, self).__init__(version=datasets.Version("11.0.0", ""), **kwargs)
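
# Builder that mixes the Basque ('eu') and Spanish ('es') configurations of
# Common Voice 11.0 into a single joint dataset.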
class CommonVoiceEUES(datasets.GeneratorBasedBuilder):
    """Common Voice Mixed dataset."""

    BUILDER_CONFIGS = [CommonVoiceEUESConfig(name="eues", description="eu+es joint configuration.")]
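
    # Both source corpora are loaded eagerly when the class is defined, so their
    # metadata (features, splits) and their examples are available to _info(),
    # _split_generators() and _generate_examples() below.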
    CV_EU_ARGS = ['mozilla-foundation/common_voice_11_0', 'eu']
    print('Loading', *CV_EU_ARGS)
    CV_EU_INFO = datasets.load_dataset_builder(*CV_EU_ARGS)
    CV_EU = datasets.load_dataset(*CV_EU_ARGS)

    CV_ES_ARGS = ['mozilla-foundation/common_voice_11_0', 'es']
    print('Loading', *CV_ES_ARGS)
    CV_ES_INFO = datasets.load_dataset_builder(*CV_ES_ARGS)
    CV_ES = datasets.load_dataset(*CV_ES_ARGS)

    assert CV_EU_INFO.info.features == CV_ES_INFO.info.features
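
    # The dataset keeps the full Common Voice feature schema and adds one extra
    # string column, 'simplified_sentence', with a normalized version of 'sentence'.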
    def _info(self):
        features = self.CV_EU_INFO.info.features.copy()
        features['simplified_sentence'] = datasets.Value('string')
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
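
    # One SplitGenerator per split of the 'eu' configuration; the 'es'
    # configuration is expected to provide the same split names, since
    # _generate_examples() indexes both corpora by the same split.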
    def _split_generators(self, dl_manager):
        split_generators = []
        for name in self.CV_EU_INFO.info.splits.keys():
            split_generators.append(
                datasets.SplitGenerator(
                    name=name,
                    gen_kwargs={"split": name}
                )
            )
        return split_generators
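
    # Transcript simplification used for the 'simplified_sentence' field:
    # lowercase, strip accents, map 'v' to 'b' (pronounced alike in Spanish),
    # drop 'h' except inside the digraph 'ch' (protected with a temporary
    # placeholder), and reduce everything else to the a-z/ñ alphabet plus
    # single spaces.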
    _TRANTAB = str.maketrans('áéíóúÁÉÍÓÚüÜv', 'aeiouaeiouuub')
    _ALPHABET_PATTERN = re.compile('[^abcdefghijklmnñopqrstuvwxyz ]+')

    def _simplyfy(self, txt):
        txt = txt.lower()
        txt = txt.translate(self._TRANTAB)
        txt = txt.replace('ch', 'X').replace('h', '').replace('X', 'ch')
        txt = self._ALPHABET_PATTERN.sub(' ', txt)
        return ' '.join(txt.split())
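
    # Examples from the two corpora are interleaved at random: an index list with
    # one 0 (eu) or 1 (es) entry per source example is shuffled, then each entry
    # pulls the next example from the corresponding iterator. The shuffle is
    # unseeded, so the interleaving order changes between runs.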
    def _generate_examples(self, split):
        index = ([0] * len(self.CV_EU[split])) + ([1] * len(self.CV_ES[split]))
        shuffle(index)
        it = (iter(self.CV_EU[split]), iter(self.CV_ES[split]))
        for key, lang in enumerate(index):
            feature = next(it[lang])
            feature['simplified_sentence'] = self._simplyfy(feature['sentence'])
            yield key, feature
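
# Minimal usage sketch (the repo id below is an assumption based on the file
# path and author; adjust it to wherever this script is actually hosted):
#
#   from datasets import load_dataset
#   ds = load_dataset('mpenagar/common_voice_11_0_eues', 'eues', split='train')
#   print(ds[0]['sentence'], '->', ds[0]['simplified_sentence'])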