File size: 3,766 Bytes
d2818c9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d2b586f
 
 
d2818c9
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
# coding=utf-8
# Copyright 2023 Mikel Penagarikano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Albayzin automatic speech recognition  dataset.
"""

import os
from pathlib import Path

import datasets
from datasets.tasks import AutomaticSpeechRecognition
from datasets.utils import logging
from random import shuffle
import re

# BibTeX citation for the dataset — intentionally left empty for now.
_CITATION = """\
"""

# Free-text description shown on the dataset card — intentionally left empty.
_DESCRIPTION = """\
"""

# Homepage URL for the dataset — intentionally left empty.
_HOMEPAGE = ""


class CommonVoiceEUESConfig(datasets.BuilderConfig):
    """Builder configuration for the joint eu+es Common Voice dataset."""

    def __init__(self, **kwargs):
        """Create the configuration, pinning the dataset version to 11.0.0.

        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        # Zero-argument super() is equivalent to the explicit two-argument
        # form inside a method body (Python 3).
        super().__init__(version=datasets.Version("11.0.0", ""), **kwargs)


class CommonVoiceEUES(datasets.GeneratorBasedBuilder):
    """Common Voice Mixed dataset.

    Interleaves, in random order, the examples of the Basque ('eu') and
    Spanish ('es') splits of Common Voice 11.0 into a single dataset, adding
    a 'simplified_sentence' column with a phonetically normalised transcript.
    """

    BUILDER_CONFIGS = [CommonVoiceEUESConfig(name="eues", description="eu+es joint configuration.")]

    # NOTE(review): the two source datasets are downloaded and fully loaded
    # at class-definition time, i.e. as a side effect of importing this
    # module. Consider deferring this work to _split_generators/__init__.
    CV_EU_ARGS = ['mozilla-foundation/common_voice_11_0','eu']
    print('Loading',*CV_EU_ARGS)
    CV_EU_INFO = datasets.load_dataset_builder(*CV_EU_ARGS)
    CV_EU = datasets.load_dataset(*CV_EU_ARGS)
    CV_ES_ARGS = ['mozilla-foundation/common_voice_11_0','es']
    print('Loading',*CV_ES_ARGS)
    CV_ES_INFO = datasets.load_dataset_builder(*CV_ES_ARGS)
    CV_ES = datasets.load_dataset(*CV_ES_ARGS)
    # The two languages must share an identical schema, because _info()
    # reuses the 'eu' feature dict for the merged dataset.
    assert CV_EU_INFO.info.features == CV_ES_INFO.info.features

    def _info(self):
        # Copy the 'eu' schema and extend it with the extra column produced
        # by _generate_examples.
        features = self.CV_EU_INFO.info.features.copy()
        features['simplified_sentence'] = datasets.Value('string')
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Mirror the split names of the 'eu' dataset (assumes 'es' provides
        # the same splits — TODO confirm; a missing split would only fail
        # later, inside _generate_examples).
        split_generators = []
        for name in self.CV_EU_INFO.info.splits.keys():
            split_generators.append(
                datasets.SplitGenerator(
                    name=name ,
                    gen_kwargs={"split":name}
                )
            )

        return split_generators

    # Folds accented vowels to plain ones and maps 'v' -> 'b' (Spanish b/v
    # merger). The uppercase entries are dead in practice: _simplyfy lowers
    # the text before translating.
    _TRANTAB = str.maketrans('áéíóúÁÉÍÓÚüÜv', 'aeiouaeiouuub')
    # Anything outside this simplified alphabet (plus space) is removed.
    _ALPHABET_PATTERN = re.compile('[^abcdefghijklmnñopqrstuvwxyz ]+')
    def _simplyfy(self,txt):
        """Return a phonetically simplified, lowercase version of `txt`."""
        txt = txt.lower()
        txt = txt.translate(self._TRANTAB)
        # Delete silent 'h' while preserving the 'ch' digraph: stash 'ch'
        # under the placeholder 'X', strip 'h', then restore 'ch'.
        txt = txt.replace('ch','X').replace('h','').replace('X','ch')
        txt = self._ALPHABET_PATTERN.sub(' ',txt)
        # Collapse all whitespace runs to single spaces and trim the ends.
        return ' '.join(txt.split())

    def _generate_examples(self, split):
        # Build a shuffled tag list with one entry per source example:
        # 0 -> take the next 'eu' example, 1 -> take the next 'es' example.
        # NOTE(review): random.shuffle is unseeded, so the interleaving
        # order differs between runs — generation is not reproducible.
        index = ([0] * len(self.CV_EU[split])) + ([1] * len(self.CV_ES[split]))
        shuffle(index)
        # One iterator per language; each language's examples keep their
        # original relative order, only the interleaving is random.
        it = ( iter(self.CV_EU[split]) , iter(self.CV_ES[split]) )
        #print('\n************************  ELIMINAR [:2000] ***************')
        #for key,lang in enumerate(index[:2000]) :
        for key,lang in enumerate(index) :
            feature = next(it[lang])
            feature['simplified_sentence'] = self._simplyfy(feature['sentence'])
            yield key,feature