import os
import shutil
import string

import datasets
from datasets.tasks import AutomaticSpeechRecognition


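# SLR52 ships the audio as 16 zip archives, asr_sinhala_0.zip through
# asr_sinhala_f.zip, indexed by the hex digits 0-9 and a-f.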
_DATA_CLIPS_URL = "https://www.openslr.org/resources/52/asr_sinhala_{}.zip"

_TRAIN_DATA_URL = "https://raw.githubusercontent.com/keshan/sinhala-asr/main/train.tsv"
_TEST_DATA_URL = "https://raw.githubusercontent.com/keshan/sinhala-asr/main/test.tsv"

_CITATION = """\
@inproceedings{kjartansson-etal-sltu2018,
    title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
    author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
    booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
    year  = {2018},
    address = {Gurugram, India},
    month = aug,
    pages = {52--55},
    URL   = {http://dx.doi.org/10.21437/SLTU.2018-11}
  }
"""

_DESCRIPTION = """\
This data set contains ~185K transcribed audio clips for Sinhala. The data set consists of wave files and a TSV file. The file utt_spk_text.tsv contains a FileID, an anonymized UserID, and the transcription of the audio in the file.
The data set has been manually quality checked, but there might still be errors.

See LICENSE.txt file for license information.

Copyright 2016, 2017, 2018 Google, Inc.
"""

_HOMEPAGE = "https://www.openslr.org/52/"

_LICENSE = "https://www.openslr.org/resources/52/LICENSE"

_LANGUAGES = {
    "si": {
        "Language": "Sinhala",
        "Date": "2018",
    },
}


class LargeASRConfig(datasets.BuilderConfig):
    """BuilderConfig for LargeASR."""

    def __init__(self, name, **kwargs):
        """
        Args:
          data_dir: `string`, the path to the folder containing the files in the
            downloaded .tar
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          **kwargs: keyword arguments forwarded to super.
        """
        self.language = kwargs.pop("language", None)
        self.date_of_snapshot = kwargs.pop("date", None)
        description = f"Large {self.language} ASR dataset (snapshot of {self.date_of_snapshot})."
        super(LargeASRConfig, self).__init__(
            name=name, version=datasets.Version("1.0.0", ""), description=description, **kwargs
        )


class LargeASR(datasets.GeneratorBasedBuilder):
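    """Large Sinhala ASR dataset (OpenSLR SLR52)."""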

    BUILDER_CONFIGS = [
        LargeASRConfig(
            name=lang_id,
            language=_LANGUAGES[lang_id]["Language"],
            date=_LANGUAGES[lang_id]["Date"],
        )
        for lang_id in _LANGUAGES.keys()
    ]

    def _info(self):
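        # Feature names must match the TSV header exactly; the header row is
        # validated against them in `_generate_examples`. ("x" presumably
        # carries the anonymized UserID mentioned in _DESCRIPTION.)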
        features = datasets.Features(
            {
                "filename": datasets.Value("string"),
                "x": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "file": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="sentence")
            ],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # One archive per hex digit: 0-9, then a-f.
        data_file_urls = [_DATA_CLIPS_URL.format(i) for i in (string.digits + string.ascii_lowercase[:6])]
        dl_paths = dl_manager.download_and_extract(data_file_urls)

        # Merge the extracted clip folders into one parent directory so a
        # single `path_to_clips` prefix resolves every file referenced in the
        # TSVs. `dirs_exist_ok` requires Python 3.8+.
        dirname = os.path.dirname
        for path in dl_paths:
            shutil.copytree(path, dirname(path), dirs_exist_ok=True)

        # Fetch the train/test manifests through the download manager so they
        # are cached alongside the audio archives.
        abs_path_to_train_data = os.path.abspath(dl_manager.download(_TRAIN_DATA_URL))
        abs_path_to_test_data = os.path.abspath(dl_manager.download(_TEST_DATA_URL))
        abs_path_to_clips = os.path.dirname(dl_paths[0])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": abs_path_to_train_data,
                    "path_to_clips": abs_path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": abs_path_to_test_data,
                    "path_to_clips": abs_path_to_clips,
                },
            ),
        ]

    def _generate_examples(self, filepath, path_to_clips):
        """Yields examples."""
        data_fields = list(self._info().features.keys())
        path_idx = data_fields.index("file")

        with open(filepath, encoding="utf-8") as f:
            lines = f.readlines()
            headline = lines[0]

            column_names = headline.strip().split("\t")
            assert (
                column_names == data_fields
            ), f"The file should have {data_fields} as column names, but has {column_names}"

            for id_, line in enumerate(lines[1:]):
                field_values = line.strip().split("\t")

                # if a row is incomplete, pad with empty strings first so the
                # "file" column is guaranteed to exist
                if len(field_values) < len(data_fields):
                    field_values += (len(data_fields) - len(field_values)) * [""]

                # set absolute path for the wav audio file
                field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])

                yield id_, {key: value for key, value in zip(data_fields, field_values)}
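

# Usage sketch (not part of the loading script): assuming this file is saved
# locally as `large_asr.py` (a hypothetical path), the dataset could be loaded
# with the `datasets` library roughly like this:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("./large_asr.py", "si", split="train")
#     print(ds[0]["file"], ds[0]["sentence"])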