holylovenia committed on
Commit
841d13e
1 Parent(s): 93a1a19

Upload indspeech_digit_cdsr.py with huggingface_hub

Files changed (1)
  1. indspeech_digit_cdsr.py +232 -0
indspeech_digit_cdsr.py ADDED
@@ -0,0 +1,232 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from itertools import chain
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+ _CITATION = """\
+ @inproceedings{sakti-icslp-2004,
+     title = "Indonesian Speech Recognition for Hearing and Speaking Impaired People",
+     author = "Sakti, Sakriani and Hutagaol, Paulus and Arman, Arry Akhmad and Nakamura, Satoshi",
+     booktitle = "Proc. International Conference on Spoken Language Processing (INTERSPEECH - ICSLP)",
+     year = "2004",
+     pages = "1037--1040",
+     address = "Jeju Island, Korea"
+ }
+ """
+ _DATASETNAME = "indspeech_digit_cdsr"
+ _LANGUAGES = ["ind"]
+ _DESCRIPTION = """\
+ INDspeech_DIGIT_CDSR is the first Indonesian speech dataset for connected digit speech recognition (CDSR). The data was developed by TELKOMRisTI (R&D Division, PT Telekomunikasi Indonesia) in collaboration with Advanced Telecommunication Research Institute International (ATR) Japan and Bandung Institute of Technology (ITB) under the Asia-Pacific Telecommunity (APT) project in 2004 [Sakti et al., 2004]. Although it was originally developed for a telecommunication system for hearing and speaking impaired people, it can be used for other applications, e.g., automatic call centers that recognize telephone numbers.
+ """
+
+ _HOMEPAGE = "https://github.com/s-sakti/data_indsp_digit_cdsr"
+ _LOCAL = False
+ _LICENSE = "CC-BY-NC-SA-4.0"
+
+ _TMP_URL = {
+     "lst": "https://raw.githubusercontent.com/s-sakti/data_indsp_digit_cdsr/main/lst/",
+     "text": "https://github.com/s-sakti/data_indsp_digit_cdsr/raw/main/text/",
+     "speech": "https://github.com/s-sakti/data_indsp_digit_cdsr/raw/main/speech/",
+ }
+
+ _URLS = {
+     "lst": {
+         "train_spk": _TMP_URL["lst"] + "train_spk.lst",
+         "train_fname": _TMP_URL["lst"] + "train_fname.lst",
+         "test_spk": [_TMP_URL["lst"] + "test" + str(i) + "_spk.lst" for i in range(1, 5)],
+         "test_fname": [_TMP_URL["lst"] + "test" + str(i) + "_fname.lst" for i in range(1, 5)],
+     },
+     "train": {"speech": _TMP_URL["speech"] + "train/", "text": _TMP_URL["text"] + "train/"},
+     "test": {"speech": _TMP_URL["speech"] + "test", "text": _TMP_URL["text"] + "test"},
+ }
+
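+ # Layout of the remote data, as implied by the URLs and list files above (an
+ # observation from this script, not an official spec): speech and transcriptions
+ # are packaged as one zip archive per speaker, under train/ for the training set
+ # and under test1/ ... test4/ for the four test sets.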
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+ _SOURCE_VERSION = "1.0.0"
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class INDspeechDIGITCDSR(datasets.GeneratorBasedBuilder):
+     """Indonesian speech dataset for connected digit speech recognition."""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="indspeech_digit_cdsr_source",
+             version=SOURCE_VERSION,
+             description="indspeech_digit_cdsr source schema",
+             schema="source",
+             subset_id="indspeech_digit_cdsr",
+         ),
+         NusantaraConfig(
+             name="indspeech_digit_cdsr_nusantara_sptext",
+             version=NUSANTARA_VERSION,
+             description="indspeech_digit_cdsr Nusantara schema",
+             schema="nusantara_sptext",
+             subset_id="indspeech_digit_cdsr",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "indspeech_digit_cdsr_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "speaker_id": datasets.Value("string"),
+                     "gender": datasets.Value("string"),
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_sptext":
+             features = schemas.speech_text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         lst_train_spk = Path(dl_manager.download_and_extract(_URLS["lst"]["train_spk"]))
+         lst_train_fname = Path(dl_manager.download_and_extract(_URLS["lst"]["train_fname"]))
+         lst_test_spk = [Path(dl_manager.download_and_extract(url)) for url in _URLS["lst"]["test_spk"]]
+         lst_test_fname = [Path(dl_manager.download_and_extract(url)) for url in _URLS["lst"]["test_fname"]]
+
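+         # Index the per-split files by utterance ID (the zip member name with its
+         # 4-character ".wav"/".txt" extension stripped): fnames lists the utterance
+         # IDs per split, while speech and text map each ID to its extracted path.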
+         fnames = {"test": []}
+         speech = {"test": {}}
+         text = {"test": {}}
+
+         with open(lst_train_spk, "r") as f:
+             speakers = [spk.replace("\n", "") for spk in f.readlines()]
+             tmp_speech = [Path(dl_manager.download_and_extract(_URLS["train"]["speech"] + spk + ".zip")) for spk in speakers]
+             tmp_text = [Path(dl_manager.download_and_extract(_URLS["train"]["text"] + spk + ".zip")) for spk in speakers]
+             speech["train"] = {fname[:-4]: os.path.join(spk_dir, fname) for spk_dir in tmp_speech for fname in os.listdir(spk_dir)}
+             text["train"] = {fname[:-4]: os.path.join(spk_dir, fname) for spk_dir in tmp_text for fname in os.listdir(spk_dir)}
+
+         with open(lst_train_fname, "r") as f:
+             fnames["train"] = [fname.replace("\n", "") for fname in f.readlines()]
+
+         for i in range(1, 5):
+             with open(lst_test_fname[i - 1], "r") as f:
+                 fnames["test"].append([fname.replace("\n", "") for fname in f.readlines()])
+
+             with open(lst_test_spk[i - 1], "r") as f:
+                 speakers = [spk.replace("\n", "") for spk in f.readlines()]
+                 tmp_speech = [Path(dl_manager.download_and_extract(_URLS["test"]["speech"] + str(i) + "/" + spk + ".zip")) for spk in speakers]
+                 tmp_text = [Path(dl_manager.download_and_extract(_URLS["test"]["text"] + str(i) + "/" + spk + ".zip")) for spk in speakers]
+                 tmp_dict_speech = {fname[:-4]: os.path.join(spk_dir, fname) for spk_dir in tmp_speech for fname in os.listdir(spk_dir)}
+                 tmp_dict_text = {fname[:-4]: os.path.join(spk_dir, fname) for spk_dir in tmp_text for fname in os.listdir(spk_dir)}
+
+             # Utterance IDs can repeat across the four test sets; keep the first
+             # occurrence of each ID.
+             for k, v in tmp_dict_speech.items():
+                 if k not in speech["test"]:
+                     speech["test"][k] = v
+
+             for k, v in tmp_dict_text.items():
+                 if k not in text["test"]:
+                     text["test"][k] = v
+
+         # Flatten the four per-test-set filename lists into a single test list.
+         fnames["test"] = list(chain(*fnames["test"]))
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": {
+                         "fnames": fnames["train"],
+                         "speech": speech["train"],
+                         "text": text["train"],
+                     },
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": {
+                         "fnames": fnames["test"],
+                         "speech": speech["test"],
+                         "text": text["test"],
+                     },
+                     "split": "test",
+                 },
+             ),
+         ]
+
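+     # Transcription files appear to carry one marker line before and one after the
+     # word sequence, which text_process() drops via w[1:-1]. Hypothetical
+     # illustration: a file whose lines are ["<s>", "satu dua tiga", "</s>"] would
+     # come back as "satu dua tiga".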
+     @staticmethod
+     def text_process(utterance_path):
+         with open(utterance_path, "r") as f:
+             w = [r.replace("\n", "") for r in f.readlines()]
+         return " ".join(w[1:-1])
+
+     def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         had_used = set()
+         for key, example in enumerate(filepath["fnames"]):
+             if example in had_used:
+                 continue
+             had_used.add(example)
+             # The first character of the speaker ID apparently encodes gender.
+             spk_id, _ = example.split("_")
+             if self.config.schema == "source":
+                 yield key, {
+                     "id": example,
+                     "speaker_id": spk_id,
+                     "gender": spk_id[0],
+                     "path": filepath["speech"][example],
+                     "audio": filepath["speech"][example],
+                     "text": self.text_process(filepath["text"][example]),
+                 }
+             elif self.config.schema == "nusantara_sptext":
+                 yield key, {
+                     "id": example,
+                     "speaker_id": spk_id,
+                     "text": self.text_process(filepath["text"][example]),
+                     "path": filepath["speech"][example],
+                     "audio": filepath["speech"][example],
+                     "metadata": {
+                         "speaker_age": None,
+                         "speaker_gender": spk_id[0],
+                     },
+                 }
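
A minimal usage sketch (not part of the commit; assumes nusacrowd is installed and a version of the datasets library that still supports loading dataset scripts):

    import datasets

    # "indspeech_digit_cdsr_source" is the default config declared in BUILDER_CONFIGS above.
    dset = datasets.load_dataset("indspeech_digit_cdsr.py", name="indspeech_digit_cdsr_source")
    print(dset["train"][0]["text"])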