holylovenia committed
Commit bd27a09
1 Parent(s): 55a4d91

Upload indspeech_teldialog_svcsr.py with huggingface_hub

Files changed (1)
  1. indspeech_teldialog_svcsr.py +216 -0
indspeech_teldialog_svcsr.py ADDED
@@ -0,0 +1,216 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from nusacrowd.utils import schemas
+ from nusacrowd.utils.configs import NusantaraConfig
+ from nusacrowd.utils.constants import Tasks
+
+
+ _CITATION = """\
+ @inproceedings{sakti-icslp-2004,
+     title = "Indonesian Speech Recognition for Hearing and Speaking Impaired People",
+     author = "Sakti, Sakriani and Hutagaol, Paulus and Arman, Arry Akhmad and Nakamura, Satoshi",
+     booktitle = "Proc. International Conference on Spoken Language Processing (INTERSPEECH - ICSLP)",
+     year = "2004",
+     pages = "1037--1040",
+     address = "Jeju Island, Korea"
+ }
+ """
+
+ _DATASETNAME = "indspeech_teldialog_svcsr"
+
+ _DESCRIPTION = """\
+ This is the first Indonesian speech dataset for small vocabulary continuous speech recognition (SVCSR).
+ The data was developed by TELKOMRisTI (R&D Division, PT Telekomunikasi Indonesia) in collaboration with Advanced
+ Telecommunication Research Institute International (ATR) Japan and Bandung Institute of Technology (ITB) under the
+ Asia-Pacific Telecommunity (APT) project in 2004 [Sakti et al., 2004]. Although it was originally developed for
+ a telecommunication system for hearing and speaking impaired people, it can be used for other applications,
+ e.g., automatic call centers. Furthermore, as all speakers utter the same sentences,
+ it can also be used for voice conversion tasks.
+
+ The text is based on a word vocabulary derived from a set of necessary dialog calls,
+ such as calls to the 119 emergency department, the 108 telephone information department,
+ and a ticket reservation department. In total, it consists of 20,000 utterances (about 18 hours of speech) drawn from a
+ 70-word dialog vocabulary forming 100 sentences (including single-word sentences), each uttered by 200 speakers
+ (100 females, 100 males). The speakers' ages are limited to 20-40 years, but they represent a wide range of spoken
+ dialects from different ethnic groups. The recordings were made in parallel for both clean and telephone speech,
+ but only the clean speech is released due to quality issues with the telephone speech.
+ Each audio file is a single-channel, 16-bit PCM WAV with a sampling rate of 16000 Hz.
+ The utterances are split equally into training and test sets, with 100 speakers (50 females, 50 males) in each set.
+ """
+
+ _HOMEPAGE = "https://github.com/s-sakti/data_indsp_teldialog_svcsr/"
+
+ _LICENSE = "CC-BY-NC-SA-4.0"
+
+ _LANGUAGES = ["ind"]
+ _LOCAL = False
+
+ URL_TEMPLATE = "https://raw.githubusercontent.com/s-sakti/data_indsp_teldialog_svcsr/main/"
+ _URLS = {
+     _DATASETNAME: {"lst": URL_TEMPLATE + "lst/", "speech": URL_TEMPLATE + "speech/", "text": URL_TEMPLATE + "text/"},
+ }
+
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _NUSANTARA_VERSION = "1.0.0"
+
+
+ class INDspeechTELDIALOGSVCSR(datasets.GeneratorBasedBuilder):
+     """
+     This is an Indonesian speech dataset on small vocabulary continuous speech recognition (SVCSR) built from necessary
+     dialog calls. The dataset loader is designed for the speech recognition task.
+     There are 20,000 utterances (train: 10,000, test: 10,000) uttered by 200 speakers
+     (50 males and 50 females in each of the train and test sets).
+     """
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION)
+
+     BUILDER_CONFIGS = [
+         NusantaraConfig(
+             name="indspeech_teldialog_svcsr_source",
+             version=SOURCE_VERSION,
+             description="indspeech_teldialog_svcsr source schema",
+             schema="source",
+             subset_id="indspeech_teldialog_svcsr",
+         ),
+         NusantaraConfig(
+             name="indspeech_teldialog_svcsr_nusantara_sptext",
+             version=NUSANTARA_VERSION,
+             description="indspeech_teldialog_svcsr Nusantara schema",
+             schema="nusantara_sptext",
+             subset_id="indspeech_teldialog_svcsr",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "indspeech_teldialog_svcsr_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         # The source schema keeps all keys/information/labels as close to the original dataset as possible.
+         # You can arbitrarily nest lists and dictionaries.
+         # For iterables, use lists over tuples or `datasets.Sequence`.
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "speaker_id": datasets.Value("string"),
+                     "gender_id": datasets.Value("string"),
+                     "utterance_id": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                 }
+             )
+         elif self.config.schema == "nusantara_sptext":
+             features = schemas.speech_text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         urls = _URLS[_DATASETNAME]
+         # Download the speaker lists, wav-file lists, and transcription archive.
+         data_dir = {
+             "spk_data": {"train": dl_manager.download_and_extract(os.path.join(urls["lst"], "train_spk.lst")), "test": dl_manager.download_and_extract(os.path.join(urls["lst"], "test_spk.lst"))},
+             "wav_data": {"train": dl_manager.download_and_extract(os.path.join(urls["lst"], "train_wav.lst")), "test": dl_manager.download_and_extract(os.path.join(urls["lst"], "test_wav.lst"))},
+             "txt_data": dl_manager.download_and_extract(os.path.join(urls["text"], "text.zip")),
+         }
+         speakers = {}
+         with open(data_dir["spk_data"]["train"], "r") as f:
+             speakers["train"] = [sp.replace("\n", "") for sp in f.readlines()]
+         with open(data_dir["spk_data"]["test"], "r") as f:
+             speakers["test"] = [sp.replace("\n", "") for sp in f.readlines()]
+         # Download one speech archive per speaker.
+         data_dir["speech_path"] = {
+             "train": {sp: dl_manager.download_and_extract(os.path.join(urls["speech"], "train", sp + ".zip")) for sp in speakers["train"]},
+             "test": {sp: dl_manager.download_and_extract(os.path.join(urls["speech"], "test", sp + ".zip")) for sp in speakers["test"]},
+         }
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # Whatever you put in gen_kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_dir["wav_data"]["train"],
+                     "audio_path": data_dir["speech_path"]["train"],
+                     "text_path": data_dir["txt_data"],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_dir["wav_data"]["test"],
+                     "audio_path": data_dir["speech_path"]["test"],
+                     "text_path": data_dir["txt_data"],
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     @staticmethod
+     def text_process(utterance_txt_dir):
+         # Each utterance ID has a matching .ANS transcription file; join its lines into a single string.
+         with open(utterance_txt_dir + ".ANS", "r") as f:
+             lines = [x.replace("\n", "") for x in f.readlines()]
+         return " ".join(lines)
+
+     def _generate_examples(self, filepath: Path, audio_path, text_path: Path, split: str) -> Tuple[int, Dict]:
+         with open(filepath, "r") as f:
+             filelist = [x.replace("\n", "") for x in f.readlines()]
+
+         for fn in filelist:
+             # The speaker ID, gender, and utterance ID are encoded in the wav filename.
+             speaker_id = fn[:3]
+             gender_id = fn[:1]
+             utterance_id = fn[4:8]
+             _id = fn.replace(".wav", "")
+             text = self.text_process(os.path.join(text_path, utterance_id))
+             if self.config.schema == "source":
+                 yield _id, {
+                     "speaker_id": speaker_id,
+                     "gender_id": gender_id,
+                     "utterance_id": utterance_id,
+                     "audio": os.path.join(audio_path[speaker_id], fn),
+                     "text": text,
+                 }
+
+             elif self.config.schema == "nusantara_sptext":
+                 yield _id, {
+                     "id": _id,
+                     "speaker_id": speaker_id,
+                     "text": text,
+                     "path": os.path.join(audio_path[speaker_id], fn),
+                     "audio": os.path.join(audio_path[speaker_id], fn),
+                     "metadata": {
+                         "speaker_age": None,
+                         "speaker_gender": gender_id,
+                     },
+                 }
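
For a quick sanity check of the uploaded loader, a snippet along the following lines can be used. This is a minimal sketch rather than part of the commit: the Hub repository path "holylovenia/indspeech_teldialog_svcsr" is an assumption based on the committing user, the config name comes from BUILDER_CONFIGS above, and the nusacrowd package must be installed because the script imports nusacrowd.utils. Depending on the installed datasets version, trust_remote_code=True may also be required for script-based loaders.

import datasets

# Minimal sketch: adjust the repository path to wherever this script is actually hosted.
dset = datasets.load_dataset(
    "holylovenia/indspeech_teldialog_svcsr",
    name="indspeech_teldialog_svcsr_source",
)

sample = dset["train"][0]
print(sample["speaker_id"], sample["utterance_id"])
print(sample["text"])
print(sample["audio"]["sampling_rate"])  # expected: 16000

The same call with name="indspeech_teldialog_svcsr_nusantara_sptext" would return examples in the Nusantara speech-text schema instead of the source schema.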