holylovenia commited on
Commit
3472b5c
1 Parent(s): d144e1b

Upload indspeech_teldialog_lvcsr.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. indspeech_teldialog_lvcsr.py +226 -0
indspeech_teldialog_lvcsr.py ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import json
6
+ import os
7
+
8
+ from seacrowd.utils import schemas
9
+ from seacrowd.utils.configs import SEACrowdConfig
10
+ from seacrowd.utils.constants import Tasks
11
+ from zipfile import ZipFile
12
+
13
+ _CITATION = """\
14
+ @inproceedings{sakti-tcast-2008,
15
+ title = "Development of {I}ndonesian Large Vocabulary Continuous Speech Recognition System within {A-STAR} Project",
16
+ author = "Sakti, Sakriani and Kelana, Eka and Riza, Hammam and Sakai, Shinsuke and Markov, Konstantin and Nakamura, Satoshi",
17
+ booktitle = "Proc. IJCNLP Workshop on Technologies and Corpora for Asia-Pacific Speech Translation (TCAST)",
18
+ year = "2008",
19
+ pages = "19--24"
20
+ address = "Hyderabad, India"
21
+ }
22
+
23
+
24
+ @inproceedings{sakti-icslp-2004,
25
+ title = "Indonesian Speech Recognition for Hearing and Speaking Impaired People",
26
+ author = "Sakti, Sakriani and Hutagaol, Paulus and Arman, Arry Akhmad and Nakamura, Satoshi",
27
+ booktitle = "Proc. International Conference on Spoken Language Processing (INTERSPEECH - ICSLP)",
28
+ year = "2004",
29
+ pages = "1037--1040"
30
+ address = "Jeju Island, Korea"
31
+ }
32
+
33
+ @article{sakti-s2st-csl-2013,
34
+ title = "{A-STAR}: Toward Tranlating Asian Spoken Languages",
35
+ author = "Sakti, Sakriani and Paul, Michael and Finch, Andrew and Sakai, Shinsuke and Thang, Tat Vu, and Kimura, Noriyuki
36
+ and Hori, Chiori and Sumita, Eiichiro and Nakamura, Satoshi and Park, Jun and Wutiwiwatchai, Chai and Xu, Bo and Riza, Hammam
37
+ and Arora, Karunesh and Luong, Chi Mai and Li, Haizhou",
38
+ journal = "Special issue on Speech-to-Speech Translation, Computer Speech and Language Journal",
39
+ volume = "27",
40
+ number ="2",
41
+ pages = "509--527",
42
+ year = "2013",
43
+ publisher = "Elsevier"
44
+ }
45
+ """
46
+
47
+ _LOCAL = False
48
+ _LANGUAGES = ["ind"] # We follow ISO639-3 language code (https://iso639-3.sil.org/code_tables/639/data)
49
+ _DATASETNAME = "indspeech_teldialog_lvcsr"
50
+
51
+ _DESCRIPTION = """
52
+ INDspeech_TELDIALOG_LVCSR is one of the first Indonesian speech datasets for large vocabulary continuous speech recognition (LVCSR) based on telephon application. R&D Division of PT Telekomunikasi Indonesia developed the data in 2005-2006, in collaboration with Advanced Telecommunication Research Institute International (ATR) Japan, as the continuation of the Asia-Pacific Telecommunity (APT) project [Sakti et al., 2004]. It has also been successfully used for developing Indonesian LVCSR in the Asian speech translation advanced research (A-STAR) project [Sakti et al., 2013].
53
+ """
54
+
55
+ _HOMEPAGE = "https://github.com/s-sakti/data_indsp_teldialog_lvcsr"
56
+
57
+ _LICENSE = "CC-BY-NC-SA 4.0"
58
+
59
+
60
+ URL_TEMPLATE = {
61
+ "lst": "https://raw.githubusercontent.com/s-sakti/data_indsp_teldialog_lvcsr/main/lst/", # transcript.lst
62
+ "speech": "https://github.com/s-sakti/data_indsp_teldialog_lvcsr/raw/main/speech/", # Ind3/Ind304.zip~Ind400.zip
63
+ "text": "https://github.com/s-sakti/data_indsp_teldialog_lvcsr/raw/main/text/", # all_transcript.zip
64
+ }
65
+
66
+ _URLS = {
67
+ "lst_spk_Ind": [URL_TEMPLATE["lst"] + "spk_Ind" + str(n) + ".lst" for n in range(0, 4)],
68
+ "lst_spk_all": URL_TEMPLATE["lst"] + "spk_all.lst",
69
+ "lst_spk_test": URL_TEMPLATE["lst"] + "spk_test.lst",
70
+ "lst_spk_train": URL_TEMPLATE["lst"] + "spk_train.lst",
71
+ "lst_transcript": URL_TEMPLATE["lst"] + "transcript.lst",
72
+ "speech_Ind": [URL_TEMPLATE["speech"] + "Ind" + str(n) + "/Ind" + str(p).zfill(3) + ".zip" for n in range(0, 4) for p in range(n * 100 + 1, n * 100 + 101)],
73
+ "transcript_all": URL_TEMPLATE["text"] + "all_transcript.zip",
74
+ "transcript_spk": URL_TEMPLATE["text"] + "spk_transcript.zip",
75
+ }
76
+
77
# This loader only supports automatic speech recognition.
_SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
# Version of the upstream (source) data release.
_SOURCE_VERSION = "1.0.0"
# Version of the SeaCrowd schema wrapper for this dataset.
_SEACROWD_VERSION = "2024.06.20"
80
+
81
+
82
class IndSpeechTelDialLVCSR(datasets.GeneratorBasedBuilder):
    """Loader for INDspeech_TELDIALOG_LVCSR, an Indonesian LVCSR speech dataset
    for telephone applications (PT Telekomunikasi Indonesia & ATR Japan,
    2005-2006; see _CITATION).

    400 speakers (Ind001..Ind400) each contribute WAV utterances; transcripts
    are looked up by the sentence number embedded in each file name.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="indspeech_teldialog_lvcsr_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="indspeech_teldialog_lvcsr source schema",
            schema="source",
            subset_id="indspeech_teldialog_lvcsr",
        ),
        SEACrowdConfig(
            name="indspeech_teldialog_lvcsr_seacrowd_sptext",
            # Fix: the seacrowd config previously reused _SOURCE_VERSION.
            version=datasets.Version(_SEACROWD_VERSION),
            description="indspeech_teldialog_lvcsr SEACrowd schema",
            schema="seacrowd_sptext",
            subset_id="indspeech_teldialog_lvcsr",
        ),
    ]

    DEFAULT_CONFIG_NAME = "indspeech_teldialog_lvcsr_source"

    def _info(self) -> datasets.DatasetInfo:
        """Declare the feature schema for the active config."""
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "speaker_id": datasets.Value("string"),
                    "path": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_sptext":
            features = schemas.speech_text_features
        else:
            # Fail loudly instead of hitting a NameError on `features` below.
            raise ValueError(f"Unknown schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=[datasets.AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download per-speaker audio zips and list files; define train/test splits."""
        # Each zip extracts to one folder per speaker, e.g. <cache>/.../Ind001.
        audio_files_dir = []
        for aud_url in _URLS["speech_Ind"]:
            extracted = dl_manager.download_and_extract(aud_url)
            speaker_folder = aud_url.split("/")[-1][:-4]  # "Ind001.zip" -> "Ind001"
            audio_files_dir.append(Path(extracted) / speaker_folder)

        text_path = Path(dl_manager.download_and_extract(_URLS["lst_transcript"]))
        speak_list = Path(dl_manager.download_and_extract(_URLS["lst_spk_all"]))
        train_list = Path(dl_manager.download_and_extract(_URLS["lst_spk_train"]))
        test_list = Path(dl_manager.download_and_extract(_URLS["lst_spk_test"]))

        # Map speaker number -> full speaker id, e.g. "Ind001" -> "Ind001_F_B".
        with open(speak_list, encoding="utf-8") as f:
            speaker_num2id = {line.strip().split("_")[0]: line.strip() for line in f if line.strip()}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "audio_files_dir": audio_files_dir,
                    "text_path": text_path,
                    "split": "train",
                    "file_list": train_list,
                    "speaker_num2id": speaker_num2id,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "audio_files_dir": audio_files_dir,
                    "text_path": text_path,
                    "split": "test",
                    "file_list": test_list,
                    "speaker_num2id": speaker_num2id,
                },
            ),
        ]

    def _generate_examples(self, audio_files_dir: List, text_path: Path, split: str, file_list: Path, speaker_num2id: Dict) -> Tuple[int, Dict]:
        """Yield (audio_id, example) pairs for the speakers listed in `file_list`."""
        # Speaker numbers belonging to this split; a set gives O(1) membership.
        with open(file_list, encoding="utf-8") as f:
            speaker_nums = {line.strip() for line in f if line.strip()}

        # Sentence id ("appl_0000") -> transcript text, keyed by line order.
        with open(text_path, encoding="utf-8") as f:
            sentid = {f"appl_{i:04d}": line.strip() for i, line in enumerate(f)}

        for wav_one_speaker_folder in audio_files_dir:  # e.g. <cache>/.../Ind001
            speaker_num = wav_one_speaker_folder.name  # Ind001
            if speaker_num not in speaker_nums:
                continue
            speaker_id = speaker_num2id[speaker_num]  # Ind001_F_B

            # sorted() keeps example order deterministic across filesystems.
            for wave_file in sorted(os.listdir(wav_one_speaker_folder)):
                audio_id = os.path.splitext(wave_file)[0]
                # The trailing "_NNNN" of the file name indexes the transcript list.
                sentence_id = "appl_" + audio_id.split("_")[-1]
                text = sentid[sentence_id]
                wav_path = os.path.join(wav_one_speaker_folder, wave_file)

                ex = {
                    "id": audio_id,
                    "speaker_id": speaker_id,
                    "path": wav_path,
                    "audio": wav_path,
                    "text": text,
                }
                if self.config.schema == "seacrowd_sptext":
                    # Speaker id format is IndNNN_<gender>_<group>; age is not recorded.
                    ex["metadata"] = {
                        "speaker_age": None,
                        "speaker_gender": speaker_id.split("_")[1],
                    }
                yield audio_id, ex