holylovenia committed (verified)
Commit d496762 · 1 Parent(s): 4d069d0

Upload tha_lotus.py with huggingface_hub

Files changed (1)
  1. tha_lotus.py +272 -0
tha_lotus.py ADDED
@@ -0,0 +1,272 @@
+ """
+ SEA Crowd Data Loader for Thai LOTUS.
+ """
+ import os
+ from typing import Dict, List, Tuple
+
+ import datasets
+ from datasets.download.download_manager import DownloadManager
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
+
+ import pandas as pd
+ from collections import Counter
+ from collections.abc import KeysView, Iterable
+
+ _CITATION = r"""
+ @INPROCEEDINGS{thaiLOTUSBN,
+     author={Chotimongkol, Ananlada and Saykhum, Kwanchiva and Chootrakool, Patcharika and Thatphithakkul, Nattanun and Wutiwiwatchai, Chai},
+     booktitle={2009 Oriental COCOSDA International Conference on Speech Database and Assessments},
+     title={LOTUS-BN: A Thai broadcast news corpus and its research applications},
+     year={2009},
+     volume={},
+     number={},
+     pages={44-50},
+     doi={10.1109/ICSDA.2009.5278377}}
+ """
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _LOCAL = False
+ _LANGUAGES = ["tha"]
+
+
+ _DATASETNAME = "tha_lotus"
+ _DESCRIPTION = r"""
+ The Large vOcabulary Thai continUous Speech recognition (LOTUS) corpus was designed for developing large vocabulary
+ continuous speech recognition (LVCSR), spoken dialogue systems, speech dictation, and broadcast news transcription.
+ It contains two datasets: one for training an acoustic model and another for training a language model.
+ """
+
+ _HOMEPAGE = "https://github.com/korakot/corpus/tree/main/LOTUS"
+ _LICENSE = Licenses.CC_BY_NC_SA_3_0.value
+
+ _URL = "https://github.com/korakot/corpus/releases/download/v1.0/AIFORTHAI-LotusCorpus.zip"
+
+
+ _SUPPORTED_TASKS = [Tasks.SPEECH_RECOGNITION]
+ _SOURCE_VERSION = "1.0.0"
+ _SEACROWD_VERSION = "2024.06.20"
+
+ CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]
+ assert len(CONFIG_SUFFIXES_FOR_TASK) == 1
+
+ config_choices_folder_structure = {
+     "unidrection_clean": ("PD", "U", "Clean"),
+     "unidrection_office": ("PD", "U", "Office"),
+     "closetalk_clean": ("PD", "C", "Clean"),
+     "closetalk_office": ("PD", "C", "Office")}
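+ # all folder tuples point at the PD (phonetically distributed) portion of the corpus; combined with the two
+ # schemas declared in BUILDER_CONFIGS below, this yields config names such as "tha_lotus_closetalk_clean_source"
+ # and "tha_lotus_closetalk_clean_seacrowd_sptext" (the "unidrection_*" spelling is part of the public config names)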
+
+
+ class ThaiLOTUS(datasets.GeneratorBasedBuilder):
+     """Thai Lotus free-version dataset, re-implemented for SEACrowd from https://github.com/korakot/corpus/blob/main/LOTUS"""
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{config_name}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME} source schema for config {config_name}",
+             schema="source",
+             subset_id=config_name
+         ) for config_name in config_choices_folder_structure.keys()
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{config_name}_seacrowd_{CONFIG_SUFFIXES_FOR_TASK[0]}",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} seacrowd schema for {_SUPPORTED_TASKS[0].name} and config {config_name}",
+             schema=f"seacrowd_{CONFIG_SUFFIXES_FOR_TASK[0]}",
+             subset_id=config_name
+         ) for config_name in config_choices_folder_structure.keys()
+     ]
+
+     def _info(self) -> datasets.DatasetInfo:
+         _config_schema_name = self.config.schema
+         logger.info(f"Received schema name: {self.config.schema}")
+         # source schema
+         if _config_schema_name == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "audio_id": datasets.Value("string"),
+                     "file": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "thai_text": datasets.Value("string"),
+                     "audio_arr_pos_start": datasets.Sequence(datasets.Value("float")),
+                     "audio_arr_pos_end": datasets.Sequence(datasets.Value("float")),
+                     "phonemes": datasets.Sequence(datasets.Value("string"))
+                 }
+             )
+
+         # speech-text schema
+         elif _config_schema_name == f"seacrowd_{CONFIG_SUFFIXES_FOR_TASK[0]}":
+             features = schemas.speech_text_features
+
+         else:
+             raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     @staticmethod
+     def __strip_text_iterables(input: Iterable):
+         if not isinstance(input, str):
+             return list(map(str.strip, input))
+         else:
+             return input.strip()
+
+     @classmethod
+     def __read_text_files(cls, path: str, init_lines_to_skip: int = 0, remove_empty_line: bool = True, strip_trailing_whitespace: bool = True):
+         with open(path, "r") as f:
+             data = cls.__strip_text_iterables(f.readlines())
+
+         # pre-processing steps based on args
+         if init_lines_to_skip > 0:
+             data = data[init_lines_to_skip:]
+         if remove_empty_line:
+             data = [_data for _data in data if len(_data.strip()) != 0]
+         if strip_trailing_whitespace:
+             data = [_data.strip() for _data in data]
+
+         return data
+
+     @classmethod
+     def __preprocess_cc_lab_file(cls, cc_lab_file: str):
+         if not cc_lab_file.endswith(".lab"):
+             raise ValueError("The file isn't a .lab!")
+
+         meta = ["audio_arr_pos_start", "audio_arr_pos_end", "phonemes"]
+         raw_data = cls.__read_text_files(cc_lab_file)
+
+         data = pd.DataFrame([dict(zip(meta, cls.__strip_text_iterables(_data.split(" ")))) for _data in raw_data])
+
+         # since the ratio of .lab end time to audio array length falls within (624.5, 625.5) for ~97.5% of the files,
+         # we divide the .lab time values by 625 to convert them into audio array (sample) positions
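+         # (most likely the .lab files use HTK-style 100 ns time units: at the 16 kHz sampling rate one sample
+         # spans 62.5 µs = 625 * 100 ns, so dividing by 625 maps a .lab timestamp onto a sample index, e.g. an
+         # end time of 1_250_000 units -> sample 2000 -> 0.125 s of audio; this is inferred from the ratio above,
+         # not documented by the corpus)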
+         len_ratio = 625
+         data["audio_arr_pos_start"] = data["audio_arr_pos_start"].astype("int") / len_ratio
+         data["audio_arr_pos_end"] = data["audio_arr_pos_end"].astype("int") / len_ratio
+
+         return data.to_dict(orient="list")
+
+     @classmethod
+     def __folder_walk_file_grabber(cls, folder_dir: str, ext: str = ""):
+         all_files = []
+         for child_dir in os.listdir(folder_dir):
+             _full_path = os.path.join(folder_dir, child_dir)
+             if os.path.isdir(_full_path):
+                 all_files.extend(cls.__folder_walk_file_grabber(_full_path, ext))
+             elif _full_path.endswith(ext):
+                 all_files.append(_full_path)
+
+         return all_files
+
+     @classmethod
+     def __lotus_index_generator(cls, root_folder: str):
+         index_raw_data = cls.__read_text_files(f"{root_folder}/index.txt", init_lines_to_skip=5)
+
+         # the index file maps many audio recordings to the same sentence identifier found in PDsen.txt;
+         # except for PD data (phonetically distributed -- one sentence, multiple audios), such duplicated occurrences are filtered out (for now)
+         _index_candidates = [data.split("\t")[2] for data in index_raw_data]
+         valid_idx = [idx for idx, val in Counter(_index_candidates).items() if val == 1 or "pd" in idx]
+
+         # each index line contains a triplet of ("dataset number", "sequence number", "text identifier")
+         metadata = ("dataset_number", "sequence_number")
+         text_index_data = {
+             data.split("\t")[2].strip():
+                 dict(zip(metadata, cls.__strip_text_iterables(data.split("\t")[:2])))
+             for data in index_raw_data if data.split("\t")[2] in valid_idx}
+
+         audio_index_data = {
+             "_".join(values.values()): key for key, values in text_index_data.items()
+         }
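+         # illustrative (hypothetical identifiers) shape of the two mappings built above:
+         #   text_index_data["pd_a001"] -> {"dataset_number": "1", "sequence_number": "0001"}
+         #   audio_index_data["1_0001"] -> "pd_a001"
+         # i.e. audio_index_data inverts text_index_data, keyed by "<dataset_number>_<sequence_number>"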
+
+         return text_index_data, audio_index_data
+
+     @classmethod
+     def __lotus_pd_sen_generator(cls, root_folder: str, valid_idx_key: KeysView):
+         text_data = [text for text in cls.__read_text_files(f"{root_folder}/PDsen.txt")]
+
+         metadata = ("thai_text", "phonemes")
+         captioned_text_data = {
+             text.split("\t")[0].strip():
+                 dict(zip(metadata, cls.__strip_text_iterables(text.split("\t")[1:])))
+             for text in text_data if text.split("\t")[0].strip() in valid_idx_key}
+
+         return captioned_text_data
+
+
+     def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
+         # since the folders are zipped, the zip archive containing the whole resource of this dataset must be downloaded
+         _all_folder_local = os.path.join(dl_manager.download_and_extract(_URL), "LOTUS")
+
+         # process all supplement files
+         # the supplement files are used regardless of the config;
+         # they contain the text & audio index mapper, the word list, and its phonemes
+         supplement_folder = os.path.join(_all_folder_local, "Supplement")
+
+         text_index_data, audio_index_data = self.__lotus_index_generator(supplement_folder)
+         audio_level_text_data = self.__lotus_pd_sen_generator(supplement_folder, text_index_data.keys())
+
+         _folder_structure = config_choices_folder_structure[self.config.subset_id]
+         # the lab folder can be UClab, UOlab, CClab, or COlab, depending on the folder structure chosen by the dataset config name
+         _lab_foldername = _folder_structure[1][0].upper() + _folder_structure[2][0].upper() + "lab"
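+         # e.g. for the "closetalk_clean" config: _folder_structure == ("PD", "C", "Clean") and _lab_foldername == "CClab",
+         # so the folders resolved below are ".../LOTUS/PD/C/Clean/Wav" and ".../LOTUS/PD/C/Clean/CClab"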
+
+         wav_folder = os.path.join(_all_folder_local, os.path.join(*_folder_structure), "Wav")
+         cc_lab_folder = os.path.join(_all_folder_local, os.path.join(*_folder_structure), _lab_foldername)
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "wav_folder": wav_folder,
+                     "cc_lab_folder": cc_lab_folder,
+                     "captioned_data": audio_level_text_data,
+                     "audio_index_data": audio_index_data}
+             )]
+
+     def _generate_examples(self, wav_folder, cc_lab_folder, captioned_data, audio_index_data) -> Tuple[int, Dict]:
+         """
+         This dataset contains two versions of the text:
+         1. Per-syllable transcriptions with their timestamps (from the .lab files)
+         2. A text DB (in PDsen.txt) containing the whole sentence in Thai script and its romanized phonemes
+         """
+         _config_schema_name = self.config.schema
+         # this record list contains the paths of the short Thai-audio .wav files
+         wav_record_list = self.__folder_walk_file_grabber(wav_folder, ".wav")
+
+         idx = 1
+         for audio_path in wav_record_list:
+             audio_id = audio_path.split("/")[-1][:-4]
+             example_data = {"id": idx, "audio_id": audio_id, "file": audio_path, "audio": audio_path}
+
+             # to obtain pd_text_supplement_data, we take the audio index from the filename,
+             # then chain it through audio_index_data into the captioned data
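+             # illustrative chain (hypothetical identifiers): an audio_id such as "xxx_1_0001" gives the lookup key "1_0001",
+             # audio_index_data["1_0001"] == "pd_a001", and captioned_data["pd_a001"] holds the Thai text and phonemes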
+             default_pd_text_data = {"thai_text": "", "romanized_phonemes": ""}
+
+             _pd_text_key = audio_index_data.get("_".join(audio_id.split("_")[1:]))
+             pd_text_supplement_data = captioned_data.get(_pd_text_key, default_pd_text_data)
+
+             example_data.update(pd_text_supplement_data)
+
+             if _config_schema_name == "source":
+                 # add the sequential data parsed from the matching .lab file
+                 cc_lab_data = self.__preprocess_cc_lab_file(os.path.join(cc_lab_folder, audio_id + ".lab"))
+                 example_data.update(cc_lab_data)
+
+                 yield idx, {colname: example_data[colname] for colname in self.info.features}
+
+             elif _config_schema_name == "seacrowd_sptext":
+                 # skip the example if the supplement text data was not found
+                 if pd_text_supplement_data != default_pd_text_data:
+                     yield idx, {"id": idx, "path": example_data["file"], "audio": example_data["audio"], "text": example_data["thai_text"], "speaker_id": None, "metadata": {"speaker_age": None, "speaker_gender": None}}
+
+             else:
+                 raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")
+
+             idx += 1
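
For a quick end-to-end check of the loader added in this commit, the script can be pointed at directly with the datasets library. This is a minimal sketch: the config name is one of the eight generated by BUILDER_CONFIGS above, the script is assumed to sit in the working directory, and newer datasets releases may additionally require trust_remote_code=True.

import datasets

# close-talk / clean recordings with the SEACrowd speech-text schema; the loader only defines a "train" split
dset = datasets.load_dataset("tha_lotus.py", name="tha_lotus_closetalk_clean_seacrowd_sptext", split="train")
print(dset[0]["text"])

The corresponding source config (tha_lotus_closetalk_clean_source) additionally exposes the per-syllable phonemes and their audio_arr_pos_start/audio_arr_pos_end positions parsed from the .lab files.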