Jzuluaga committed
Commit
0427629
1 Parent(s): a35f9e7

updating the repo with the loader script

Files changed (1)
  1. atc_data_loader.py +274 -0
atc_data_loader.py ADDED
@@ -0,0 +1,274 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright © <2022> Idiap Research Institute <contact@idiap.ch>
#
# SPDX-FileContributor: Juan Zuluaga-Gomez <jzuluaga@idiap.ch>
#
# SPDX-License-Identifier: MIT

"""\
Script for loading air traffic control (ATC) speech datasets for automatic speech recognition (ASR).
This script has been designed for ATC datasets that are in Kaldi format.

Required files: text, wav.scp and segments

- Databases
    - Training:
        - ATCOSIM, LDC-ATCC and UWB-ATCC corpora.
    - Testing:
        - ATCO2-test-set, ATCOSIM, LDC-ATCC and UWB-ATCC corpora.
"""

import os
import re

import datasets
import numpy as np
import soundfile as sf
from datasets.tasks import AutomaticSpeechRecognition

_CITATION = """\
@article{zuluaga2022does,
  title={How Does Pre-trained Wav2Vec 2.0 Perform on Domain Shifted ASR? An Extensive Benchmark on Air Traffic Control Communications},
  author={Zuluaga-Gomez, Juan and Prasad, Amrutha and Nigmatulina, Iuliia and Sarfjoo, Saeed and Motlicek, Petr and Kleinert, Matthias and Helmke, Hartmut and Ohneiser, Oliver and Zhan, Qingran},
  journal={2022 IEEE Spoken Language Technology Workshop (SLT), Doha, Qatar},
  year={2022}
}
@article{zuluagabertraffic,
  title={BERTraffic: BERT-based Joint Speaker Role and Speaker Change Detection for Air Traffic Control Communications (submitted to @ SLT-2022)},
  author={Zuluaga-Gomez, Juan and Sarfjoo, Seyyed Saeed and Prasad, Amrutha and Nigmatulina, Iuliia and Motlicek, Petr and Ohneiser, Oliver and Helmke, Hartmut},
  journal={2022 IEEE Spoken Language Technology Workshop (SLT), Doha, Qatar},
  year={2022}
}
"""

_DESCRIPTION = """\
ATC speech DATASET. This DataLoader works with data in Kaldi format.
- We use the following files: text, segments and wav.scp
    - text --> utt_id transcript
    - segments --> utt_id recording_id t_begin t_end
    - wav.scp --> recording_id /path/to/wav/
The default dataset is from the ATCO2 project, a 1-hour sample: https://www.replaywell.com/atco2/download/ATCO2-ASRdataset-v1_beta.tgz
"""

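# ---------------------------------------------------------------------------
# Illustrative Kaldi-style inputs (hypothetical utterance/recording IDs and
# paths, not from any corpus), following the file layout described above:
#
#   text:      utt_001 lufthansa one two three descend flight level one zero zero
#   segments:  utt_001 rec_001 12.34 15.87
#   wav.scp:   rec_001 /path/to/rec_001.wav
# ---------------------------------------------------------------------------
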
_DATA_URL = "http://catalog.elra.info/en-us/repository/browse/ELRA-S0484/"

_HOMEPAGE = "https://github.com/idiap/w2v2-air-traffic"

logger = datasets.logging.get_logger(__name__)

# our models work with audio data sampled at 16 kHz
_SAMPLING_RATE = 16_000


class ATCDataASRConfig(datasets.BuilderConfig):
    """BuilderConfig for air traffic control datasets."""

    def __init__(self, **kwargs):
        """
        Args:
            data_dir: `string`, the path to the folder containing the Kaldi files to read: text, segments and wav.scp
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class ATCDataASR(datasets.GeneratorBasedBuilder):

    DEFAULT_WRITER_BATCH_SIZE = 256
    DEFAULT_CONFIG_NAME = "all"
    BUILDER_CONFIGS = [
        # TRAIN, DEV AND TEST DATASETS
        ATCDataASRConfig(name="train", description="ATC train dataset."),
        ATCDataASRConfig(name="dev", description="ATC dev dataset."),
        ATCDataASRConfig(name="test", description="ATC test dataset."),
        # UNSUPERVISED DATASETS
        ATCDataASRConfig(name="unsupervised", description="ATC unsupervised dataset."),
    ]

    # provide some information about the dataset we just gathered
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "file": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=_SAMPLING_RATE),
                    "text": datasets.Value("string"),
                    "segment_start_time": datasets.Value("float"),
                    "segment_end_time": datasets.Value("float"),
                    "duration": datasets.Value("float"),
                }
            ),
            supervised_keys=("audio", "text"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            task_templates=[
                AutomaticSpeechRecognition(
                    audio_column="audio", transcription_column="text"
                )
            ],
        )

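    # For reference, a single example produced by this builder looks roughly
    # like this (hypothetical values, matching the features declared above):
    #   {"id": "utt_001", "file": "/path/to/rec_001.wav",
    #    "audio": {"path": "/path/to/rec_001.wav", "array": <np.ndarray>, "sampling_rate": 16000},
    #    "text": "lufthansa one two three descend flight level one zero zero",
    #    "segment_start_time": 12.34, "segment_end_time": 15.87, "duration": 3.53}
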
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        split = self.config.name

        # UNSUPERVISED set (used only for decoding)
        if "unsupervised" in split:
            split_name = datasets.Split.TEST
        elif "test" in split or "dev" in split or "dummy" in split:
            split_name = datasets.Split.TEST
        # the only option left is the train set
        else:
            split_name = datasets.Split.TRAIN

        # you need to pass a data directory where the Kaldi folder is stored
        filepath = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=split_name,
                # these kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": filepath,
                    "split": split,
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """You need to pass a path with the Kaldi data; the folder should have
        audio: wav.scp,
        transcripts: text,
        timing information: segments
        """

        logger.info("Generating examples located in: %s", filepath)

        text_file = os.path.join(filepath, "text")
        wavscp = os.path.join(filepath, "wav.scp")
        segments = os.path.join(filepath, "segments")

        id_ = ""
        text_dict, wav_dict = {}, {}
        segments_dict, utt2wav_id = {}, {}

        # get the text file
        with open(text_file) as text_f:
            for line in text_f:
                if len(line.split(" ")) > 1:
                    id_, transcript = line.split(" ", maxsplit=1)
                    transcript = _remove_special_characters(transcript)
                    # skip empty or one-character transcripts
                    if len(transcript) < 2:
                        continue
                    text_dict[id_] = transcript
                else:  # the line carries only an utterance ID
                    # this is expected for the unsupervised set; otherwise skip it
                    if "unsupervised" not in self.config.name:
                        continue
                    id_ = line.rstrip().split(" ")[0]
                    text_dict[id_] = ""

        # get wav.scp and load the audio data into memory
        with open(wavscp) as text_f:
            for line in text_f:
                if line:
                    if len(line.split()) < 2:
                        continue
                    id_, wavpath = line.split(" ", maxsplit=1)
                    # only select the token that ends in wav, flac or sph
                    wavpath = [
                        x
                        for x in wavpath.split(" ")
                        if ".wav" in x or ".WAV" in x or ".flac" in x or ".sph" in x
                    ][0].rstrip()

                    # read the audio samples
                    segment, sampling_rate = sf.read(wavpath, dtype=np.int16)
                    wav_dict[id_] = [wavpath, segment, sampling_rate]

        # get the segments dictionary
        with open(segments) as text_f:
            for line in text_f:
                if line:
                    if len(line.split()) < 4:
                        continue
                    id_, wavid_, start, end = line.rstrip().split(" ")
                    segments_dict[id_] = start.rstrip(), end.rstrip()
                    utt2wav_id[id_] = wavid_

        for utt_id, text in text_dict.items():
            if utt_id in utt2wav_id and utt_id in segments_dict:

                # get the audio data from memory and the path of the file
                wavpath, segment, sampling_rate = wav_dict[utt2wav_id[utt_id]]
                # get timing information
                seg_start, seg_end = segments_dict[utt_id]
                seg_start, seg_end = float(seg_start), float(seg_end)
                duration = round(seg_end - seg_start, 3)

                # get the samples, already cropped to the segment
                samples = _extract_audio_segment(
                    segment, sampling_rate, seg_start, seg_end
                )

                # output example for the given dataset
                example = {
                    "audio": {
                        "path": wavpath,
                        "array": samples,
                        "sampling_rate": sampling_rate,
                    },
                    "id": utt_id,
                    "file": wavpath,
                    "text": text,
                    "segment_start_time": round(seg_start, 3),
                    "segment_end_time": round(seg_end, 3),
                    "duration": duration,
                }

                yield utt_id, example


def _remove_special_characters(text):
    """Remove some special characters/symbols from the given transcript."""

    text = text.split(" ")
    # first remove words between [] and <>
    text = " ".join(
        x
        for x in text
        if "[" not in x and "]" not in x and "<" not in x and ">" not in x
    )

    # regex with predefined symbols to ignore/remove
    chars_to_ignore_regex = r'[{\[\]<>/,?.!\u00AC;:"%\\]|[0-9]'

    text = re.sub(chars_to_ignore_regex, "", text).lower()
    sentence = text.replace("\u2013", "-")
    sentence = sentence.replace("\u2014", "-")
    sentence = sentence.replace("\u2018", "'")
    sentence = sentence.replace("\u201C", "")
    sentence = sentence.replace("\u201D", "")
    sentence = sentence.replace("ñ", "n")
    sentence = sentence.replace(" - ", " ")
    sentence = sentence.replace("-", "")
    sentence = sentence.replace("'", " ")
    return sentence.lower().rstrip()

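# Illustrative behaviour of `_remove_special_characters` on a hypothetical
# transcript (not from the corpus): bracketed/tagged tokens are dropped,
# digits and punctuation are stripped, and the text is lower-cased:
#   _remove_special_characters("Lufthansa [noise] descend FL100.")
#   -> "lufthansa descend fl"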

def _extract_audio_segment(segment, sampling_rate, start_sec, end_sec):
    """Extract a segment of audio samples (as an ndarray) from the given recording."""
    # the dataset only contains mono audio
    start_sample = int(start_sec * sampling_rate)
    end_sample = min(int(end_sec * sampling_rate), segment.shape[0])
    samples = segment[start_sample:end_sample]
    return samples
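

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original loader): how the
# script is typically driven through `datasets.load_dataset`. The script
# path and `data_dir` below are placeholders; `data_dir` is assumed to point
# at a Kaldi-style folder holding the text, segments and wav.scp files.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "atc_data_loader.py",  # path to this loader script (placeholder)
        "train",  # config name: one of train, dev, test, unsupervised
        data_dir="/path/to/kaldi/folder",  # placeholder Kaldi data folder
    )
    print(dataset)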