Datasets:

Multilinguality:
multilingual
Language Creators:
crowdsourced
Annotations Creators:
crowdsourced
Source Datasets:
extended|common_voice
ArXiv:
1609.05625
License:
MGB-2 License agreement
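The loading script below exposes three columns per example: path, audio (16 kHz), and text. A minimal usage sketch, assuming the script is saved locally as mgb2.py next to the Data/archives/ files (the script path is illustrative):

from datasets import load_dataset

# Stream the dev split without extracting the zip archives locally.
mgb2 = load_dataset("mgb2.py", split="validation", streaming=True)
sample = next(iter(mgb2))
print(sample["text"])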
Files changed (1)
mgb2.py +217 -0
mgb2.py ADDED
@@ -0,0 +1,217 @@
+ # -*- coding: utf-8 -*-
+ """MGB2.ipynb
+ Automatically generated by Colaboratory.
+ Original file is located at
+ https://colab.research.google.com/drive/15ejoy2EWN9bj2s5ORQRZb5aTmFlcgA9d
+ """
+
+ import os
+
+ import datasets
+
+ _DESCRIPTION = "MGB-2: an Arabic multi-dialect broadcast media speech recognition dataset"
+ _HOMEPAGE = "https://arabicspeech.org/mgb2/"
+ _LICENSE = "MGB-2 License agreement"
+ _CITATION = """@misc{https://doi.org/10.48550/arxiv.1609.05625,
+   doi = {10.48550/ARXIV.1609.05625},
+   url = {https://arxiv.org/abs/1609.05625},
+   author = {Ali, Ahmed and Bell, Peter and Glass, James and Messaoui, Yacine and Mubarak, Hamdy and Renals, Steve and Zhang, Yifan},
+   keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences},
+   title = {The MGB-2 Challenge: Arabic Multi-Dialect Broadcast Media Recognition},
+   publisher = {arXiv},
+   year = {2016},
+   copyright = {arXiv.org perpetual, non-exclusive license}
+ }
+ """
+
+ # Archive paths are relative to the dataset repository root.
+ _DATA_ARCHIVE_ROOT = "Data/archives/"
+ _DATA_URL = {
+     "test": _DATA_ARCHIVE_ROOT + "mgb2_wav.test.zip",
+     "dev": _DATA_ARCHIVE_ROOT + "mgb2_wav.dev.zip",
+     "train": _DATA_ARCHIVE_ROOT + "mgb2_wav.train.zip",
+     # "train": [_DATA_ARCHIVE_ROOT + f"mgb2_wav_{x}.train.tar.gz" for x in range(48)],  # we have 48 archives
+ }
+ _TEXT_URL = {
+     "test": _DATA_ARCHIVE_ROOT + "mgb2_txt.test.zip",
+     "dev": _DATA_ARCHIVE_ROOT + "mgb2_txt.dev.zip",
+     "train": _DATA_ARCHIVE_ROOT + "mgb2_txt.train.zip",
+ }
+
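+ # Expected archive layout, inferred from the member-path checks and the
+ # "_utf8" suffix handling further below (an assumption, not verified here):
+ #   mgb2_wav.<split>.zip -> dataset/<split>/wav/<name>.wav
+ #   mgb2_txt.<split>.zip -> dataset/<split>/txt/<name>_utf8.txt
+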
+ class MGB2Dataset(datasets.GeneratorBasedBuilder):
+     """Loader for the MGB-2 Arabic broadcast speech corpus."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "path": datasets.Value("string"),
+                     "audio": datasets.Audio(sampling_rate=16_000),
+                     "text": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         wav_archive = dl_manager.download(_DATA_URL)
+         txt_archive = dl_manager.download(_TEXT_URL)
+         test_dir = "dataset/test"
+         dev_dir = "dataset/dev"
+         train_dir = "dataset/train"
+
+         if dl_manager.is_streaming:
+             # Streaming mode: pass archive iterators so members are read
+             # on the fly without extracting the zip files to disk.
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "path_to_txt": test_dir + "/txt",
+                         "path_to_wav": test_dir + "/wav",
+                         "wav_files": dl_manager.iter_archive(wav_archive["test"]),
+                         "txt_files": dl_manager.iter_archive(txt_archive["test"]),
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "path_to_txt": dev_dir + "/txt",
+                         "path_to_wav": dev_dir + "/wav",
+                         "wav_files": dl_manager.iter_archive(wav_archive["dev"]),
+                         "txt_files": dl_manager.iter_archive(txt_archive["dev"]),
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "path_to_txt": train_dir + "/txt",
+                         "path_to_wav": train_dir + "/wav",
+                         "wav_files": dl_manager.iter_archive(wav_archive["train"]),
+                         "txt_files": dl_manager.iter_archive(txt_archive["train"]),
+                     },
+                 ),
+             ]
+         else:
+             # Non-streaming mode: extract the archives and pass the
+             # extracted directory paths instead of archive iterators.
+             return [
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TEST,
+                     gen_kwargs={
+                         "path_to_txt": test_dir + "/txt",
+                         "path_to_wav": test_dir + "/wav",
+                         "wav_files": dl_manager.extract(wav_archive["test"]),
+                         "txt_files": dl_manager.extract(txt_archive["test"]),
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.VALIDATION,
+                     gen_kwargs={
+                         "path_to_txt": dev_dir + "/txt",
+                         "path_to_wav": dev_dir + "/wav",
+                         "wav_files": dl_manager.extract(wav_archive["dev"]),
+                         "txt_files": dl_manager.extract(txt_archive["dev"]),
+                     },
+                 ),
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN,
+                     gen_kwargs={
+                         "path_to_txt": train_dir + "/txt",
+                         "path_to_wav": train_dir + "/wav",
+                         "wav_files": dl_manager.extract(wav_archive["train"]),
+                         "txt_files": dl_manager.extract(txt_archive["train"]),
+                     },
+                 ),
+             ]
+
+     def _generate_examples(self, path_to_txt, path_to_wav, wav_files, txt_files):
+         """Pair each transcript with its wav file.
+
+         Transcript and wav file names match one-to-one (up to the
+         "_utf8"/".txt" suffixes), so the shared base name is used as the
+         dictionary key. In streaming mode `txt_files`/`wav_files` are
+         (path, file-object) iterators from `iter_archive`; in
+         non-streaming mode they are paths to the extracted directories.
+         """
+
+         def iter_files(source):
+             # Normalize every input shape to (path, bytes) pairs.
+             if isinstance(source, str):
+                 # extract() returned a directory path: walk it on disk.
+                 for root, _, names in os.walk(source):
+                     for name in names:
+                         file_path = os.path.join(root, name)
+                         with open(file_path, "rb") as f:
+                             yield file_path, f.read()
+             elif isinstance(source, list):
+                 # A split backed by several archives (e.g. the 48-archive
+                 # train variant commented out above) arrives as a list.
+                 for sub_source in source:
+                     yield from iter_files(sub_source)
+             else:
+                 # iter_archive() yields (path inside archive, file object).
+                 for path, f in source:
+                     yield path, f.read()
+
+         examples = {}
+         id_ = 0
+         # First pass: build the transcript map keyed by wav file name.
+         for path, data in iter_files(txt_files):
+             if path_to_txt in path:
+                 wav_name = os.path.split(path)[1].replace("_utf8", "").replace(".txt", ".wav").strip()
+                 examples[wav_name] = {
+                     "text": data.decode("utf-8").strip(),
+                     "path": wav_name,
+                 }
+
+         # Second pass: attach the audio bytes and yield the examples.
+         for path, wav_data in iter_files(wav_files):
+             if path_to_wav in path:
+                 wav_name = os.path.split(path)[1].strip()
+                 if wav_name in examples:  # skip wavs without a transcript
+                     audio = {"path": path, "bytes": wav_data}
+                     yield id_, {**examples[wav_name], "audio": audio}
+                     id_ += 1
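
A follow-up sketch of the non-streaming path, assuming the archives extract to dataset/<split>/{txt,wav} as the checks above expect ("mgb2.py" again stands for wherever the script is saved):

from datasets import load_dataset

# Archives are downloaded and extracted, then _generate_examples walks
# the extracted directories and pairs X_utf8.txt with X.wav.
ds = load_dataset("mgb2.py", split="test")
print(ds[0]["path"], ds[0]["text"][:50])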