taqwa92 committed on
Commit
2e479eb
1 Parent(s): 27c561d

Upload mg.trial4.py

Browse files
Files changed (1) hide show
  1. mg.trial4.py +202 -0
mg.trial4.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """Untitled2.ipynb
3
+
4
+ Automatically generated by Colaboratory.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1Jy8fwFO774TM_FTwK-0to2L0qHoUAT-U
8
+ """
9
+
10
+ # -*- coding: utf-8 -*-
11
+ """MGB2.ipynb
12
+ Automatically generated by Colaboratory.
13
+ Original file is located at
14
+ https://colab.research.google.com/drive/15ejoy2EWN9bj2s5ORQRZb5aTmFlcgA9d
15
+ """
16
+
17
+ import datasets
18
+ import os
19
+
20
+
21
+ _DESCRIPTION = "MGB2 speech recognition dataset AR"
22
+ _HOMEPAGE = "https://arabicspeech.org/mgb2/"
23
+ _LICENSE = "MGB-2 License agreement"
24
+
25
+
26
+
27
+
28
+
29
+ _DATA_ARCHIVE_ROOT = "https://huggingface.co/datasets/taqwa92/mg.trial4/blob/main/audio/ar/test"
30
+ _DATA_ARCHIVE_ROOT1 ="https://huggingface.co/datasets/taqwa92/mg.trial4/blob/main/transcript/ar/test"
31
+
32
+ _DATA_URL = {
33
+ "test": _DATA_ARCHIVE_ROOT + "test.zip",
34
+
35
+ }
36
+
37
+ _TEXT_URL = {
38
+ "test": _DATA_ARCHIVE_ROOT1 + "test.tsv",
39
+
40
+
41
+ class MGDB2Dataset(datasets.GeneratorBasedBuilder):
42
+ def _info(self):
43
+ return datasets.DatasetInfo(
44
+ description=_DESCRIPTION,
45
+ features=datasets.Features(
46
+ {
47
+ "path": datasets.Value("string"),
48
+ "audio": datasets.Audio(sampling_rate=16_000),
49
+ "text": datasets.Value("string"),
50
+ }
51
+ ),
52
+ supervised_keys=None,
53
+ homepage=_HOMEPAGE,
54
+ license=_LICENSE,
55
+
56
+ )
57
+
58
+ def _split_generators(self, dl_manager):
59
+ wav_archive = dl_manager.download(_DATA_URL)
60
+ txt_archive = dl_manager.download(_TEXT_URL)
61
+ test_dir = "dataset/test"
62
+ dev_dir = "dataset/dev"
63
+ train_dir = "dataset/train"
64
+
65
+
66
+ print("Starting write datasets.........................................................")
67
+
68
+
69
+ if dl_manager.is_streaming:
70
+ print("from streaming.........................................................")
71
+
72
+
73
+
74
+
75
+ return [
76
+ datasets.SplitGenerator(
77
+ name=datasets.Split.TEST,
78
+ gen_kwargs={
79
+ "path_to_txt": test_dir + "/txt",
80
+ "path_to_wav": test_dir + "/wav",
81
+ "wav_files": dl_manager.iter_archive(wav_archive['test']),
82
+ "txt_files": dl_manager.iter_archive(txt_archive['test']),
83
+ },
84
+ ),
85
+ datasets.SplitGenerator(
86
+ name=datasets.Split.VALIDATION,
87
+ gen_kwargs={
88
+ "path_to_txt": dev_dir + "/txt",
89
+ "path_to_wav": dev_dir + "/wav",
90
+ "wav_files": dl_manager.iter_archive(wav_archive['dev']),
91
+ "txt_files": dl_manager.iter_archive(txt_archive['dev']),
92
+ },
93
+ ),
94
+ datasets.SplitGenerator(
95
+ name=datasets.Split.TRAIN,
96
+ gen_kwargs={
97
+ "path_to_txt": train_dir + "/txt",
98
+ "path_to_wav": train_dir + "/wav",
99
+ "wav_files": dl_manager.iter_archive(wav_archive['train']),
100
+ "txt_files": dl_manager.iter_archive(txt_archive['train']),
101
+ },
102
+ ),
103
+ ]
104
+ else:
105
+ print("from non streaming.........................................................")
106
+
107
+
108
+ test_txt_files=dl_manager.extract(txt_archive['test']);
109
+ print("txt file list .....................................",txt_archive['test'])
110
+
111
+
112
+ print("txt file names .....................................",test_txt_files)
113
+
114
+
115
+ return [
116
+ datasets.SplitGenerator(
117
+ name=datasets.Split.TEST,
118
+ gen_kwargs={
119
+ "path_to_txt": test_dir + "/txt",
120
+ "path_to_wav": test_dir + "/wav",
121
+ "wav_files": dl_manager.extract(wav_archive['test']),
122
+ "txt_files": test_txt_files,
123
+ },
124
+ ),
125
+ datasets.SplitGenerator(
126
+ name=datasets.Split.VALIDATION,
127
+ gen_kwargs={
128
+ "path_to_txt": dev_dir + "/txt",
129
+ "path_to_wav": dev_dir + "/wav",
130
+ "wav_files": dl_manager.extract(wav_archive['dev']),
131
+ "txt_files": dl_manager.extract(txt_archive['dev']),
132
+ },
133
+ ),
134
+ datasets.SplitGenerator(
135
+ name=datasets.Split.TRAIN,
136
+ gen_kwargs={
137
+ "path_to_txt": train_dir + "/txt",
138
+ "path_to_wav": train_dir + "/wav",
139
+ "wav_files": dl_manager.extract(wav_archive['train']),
140
+ "txt_files": dl_manager.extract(txt_archive['train']),
141
+ },
142
+ ),
143
+ ]
144
+ print("end of generation.........................................................")
145
+
146
+
147
+
148
+
149
+ def _generate_examples(self, path_to_txt, path_to_wav, wav_files, txt_files):
150
+ """
151
+ This assumes that the text directory alphabetically precedes the wav dir
152
+ The file names for wav and text seem to match and are unique
153
+ We can use them for the dictionary matching them
154
+ """
155
+
156
+ print("start of generate examples.........................................................")
157
+
158
+ print("txt file names............................",txt_files)
159
+ print("wav_files names....................................",wav_files)
160
+
161
+ examples = {}
162
+ id_ = 0
163
+ # need to prepare the transcript - wave map
164
+ for item in txt_files:
165
+
166
+
167
+ print("copying txt file...............",item)
168
+
169
+ if type(item) is tuple:
170
+ # iter_archive will return path and file
171
+ path, f = item
172
+ txt = f.read().decode(encoding="utf-8").strip()
173
+ else:
174
+ # extract will return path only
175
+ path = item
176
+ with open(path, encoding="utf-8") as f:
177
+ txt = f.read().strip()
178
+
179
+ if path.find(path_to_txt) > -1:
180
+ # construct the wav path
181
+ # which is used as an identifier
182
+ wav_path = os.path.split(path)[1].replace("_utf8", "").replace(".txt", ".wav").strip()
183
+
184
+ examples[wav_path] = {
185
+ "text": txt,
186
+ "path": wav_path,
187
+ }
188
+
189
+ for wf in wav_files:
190
+ for item in wf:
191
+ if type(item) is tuple:
192
+ path, f = item
193
+ wav_data = f.read()
194
+ else:
195
+ path = item
196
+ with open(path, "rb") as f:
197
+ wav_data = f.read()
198
+ if path.find(path_to_wav) > -1:
199
+ wav_path = os.path.split(path)[1].strip()
200
+ audio = {"path": path, "bytes": wav_data}
201
+ yield id_, {**examples[wav_path], "audio": audio}
202
+ id_ += 1