Datasets:

ArXiv:
License:
xinjianl committed on
Commit
b929e20
1 Parent(s): fc6063c

Upload 2 files

Browse files
Files changed (2) hide show
  1. meta.py +233 -0
  2. yodas2.py +206 -0
meta.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Number of data shards available per language subset of the YODAS2 dataset.
# Keys are a two-letter language code plus a three-digit subset suffix
# (e.g. 'en001'); values are the shard count N for that subset, i.e. files
# data/<key>/audio/00000000.tar.gz .. audio/{N-1:08d}.tar.gz together with the
# matching text/<i:08d>.json and duration/<i:08d>.txt files (see yodas2.py).
lang2shard_cnt = {
    'aa000': 2,
    'ab000': 2,
    'af000': 2,
    'ak000': 2,
    'am000': 9,
    'ar000': 154,
    'as000': 2,
    'ay000': 2,
    'az000': 4,
    'ba000': 2,
    'be000': 7,
    'bg000': 12,
    'bh000': 2,
    'bi000': 2,
    'bm000': 2,
    'bn000': 92,
    'bo000': 2,
    'br000': 2,
    'bs000': 2,
    'ca000': 10,
    'co000': 2,
    'cr000': 2,
    'cs000': 24,
    'cy000': 2,
    'da000': 6,
    'de000': 369,
    'de100': 500,
    'de101': 500,
    'de102': 114,
    'dz000': 2,
    'ee000': 2,
    'el000': 18,
    'en000': 500,
    'en001': 500,
    'en002': 500,
    'en003': 500,
    'en004': 500,
    'en005': 500,
    'en006': 500,
    'en007': 437,
    'en100': 500,
    'en101': 500,
    'en102': 500,
    'en103': 500,
    'en104': 500,
    'en105': 500,
    'en106': 500,
    'en107': 500,
    'en108': 500,
    'en109': 500,
    'en110': 500,
    'en111': 500,
    'en112': 500,
    'en113': 500,
    'en114': 500,
    'en115': 500,
    'en116': 500,
    'en117': 500,
    'en118': 500,
    'en119': 500,
    'en120': 500,
    'en121': 500,
    'en122': 500,
    'en123': 500,
    'en124': 500,
    'en125': 500,
    'en126': 500,
    'en127': 500,
    'en128': 500,
    'en129': 62,
    'eo000': 4,
    'es000': 483,
    'es100': 500,
    'es101': 500,
    'es102': 500,
    'es103': 500,
    'es104': 500,
    'es105': 500,
    'es106': 500,
    'es107': 500,
    'es108': 201,
    'et000': 2,
    'eu000': 4,
    'fa000': 12,
    'ff000': 2,
    'fi000': 28,
    'fj000': 2,
    'fo000': 2,
    'fr000': 315,
    'fr100': 500,
    'fr101': 500,
    'fr102': 500,
    'fr103': 401,
    'fy000': 1,
    'ga000': 2,
    'gd000': 2,
    'gl000': 3,
    'gn000': 2,
    'gu000': 8,
    'ha000': 4,
    'hi000': 182,
    'hi100': 7,
    'ho000': 2,
    'hr000': 5,
    'ht000': 3,
    'hu000': 32,
    'hy000': 3,
    'ia000': 2,
    'id000': 493,
    'id100': 500,
    'id101': 419,
    'ie000': 2,
    'ig000': 2,
    'ik000': 2,
    'is000': 2,
    'it000': 185,
    'it100': 500,
    'it101': 432,
    'iu000': 2,
    'iw000': 21,
    'ja000': 211,
    'ja100': 303,
    'jv000': 2,
    'ka000': 4,
    'ki000': 1,
    'kk000': 6,
    'kl000': 2,
    'km000': 10,
    'kn000': 7,
    'ko000': 391,
    'ko100': 500,
    'ko101': 500,
    'ko102': 500,
    'ko103': 287,
    'ks000': 2,
    'ku000': 2,
    'ky000': 4,
    'la000': 2,
    'lb000': 2,
    'lg000': 2,
    'ln000': 2,
    'lo000': 2,
    'lt000': 4,
    'lv000': 2,
    'mg000': 2,
    'mi000': 2,
    'mk000': 2,
    'ml000': 12,
    'mn000': 2,
    'mr000': 18,
    'ms000': 8,
    # NOTE(review): zero shards — this config would yield no data; confirm intended.
    'mt000': 0,
    'my000': 2,
    'na000': 2,
    'nd000': 1,
    'ne000': 6,
    'nl000': 52,
    'nl100': 263,
    'no000': 17,
    'nv000': 2,
    'oc000': 2,
    'om000': 2,
    'or000': 3,
    'pa000': 5,
    'pl000': 140,
    'ps000': 2,
    'pt000': 202,
    'pt100': 500,
    'pt101': 500,
    'pt102': 500,
    'pt103': 382,
    'qu000': 2,
    'rm000': 2,
    'rn000': 2,
    'ro000': 18,
    'ru000': 500,
    'ru001': 287,
    'ru100': 500,
    'ru101': 500,
    'ru102': 500,
    'ru103': 500,
    'ru104': 500,
    'ru105': 500,
    'ru106': 439,
    'rw000': 2,
    'sa000': 2,
    'sc000': 2,
    'sd000': 2,
    'sg000': 1,
    'sh000': 1,
    'si000': 8,
    'sk000': 6,
    'sl000': 4,
    'sm000': 2,
    'sn000': 2,
    'so000': 4,
    'sq000': 2,
    'sr000': 4,
    'st000': 2,
    'su000': 2,
    'sv000': 17,
    'sw000': 4,
    'ta000': 40,
    'te000': 14,
    'tg000': 2,
    'th000': 113,
    'th100': 2,
    'ti000': 2,
    'tk000': 2,
    'tn000': 2,
    'to000': 2,
    'tr000': 155,
    'tr100': 440,
    'ts000': 1,
    'tt000': 2,
    'ug000': 2,
    'uk000': 63,
    'uk100': 56,
    'ur000': 35,
    'uz000': 8,
    've000': 2,
    'vi000': 465,
    'vi100': 500,
    'vi101': 472,
    'vo000': 2,
    'wo000': 2,
    'xh000': 2,
    'yi000': 2,
    'yo000': 2,
    'zh000': 42,
    'zu000': 2,
}
yodas2.py ADDED
@@ -0,0 +1,206 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from collections import OrderedDict
3
+ from pathlib import Path
4
+ import datasets
5
+ import os
6
+ from .meta import lang2shard_cnt
7
+ import json
8
+
9
+
10
class Yodas2Config(datasets.BuilderConfig):
    """BuilderConfig for Yodas2.

    One config per language subset; the config name is the language code
    and the data lives under ``data/<lang>`` in the repository.
    """

    def __init__(self, lang, version, **kwargs):
        # The language code doubles as the config name and as the
        # sub-directory that holds this subset's shards.
        self.language = lang
        self.base_data_path = f"data/{lang}"
        super(Yodas2Config, self).__init__(
            name=lang,
            version=datasets.Version(version),
            description=f"Youtube speech to text dataset in {self.language}.",
            **kwargs,
        )
26
+
27
+
28
# Config name used when the caller does not pick a specific language.
DEFAULT_CONFIG_NAME = "all"
# One builder config per key of the shard-count table in meta.py.
LANGS = list(lang2shard_cnt.keys())
# Version string handed to every Yodas2Config below.
VERSION = "1.0.0"
31
+
32
class Yodas2(datasets.GeneratorBasedBuilder):
    """YodasSample dataset.

    Each shard consists of a tarball of audio files, a JSON file mapping
    each video to its utterance texts, and a plain-text file mapping each
    video to its duration in seconds. Examples are yielded per video,
    keeping only videos present in both the text and duration indexes.
    """

    BUILDER_CONFIGS = [
        Yodas2Config(lang, version=VERSION) for lang in LANGS
    ]

    VERSION = datasets.Version("1.0.1")

    def _info(self):
        return datasets.DatasetInfo(
            description="Yodas Sample",
            features=datasets.Features(
                OrderedDict(
                    [
                        ("id", datasets.Value("string")),
                        ("video_id", datasets.Value("string")),
                        ("duration", datasets.Value("float")),
                        ("audio", datasets.Audio(sampling_rate=24_000)),
                        ("utterances", datasets.Sequence(feature={"utt_id": datasets.Value(dtype="string"),
                                                                  "text": datasets.Value(dtype="string"),
                                                                  "start": datasets.Value(dtype="float"),
                                                                  "end": datasets.Value(dtype="float")}))
                    ]
                )
            ),
            supervised_keys=None,
            homepage="",  # TODO
            citation="",  # TODO
        )

    @staticmethod
    def _load_utterances(text_file):
        """Parse one shard's JSON index into video_id -> list of utterance dicts.

        Start/end timestamps are encoded in each utterance key's last two
        '-'-separated fields as centiseconds.
        """
        video2text = {}
        # Use a context manager so the handle is closed (the original
        # open(...).read() leaked the file descriptor).
        with open(text_file, 'r') as f:
            json_obj_lst = json.load(f)
        for json_obj in json_obj_lst:
            video_id = json_obj['audio_id']
            utterances = []
            for utt_id, text in sorted(json_obj['text'].items()):
                fields = utt_id.split('-')
                utterances.append({
                    'utt_id': utt_id,
                    'text': text,
                    'start': float(fields[-2]) / 100,
                    'end': float(fields[-1]) / 100,
                })
            video2text[video_id] = utterances
        return video2text

    @staticmethod
    def _load_durations(duration_file):
        """Parse one shard's duration index into video_id -> duration (seconds).

        Each line is: ``<video_id> <duration>`` separated by whitespace.
        """
        video2duration = {}
        with open(duration_file) as f:
            for row in f:
                fields = row.strip().split()
                video2duration[fields[0]] = float(fields[1])
        return video2duration

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        total_cnt = lang2shard_cnt[self.config.name]

        audio_tar_files = dl_manager.download([f"{self.config.base_data_path}/audio/{i:08d}.tar.gz" for i in range(total_cnt)])
        text_files = dl_manager.download([f"{self.config.base_data_path}/text/{i:08d}.json" for i in range(total_cnt)])
        duration_files = dl_manager.download([f"{self.config.base_data_path}/duration/{i:08d}.txt" for i in range(total_cnt)])

        if dl_manager.is_streaming:
            # Streaming: iterate the tarballs lazily; text/duration files are
            # small enough to extract eagerly.
            audio_archives = [dl_manager.iter_archive(tar_file) for tar_file in audio_tar_files]
            text_archives = [dl_manager.extract(text_file) for text_file in text_files]
            duration_archives = [dl_manager.extract(duration_file) for duration_file in duration_files]
        else:
            # Non-streaming: extract every tarball to a directory up front.
            # Text/duration files are consumed directly from the download
            # cache (as in the original implementation, they are not
            # passed through extract() here).
            audio_archives = list(dl_manager.extract(audio_tar_files))
            text_archives = list(text_files)
            duration_archives = list(duration_files)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "is_streaming": dl_manager.is_streaming,
                    "audio_archives": audio_archives,
                    "text_archives": text_archives,
                    "duration_archives": duration_archives,
                },
            ),
        ]

    def _generate_examples(self, is_streaming, audio_archives, text_archives, duration_archives):
        """Yields examples, one per video found in both shard indexes."""
        global_id = 0

        for audio_source, text_file, duration_file in zip(audio_archives, text_archives, duration_archives):
            # Both branches share the same per-shard index parsing.
            video2text = self._load_utterances(text_file)
            video2duration = self._load_durations(duration_file)

            if is_streaming:
                # audio_source is an iter_archive generator of (path, fileobj).
                for path, audio_f in audio_source:
                    video_id = Path(path).stem
                    if video_id in video2text and video_id in video2duration:
                        yield global_id, {
                            'id': global_id,
                            'video_id': video_id,
                            'audio': {"path": None, "bytes": audio_f.read()},
                            'duration': video2duration[video_id],
                            'utterances': video2text[video_id],
                        }
                        global_id += 1
            else:
                # audio_source is the directory the tarball was extracted to.
                for audio_file in Path(audio_source).glob('*'):
                    video_id = audio_file.stem
                    if video_id in video2text and video_id in video2duration:
                        with open(audio_file, 'rb') as audio_f:
                            audio_bytes = audio_f.read()
                        yield global_id, {
                            'id': global_id,
                            'video_id': video_id,
                            'duration': video2duration[video_id],
                            'audio': {"path": str(audio_file.absolute()), "bytes": audio_bytes},
                            'utterances': video2text[video_id],
                        }
                        global_id += 1