Update loader
forai_ml-ted_talk_iwslt.py  (ADDED, +442 -0)
@@ -0,0 +1,442 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TED TALKS IWSLT: Web Inventory of Transcribed and Translated Ted Talks in 109 languages."""


import io
import xml.etree.ElementTree as ET
import zipfile
from collections import defaultdict

import datasets

logger = datasets.logging.get_logger(__name__)


_CITATION = """\
@inproceedings{cettolo-etal-2012-wit3,
    title = "{WIT}3: Web Inventory of Transcribed and Translated Talks",
    author = "Cettolo, Mauro  and
      Girardi, Christian  and
      Federico, Marcello",
    booktitle = "Proceedings of the 16th Annual conference of the European Association for Machine Translation",
    month = may # " 28{--}30",
    year = "2012",
    address = "Trento, Italy",
    publisher = "European Association for Machine Translation",
    url = "https://www.aclweb.org/anthology/2012.eamt-1.60",
    pages = "261--268",
}
"""

_DESCRIPTION = """\
The core of WIT3 is the TED Talks corpus, which redistributes the original content published by the TED Conference website (http://www.ted.com). Since 2007,
the TED Conference, based in California, has been posting all video recordings of its talks together with subtitles in English
and their translations in more than 80 languages. Aside from its cultural and social relevance, this content, which is published under the Creative Commons BY-NC-ND license, also represents a precious
language resource for the machine translation research community, thanks to its size, variety of topics, and covered languages.
This effort repurposes the original content in a way which is more convenient for machine translation researchers.
"""

_HOMEPAGE = "https://wit3.fbk.eu/"

_LICENSE = "CC-BY-NC-4.0"

# The HuggingFace datasets library doesn't host the dataset; this URL points to the released archive.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).
# _URL = "data/XML_releases.tgz"
_URL = "https://huggingface.co/datasets/ted_talks_iwslt/resolve/main/data/XML_releases.tgz"


_LANGUAGES = (
    "mr",
    "eu",
    "hr",
    "rup",
    "szl",
    "lo",
    "ms",
    "ht",
    "hy",
    "mg",
    "arq",
    "uk",
    "ku",
    "ig",
    "sr",
    "ug",
    "ne",
    "pt-br",
    "sq",
    "af",
    "km",
    "en",
    "tt",
    "ja",
    "inh",
    "mn",
    "eo",
    "ka",
    "nb",
    "fil",
    "uz",
    "fi",
    "tl",
    "el",
    "tg",
    "bn",
    "si",
    "gu",
    "sk",
    "kn",
    "ar",
    "hup",
    "zh-tw",
    "sl",
    "be",
    "bo",
    "fr",
    "ps",
    "tr",
    "ltg",
    "la",
    "ko",
    "lv",
    "nl",
    "fa",
    "ru",
    "et",
    "vi",
    "pa",
    "my",
    "sw",
    "az",
    "sv",
    "ga",
    "sh",
    "it",
    "da",
    "lt",
    "kk",
    "mk",
    "tlh",
    "he",
    "ceb",
    "bg",
    "fr-ca",
    "ha",
    "ml",
    "mt",
    "as",
    "pt",
    "zh-cn",
    "cnh",
    "ro",
    "hi",
    "es",
    "id",
    "bs",
    "so",
    "cs",
    "te",
    "ky",
    "hu",
    "th",
    "pl",
    "nn",
    "ca",
    "is",
    "ta",
    "de",
    "srp",
    "ast",
    "bi",
    "lb",
    "art-x-bork",
    "am",
    "oc",
    "zh",
    "ur",
    "gl",
)
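# (109 codes in total, matching the count in the module docstring.)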

# Note that only a few language pairs are listed here; configs for any pair in
# _LANGUAGES (and any release year) can be built via the config kwargs.
_LANGUAGE_PAIRS = [
    ("eu", "ca"),
    ("nl", "en"),
    ("nl", "hi"),
    ("de", "ja"),
    ("fr-ca", "hi"),
]

# Date suffixes appended to the per-language zip file names in each release
_YEAR = {"2014": "-20140120", "2015": "-20150530", "2016": "-20160408"}

_YEAR_FOLDER = {
    "2014": "XML_releases/xml-20140120",
    "2015": "XML_releases/xml-20150616",
    "2016": "XML_releases/xml",
}
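# For example (mirroring the path building in `_generate_examples` below), the
# "nl_en_2014" config reads these two members of the downloaded archive:
#   XML_releases/xml-20140120/ted_nl-20140120.zip
#   XML_releases/xml-20140120/ted_en-20140120.zip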


class TedTalksIWSLTConfig(datasets.BuilderConfig):
    """BuilderConfig for the TedTalks IWSLT dataset."""

    def __init__(self, language_pair=(None, None), year=None, **kwargs):
        """BuilderConfig for the TedTalks IWSLT dataset.

        Args:
            language_pair: pair of languages that will be used for translation. Should
                contain two language code strings. The first will be used as source and
                the second as target in supervised mode. For example: ("pl", "en").
            year: release year of the XML files, one of "2014", "2015" or "2016".
            **kwargs: keyword arguments forwarded to super.
        """
        # Validate the language pair and year.
        name = "%s_%s_%s" % (language_pair[0], language_pair[1], year)
        source, target = language_pair
        assert source in _LANGUAGES, f"Invalid source code in language pair: {source}"
        assert target in _LANGUAGES, f"Invalid target code in language pair: {target}"
        assert (
            source != target
        ), f"Source::{source} and Target::{target} language pairs cannot be the same!"
        assert year in _YEAR.keys(), f"Invalid year: {year}; expected one of {list(_YEAR)}"

        description = (
            f"Translation Ted Talks dataset (WIT3) between {source} and {target}"
        )
        super(TedTalksIWSLTConfig, self).__init__(
            name=name,
            description=description,
            **kwargs,
        )

        self.language_pair = language_pair
        self.year = year


class TedTalksIWSLT(datasets.GeneratorBasedBuilder):
    """TED TALKS IWSLT: Web Inventory of Transcribed and Translated Ted Talks in 109 languages."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIG_CLASS = TedTalksIWSLTConfig

    BUILDER_CONFIGS = [
        TedTalksIWSLTConfig(
            language_pair=language_pair, year=year, version=datasets.Version("1.1.0")
        )
        for language_pair in _LANGUAGE_PAIRS
        for year in _YEAR.keys()
    ]
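    # Config names follow "<source>_<target>_<year>" (see TedTalksIWSLTConfig),
    # e.g. "eu_ca_2014", "nl_en_2015", "fr-ca_hi_2016".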

    def _info(self):
        # NOTE: the schema below matches the examples yielded by `_generate_examples`,
        # which emit "source"/"target" strings plus the two language codes. (A
        # `Translation(languages=...)` feature would expect the language codes
        # themselves as keys and would reject those examples.)
        features = datasets.Features(
            {
                "translation": {
                    "source": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "src_lang": datasets.Value("string"),
                    "tgt_lang": datasets.Value("string"),
                },
            },
        )

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,
            # There is no canonical (input, target) tuple here, so leave
            # `supervised_keys` unset; it would be used if as_supervised=True
            # in builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = dl_manager.download(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_archive(data_dir),
                },
            ),
        ]

    def _generate_examples(self, files):
        """Yields examples."""

        def parse_zip_file(path, file):
            def et_to_dict(tree):
                """Convert an XML element tree into a nested dict structure."""

                dct = {tree.tag: {} if tree.attrib else None}
                children = list(tree)
                if children:
                    dd = defaultdict(list)
                    for dc in map(et_to_dict, children):
                        for k, v in dc.items():
                            dd[k].append(v)
                    dct = {tree.tag: dd}
                if tree.attrib:
                    dct[tree.tag].update((k, v) for k, v in tree.attrib.items())
                if tree.text:
                    text = tree.text.strip()
                    if children or tree.attrib:
                        if text:
                            dct[tree.tag]["text"] = text
                    else:
                        dct[tree.tag] = text
                return dct
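            # For instance, child elements become single-item lists keyed by tag:
            #   et_to_dict(ET.fromstring("<head><talkid>1</talkid></head>"))
            #   -> {"head": {"talkid": ["1"]}}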

            with zipfile.ZipFile(io.BytesIO(file)) as zf:
                # Initialize so that a parse failure still returns a well-defined pair.
                talks, ids = [], []
                try:
                    # The zip member shares the archive's base name, e.g.
                    # "ted_en-20140120.zip" contains "ted_en-20140120.xml".
                    tree = ET.parse(zf.open(path.split("/")[-1][:-3] + "xml"))
                    root = tree.getroot()
                    talks = et_to_dict(root).get("xml").get("file")
                    ids = [talk.get("head")[0].get("talkid") for talk in talks]
                except Exception as pe:
                    logger.warning(f"ERROR: {pe}")
                    logger.warning(
                        "This likely means that you have a malformed XML file!"
                    )
                    talks, ids = [], []
                return talks, ids

        language_pair = self.config.language_pair
        year = self.config.year

        source_file_path = (
            _YEAR_FOLDER[year] + "/ted_" + language_pair[0] + _YEAR[year] + ".zip"
        )
        target_file_path = (
            _YEAR_FOLDER[year] + "/ted_" + language_pair[1] + _YEAR[year] + ".zip"
        )

        source_talks, source_ids = None, None
        target_talks, target_ids = None, None
        for path, file in files:
            if source_ids is not None and target_ids is not None:
                break

            if source_ids is None and path.endswith(source_file_path):
                source_talks, source_ids = parse_zip_file(path, file.read())
            elif target_ids is None and path.endswith(target_file_path):
                target_talks, target_ids = parse_zip_file(path, file.read())

        if source_ids is None or target_ids is None:
            source_ids = list()
            target_ids = list()

        # Only talks present on both sides can be paired.
        comm_talkids = [talkid for talkid in target_ids if talkid in source_ids]

        translation = list()
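        # Each matched talk contributes up to three kinds of rows: its description
        # (id "<talkid>_1"), its title (id "<talkid>_2"), and one row per aligned
        # subtitle segment (ids taken from the <seekvideo> "id" attribute).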

        for talkid in comm_talkids:
            source = list(
                filter(
                    lambda talk: talk.get("head")[0].get("talkid") == talkid,
                    source_talks,
                )
            )
            target = list(
                filter(
                    lambda talk: talk.get("head")[0].get("talkid") == talkid,
                    target_talks,
                )
            )

            # Skip talks that are missing on either side.
            if len(source) == 0 or len(target) == 0:
                continue
            source = source[0]
            target = target[0]

            # Pair the talk descriptions when both sides provide one.
            if source.get("head")[0].get("description") and target.get("head")[0].get(
                "description"
            ):
                if (
                    source.get("head")[0].get("description")[0]
                    and target.get("head")[0].get("description")[0]
                ):
                    temp_dict = dict()
                    temp_dict["id"] = source.get("head")[0].get("talkid")[0] + "_1"
                    temp_dict[language_pair[0]] = (
                        source.get("head")[0]
                        .get("description")[0]
                        .replace("TED Talk Subtitles and Transcript: ", "")
                    )
                    temp_dict[language_pair[1]] = (
                        target.get("head")[0]
                        .get("description")[0]
                        .replace("TED Talk Subtitles and Transcript: ", "")
                    )
                    translation.append(temp_dict)

            # Pair the talk titles.
            if source.get("head")[0].get("title") and target.get("head")[0].get(
                "title"
            ):
                if (
                    source.get("head")[0].get("title")[0]
                    and target.get("head")[0].get("title")[0]
                ):
                    temp_dict = dict()
                    temp_dict["id"] = source.get("head")[0].get("talkid")[0] + "_2"
                    temp_dict[language_pair[0]] = source.get("head")[0].get("title")[0]
                    temp_dict[language_pair[1]] = target.get("head")[0].get("title")[0]
                    translation.append(temp_dict)

            # Pair the aligned subtitle segments. The <seekvideo> nodes live under
            # <transcription> inside <head>, so the presence check is done on
            # "transcription" before drilling down to "seekvideo".
            if source.get("head")[0].get("transcription") and target.get("head")[0].get(
                "transcription"
            ):
                source_transc = (
                    source.get("head")[0].get("transcription")[0].get("seekvideo")
                )
                target_transc = (
                    target.get("head")[0].get("transcription")[0].get("seekvideo")
                )

                transc = zip(source_transc, target_transc)
                transcriptions = [
                    {
                        "id": s.get("id"),
                        language_pair[0]: s.get("text"),
                        language_pair[1]: t.get("text"),
                    }
                    for s, t in transc
                ]
                translation.extend(transcriptions)
        for talk_segment in translation:
            result = {
                "translation": {
                    "source": talk_segment[language_pair[0]],
                    "target": talk_segment[language_pair[1]],
                    "src_lang": language_pair[0],
                    "tgt_lang": language_pair[1],
                }
            }
            yield talk_segment["id"], result
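

# A minimal usage sketch (an assumption for illustration, not part of the loader:
# it presumes this script is saved locally under this file name, and the config
# kwargs match the builder above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset(
#         "./forai_ml-ted_talk_iwslt.py",
#         language_pair=("nl", "en"),
#         year="2014",
#         split="train",
#     )
#     print(ds[0]["translation"])
#     # e.g. {"source": "...", "target": "...", "src_lang": "nl", "tgt_lang": "en"}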