may-ohta committed on
Commit
9dbfe46
1 Parent(s): 5dcb683

Create iwslt14.py

Files changed (1)
  1. iwslt14.py +192 -0
iwslt14.py ADDED
@@ -0,0 +1,192 @@
+ # coding=utf-8
+ """The IWSLT 2014 Evaluation Campaign includes a multilingual TED Talks MT task."""
+
+ import datasets
+
+ from bs4 import BeautifulSoup
+ from pathlib import Path
+
+ _CITATION = """\
+ @inproceedings{cettoloEtAl:EAMT2012,
+ Address = {Trento, Italy},
+ Author = {Mauro Cettolo and Christian Girardi and Marcello Federico},
+ Booktitle = {Proceedings of the 16$^{th}$ Conference of the European Association for Machine Translation (EAMT)},
+ Date = {28-30},
+ Month = {May},
+ Pages = {261--268},
+ Title = {WIT$^3$: Web Inventory of Transcribed and Translated Talks},
+ Year = {2012}}
+ """
+
+ _DESCRIPTION = """\
+ The IWSLT 2014 Evaluation Campaign includes the MT track on TED Talks. In this edition, there are five official language pairs:
+
+ from English to French
+ from English to German
+ from German to English
+ from English to Italian
+ from Italian to English
+
+ Optional tasks are proposed with English paired in both directions with twelve other languages:
+
+ from/to English to/from Arabic, Spanish, Farsi, Hebrew, Dutch, Polish, Portuguese-Brazil, Romanian, Russian, Slovenian, Turkish and Chinese
+
+ Submitted runs on additional pairs will be evaluated as well, in the hope of stimulating the MT community to evaluate systems on common benchmarks and to share achievements on challenging translation tasks.
+ """
+
+ _URL = "https://drive.google.com/file/d/1GnBarJIbNgEIIDvUyKDtLmv35Qcxg6Ed/view"
+
+ _HOMEPAGE = "https://wit3.fbk.eu/2014-01"
+
+ _LANGUAGES = ["ar", "de", "es", "fa", "he", "it", "nl", "pl", "pt-br", "ro", "ru", "sl", "tr", "zh"]
+ _PAIRS = [(lang, "en") for lang in _LANGUAGES] + [("en", lang) for lang in _LANGUAGES]
+
+
+ class IWSLT14Config(datasets.BuilderConfig):
+     """BuilderConfig for the IWSLT14 dataset."""
+
+     def __init__(self, language_pair=(None, None), **kwargs):
+         """
+         Args:
+             language_pair: the language pair to consider. Should
+                 contain 2-letter coded strings. For example: ("de", "en").
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super().__init__(
+             name=f"{language_pair[0]}-{language_pair[1]}",
+             description="IWSLT 2014 multilingual dataset.",
+             version=datasets.Version("1.0.0", ""),
+             **kwargs,
+         )
+
+         # Validate the language pair.
+         assert language_pair in _PAIRS, f"{language_pair} is not a supported language pair"
+
+         self.language_pair = language_pair
+
+
+ class IWSLT14(datasets.GeneratorBasedBuilder):
+     """The IWSLT 2014 Evaluation Campaign includes a multilingual TED Talks MT task."""
+
+     BUILDER_CONFIGS = [IWSLT14Config(language_pair=pair) for pair in _PAIRS]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "id": datasets.features.Value(dtype="string", id=None),
+                     "translation": datasets.features.Translation(languages=self.config.language_pair),
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+
+         def _get_drive_url(url):
+             # Turn a Google Drive "view" link into a direct-download link.
+             return f"https://drive.google.com/uc?id={url.split('/')[5]}"
+
+         source, target = self.config.language_pair
+         pair = f"{source}-{target}"
+         ex_dir = dl_manager.download_and_extract(_get_drive_url(_URL))
+         dl_dir = dl_manager.extract(f"{ex_dir}/2014-01/texts/{source}/{target}/{pair}.tgz")
+         path_tmpl = f"{dl_dir}/{pair}/IWSLT14.%s.{pair}.%s.xml"
+         subsets = {
+             "dev": ["TED.dev2010", "TEDX.dev2012"],
+             "test": ["TED.tst2010", "TED.tst2011", "TED.tst2012"],
+         }
+         files = {
+             "train": {
+                 "source_files": [f"{dl_dir}/{pair}/train.tags.{pair}.{source}"],
+                 "target_files": [f"{dl_dir}/{pair}/train.tags.{pair}.{target}"],
+                 "split": "train",
+             },
+             "dev": {
+                 "source_files": [path_tmpl % (year, source) for year in subsets["dev"]],
+                 "target_files": [path_tmpl % (year, target) for year in subsets["dev"]],
+                 "split": "validation",
+             },
+             "test": {
+                 "source_files": [path_tmpl % (year, source) for year in subsets["test"]],
+                 "target_files": [path_tmpl % (year, target) for year in subsets["test"]],
+                 "split": "test",
+             },
+         }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["dev"]),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
+         ]
+
+     def _generate_examples(self, source_files, target_files, split):
+         """Yields examples."""
+         source, target = self.config.language_pair
+
+         def _parse_doc(file_path: str):
+             # train.tags files are not valid XML: metadata lines are tagged,
+             # while transcript lines are plain text, so wrap the plain lines
+             # in <seg> tags before handing the document to BeautifulSoup.
+             html_doc = Path(file_path).read_text(encoding="utf-8")
+             html_doc = ''.join([f'<seg>{line}</seg>' if not line.startswith('<') else line
+                                 for line in html_doc.splitlines()])
+             soup = BeautifulSoup(html_doc, features="lxml")
+             docs = {}
+             docid = 0
+             for url in soup.find_all('url'):
+                 docid += 1
+                 s = url
+                 segid = 0
+                 # Collect the <seg> siblings that follow each <url> tag.
+                 while s.next_sibling and s.next_sibling.name == 'seg':
+                     segid += 1
+                     docs[f'docid-{docid}_segid-{segid}'] = s.next_sibling.text.strip()
+                     s = s.next_sibling
+             return docs
+
+         def _parse_xml(file_path: str):
+             # The dev/test files are XML: one <doc> per talk, carrying a
+             # <title>, a <description> and numbered <seg> elements.
+             html_doc = Path(file_path).read_text(encoding="utf-8")
+             soup = BeautifulSoup(html_doc, features="lxml")
+             docs = {}
+             for doc in soup.find_all('doc'):
+                 docid = doc.attrs['docid']
+                 for title in doc.find_all('title'):
+                     docs[f'docid-{docid}_title'] = title.text.strip()
+                 for desc in doc.find_all('description'):
+                     docs[f'docid-{docid}_desc'] = desc.text.strip()
+                 for seg in doc.find_all('seg'):
+                     segid = seg.attrs['id']
+                     docs[f'docid-{docid}_segid-{segid}'] = seg.text.strip()
+             return docs
+
+         def _parse(file_path: str):
+             if file_path.endswith('.xml'):
+                 return _parse_xml(file_path)
+             else:
+                 return _parse_doc(file_path)
+
+         id_ = 0
+         for source_file, target_file in zip(source_files, target_files):
+             src = _parse(source_file)
+             trg = _parse(target_file)
+
+             # Only yield segments that appear on both sides of the pair.
+             for k, src_sent in src.items():
+                 if k in trg:
+                     trg_sent = trg[k]
+
+                     yield id_, {
+                         "id": k,
+                         "translation": {source: src_sent, target: trg_sent}
+                     }
+                     id_ += 1
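
For reference, a minimal usage sketch, not part of the commit itself. It assumes the script above is saved locally as iwslt14.py, that beautifulsoup4 and lxml are installed, and that the Google Drive file is still downloadable; config names follow the "<source>-<target>" pattern built by IWSLT14Config, e.g. "de-en" for German-to-English.

# Minimal usage sketch (assumptions: script saved locally as iwslt14.py,
# beautifulsoup4 and lxml installed, Google Drive download still available).
import datasets

# "de-en" selects the German-to-English config defined by _PAIRS above.
dataset = datasets.load_dataset("iwslt14.py", "de-en")

print(dataset)  # DatasetDict with train / validation / test splits
print(dataset["train"][0])
# e.g. {"id": "docid-1_segid-1", "translation": {"de": "...", "en": "..."}}

On recent versions of the datasets library, loading a script like this may additionally require passing trust_remote_code=True to load_dataset.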