vgaraujov committed on
Commit 23e4d0d
1 Parent(s): d03c4e8

Upload 5 files

Files changed (5)
  1. README.md +71 -0
  2. data/fapesp-v2.zip +3 -0
  3. dataset_info.json +57 -0
  4. fapesp-v2.py +61 -0
  5. wmt_utils.py +549 -0
README.md ADDED
@@ -0,0 +1,71 @@
1
+ ---
2
+ language:
3
+ - en
4
+ - es
5
+ - pt
6
+ license:
7
+ - cc-by-2.0
8
+ multilinguality:
9
+ - translation
10
+ task_categories:
11
+ - translation
12
+ task_ids: []
13
+ dataset_info:
14
+ - config_name: en-pt
15
+ features:
16
+ - name: translation
17
+ dtype:
18
+ translation:
19
+ languages:
20
+ - en
21
+ - pt
22
+ splits:
23
+ - name: train
24
+ num_bytes: 47417503
25
+ num_examples: 160975
26
+ - name: validation
27
+ num_bytes: 405055
28
+ num_examples: 1375
29
+ - name: test
30
+ num_bytes: 407579
31
+ num_examples: 1447
32
+ download_size: 29615550
33
+ dataset_size: 48230137
34
+ - config_name: es-pt
35
+ features:
36
+ - name: translation
37
+ dtype:
38
+ translation:
39
+ languages:
40
+ - es
41
+ - pt
42
+ splits:
43
+ - name: train
44
+ num_bytes: 47480897
45
+ num_examples: 158197
46
+ - name: validation
47
+ num_bytes: 377101
48
+ num_examples: 1302
49
+ - name: test
50
+ num_bytes: 400915
51
+ num_examples: 1379
52
+ download_size: 29829573
53
+ dataset_size: 48258913
54
+ configs:
55
+ - config_name: en-pt
56
+ data_files:
57
+ - split: train
58
+ path: en-pt/train-*
59
+ - split: validation
60
+ path: en-pt/validation-*
61
+ - split: test
62
+ path: en-pt/test-*
63
+ - config_name: es-pt
64
+ data_files:
65
+ - split: train
66
+ path: es-pt/train-*
67
+ - split: validation
68
+ path: es-pt/validation-*
69
+ - split: test
70
+ path: es-pt/test-*
71
+ ---
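The YAML header above declares two configurations, `en-pt` and `es-pt`, each with `train`, `validation`, and `test` splits. A minimal usage sketch, assuming the repository id `vgaraujov/fapesp-v2` (the id used in the download URLs below); depending on your `datasets` version, `trust_remote_code=True` may also be required because the repository ships a loading script:

```python
from datasets import load_dataset

# Load the English-Portuguese configuration declared in the YAML header.
ds = load_dataset("vgaraujov/fapesp-v2", "en-pt")

print(ds)                             # DatasetDict with train/validation/test splits
print(ds["train"][0]["translation"])  # e.g. {'en': '...', 'pt': '...'}
```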
data/fapesp-v2.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf6bdf40cd2de72fe8b8bd1a260dd975aaf1b2b0d51a6f2bcad868ef77777d3f
3
+ size 35276668
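The zip itself lives in Git LFS; the pointer above only records the object's SHA-256 and size. A small sketch, assuming a locally downloaded copy at a placeholder path, of checking that copy against the pointer fields:

```python
import hashlib

# Values taken from the LFS pointer above.
EXPECTED_OID = "cf6bdf40cd2de72fe8b8bd1a260dd975aaf1b2b0d51a6f2bcad868ef77777d3f"
EXPECTED_SIZE = 35276668

def matches_pointer(path: str) -> bool:
    """Return True if the file at `path` matches the LFS oid and size."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

print(matches_pointer("data/fapesp-v2.zip"))  # placeholder local path
```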
dataset_info.json ADDED
@@ -0,0 +1,57 @@
1
+ {
2
+ "description": "Translation dataset based on the data from statmt.org.\n\nVersions exist for different years using a combination of data\nsources. The base `wmt` allows you to create a custom dataset by choosing\nyour own data/language pair. This can be done as follows:\n\n```python\nfrom datasets import inspect_dataset, load_dataset_builder\n\ninspect_dataset(\"wmt16\", \"path/to/scripts\")\nbuilder = load_dataset_builder(\n \"path/to/scripts/wmt_utils.py\",\n language_pair=(\"fr\", \"de\"),\n subsets={\n datasets.Split.TRAIN: [\"commoncrawl_frde\"],\n datasets.Split.VALIDATION: [\"euelections_dev2019\"],\n },\n)\n\n# Standard version\nbuilder.download_and_prepare()\nds = builder.as_dataset()\n\n# Streamable version\nds = builder.as_streaming_dataset()\n```\n\n",
3
+ "citation": "\n@inproceedings{aziz-specia-2011-fully,\n title = "Fully Automatic Compilation of {P}ortuguese-{E}nglish and {P}ortuguese-{S}panish Parallel Corpora",\n author = "Aziz, Wilker and
4
+ Specia, Lucia",\n booktitle = "Proceedings of the 8th {B}razilian Symposium in Information and Human Language Technology",\n year = "2011",\n url = "https://aclanthology.org/W11-4533",\n}\n",
5
+ "homepage": "http://www.nilc.icmc.usp.br/nilc/tools/Fapesp%20Corpora.htm",
6
+ "license": "",
7
+ "features": {
8
+ "translation": {
9
+ "languages": [
10
+ "es",
11
+ "pt"
12
+ ],
13
+ "_type": "Translation"
14
+ }
15
+ },
16
+ "supervised_keys": {
17
+ "input": "es",
18
+ "output": "pt"
19
+ },
20
+ "builder_name": "fapesp-v2",
21
+ "config_name": "es-pt",
22
+ "version": {
23
+ "version_str": "1.0.0",
24
+ "major": 1,
25
+ "minor": 0,
26
+ "patch": 0
27
+ },
28
+ "splits": {
29
+ "train": {
30
+ "name": "train",
31
+ "num_bytes": 47480897,
32
+ "num_examples": 158197,
33
+ "dataset_name": "fapesp-v2"
34
+ },
35
+ "validation": {
36
+ "name": "validation",
37
+ "num_bytes": 377101,
38
+ "num_examples": 1302,
39
+ "dataset_name": "fapesp-v2"
40
+ },
41
+ "test": {
42
+ "name": "test",
43
+ "num_bytes": 400915,
44
+ "num_examples": 1379,
45
+ "dataset_name": "fapesp-v2"
46
+ }
47
+ },
48
+ "download_checksums": {
49
+ "https://huggingface.co/datasets/vgaraujov/fapesp-v2/resolve/main/data/fapesp-v2.zip": {
50
+ "num_bytes": 35276668,
51
+ "checksum": "cf6bdf40cd2de72fe8b8bd1a260dd975aaf1b2b0d51a6f2bcad868ef77777d3f"
52
+ }
53
+ },
54
+ "download_size": 35276668,
55
+ "dataset_size": 48258913,
56
+ "size_in_bytes": 83535581
57
+ }
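This metadata mirrors what the `datasets` library exposes at runtime. A sketch of inspecting the same fields programmatically, assuming the script-based builder is resolved (recent `datasets` releases require `trust_remote_code=True` for that):

```python
from datasets import load_dataset_builder

# Sketch: inspect the builder metadata without downloading the corpus.
builder = load_dataset_builder(
    "vgaraujov/fapesp-v2", "es-pt", trust_remote_code=True
)
info = builder.info

print(info.supervised_keys)  # input 'es', output 'pt', as recorded above
if info.splits:  # may be None until the dataset has been prepared
    for name, split in info.splits.items():
        print(name, split.num_examples)  # e.g. train 158197, validation 1302, test 1379
```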
fapesp-v2.py ADDED
@@ -0,0 +1,61 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Portuguese-English and Portuguese-Spanish bilingual collections."""
18
+
19
+ import datasets
20
+
21
+ from .wmt_utils import Wmt, WmtConfig
22
+
23
+
24
+ _URL = "http://www.nilc.icmc.usp.br/nilc/tools/Fapesp%20Corpora.htm"
25
+ _CITATION = """
26
+ @inproceedings{aziz-specia-2011-fully,
27
+ title = "Fully Automatic Compilation of {P}ortuguese-{E}nglish and {P}ortuguese-{S}panish Parallel Corpora",
28
+ author = "Aziz, Wilker and
29
+ Specia, Lucia",
30
+ booktitle = "Proceedings of the 8th {B}razilian Symposium in Information and Human Language Technology",
31
+ year = "2011",
32
+ url = "https://aclanthology.org/W11-4533",
33
+ }
34
+ """
35
+
36
+ _LANGUAGE_PAIRS = [(lang, "pt") for lang in ["es", "en"]]
37
+
38
+
39
+ class Fapespv2(Wmt):
40
+ """Fapesp v2 translation datasets for all {xx, "pt"} language pairs."""
41
+
42
+ BUILDER_CONFIGS = [
43
+ WmtConfig( # pylint:disable=g-complex-comprehension
44
+ description="Fapesp v2 %s-%s translation task dataset." % (l1, l2),
45
+ url=_URL,
46
+ citation=_CITATION,
47
+ language_pair=(l1, l2),
48
+ version=datasets.Version("1.0.0"),
49
+ )
50
+ for l1, l2 in _LANGUAGE_PAIRS
51
+ ]
52
+
53
+ @property
54
+ def _subsets(self):
55
+ return {
56
+ datasets.Split.TRAIN: [
57
+ "fapesp-v2",
58
+ ],
59
+ datasets.Split.VALIDATION: ["dev"],
60
+ datasets.Split.TEST: ["test-b"],
61
+ }
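Because `WmtConfig` names each config `"<l1>-<l2>"`, the two entries in `BUILDER_CONFIGS` become the `es-pt` and `en-pt` configurations advertised in the README, and `_subsets` wires the Hugging Face splits to the FAPESP files: `train` reads the `fapesp-v2` corpus, `validation` the `dev` set, and `test` the `test-b` set. A sketch of driving the script from a local checkout, assuming a `datasets` release that still executes dataset scripts (the path is a placeholder):

```python
import datasets

# Placeholder path to a local checkout containing fapesp-v2.py and wmt_utils.py.
builder = datasets.load_dataset_builder("path/to/fapesp-v2/fapesp-v2.py", "en-pt")

print(builder.config.language_pair)           # ('en', 'pt')
print(builder._subsets[datasets.Split.TEST])  # ['test-b'], per the property above
```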
wmt_utils.py ADDED
@@ -0,0 +1,549 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ #
16
+ # Script borrowed and adapted from https://huggingface.co/datasets/wmt16.
17
+
18
+ # Lint as: python3
19
+ """WMT: Translate dataset."""
20
+
21
+
22
+ import codecs
23
+ import functools
24
+ import glob
25
+ import gzip
26
+ import itertools
27
+ import os
28
+ import re
29
+ import xml.etree.cElementTree as ElementTree
30
+
31
+ import datasets
32
+
33
+
34
+ logger = datasets.logging.get_logger(__name__)
35
+
36
+
37
+ _DESCRIPTION = """\
38
+ Translation dataset based on the data from statmt.org.
39
+
40
+ Versions exist for different years using a combination of data
41
+ sources. The base `wmt` allows you to create a custom dataset by choosing
42
+ your own data/language pair. This can be done as follows:
43
+
44
+ ```python
45
+ from datasets import inspect_dataset, load_dataset_builder
46
+
47
+ inspect_dataset("wmt16", "path/to/scripts")
48
+ builder = load_dataset_builder(
49
+ "path/to/scripts/wmt_utils.py",
50
+ language_pair=("fr", "de"),
51
+ subsets={
52
+ datasets.Split.TRAIN: ["commoncrawl_frde"],
53
+ datasets.Split.VALIDATION: ["euelections_dev2019"],
54
+ },
55
+ )
56
+
57
+ # Standard version
58
+ builder.download_and_prepare()
59
+ ds = builder.as_dataset()
60
+
61
+ # Streamable version
62
+ ds = builder.as_streaming_dataset()
63
+ ```
64
+
65
+ """
66
+
67
+
68
+ class SubDataset:
69
+ """Class to keep track of information on a sub-dataset of WMT."""
70
+
71
+ def __init__(self, name, target, sources, url, path, manual_dl_files=None):
72
+ """Sub-dataset of WMT.
73
+
74
+ Args:
75
+ name: `string`, a unique dataset identifier.
76
+ target: `string`, the target language code.
77
+ sources: `set<string>`, the set of source language codes.
78
+ url: `string` or `(string, string)`, URL(s) or URL template(s) specifying
79
+ where to download the raw data from. If two strings are provided, the
80
+ first is used for the source language and the second for the target.
81
+ Template strings can either contain '{src}' placeholders that will be
82
+ filled in with the source language code, '{0}' and '{1}' placeholders
83
+ that will be filled in with the source and target language codes in
84
+ alphabetical order, or all 3.
85
+ path: `string` or `(string, string)`, path(s) or path template(s)
86
+ specifying the path to the raw data relative to the root of the
87
+ downloaded archive. If two strings are provided, the dataset is assumed
88
+ to be made up of parallel text files, the first being the source and the
89
+ second the target. If one string is provided, both languages are assumed
90
+ to be stored within the same file and the extension is used to determine
91
+ how to parse it. Template strings should be formatted the same as in
92
+ `url`.
93
+ manual_dl_files: `<list>(string)` (optional), the list of files that must
94
+ be manually downloaded to the data directory.
95
+ """
96
+ self._paths = (path,) if isinstance(path, str) else path
97
+ self._urls = (url,) if isinstance(url, str) else url
98
+ self._manual_dl_files = manual_dl_files if manual_dl_files else []
99
+ self.name = name
100
+ self.target = target
101
+ self.sources = set(sources)
102
+
103
+ def _inject_language(self, src, strings):
104
+ """Injects languages into (potentially) template strings."""
105
+ if src not in self.sources:
106
+ raise ValueError(f"Invalid source for '{self.name}': {src}")
107
+
108
+ def _format_string(s):
109
+ if "{0}" in s and "{1}" in s and "{src}" in s:
110
+ return s.format(*sorted([src, self.target]), src=src)
111
+ elif "{0}" in s and "{1}" in s:
112
+ return s.format(*sorted([src, self.target]))
113
+ elif "{src}" in s:
114
+ return s.format(src=src)
115
+ else:
116
+ return s
117
+
118
+ return [_format_string(s) for s in strings]
119
+
120
+ def get_url(self, src):
121
+ return self._inject_language(src, self._urls)
122
+
123
+ def get_manual_dl_files(self, src):
124
+ return self._inject_language(src, self._manual_dl_files)
125
+
126
+ def get_path(self, src):
127
+ return self._inject_language(src, self._paths)
128
+
129
+
130
+ # Subsets used in the training sets for the FAPESP-v2 Portuguese-English and Portuguese-Spanish corpora.
131
+ _TRAIN_SUBSETS = [
132
+ SubDataset(
133
+ name="fapesp-v2",
134
+ target="pt",
135
+ sources={"es", "en"},
136
+ url="https://huggingface.co/datasets/vgaraujov/fapesp-v2/resolve/main/data/fapesp-v2.zip",
137
+ path=("fapesp-v2.pt-{src}.train.{src}", "fapesp-v2.pt-{src}.train.pt"),
138
+ ),
139
+ ]
140
+
141
+ _DEV_SUBSETS = [
142
+ SubDataset(
143
+ name="dev",
144
+ target="pt",
145
+ sources={"es", "en"},
146
+ url="https://huggingface.co/datasets/vgaraujov/fapesp-v2/resolve/main/data/fapesp-v2.zip",
147
+ path=("fapesp-v2.pt-{src}.dev.{src}", "fapesp-v2.pt-{src}.dev.pt"),
148
+ ),
149
+ SubDataset(
150
+ name="dev-test",
151
+ target="pt",
152
+ sources={"es", "en"},
153
+ url="https://huggingface.co/datasets/vgaraujov/fapesp-v2/resolve/main/data/fapesp-v2.zip",
154
+ path=("fapesp-v2.pt-{src}.dev-test.{src}", "fapesp-v2.pt-{src}.dev-test.pt"),
155
+ ),
156
+ SubDataset(
157
+ name="test-a",
158
+ target="pt",
159
+ sources={"es", "en"},
160
+ url="https://huggingface.co/datasets/vgaraujov/fapesp-v2/resolve/main/data/fapesp-v2.zip",
161
+ path=("fapesp-v2.pt-{src}.test-a.{src}", "fapesp-v2.pt-{src}.test-a.pt"),
162
+ ),
163
+ SubDataset(
164
+ name="test-b",
165
+ target="pt",
166
+ sources={"es", "en"},
167
+ url="https://huggingface.co/datasets/vgaraujov/fapesp-v2/resolve/main/data/fapesp-v2.zip",
168
+ path=("fapesp-v2.pt-{src}.test-b.{src}", "fapesp-v2.pt-{src}.test-b.pt"),
169
+ ),
170
+ ]
171
+
172
+ DATASET_MAP = {dataset.name: dataset for dataset in _TRAIN_SUBSETS + _DEV_SUBSETS}
173
+
174
+
175
+ class WmtConfig(datasets.BuilderConfig):
176
+ """BuilderConfig for WMT."""
177
+
178
+ def __init__(self, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs):
179
+ """BuilderConfig for WMT.
180
+
181
+ Args:
182
+ url: The reference URL for the dataset.
183
+ citation: The paper citation for the dataset.
184
+ description: The description of the dataset.
185
+ language_pair: pair of languages that will be used for translation. Should
186
+ contain 2 letter coded strings. For example: ("en", "de").
187
+ The pair is also used to configure the
188
+ `datasets.features.Translation` feature of the dataset.
189
+ subsets: Dict[split, list[str]]. List of the subset to use for each of the
190
+ split. Note that WMT subclasses overwrite this parameter.
191
+ **kwargs: keyword arguments forwarded to super.
192
+ """
193
+ name = "%s-%s" % (language_pair[0], language_pair[1])
194
+ if "name" in kwargs: # Add name suffix for custom configs
195
+ name += "." + kwargs.pop("name")
196
+
197
+ super(WmtConfig, self).__init__(name=name, description=description, **kwargs)
198
+
199
+ self.url = url or "http://www.statmt.org"
200
+ self.citation = citation
201
+ self.language_pair = language_pair
202
+ self.subsets = subsets
203
+
204
+
205
+ class Wmt(datasets.GeneratorBasedBuilder):
206
+ """WMT translation dataset."""
207
+
208
+ BUILDER_CONFIG_CLASS = WmtConfig
209
+
210
+ def __init__(self, *args, **kwargs):
211
+ super(Wmt, self).__init__(*args, **kwargs)
212
+
213
+ @property
214
+ def _subsets(self):
215
+ """Subsets that make up each split of the dataset."""
216
+ raise NotImplementedError("This is an abstract method")
217
+
218
+ @property
219
+ def subsets(self):
220
+ """Subsets that make up each split of the dataset for the language pair."""
221
+ source, target = self.config.language_pair
222
+ filtered_subsets = {}
223
+ subsets = self._subsets if self.config.subsets is None else self.config.subsets
224
+ for split, ss_names in subsets.items():
225
+ filtered_subsets[split] = []
226
+ for ss_name in ss_names:
227
+ dataset = DATASET_MAP[ss_name]
228
+ if dataset.target != target or source not in dataset.sources:
229
+ logger.info("Skipping sub-dataset that does not include language pair: %s", ss_name)
230
+ else:
231
+ filtered_subsets[split].append(ss_name)
232
+ logger.info("Using sub-datasets: %s", filtered_subsets)
233
+ return filtered_subsets
234
+
235
+ def _info(self):
236
+ src, target = self.config.language_pair
237
+ return datasets.DatasetInfo(
238
+ description=_DESCRIPTION,
239
+ features=datasets.Features(
240
+ {"translation": datasets.features.Translation(languages=self.config.language_pair)}
241
+ ),
242
+ supervised_keys=(src, target),
243
+ homepage=self.config.url,
244
+ citation=self.config.citation,
245
+ )
246
+
247
+ def _vocab_text_gen(self, split_subsets, extraction_map, language):
248
+ for _, ex in self._generate_examples(split_subsets, extraction_map, with_translation=False):
249
+ yield ex[language]
250
+
251
+ def _split_generators(self, dl_manager):
252
+ source, _ = self.config.language_pair
253
+ manual_paths_dict = {}
254
+ urls_to_download = {}
255
+ for ss_name in itertools.chain.from_iterable(self.subsets.values()):
256
+
257
+ # get dataset
258
+ dataset = DATASET_MAP[ss_name]
259
+ if dataset.get_manual_dl_files(source):
260
+ # TODO(PVP): following two lines skip configs that are incomplete for now
261
+ # +++++++++++++++++++++
262
+ logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
263
+ continue
264
+ # +++++++++++++++++++++
265
+
266
+ manual_dl_files = dataset.get_manual_dl_files(source)
267
+ manual_paths = [
268
+ os.path.join(os.path.abspath(os.path.expanduser(dl_manager.manual_dir)), fname)
269
+ for fname in manual_dl_files
270
+ ]
271
+ assert all(
272
+ os.path.exists(path) for path in manual_paths
273
+ ), f"For {dataset.name}, you must manually download the following file(s) from {dataset.get_url(source)} and place them in {dl_manager.manual_dir}: {', '.join(manual_dl_files)}"
274
+
275
+ # set manual path for correct subset
276
+ manual_paths_dict[ss_name] = manual_paths
277
+ else:
278
+ urls_to_download[ss_name] = dataset.get_url(source)
279
+
280
+ # Download and extract files from URLs.
281
+ downloaded_files = dl_manager.download_and_extract(urls_to_download)
282
+ # Extract manually downloaded files.
283
+ manual_files = dl_manager.extract(manual_paths_dict)
284
+ extraction_map = dict(downloaded_files, **manual_files)
285
+
286
+ for language in self.config.language_pair:
287
+ self._vocab_text_gen(self.subsets[datasets.Split.TRAIN], extraction_map, language)
288
+
289
+ return [
290
+ datasets.SplitGenerator( # pylint:disable=g-complex-comprehension
291
+ name=split, gen_kwargs={"split_subsets": split_subsets, "extraction_map": extraction_map}
292
+ )
293
+ for split, split_subsets in self.subsets.items()
294
+ ]
295
+
296
+ def _generate_examples(self, split_subsets, extraction_map, with_translation=True):
297
+ """Returns the examples in the raw (text) form."""
298
+ source, _ = self.config.language_pair
299
+
300
+ def _get_local_paths(dataset, extract_dirs):
301
+ rel_paths = dataset.get_path(source)
302
+ if len(extract_dirs) == 1:
303
+ extract_dirs = extract_dirs * len(rel_paths)
304
+ return [
305
+ os.path.join(ex_dir, rel_path) if rel_path else ex_dir
306
+ for ex_dir, rel_path in zip(extract_dirs, rel_paths)
307
+ ]
308
+
309
+ def _get_filenames(dataset):
310
+ rel_paths = dataset.get_path(source)
311
+ urls = dataset.get_url(source)
312
+ if len(urls) == 1:
313
+ urls = urls * len(rel_paths)
314
+ return [rel_path if rel_path else os.path.basename(url) for url, rel_path in zip(urls, rel_paths)]
315
+
316
+ for ss_name in split_subsets:
317
+ # TODO(PVP) remove following five lines when manual data works
318
+ # +++++++++++++++++++++
319
+ dataset = DATASET_MAP[ss_name]
320
+ source, _ = self.config.language_pair
321
+ if dataset.get_manual_dl_files(source):
322
+ logger.info(f"Skipping {dataset.name} for now. Incomplete dataset for {self.config.name}")
323
+ continue
324
+ # +++++++++++++++++++++
325
+
326
+ logger.info("Generating examples from: %s", ss_name)
327
+ dataset = DATASET_MAP[ss_name]
328
+ extract_dirs = extraction_map[ss_name]
329
+ files = _get_local_paths(dataset, extract_dirs)
330
+ filenames = _get_filenames(dataset)
331
+
332
+ sub_generator_args = tuple(files)
333
+
334
+ if ss_name.startswith("czeng"):
335
+ if ss_name.endswith("16pre"):
336
+ sub_generator = functools.partial(_parse_tsv, language_pair=("en", "cs"))
337
+ sub_generator_args += tuple(filenames)
338
+ else:
339
+ sub_generator = _parse_czeng
340
+ elif ss_name == "hindencorp_01":
341
+ sub_generator = _parse_hindencorp
342
+ elif len(files) == 2:
343
+ if ss_name.endswith("_frde"):
344
+ sub_generator = _parse_frde_bitext
345
+ else:
346
+ sub_generator = _parse_parallel_sentences
347
+ sub_generator_args += tuple(filenames)
348
+ elif len(files) == 1:
349
+ fname = filenames[0]
350
+ # Note: Due to formatting used by `download_manager`, the file
351
+ # extension may not be at the end of the file path.
352
+ if ".tsv" in fname:
353
+ sub_generator = _parse_tsv
354
+ sub_generator_args += tuple(filenames)
355
+ elif (
356
+ ss_name.startswith("newscommentary_v14")
357
+ or ss_name.startswith("europarl_v9")
358
+ or ss_name.startswith("wikititles_v1")
359
+ ):
360
+ sub_generator = functools.partial(_parse_tsv, language_pair=self.config.language_pair)
361
+ sub_generator_args += tuple(filenames)
362
+ elif "tmx" in fname or ss_name.startswith("paracrawl_v3"):
363
+ sub_generator = _parse_tmx
364
+ elif ss_name.startswith("wikiheadlines"):
365
+ sub_generator = _parse_wikiheadlines
366
+ else:
367
+ raise ValueError("Unsupported file format: %s" % fname)
368
+ else:
369
+ raise ValueError("Invalid number of files: %d" % len(files))
370
+
371
+ for sub_key, ex in sub_generator(*sub_generator_args):
372
+ if not all(ex.values()):
373
+ continue
374
+ # TODO(adarob): Add subset feature.
375
+ # ex["subset"] = subset
376
+ key = f"{ss_name}/{sub_key}"
377
+ if with_translation is True:
378
+ ex = {"translation": ex}
379
+ yield key, ex
380
+
381
+
382
+ def _parse_parallel_sentences(f1, f2, filename1, filename2):
383
+ """Returns examples from parallel SGML or text files, which may be gzipped."""
384
+
385
+ def _parse_text(path, original_filename):
386
+ """Returns the sentences from a single text file, which may be gzipped."""
387
+ split_path = original_filename.split(".")
388
+
389
+ if split_path[-1] == "gz":
390
+ lang = split_path[-2]
391
+
392
+ def gen():
393
+ with open(path, "rb") as f, gzip.GzipFile(fileobj=f) as g:
394
+ for line in g:
395
+ yield line.decode("utf-8").rstrip()
396
+
397
+ return gen(), lang
398
+
399
+ if split_path[-1] == "txt":
400
+ # CWMT
401
+ lang = split_path[-2].split("_")[-1]
402
+ lang = "zh" if lang in ("ch", "cn", "c[hn]") else lang
403
+ else:
404
+ lang = split_path[-1]
405
+
406
+ def gen():
407
+ with open(path, "rb") as f:
408
+ for line in f:
409
+ yield line.decode("utf-8").rstrip()
410
+
411
+ return gen(), lang
412
+
413
+ def _parse_sgm(path, original_filename):
414
+ """Returns sentences from a single SGML file."""
415
+ lang = original_filename.split(".")[-2]
416
+ # Note: We can't use the XML parser since some of the files are badly
417
+ # formatted.
418
+ seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>")
419
+
420
+ def gen():
421
+ with open(path, encoding="utf-8") as f:
422
+ for line in f:
423
+ seg_match = re.match(seg_re, line)
424
+ if seg_match:
425
+ assert len(seg_match.groups()) == 1
426
+ yield seg_match.groups()[0]
427
+
428
+ return gen(), lang
429
+
430
+ parse_file = _parse_sgm if os.path.basename(f1).endswith(".sgm") else _parse_text
431
+
432
+ # Some datasets (e.g., CWMT) contain multiple parallel files specified with
433
+ # a wildcard. We sort both sets to align them and parse them one by one.
434
+ f1_files = sorted(glob.glob(f1))
435
+ f2_files = sorted(glob.glob(f2))
436
+
437
+ assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2)
438
+ assert len(f1_files) == len(f2_files), "Number of files does not match: %d vs %d for %s vs %s." % (
439
+ len(f1_files),
440
+ len(f2_files),
441
+ f1,
442
+ f2,
443
+ )
444
+
445
+ for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))):
446
+ l1_sentences, l1 = parse_file(f1_i, filename1)
447
+ l2_sentences, l2 = parse_file(f2_i, filename2)
448
+
449
+ for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)):
450
+ key = f"{f_id}/{line_id}"
451
+ yield key, {l1: s1, l2: s2}
452
+
453
+
454
+ def _parse_frde_bitext(fr_path, de_path):
455
+ with open(fr_path, encoding="utf-8") as fr_f:
456
+ with open(de_path, encoding="utf-8") as de_f:
457
+ for line_id, (s1, s2) in enumerate(zip(fr_f, de_f)):
458
+ yield line_id, {"fr": s1.rstrip(), "de": s2.rstrip()}
459
+
460
+
461
+ def _parse_tmx(path):
462
+ """Generates examples from TMX file."""
463
+
464
+ def _get_tuv_lang(tuv):
465
+ for k, v in tuv.items():
466
+ if k.endswith("}lang"):
467
+ return v
468
+ raise AssertionError("Language not found in `tuv` attributes.")
469
+
470
+ def _get_tuv_seg(tuv):
471
+ segs = tuv.findall("seg")
472
+ assert len(segs) == 1, "Invalid number of segments: %d" % len(segs)
473
+ return segs[0].text
474
+
475
+ with open(path, "rb") as f:
476
+ # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
477
+ utf_f = codecs.getreader("utf-8")(f)
478
+ for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)):
479
+ if elem.tag == "tu":
480
+ yield line_id, {_get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv")}
481
+ elem.clear()
482
+
483
+
484
+ def _parse_tsv(path, filename, language_pair=None):
485
+ """Generates examples from TSV file."""
486
+ if language_pair is None:
487
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", filename)
488
+ assert lang_match is not None, "Invalid TSV filename: %s" % filename
489
+ l1, l2 = lang_match.groups()
490
+ else:
491
+ l1, l2 = language_pair
492
+ with open(path, encoding="utf-8") as f:
493
+ for j, line in enumerate(f):
494
+ cols = line.split("\t")
495
+ if len(cols) != 2:
496
+ logger.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols))
497
+ continue
498
+ s1, s2 = cols
499
+ yield j, {l1: s1.strip(), l2: s2.strip()}
500
+
501
+
502
+ def _parse_wikiheadlines(path):
503
+ """Generates examples from Wikiheadlines dataset file."""
504
+ lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path)
505
+ assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path
506
+ l1, l2 = lang_match.groups()
507
+ with open(path, encoding="utf-8") as f:
508
+ for line_id, line in enumerate(f):
509
+ s1, s2 = line.split("|||")
510
+ yield line_id, {l1: s1.strip(), l2: s2.strip()}
511
+
512
+
513
+ def _parse_czeng(*paths, **kwargs):
514
+ """Generates examples from CzEng v1.6, with optional filtering for v1.7."""
515
+ filter_path = kwargs.get("filter_path", None)
516
+ if filter_path:
517
+ re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]")
518
+ with open(filter_path, encoding="utf-8") as f:
519
+ bad_blocks = {blk for blk in re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()}
520
+ logger.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks))
521
+
522
+ for path in paths:
523
+ for gz_path in sorted(glob.glob(path)):
524
+ with open(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f:
525
+ filename = os.path.basename(gz_path)
526
+ for line_id, line in enumerate(f):
527
+ line = line.decode("utf-8") # required for py3
528
+ if not line.strip():
529
+ continue
530
+ id_, unused_score, cs, en = line.split("\t")
531
+ if filter_path:
532
+ block_match = re.match(re_block, id_)
533
+ if block_match and block_match.groups()[0] in bad_blocks:
534
+ continue
535
+ sub_key = f"{filename}/{line_id}"
536
+ yield sub_key, {
537
+ "cs": cs.strip(),
538
+ "en": en.strip(),
539
+ }
540
+
541
+
542
+ def _parse_hindencorp(path):
543
+ with open(path, encoding="utf-8") as f:
544
+ for line_id, line in enumerate(f):
545
+ split_line = line.split("\t")
546
+ if len(split_line) != 5:
547
+ logger.warning("Skipping invalid HindEnCorp line: %s", line)
548
+ continue
549
+ yield line_id, {"translation": {"en": split_line[3].strip(), "hi": split_line[4].strip()}}
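For the FAPESP entries above, `SubDataset._inject_language` only needs the `{src}` branch of `_format_string`, and the two expanded paths are then fed to `_parse_parallel_sentences` (the `len(files) == 2` case in `_generate_examples`). A worked sketch of that expansion, reimplemented inline for illustration rather than imported from the module:

```python
# Template expansion for the "fapesp-v2" train entry, for both source languages.
path_template = ("fapesp-v2.pt-{src}.train.{src}", "fapesp-v2.pt-{src}.train.pt")

for src in ("en", "es"):
    expanded = tuple(p.format(src=src) for p in path_template)
    print(src, expanded)

# en ('fapesp-v2.pt-en.train.en', 'fapesp-v2.pt-en.train.pt')
# es ('fapesp-v2.pt-es.train.es', 'fapesp-v2.pt-es.train.pt')
```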