stas committed
Commit
e1c470c
1 Parent(s): e4680ac
Files changed (3)
  1. README.md +16 -0
  2. process.txt +21 -0
  3. wmt14-en-de-pre-processed.py +133 -0
README.md ADDED
@@ -0,0 +1,16 @@
+ # WMT14 English-German Translation Data with further preprocessing
+
+ The original pre-processing script is [here](https://github.com/pytorch/fairseq/blob/master/examples/translation/prepare-wmt14en2de.sh).
+
+ This pre-processed dataset was created by running:
+
+ ```
+ git clone https://github.com/pytorch/fairseq
+ cd fairseq/examples/translation/
+ ./prepare-wmt14en2de.sh
+ ```
+
+ It was originally used by the `transformers` example script [`finetune_trainer.py`](https://github.com/huggingface/transformers/blob/641f418e102218c4bf16fcd3124bfebed6217ef6/examples/seq2seq/finetune_trainer.py).
+
+ The data itself resides at https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz
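+
+ A minimal loading sketch (the `ende` config name comes from the build script in this repo, which concatenates the source and target language codes):
+
+ ```python
+ from datasets import load_dataset
+
+ # single config: "en" + "de" -> "ende"
+ ds = load_dataset("stas/wmt14-en-de-pre-processed", "ende")
+ print(ds["train"][0]["translation"])  # {'en': '...', 'de': '...'}
+ ```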
process.txt ADDED
@@ -0,0 +1,21 @@
+
+ # how this build script and dataset_infos.json were generated
+
+ # this is translation, so let's adapt flores - it has an almost identical input format, just the files are named differently:
+ wget https://raw.githubusercontent.com/huggingface/datasets/master/datasets/flores/flores.py -O wmt14-en-de-pre-processed.py
+ perl -pi -e 's|Flores|wmt14-en-de-pre-processed|g' wmt14-en-de-pre-processed.py
+
+ (good reference scripts for other tasks can be found here: https://huggingface.co/docs/datasets/add_dataset.html#dataset-scripts-of-reference)
+
+ # then edit to change the language pairs, the file template and the data url
+ git add wmt14-en-de-pre-processed.py
+ git commit -m "build script" wmt14-en-de-pre-processed.py
+ git push
+
+ # finally test
+ datasets-cli test stas/wmt14-en-de-pre-processed --save_infos --all_configs
+
+ # add and push the generated config
+ git add dataset_infos.json
+ git commit -m "add dataset_infos.json" dataset_infos.json
+ git push
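+
+ # optional sanity check (illustrative, not part of the original process; "ende" is
+ # the config name the build script derives from the ("en", "de") language pair):
+ python -c 'from datasets import load_dataset; print(load_dataset("stas/wmt14-en-de-pre-processed", "ende"))'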
wmt14-en-de-pre-processed.py ADDED
@@ -0,0 +1,133 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ WMT14 English-German Translation Data with further preprocessing """
+
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {WMT14 English-German Translation Data with further preprocessing},
+ authors={},
+ year={2014}
+ }
+ """
+
+ _DESCRIPTION = "WMT14 English-German Translation Data with further preprocessing"
+ _HOMEPAGE = "http://www.statmt.org/wmt14/"
+ _LICENSE = ""
+
+
+ _DATA_URL = "https://cdn-datasets.huggingface.co/translation/wmt_en_de.tgz"
+
+
+ class Wmt14EnDePreProcessedConfig(datasets.BuilderConfig):
+     """BuilderConfig for the pre-processed WMT14 en-de translation dataset."""
+
+     def __init__(self, language_pair=(None, None), **kwargs):
+         """BuilderConfig for the pre-processed WMT14 en-de translation dataset.
+
+         Args:
+           language_pair: pair of languages that will be used for translation. Should
+             contain 2-letter coded strings. First will be used as source and second
+             as target in supervised mode. For example: ("en", "de").
+           **kwargs: keyword arguments forwarded to super.
+         """
+         name = "%s%s" % (language_pair[0], language_pair[1])
+
+         description = "Translation dataset from %s to %s" % (language_pair[0], language_pair[1])
+         super(Wmt14EnDePreProcessedConfig, self).__init__(
+             name=name,
+             description=description,
+             version=datasets.Version("1.1.0", ""),
+             **kwargs,
+         )
+
+         # Validate language pair.
+         assert "en" in language_pair, "Config language pair must contain `en`, got: %s" % (language_pair,)
+         source, target = language_pair
+         non_en = source if target == "en" else target
+         assert non_en in ["de"], "Invalid non-en language in pair: %s" % non_en
+
+         self.language_pair = language_pair
+
+
+ class Wmt14EnDePreProcessed(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         Wmt14EnDePreProcessedConfig(
+             language_pair=("en", "de"),
+         ),
+     ]
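+
+     # the single config is named "ende": Wmt14EnDePreProcessedConfig concatenates
+     # the source and target language codes into the config name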
+
+     def _info(self):
+         source, target = self.config.language_pair
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"translation": datasets.features.Translation(languages=self.config.language_pair)}
+             ),
+             supervised_keys=(source, target),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_DATA_URL)
+
+         path_tmpl = "{dl_dir}/wmt_en_de/{split}.{type}"
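+         # expected layout inside wmt_en_de.tgz (inferred from this template and the
+         # split names below): {train,val,test}.source and {train,val,test}.target
+         # under wmt_en_de/, with source and target files aligned line-by-line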
+
+         files = {}
+         for split in ("train", "val", "test"):
+             files[split] = {
+                 "source_file": path_tmpl.format(dl_dir=dl_dir, split=split, type="source"),
+                 "target_file": path_tmpl.format(dl_dir=dl_dir, split=split, type="target"),
+             }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["val"]),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
+         ]
+
+     def _generate_examples(self, source_file, target_file):
+         """This function returns the examples in the raw (text) form."""
+         with open(source_file, encoding="utf-8") as f:
+             source_sentences = f.read().split("\n")
+         with open(target_file, encoding="utf-8") as f:
+             target_sentences = f.read().split("\n")
+
+         assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
+             len(source_sentences),
+             len(target_sentences),
+             source_file,
+             target_file,
+         )
+
+         source, target = self.config.language_pair
+         for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
+             result = {"translation": {source: l1, target: l2}}
+             # Make sure that both translations are non-empty.
+             if l1 and l2:
+                 yield idx, result
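+
+ # each yielded example is (idx, {"translation": {"en": <source line>, "de": <target line>}}),
+ # matching the Translation feature declared in _info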