stas committed
Commit 6b0ef43
1 Parent(s): d68916b

add script

Files changed (5)
  1. .gitattributes +0 -3
  2. test.json +0 -3
  3. train.json +0 -3
  4. val.json +0 -3
  5. wmt16-en-ro-pre-processed.py +148 -0
.gitattributes CHANGED
@@ -14,6 +14,3 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
- test.json filter=lfs diff=lfs merge=lfs -text
- train.json filter=lfs diff=lfs merge=lfs -text
- val.json filter=lfs diff=lfs merge=lfs -text

test.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:132df683c4c2008c60dc27c772acb1e5cbcf5f6d79f2fb6adb0b08bd896fbd0e
- size 608194

train.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9602761a4f8c56a5c7677fbd78e0a66987fdede826aaaffbb45b6faaf11cb651
- size 209195474

val.json DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2ebee4e57c65b2a06484e1de295b6f63a2646d1095a0b05ca0cf1854a3de955b
- size 631127

wmt16-en-ro-pre-processed.py ADDED
@@ -0,0 +1,148 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """WMT16 English-Romanian translation data with further preprocessing."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import datasets
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """\
+ @InProceedings{,
+ title = {WMT16 English-Romanian Translation Data w/ further preprocessing},
+ authors={},
+ year={2016}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """\
+ WMT16 English-Romanian Translation Data with further preprocessing
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "http://www.statmt.org/wmt16/"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = ""
+
+ # TODO: Add links to the official dataset URLs here
+ # The HuggingFace datasets library doesn't host the datasets but only points to the original files
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ _URLs = {
+     "hf": "https://cdn-datasets.huggingface.co/translation/wmt_en_ro.tar.gz",
+ }
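+ # The tarball extracts to a `wmt_en_ro/` directory containing line-aligned
+ # plain-text files `{train,val,test}.source` (English) and
+ # `{train,val,test}.target` (Romanian); `_split_generators` below maps them
+ # onto the three dataset splits.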
+
+
+ class Wmt16EnRoPreProcessedConfig(datasets.BuilderConfig):
+     """BuilderConfig for wmt16."""
+
+     def __init__(self, language_pair=(None, None), **kwargs):
+         """BuilderConfig for wmt16
+
+         Args:
+             language_pair: pair of languages that will be used for translation. Should
+                 contain 2-letter coded strings. First will be used as source and second
+                 as target in supervised mode. For example: ("se", "en").
+             **kwargs: keyword arguments forwarded to super.
+         """
+         name = "%s%s" % (language_pair[0], language_pair[1])
+
+         description = "Translation dataset from %s to %s" % (language_pair[0], language_pair[1])
+         super(Wmt16EnRoPreProcessedConfig, self).__init__(
+             name=name,
+             description=description,
+             version=datasets.Version("1.1.0", ""),
+             **kwargs,
+         )
+
+         # Validate language pair.
+         assert "en" in language_pair, "Config language pair must contain `en`, got: %s" % (language_pair,)
+         source, target = language_pair
+         non_en = source if target == "en" else target
+         assert non_en in ["ro"], "Invalid non-en language in pair: %s" % non_en
+
+         self.language_pair = language_pair
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class Wmt16EnRoPreProcessed(datasets.GeneratorBasedBuilder):
+
+     BUILDER_CONFIGS = [
+         Wmt16EnRoPreProcessedConfig(
+             language_pair=("en", "ro"),
+         ),
+     ]
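+     # With a single config, its generated `name` ("enro") is the default,
+     # so users don't need to pass a config name to `load_dataset`.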
+
+     def _info(self):
+         source, target = self.config.language_pair
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {"translation": datasets.features.Translation(languages=self.config.language_pair)}
+             ),
+             supervised_keys=(source, target),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         dl_dir = dl_manager.download_and_extract(_URLs["hf"])
+
+         path_tmpl = "{dl_dir}/wmt_en_ro/{split}.{type}"
+
+         files = {}
+         for split in ("train", "val", "test"):
+             files[split] = {
+                 "source_file": path_tmpl.format(dl_dir=dl_dir, split=split, type="source"),
+                 "target_file": path_tmpl.format(dl_dir=dl_dir, split=split, type="target"),
+             }
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=files["train"]),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs=files["val"]),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=files["test"]),
+         ]
+
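+     # Note: both files are read fully into memory and split on "\n"; the two
+     # lists must stay index-aligned so that line i of `target_file` is the
+     # translation of line i of `source_file` (checked by the assert below).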
+     def _generate_examples(self, source_file, target_file):
+         """This function returns the examples in the raw (text) form."""
+         with open(source_file, encoding="utf-8") as f:
+             source_sentences = f.read().split("\n")
+         with open(target_file, encoding="utf-8") as f:
+             target_sentences = f.read().split("\n")
+
+         assert len(target_sentences) == len(source_sentences), "Sizes do not match: %d vs %d for %s vs %s." % (
+             len(source_sentences),
+             len(target_sentences),
+             source_file,
+             target_file,
+         )
+
+         source, target = self.config.language_pair
+         for idx, (l1, l2) in enumerate(zip(source_sentences, target_sentences)):
+             result = {"translation": {source: l1, target: l2}}
+             # Make sure that both translations are non-empty.
+             if l1 and l2:
+                 yield idx, result
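
Once merged, the script can be exercised end to end with `datasets.load_dataset`. A minimal sketch, assuming the script is hosted in a Hub dataset repo with the id `stas/wmt16-en-ro-pre-processed` (the repo id is not shown on this page, and recent versions of `datasets` may additionally require `trust_remote_code=True`):

    from datasets import load_dataset

    # Downloads and extracts wmt_en_ro.tar.gz, then builds all three splits.
    ds = load_dataset("stas/wmt16-en-ro-pre-processed")

    # Each example is a {"translation": {"en": ..., "ro": ...}} dict.
    print(ds["train"][0]["translation"]["en"])
    print(ds["train"][0]["translation"]["ro"])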