Modalities: Text · Formats: parquet · Libraries: Datasets, pandas
albertvillanova committed
Commit 24c4248 (1 parent: 9cf5a76)

Delete loading script

Files changed (1):
  1. opus100.py +0 -271
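
With the loading script deleted, the dataset is served from the Parquet files and still loads through the datasets library. A minimal sketch, assuming the repository keeps one config per language pair ("de-en" is just an illustrative choice):

from datasets import load_dataset

# Loads the Parquet-backed data directly; no loading script runs.
dataset = load_dataset("opus100", "de-en")
print(dataset["train"][0]["translation"])  # e.g. {'de': '...', 'en': '...'}
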
opus100.py DELETED
@@ -1,271 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """OPUS-100"""
-
-
- import datasets
-
-
- _CITATION = """\
- @misc{zhang2020improving,
-     title={Improving Massively Multilingual Neural Machine Translation and Zero-Shot Translation},
-     author={Biao Zhang and Philip Williams and Ivan Titov and Rico Sennrich},
-     year={2020},
-     eprint={2004.11867},
-     archivePrefix={arXiv},
-     primaryClass={cs.CL}
- }
- """
-
- _DESCRIPTION = """\
- OPUS-100 is English-centric, meaning that all training pairs include English on either the source or target side.
- The corpus covers 100 languages (including English). OPUS-100 contains approximately 55M sentence pairs.
- Of the 99 language pairs, 44 have 1M sentence pairs of training data, 73 have at least 100k, and 95 have at least 10k.
- """
-
- _URL = {
-     "supervised": "https://object.pouta.csc.fi/OPUS-100/v1.0/opus-100-corpus-{}-v1.0.tar.gz",
-     "zero-shot": "https://object.pouta.csc.fi/OPUS-100/v1.0/opus-100-corpus-zeroshot-v1.0.tar.gz",
- }
-
- _SupervisedLanguagePairs = [
-     "af-en",
-     "am-en",
-     "an-en",
-     "ar-en",
-     "as-en",
-     "az-en",
-     "be-en",
-     "bg-en",
-     "bn-en",
-     "br-en",
-     "bs-en",
-     "ca-en",
-     "cs-en",
-     "cy-en",
-     "da-en",
-     "de-en",
-     "dz-en",
-     "el-en",
-     "en-eo",
-     "en-es",
-     "en-et",
-     "en-eu",
-     "en-fa",
-     "en-fi",
-     "en-fr",
-     "en-fy",
-     "en-ga",
-     "en-gd",
-     "en-gl",
-     "en-gu",
-     "en-ha",
-     "en-he",
-     "en-hi",
-     "en-hr",
-     "en-hu",
-     "en-hy",
-     "en-id",
-     "en-ig",
-     "en-is",
-     "en-it",
-     "en-ja",
-     "en-ka",
-     "en-kk",
-     "en-km",
-     "en-ko",
-     "en-kn",
-     "en-ku",
-     "en-ky",
-     "en-li",
-     "en-lt",
-     "en-lv",
-     "en-mg",
-     "en-mk",
-     "en-ml",
-     "en-mn",
-     "en-mr",
-     "en-ms",
-     "en-mt",
-     "en-my",
-     "en-nb",
-     "en-ne",
-     "en-nl",
-     "en-nn",
-     "en-no",
-     "en-oc",
-     "en-or",
-     "en-pa",
-     "en-pl",
-     "en-ps",
-     "en-pt",
-     "en-ro",
-     "en-ru",
-     "en-rw",
-     "en-se",
-     "en-sh",
-     "en-si",
-     "en-sk",
-     "en-sl",
-     "en-sq",
-     "en-sr",
-     "en-sv",
-     "en-ta",
-     "en-te",
-     "en-tg",
-     "en-th",
-     "en-tk",
-     "en-tr",
-     "en-tt",
-     "en-ug",
-     "en-uk",
-     "en-ur",
-     "en-uz",
-     "en-vi",
-     "en-wa",
-     "en-xh",
-     "en-yi",
-     "en-yo",
-     "en-zh",
-     "en-zu",
- ]
-
- _0shotLanguagePairs = [
-     "ar-de",
-     "ar-fr",
-     "ar-nl",
-     "ar-ru",
-     "ar-zh",
-     "de-fr",
-     "de-nl",
-     "de-ru",
-     "de-zh",
-     "fr-nl",
-     "fr-ru",
-     "fr-zh",
-     "nl-ru",
-     "nl-zh",
-     "ru-zh",
- ]
-
-
- class Opus100Config(datasets.BuilderConfig):
-     """BuilderConfig for Opus100."""
-
-     def __init__(self, language_pair, **kwargs):
-         """
-         Args:
-             language_pair: the language pair to load.
-             **kwargs: keyword arguments forwarded to super.
-         """
-         super().__init__(**kwargs)
-         self.language_pair = language_pair
-
-
- class Opus100(datasets.GeneratorBasedBuilder):
-     """OPUS-100 is English-centric, meaning that all training pairs include English on either the source or target side."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIG_CLASS = Opus100Config
-     BUILDER_CONFIGS = [
-         Opus100Config(name=pair, description=_DESCRIPTION, language_pair=pair)
-         for pair in _SupervisedLanguagePairs + _0shotLanguagePairs
-     ]
-
-     def _info(self):
-         src_tag, tgt_tag = self.config.language_pair.split("-")
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features({"translation": datasets.features.Translation(languages=(src_tag, tgt_tag))}),
-             supervised_keys=(src_tag, tgt_tag),
-             homepage="http://opus.nlpl.eu/opus-100.php",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         lang_pair = self.config.language_pair
-         src_tag, tgt_tag = lang_pair.split("-")
-
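-         # Zero-shot pairs ship in a single combined archive, while each
-         # supervised pair has its own per-pair archive.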
-         domain = "supervised"
-         if lang_pair in _0shotLanguagePairs:
-             domain = "zero-shot"
-
-         if domain == "supervised":
-             archive = dl_manager.download(_URL["supervised"].format(lang_pair))
-         elif domain == "zero-shot":
-             archive = dl_manager.download(_URL["zero-shot"])
-
-         data_dir = "/".join(["opus-100-corpus", "v1.0", domain, lang_pair])
-         output = []
-
-         test = datasets.SplitGenerator(
-             name=datasets.Split.TEST,
-             # These kwargs will be passed to _generate_examples
-             gen_kwargs={
-                 "filepath": f"{data_dir}/opus.{lang_pair}-test.{src_tag}",
-                 "labelpath": f"{data_dir}/opus.{lang_pair}-test.{tgt_tag}",
-                 "files": dl_manager.iter_archive(archive),
-             },
-         )
-
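-         # A split is emitted only when its source file is present in the
-         # archive, since not every pair ships every split.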
-         available_files = [path for path, _ in dl_manager.iter_archive(archive)]
-         if f"{data_dir}/opus.{lang_pair}-test.{src_tag}" in available_files:
-             output.append(test)
-
-         if domain == "supervised":
-
-             train = datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "filepath": f"{data_dir}/opus.{lang_pair}-train.{src_tag}",
-                     "labelpath": f"{data_dir}/opus.{lang_pair}-train.{tgt_tag}",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             )
-
-             if f"{data_dir}/opus.{lang_pair}-train.{src_tag}" in available_files:
-                 output.append(train)
-
-             valid = datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": f"{data_dir}/opus.{lang_pair}-dev.{src_tag}",
-                     "labelpath": f"{data_dir}/opus.{lang_pair}-dev.{tgt_tag}",
-                     "files": dl_manager.iter_archive(archive),
-                 },
-             )
-
-             if f"{data_dir}/opus.{lang_pair}-dev.{src_tag}" in available_files:
-                 output.append(valid)
-
-         return output
-
-     def _generate_examples(self, filepath, labelpath, files):
-         """Yields examples."""
-         src_tag, tgt_tag = self.config.language_pair.split("-")
-         src, tgt = None, None
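-         # Walk the archive once; when both sides have been read, align
-         # them line by line and stop.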
-         for path, f in files:
-             if path == filepath:
-                 src = f.read().decode("utf-8").split("\n")[:-1]
-             elif path == labelpath:
-                 tgt = f.read().decode("utf-8").split("\n")[:-1]
-             if src is not None and tgt is not None:
-                 for idx, (s, t) in enumerate(zip(src, tgt)):
-                     yield idx, {"translation": {src_tag: s, tgt_tag: t}}
-                 break
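
For reference, the line-pairing logic that _generate_examples implemented can be reproduced standalone. A minimal sketch, assuming two aligned plain-text files (paths and language tags are illustrative):

def read_pairs(src_path, tgt_path, src_tag="de", tgt_tag="en"):
    # Read both sides, drop the trailing empty string left by the final
    # newline, and zip the files line by line into translation pairs.
    with open(src_path, encoding="utf-8") as f:
        src = f.read().split("\n")[:-1]
    with open(tgt_path, encoding="utf-8") as f:
        tgt = f.read().split("\n")[:-1]
    for idx, (s, t) in enumerate(zip(src, tgt)):
        yield idx, {"translation": {src_tag: s, tgt_tag: t}}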