Multilinguality: multilingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: found
Source Datasets: original
albertvillanova committed
Commit: 5d5d216
Parent: 6ec8745

Delete loading script
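With the loading script gone, the dataset is presumably served from data files hosted on the Hub (for example the auto-converted Parquet export), so the usual load_dataset call should keep working without executing repository code. A minimal sketch, assuming the dataset id news_commentary and the "ar-en" config name defined by the deleted script below:

from datasets import load_dataset

# Hypothetical usage sketch: config names follow the "{lang1}-{lang2}" pattern
# from the deleted script, and each row carries an "id" plus a "translation" dict.
ds = load_dataset("news_commentary", "ar-en", split="train")
print(ds[0]["translation"])  # e.g. {"ar": "...", "en": "..."}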

Files changed (1)
  1. news_commentary.py +0 -193
news_commentary.py DELETED
@@ -1,193 +0,0 @@
- # coding=utf-8
- # Copyright 2020 HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- # Lint as: python3
- import os
-
- import datasets
-
-
- _DESCRIPTION = """\
- A parallel corpus of News Commentaries provided by WMT for training SMT. The source is taken from CASMACAT: http://www.casmacat.eu/corpus/news-commentary.html
-
- 12 languages, 63 bitexts
- total number of files: 61,928
- total number of tokens: 49.66M
- total number of sentence fragments: 1.93M
- """
- _HOMEPAGE_URL = "https://opus.nlpl.eu/News-Commentary/corpus/version/News-Commentary"
- _CITATION = """\
- @inproceedings{tiedemann-2012-parallel,
-     title = "Parallel Data, Tools and Interfaces in {OPUS}",
-     author = {Tiedemann, J{\\"o}rg},
-     editor = "Calzolari, Nicoletta and
-       Choukri, Khalid and
-       Declerck, Thierry and
-       Do{\\u{g}}an, Mehmet U{\\u{g}}ur and
-       Maegaard, Bente and
-       Mariani, Joseph and
-       Moreno, Asuncion and
-       Odijk, Jan and
-       Piperidis, Stelios",
-     booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
-     month = may,
-     year = "2012",
-     address = "Istanbul, Turkey",
-     publisher = "European Language Resources Association (ELRA)",
-     url = "http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf",
-     pages = "2214--2218",
- }
- """
-
- _VERSION = "11.0.0"
- _BASE_NAME = "News-Commentary.{}.{}"
- _BASE_URL = "https://object.pouta.csc.fi/OPUS-News-Commentary/v11/moses/{}-{}.txt.zip"
-
- _LANGUAGE_PAIRS = [
-     ("ar", "cs"),
-     ("ar", "de"),
-     ("cs", "de"),
-     ("ar", "en"),
-     ("cs", "en"),
-     ("de", "en"),
-     ("ar", "es"),
-     ("cs", "es"),
-     ("de", "es"),
-     ("en", "es"),
-     ("ar", "fr"),
-     ("cs", "fr"),
-     ("de", "fr"),
-     ("en", "fr"),
-     ("es", "fr"),
-     ("ar", "it"),
-     ("cs", "it"),
-     ("de", "it"),
-     ("en", "it"),
-     ("es", "it"),
-     ("fr", "it"),
-     ("ar", "ja"),
-     ("cs", "ja"),
-     ("de", "ja"),
-     ("en", "ja"),
-     ("es", "ja"),
-     ("fr", "ja"),
-     ("ar", "nl"),
-     ("cs", "nl"),
-     ("de", "nl"),
-     ("en", "nl"),
-     ("es", "nl"),
-     ("fr", "nl"),
-     ("it", "nl"),
-     ("ar", "pt"),
-     ("cs", "pt"),
-     ("de", "pt"),
-     ("en", "pt"),
-     ("es", "pt"),
-     ("fr", "pt"),
-     ("it", "pt"),
-     ("nl", "pt"),
-     ("ar", "ru"),
-     ("cs", "ru"),
-     ("de", "ru"),
-     ("en", "ru"),
-     ("es", "ru"),
-     ("fr", "ru"),
-     ("it", "ru"),
-     ("ja", "ru"),
-     ("nl", "ru"),
-     ("pt", "ru"),
-     ("ar", "zh"),
-     ("cs", "zh"),
-     ("de", "zh"),
-     ("en", "zh"),
-     ("es", "zh"),
-     ("fr", "zh"),
-     ("it", "zh"),
-     ("ja", "zh"),
-     ("nl", "zh"),
-     ("pt", "zh"),
-     ("ru", "zh"),
- ]
-
-
- class NewsCommentaryConfig(datasets.BuilderConfig):
-     def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-         super().__init__(
-             *args,
-             name=f"{lang1}-{lang2}",
-             **kwargs,
-         )
-         self.lang1 = lang1
-         self.lang2 = lang2
-
-
- class NewsCommentary(datasets.GeneratorBasedBuilder):
-     BUILDER_CONFIGS = [
-         NewsCommentaryConfig(
-             lang1=lang1,
-             lang2=lang2,
-             description=f"Translating {lang1} to {lang2} or vice versa",
-             version=datasets.Version(_VERSION),
-         )
-         for lang1, lang2 in _LANGUAGE_PAIRS
-     ]
-     BUILDER_CONFIG_CLASS = NewsCommentaryConfig
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                 },
-             ),
-             supervised_keys=None,
-             homepage=_HOMEPAGE_URL,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         def _base_url(lang1, lang2):
-             return _BASE_URL.format(lang1, lang2)
-
-         download_url = _base_url(self.config.lang1, self.config.lang2)
-         path = dl_manager.download_and_extract(download_url)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"datapath": path},
-             )
-         ]
-
-     def _generate_examples(self, datapath):
-         l1, l2 = self.config.lang1, self.config.lang2
-         folder = l1 + "-" + l2
-         l1_file = _BASE_NAME.format(folder, l1)
-         l2_file = _BASE_NAME.format(folder, l2)
-         l1_path = os.path.join(datapath, l1_file)
-         l2_path = os.path.join(datapath, l2_file)
-         with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
-             for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                 x = x.strip()
-                 y = y.strip()
-                 result = (
-                     sentence_counter,
-                     {
-                         "id": str(sentence_counter),
-                         "translation": {l1: x, l2: y},
-                     },
-                 )
-                 yield result
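
For reference, the deleted builder did little more than download one OPUS zip per language pair and pair the two Moses-aligned text files line by line. A standalone sketch of the same access pattern, reusing the _BASE_URL and _BASE_NAME templates from the script above; whether the files are still hosted at that location is an assumption:

import io
import zipfile
from urllib.request import urlopen

lang1, lang2 = "de", "en"  # any pair from _LANGUAGE_PAIRS
url = f"https://object.pouta.csc.fi/OPUS-News-Commentary/v11/moses/{lang1}-{lang2}.txt.zip"

# Download the zip into memory and open the two aligned text files.
with urlopen(url) as resp:
    archive = zipfile.ZipFile(io.BytesIO(resp.read()))
src_name = f"News-Commentary.{lang1}-{lang2}.{lang1}"
tgt_name = f"News-Commentary.{lang1}-{lang2}.{lang2}"
src_lines = archive.read(src_name).decode("utf-8").splitlines()
tgt_lines = archive.read(tgt_name).decode("utf-8").splitlines()

# Pair sentences positionally, mirroring _generate_examples.
pairs = [(s.strip(), t.strip()) for s, t in zip(src_lines, tgt_lines)]
print(len(pairs), pairs[0])

This is roughly what dl_manager.download_and_extract plus _generate_examples automated for each config.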