Multilinguality: multilingual
Language Creators: found
Annotations Creators: found
Source Datasets: original
albertvillanova committed
Commit 8bb0bcf
1 parent: 1e6ba09

Convert dataset to Parquet (#2)

- Convert dataset to Parquet (89a0293708fe02a73b24fd213d7823f527377bee)
- Add en-km data files (1f3fdc43106f5b5b7c6a21f5cf5c89d2b9338a63)
- Add en-so data files (8ea51e90c9bfac12b2282fc3aac5aca4ec9b7202)
- Add de-pl data files (d228146b571a3e2a3f2fdf77ca6690e4c78aa85b)
- Add fr-nl data files (b55469b73fc3ed5d747168ede03fa13fcb2ae78b)
- Add en-sw data files (048acf312a960d27e40b495d8143d67bca31887d)
- Add en-tl data files (668d6e5626ea04292384e74a758cd564d9b438a0)
- Add es-gl data files (a5b056900ed339c2a835b3fb0431ccfa8c7d2bb7)
- Delete loading script (915521e9f39dd1a14dc17e01927b41df8cd83824)
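
Taken together, these commits replace the script-based loader with Parquet shards plus a `configs` mapping in the dataset card, so each language pair now loads directly from the Hub without executing any repository code. A minimal sketch, assuming the `datasets` library is installed; the repo id below is an assumption based on the dataset name:

```python
# Minimal sketch: load one converted config straight from its Parquet shards.
# "opus_paracrawl" is an assumed repo id; substitute the actual Hub repository.
from datasets import load_dataset

ds = load_dataset("opus_paracrawl", "de-pl", split="train")
print(ds.num_rows)           # 916643, per the updated dataset card below
print(ds[0]["translation"])  # {'de': '...', 'pl': '...'}
```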

README.md CHANGED
@@ -58,9 +58,35 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: OpusParaCrawl
+config_names:
+- de-pl
+- el-en
+- en-ha
+- en-ig
+- en-km
+- en-so
+- en-sw
+- en-tl
+- es-gl
+- fr-nl
 dataset_info:
+- config_name: de-pl
+  features:
+  - name: id
+    dtype: string
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - de
+        - pl
+  splits:
+  - name: train
+    num_bytes: 298635927
+    num_examples: 916643
+  download_size: 183957290
+  dataset_size: 298635927
 - config_name: el-en
   features:
   - name: id
@@ -73,10 +99,10 @@ dataset_info:
         - en
   splits:
   - name: train
-    num_bytes: 6760375061
+    num_bytes: 6760349369
     num_examples: 21402471
-  download_size: 2317102846
-  dataset_size: 6760375061
+  download_size: 4108379167
+  dataset_size: 6760349369
 - config_name: en-ha
   features:
   - name: id
@@ -121,10 +147,10 @@ dataset_info:
         - km
   splits:
   - name: train
-    num_bytes: 31964493
+    num_bytes: 31964409
     num_examples: 65115
-  download_size: 9907279
-  dataset_size: 31964493
+  download_size: 16582595
+  dataset_size: 31964409
 - config_name: en-so
   features:
   - name: id
@@ -137,42 +163,10 @@ dataset_info:
         - so
   splits:
   - name: train
-    num_bytes: 5791003
+    num_bytes: 5790979
     num_examples: 14880
-  download_size: 2227727
-  dataset_size: 5791003
-- config_name: de-pl
-  features:
-  - name: id
-    dtype: string
-  - name: translation
-    dtype:
-      translation:
-        languages:
-        - de
-        - pl
-  splits:
-  - name: train
-    num_bytes: 298637031
-    num_examples: 916643
-  download_size: 106891602
-  dataset_size: 298637031
-- config_name: fr-nl
-  features:
-  - name: id
-    dtype: string
-  - name: translation
-    dtype:
-      translation:
-        languages:
-        - fr
-        - nl
-  splits:
-  - name: train
-    num_bytes: 862303220
-    num_examples: 2687673
-  download_size: 319804705
-  dataset_size: 862303220
+  download_size: 3718608
+  dataset_size: 5790979
 - config_name: en-sw
   features:
   - name: id
@@ -185,10 +179,10 @@ dataset_info:
         - sw
   splits:
   - name: train
-    num_bytes: 44264442
+    num_bytes: 44264274
     num_examples: 132520
-  download_size: 18611087
-  dataset_size: 44264442
+  download_size: 30553316
+  dataset_size: 44264274
 - config_name: en-tl
   features:
   - name: id
@@ -201,10 +195,10 @@ dataset_info:
         - tl
   splits:
   - name: train
-    num_bytes: 82502798
+    num_bytes: 82502498
     num_examples: 248689
-  download_size: 32933118
-  dataset_size: 82502798
+  download_size: 54686324
+  dataset_size: 82502498
 - config_name: es-gl
   features:
   - name: id
@@ -217,21 +211,59 @@ dataset_info:
         - gl
   splits:
   - name: train
-    num_bytes: 582660901
+    num_bytes: 582658645
     num_examples: 1879689
-  download_size: 236696353
-  dataset_size: 582660901
-config_names:
-- de-pl
-- el-en
-- en-ha
-- en-ig
-- en-km
-- en-so
-- en-sw
-- en-tl
-- es-gl
-- fr-nl
+  download_size: 406732310
+  dataset_size: 582658645
+- config_name: fr-nl
+  features:
+  - name: id
+    dtype: string
+  - name: translation
+    dtype:
+      translation:
+        languages:
+        - fr
+        - nl
+  splits:
+  - name: train
+    num_bytes: 862299992
+    num_examples: 2687673
+  download_size: 550812954
+  dataset_size: 862299992
+configs:
+- config_name: de-pl
+  data_files:
+  - split: train
+    path: de-pl/train-*
+- config_name: el-en
+  data_files:
+  - split: train
+    path: el-en/train-*
+- config_name: en-km
+  data_files:
+  - split: train
+    path: en-km/train-*
+- config_name: en-so
+  data_files:
+  - split: train
+    path: en-so/train-*
+- config_name: en-sw
+  data_files:
+  - split: train
+    path: en-sw/train-*
+- config_name: en-tl
+  data_files:
+  - split: train
+    path: en-tl/train-*
+- config_name: es-gl
+  data_files:
+  - split: train
+    path: es-gl/train-*
+- config_name: fr-nl
+  data_files:
+  - split: train
+    path: fr-nl/train-*
 ---
 
 # Dataset Card for OpusParaCrawl
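
The `configs` block added above is what binds each configuration name to its Parquet shards. The same resolution can be reproduced by hand with the generic `parquet` builder; a sketch, using the glob from the `es-gl` entry and assuming a local clone with the LFS objects pulled:

```python
# Sketch: load the es-gl config by pointing the generic parquet builder
# at the same glob its `configs` entry declares (local clone assumed).
from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"train": "es-gl/train-*.parquet"},
    split="train",
)
print(ds.num_rows)  # 1879689 examples across the two es-gl shards
```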
de-pl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9757d9b7e6e18c92af1e511c32613f9287967a130b9dc38258892c0ab7c00d74
+size 183957290

el-en/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5737cf3f7bebd5d11590e9553bee9b61e19a5a5ec98355b20bdd2893d5cd2f7a
+size 293608487

el-en/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c358a4d35c5a6146f61874386a30601632a16ccfaa4c605c684ca2fe3b5a1cab
+size 293628636

el-en/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f9e22db79387ebe769831cc79a26bdef7faa75c5e51935f4ad78413707f0e38
+size 293478941

el-en/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37a3beb5ba7cb239f0bfaca11878eab8525d66ab9025cbe532db464e53360a75
+size 293638533

el-en/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d81fa504129605bcfaa399c0900013d0a4078eef0d5fdee60c174276b63b90c9
+size 293326617

el-en/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dea2333a8360280baccfade60c95b52796e9c83701ce29d158511eca2480fb95
+size 293103091

el-en/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9218c5553e4276bc265a9d6a5dd68054bd4d978597fdab08118890c73380fb5f
+size 293553577

el-en/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d030e0e03ee0936498e7a2cd34602a549cbfcdffaf9c52a4295c85325446f068
+size 293476723

el-en/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1511cfc96e8fb8fcc0ab706ad6d70c9d3bc323c865dc576f03af2787ad2a8bd6
+size 293480805

el-en/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5255e3ca1e67a7dc39f441d5b4f44da562c1bf64b7d2bd3cf249e16c03655d85
+size 293293484

el-en/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c58470b1834bb96ecd9e86d957a1930086e216fcb948a96fd4dcb2893cb54d0
+size 293496276

el-en/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aa1feb66fe3658beca5c8c5c52127072b0a2097c546124188eaaaa328e9669c
+size 293519459

el-en/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51128fcb70adc4210cf1987316193626894080c290c2282c2193cd95eabb3d2c
+size 293456176

el-en/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d311b9ae0f4754fe569fdd682b6fb11f202c4746e57a1d19f356f51bc9374b0c
+size 293318362

en-km/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5daf5c32af896dd8669ae9367344487a13111039cbd0fc7825d604771e15fd6
+size 16582595

en-so/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2bd66e42dfb05c0daf0e20e9dac0db0812c3f03f76643a94bcca7c5b0fccc7c
+size 3718608

en-sw/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2b1a34c9036b96611edd9980d6b577b418580b4970c96582f962849f66a27e0
+size 30553316

en-tl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a0e963ba6d912295ac9394e91c0739318044aa472802075a649f3a8954d8a38
+size 54686324

es-gl/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f9ddfc0f1c229538d8dcf448c6f164b108dcc58ada40023f8c7cad6fa703e8a
+size 203401981

es-gl/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65f917470534a690fe39d64ff874906e496401dd4a196f329a9236bb41d28888
+size 203330329

fr-nl/train-00000-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b48c3b5e5a112351e144fe3bde6da35f4e0643aac2fa439686d0eb420d2c7a6
+size 244233570

fr-nl/train-00001-of-00002.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac8efa0d91423420b0ad85a38f1d1c016cd61faaa49e36a51c63305afa5bb7ae
+size 306579384
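
Each file added above is a git-LFS pointer: a three-line stub whose `oid` and `size` identify the real Parquet bytes held in LFS storage. A small sketch that parses such a stub (it only applies while the file on disk is still the pointer, i.e. before `git lfs pull` replaces it with the actual data):

```python
# Sketch: split a git-LFS pointer stub into its key/value fields.
# The path is one of the shards listed above; assumes the repo was cloned
# without pulling LFS objects, so the file on disk is still the stub.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

info = parse_lfs_pointer("de-pl/train-00000-of-00001.parquet")
print(info["oid"])   # sha256:9757d9b7...
print(info["size"])  # 183957290 (bytes)
```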
opus_paracrawl.py DELETED
@@ -1,155 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-import os
-
-import datasets
-
-
-_DESCRIPTION = """\
-Parallel corpora from Web Crawls collected in the ParaCrawl project.
-
-42 languages, 43 bitexts
-total number of files: 59,996
-total number of tokens: 56.11G
-total number of sentence fragments: 3.13G
-"""
-_HOMEPAGE = "http://opus.nlpl.eu/ParaCrawl.php"
-_CITATION = r"""\
-@inproceedings{banon-etal-2020-paracrawl,
-    title = "{P}ara{C}rawl: Web-Scale Acquisition of Parallel Corpora",
-    author = "Ba{\~n}{\'o}n, Marta and
-      Chen, Pinzhen and
-      Haddow, Barry and
-      Heafield, Kenneth and
-      Hoang, Hieu and
-      Espl{\`a}-Gomis, Miquel and
-      Forcada, Mikel L. and
-      Kamran, Amir and
-      Kirefu, Faheem and
-      Koehn, Philipp and
-      Ortiz Rojas, Sergio and
-      Pla Sempere, Leopoldo and
-      Ram{\'\i}rez-S{\'a}nchez, Gema and
-      Sarr{\'\i}as, Elsa and
-      Strelec, Marek and
-      Thompson, Brian and
-      Waites, William and
-      Wiggins, Dion and
-      Zaragoza, Jaume",
-    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
-    month = jul,
-    year = "2020",
-    address = "Online",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/2020.acl-main.417",
-    doi = "10.18653/v1/2020.acl-main.417",
-    pages = "4555--4567",
-}
-@InProceedings{TIEDEMANN12.463,
-    author = {Jörg Tiedemann},
-    title = {Parallel Data, Tools and Interfaces in OPUS},
-    booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-    year = {2012},
-    month = {may},
-    date = {23-25},
-    address = {Istanbul, Turkey},
-    editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Uğur Doğan and Bente Maegaard and Joseph Mariani and Asuncion Moreno and Jan Odijk and Stelios Piperidis},
-    publisher = {European Language Resources Association (ELRA)},
-    isbn = {978-2-9517408-7-7},
-    language = {english}
-}
-"""
-
-_VERSION = "9.0.0"
-_BASE_NAME = "ParaCrawl.{}.{}"
-_BASE_URL = "https://object.pouta.csc.fi/OPUS-ParaCrawl/v9/moses/{}-{}.txt.zip"
-# Note that only a few pairs are listed here; a config can be built for any language pair.
-_LANGUAGE_PAIRS = [
-    ("el", "en"),
-    ("en", "km"),
-    ("en", "so"),
-    ("de", "pl"),
-    ("fr", "nl"),
-    ("en", "sw"),
-    ("en", "tl"),
-    ("es", "gl"),
-]
-
-
-class ParaCrawlConfig(datasets.BuilderConfig):
-    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{lang1}-{lang2}",
-            **kwargs,
-        )
-        assert lang1 != lang2, "'language 1' & 'language 2' should be different from each other"
-        self.lang1 = lang1
-        self.lang2 = lang2
-
-
-class OpusParaCrawl(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        ParaCrawlConfig(
-            lang1=lang1,
-            lang2=lang2,
-            description=f"Translating {lang1} to {lang2} or vice versa",
-            version=datasets.Version(_VERSION),
-        )
-        for lang1, lang2 in _LANGUAGE_PAIRS
-    ]
-    BUILDER_CONFIG_CLASS = ParaCrawlConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        download_url = _BASE_URL.format(self.config.lang1, self.config.lang2)
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        lang1, lang2 = self.config.lang1, self.config.lang2
-        folder = lang1 + "-" + lang2
-        lang1_filename = _BASE_NAME.format(folder, lang1)
-        lang2_filename = _BASE_NAME.format(folder, lang2)
-        lang1_path = os.path.join(datapath, lang1_filename)
-        lang2_path = os.path.join(datapath, lang2_filename)
-        with open(lang1_path, encoding="utf-8") as f1, open(lang2_path, encoding="utf-8") as f2:
-            for id_, (lang1_sentence, lang2_sentence) in enumerate(zip(f1, f2)):
-                lang1_sentence = lang1_sentence.strip()
-                lang2_sentence = lang2_sentence.strip()
-                yield id_, {
-                    "id": str(id_),
-                    "translation": {lang1: lang1_sentence, lang2: lang2_sentence},
-                }
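
The deleted builder above downloaded two aligned moses text files from OPUS and zipped them line by line at load time; after this commit the same rows are materialized in the Parquet shards. A sketch of inspecting one shard directly with pyarrow (shard path taken from the listing above; assumes the LFS objects have been pulled):

```python
# Sketch: read a converted shard with pyarrow. The Translation feature is
# stored as a struct of per-language strings in the Parquet schema.
import pyarrow.parquet as pq

table = pq.read_table("de-pl/train-00000-of-00001.parquet")
print(table.schema)    # id: string, translation: struct<de: string, pl: string>
print(table.num_rows)  # 916643 — de-pl fits in a single shard
```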