Modalities: Text · Formats: parquet · Libraries: Datasets, pandas
Commit df99ecc by albertvillanova (parent: 92a3ce9)

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (afe644189b3d30f8be69095d32fe69d3b1bb27bd)
- Add ar-pl data files (39c657a89fb8fb98a25d2d233ff74687b409cbae)
- Add en-ru data files (bc3519ae82a9c1202e4cc5e390d6f531d4461d35)
- Add en-sl data files (4464bf71262a70cc4dd51a47eeb4087d042e70a7)
- Add en-vi data files (cee4f9124b94402a62935a968b3b5224bbfbaf57)
- Delete loading script (d202709fb2dde294984767633328bb544eda6753)
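With the data files stored as Parquet and the card's `configs` mapping pointing at them (see the diff below), the dataset now loads through the Hub's generic Parquet support rather than a loading script. A minimal sketch, assuming the canonical `opus_wikipedia` repository id (adjust to the dataset's actual Hub location):

```python
from datasets import load_dataset

# Repository id is an assumption; replace with the dataset's actual Hub id.
ds = load_dataset("opus_wikipedia", "ar-en", split="train")

print(ds.num_rows)           # 151136 for the ar-en configuration
print(ds[0]["translation"])  # {'ar': '...', 'en': '...'}
```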

README.md CHANGED
@@ -36,8 +36,13 @@ source_datasets:
 task_categories:
 - translation
 task_ids: []
-paperswithcode_id: null
 pretty_name: OpusWikipedia
+config_names:
+- ar-en
+- ar-pl
+- en-ru
+- en-sl
+- en-vi
 dataset_info:
 - config_name: ar-en
   features:
@@ -51,10 +56,10 @@ dataset_info:
         - en
   splits:
   - name: train
-    num_bytes: 45207715
+    num_bytes: 45207523
     num_examples: 151136
-  download_size: 16097997
-  dataset_size: 45207715
+  download_size: 26617751
+  dataset_size: 45207523
 - config_name: ar-pl
   features:
   - name: id
@@ -67,11 +72,11 @@ dataset_info:
         - pl
   splits:
   - name: train
-    num_bytes: 304851676
+    num_bytes: 304850680
     num_examples: 823715
-  download_size: 104585718
-  dataset_size: 304851676
-- config_name: en-sl
+  download_size: 175806051
+  dataset_size: 304850680
+- config_name: en-ru
   features:
   - name: id
     dtype: string
@@ -80,14 +85,14 @@ dataset_info:
       translation:
         languages:
         - en
-        - sl
+        - ru
   splits:
   - name: train
-    num_bytes: 30479739
-    num_examples: 140124
-  download_size: 11727538
-  dataset_size: 30479739
-- config_name: en-ru
+    num_bytes: 167648361
+    num_examples: 572717
+  download_size: 97008376
+  dataset_size: 167648361
+- config_name: en-sl
   features:
   - name: id
     dtype: string
@@ -96,13 +101,13 @@ dataset_info:
       translation:
         languages:
         - en
-        - ru
+        - sl
   splits:
   - name: train
-    num_bytes: 167649057
-    num_examples: 572717
-  download_size: 57356138
-  dataset_size: 167649057
+    num_bytes: 30479559
+    num_examples: 140124
+  download_size: 18557819
+  dataset_size: 30479559
 - config_name: en-vi
   features:
   - name: id
@@ -115,16 +120,31 @@ dataset_info:
         - vi
   splits:
   - name: train
-    num_bytes: 7571598
+    num_bytes: 7571526
     num_examples: 58116
-  download_size: 2422413
-  dataset_size: 7571598
-config_names:
-- ar-en
-- ar-pl
-- en-ru
-- en-sl
-- en-vi
+  download_size: 3969559
+  dataset_size: 7571526
+configs:
+- config_name: ar-en
+  data_files:
+  - split: train
+    path: ar-en/train-*
+- config_name: ar-pl
+  data_files:
+  - split: train
+    path: ar-pl/train-*
+- config_name: en-ru
+  data_files:
+  - split: train
+    path: en-ru/train-*
+- config_name: en-sl
+  data_files:
+  - split: train
+    path: en-sl/train-*
+- config_name: en-vi
+  data_files:
+  - split: train
+    path: en-vi/train-*
 ---

 # Dataset Card for OpusWikipedia
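The new `configs` section is what maps each configuration name to its Parquet shards, replacing the logic that used to live in the loading script. The shards can also be read directly; below is a rough sketch with pandas over the Hub filesystem, where `<namespace>` is a placeholder for the repository owner and `huggingface_hub` is assumed to be installed for the `hf://` protocol:

```python
import pandas as pd

# Placeholder namespace; the shard path matches the file added in this commit.
path = "hf://datasets/<namespace>/opus_wikipedia/ar-en/train-00000-of-00001.parquet"
df = pd.read_parquet(path)

print(len(df))                   # 151136 rows, per the updated dataset card
print(df.loc[0, "translation"])  # dict-like value with 'ar' and 'en' keys
```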
ar-en/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:941c69f31ce60f286fddc87ef9f83e2b545e9ef0bbd7a8e87088665c237aad4f
+size 26617751

ar-pl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a956863553d3c90cb56d21799c1c4183dd56a844d32910dcf57aafd35b021fe
+size 175806051

en-ru/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:774267e5caf84e9c13037634a8a5b8b2679399c9fd9cdcce8855934c2badba19
+size 97008376

en-sl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df11bc87c0de85596bdac4bf4065f1d852e6f7923830a2942d44ea8228043162
+size 18557819

en-vi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2d02de8f871e31837473a966bfc4bda15d759919a7d1fd292af020741c08a69
+size 3969559
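Each of the Parquet entries above is a Git LFS pointer: the repository tracks only the object hash (`oid`) and `size`, and the actual bytes live in LFS storage. A hedged sketch of materialising one shard locally with `huggingface_hub` (repository id again a placeholder):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the real Parquet payload into the local cache.
local_path = hf_hub_download(
    repo_id="<namespace>/opus_wikipedia",  # placeholder repository id
    filename="en-vi/train-00000-of-00001.parquet",
    repo_type="dataset",
)
print(local_path)  # downloaded file should be 3969559 bytes, matching the pointer's size
```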
opus_wikipedia.py DELETED
@@ -1,127 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-import os
-
-import datasets
-
-
-_DESCRIPTION = """\
-This is a corpus of parallel sentences extracted from Wikipedia by Krzysztof Wołk and Krzysztof Marasek. Please cite the following publication if you use the data: Krzysztof Wołk and Krzysztof Marasek: Building Subject-aligned Comparable Corpora and Mining it for Truly Parallel Sentence Pairs., Procedia Technology, 18, Elsevier, p.126-132, 2014
-20 languages, 36 bitexts
-total number of files: 114
-total number of tokens: 610.13M
-total number of sentence fragments: 25.90M
-"""
-_HOMEPAGE_URL = "http://opus.nlpl.eu/Wikipedia.php"
-_CITATION = """\
-@InProceedings{TIEDEMANN12.463,
-author = {J{\"o}rg Tiedemann},
-title = {Parallel Data, Tools and Interfaces in OPUS},
-booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-year = {2012},
-month = {may},
-date = {23-25},
-address = {Istanbul, Turkey},
-editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
-publisher = {European Language Resources Association (ELRA)},
-isbn = {978-2-9517408-7-7},
-language = {english}
-}
-"""
-
-_VERSION = "1.0.0"
-_BASE_NAME = "Wikipedia.{}.{}"
-_BASE_URL = "https://object.pouta.csc.fi/OPUS-Wikipedia/v1.0/moses/{}-{}.txt.zip"
-# Please note that only few pairs are shown here. You can use config to generate data for all language pairs
-_LANGUAGE_PAIRS = [
-    ("ar", "en"),
-    ("ar", "pl"),
-    ("en", "sl"),
-    ("en", "ru"),
-    ("en", "vi"),
-]
-
-
-class WikipediaConfig(datasets.BuilderConfig):
-    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{lang1}-{lang2}",
-            **kwargs,
-        )
-        self.lang1 = lang1
-        self.lang2 = lang2
-
-
-class OpusWikipedia(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        WikipediaConfig(
-            lang1=lang1,
-            lang2=lang2,
-            description=f"Translating {lang1} to {lang2} or vice versa",
-            version=datasets.Version(_VERSION),
-        )
-        for lang1, lang2 in _LANGUAGE_PAIRS
-    ]
-    BUILDER_CONFIG_CLASS = WikipediaConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        def _base_url(lang1, lang2):
-            return _BASE_URL.format(lang1, lang2)
-
-        download_url = _base_url(self.config.lang1, self.config.lang2)
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        l1, l2 = self.config.lang1, self.config.lang2
-        folder = l1 + "-" + l2
-        l1_file = _BASE_NAME.format(folder, l1)
-        l2_file = _BASE_NAME.format(folder, l2)
-        l1_path = os.path.join(datapath, l1_file)
-        l2_path = os.path.join(datapath, l2_file)
-        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {l1: x, l2: y},
-                    },
-                )
-                yield result
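The Parquet conversion does not change the schema the deleted builder declared in `_info`: every row still has an `id` string and a `translation` dict keyed by the two languages of the configuration. Restated with the `datasets` feature API (ar-en shown as an example):

```python
from datasets import Features, Translation, Value

# Feature layout previously built by the loading script, now stored directly in the Parquet shards.
features = Features(
    {
        "id": Value("string"),
        "translation": Translation(languages=("ar", "en")),
    }
)

# Shape of a single example, mirroring what _generate_examples used to yield.
example = {"id": "0", "translation": {"ar": "...", "en": "..."}}
```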