SamiunIslam parquet-converter committed on
Commit
e2a60b6
·
0 Parent(s):

Duplicate from Helsinki-NLP/ecb

Browse files

Co-authored-by: Parquet-converter (BOT) <parquet-converter@users.noreply.huggingface.co>

Files changed (3) hide show
  1. .gitattributes +27 -0
  2. README.md +251 -0
  3. ecb.py +128 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - found
4
+ language_creators:
5
+ - found
6
+ language:
7
+ - cs
8
+ - da
9
+ - de
10
+ - el
11
+ - en
12
+ - es
13
+ - et
14
+ - fi
15
+ - fr
16
+ - hu
17
+ - it
18
+ - lt
19
+ - lv
20
+ - mt
21
+ - nl
22
+ - pl
23
+ - pt
24
+ - sk
25
+ - sl
26
+ license:
27
+ - unknown
28
+ multilinguality:
29
+ - multilingual
30
+ size_categories:
31
+ - 100K<n<1M
32
+ source_datasets:
33
+ - original
34
+ task_categories:
35
+ - translation
36
+ task_ids: []
37
+ paperswithcode_id: ecb
38
+ pretty_name: extension to the EventCorefBank
39
+ dataset_info:
40
+ - config_name: de-fr
41
+ features:
42
+ - name: id
43
+ dtype: string
44
+ - name: translation
45
+ dtype:
46
+ translation:
47
+ languages:
48
+ - de
49
+ - fr
50
+ splits:
51
+ - name: train
52
+ num_bytes: 39514115
53
+ num_examples: 105116
54
+ download_size: 10326178
55
+ dataset_size: 39514115
56
+ - config_name: cs-en
57
+ features:
58
+ - name: id
59
+ dtype: string
60
+ - name: translation
61
+ dtype:
62
+ translation:
63
+ languages:
64
+ - cs
65
+ - en
66
+ splits:
67
+ - name: train
68
+ num_bytes: 19524831
69
+ num_examples: 63716
70
+ download_size: 5360485
71
+ dataset_size: 19524831
72
+ - config_name: el-it
73
+ features:
74
+ - name: id
75
+ dtype: string
76
+ - name: translation
77
+ dtype:
78
+ translation:
79
+ languages:
80
+ - el
81
+ - it
82
+ splits:
83
+ - name: train
84
+ num_bytes: 47300471
85
+ num_examples: 94712
86
+ download_size: 10394277
87
+ dataset_size: 47300471
88
+ - config_name: en-nl
89
+ features:
90
+ - name: id
91
+ dtype: string
92
+ - name: translation
93
+ dtype:
94
+ translation:
95
+ languages:
96
+ - en
97
+ - nl
98
+ splits:
99
+ - name: train
100
+ num_bytes: 43118164
101
+ num_examples: 126482
102
+ download_size: 11360895
103
+ dataset_size: 43118164
104
+ - config_name: fi-pl
105
+ features:
106
+ - name: id
107
+ dtype: string
108
+ - name: translation
109
+ dtype:
110
+ translation:
111
+ languages:
112
+ - fi
113
+ - pl
114
+ splits:
115
+ - name: train
116
+ num_bytes: 12973283
117
+ num_examples: 41686
118
+ download_size: 3521950
119
+ dataset_size: 12973283
120
+ ---
121
+
122
+ # Dataset Card for extension to the EventCorefBank
123
+
124
+ ## Table of Contents
125
+ - [Dataset Description](#dataset-description)
126
+ - [Dataset Summary](#dataset-summary)
127
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
128
+ - [Languages](#languages)
129
+ - [Dataset Structure](#dataset-structure)
130
+ - [Data Instances](#data-instances)
131
+ - [Data Fields](#data-fields)
132
+ - [Data Splits](#data-splits)
133
+ - [Dataset Creation](#dataset-creation)
134
+ - [Curation Rationale](#curation-rationale)
135
+ - [Source Data](#source-data)
136
+ - [Annotations](#annotations)
137
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
138
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
139
+ - [Social Impact of Dataset](#social-impact-of-dataset)
140
+ - [Discussion of Biases](#discussion-of-biases)
141
+ - [Other Known Limitations](#other-known-limitations)
142
+ - [Additional Information](#additional-information)
143
+ - [Dataset Curators](#dataset-curators)
144
+ - [Licensing Information](#licensing-information)
145
+ - [Citation Information](#citation-information)
146
+ - [Contributions](#contributions)
147
+
148
+ ## Dataset Description
149
+
150
+ - **Homepage:** http://opus.nlpl.eu/ECB.php
151
+ - **Repository:** None
152
+ - **Paper:** http://www.lrec-conf.org/proceedings/lrec2012/pdf/463_Paper.pdf
153
+ - **Leaderboard:** [More Information Needed]
154
+ - **Point of Contact:** [More Information Needed]
155
+
156
+ ### Dataset Summary
157
+
158
+ To load a language pair which isn't part of the config, all you need to do is specify the language code as pairs.
159
+ You can find the valid pairs in Homepage section of Dataset Description: http://opus.nlpl.eu/ECB.php
160
+ E.g.
161
+
162
+ `dataset = load_dataset("ecb", lang1="en", lang2="fi")`
163
+
164
+ ### Supported Tasks and Leaderboards
165
+
166
+ [More Information Needed]
167
+
168
+ ### Languages
169
+
170
+ [More Information Needed]
171
+
172
+ ## Dataset Structure
173
+
174
+ ### Data Instances
175
+
176
+ Here are some examples of questions and facts:
177
+
178
+
179
+ ### Data Fields
180
+
181
+ [More Information Needed]
182
+
183
+ ### Data Splits
184
+
185
+ [More Information Needed]
186
+
187
+ ## Dataset Creation
188
+
189
+ ### Curation Rationale
190
+
191
+ [More Information Needed]
192
+
193
+ ### Source Data
194
+
195
+ [More Information Needed]
196
+
197
+ #### Initial Data Collection and Normalization
198
+
199
+ [More Information Needed]
200
+
201
+ #### Who are the source language producers?
202
+
203
+ [More Information Needed]
204
+
205
+ ### Annotations
206
+
207
+ [More Information Needed]
208
+
209
+ #### Annotation process
210
+
211
+ [More Information Needed]
212
+
213
+ #### Who are the annotators?
214
+
215
+ [More Information Needed]
216
+
217
+ ### Personal and Sensitive Information
218
+
219
+ [More Information Needed]
220
+
221
+ ## Considerations for Using the Data
222
+
223
+ ### Social Impact of Dataset
224
+
225
+ [More Information Needed]
226
+
227
+ ### Discussion of Biases
228
+
229
+ [More Information Needed]
230
+
231
+ ### Other Known Limitations
232
+
233
+ [More Information Needed]
234
+
235
+ ## Additional Information
236
+
237
+ ### Dataset Curators
238
+
239
+ [More Information Needed]
240
+
241
+ ### Licensing Information
242
+
243
+ [More Information Needed]
244
+
245
+ ### Citation Information
246
+
247
+ [More Information Needed]
248
+
249
+ ### Contributions
250
+
251
+ Thanks to [@abhishekkrthakur](https://github.com/abhishekkrthakur) for adding this dataset.
ecb.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ import os
18
+
19
+ import datasets
20
+
21
+
22
+ _DESCRIPTION = """\
23
+ Original source: Website and documentatuion from the European Central Bank, compiled and made available by Alberto Simoes (thank you very much!)
24
+ 19 languages, 170 bitexts
25
+ total number of files: 340
26
+ total number of tokens: 757.37M
27
+ total number of sentence fragments: 30.55M
28
+ """
29
+ _HOMEPAGE_URL = ""
30
+ _CITATION = """\
31
+ @InProceedings{TIEDEMANN12.463,
32
+ author = {J�rg Tiedemann},
33
+ title = {Parallel Data, Tools and Interfaces in OPUS},
34
+ booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
35
+ year = {2012},
36
+ month = {may},
37
+ date = {23-25},
38
+ address = {Istanbul, Turkey},
39
+ editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
40
+ publisher = {European Language Resources Association (ELRA)},
41
+ isbn = {978-2-9517408-7-7},
42
+ language = {english}
43
+ }
44
+ """
45
+
46
+ _VERSION = "1.0.0"
47
+ _BASE_NAME = "ECB.{}.{}"
48
+ _BASE_URL = "https://object.pouta.csc.fi/OPUS-ECB/v1/moses/{}-{}.txt.zip"
49
+
50
+ # Please note that only few pairs are shown here. You can use config to generate data for all language pairs
51
+ _LANGUAGE_PAIRS = [
52
+ ("de", "fr"),
53
+ ("cs", "en"),
54
+ ("el", "it"),
55
+ ("en", "nl"),
56
+ ("fi", "pl"),
57
+ ]
58
+
59
+
60
class EcbConfig(datasets.BuilderConfig):
    """BuilderConfig for a single ECB language pair.

    The config name is derived from the two language codes (e.g. ``"de-fr"``),
    which is what lets callers request pairs that are not pre-declared in
    ``BUILDER_CONFIGS`` via ``load_dataset("ecb", lang1=..., lang2=...)``.
    """

    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
        pair_name = f"{lang1}-{lang2}"
        super().__init__(*args, name=pair_name, **kwargs)
        # Keep the individual codes around for the builder's download /
        # generation logic.
        self.lang1 = lang1
        self.lang2 = lang2
69
+
70
+
71
class Ecb(datasets.GeneratorBasedBuilder):
    """Builder for the OPUS ECB parallel corpus (one ``train`` split per pair)."""

    BUILDER_CONFIG_CLASS = EcbConfig
    BUILDER_CONFIGS = [
        EcbConfig(
            lang1=lang1,
            lang2=lang2,
            description=f"Translating {lang1} to {lang2} or vice versa",
            version=datasets.Version(_VERSION),
        )
        for lang1, lang2 in _LANGUAGE_PAIRS
    ]

    def _info(self):
        """Return dataset metadata: an ``id`` string plus a Translation feature."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
            },
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the moses zip for this pair; single train split."""
        url = _BASE_URL.format(self.config.lang1, self.config.lang2)
        extracted_path = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"datapath": extracted_path},
            )
        ]

    def _generate_examples(self, datapath):
        """Yield ``(key, example)`` pairs from the two aligned plain-text files.

        The extracted archive holds one file per language
        (``ECB.<pair>.<lang>``); corresponding lines are translations of
        each other.
        """
        lang1, lang2 = self.config.lang1, self.config.lang2
        pair = lang1 + "-" + lang2
        path1 = os.path.join(datapath, _BASE_NAME.format(pair, lang1))
        path2 = os.path.join(datapath, _BASE_NAME.format(pair, lang2))
        with open(path1, encoding="utf-8") as file1, open(path2, encoding="utf-8") as file2:
            # NOTE(review): zip() stops at the shorter file — assumes both
            # files contain the same number of lines; verify for new pairs.
            for index, (line1, line2) in enumerate(zip(file1, file2)):
                example = {
                    "id": str(index),
                    "translation": {lang1: line1.strip(), lang2: line2.strip()},
                }
                yield index, example