j-chim committed
Commit b1a8573
1 Parent(s): feaec9f

Update data loader for v2

Files changed (1)
  1. wiki_lingua.py +174 -202
wiki_lingua.py CHANGED
@@ -15,6 +15,9 @@
 """WikiLingua: A benchmark dataset for multilingual abstractive summarization."""

 import os
 import datasets

@@ -40,100 +43,105 @@ _HOMEPAGE = "https://github.com/esdurmus/Wikilingua"

 _LICENSE = "CC BY-NC-SA 3.0"

-# TODO update script with new splits
-_URLs = {
-    "wiki_lingua_es_en_v0": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
-    },
-    "wiki_lingua_ru_en_v0": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
-    },
-    "wiki_lingua_tr_en_v0": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
-    },
-    "wiki_lingua_vi_en_v0": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua.zip",
-    },
-    "wiki_lingua_arabic_ar": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/arabic.zip",
-    },
-    "wiki_lingua_chinese_zh": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/chinese.zip",
-    },
-    "wiki_lingua_czech_cs": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/czech.zip",
-    },
-    "wiki_lingua_dutch_nl": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/dutch.zip",
-    },
-    "wiki_lingua_english_en": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/english.zip",
-    },
-    "wiki_lingua_french_fr": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/french.zip",
-    },
-    "wiki_lingua_german_de": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/german.zip",
-    },
-    "wiki_lingua_hindi_hi": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/hindi.zip",
-    },
-    "wiki_lingua_indonesian_id": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/indonesian.zip",
-    },
-    "wiki_lingua_italian_it": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/italian.zip",
-    },
-    "wiki_lingua_japanese_ja": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/japanese.zip",
-    },
-    "wiki_lingua_korean_ko": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/korean.zip",
-    },
-    "wiki_lingua_portuguese_pt": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/portuguese.zip",
-    },
-    "wiki_lingua_russian_ru": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/russian.zip",
-    },
-    "wiki_lingua_spanish_es": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/spanish.zip",
-    },
-    "wiki_lingua_thai_th": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/thai.zip",
-    },
-    "wiki_lingua_turkish_tr": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/turkish.zip",
-    },
-    "wiki_lingua_vietnamese_vi": {
-        "data": "https://storage.googleapis.com/huggingface-nlp/datasets/gem/gem_wikilingua_full/vietnamese.zip",
-    },
 }

-VERSION = datasets.Version("1.1.0")


 class WikilinguaConfig(datasets.BuilderConfig):
     """BuilderConfig for WikiLingua."""

     def __init__(self, name, **kwargs):

         eles = name.split("_")
-        is_v0 = "v0" in name
-        if is_v0:
-            source_lang, target_lang = eles[-3], eles[-2]
         else:
-            target_lang = eles[-1]
-            source_lang = target_lang

         super().__init__(
             name=name,
-            description=f"Wikilingua summarisation data ({source_lang} to {target_lang})",
             **kwargs,
         )
-        self.is_v0 = is_v0
-        self.source_lang = source_lang
-        self.target_lang = target_lang


 class WikiLingua(datasets.GeneratorBasedBuilder):
@@ -143,41 +151,27 @@ class WikiLingua(datasets.GeneratorBasedBuilder):

     BUILDER_CONFIGS = [
         WikilinguaConfig(
-            name=lang,
             version=VERSION,
         )
-        for lang in _URLs
     ]

-    DEFAULT_CONFIG_NAME = "wiki_lingua_es_en_v0"

     def _info(self):
-        if self.config.is_v0:
-            features = datasets.Features(
-                {
-                    "gem_id": datasets.Value("string"),
-                    "gem_parent_id": datasets.Value("string"),
-                    "source": datasets.Value("string"),
-                    "target": datasets.Value("string"),
-                    "references": [datasets.Value("string")],
-                }
-            )
-        else:
-            lang = self.config.source_lang
-            features = datasets.Features(
                 {
                     "gem_id": datasets.Value("string"),
                     "gem_parent_id": datasets.Value("string"),
-                    "source_aligned": datasets.Translation(languages=[lang, "en"]),
-                    "target_aligned": datasets.Translation(languages=[lang, "en"]),
                     "source": datasets.Value("string"),
                     "target": datasets.Value("string"),
-                    "references": [datasets.Value("string")],
                 }
-            )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
             supervised_keys=None,
             homepage=_HOMEPAGE,
             license=_LICENSE,
@@ -186,109 +180,87 @@ class WikiLingua(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        dl_dir = dl_manager.download_and_extract(_URLs[self.config.name])
-        if self.config.is_v0:

-            lang = self.config.source_lang
-            base_dir = os.path.join(
-                dl_dir["data"], "GEM_data_crosslingual", f"{lang}_en"
             )
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "filepath": base_dir,
-                        "split": "train",
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    gen_kwargs={
-                        "filepath": base_dir,
-                        "split": "val",
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "filepath": base_dir,
-                        "split": "test",
-                    },
-                ),
-            ]
-        else:
-            lang = self.config.source_lang
-            lang_name = self.config.name.split("_")[-2]
-            base_dir = os.path.join(dl_dir["data"], lang_name)
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    gen_kwargs={
-                        "filepath": base_dir,
-                        "split": "train",
-                        "lang": lang,
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    gen_kwargs={
-                        "filepath": base_dir,
-                        "split": "val",
-                        "lang": lang,
-                    },
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    gen_kwargs={
-                        "filepath": base_dir,
-                        "split": "test",
-                        "lang": lang,
-                    },
-                ),
-            ]
-
-    def _generate_examples(self, filepath, split, lang=None):
         """Yields examples."""
-        if self.config.is_v0:
-            source_path = os.path.join(filepath, f"{split}.src")
-            target_path = os.path.join(filepath, f"{split}.tgt")
-            with open(source_path, encoding="utf-8") as f_in:
-                with open(target_path, encoding="utf-8") as f_out:
-                    for id_, (src, tgt) in enumerate(zip(f_in, f_out)):
-                        yield id_, {
-                            "gem_id": f"{self.config.name}-{split}-{id_}",
-                            "gem_parent_id": f"{self.config.name}-{split}-{id_}",
-                            "source": src.strip(),
-                            "target": tgt.strip(),
-                            "references": [] if split == "train" else [tgt.strip()],
-                        }
         else:
-            source_path = os.path.join(filepath, f"{split}.src.{lang}")
-            source_path_en = os.path.join(filepath, f"{split}.src.en")
-            target_path = os.path.join(filepath, f"{split}.tgt.{lang}")
-            target_path_en = os.path.join(filepath, f"{split}.tgt.en")
-
-            with open(source_path, encoding="utf-8") as f_in_ln:
-                with open(source_path_en, encoding="utf-8") as f_in_en:
-                    with open(target_path, encoding="utf-8") as f_out_ln:
-                        with open(target_path_en, encoding="utf-8") as f_out_en:
-                            for id_, (src_ln, src_en, tgt_ln, tgt_en) in enumerate(
-                                zip(f_in_ln, f_in_en, f_out_ln, f_out_en)
                             ):
-                                yield id_, {
-                                    "gem_id": f"{self.config.name}-{split}-{id_}",
-                                    "gem_parent_id": f"{self.config.name}-{split}-{id_}",
-                                    "source_aligned": {
-                                        lang: src_ln.strip(),
-                                        "en": src_en.strip(),
-                                    },
-                                    "target_aligned": {
-                                        lang: tgt_ln.strip(),
-                                        "en": tgt_en.strip(),
-                                    },
-                                    "source": src_ln.strip(),
-                                    "target": tgt_en.strip(),
-                                    "references": []
-                                    if split == "train"
-                                    else [tgt_en.strip()],
-                                }

 """WikiLingua: A benchmark dataset for multilingual abstractive summarization."""

 import os
+import glob
+import pickle
+import re
 import datasets

 

 _LICENSE = "CC BY-NC-SA 3.0"

+_URL = "./wikilingua_GEM_v2.tar.gz"
+
+VERSION = datasets.Version("2.0.0")
+
+valid_language_codes = {
+    "ar",
+    "cs",
+    "de",
+    "en",
+    "es",
+    "fr",
+    "hi",
+    "id",
+    "it",
+    "ja",
+    "ko",
+    "nl",
+    "pt",
+    "ru",
+    "th",
+    "tr",
+    "vi",
+    "zh",
 }

+valid_config_names = (
+    # multilingual
+    list(valid_language_codes)
+    + [
+        # crosslingual / bridge
+        f"{src}_{tgt}"
+        for src in valid_language_codes
+        for tgt in valid_language_codes
+        if src != tgt
+    ]
+    # load all multilingual / all crosslingual
+    + ["multilingual", "crosslingual"]
+)
+
+
+class WikilinguaModes:
+    MULTILINGUAL = "multilingual"  # L -> L
+    CROSSLINGUAL = "crosslingual"  # L1 -> L1, L2 -> L2, L1 -> L2, L2 -> L1
+    BRIDGE = "bridge"  # L -> en, en -> L, L -> L


 class WikilinguaConfig(datasets.BuilderConfig):
     """BuilderConfig for WikiLingua."""

     def __init__(self, name, **kwargs):
+        """
+        Args:
+            name (string): configuration name that indicates task setup and languages.
+
+            1. multilingual - <lang>
+            2. crosslingual - <lang1>_<lang2>
+            3. english as bridge - en_<lang>
+            4. load all multilingual - multilingual
+            5. load all crosslingual - crosslingual
+
+            lang refers to the respective two-letter language code.
+            note that the order of lang1/lang2 does not matter;
+            for language pair (L1, L2), we load L1 <-> L2 and L1 -> L1, L2 -> L2.
+        """
+        if name not in valid_config_names:
+            raise ValueError(
+                f"Expected config name to be one of: {', '.join(valid_config_names)}"
+            )

         eles = name.split("_")
+
+        if name in (WikilinguaModes.MULTILINGUAL, WikilinguaModes.CROSSLINGUAL):
+            self.mode = name
+            self.source_lang = None
+            self.target_lang = None
+            description = f"Wikilingua summarisation data ({self.mode}; all instances)"
         else:
+            if len(eles) == 1:
+                mode = WikilinguaModes.MULTILINGUAL
+                source_lang, target_lang = name, name
+            elif len(eles) == 2:
+                source_lang, target_lang = eles
+                if source_lang == "en" or target_lang == "en":
+                    mode = WikilinguaModes.BRIDGE
+                else:
+                    mode = WikilinguaModes.CROSSLINGUAL
+            self.source_lang = source_lang
+            self.target_lang = target_lang
+            self.mode = mode
+            description = (
+                f"Wikilingua summarisation data ({mode}; {source_lang}, {target_lang})"
+            )
+        self.languages = set([self.source_lang, self.target_lang])

         super().__init__(
             name=name,
+            description=description,
             **kwargs,
         )


 class WikiLingua(datasets.GeneratorBasedBuilder):
 

     BUILDER_CONFIGS = [
         WikilinguaConfig(
+            name=config_name,
             version=VERSION,
         )
+        for config_name in valid_config_names
     ]

+    DEFAULT_CONFIG_NAME = "en"

     def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
                 {
                     "gem_id": datasets.Value("string"),
                     "gem_parent_id": datasets.Value("string"),
+                    "source_language": datasets.Value("string"),
+                    "target_language": datasets.Value("string"),
                     "source": datasets.Value("string"),
                     "target": datasets.Value("string"),
                 }
+            ),
             supervised_keys=None,
             homepage=_HOMEPAGE,
             license=_LICENSE,

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
+        dl_dir = dl_manager.download_and_extract(_URL)
+        data_dir = os.path.join(dl_dir, "GEM_V2")

+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={"filepath": data_dir, "split": "train"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={"filepath": data_dir, "split": "validation"},
+            ),
+        ] + [
+            datasets.SplitGenerator(
+                name=re.search("wikilingua_(.+test)", fn).group(1).replace(".", "_"),
+                gen_kwargs={"filepath": fn, "split": "test"},
             )
+            for fn in glob.glob(os.path.join(data_dir, "wikilingua_*.test.pk"))
+        ]
+
+    def _generate_examples(self, filepath, split):
         """Yields examples."""
+
+        if split == "test":
+            with open(filepath, "rb") as f:
+                data = pickle.load(f)
+            for d in data:
+                idx = d["id"].replace(".", "-")
+                yield idx, {
+                    "gem_id": idx,
+                    "gem_parent_id": idx,
+                    "source_language": d["source"],
+                    "target_language": d["target"],
+                    "source": d["document"].strip(),
+                    "target": d["summary"].strip(),
+                }
         else:
+            # filter data as needed for train & validation sets
+            if split == "validation":
+                filepaths = glob.glob(os.path.join(filepath, "wikilingua_*.val.pk"))
+            else:
+                filepaths = glob.glob(os.path.join(filepath, "wikilingua_*.train.pk"))
+            for filepath in filepaths:
+                # skip files if they are irrelevant to the task mode
+                if (
+                    self.config.mode == WikilinguaModes.MULTILINGUAL
+                    and "crosslingual" in filepath
+                ) or (
+                    self.config.mode == WikilinguaModes.CROSSLINGUAL
+                    and "multilingual" in filepath
+                ):
+                    continue
+
+                with open(filepath, "rb") as f:
+                    data = pickle.load(f)
+
+                for d in data:
+                    idx = d["id"].replace(".", "-")
+                    src = d["document"].strip()
+                    tgt = d["summary"].strip()
+                    src_lang = d["source"]
+                    tgt_lang = d["target"]
+
+                    # if loading a specific language pair, filter for those languages
+                    if any(self.config.languages):
+                        if not (
+                            src_lang in self.config.languages
+                            and tgt_lang in self.config.languages
                         ):
+                            continue
+
+                    # in bridge, we are interested in L <-> en and L -> L, but not en -> en
+                    if self.config.mode == WikilinguaModes.BRIDGE:
+                        if src_lang == "en" and tgt_lang == "en":
+                            continue
+
+                    yield idx, {
+                        "gem_id": idx,
+                        "gem_parent_id": idx,
+                        "source_language": src_lang,
+                        "target_language": tgt_lang,
+                        "source": src,
+                        "target": tgt,
+                    }
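
For orientation, a minimal usage sketch of the v2 loader under the new configuration names follows. This is a hypothetical local invocation, assuming the datasets library and this script (together with its wikilingua_GEM_v2.tar.gz archive) are available; the dataset's actual Hub id is not shown in this commit.

    import datasets

    # Multilingual setup: Spanish documents to Spanish summaries only.
    es = datasets.load_dataset("./wiki_lingua.py", "es")

    # English-as-bridge setup for Spanish: es <-> en plus es -> es (en -> en is filtered out).
    es_en = datasets.load_dataset("./wiki_lingua.py", "es_en")

    # Every cross-lingual instance across all language pairs.
    xl = datasets.load_dataset("./wiki_lingua.py", "crosslingual")

    row = es["train"][0]
    print(row["source_language"], row["target_language"], row["target"][:80])

Besides "train" and "validation", _split_generators registers one extra test split per wikilingua_*.test.pk file it finds; for example, a file named wikilingua_crosslingual.test.pk (hypothetical name) would surface as the split "crosslingual_test".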