lhoestq (HF staff) committed on
Commit 21f88c0 (parent: 68bd9e0)

Delete loading script

Files changed (1)
  1. xtreme.py +0 -940
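
With the loading script deleted, the dataset is presumably served from data files hosted in the repository (e.g. auto-converted Parquet) rather than by executing xtreme.py. A minimal sketch of the usual access path, assuming the repository id stays "xtreme" and the config names defined in the deleted script (for example "XNLI" or "PAN-X.en") are kept:

from datasets import load_dataset

# Config names follow the deleted script's _NAMES list, e.g. "XNLI", "PAN-X.en", "MLQA.en.de".
xnli = load_dataset("xtreme", "XNLI")
panx_en = load_dataset("xtreme", "PAN-X.en")
print(xnli)  # shows the available splits and features
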
xtreme.py DELETED
@@ -1,940 +0,0 @@
1
- """TODO(xtreme): Add a description here."""
2
-
3
-
4
- import csv
5
- import json
6
- import os
7
- import textwrap
8
-
9
- import datasets
10
-
11
-
12
- # TODO(xtreme): BibTeX citation
13
- _CITATION = """\
14
- @article{hu2020xtreme,
15
- author = {Junjie Hu and Sebastian Ruder and Aditya Siddhant and Graham Neubig and Orhan Firat and Melvin Johnson},
16
- title = {XTREME: A Massively Multilingual Multi-task Benchmark for Evaluating Cross-lingual Generalization},
17
- journal = {CoRR},
18
- volume = {abs/2003.11080},
19
- year = {2020},
20
- archivePrefix = {arXiv},
21
- eprint = {2003.11080}
22
- }
23
- """
24
-
25
- # TODO(xtreme):
26
- _DESCRIPTION = """\
27
- The Cross-lingual TRansfer Evaluation of Multilingual Encoders (XTREME) benchmark is a benchmark for the evaluation of
28
- the cross-lingual generalization ability of pre-trained multilingual models. It covers 40 typologically diverse languages
29
- (spanning 12 language families) and includes nine tasks that collectively require reasoning about different levels of
30
- syntax and semantics. The languages in XTREME are selected to maximize language diversity, coverage in existing tasks,
31
- and availability of training data. Among these are many under-studied languages, such as the Dravidian languages Tamil
32
- (spoken in southern India, Sri Lanka, and Singapore), Telugu and Malayalam (spoken mainly in southern India), and the
33
- Niger-Congo languages Swahili and Yoruba, spoken in Africa.
34
- """
35
- _MLQA_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi"]
36
- _XQUAD_LANG = ["ar", "de", "vi", "zh", "en", "es", "hi", "el", "ru", "th", "tr"]
37
- _PAWSX_LANG = ["de", "en", "es", "fr", "ja", "ko", "zh"]
38
- _BUCC_LANG = ["de", "fr", "zh", "ru"]
39
- _TATOEBA_LANG = [
40
- "afr",
41
- "ara",
42
- "ben",
43
- "bul",
44
- "deu",
45
- "cmn",
46
- "ell",
47
- "est",
48
- "eus",
49
- "fin",
50
- "fra",
51
- "heb",
52
- "hin",
53
- "hun",
54
- "ind",
55
- "ita",
56
- "jav",
57
- "jpn",
58
- "kat",
59
- "kaz",
60
- "kor",
61
- "mal",
62
- "mar",
63
- "nld",
64
- "pes",
65
- "por",
66
- "rus",
67
- "spa",
68
- "swh",
69
- "tam",
70
- "tel",
71
- "tgl",
72
- "tha",
73
- "tur",
74
- "urd",
75
- "vie",
76
- ]
77
-
78
- _UD_POS_LANG = [
79
- "Afrikaans",
80
- "Arabic",
81
- "Basque",
82
- "Bulgarian",
83
- "Dutch",
84
- "English",
85
- "Estonian",
86
- "Finnish",
87
- "French",
88
- "German",
89
- "Greek",
90
- "Hebrew",
91
- "Hindi",
92
- "Hungarian",
93
- "Indonesian",
94
- "Italian",
95
- "Japanese",
96
- "Kazakh",
97
- "Korean",
98
- "Chinese",
99
- "Marathi",
100
- "Persian",
101
- "Portuguese",
102
- "Russian",
103
- "Spanish",
104
- "Tagalog",
105
- "Tamil",
106
- "Telugu",
107
- "Thai",
108
- "Turkish",
109
- "Urdu",
110
- "Vietnamese",
111
- "Yoruba",
112
- ]
113
- _PAN_X_LANG = [
114
- "af",
115
- "ar",
116
- "bg",
117
- "bn",
118
- "de",
119
- "el",
120
- "en",
121
- "es",
122
- "et",
123
- "eu",
124
- "fa",
125
- "fi",
126
- "fr",
127
- "he",
128
- "hi",
129
- "hu",
130
- "id",
131
- "it",
132
- "ja",
133
- "jv",
134
- "ka",
135
- "kk",
136
- "ko",
137
- "ml",
138
- "mr",
139
- "ms",
140
- "my",
141
- "nl",
142
- "pt",
143
- "ru",
144
- "sw",
145
- "ta",
146
- "te",
147
- "th",
148
- "tl",
149
- "tr",
150
- "ur",
151
- "vi",
152
- "yo",
153
- "zh",
154
- ]
155
-
156
- _NAMES = ["XNLI", "tydiqa", "SQuAD"]
157
- for lang in _PAN_X_LANG:
158
- _NAMES.append(f"PAN-X.{lang}")
159
- for lang1 in _MLQA_LANG:
160
- for lang2 in _MLQA_LANG:
161
- _NAMES.append(f"MLQA.{lang1}.{lang2}")
162
- for lang in _XQUAD_LANG:
163
- _NAMES.append(f"XQuAD.{lang}")
164
- for lang in _BUCC_LANG:
165
- _NAMES.append(f"bucc18.{lang}")
166
- for lang in _PAWSX_LANG:
167
- _NAMES.append(f"PAWS-X.{lang}")
168
- for lang in _TATOEBA_LANG:
169
- _NAMES.append(f"tatoeba.{lang}")
170
- for lang in _UD_POS_LANG:
171
- _NAMES.append(f"udpos.{lang}")
172
-
173
- _DESCRIPTIONS = {
174
- "tydiqa": textwrap.dedent(
175
- """Gold passage task (GoldP): Given a passage that is guaranteed to contain the
176
- answer, predict the single contiguous span of characters that answers the question. This is more similar to
177
- existing reading comprehension datasets (as opposed to the information-seeking task outlined above).
178
- This task is constructed with two goals in mind: (1) more directly comparing with prior work and (2) providing
179
- a simplified way for researchers to use TyDi QA by providing compatibility with existing code for SQuAD 1.1,
180
- XQuAD, and MLQA. Toward these goals, the gold passage task differs from the primary task in several ways:
181
- only the gold answer passage is provided rather than the entire Wikipedia article;
182
- unanswerable questions have been discarded, similar to MLQA and XQuAD;
183
- we evaluate with the SQuAD 1.1 metrics like XQuAD; and
184
- Thai and Japanese are removed since the lack of whitespace breaks some tools.
185
- """
186
- ),
187
- "XNLI": textwrap.dedent(
188
- """
189
- The Cross-lingual Natural Language Inference (XNLI) corpus is a crowd-sourced collection of 5,000 test and
190
- 2,500 dev pairs for the MultiNLI corpus. The pairs are annotated with textual entailment and translated into
191
- 14 languages: French, Spanish, German, Greek, Bulgarian, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese,
192
- Hindi, Swahili and Urdu. This results in 112.5k annotated pairs. Each premise can be associated with the
193
- corresponding hypothesis in the 15 languages, summing up to more than 1.5M combinations. The corpus is made to
194
- evaluate how to perform inference in any language (including low-resource ones like Swahili or Urdu) when only
195
- English NLI data is available at training time. One solution is cross-lingual sentence encoding, for which XNLI
196
- is an evaluation benchmark."""
197
- ),
198
- "PAWS-X": textwrap.dedent(
199
- """
200
- This dataset contains 23,659 human translated PAWS evaluation pairs and 296,406 machine translated training
201
- pairs in six typologically distinct languages: French, Spanish, German, Chinese, Japanese, and Korean. All
202
- translated pairs are sourced from examples in PAWS-Wiki."""
203
- ),
204
- "XQuAD": textwrap.dedent(
205
- """\
206
- XQuAD (Cross-lingual Question Answering Dataset) is a benchmark dataset for evaluating cross-lingual question
207
- answering performance. The dataset consists of a subset of 240 paragraphs and 1190 question-answer pairs from
208
- the development set of SQuAD v1.1 (Rajpurkar et al., 2016) together with their professional translations into
209
- ten languages: Spanish, German, Greek, Russian, Turkish, Arabic, Vietnamese, Thai, Chinese, and Hindi. Consequently,
210
- the dataset is entirely parallel across 11 languages."""
211
- ),
212
- "MLQA": textwrap.dedent(
213
- """\
214
- MLQA (MultiLingual Question Answering) is a benchmark dataset for evaluating cross-lingual question answering performance.
215
- MLQA consists of over 5K extractive QA instances (12K in English) in SQuAD format in seven languages - English, Arabic,
216
- German, Spanish, Hindi, Vietnamese and Simplified Chinese. MLQA is highly parallel, with QA instances parallel between
217
- 4 different languages on average."""
218
- ),
219
- "tatoeba": textwrap.dedent(
220
- """\
221
- This data is extracted from the Tatoeba corpus, dated Saturday 2018/11/17.
222
- For each language, we have selected 1000 English sentences and their translations, if available. Please check
223
- this paper for a description of the languages, their families and scripts as well as baseline results.
224
- Please note that the English sentences are not identical for all language pairs. This means that the results are
225
- not directly comparable across languages. In particular, the sentences tend to have less variety for several
226
- low-resource languages, e.g. "Tom needed water", "Tom needs water", "Tom is getting water", ...
227
- """
228
- ),
229
- "bucc18": textwrap.dedent(
230
- """Building and Using Comparable Corpora
231
- """
232
- ),
233
- "udpos": textwrap.dedent(
234
- """\
235
- Universal Dependencies (UD) is a framework for consistent annotation of grammar (parts of speech, morphological
236
- features, and syntactic dependencies) across different human languages. UD is an open community effort with over 200
237
- contributors producing more than 100 treebanks in over 70 languages. If you’re new to UD, you should start by reading
238
- the first part of the Short Introduction and then browsing the annotation guidelines.
239
- """
240
- ),
241
- "SQuAD": textwrap.dedent(
242
- """\
243
- Stanford Question Answering Dataset (SQuAD) is a reading comprehension \
244
- dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
245
- articles, where the answer to every question is a segment of text, or span, \
246
- from the corresponding reading passage, or the question might be unanswerable."""
247
- ),
248
- "PAN-X": textwrap.dedent(
249
- """\
250
- The WikiANN dataset (Pan et al. 2017) is a dataset with NER annotations for PER, ORG and LOC. It has been
251
- constructed using the linked entities in Wikipedia pages for 282 different languages."""
253
- ),
254
- }
255
- _CITATIONS = {
256
- "tydiqa": textwrap.dedent(
257
- (
258
- """\
259
- @article{tydiqa,
260
- title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
261
- author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki},
262
- year = {2020},
263
- journal = {Transactions of the Association for Computational Linguistics}
264
- }"""
265
- )
266
- ),
267
- "XNLI": textwrap.dedent(
268
- """\
269
- @InProceedings{conneau2018xnli,
270
- author = {Conneau, Alexis
271
- and Rinott, Ruty
272
- and Lample, Guillaume
273
- and Williams, Adina
274
- and Bowman, Samuel R.
275
- and Schwenk, Holger
276
- and Stoyanov, Veselin},
277
- title = {XNLI: Evaluating Cross-lingual Sentence Representations},
278
- booktitle = {Proceedings of the 2018 Conference on Empirical Methods
279
- in Natural Language Processing},
280
- year = {2018},
281
- publisher = {Association for Computational Linguistics},
282
- location = {Brussels, Belgium},
283
- }"""
284
- ),
285
- "XQuAD": textwrap.dedent(
286
- """
287
- @article{Artetxe:etal:2019,
288
- author = {Mikel Artetxe and Sebastian Ruder and Dani Yogatama},
289
- title = {On the cross-lingual transferability of monolingual representations},
290
- journal = {CoRR},
291
- volume = {abs/1910.11856},
292
- year = {2019},
293
- archivePrefix = {arXiv},
294
- eprint = {1910.11856}
295
- }
296
- """
297
- ),
298
- "MLQA": textwrap.dedent(
299
- """\
300
- @article{lewis2019mlqa,
301
- title={MLQA: Evaluating Cross-lingual Extractive Question Answering},
302
- author={Lewis, Patrick and Oguz, Barlas and Rinott, Ruty and Riedel, Sebastian and Schwenk, Holger},
303
- journal={arXiv preprint arXiv:1910.07475},
304
- year={2019}"""
305
- ),
306
- "PAWS-X": textwrap.dedent(
307
- """\
308
- @InProceedings{pawsx2019emnlp,
309
- title = {{PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification}},
310
- author = {Yang, Yinfei and Zhang, Yuan and Tar, Chris and Baldridge, Jason},
311
- booktitle = {Proc. of EMNLP},
312
- year = {2019}
313
- }"""
314
- ),
315
- "tatoeba": textwrap.dedent(
316
- """\
317
- @article{tatoeba,
318
- title={Massively Multilingual Sentence Embeddings for Zero-Shot Cross-Lingual Transfer and Beyond},
319
- author={Artetxe, Mikel and Schwenk, Holger},
320
- journal={arXiv:1812.10464v2},
321
- year={2018}
322
- }"""
323
- ),
324
- "bucc18": textwrap.dedent(""""""),
325
- "udpos": textwrap.dedent(""""""),
326
- "SQuAD": textwrap.dedent(
327
- """\
328
- @article{2016arXiv160605250R,
329
- author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
330
- Konstantin and {Liang}, Percy},
331
- title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
332
- journal = {arXiv e-prints},
333
- year = 2016,
334
- eid = {arXiv:1606.05250},
335
- pages = {arXiv:1606.05250},
336
- archivePrefix = {arXiv},
337
- eprint = {1606.05250},
338
- }"""
339
- ),
340
- "PAN-X": textwrap.dedent(
341
- """\
342
- @inproceedings{pan-x,
343
- title={Cross-lingual Name Tagging and Linking for 282 Languages},
344
- author={Pan, Xiaoman and Zhang, Boliang and May, Jonathan and Nothman, Joel and Knight, Kevin and Ji, Heng},
345
- booktitle={Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
346
- year={2017}
347
- }"""
348
- ),
349
- }
350
-
351
- _TEXT_FEATURES = {
352
- "XNLI": {
353
- "language": "language",
354
- "sentence1": "sentence1",
355
- "sentence2": "sentence2",
356
- },
357
- "tydiqa": {
358
- "id": "id",
359
- "title": "title",
360
- "context": "context",
361
- "question": "question",
362
- "answers": "answers",
363
- },
364
- "XQuAD": {
365
- "id": "id",
366
- "context": "context",
367
- "question": "question",
368
- "answers": "answers",
369
- },
370
- "MLQA": {
371
- "id": "id",
372
- "title": "title",
373
- "context": "context",
374
- "question": "question",
375
- "answers": "answers",
376
- },
377
- "tatoeba": {
378
- "source_sentence": "",
379
- "target_sentence": "",
380
- "source_lang": "",
381
- "target_lang": "",
382
- },
383
- "bucc18": {
384
- "source_sentence": "",
385
- "target_sentence": "",
386
- "source_lang": "",
387
- "target_lang": "",
388
- },
389
- "PAWS-X": {"sentence1": "sentence1", "sentence2": "sentence2"},
390
- "udpos": {"tokens": "", "pos_tags": ""},
391
- "SQuAD": {
392
- "id": "id",
393
- "title": "title",
394
- "context": "context",
395
- "question": "question",
396
- "answers": "answers",
397
- },
398
- "PAN-X": {"tokens": "", "ner_tags": "", "lang": ""},
399
- }
400
- _DATA_URLS = {
401
- "tydiqa": "https://storage.googleapis.com/tydiqa/",
402
- "XNLI": "https://dl.fbaipublicfiles.com/XNLI/XNLI-1.0.zip",
403
- "XQuAD": "https://github.com/deepmind/xquad/raw/master/",
404
- "MLQA": "https://dl.fbaipublicfiles.com/MLQA/MLQA_V1.zip",
405
- "PAWS-X": "https://storage.googleapis.com/paws/pawsx/x-final.tar.gz",
406
- "bucc18": "https://comparable.limsi.fr/bucc2018/",
407
- "tatoeba": "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/",
408
- "udpos": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz",
409
- "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/dataset/",
410
- "PAN-X": "https://s3.amazonaws.com/datasets.huggingface.co/wikiann/1.1.0/panx_dataset.zip",
411
- }
412
-
413
- _URLS = {
414
- "tydiqa": "https://github.com/google-research-datasets/tydiqa",
415
- "XQuAD": "https://github.com/deepmind/xquad",
416
- "XNLI": "https://www.nyu.edu/projects/bowman/xnli/",
417
- "MLQA": "https://github.com/facebookresearch/MLQA",
418
- "PAWS-X": "https://github.com/google-research-datasets/paws/tree/master/pawsx",
419
- "bucc18": "https://comparable.limsi.fr/bucc2018/",
420
- "tatoeba": "https://github.com/facebookresearch/LASER/blob/main/data/tatoeba/v1/README.md",
421
- "udpos": "https://universaldependencies.org/",
422
- "SQuAD": "https://rajpurkar.github.io/SQuAD-explorer/",
423
- "PAN-X": "https://github.com/afshinrahimi/mmner",
424
- }
425
-
426
-
427
- class XtremeConfig(datasets.BuilderConfig):
428
- """BuilderConfig for Break"""
429
-
430
- def __init__(self, data_url, citation, url, text_features, **kwargs):
431
- """
432
- Args:
433
- text_features: `dict[string, string]`, map from the name of the feature
434
- dict for each text field to the name of the column in the tsv file
435
- data_url: `string`, URL from which to download the data
- citation: `string`, citation for the dataset
- url: `string`, homepage URL for the dataset
437
- **kwargs: keyword arguments forwarded to super.
438
- """
439
- super(XtremeConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
440
- self.text_features = text_features
441
- self.data_url = data_url
442
- self.citation = citation
443
- self.url = url
444
-
445
-
446
- class Xtreme(datasets.GeneratorBasedBuilder):
447
- """TODO(xtreme): Short description of my dataset."""
448
-
449
- # TODO(xtreme): Set up version.
450
- VERSION = datasets.Version("0.1.0")
451
- BUILDER_CONFIGS = [
452
- XtremeConfig(
453
- name=name,
454
- description=_DESCRIPTIONS[name.split(".")[0]],
455
- citation=_CITATIONS[name.split(".")[0]],
456
- text_features=_TEXT_FEATURES[name.split(".")[0]],
457
- data_url=_DATA_URLS[name.split(".")[0]],
458
- url=_URLS[name.split(".")[0]],
459
- )
460
- for name in _NAMES
461
- ]
462
-
463
- def _info(self):
464
- features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
465
- if "answers" in features.keys():
466
- features["answers"] = datasets.features.Sequence(
467
- {
468
- "answer_start": datasets.Value("int32"),
469
- "text": datasets.Value("string"),
470
- }
471
- )
472
- if self.config.name.startswith("PAWS-X"):
473
- features = PawsxParser.features
474
- elif self.config.name == "XNLI":
475
- features["gold_label"] = datasets.Value("string")
476
- elif self.config.name.startswith("udpos"):
477
- features = UdposParser.features
478
- elif self.config.name.startswith("PAN-X"):
479
- features = PanxParser.features
480
- return datasets.DatasetInfo(
481
- # This is the description that will appear on the datasets page.
482
- description=self.config.description + "\n" + _DESCRIPTION,
483
- # datasets.features.FeatureConnectors
484
- features=datasets.Features(
485
- features
486
- # These are the features of your dataset like images, labels ...
487
- ),
488
- # If there's a common (input, target) tuple from the features,
489
- # specify them here. They'll be used if as_supervised=True in
490
- # builder.as_dataset.
491
- supervised_keys=None,
492
- # Homepage of the dataset for documentation
493
- homepage="https://github.com/google-research/xtreme" + "\t" + self.config.url,
494
- citation=self.config.citation + "\n" + _CITATION,
495
- )
496
-
497
- def _split_generators(self, dl_manager):
498
- """Returns SplitGenerators."""
499
- if self.config.name == "tydiqa":
500
- train_url = "v1.1/tydiqa-goldp-v1.1-train.json"
501
- dev_url = "v1.1/tydiqa-goldp-v1.1-dev.json"
502
- urls_to_download = {
503
- "train": self.config.data_url + train_url,
504
- "dev": self.config.data_url + dev_url,
505
- }
506
- dl_dir = dl_manager.download_and_extract(urls_to_download)
507
- return [
508
- datasets.SplitGenerator(
509
- name=datasets.Split.TRAIN,
510
- # These kwargs will be passed to _generate_examples
511
- gen_kwargs={"filepath": dl_dir["train"]},
512
- ),
513
- datasets.SplitGenerator(
514
- name=datasets.Split.VALIDATION,
515
- # These kwargs will be passed to _generate_examples
516
- gen_kwargs={"filepath": dl_dir["dev"]},
517
- ),
518
- ]
519
- if self.config.name == "XNLI":
520
- dl_dir = dl_manager.download_and_extract(self.config.data_url)
521
- data_dir = os.path.join(dl_dir, "XNLI-1.0")
522
- return [
523
- datasets.SplitGenerator(
524
- name=datasets.Split.TEST,
525
- gen_kwargs={"filepath": os.path.join(data_dir, "xnli.test.tsv")},
526
- ),
527
- datasets.SplitGenerator(
528
- name=datasets.Split.VALIDATION,
529
- gen_kwargs={"filepath": os.path.join(data_dir, "xnli.dev.tsv")},
530
- ),
531
- ]
532
-
533
- if self.config.name.startswith("MLQA"):
534
- mlqa_downloaded_files = dl_manager.download_and_extract(self.config.data_url)
535
- l1 = self.config.name.split(".")[1]
536
- l2 = self.config.name.split(".")[2]
537
- return [
538
- datasets.SplitGenerator(
539
- name=datasets.Split.TEST,
540
- # These kwargs will be passed to _generate_examples
541
- gen_kwargs={
542
- "filepath": os.path.join(
543
- os.path.join(mlqa_downloaded_files, "MLQA_V1/test"),
544
- f"test-context-{l1}-question-{l2}.json",
545
- )
546
- },
547
- ),
548
- datasets.SplitGenerator(
549
- name=datasets.Split.VALIDATION,
550
- # These kwargs will be passed to _generate_examples
551
- gen_kwargs={
552
- "filepath": os.path.join(
553
- os.path.join(mlqa_downloaded_files, "MLQA_V1/dev"),
554
- f"dev-context-{l1}-question-{l2}.json",
555
- )
556
- },
557
- ),
558
- ]
559
-
560
- if self.config.name.startswith("XQuAD"):
561
- lang = self.config.name.split(".")[1]
562
- xquad_downloaded_file = dl_manager.download_and_extract(self.config.data_url + f"xquad.{lang}.json")
563
- return [
564
- datasets.SplitGenerator(
565
- name=datasets.Split.VALIDATION,
566
- # These kwargs will be passed to _generate_examples
567
- gen_kwargs={"filepath": xquad_downloaded_file},
568
- ),
569
- ]
570
- if self.config.name.startswith("PAWS-X"):
571
- return PawsxParser.split_generators(dl_manager=dl_manager, config=self.config)
572
- elif self.config.name.startswith("tatoeba"):
573
- lang = self.config.name.split(".")[1]
574
-
575
- tatoeba_source_data = dl_manager.download_and_extract(self.config.data_url + f"tatoeba.{lang}-eng.{lang}")
576
- tatoeba_eng_data = dl_manager.download_and_extract(self.config.data_url + f"tatoeba.{lang}-eng.eng")
577
- return [
578
- datasets.SplitGenerator(
579
- name=datasets.Split.VALIDATION,
580
- # These kwargs will be passed to _generate_examples
581
- gen_kwargs={"filepath": (tatoeba_source_data, tatoeba_eng_data)},
582
- ),
583
- ]
584
- if self.config.name.startswith("bucc18"):
585
- lang = self.config.name.split(".")[1]
586
- bucc18_dl_test_archive = dl_manager.download(
587
- self.config.data_url + f"bucc2018-{lang}-en.training-gold.tar.bz2"
588
- )
589
- bucc18_dl_dev_archive = dl_manager.download(
590
- self.config.data_url + f"bucc2018-{lang}-en.sample-gold.tar.bz2"
591
- )
592
- return [
593
- datasets.SplitGenerator(
594
- name=datasets.Split.VALIDATION,
595
- gen_kwargs={"filepath": dl_manager.iter_archive(bucc18_dl_dev_archive)},
596
- ),
597
- datasets.SplitGenerator(
598
- name=datasets.Split.TEST,
599
- gen_kwargs={"filepath": dl_manager.iter_archive(bucc18_dl_test_archive)},
600
- ),
601
- ]
602
- if self.config.name.startswith("udpos"):
603
- return UdposParser.split_generators(dl_manager=dl_manager, config=self.config)
604
-
605
- if self.config.name == "SQuAD":
606
-
607
- urls_to_download = {
608
- "train": self.config.data_url + "train-v1.1.json",
609
- "dev": self.config.data_url + "dev-v1.1.json",
610
- }
611
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
612
-
613
- return [
614
- datasets.SplitGenerator(
615
- name=datasets.Split.TRAIN,
616
- gen_kwargs={"filepath": downloaded_files["train"]},
617
- ),
618
- datasets.SplitGenerator(
619
- name=datasets.Split.VALIDATION,
620
- gen_kwargs={"filepath": downloaded_files["dev"]},
621
- ),
622
- ]
623
-
624
- if self.config.name.startswith("PAN-X"):
625
- return PanxParser.split_generators(dl_manager=dl_manager, config=self.config)
626
-
627
- def _generate_examples(self, filepath=None, **kwargs):
628
- """Yields examples."""
629
- # Yields (key, example) tuples from the dataset
630
-
631
- if self.config.name == "tydiqa" or self.config.name.startswith("MLQA") or self.config.name == "SQuAD":
632
- with open(filepath, encoding="utf-8") as f:
633
- data = json.load(f)
634
- for article in data["data"]:
635
- title = article.get("title", "").strip()
636
- for paragraph in article["paragraphs"]:
637
- context = paragraph["context"].strip()
638
- for qa in paragraph["qas"]:
639
- question = qa["question"].strip()
640
- id_ = qa["id"]
641
-
642
- answer_starts = [answer["answer_start"] for answer in qa["answers"]]
643
- answers = [answer["text"].strip() for answer in qa["answers"]]
644
-
645
- # Features currently used are "context", "question", and "answers".
646
- # Others are extracted here for the ease of future expansions.
647
- yield id_, {
648
- "title": title,
649
- "context": context,
650
- "question": question,
651
- "id": id_,
652
- "answers": {
653
- "answer_start": answer_starts,
654
- "text": answers,
655
- },
656
- }
657
- if self.config.name == "XNLI":
658
- with open(filepath, encoding="utf-8") as f:
659
- data = csv.DictReader(f, delimiter="\t")
660
- for id_, row in enumerate(data):
661
- yield id_, {
662
- "sentence1": row["sentence1"],
663
- "sentence2": row["sentence2"],
664
- "language": row["language"],
665
- "gold_label": row["gold_label"],
666
- }
667
- if self.config.name.startswith("PAWS-X"):
668
- yield from PawsxParser.generate_examples(config=self.config, filepath=filepath, **kwargs)
669
- if self.config.name.startswith("XQuAD"):
670
- with open(filepath, encoding="utf-8") as f:
671
- xquad = json.load(f)
672
- for article in xquad["data"]:
673
- for paragraph in article["paragraphs"]:
674
- context = paragraph["context"].strip()
675
- for qa in paragraph["qas"]:
676
- question = qa["question"].strip()
677
- id_ = qa["id"]
678
-
679
- answer_starts = [answer["answer_start"] for answer in qa["answers"]]
680
- answers = [answer["text"].strip() for answer in qa["answers"]]
681
-
682
- # Features currently used are "context", "question", and "answers".
683
- # Others are extracted here for the ease of future expansions.
684
- yield id_, {
685
- "context": context,
686
- "question": question,
687
- "id": id_,
688
- "answers": {
689
- "answer_start": answer_starts,
690
- "text": answers,
691
- },
692
- }
693
- if self.config.name.startswith("bucc18"):
694
- lang = self.config.name.split(".")[1]
695
- data_dir = f"bucc2018/{lang}-en"
696
- for path, file in filepath:
697
- if path.startswith(data_dir):
698
- csv_content = [line.decode("utf-8") for line in file]
699
- if path.endswith("en"):
700
- target_sentences = dict(list(csv.reader(csv_content, delimiter="\t", quotechar=None)))
701
- elif path.endswith("gold"):
702
- source_target_ids = list(csv.reader(csv_content, delimiter="\t", quotechar=None))
703
- else:
704
- source_sentences = dict(list(csv.reader(csv_content, delimiter="\t", quotechar=None)))
705
-
706
- for id_, (source_id, target_id) in enumerate(source_target_ids):
707
- yield id_, {
708
- "source_sentence": source_sentences[source_id],
709
- "target_sentence": target_sentences[target_id],
710
- "source_lang": source_id,
711
- "target_lang": target_id,
712
- }
713
- if self.config.name.startswith("tatoeba"):
714
- source_file = filepath[0]
715
- target_file = filepath[1]
716
- source_sentences = []
717
- target_sentences = []
718
- with open(source_file, encoding="utf-8") as f1:
719
- for row in f1:
720
- source_sentences.append(row)
721
- with open(target_file, encoding="utf-8") as f2:
722
- for row in f2:
723
- target_sentences.append(row)
724
- for i in range(len(source_sentences)):
725
- yield i, {
726
- "source_sentence": source_sentences[i],
727
- "target_sentence": target_sentences[i],
728
- "source_lang": source_file.split(".")[-1],
729
- "target_lang": "eng",
730
- }
731
- if self.config.name.startswith("udpos"):
732
- yield from UdposParser.generate_examples(config=self.config, filepath=filepath, **kwargs)
733
- if self.config.name.startswith("PAN-X"):
734
- yield from PanxParser.generate_examples(filepath=filepath, **kwargs)
735
-
736
-
737
- class PanxParser:
738
-
739
- features = datasets.Features(
740
- {
741
- "tokens": datasets.Sequence(datasets.Value("string")),
742
- "ner_tags": datasets.Sequence(
743
- datasets.features.ClassLabel(
744
- names=[
745
- "O",
746
- "B-PER",
747
- "I-PER",
748
- "B-ORG",
749
- "I-ORG",
750
- "B-LOC",
751
- "I-LOC",
752
- ]
753
- )
754
- ),
755
- "langs": datasets.Sequence(datasets.Value("string")),
756
- }
757
- )
758
-
759
- @staticmethod
760
- def split_generators(dl_manager=None, config=None):
761
- data_dir = dl_manager.download_and_extract(config.data_url)
762
- lang = config.name.split(".")[1]
763
- archive = os.path.join(data_dir, lang + ".tar.gz")
764
- split_filenames = {
765
- datasets.Split.TRAIN: "train",
766
- datasets.Split.VALIDATION: "dev",
767
- datasets.Split.TEST: "test",
768
- }
769
- return [
770
- datasets.SplitGenerator(
771
- name=split,
772
- gen_kwargs={
773
- "filepath": dl_manager.iter_archive(archive),
774
- "filename": split_filenames[split],
775
- },
776
- )
777
- for split in split_filenames
778
- ]
779
-
780
- @staticmethod
781
- def generate_examples(filepath=None, filename=None):
782
- idx = 1
783
- for path, file in filepath:
784
- if path.endswith(filename):
785
- tokens = []
786
- ner_tags = []
787
- langs = []
788
- for line in file:
789
- line = line.decode("utf-8")
790
- if line == "" or line == "\n":
791
- if tokens:
792
- yield idx, {
793
- "tokens": tokens,
794
- "ner_tags": ner_tags,
795
- "langs": langs,
796
- }
797
- idx += 1
798
- tokens = []
799
- ner_tags = []
800
- langs = []
801
- else:
802
- # pan-x data is tab separated
803
- splits = line.split("\t")
804
- # tokens are prefixed with their language code (e.g. "en:"); keep the code and strip the prefix
805
- langs.append(splits[0][:2])
806
- tokens.append(splits[0][3:])
807
- if len(splits) > 1:
808
- ner_tags.append(splits[-1].replace("\n", ""))
809
- else:
810
- # examples have no label in test set
811
- ner_tags.append("O")
812
- if tokens:
813
- yield idx, {
814
- "tokens": tokens,
815
- "ner_tags": ner_tags,
816
- "langs": langs,
817
- }
818
-
819
-
820
- class PawsxParser:
821
-
822
- features = datasets.Features(
823
- {
824
- "sentence1": datasets.Value("string"),
825
- "sentence2": datasets.Value("string"),
826
- "label": datasets.Value("string"),
827
- }
828
- )
829
-
830
- @staticmethod
831
- def split_generators(dl_manager=None, config=None):
832
- lang = config.name.split(".")[1]
833
- archive = dl_manager.download(config.data_url)
834
- split_filenames = {
835
- datasets.Split.TRAIN: "translated_train.tsv" if lang != "en" else "train.tsv",
836
- datasets.Split.VALIDATION: "dev_2k.tsv",
837
- datasets.Split.TEST: "test_2k.tsv",
838
- }
839
- return [
840
- datasets.SplitGenerator(
841
- name=split,
842
- gen_kwargs={"filepath": dl_manager.iter_archive(archive), "filename": split_filenames[split]},
843
- )
844
- for split in split_filenames
845
- ]
846
-
847
- @staticmethod
848
- def generate_examples(config=None, filepath=None, filename=None):
849
- lang = config.name.split(".")[1]
850
- for path, file in filepath:
851
- if f"/{lang}/" in path and path.endswith(filename):
852
- lines = (line.decode("utf-8") for line in file)
853
- data = csv.reader(lines, delimiter="\t")
854
- next(data) # skip header
855
- for id_, row in enumerate(data):
856
- if len(row) == 4:
857
- yield id_, {
858
- "sentence1": row[1],
859
- "sentence2": row[2],
860
- "label": row[3],
861
- }
862
-
863
-
864
- class UdposParser:
865
-
866
- features = datasets.Features(
867
- {
868
- "tokens": datasets.Sequence(datasets.Value("string")),
869
- "pos_tags": datasets.Sequence(
870
- datasets.features.ClassLabel(
871
- names=[
872
- "ADJ",
873
- "ADP",
874
- "ADV",
875
- "AUX",
876
- "CCONJ",
877
- "DET",
878
- "INTJ",
879
- "NOUN",
880
- "NUM",
881
- "PART",
882
- "PRON",
883
- "PROPN",
884
- "PUNCT",
885
- "SCONJ",
886
- "SYM",
887
- "VERB",
888
- "X",
889
- ]
890
- )
891
- ),
892
- }
893
- )
894
-
895
- @staticmethod
896
- def split_generators(dl_manager=None, config=None):
897
- archive = dl_manager.download(config.data_url)
898
- split_names = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev", datasets.Split.TEST: "test"}
899
- split_generators = {
900
- split: datasets.SplitGenerator(
901
- name=split,
902
- gen_kwargs={
903
- "filepath": dl_manager.iter_archive(archive),
904
- "split": split_names[split],
905
- },
906
- )
907
- for split in split_names
908
- }
909
- lang = config.name.split(".")[1]
910
- if lang in ["Tagalog", "Thai", "Yoruba"]:
911
- return [split_generators["test"]]
912
- elif lang == "Kazakh":
913
- return [split_generators["train"], split_generators["test"]]
914
- else:
915
- return [split_generators["train"], split_generators["validation"], split_generators["test"]]
916
-
917
- @staticmethod
918
- def generate_examples(config=None, filepath=None, split=None):
919
- lang = config.name.split(".")[1]
920
- idx = 0
921
- for path, file in filepath:
922
- if f"_{lang}" in path and split in path and path.endswith(".conllu"):
923
- # For languages other than Kazakh, Tagalog, Thai and Yoruba, we exclude Arabic-NYUAD, which does not contain any words, only "_"
924
- if lang in ["Kazakh", "Tagalog", "Thai", "Yoruba"] or "NYUAD" not in path:
925
- lines = (line.decode("utf-8") for line in file)
926
- data = csv.reader(lines, delimiter="\t", quoting=csv.QUOTE_NONE)
927
- tokens = []
928
- pos_tags = []
929
- for id_row, row in enumerate(data):
930
- if len(row) >= 10 and row[1] != "_" and row[3] != "_":
931
- tokens.append(row[1])
932
- pos_tags.append(row[3])
933
- if len(row) == 0 and len(tokens) > 0:
934
- yield idx, {
935
- "tokens": tokens,
936
- "pos_tags": pos_tags,
937
- }
938
- idx += 1
939
- tokens = []
940
- pos_tags = []
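
For reference, the UD-POS portion of the deleted script reads CoNLL-U files and emits one example per sentence, taking the FORM (column 2) and UPOS (column 4) fields. A minimal standalone sketch of that parsing logic, with a hypothetical file path and an added flush for a final sentence that is not followed by a blank line:

import csv

def read_conllu(path):
    """Collect (tokens, upos_tags) pairs, one per sentence, from a CoNLL-U file."""
    sentences, tokens, pos_tags = [], [], []
    with open(path, encoding="utf-8") as f:
        for row in csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE):
            if len(row) >= 10 and row[1] != "_" and row[3] != "_":
                tokens.append(row[1])    # FORM column
                pos_tags.append(row[3])  # UPOS column
            elif len(row) == 0 and tokens:  # blank line marks the end of a sentence
                sentences.append((tokens, pos_tags))
                tokens, pos_tags = [], []
    if tokens:  # flush a trailing sentence with no final blank line
        sentences.append((tokens, pos_tags))
    return sentences

# Hypothetical usage on a treebank file extracted from the ud-treebanks-v2.5 archive:
# sentences = read_conllu("UD_English-EWT/en_ewt-ud-test.conllu")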