ju-resplande committed on
Commit
34e5bfa
1 Parent(s): 6ce4646

loading script

Files changed (1)
  1. plue.py +612 -0
plue.py ADDED
@@ -0,0 +1,612 @@
+# coding=utf-8
+# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Lint as: python3
+# https://github.com/huggingface/datasets/blob/master/datasets/glue/glue.py
+# https://github.com/huggingface/datasets/blob/master/datasets/scitail/scitail.py
+"""PLUE: the Portuguese Language Understanding Evaluation benchmark."""
+
+
+import csv
+import os
+import textwrap
+
+import numpy as np
+
+import datasets
+
+
+_PLUE_CITATION = """\
+@misc{Gomes2020,
+  author = {GOMES, J. R. S.},
+  title = {Portuguese Language Understanding Evaluation},
+  year = {2020},
+  publisher = {GitHub},
+  journal = {GitHub repository},
+  howpublished = {\\url{https://github.com/jubs12/PLUE}},
+  commit = {CURRENT_COMMIT}
+}
+
+@inproceedings{wang2019glue,
+  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
+  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
+  note={In the Proceedings of ICLR.},
+  year={2019}
+}
+"""
+
+_PLUE_DESCRIPTION = """\
+PLUE: Portuguese Language Understanding Evaluation is a Portuguese translation of
+the GLUE benchmark and SciTail using the OPUS-MT model and Google Cloud Translation.
+"""
+
+MNLI_URL = "https://github.com/jubs12/PLUE/releases/download/v1.0.0/MNLI.zip"
+
+_MNLI_BASE_KWARGS = dict(
+    text_features={"premise": "sentence1", "hypothesis": "sentence2",},
+    label_classes=["entailment", "neutral", "contradiction"],
+    label_column="gold_label",
+    data_dir="PLUE-1.0.1/datasets/MNLI",
+    citation=textwrap.dedent(
+        """\
+        @InProceedings{N18-1101,
+          author = "Williams, Adina
+                    and Nangia, Nikita
+                    and Bowman, Samuel",
+          title = "A Broad-Coverage Challenge Corpus for
+                   Sentence Understanding through Inference",
+          booktitle = "Proceedings of the 2018 Conference of
+                       the North American Chapter of the
+                       Association for Computational Linguistics:
+                       Human Language Technologies, Volume 1 (Long
+                       Papers)",
+          year = "2018",
+          publisher = "Association for Computational Linguistics",
+          pages = "1112--1122",
+          location = "New Orleans, Louisiana",
+          url = "http://aclweb.org/anthology/N18-1101"
+        }
+        @article{bowman2015large,
+          title={A large annotated corpus for learning natural language inference},
+          author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
+          journal={arXiv preprint arXiv:1508.05326},
+          year={2015}
+        }"""
+    ),
+    url="http://www.nyu.edu/projects/bowman/multinli/",
+)
+
+
+class PlueConfig(datasets.BuilderConfig):
+    """BuilderConfig for PLUE."""
+
+    def __init__(
+        self,
+        text_features,
+        label_column,
+        data_dir,
+        citation,
+        url,
+        label_classes=None,
+        process_label=lambda x: x,
+        **kwargs,
+    ):
+        """BuilderConfig for PLUE.
+
+        Args:
+          text_features: `dict[string, string]`, map from the name of the feature
+            dict for each text field to the name of the column in the tsv file
+          label_column: `string`, name of the column in the tsv file corresponding
+            to the label
+          data_dir: `string`, the path to the folder containing the tsv files in the
+            downloaded zip
+          citation: `string`, citation for the data set
+          url: `string`, url for information about the data set
+          label_classes: `list[string]`, the list of classes if the label is
+            categorical. If not provided, then the label will be of type
+            `datasets.Value('float32')`.
+          process_label: `Function[string, any]`, function taking in the raw value
+            of the label and processing it to the form required by the label feature
+          **kwargs: keyword arguments forwarded to super.
+        """
+        super(PlueConfig, self).__init__(
+            version=datasets.Version("1.0.0", ""), **kwargs
+        )
+        self.text_features = text_features
+        self.label_column = label_column
+        self.label_classes = label_classes
+        self.data_url = "https://github.com/jubs12/PLUE/archive/refs/tags/v1.0.1.zip"
+        self.data_dir = data_dir
+        self.citation = citation
+        self.url = url
+        self.process_label = process_label
+
+
+class Plue(datasets.GeneratorBasedBuilder):
+    """PLUE: Portuguese translations of the GLUE benchmark and SciTail."""
+
+    BUILDER_CONFIGS = [
+        PlueConfig(
+            name="cola",
+            description=textwrap.dedent(
+                """\
+                The Corpus of Linguistic Acceptability consists of English
+                acceptability judgments drawn from books and journal articles on
+                linguistic theory. Each example is a sequence of words annotated
+                with whether it is a grammatical English sentence."""
+            ),
+            text_features={"sentence": "sentence"},
+            label_classes=["unacceptable", "acceptable"],
+            label_column="is_acceptable",
+            data_dir="PLUE-1.0.1/datasets/CoLA",
+            citation=textwrap.dedent(
+                """\
+                @article{warstadt2018neural,
+                  title={Neural Network Acceptability Judgments},
+                  author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
+                  journal={arXiv preprint arXiv:1805.12471},
+                  year={2018}
+                }"""
+            ),
+            url="https://nyu-mll.github.io/CoLA/",
+        ),
+        PlueConfig(
+            name="sst2",
+            description=textwrap.dedent(
+                """\
+                The Stanford Sentiment Treebank consists of sentences from movie reviews and
+                human annotations of their sentiment. The task is to predict the sentiment of a
+                given sentence. We use the two-way (positive/negative) class split, and use only
+                sentence-level labels."""
+            ),
+            text_features={"sentence": "sentence"},
+            label_classes=["negative", "positive"],
+            label_column="label",
+            data_dir="PLUE-1.0.1/datasets/SST-2",
+            citation=textwrap.dedent(
+                """\
+                @inproceedings{socher2013recursive,
+                  title={Recursive deep models for semantic compositionality over a sentiment treebank},
+                  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
+                  booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
+                  pages={1631--1642},
+                  year={2013}
+                }"""
+            ),
+            url="https://datasets.stanford.edu/sentiment/index.html",
+        ),
+        PlueConfig(
+            name="mrpc",
+            description=textwrap.dedent(
+                """\
+                The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
+                sentence pairs automatically extracted from online news sources, with human annotations
+                for whether the sentences in the pair are semantically equivalent."""
+            ),  # pylint: disable=line-too-long
+            text_features={"sentence1": "", "sentence2": ""},
+            label_classes=["not_equivalent", "equivalent"],
+            label_column="Quality",
+            data_dir="PLUE-1.0.1/datasets/MRPC",
+            citation=textwrap.dedent(
+                """\
+                @inproceedings{dolan2005automatically,
+                  title={Automatically constructing a corpus of sentential paraphrases},
+                  author={Dolan, William B and Brockett, Chris},
+                  booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
+                  year={2005}
+                }"""
+            ),
+            url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
+        ),
+        PlueConfig(
+            name="qqp",
+            description=textwrap.dedent(
+                """\
+                The Quora Question Pairs2 dataset is a collection of question pairs from the
+                community question-answering website Quora. The task is to determine whether a
+                pair of questions are semantically equivalent."""
+            ),
+            text_features={"question1": "question1", "question2": "question2",},
+            label_classes=["not_duplicate", "duplicate"],
+            label_column="is_duplicate",
+            data_dir="PLUE-1.0.1/datasets/QQP_v2",
+            citation=textwrap.dedent(
+                """\
+                @online{WinNT,
+                  author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
+                  title = {First Quora Dataset Release: Question Pairs},
+                  year = {2017},
+                  url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
+                  urldate = {2019-04-03}
+                }"""
+            ),
+            url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
+        ),
+        PlueConfig(
+            name="stsb",
+            description=textwrap.dedent(
+                """\
+                The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
+                sentence pairs drawn from news headlines, video and image captions, and natural
+                language inference data. Each pair is human-annotated with a similarity score
+                from 1 to 5."""
+            ),
+            text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
+            label_column="score",
+            data_dir="PLUE-1.0.1/datasets/STS-B",
+            citation=textwrap.dedent(
+                """\
+                @article{cer2017semeval,
+                  title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
+                  author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
+                  journal={arXiv preprint arXiv:1708.00055},
+                  year={2017}
+                }"""
+            ),
+            url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
+            process_label=np.float32,
+        ),
+        PlueConfig(
+            name="mnli",
+            description=textwrap.dedent(
+                """\
+                The Multi-Genre Natural Language Inference Corpus is a crowdsourced
+                collection of sentence pairs with textual entailment annotations. Given a premise sentence
+                and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
+                (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
+                gathered from ten different sources, including transcribed speech, fiction, and government reports.
+                We use the standard test set, for which we obtained private labels from the authors, and evaluate
+                on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
+                the SNLI corpus as 550k examples of auxiliary training data."""
+            ),
+            **_MNLI_BASE_KWARGS,
+        ),
+        PlueConfig(
+            name="mnli_mismatched",
+            description=textwrap.dedent(
+                """\
+                The mismatched validation and test splits from MNLI.
+                See the "mnli" BuilderConfig for additional information."""
+            ),
+            **_MNLI_BASE_KWARGS,
+        ),
+        PlueConfig(
+            name="mnli_matched",
+            description=textwrap.dedent(
+                """\
+                The matched validation and test splits from MNLI.
+                See the "mnli" BuilderConfig for additional information."""
+            ),
+            **_MNLI_BASE_KWARGS,
+        ),
+        PlueConfig(
+            name="qnli",
+            description=textwrap.dedent(
+                """\
+                The Stanford Question Answering Dataset is a question-answering
+                dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
+                from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
+                convert the task into sentence pair classification by forming a pair between each question and each
+                sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
+                question and the context sentence. The task is to determine whether the context sentence contains
+                the answer to the question. This modified version of the original task removes the requirement that
+                the model select the exact answer, but also removes the simplifying assumptions that the answer
+                is always present in the input and that lexical overlap is a reliable cue."""
+            ),  # pylint: disable=line-too-long
+            text_features={"question": "question", "sentence": "sentence",},
+            label_classes=["entailment", "not_entailment"],
+            label_column="label",
+            data_dir="PLUE-1.0.1/datasets/QNLI",
+            citation=textwrap.dedent(
+                """\
+                @article{rajpurkar2016squad,
+                  title={Squad: 100,000+ questions for machine comprehension of text},
+                  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
+                  journal={arXiv preprint arXiv:1606.05250},
+                  year={2016}
+                }"""
+            ),
+            url="https://rajpurkar.github.io/SQuAD-explorer/",
+        ),
+        PlueConfig(
+            name="rte",
+            description=textwrap.dedent(
+                """\
+                The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
+                entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
+                et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
+                constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
+                for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
+            ),  # pylint: disable=line-too-long
+            text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
+            label_classes=["entailment", "not_entailment"],
+            label_column="label",
+            data_dir="PLUE-1.0.1/datasets/RTE",
+            citation=textwrap.dedent(
+                """\
+                @inproceedings{dagan2005pascal,
+                  title={The PASCAL recognising textual entailment challenge},
+                  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
+                  booktitle={Machine Learning Challenges Workshop},
+                  pages={177--190},
+                  year={2005},
+                  organization={Springer}
+                }
+                @inproceedings{bar2006second,
+                  title={The second pascal recognising textual entailment challenge},
+                  author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
+                  booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
+                  volume={6},
+                  number={1},
+                  pages={6--4},
+                  year={2006},
+                  organization={Venice}
+                }
+                @inproceedings{giampiccolo2007third,
+                  title={The third pascal recognizing textual entailment challenge},
+                  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
+                  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
+                  pages={1--9},
+                  year={2007},
+                  organization={Association for Computational Linguistics}
+                }
+                @inproceedings{bentivogli2009fifth,
+                  title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
+                  author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
+                  booktitle={TAC},
+                  year={2009}
+                }"""
+            ),
+            url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
+        ),
+        PlueConfig(
+            name="wnli",
+            description=textwrap.dedent(
+                """\
+                The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
+                in which a system must read a sentence with a pronoun and select the referent of that pronoun from
+                a list of choices. The examples are manually constructed to foil simple statistical methods: Each
+                one is contingent on contextual information provided by a single word or phrase in the sentence.
+                To convert the problem into sentence pair classification, we construct sentence pairs by replacing
+                the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
+                pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
+                new examples derived from fiction books that was shared privately by the authors of the original
+                corpus. While the included training set is balanced between two classes, the test set is imbalanced
+                between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
+                hypotheses are sometimes shared between training and development examples, so if a model memorizes the
+                training examples, they will predict the wrong label on corresponding development set
+                example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
+                between a model's score on this task and its score on the unconverted original task. We
+                call converted dataset WNLI (Winograd NLI)."""
+            ),
+            text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
+            label_classes=["not_entailment", "entailment"],
+            label_column="label",
+            data_dir="PLUE-1.0.1/datasets/WNLI",
+            citation=textwrap.dedent(
+                """\
+                @inproceedings{levesque2012winograd,
+                  title={The winograd schema challenge},
+                  author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
+                  booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
+                  year={2012}
+                }"""
+            ),
+            url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
+        ),
+        PlueConfig(
+            name="scitail",
+            description=textwrap.dedent(
+                """\
+                The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
+                and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
+                retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
+                crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
+                the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
+                with neutral label"""
+            ),
+            text_features={"premise": "premise", "hypothesis": "hypothesis",},
+            label_classes=["entails", "neutral"],
+            label_column="label",
+            data_dir="PLUE-1.0.1/datasets/SciTail",
+            citation=textwrap.dedent(
+                """\
+                @inproceedings{scitail,
+                  Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
+                  Booktitle = {AAAI},
+                  Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
+                  Year = {2018}
+                }"""
+            ),
+            url="https://gluebenchmark.com/diagnostics",
+        ),
+    ]
+
+    def _info(self):
+        features = {
+            text_feature: datasets.Value("string")
+            for text_feature in self.config.text_features.keys()
+        }
+        if self.config.label_classes:
+            features["label"] = datasets.features.ClassLabel(
+                names=self.config.label_classes
+            )
+        else:
+            features["label"] = datasets.Value("float32")
+        features["idx"] = datasets.Value("int32")
+        return datasets.DatasetInfo(
+            description=_PLUE_DESCRIPTION,
+            features=datasets.Features(features),
+            homepage=self.config.url,
+            citation=self.config.citation + "\n" + _PLUE_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        data_url = MNLI_URL if self.config.name == "mnli" else self.config.data_url
+        dl_dir = dl_manager.download_and_extract(data_url)
+        data_dir = os.path.join(dl_dir, self.config.data_dir)
+
+        train_split = datasets.SplitGenerator(
+            name=datasets.Split.TRAIN,
+            gen_kwargs={
+                "data_file": os.path.join(data_dir or "", "train.tsv"),
+                "split": "train",
+            },
+        )
+        if self.config.name == "mnli":
+            return [
+                train_split,
+                _mnli_split_generator(
+                    "validation_matched", data_dir, "dev", matched=True
+                ),
+                _mnli_split_generator(
+                    "validation_mismatched", data_dir, "dev", matched=False
+                ),
+                _mnli_split_generator("test_matched", data_dir, "test", matched=True),
+                _mnli_split_generator(
+                    "test_mismatched", data_dir, "test", matched=False
+                ),
+            ]
+        elif self.config.name == "mnli_matched":
+            return [
+                _mnli_split_generator("validation", data_dir, "dev", matched=True),
+                _mnli_split_generator("test", data_dir, "test", matched=True),
+            ]
+        elif self.config.name == "mnli_mismatched":
+            return [
+                _mnli_split_generator("validation", data_dir, "dev", matched=False),
+                _mnli_split_generator("test", data_dir, "test", matched=False),
+            ]
+        else:
+            return [
+                train_split,
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "dev.tsv"),
+                        "split": "dev",
+                    },
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "data_file": os.path.join(data_dir or "", "test.tsv"),
+                        "split": "test",
+                    },
+                ),
+            ]
+
+    def _generate_examples(self, data_file, split):
+        if self.config.name in ["mrpc", "scitail"]:
+            if self.config.name == "mrpc":
+                examples = self._generate_example_mrpc_files(
+                    data_file=data_file, split=split
+                )
+            elif self.config.name == "scitail":
+                examples = self._generate_example_scitail_files(
+                    data_file=data_file, split=split
+                )
+
+            for example in examples:
+                yield example["idx"], example
+
+        else:
+            process_label = self.config.process_label
+            label_classes = self.config.label_classes
+
+            # The train and dev files for CoLA are the only tsv files without a
+            # header.
+            is_cola_non_test = self.config.name == "cola" and split != "test"
+
+            with open(data_file, encoding="utf8") as f:
+                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+                if is_cola_non_test:
+                    reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+
+                for n, row in enumerate(reader):
+                    if is_cola_non_test:
+                        row = {
+                            "sentence": row[3],
+                            "is_acceptable": row[1],
+                        }
+
+                    example = {
+                        feat: row[col]
+                        for feat, col in self.config.text_features.items()
+                    }
+                    example["idx"] = n
+
+                    if self.config.label_column in row:
+                        label = row[self.config.label_column]
+                        # For some tasks, the label is represented as 0 and 1 in the tsv
+                        # files and needs to be cast to integer to work with the feature.
+                        if label_classes and label not in label_classes:
+                            label = int(label) if label else None
+                        example["label"] = process_label(label)
+                    else:
+                        example["label"] = process_label(-1)
+
+                    # Filter out corrupted rows.
+                    for value in example.values():
+                        if value is None:
+                            break
+                    else:
+                        yield example["idx"], example
+
+    def _generate_example_mrpc_files(self, data_file, split):
+        with open(data_file, encoding="utf8") as f:
+            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+            for idx, row in enumerate(reader):
+                label = row["Quality"] if split != "test" else -1
+
+                yield {
+                    "sentence1": row["#1 String"],
+                    "sentence2": row["#2 String"],
+                    "label": int(label),
+                    "idx": idx,
+                }
+
+    def _generate_example_scitail_files(self, data_file, split):
+        with open(data_file, encoding="utf8") as f:
+            reader = csv.DictReader(
+                f,
+                delimiter="\t",
+                quoting=csv.QUOTE_NONE,
+                fieldnames=["premise", "hypothesis", "label"],
+            )
+            for idx, row in enumerate(reader):
+                label = row["label"] if split != "test" else -1
+
+                yield {
+                    "premise": row["premise"],
+                    "hypothesis": row["hypothesis"],
+                    "label": label,
+                    "idx": idx,
+                }
+
+
+def _mnli_split_generator(name, data_dir, split, matched):
+    return datasets.SplitGenerator(
+        name=name,
+        gen_kwargs={
+            "data_file": os.path.join(
+                data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")
+            ),
+            "split": split,
+        },
+    )
+
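
For reference, a minimal usage sketch (not part of the committed file) showing how a config defined by this loading script could be consumed with datasets.load_dataset. The local path "plue.py" and the choice of the "rte" config are illustrative assumptions; any of the names in BUILDER_CONFIGS should work the same way.

# Minimal usage sketch, assuming this script is saved as plue.py in the working directory.
from datasets import load_dataset

# "rte" is an example config; other names defined above: cola, sst2, mrpc, qqp,
# stsb, mnli, mnli_matched, mnli_mismatched, qnli, wnli, scitail.
plue_rte = load_dataset("plue.py", "rte")

print(plue_rte)              # DatasetDict with the splits returned by _split_generators
print(plue_rte["train"][0])  # e.g. {"sentence1": ..., "sentence2": ..., "label": ..., "idx": ...}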