Languages: Portuguese
Language Creators: machine-generated
Source Datasets: glue
Size Categories: 10K<n<100K

luismsgomes committed 262722f (1 parent: 83bfc18)

first working version (v0.0.1)

Files changed (3):
  1. README.md +5 -1
  2. glue_data_ptpt_v0.0.1.tar.gz +3 -0
  3. glueptpt.py +662 -0
README.md CHANGED
@@ -5,7 +5,11 @@ language_creators:
 - machine-generated
 source_datasets:
 - glue
-pretty_name: GLUE for European Portuguese (machine translated)
+pretty_name: The General Language Understanding Evaluation (GLUE) benchmark translated to European Portuguese (pt_PT)
 size_categories:
 - 10K<n<100K
 ---
+
+See [gluebenchmark.com](https://gluebenchmark.com/) for information about the General Language Understanding Evaluation (GLUE) dataset.
+
+
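
For context, once this commit is in place the dataset can be loaded through the `datasets` library by pointing `load_dataset` at the loading script added below. A minimal sketch, assuming a local checkout of this repository in a directory named `glueptpt/` (the exact Hub repository id is not shown on this page):

import datasets

# Load one task ("rte" here) via the loading script committed below; the script
# unpacks glue_data_ptpt_v0.0.1.tar.gz and exposes train/validation/test splits.
rte = datasets.load_dataset("glueptpt/glueptpt.py", "rte")
print(rte["train"][0])  # {'sentence1': ..., 'sentence2': ..., 'label': ..., 'idx': ...}
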
glue_data_ptpt_v0.0.1.tar.gz ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:64e0750f2b7ecfb20c42d8e6f48798dfa45039b1f887521cc9878a9c36a1b7c8
size 2010782
glueptpt.py ADDED
@@ -0,0 +1,662 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
# https://github.com/huggingface/datasets/blob/master/datasets/glue/glue.py
"""The General Language Understanding Evaluation (GLUE) benchmark translated to European Portuguese (pt_PT)."""


import csv
import os
import textwrap

import numpy as np

import datasets


_GLUEPTPT_CITATION = """\
@misc{Gomes2023,
  author = {Gomes, Luís M. S. and Silva, João R. and Santos, Rodrigo and Rodrigues, João and Branco, António H.},
  title = {The General Language Understanding Evaluation (GLUE) benchmark translated to European Portuguese (pt_PT)},
  year = {2023},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\\url{https://github.com/nlx-group/glueptpt}},
  commit = {CURRENT_COMMIT}
}
"""

_GLUEPTPT_DESCRIPTION = """\
GLUEPTPT is a European Portuguese translation of the GLUE benchmark using DeepL Pro.
"""

_MNLI_BASE_KWARGS = dict(
    text_features={"premise": "sentence1", "hypothesis": "sentence2",},
    label_classes=["entailment", "neutral", "contradiction"],
    label_column="gold_label",
    data_dir="MNLI",
    citation=textwrap.dedent(
        """\
        @InProceedings{N18-1101,
          author = "Williams, Adina
                    and Nangia, Nikita
                    and Bowman, Samuel",
          title = "A Broad-Coverage Challenge Corpus for
                   Sentence Understanding through Inference",
          booktitle = "Proceedings of the 2018 Conference of
                       the North American Chapter of the
                       Association for Computational Linguistics:
                       Human Language Technologies, Volume 1 (Long
                       Papers)",
          year = "2018",
          publisher = "Association for Computational Linguistics",
          pages = "1112--1122",
          location = "New Orleans, Louisiana",
          url = "http://aclweb.org/anthology/N18-1101"
        }
        @article{bowman2015large,
          title={A large annotated corpus for learning natural language inference},
          author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
          journal={arXiv preprint arXiv:1508.05326},
          year={2015}
        }"""
    ),
    url="http://www.nyu.edu/projects/bowman/multinli/",
)


class GLUEPTPTConfig(datasets.BuilderConfig):
    """BuilderConfig for GLUE."""

    def __init__(
        self,
        text_features,
        label_column,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """BuilderConfig for GLUEPTPT.

        Args:
          text_features: `dict[string, string]`, map from the name of the feature
            dict for each text field to the name of the column in the tsv file
          label_column: `string`, name of the column in the tsv file corresponding
            to the label
          data_dir: `string`, the path to the folder containing the tsv files in the
            downloaded archive
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          label_classes: `list[string]`, the list of classes if the label is
            categorical. If not provided, then the label will be of type
            `datasets.Value('float32')`.
          process_label: `Function[string, any]`, function taking in the raw value
            of the label and processing it to the form required by the label feature
          **kwargs: keyword arguments forwarded to super.
        """
        super(GLUEPTPTConfig, self).__init__(
            version=datasets.Version("0.0.1", ""), **kwargs
        )
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.data_url = (
            "/workspace/glueptpt/glue_data_ptpt_v0.0.1.tar.gz"
            # "https://github.com/nlx-group/glueptpt/archive/refs/tags/v0.0.1.zip"
        )
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        self.process_label = process_label


class GLUEPTPT(datasets.GeneratorBasedBuilder):
    """The General Language Understanding Evaluation (GLUE) benchmark."""

    BUILDER_CONFIGS = [
        GLUEPTPTConfig(
            name="cola",
            description=textwrap.dedent(
                """\
                The Corpus of Linguistic Acceptability consists of English
                acceptability judgments drawn from books and journal articles on
                linguistic theory. Each example is a sequence of words annotated
                with whether it is a grammatical English sentence."""
            ),
            text_features={"sentence": "sentence"},
            label_classes=["unacceptable", "acceptable"],
            label_column="is_acceptable",
            data_dir="glue_data_ptpt/CoLA",
            citation=textwrap.dedent(
                """\
                @article{warstadt2018neural,
                  title={Neural Network Acceptability Judgments},
                  author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
                  journal={arXiv preprint arXiv:1805.12471},
                  year={2018}
                }"""
            ),
            url="https://nyu-mll.github.io/CoLA/",
        ),
        GLUEPTPTConfig(
            name="sst2",
            description=textwrap.dedent(
                """\
                The Stanford Sentiment Treebank consists of sentences from movie reviews and
                human annotations of their sentiment. The task is to predict the sentiment of a
                given sentence. We use the two-way (positive/negative) class split, and use only
                sentence-level labels."""
            ),
            text_features={"sentence": "sentence"},
            label_classes=["negative", "positive"],
            label_column="label",
            data_dir="glue_data_ptpt/SST-2",
            citation=textwrap.dedent(
                """\
                @inproceedings{socher2013recursive,
                  title={Recursive deep models for semantic compositionality over a sentiment treebank},
                  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
                  booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
                  pages={1631--1642},
                  year={2013}
                }"""
            ),
            url="https://datasets.stanford.edu/sentiment/index.html",
        ),
        GLUEPTPTConfig(
            name="mrpc",
            description=textwrap.dedent(
                """\
                The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
                sentence pairs automatically extracted from online news sources, with human annotations
                for whether the sentences in the pair are semantically equivalent."""
            ),  # pylint: disable=line-too-long
            text_features={"sentence1": "", "sentence2": ""},
            label_classes=["not_equivalent", "equivalent"],
            label_column="Quality",
            data_dir="glue_data_ptpt/MRPC",
            citation=textwrap.dedent(
                """\
                @inproceedings{dolan2005automatically,
                  title={Automatically constructing a corpus of sentential paraphrases},
                  author={Dolan, William B and Brockett, Chris},
                  booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
                  year={2005}
                }"""
            ),
            url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
        ),
        GLUEPTPTConfig(
            name="qqp_v2",
            description=textwrap.dedent(
                """\
                The Quora Question Pairs2 dataset is a collection of question pairs from the
                community question-answering website Quora. The task is to determine whether a
                pair of questions are semantically equivalent."""
            ),
            text_features={"question1": "question1", "question2": "question2",},
            label_classes=["not_duplicate", "duplicate"],
            label_column="is_duplicate",
            data_dir="glue_data_ptpt/QQP_v2",
            citation=textwrap.dedent(
                """\
                @online{WinNT,
                  author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
                  title = {First Quora Dataset Release: Question Pairs},
                  year = {2017},
                  url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
                  urldate = {2019-04-03}
                }"""
            ),
            url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
        ),
        GLUEPTPTConfig(
            name="stsb",
            description=textwrap.dedent(
                """\
                The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
                sentence pairs drawn from news headlines, video and image captions, and natural
                language inference data. Each pair is human-annotated with a similarity score
                from 1 to 5."""
            ),
            text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
            label_column="score",
            data_dir="glue_data_ptpt/STS-B",
            citation=textwrap.dedent(
                """\
                @article{cer2017semeval,
                  title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
                  author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
                  journal={arXiv preprint arXiv:1708.00055},
                  year={2017}
                }"""
            ),
            url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
            process_label=np.float32,
        ),
        GLUEPTPTConfig(
            name="snli",
            description=textwrap.dedent(
                """\
                The SNLI corpus (version 1.0) is a collection of 570k human-written English
                sentence pairs manually labeled for balanced classification with the labels
                entailment, contradiction, and neutral, supporting the task of natural language
                inference (NLI), also known as recognizing textual entailment (RTE).
                """
            ),
            text_features={"premise": "sentence1", "hypothesis": "sentence2",},
            label_classes=["entailment", "neutral", "contradiction"],
            label_column="gold_label",
            data_dir="SNLI",
            citation=textwrap.dedent(
                """\
                @inproceedings{snli:emnlp2015,
                  Author = {Bowman, Samuel R. and Angeli, Gabor and Potts, Christopher, and Manning, Christopher D.},
                  Booktitle = {Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
                  Publisher = {Association for Computational Linguistics},
                  Title = {A large annotated corpus for learning natural language inference},
                  Year = {2015}
                }
                """
            ),
            url="https://nlp.stanford.edu/projects/snli/",
        ),
        GLUEPTPTConfig(
            name="mnli",
            description=textwrap.dedent(
                """\
                The Multi-Genre Natural Language Inference Corpus is a crowdsourced
                collection of sentence pairs with textual entailment annotations. Given a premise sentence
                and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
                (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
                gathered from ten different sources, including transcribed speech, fiction, and government reports.
                We use the standard test set, for which we obtained private labels from the authors, and evaluate
                on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
                the SNLI corpus as 550k examples of auxiliary training data."""
            ),
            **_MNLI_BASE_KWARGS,
        ),
        GLUEPTPTConfig(
            name="mnli_mismatched",
            description=textwrap.dedent(
                """\
                The mismatched validation and test splits from MNLI.
                See the "mnli" BuilderConfig for additional information."""
            ),
            **_MNLI_BASE_KWARGS,
        ),
        GLUEPTPTConfig(
            name="mnli_matched",
            description=textwrap.dedent(
                """\
                The matched validation and test splits from MNLI.
                See the "mnli" BuilderConfig for additional information."""
            ),
            **_MNLI_BASE_KWARGS,
        ),
        GLUEPTPTConfig(
            name="qnli",
            description=textwrap.dedent(
                """\
                The Stanford Question Answering Dataset is a question-answering
                dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
                from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
                convert the task into sentence pair classification by forming a pair between each question and each
                sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
                question and the context sentence. The task is to determine whether the context sentence contains
                the answer to the question. This modified version of the original task removes the requirement that
                the model select the exact answer, but also removes the simplifying assumptions that the answer
                is always present in the input and that lexical overlap is a reliable cue."""
            ),  # pylint: disable=line-too-long
            text_features={"question": "question", "sentence": "sentence",},
            label_classes=["entailment", "not_entailment"],
            label_column="label",
            data_dir="glue_data_ptpt/QNLI",
            citation=textwrap.dedent(
                """\
                @article{rajpurkar2016squad,
                  title={Squad: 100,000+ questions for machine comprehension of text},
                  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
                  journal={arXiv preprint arXiv:1606.05250},
                  year={2016}
                }"""
            ),
            url="https://rajpurkar.github.io/SQuAD-explorer/",
        ),
        GLUEPTPTConfig(
            name="qnli_v2",
            description=textwrap.dedent(
                """\
                The Stanford Question Answering Dataset is a question-answering
                dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
                from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
                convert the task into sentence pair classification by forming a pair between each question and each
                sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
                question and the context sentence. The task is to determine whether the context sentence contains
                the answer to the question. This modified version of the original task removes the requirement that
                the model select the exact answer, but also removes the simplifying assumptions that the answer
                is always present in the input and that lexical overlap is a reliable cue."""
            ),  # pylint: disable=line-too-long
            text_features={"question": "question", "sentence": "sentence",},
            label_classes=["entailment", "not_entailment"],
            label_column="label",
            data_dir="glue_data_ptpt/QNLI_v2",
            citation=textwrap.dedent(
                """\
                @article{rajpurkar2016squad,
                  title={Squad: 100,000+ questions for machine comprehension of text},
                  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
                  journal={arXiv preprint arXiv:1606.05250},
                  year={2016}
                }"""
            ),
            url="https://rajpurkar.github.io/SQuAD-explorer/",
        ),
        GLUEPTPTConfig(
            name="rte",
            description=textwrap.dedent(
                """\
                The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
                entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
                et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009). Examples are
                constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
                for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
            ),  # pylint: disable=line-too-long
            text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
            label_classes=["entailment", "not_entailment"],
            label_column="label",
            data_dir="glue_data_ptpt/RTE",
            citation=textwrap.dedent(
                """\
                @inproceedings{dagan2005pascal,
                  title={The PASCAL recognising textual entailment challenge},
                  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
                  booktitle={Machine Learning Challenges Workshop},
                  pages={177--190},
                  year={2005},
                  organization={Springer}
                }
                @inproceedings{bar2006second,
                  title={The second pascal recognising textual entailment challenge},
                  author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
                  booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
                  volume={6},
                  number={1},
                  pages={6--4},
                  year={2006},
                  organization={Venice}
                }
                @inproceedings{giampiccolo2007third,
                  title={The third pascal recognizing textual entailment challenge},
                  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
                  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
                  pages={1--9},
                  year={2007},
                  organization={Association for Computational Linguistics}
                }
                @inproceedings{bentivogli2009fifth,
                  title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
                  author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
                  booktitle={TAC},
                  year={2009}
                }"""
            ),
            url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
        ),
        GLUEPTPTConfig(
            name="wnli",
            description=textwrap.dedent(
                """\
                The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
                in which a system must read a sentence with a pronoun and select the referent of that pronoun from
                a list of choices. The examples are manually constructed to foil simple statistical methods: Each
                one is contingent on contextual information provided by a single word or phrase in the sentence.
                To convert the problem into sentence pair classification, we construct sentence pairs by replacing
                the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
                pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
                new examples derived from fiction books that was shared privately by the authors of the original
                corpus. While the included training set is balanced between two classes, the test set is imbalanced
                between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
                hypotheses are sometimes shared between training and development examples, so if a model memorizes the
                training examples, it will predict the wrong label on the corresponding development set
                example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
                between a model's score on this task and its score on the unconverted original task. We
                call the converted dataset WNLI (Winograd NLI)."""
            ),
            text_features={"sentence1": "sentence1", "sentence2": "sentence2",},
            label_classes=["not_entailment", "entailment"],
            label_column="label",
            data_dir="glue_data_ptpt/WNLI",
            citation=textwrap.dedent(
                """\
                @inproceedings{levesque2012winograd,
                  title={The winograd schema challenge},
                  author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
                  booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
                  year={2012}
                }"""
            ),
            url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
        ),
        GLUEPTPTConfig(
            name="scitail",
            description=textwrap.dedent(
                """\
                The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
                and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
                retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
                crowdsource the annotation of each such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
                the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
                with neutral label."""
            ),
            text_features={"premise": "premise", "hypothesis": "hypothesis",},
            label_classes=["entails", "neutral"],
            label_column="label",
            data_dir="glue_data_ptpt/SciTail",
            citation="""\
                @inproceedings{scitail,
                     Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
                     Booktitle = {AAAI},
                     Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
                     Year = {2018}
                }
                """,
            url="https://gluebenchmark.com/diagnostics",
        ),
    ]

    def _info(self):
        features = {
            text_feature: datasets.Value("string")
            for text_feature in self.config.text_features.keys()
        }
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(
                names=self.config.label_classes
            )
        else:
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_GLUEPTPT_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _GLUEPTPT_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_url = self.config.data_url

        dl_dir = dl_manager.download_and_extract(data_url)
        data_dir = os.path.join(dl_dir, self.config.data_dir)

        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "data_file": os.path.join(data_dir, "train.tsv"),
                "split": "train",
            },
        )
        if self.config.name == "mnli":
            return [
                train_split,
                _mnli_split_generator(
                    "validation_matched", data_dir, "dev", matched=True
                ),
                _mnli_split_generator(
                    "validation_mismatched", data_dir, "dev", matched=False
                ),
                _mnli_split_generator("test_matched", data_dir, "test", matched=True),
                _mnli_split_generator(
                    "test_mismatched", data_dir, "test", matched=False
                ),
            ]
        elif self.config.name == "mnli_matched":
            return [
                _mnli_split_generator("validation", data_dir, "dev", matched=True),
                _mnli_split_generator("test", data_dir, "test", matched=True),
            ]
        elif self.config.name == "mnli_mismatched":
            return [
                _mnli_split_generator("validation", data_dir, "dev", matched=False),
                _mnli_split_generator("test", data_dir, "test", matched=False),
            ]
        else:
            return [
                train_split,
                datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    gen_kwargs={
                        "data_file": os.path.join(data_dir, "dev.tsv"),
                        "split": "dev",
                    },
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={
                        "data_file": os.path.join(data_dir, "test.tsv"),
                        "split": "test",
                    },
                ),
            ]

    def _generate_examples(self, data_file, split):
        if self.config.name in ["mrpc", "scitail"]:
            if self.config.name == "mrpc":
                examples = self._generate_example_mrpc_files(
                    data_file=data_file, split=split
                )
            elif self.config.name == "scitail":
                examples = self._generate_example_scitail_files(
                    data_file=data_file, split=split
                )

            for example in examples:
                yield example["idx"], example

        else:
            process_label = self.config.process_label
            label_classes = self.config.label_classes

            # The train and dev files for CoLA are the only tsv files without a
            # header.
            is_cola_non_test = self.config.name == "cola" and split != "test"

            with open(data_file, encoding="utf8") as f:
                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
                if is_cola_non_test:
                    reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)

                for n, row in enumerate(reader):
                    if is_cola_non_test:
                        row = {
                            "sentence": row[3],
                            "is_acceptable": row[1],
                        }

                    example = {
                        feat: row[col]
                        for feat, col in self.config.text_features.items()
                    }
                    example["idx"] = n

                    if self.config.label_column in row:
                        label = row[self.config.label_column]
                        # For some tasks, the label is represented as 0 and 1 in the tsv
                        # files and needs to be cast to integer to work with the feature.
                        if label_classes and label not in label_classes:
                            label = int(label) if label else None
                        example["label"] = process_label(label)
                    else:
                        example["label"] = process_label(-1)

                    # Filter out corrupted rows.
                    for value in example.values():
                        if value is None:
                            break
                    else:
                        yield example["idx"], example

    def _generate_example_mrpc_files(self, data_file, split):
        print(data_file)

        with open(data_file, encoding="utf-8-sig") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for idx, row in enumerate(reader):
                label = row["Quality"] if split != "test" else -1

                yield {
                    "sentence1": row["#1 String"],
                    "sentence2": row["#2 String"],
                    "label": int(label),
                    "idx": idx,
                }

    def _generate_example_scitail_files(self, data_file, split):
        with open(data_file, encoding="utf8") as f:
            reader = csv.DictReader(
                f,
                delimiter="\t",
                quoting=csv.QUOTE_NONE,
                fieldnames=["premise", "hypothesis", "label"],
            )
            for idx, row in enumerate(reader):
                label = row["label"] if split != "test" else -1

                yield {
                    "premise": row["premise"],
                    "hypothesis": row["hypothesis"],
                    "label": label,
                    "idx": idx,
                }


def _mnli_split_generator(name, data_dir, split, matched):
    return datasets.SplitGenerator(
        name=name,
        gen_kwargs={
            "data_file": os.path.join(
                data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")
            ),
            "split": split,
        },
    )
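
Each `GLUEPTPTConfig` entry in `BUILDER_CONFIGS` becomes a configuration name that can be passed to `load_dataset`, and `_split_generators` decides which splits each configuration exposes (MNLI additionally gets matched and mismatched validation/test splits). A small usage sketch, again assuming a hypothetical local checkout with the script at `glueptpt/glueptpt.py`:

import datasets

SCRIPT = "glueptpt/glueptpt.py"  # hypothetical local path to the script above

# Every builder config defined above is a selectable configuration name.
for name in ["cola", "sst2", "mrpc", "qqp_v2", "stsb", "snli", "mnli",
             "mnli_matched", "mnli_mismatched", "qnli", "qnli_v2", "rte",
             "wnli", "scitail"]:
    builder = datasets.load_dataset_builder(SCRIPT, name)
    print(name, list(builder.info.features))

# "mnli" yields matched and mismatched validation/test splits, while
# "mnli_matched" and "mnli_mismatched" expose only validation and test.
mnli = datasets.load_dataset(SCRIPT, "mnli")
print(mnli)
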