Modalities: Tabular, Text
Languages: Portuguese
Libraries: Datasets
luismsgomes committed
Commit ea4e798
1 Parent(s): 474ce9c

disable untranslated tasks

Files changed (2):
  1. README.md +9 -0
  2. glue-ptpt.py +216 -216
README.md CHANGED
@@ -25,6 +25,15 @@ If you use this dataset please cite:
  primaryClass={cs.CL}
  }
 
+ Thus far, only 4 tasks have been translated to European Portuguese:
+
+ - MRPC
+ - RTE
+ - STS-B
+ - WNLI
+
+ The remaining tasks will be added in the future.
+
 
  See [gluebenchmark.com](https://gluebenchmark.com/) for information about the General Language Understanding Evaluation (GLUE) dataset.
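For reference, a minimal sketch of loading one of the four translated tasks with the `datasets` library, assuming a local copy of glue-ptpt.py together with its glue_data_ptpt/ files (the Hub repository id is not stated in this commit, and recent `datasets` releases may require trust_remote_code=True for script-based datasets):

```python
# Minimal usage sketch; the local script path is an assumption, substitute the
# actual Hub repository id if the dataset is loaded from the Hugging Face Hub.
from datasets import load_dataset

# "mrpc" is one of the four enabled configs ("stsb", "rte" and "wnli" are the others).
mrpc_pt = load_dataset("glue-ptpt.py", "mrpc")

print(mrpc_pt)              # DatasetDict with the available splits
print(mrpc_pt["train"][0])  # first translated sentence pair and its label
```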
glue-ptpt.py CHANGED
@@ -130,55 +130,55 @@ class GLUEPTPT(datasets.GeneratorBasedBuilder):
  """The General Language Understanding Evaluation (GLUE) benchmark."""
 
  BUILDER_CONFIGS = [
- GLUEPTPTConfig(
- name="cola",
- description=textwrap.dedent(
- """\
- The Corpus of Linguistic Acceptability consists of English
- acceptability judgments drawn from books and journal articles on
- linguistic theory. Each example is a sequence of words annotated
- with whether it is a grammatical English sentence."""
- ),
- text_features={"sentence": "sentence"},
- label_classes=["unacceptable", "acceptable"],
- label_column="is_acceptable",
- data_dir="glue_data_ptpt/CoLA",
- citation=textwrap.dedent(
- """\
- @article{warstadt2018neural,
- title={Neural Network Acceptability Judgments},
- author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
- journal={arXiv preprint arXiv:1805.12471},
- year={2018}
- }"""
- ),
- url="https://nyu-mll.github.io/CoLA/",
- ),
- GLUEPTPTConfig(
- name="sst2",
- description=textwrap.dedent(
- """\
- The Stanford Sentiment Treebank consists of sentences from movie reviews and
- human annotations of their sentiment. The task is to predict the sentiment of a
- given sentence. We use the two-way (positive/negative) class split, and use only
- sentence-level labels."""
- ),
- text_features={"sentence": "sentence"},
- label_classes=["negative", "positive"],
- label_column="label",
- data_dir="glue_data_ptpt/SST-2",
- citation=textwrap.dedent(
- """\
- @inproceedings{socher2013recursive,
- title={Recursive deep models for semantic compositionality over a sentiment treebank},
- author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
- booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
- pages={1631--1642},
- year={2013}
- }"""
- ),
- url="https://datasets.stanford.edu/sentiment/index.html",
- ),
+ # GLUEPTPTConfig(
+ # name="cola",
+ # description=textwrap.dedent(
+ # """\
+ # The Corpus of Linguistic Acceptability consists of English
+ # acceptability judgments drawn from books and journal articles on
+ # linguistic theory. Each example is a sequence of words annotated
+ # with whether it is a grammatical English sentence."""
+ # ),
+ # text_features={"sentence": "sentence"},
+ # label_classes=["unacceptable", "acceptable"],
+ # label_column="is_acceptable",
+ # data_dir="glue_data_ptpt/CoLA",
+ # citation=textwrap.dedent(
+ # """\
+ # @article{warstadt2018neural,
+ # title={Neural Network Acceptability Judgments},
+ # author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
+ # journal={arXiv preprint arXiv:1805.12471},
+ # year={2018}
+ # }"""
+ # ),
+ # url="https://nyu-mll.github.io/CoLA/",
+ # ),
+ # GLUEPTPTConfig(
+ # name="sst2",
+ # description=textwrap.dedent(
+ # """\
+ # The Stanford Sentiment Treebank consists of sentences from movie reviews and
+ # human annotations of their sentiment. The task is to predict the sentiment of a
+ # given sentence. We use the two-way (positive/negative) class split, and use only
+ # sentence-level labels."""
+ # ),
+ # text_features={"sentence": "sentence"},
+ # label_classes=["negative", "positive"],
+ # label_column="label",
+ # data_dir="glue_data_ptpt/SST-2",
+ # citation=textwrap.dedent(
+ # """\
+ # @inproceedings{socher2013recursive,
+ # title={Recursive deep models for semantic compositionality over a sentiment treebank},
+ # author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
+ # booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
+ # pages={1631--1642},
+ # year={2013}
+ # }"""
+ # ),
+ # url="https://datasets.stanford.edu/sentiment/index.html",
+ # ),
  GLUEPTPTConfig(
  name="mrpc",
  description=textwrap.dedent(
@@ -202,30 +202,30 @@ class GLUEPTPT(datasets.GeneratorBasedBuilder):
  ),
  url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
  ),
- GLUEPTPTConfig(
- name="qqp_v2",
- description=textwrap.dedent(
- """\
- The Quora Question Pairs2 dataset is a collection of question pairs from the
- community question-answering website Quora. The task is to determine whether a
- pair of questions are semantically equivalent."""
- ),
- text_features={"question1": "question1", "question2": "question2",},
- label_classes=["not_duplicate", "duplicate"],
- label_column="is_duplicate",
- data_dir="glue_data_ptpt/QQP_v2",
- citation=textwrap.dedent(
- """\
- @online{WinNT,
- author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
- title = {First Quora Dataset Release: Question Pairs},
- year = {2017},
- url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
- urldate = {2019-04-03}
- }"""
- ),
- url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
- ),
+ # GLUEPTPTConfig(
+ # name="qqp_v2",
+ # description=textwrap.dedent(
+ # """\
+ # The Quora Question Pairs2 dataset is a collection of question pairs from the
+ # community question-answering website Quora. The task is to determine whether a
+ # pair of questions are semantically equivalent."""
+ # ),
+ # text_features={"question1": "question1", "question2": "question2",},
+ # label_classes=["not_duplicate", "duplicate"],
+ # label_column="is_duplicate",
+ # data_dir="glue_data_ptpt/QQP_v2",
+ # citation=textwrap.dedent(
+ # """\
+ # @online{WinNT,
+ # author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
+ # title = {First Quora Dataset Release: Question Pairs},
+ # year = {2017},
+ # url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
+ # urldate = {2019-04-03}
+ # }"""
+ # ),
+ # url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
+ # ),
  GLUEPTPTConfig(
  name="stsb",
  description=textwrap.dedent(
@@ -250,124 +250,124 @@ class GLUEPTPT(datasets.GeneratorBasedBuilder):
  url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
  process_label=np.float32,
  ),
- GLUEPTPTConfig(
- name="snli",
- description=textwrap.dedent(
- """\
- The SNLI corpus (version 1.0) is a collection of 570k human-written English
- sentence pairs manually labeled for balanced classification with the labels
- entailment, contradiction, and neutral, supporting the task of natural language
- inference (NLI), also known as recognizing textual entailment (RTE).
- """
- ),
- text_features={"premise": "sentence1", "hypothesis": "sentence2",},
- label_classes=["entailment", "neutral", "contradiction"],
- label_column="gold_label",
- data_dir="SNLI",
- citation=textwrap.dedent(
- """\
- @inproceedings{snli:emnlp2015,
- Author = {Bowman, Samuel R. and Angeli, Gabor and Potts, Christopher, and Manning, Christopher D.},
- Booktitle = {Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
- Publisher = {Association for Computational Linguistics},
- Title = {A large annotated corpus for learning natural language inference},
- Year = {2015}
- }
- """
- ),
- url="https://nlp.stanford.edu/projects/snli/",
- ),
- GLUEPTPTConfig(
- name="mnli",
- description=textwrap.dedent(
- """\
- The Multi-Genre Natural Language Inference Corpus is a crowdsourced
- collection of sentence pairs with textual entailment annotations. Given a premise sentence
- and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
- (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
- gathered from ten different sources, including transcribed speech, fiction, and government reports.
- We use the standard test set, for which we obtained private labels from the authors, and evaluate
- on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
- the SNLI corpus as 550k examples of auxiliary training data."""
- ),
- **_MNLI_BASE_KWARGS,
- ),
- GLUEPTPTConfig(
- name="mnli_mismatched",
- description=textwrap.dedent(
- """\
- The mismatched validation and test splits from MNLI.
- See the "mnli" BuilderConfig for additional information."""
- ),
- **_MNLI_BASE_KWARGS,
- ),
- GLUEPTPTConfig(
- name="mnli_matched",
- description=textwrap.dedent(
- """\
- The matched validation and test splits from MNLI.
- See the "mnli" BuilderConfig for additional information."""
- ),
- **_MNLI_BASE_KWARGS,
- ),
- GLUEPTPTConfig(
- name="qnli",
- description=textwrap.dedent(
- """\
- The Stanford Question Answering Dataset is a question-answering
- dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
- from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
- convert the task into sentence pair classification by forming a pair between each question and each
- sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
- question and the context sentence. The task is to determine whether the context sentence contains
- the answer to the question. This modified version of the original task removes the requirement that
- the model select the exact answer, but also removes the simplifying assumptions that the answer
- is always present in the input and that lexical overlap is a reliable cue."""
- ), # pylint: disable=line-too-long
- text_features={"question": "question", "sentence": "sentence",},
- label_classes=["entailment", "not_entailment"],
- label_column="label",
- data_dir="glue_data_ptpt/QNLI",
- citation=textwrap.dedent(
- """\
- @article{rajpurkar2016squad,
- title={Squad: 100,000+ questions for machine comprehension of text},
- author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
- journal={arXiv preprint arXiv:1606.05250},
- year={2016}
- }"""
- ),
- url="https://rajpurkar.github.io/SQuAD-explorer/",
- ),
- GLUEPTPTConfig(
- name="qnli_v2",
- description=textwrap.dedent(
- """\
- The Stanford Question Answering Dataset is a question-answering
- dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
- from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
- convert the task into sentence pair classification by forming a pair between each question and each
- sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
- question and the context sentence. The task is to determine whether the context sentence contains
- the answer to the question. This modified version of the original task removes the requirement that
- the model select the exact answer, but also removes the simplifying assumptions that the answer
- is always present in the input and that lexical overlap is a reliable cue."""
- ), # pylint: disable=line-too-long
- text_features={"question": "question", "sentence": "sentence",},
- label_classes=["entailment", "not_entailment"],
- label_column="label",
- data_dir="glue_data_ptpt/QNLI_v2",
- citation=textwrap.dedent(
- """\
- @article{rajpurkar2016squad,
- title={Squad: 100,000+ questions for machine comprehension of text},
- author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
- journal={arXiv preprint arXiv:1606.05250},
- year={2016}
- }"""
- ),
- url="https://rajpurkar.github.io/SQuAD-explorer/",
- ),
+ # GLUEPTPTConfig(
+ # name="snli",
+ # description=textwrap.dedent(
+ # """\
+ # The SNLI corpus (version 1.0) is a collection of 570k human-written English
+ # sentence pairs manually labeled for balanced classification with the labels
+ # entailment, contradiction, and neutral, supporting the task of natural language
+ # inference (NLI), also known as recognizing textual entailment (RTE).
+ # """
+ # ),
+ # text_features={"premise": "sentence1", "hypothesis": "sentence2",},
+ # label_classes=["entailment", "neutral", "contradiction"],
+ # label_column="gold_label",
+ # data_dir="SNLI",
+ # citation=textwrap.dedent(
+ # """\
+ # @inproceedings{snli:emnlp2015,
+ # Author = {Bowman, Samuel R. and Angeli, Gabor and Potts, Christopher, and Manning, Christopher D.},
+ # Booktitle = {Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
+ # Publisher = {Association for Computational Linguistics},
+ # Title = {A large annotated corpus for learning natural language inference},
+ # Year = {2015}
+ # }
+ # """
+ # ),
+ # url="https://nlp.stanford.edu/projects/snli/",
+ # ),
+ # GLUEPTPTConfig(
+ # name="mnli",
+ # description=textwrap.dedent(
+ # """\
+ # The Multi-Genre Natural Language Inference Corpus is a crowdsourced
+ # collection of sentence pairs with textual entailment annotations. Given a premise sentence
+ # and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
+ # (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
+ # gathered from ten different sources, including transcribed speech, fiction, and government reports.
+ # We use the standard test set, for which we obtained private labels from the authors, and evaluate
+ # on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
+ # the SNLI corpus as 550k examples of auxiliary training data."""
+ # ),
+ # **_MNLI_BASE_KWARGS,
+ # ),
+ # GLUEPTPTConfig(
+ # name="mnli_mismatched",
+ # description=textwrap.dedent(
+ # """\
+ # The mismatched validation and test splits from MNLI.
+ # See the "mnli" BuilderConfig for additional information."""
+ # ),
+ # **_MNLI_BASE_KWARGS,
+ # ),
+ # GLUEPTPTConfig(
+ # name="mnli_matched",
+ # description=textwrap.dedent(
+ # """\
+ # The matched validation and test splits from MNLI.
+ # See the "mnli" BuilderConfig for additional information."""
+ # ),
+ # **_MNLI_BASE_KWARGS,
+ # ),
+ # GLUEPTPTConfig(
+ # name="qnli",
+ # description=textwrap.dedent(
+ # """\
+ # The Stanford Question Answering Dataset is a question-answering
+ # dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
+ # from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
+ # convert the task into sentence pair classification by forming a pair between each question and each
+ # sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
+ # question and the context sentence. The task is to determine whether the context sentence contains
+ # the answer to the question. This modified version of the original task removes the requirement that
+ # the model select the exact answer, but also removes the simplifying assumptions that the answer
+ # is always present in the input and that lexical overlap is a reliable cue."""
+ # ), # pylint: disable=line-too-long
+ # text_features={"question": "question", "sentence": "sentence",},
+ # label_classes=["entailment", "not_entailment"],
+ # label_column="label",
+ # data_dir="glue_data_ptpt/QNLI",
+ # citation=textwrap.dedent(
+ # """\
+ # @article{rajpurkar2016squad,
+ # title={Squad: 100,000+ questions for machine comprehension of text},
+ # author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
+ # journal={arXiv preprint arXiv:1606.05250},
+ # year={2016}
+ # }"""
+ # ),
+ # url="https://rajpurkar.github.io/SQuAD-explorer/",
+ # ),
+ # GLUEPTPTConfig(
+ # name="qnli_v2",
+ # description=textwrap.dedent(
+ # """\
+ # The Stanford Question Answering Dataset is a question-answering
+ # dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
+ # from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
+ # convert the task into sentence pair classification by forming a pair between each question and each
+ # sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
+ # question and the context sentence. The task is to determine whether the context sentence contains
+ # the answer to the question. This modified version of the original task removes the requirement that
+ # the model select the exact answer, but also removes the simplifying assumptions that the answer
+ # is always present in the input and that lexical overlap is a reliable cue."""
+ # ), # pylint: disable=line-too-long
+ # text_features={"question": "question", "sentence": "sentence",},
+ # label_classes=["entailment", "not_entailment"],
+ # label_column="label",
+ # data_dir="glue_data_ptpt/QNLI_v2",
+ # citation=textwrap.dedent(
+ # """\
+ # @article{rajpurkar2016squad,
+ # title={Squad: 100,000+ questions for machine comprehension of text},
+ # author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
+ # journal={arXiv preprint arXiv:1606.05250},
+ # year={2016}
+ # }"""
+ # ),
+ # url="https://rajpurkar.github.io/SQuAD-explorer/",
+ # ),
  GLUEPTPTConfig(
  name="rte",
  description=textwrap.dedent(
@@ -454,31 +454,31 @@ class GLUEPTPT(datasets.GeneratorBasedBuilder):
  ),
  url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
  ),
- GLUEPTPTConfig(
- name="scitail",
- description=textwrap.dedent(
- """\
- The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
- and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
- retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
- crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
- the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
- with neutral label"""
- ),
- text_features={"premise": "premise", "hypothesis": "hypothesis",},
- label_classes=["entails", "neutral"],
- label_column="label",
- data_dir="glue_data_ptpt/SciTail",
- citation=""""\
- inproceedings{scitail,
- Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
- Booktitle = {AAAI},
- Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
- Year = {2018}
- }
- """,
- url="https://gluebenchmark.com/diagnostics",
- ),
+ # GLUEPTPTConfig(
+ # name="scitail",
+ # description=textwrap.dedent(
+ # """\
+ # The SciTail dataset is an entailment dataset created from multiple-choice science exams and web sentences. Each question
+ # and the correct answer choice are converted into an assertive statement to form the hypothesis. We use information
+ # retrieval to obtain relevant text from a large text corpus of web sentences, and use these sentences as a premise P. We
+ # crowdsource the annotation of such premise-hypothesis pair as supports (entails) or not (neutral), in order to create
+ # the SciTail dataset. The dataset contains 27,026 examples with 10,101 examples with entails label and 16,925 examples
+ # with neutral label"""
+ # ),
+ # text_features={"premise": "premise", "hypothesis": "hypothesis",},
+ # label_classes=["entails", "neutral"],
+ # label_column="label",
+ # data_dir="glue_data_ptpt/SciTail",
+ # citation=""""\
+ # inproceedings{scitail,
+ # Author = {Tushar Khot and Ashish Sabharwal and Peter Clark},
+ # Booktitle = {AAAI},
+ # Title = {{SciTail}: A Textual Entailment Dataset from Science Question Answering},
+ # Year = {2018}
+ # }
+ # """,
+ # url="https://gluebenchmark.com/diagnostics",
+ # ),
  ]
 
  def _info(self):
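After this commit only the mrpc, stsb, rte and wnli builder configs remain active; the others are commented out rather than deleted, so they can be re-enabled once translated. A hedged sketch for confirming which configs the updated script exposes (assuming a local checkout; `get_dataset_config_names` is part of the `datasets` library, and newer releases may additionally require trust_remote_code=True):

```python
# Sketch: list the builder configs exposed by the updated loading script.
# Assumes glue-ptpt.py from this commit is in the current directory.
from datasets import get_dataset_config_names

configs = get_dataset_config_names("glue-ptpt.py")
print(configs)  # expected after this commit: ['mrpc', 'stsb', 'rte', 'wnli']
```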