# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""The General Language Understanding Evaluation (GLUE) benchmark."""

import json
import os
import textwrap

import datasets

_GLUE_CITATION = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
_GLUE_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""

_MNLI_BASE_KWARGS = dict(
    text_features={
        "premise": "sentence1",
        "hypothesis": "sentence2",
    },
    label_classes=["entailment", "neutral", "contradiction"],
    label_column="label",
    data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
    data_dir="MNLI",
    citation=textwrap.dedent(
        """\
        @InProceedings{N18-1101,
          author = "Williams, Adina
                    and Nangia, Nikita
                    and Bowman, Samuel",
          title = "A Broad-Coverage Challenge Corpus for
                   Sentence Understanding through Inference",
          booktitle = "Proceedings of the 2018 Conference of
                       the North American Chapter of the
                       Association for Computational Linguistics:
                       Human Language Technologies, Volume 1 (Long
                       Papers)",
          year = "2018",
          publisher = "Association for Computational Linguistics",
          pages = "1112--1122",
          location = "New Orleans, Louisiana",
          url = "http://aclweb.org/anthology/N18-1101"
        }
        @article{bowman2015large,
          title={A large annotated corpus for learning natural language inference},
          author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
          journal={arXiv preprint arXiv:1508.05326},
          year={2015}
        }"""
    ),
    url="http://www.nyu.edu/projects/bowman/multinli/",
)
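
# These base kwargs are shared by both bias-amplified MNLI configurations declared in
# `Glue.BUILDER_CONFIGS` below ("minority_examples" and "partial_input"). Note that
# `data_url` and `data_dir` are carried over from the upstream GLUE loading script for
# compatibility; `_split_generators` below does not use them and instead downloads the
# bias-amplified JSONL split files directly.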


class GlueConfig(datasets.BuilderConfig):
    """BuilderConfig for GLUE."""

    def __init__(
        self,
        text_features,
        label_column,
        data_url,
        data_dir,
        citation,
        url,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """BuilderConfig for GLUE.

        Args:
          text_features: `dict[string, string]`, map from the name of the feature
            dict for each text field to the name of the column in the tsv file
          label_column: `string`, name of the column in the tsv file corresponding
            to the label
          data_url: `string`, url to download the zip file from
          data_dir: `string`, the path to the folder containing the tsv files in the
            downloaded zip
          citation: `string`, citation for the data set
          url: `string`, url for information about the data set
          label_classes: `list[string]`, the list of classes if the label is
            categorical. If not provided, then the label will be of type
            `datasets.Value('float32')`.
          process_label: `Function[string, any]`, function taking in the raw value
            of the label and processing it to the form required by the label feature
          **kwargs: keyword arguments forwarded to super.
        """
        super(GlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.text_features = text_features
        self.label_column = label_column
        self.label_classes = label_classes
        self.data_url = data_url
        self.data_dir = data_dir
        self.citation = citation
        self.url = url
        self.process_label = process_label
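
# Illustrative sketch only (not part of the loading path): a config for one of the
# bias-amplified splits can be built from the shared kwargs defined above.
#
#   config = GlueConfig(name="minority_examples", **_MNLI_BASE_KWARGS)
#   assert config.label_classes == ["entailment", "neutral", "contradiction"]
#   assert config.text_features == {"premise": "sentence1", "hypothesis": "sentence2"}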


class Glue(datasets.GeneratorBasedBuilder):
    """The General Language Understanding Evaluation (GLUE) benchmark."""

    BUILDER_CONFIGS = [
        GlueConfig(
            name=bias_amplified_splits_type,
            description=textwrap.dedent(
                """\
            The Multi-Genre Natural Language Inference Corpus is a crowdsourced
            collection of sentence pairs with textual entailment annotations. Given a premise sentence
            and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
            (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
            gathered from ten different sources, including transcribed speech, fiction, and government reports.
            We use the standard test set, for which we obtained private labels from the authors, and evaluate
            on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
            the SNLI corpus as 550k examples of auxiliary training data."""
            ),
            **_MNLI_BASE_KWARGS,
        )
        for bias_amplified_splits_type in ["minority_examples", "partial_input"]
    ]
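
    # Usage sketch (illustrative): each config exposes the custom split names defined in
    # `_split_generators` below rather than the standard GLUE splits. "<hub_repo_id>" is a
    # placeholder for wherever this script is hosted on the Hugging Face Hub.
    #
    #   ds = datasets.load_dataset("<hub_repo_id>", "minority_examples", split="train.biased")
    #   ds = datasets.load_dataset("<hub_repo_id>", "partial_input", split="validation_matched.anti_biased")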

    def _info(self):
        features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
        else:
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_GLUE_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation + "\n" + _GLUE_CITATION,
        )
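
    # For the MNLI-based configs above, `_info` resolves to roughly the following
    # feature schema (informal sketch):
    #
    #   Features({
    #       "premise": Value("string"),
    #       "hypothesis": Value("string"),
    #       "label": ClassLabel(names=["entailment", "neutral", "contradiction"]),
    #       "idx": Value("int32"),
    #   })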

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name="train.biased",
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, "train.biased.jsonl")),
                },
            ),
            datasets.SplitGenerator(
                name="train.anti_biased",
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, "train.anti_biased.jsonl")),
                },
            ),
            datasets.SplitGenerator(
                name="validation_matched.biased",
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, "validation_matched.biased.jsonl")),
                },
            ),
            datasets.SplitGenerator(
                name="validation_matched.anti_biased",
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, "validation_matched.anti_biased.jsonl")),
                },
            ),
            datasets.SplitGenerator(
                name="validation_mismatched.biased",
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, "validation_mismatched.biased.jsonl")),
                },
            ),
            datasets.SplitGenerator(
                name="validation_mismatched.anti_biased",
                gen_kwargs={
                    "filepath": dl_manager.download(os.path.join(self.config.name, "validation_mismatched.anti_biased.jsonl")),
                },
            ),
        ]
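
    # Each relative path above (e.g. "minority_examples/train.biased.jsonl") is passed to
    # `dl_manager.download`, which resolves it against the repository hosting this script,
    # so every config directory is expected to contain all six bias-amplified JSONL files.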

    def _generate_examples(self, filepath):
        """Generate examples.

        Args:
          filepath: a string

        Yields:
          (key, example) tuples, where each example is a dict with "idx", "premise",
          "hypothesis" and "label" fields
        """
        process_label = self.config.process_label
        with open(filepath, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                item = json.loads(line)
                example = {
                    "idx": item["idx"],
                    "premise": item["premise"],
                    "hypothesis": item["hypothesis"],
                }
                if self.config.label_column in item:
                    label = item[self.config.label_column]
                    example["label"] = process_label(label)
                else:
                    # No gold label available: -1 is the conventional "missing label"
                    # value for a ClassLabel feature.
                    example["label"] = process_label(-1)
                yield example["idx"], example
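
# Illustrative input/output sketch (values invented for illustration): a line in, e.g.,
# "minority_examples/train.biased.jsonl" is expected to look like
#
#   {"idx": 42, "premise": "...", "hypothesis": "...", "label": "entailment"}
#
# (the label may equally be stored as a class index; `ClassLabel` accepts both), and
# `_generate_examples` yields it as
#
#   (42, {"idx": 42, "premise": "...", "hypothesis": "...", "label": "entailment"})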