# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Recast datasets"""
import csv
import os
import textwrap

import datasets
_Recast_CITATION = r"""@inproceedings{poliak-etal-2018-collecting,
title = "Collecting Diverse Natural Language Inference Problems for Sentence Representation Evaluation",
author = "Poliak, Adam and
Haldar, Aparajita and
Rudinger, Rachel and
Hu, J. Edward and
Pavlick, Ellie and
White, Aaron Steven and
Van Durme, Benjamin",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
month = oct # "-" # nov,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/D18-1007",
doi = "10.18653/v1/D18-1007",
pages = "67--81",
abstract = "We present a large-scale collection of diverse natural language inference (NLI) datasets that help provide insight into how well a sentence representation captures distinct types of reasoning. The collection results from recasting 13 existing datasets from 7 semantic phenomena into a common NLI structure, resulting in over half a million labeled context-hypothesis pairs in total. We refer to our collection as the DNC: Diverse Natural Language Inference Collection. The DNC is available online at \url{https://www.decomp.net}, and will grow over time as additional resources are recast and added from novel sources.",
}
"""
_Recast_DESCRIPTION = """\
A diverse collection of tasks recast as natural language inference tasks.
"""
DATA_URL = "https://www.dropbox.com/s/z1mcq6ygfsae0wj/recast.zip?dl=1"
TASK_TO_LABELS = {
"recast_kg_relations": ["1", "2", "3", "4", "5", "6"],
"recast_puns": ["not-entailed", "entailed"],
"recast_factuality": ["not-entailed", "entailed"],
"recast_verbnet": ["not-entailed", "entailed"],
"recast_verbcorner": ["not-entailed", "entailed"],
"recast_sentiment": ["not-entailed", "entailed"],
"recast_megaveridicality": ["not-entailed", "entailed"],
"recast_ner": ["not-entailed", "entailed"],
"recast_winogender": ["not-entailed", "entailed"],
"recast_ner": ["not-entailed", "entailed"],
}
def get_labels(task):
return TASK_TO_LABELS[task]
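# For example, get_labels("recast_puns") returns ["not-entailed", "entailed"];
# recast_kg_relations is the only task with a different (six-way) label set.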
class RecastConfig(datasets.BuilderConfig):
"""BuilderConfig for Recast."""
    def __init__(
        self,
        text_features,
        label_classes=None,
        process_label=str,
        **kwargs,
    ):
        """BuilderConfig for Recast.

        Args:
          text_features: `dict[string, string]`, map from the name of the feature
            dict for each text field to the name of the column in the tsv file
          label_classes: `list[string]`, the list of classes if the label is
            categorical. If not provided, the classes are looked up in
            `TASK_TO_LABELS` under the config name, and the label feature is a
            `datasets.ClassLabel` over them.
          process_label: `Function[string, any]`, function taking in the raw value
            of the label and processing it to the form required by the label
            feature
          **kwargs: keyword arguments forwarded to super.
        """
        super(RecastConfig, self).__init__(
            version=datasets.Version("1.0.0", ""), **kwargs
        )
        self.text_features = text_features
        self.label_column = "label"
        self.label_classes = label_classes or get_labels(self.name)
        self.data_url = DATA_URL
        self.data_dir = os.path.join("recast", self.name)
        self.citation = textwrap.dedent(_Recast_CITATION)
        self.process_label = process_label
        self.description = ""
        self.url = ""
class Recast(datasets.GeneratorBasedBuilder):
"""The General Language Understanding Evaluation (Recast) benchmark."""
BUILDER_CONFIG_CLASS = RecastConfig
BUILDER_CONFIGS = [
RecastConfig(
name="recast_kg_relations",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_puns",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_factuality",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_verbnet",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_verbcorner",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_ner",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_sentiment",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
RecastConfig(
name="recast_megaveridicality",
text_features={"context": "context", "hypothesis": "hypothesis"},
),
]
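    # Every config shares the same two text columns; only the label set
    # differs (see TASK_TO_LABELS). A sketch of listing the configs from
    # client code, assuming the script is reachable by `datasets` under a
    # hypothetical hub id:
    #
    #   from datasets import get_dataset_config_names
    #   get_dataset_config_names("USER/recast")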
    def _info(self):
        features = {
            text_feature: datasets.Value("string")
            for text_feature in self.config.text_features
        }
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(
                names=self.config.label_classes
            )
        else:
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_Recast_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(self.config.data_url)
data_dir = os.path.join(dl_dir, self.config.data_dir)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "train.tsv"),
"split": "train",
},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "dev.tsv"),
"split": "dev",
},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"data_file": os.path.join(data_dir or "", "test.tsv"),
"split": "test",
},
),
]
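    # The archive at DATA_URL is expected to contain one folder per config,
    # each holding the three tab-separated splits, e.g.:
    #
    #   recast/recast_puns/train.tsv
    #   recast/recast_puns/dev.tsv
    #   recast/recast_puns/test.tsv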
    def _generate_examples(self, data_file, split):
        process_label = self.config.process_label
        label_classes = self.config.label_classes
        with open(data_file, encoding="utf8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for n, row in enumerate(reader):
                # Map each configured text feature to its tsv column.
                example = {
                    feat: row[col]
                    for feat, col in self.config.text_features.items()
                }
                example["idx"] = n
                if self.config.label_column in row:
                    label = row[self.config.label_column]
                    # If the raw value is not one of the known class names,
                    # fall back to reading it as a numeric class index
                    # (empty values become None).
                    if label_classes and label not in label_classes:
                        label = int(label) if label else None
                    example["label"] = process_label(label)
                else:
                    # Unlabeled split: use -1, the conventional "no label"
                    # value accepted by `datasets.ClassLabel`.
                    example["label"] = -1
                yield example["idx"], example