"""e-SNLI: Natural Language Inference with Natural Language Explanations.""" |
|
|
|
|
|
import csv |
|
|
|
import datasets |
|
|
|
|
|
_CITATION = """
@incollection{NIPS2018_8163,
    title = {e-SNLI: Natural Language Inference with Natural Language Explanations},
    author = {Camburu, Oana-Maria and Rockt\"{a}schel, Tim and Lukasiewicz, Thomas and Blunsom, Phil},
    booktitle = {Advances in Neural Information Processing Systems 31},
    editor = {S. Bengio and H. Wallach and H. Larochelle and K. Grauman and N. Cesa-Bianchi and R. Garnett},
    pages = {9539--9549},
    year = {2018},
    publisher = {Curran Associates, Inc.},
    url = {http://papers.nips.cc/paper/8163-e-snli-natural-language-inference-with-natural-language-explanations.pdf}
}
"""

_DESCRIPTION = """
The e-SNLI dataset extends the Stanford Natural Language Inference Dataset to
include human-annotated natural language explanations of the entailment
relations.
"""

_URL = "https://raw.githubusercontent.com/OanaMariaCamburu/e-SNLI/master/dataset/"

|
class Esnli(datasets.GeneratorBasedBuilder):
    """e-SNLI: Natural Language Inference with Natural Language Explanations corpus."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            version=datasets.Version("0.0.2"),
            description="Plain text import of e-SNLI",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    # ClassLabel encodes the names in list order:
                    # 0 = entailment, 1 = neutral, 2 = contradiction.
                    "label": datasets.features.ClassLabel(names=["entailment", "neutral", "contradiction"]),
                    "explanation_1": datasets.Value("string"),
                    "explanation_2": datasets.Value("string"),
                    "explanation_3": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/OanaMariaCamburu/e-SNLI",
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The training set is shipped as two CSV files; the dev and test
        # sets are single files in the same GitHub directory.
        files = dl_manager.download_and_extract(
            {
                "train": [_URL + "esnli_train_1.csv", _URL + "esnli_train_2.csv"],
                "validation": [_URL + "esnli_dev.csv"],
                "test": [_URL + "esnli_test.csv"],
            }
        )
|
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"files": files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"files": files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"files": files["test"]},
            ),
        ]
|
    def _generate_examples(self, files):
        """Yields examples keyed by the SNLI pairID."""
        for filepath in files:
            with open(filepath, encoding="utf-8") as f:
                reader = csv.DictReader(f)
                for row in reader:
                    # `gold_label` is a string; the ClassLabel feature
                    # declared in `_info` converts it to an integer.
                    yield row["pairID"], {
                        "premise": row["Sentence1"],
                        "hypothesis": row["Sentence2"],
                        "label": row["gold_label"],
                        "explanation_1": row["Explanation_1"],
                        # Only the dev and test CSVs contain a second and
                        # third explanation; default to "" for training rows.
                        "explanation_2": row.get("Explanation_2", ""),
                        "explanation_3": row.get("Explanation_3", ""),
                    }
|
|
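if __name__ == "__main__":
    # Minimal usage sketch, assuming this file is saved locally as
    # `esnli.py`. It is illustrative rather than part of the loading
    # script itself: recent releases of the `datasets` library may
    # require `trust_remote_code=True` for script-based loading, or may
    # not support it at all.
    esnli = datasets.load_dataset("./esnli.py", "plain_text")
    print(esnli)  # DatasetDict with train/validation/test splits
    print(esnli["train"][0])  # premise, hypothesis, label, explanations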