# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""bAbI_nli datasets."""


from __future__ import absolute_import, division, print_function

import csv
import os
import textwrap

import six

import datasets


bAbI_nli_CITATION = r"""@article{weston2015towards,
  title={Towards ai-complete question answering: A set of prerequisite toy tasks},
  author={Weston, Jason and Bordes, Antoine and Chopra, Sumit and Rush, Alexander M and Van Merri{\"e}nboer, Bart and Joulin, Armand and Mikolov, Tomas},
  journal={arXiv preprint arXiv:1502.05698},
  year={2015}
}

@inproceedings{sileo-moens-2022-analysis,
  title = "Analysis and Prediction of {NLP} Models via Task Embeddings",
  author = "Sileo, Damien and Moens, Marie-Francine",
  booktitle = "Proceedings of the Thirteenth Language Resources and Evaluation Conference",
  month = jun,
  year = "2022",
  address = "Marseille, France",
  publisher = "European Language Resources Association",
  url = "https://aclanthology.org/2022.lrec-1.67",
  pages = "633--647",
  abstract = "Task embeddings are low-dimensional representations that are trained to capture task properties. In this paper, we propose MetaEval, a collection of 101 NLP tasks. We fit a single transformer to all MetaEval tasks jointly while conditioning it on learned embeddings. The resulting task embeddings enable a novel analysis of the space of tasks. We then show that task aspects can be mapped to task embeddings for new tasks without using any annotated examples. Predicted embeddings can modulate the encoder for zero-shot inference and outperform a zero-shot baseline on GLUE tasks. The provided multitask setup can function as a benchmark for future transfer learning research.",
}
"""

_babi_nli_DESCRIPTION = """\
bAbI tasks recast as natural language inference.
"""

DATA_URL = "https://www.dropbox.com/s/0b98tbrv2mej3cu/babi_nli.zip?dl=1"

LABELS = ["not-entailed", "entailed"]

CONFIGS = [
    "single-supporting-fact",
    "two-supporting-facts",
    "three-supporting-facts",
    "two-arg-relations",
    "three-arg-relations",
    "yes-no-questions",
    "counting",
    "lists-sets",
    "simple-negation",
    "indefinite-knowledge",
    "basic-coreference",
    "conjunction",
    "compound-coreference",
    "time-reasoning",
    "basic-deduction",
    "basic-induction",
    "positional-reasoning",
    "size-reasoning",
    "path-finding",
    "agents-motivations",
]


class bAbI_nli_Config(datasets.BuilderConfig):
    """BuilderConfig for bAbI_nli."""

    def __init__(
        self,
        text_features,
        label_classes=None,
        process_label=lambda x: x,
        **kwargs,
    ):
        """BuilderConfig for bAbI_nli.

        Args:
          text_features: `dict[string, string]`, map from the name of the feature
            dict for each text field to the name of the column in the tsv file.
          label_classes: `list[string]`, the list of classes if the label is
            categorical. In this script the label classes are always set to the
            module-level `LABELS` (["not-entailed", "entailed"]).
          process_label: `Function[string, any]`, function taking in the raw value
            of the label and processing it to the form required by the label
            feature.
          **kwargs: keyword arguments forwarded to super.
        """
        super(bAbI_nli_Config, self).__init__(
            version=datasets.Version("1.0.0", ""), **kwargs
        )
        self.text_features = text_features
        self.label_column = "label"
        self.label_classes = LABELS
        self.data_url = DATA_URL
        self.data_dir = self.name  # os.path.join("babi_nli", self.name)
        self.citation = textwrap.dedent(bAbI_nli_CITATION)
        # Labels are stored as strings in the tsv files, so they are coerced to
        # strings before being encoded by the ClassLabel feature.
        self.process_label = lambda x: str(x)
        self.description = ""
        self.url = ""


class bAbI_nli(datasets.GeneratorBasedBuilder):
    """bAbI tasks recast as natural language inference (NLI)."""

    BUILDER_CONFIG_CLASS = bAbI_nli_Config
    BUILDER_CONFIGS = [
        bAbI_nli_Config(
            name=name,
            text_features={"premise": "premise", "hypothesis": "hypothesis"},
        )
        for name in CONFIGS
    ]

    def _info(self):
        features = {
            text_feature: datasets.Value("string")
            for text_feature in six.iterkeys(self.config.text_features)
        }
        if self.config.label_classes:
            features["label"] = datasets.features.ClassLabel(
                names=self.config.label_classes
            )
        else:
            features["label"] = datasets.Value("float32")
        features["idx"] = datasets.Value("int32")
        return datasets.DatasetInfo(
            description=_babi_nli_DESCRIPTION,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )

    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url)
        data_dir = os.path.join(dl_dir, self.config.data_dir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(data_dir or "", "train.tsv"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(data_dir or "", "validation.tsv"),
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(data_dir or "", "test.tsv"),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_file, split):
        process_label = self.config.process_label
        label_classes = self.config.label_classes

        with open(data_file, encoding="utf8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)

            for n, row in enumerate(reader):
                example = {
                    feat: row[col]
                    for feat, col in six.iteritems(self.config.text_features)
                }
                example["idx"] = n

                if self.config.label_column in row:
                    label = row[self.config.label_column]
                    # If the label is not one of the known classes, fall back to
                    # a numeric cast (empty labels become None).
                    if label_classes and label not in label_classes:
                        label = int(label) if label else None
                    example["label"] = process_label(label)
                else:
                    example["label"] = process_label(-1)

                yield example["idx"], example
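
# ---------------------------------------------------------------------------
# Usage sketch (not part of the builder). This assumes the script is saved
# locally as `babi_nli.py` and that the Dropbox archive in DATA_URL is still
# reachable; any name from CONFIGS can be passed as the configuration.
#
#   from datasets import load_dataset
#
#   dataset = load_dataset("./babi_nli.py", "single-supporting-fact")
#   print(dataset["train"][0])
#   # -> {"premise": "...", "hypothesis": "...", "label": 0 or 1, "idx": 0}
# ---------------------------------------------------------------------------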