"""SNLI-break loading script for the HuggingFace `datasets` library.

Parses a local tab-separated file (``dataset.jsonl``) into SNLI-style
(premise, hypothesis, label) examples and exposes it as a single TEST split.

Author: Xiang Pan <xiangpan@nyu.edu>
Date: 2021-09-20 01:38:55
LastEditTime: 2021-09-20 01:45:46
FilePath: /SNLI_OOD/cached_datasets/snli_break/snli_break.py
"""
import csv
import os

import datasets

_DESCRIPTION = """\
The SNLI corpus (version 1.0) is a collection of 570k human-written English
sentence pairs manually labeled for balanced classification with the labels
entailment, contradiction, and neutral, supporting the task of natural language
inference (NLI), also known as recognizing textual entailment (RTE).
"""

# _DATA_URL = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"


class snli_break(datasets.GeneratorBasedBuilder):
    """Builder for the SNLI "break" evaluation set (plain-text import)."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="plain_text",
            version=datasets.Version("1.0.0", ""),
            description="Plain text import of SNLI",
        )
    ]

    def _info(self):
        """Return dataset metadata, including the example schema.

        The features schema matches what `_generate_examples` yields:
        two string sentences and a 3-way NLI class label.  Rows with no
        gold label ("-") are yielded as -1, the `ClassLabel` "no label"
        convention.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "hypothesis": datasets.Value("string"),
                    "label": datasets.features.ClassLabel(
                        names=["entailment", "neutral", "contradiction"]
                    ),
                }
            ),
            # No default supervised_keys: both premise and hypothesis are
            # inputs, so there is no single (input, target) pair to declare.
            supervised_keys=None,
            homepage="https://nlp.stanford.edu/projects/snli/",
        )

    def _split_generators(self, dl_manager):
        """Declare the splits; this cached dataset ships only a TEST file.

        The data file sits next to this script, so nothing is downloaded
        (`dl_manager` is unused by design).
        """
        data_dir = "./"
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, "dataset.jsonl")},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from the raw (text) file.

        NOTE(review): despite the ``.jsonl`` name, the file is parsed as a
        tab-separated table with a header row (``gold_label``, ``sentence1``,
        ``sentence2`` columns) — confirm the on-disk format matches.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
            for idx, row in enumerate(reader):
                # "-" means annotators reached no consensus; -1 is the
                # ClassLabel convention for "no label".
                label = -1 if row["gold_label"] == "-" else row["gold_label"]
                yield idx, {
                    "premise": row["sentence1"],
                    "hypothesis": row["sentence2"],
                    "label": label,
                }