"""NVLR2 loading script."""
import json
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{xie2019visual,
    title={Visual Entailment: A Novel Task for Fine-grained Image Understanding},
    author={Xie, Ning and Lai, Farley and Doran, Derek and Kadav, Asim},
    journal={arXiv preprint arXiv:1901.06706},
    year={2019}
}
@article{xie2018visual,
    title={Visual Entailment Task for Visually-Grounded Language Learning},
    author={Xie, Ning and Lai, Farley and Doran, Derek and Kadav, Asim},
    journal={arXiv preprint arXiv:1811.10582},
    year={2018}
}
@article{young-etal-2014-image,
    title = "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions",
    author = "Young, Peter and Lai, Alice and Hodosh, Micah and Hockenmaier, Julia",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "2",
    year = "2014",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/Q14-1006",
    doi = "10.1162/tacl_a_00166",
    pages = "67--78",
    abstract = "We propose to use the visual denotations of linguistic expressions (i.e. the set of images they describe) to define novel denotational similarity metrics, which we show to be at least as beneficial as distributional similarities for two tasks that require semantic inference. To compute these denotational similarities, we construct a denotation graph, i.e. a subsumption hierarchy over constituents and their denotations, based on a large corpus of 30K images and 150K descriptive captions.",
}
"""

_DESCRIPTION = """\
SNLI-VE is the dataset proposed for the Visual Entailment (VE) task investigated in
"Visual Entailment Task for Visually-Grounded Language Learning" (accepted to the NeurIPS 2018 ViGIL workshop).
SNLI-VE is built on top of SNLI and Flickr30K. The problem VE tries to solve is to reason about the
relationship between an image premise P_image and a text hypothesis H_text.
Specifically, given an image as premise and a natural language sentence as hypothesis, one of three labels
(entailment, neutral, contradiction) is assigned based on the relationship conveyed by (P_image, H_text):
- entailment holds if there is enough evidence in P_image to conclude that H_text is true;
- contradiction holds if there is enough evidence in P_image to conclude that H_text is false;
- otherwise, the relationship is neutral, implying the evidence in P_image is insufficient to draw a conclusion about H_text.
"""

_HOMEPAGE = "https://github.com/necla-ml/SNLI-VE"
_LICENSE = "BSD-3-clause"
_SNLI_VE_URL_BASE = "https://huggingface.co/datasets/HuggingFaceM4/SNLI-VE/resolve/main/"
_SNLI_VE_SPLITS = {
    "train": "snli_ve_train.jsonl",
    "validation": "snli_ve_dev.jsonl",
    "test": "snli_ve_test.jsonl",
}

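# Cluster-specific assumption: judging by the `cnw_ALL_CCFRSCRATCH` environment variable,
# the Flickr30K images are expected as a pre-downloaded tarball on a Jean Zay scratch
# filesystem (Flickr30K typically requires a manual download agreement, so the archive is
# not fetched through the download manager).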
JZ_FOLDER_PATH = f"{os.environ['cnw_ALL_CCFRSCRATCH']}/local_datasets/flickr30k-images.tar.gz"
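
# `label` is kept as a free-form string (the SNLI-VE gold labels are "entailment",
# "neutral", "contradiction") rather than a `datasets.ClassLabel`.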
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "filename": datasets.Value("string"),
        "premise": datasets.Value("string"),
        "hypothesis": datasets.Value("string"),
        "label": datasets.Value("string"),
    }
)


class SNLIVE(datasets.GeneratorBasedBuilder):
    """Builder for the SNLI-VE visual entailment dataset."""

    DEFAULT_CONFIG_NAME = "Default"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = {
            "Default": {
                "train": os.path.join(_SNLI_VE_URL_BASE, _SNLI_VE_SPLITS["train"]),
                "validation": os.path.join(_SNLI_VE_URL_BASE, _SNLI_VE_SPLITS["validation"]),
                "test": os.path.join(_SNLI_VE_URL_BASE, _SNLI_VE_SPLITS["test"]),
            },
        }
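        # `download_and_extract` mirrors the nested structure of `urls`, so the
        # result can be indexed as result[config_name][split] below.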
        snli_ve_annotation_path = dl_manager.download_and_extract(urls)
        images_path = os.path.join(
            dl_manager.extract(JZ_FOLDER_PATH),
            "flickr30k-images",
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "snli_ve_annotation_path": snli_ve_annotation_path[self.config.name]["train"],
                    "images_path": images_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "snli_ve_annotation_path": snli_ve_annotation_path[self.config.name]["validation"],
                    "images_path": images_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "snli_ve_annotation_path": snli_ve_annotation_path[self.config.name]["test"],
                    "images_path": images_path,
                },
            ),
        ]

    def _generate_examples(self, snli_ve_annotation_path, images_path):
        counter = 0
        logger.info("Generating examples from %s", snli_ve_annotation_path)
        # Annotations are JSONL: one JSON object per line, keyed by Flickr30K image id.
        with open(snli_ve_annotation_path, "r") as json_file:
            for line in json_file:
                elem = json.loads(line)
                img_filename = str(elem["Flickr30K_ID"]) + ".jpg"
                assert os.path.exists(os.path.join(images_path, img_filename))
                record = {
                    "image": os.path.join(images_path, img_filename),
                    "filename": img_filename,
                    "premise": elem["sentence1"],
                    "hypothesis": elem["sentence2"],
                    "label": elem["gold_label"],
                }
                yield counter, record
                counter += 1
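
# Usage sketch (hypothetical; assumes this file is saved as `snli_ve.py`, the Flickr30K
# tarball is present at JZ_FOLDER_PATH, and a `datasets` version that still supports
# loading scripts):
#
#   import datasets
#   ds = datasets.load_dataset("./snli_ve.py", split="validation")
#   print(ds[0]["hypothesis"], ds[0]["label"])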