Leyo committed on
Commit 7ea0c76
1 Parent(s): a3c3868

add dataset loading script

Files changed (1)
  1. SNLIVE.py +155 -0
SNLIVE.py ADDED
@@ -0,0 +1,155 @@
+ """SNLI-VE loading script."""
+
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{xie2019visual,
+     title={Visual Entailment: A Novel Task for Fine-grained Image Understanding},
+     author={Xie, Ning and Lai, Farley and Doran, Derek and Kadav, Asim},
+     journal={arXiv preprint arXiv:1901.06706},
+     year={2019}
+ }
+
+ @article{xie2018visual,
+     title={Visual Entailment Task for Visually-Grounded Language Learning},
+     author={Xie, Ning and Lai, Farley and Doran, Derek and Kadav, Asim},
+     journal={arXiv preprint arXiv:1811.10582},
+     year={2018}
+ }
+
+ @article{young-etal-2014-image,
+     title = "From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions",
+     author = "Young, Peter and
+       Lai, Alice and
+       Hodosh, Micah and
+       Hockenmaier, Julia",
+     journal = "Transactions of the Association for Computational Linguistics",
+     volume = "2",
+     year = "2014",
+     address = "Cambridge, MA",
+     publisher = "MIT Press",
+     url = "https://aclanthology.org/Q14-1006",
+     doi = "10.1162/tacl_a_00166",
+     pages = "67--78",
+     abstract = "We propose to use the visual denotations of linguistic expressions (i.e. the set of images they describe) to define novel denotational similarity metrics, which we show to be at least as beneficial as distributional similarities for two tasks that require semantic inference. To compute these denotational similarities, we construct a denotation graph, i.e. a subsumption hierarchy over constituents and their denotations, based on a large corpus of 30K images and 150K descriptive captions.",
+ }
+ """
+
+ _DESCRIPTION = """\
+ SNLI-VE is the dataset proposed for the Visual Entailment (VE) task investigated in "Visual Entailment Task for Visually-Grounded Language Learning" (accepted to the NeurIPS 2018 ViGIL workshop).
+ SNLI-VE is built on top of SNLI and Flickr30K. The problem that VE is trying to solve is to reason about the relationship between an image premise P_image and a text hypothesis H_text.
+
+ Specifically, given an image as premise and a natural language sentence as hypothesis, one of three labels (entailment, neutral or contradiction) is assigned based on the relationship conveyed by the pair (P_image, H_text):
+
+ - entailment holds if there is enough evidence in P_image to conclude that H_text is true.
+ - contradiction holds if there is enough evidence in P_image to conclude that H_text is false.
+ - otherwise, the relationship is neutral, implying the evidence in P_image is insufficient to draw a conclusion about H_text.
+ """
+
+ _HOMEPAGE = "https://github.com/necla-ml/SNLI-VE"
+
+ _LICENSE = "BSD-3-clause"
+
+ _SNLI_VE_URL_BASE = "https://huggingface.co/datasets/HuggingFaceM4/SNLI-VE/raw/main/"
+ _SNLI_VE_SPLITS = {
+     "train": "snli_ve_train.jsonl",
+     "validation": "snli_ve_dev.jsonl",
+     "test": "snli_ve_test.jsonl",
+ }
+
+
+ _FEATURES = datasets.Features(
+     {
+         "image": datasets.Image(),
+         "filename": datasets.Value("string"),
+         "premise": datasets.Value("string"),
+         "hypothesis": datasets.Value("string"),
+         "label": datasets.Value("string"),
+     }
+ )
+
+
+ class SNLIVE(datasets.GeneratorBasedBuilder):
+     """SNLI-VE dataset builder."""
+
+     DEFAULT_CONFIG_NAME = "Default"
+
+     @property
+     def manual_download_instructions(self):
+         return """\
+ In order to get the Flickr30K images on which SNLI-VE is built, you need to go to http://shannon.cs.illinois.edu/DenotationGraph/data/index.html
+ and manually download the dataset ("Flickr 30k images."). Once the download is complete,
+ a file named `flickr30k-images.tar.gz` will appear in your Downloads folder
+ or whichever folder your browser saves files to.
+ The dataset can then be loaded using `datasets.load_dataset("HuggingFaceM4/SNLI-VE", data_dir="<path/to/folder>")`.
+ """
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=_FEATURES,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls = {
+             "Default": {
+                 "train": _SNLI_VE_URL_BASE + _SNLI_VE_SPLITS["train"],
+                 "validation": _SNLI_VE_URL_BASE + _SNLI_VE_SPLITS["validation"],
+                 "test": _SNLI_VE_URL_BASE + _SNLI_VE_SPLITS["test"],
+             },
+         }
+         # download_and_extract returns a structure mirroring `urls`, with local paths.
+         snli_ve_annotation_paths = dl_manager.download_and_extract(urls)
+         # The Flickr30K image archive must have been downloaded manually
+         # (see manual_download_instructions above).
+         images_path = os.path.join(
+             dl_manager.extract(os.path.join(dl_manager.manual_dir, "flickr30k-images.tar.gz")),
+             "flickr30k-images",
+         )
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "snli_ve_annotation_path": snli_ve_annotation_paths[self.config.name]["train"],
+                     "images_path": images_path,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "snli_ve_annotation_path": snli_ve_annotation_paths[self.config.name]["validation"],
+                     "images_path": images_path,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "snli_ve_annotation_path": snli_ve_annotation_paths[self.config.name]["test"],
+                     "images_path": images_path,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, snli_ve_annotation_path, images_path):
+         # The annotation files are JSON Lines: one JSON object per line.
+         with open(snli_ve_annotation_path, "r", encoding="utf-8") as json_file:
+             for counter, line in enumerate(json_file):
+                 elem = json.loads(line)
+                 img_filename = str(elem["Flickr30K_ID"]) + ".jpg"
+                 assert os.path.exists(os.path.join(images_path, img_filename))
+                 yield counter, {
+                     "image": os.path.join(images_path, img_filename),
+                     "filename": img_filename,
+                     "premise": elem["sentence1"],
+                     "hypothesis": elem["sentence2"],
+                     "label": elem["gold_label"],
+                 }
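
Note: the script expects each annotation file to be JSON Lines, one object per line, carrying the four fields read in _generate_examples. A minimal sketch of one such line, with invented values for illustration:

{"Flickr30K_ID": "2248275918", "sentence1": "A soccer game with multiple males playing.", "sentence2": "Some men are playing a sport.", "gold_label": "entailment"}

The Flickr30K_ID is resolved to <ID>.jpg inside the extracted flickr30k-images folder, and gold_label takes one of the three values entailment, neutral or contradiction.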
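
And a minimal usage sketch, assuming the Flickr30K archive was saved to ~/Downloads (the path is illustrative, not part of the script):

import datasets

# data_dir must point at the folder containing the manually downloaded
# flickr30k-images.tar.gz archive (illustrative path).
snli_ve = datasets.load_dataset("HuggingFaceM4/SNLI-VE", data_dir="~/Downloads")

example = snli_ve["train"][0]
print(example["filename"], example["label"])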