yonatanbitton committed
Commit f7ac0e2
Parent: 132a562

Create vasr.py

Files changed (1): vasr.py +130 -0
vasr.py ADDED
@@ -0,0 +1,130 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """VASR loading script."""
+
+ import json
+ import os
+
+ import datasets
+ import pandas as pd
+ from huggingface_hub import hf_hub_url
+
+ # Citation for the dataset, e.g. from arXiv or the dataset repo/website.
+ _CITATION = """
+ """
+
+ _DESCRIPTION = """\
+ VASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (~53% accuracy, compared to 90% human accuracy).
+ """
+
+ _HOMEPAGE = "https://vasr-dataset.github.io/"
+
+ _LICENSE = "https://creativecommons.org/licenses/by/4.0/"
+
+ # Use resolve/ rather than blob/ so the URLs point at the raw CSV files, not their HTML pages.
+ # The URLs are joined with plain string formatting; os.path.join would use "\" on Windows.
+ _URL = "https://huggingface.co/datasets/nlphuji/vasr/resolve/main"
+ _URLS = {
+     "train": f"{_URL}/train_gold.csv",
+     "dev": f"{_URL}/dev_gold.csv",
+     "test": f"{_URL}/test_gold.csv",
+ }
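+ # For example, _URLS["train"] resolves to:
+ # https://huggingface.co/datasets/nlphuji/vasr/resolve/main/train_gold.csv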
+
+
+ class Vasr(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.1.0")
+
+     # If you need complex sub-parts in the dataset with configurable options,
+     # you can define your own builder configuration class (inheriting from datasets.BuilderConfig) to store attributes.
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You can load any one of the configurations listed below with, e.g.,
+     # data = datasets.load_dataset('nlphuji/vasr', 'TEST')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="TEST", version=VERSION, description="vasr dataset gold test"),
+         datasets.BuilderConfig(name="VALIDATION", version=VERSION, description="vasr dataset gold validation"),
+         datasets.BuilderConfig(name="TRAIN", version=VERSION, description="vasr dataset gold train"),
+     ]
+     IMAGE_EXTENSION = "jpg"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 # Each of A_img, B_img, C_img and D_img holds a single image id (string), not a list.
+                 "A_img": datasets.Value("string"),
+                 "B_img": datasets.Value("string"),
+                 "C_img": datasets.Value("string"),
+                 # `candidates` is a list of image ids; `candidates_images` (added by the
+                 # generator below) holds the corresponding local image paths.
+                 "candidates": [datasets.Value("string")],
+                 "candidates_images": [datasets.Value("string")],
+                 "label": datasets.Value("int64"),
+                 "D_img": datasets.Value("string"),
+                 "A_verb": datasets.Value("string"),
+                 "B_verb": datasets.Value("string"),
+                 "C_verb": datasets.Value("string"),
+                 "D_verb": datasets.Value("string"),
+                 "diff_item_A": datasets.Value("string"),
+                 "diff_item_A_str_first": datasets.Value("string"),
+                 "diff_item_B": datasets.Value("string"),
+                 "diff_item_B_str_first": datasets.Value("string"),
+             }
+         )
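+         # Editor's note (assumption, not stated in the script): `label` appears to be the
+         # index of the gold image (D_img) within `candidates`.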
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,  # Defined above; shared by all three configurations.
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the one selected by the user is in self.config.name.
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
+         # It accepts any nested list/dict and returns the same structure with each URL replaced by a path to a local file.
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         # By default, archives are extracted, and the path to the cached extraction folder is returned instead of the archive.
+         data_dir = dl_manager.download_and_extract({
+             "images_dir": hf_hub_url(repo_id="nlphuji/vasr", filename="vasr_images.zip", repo_type="dataset")
+         })
+
+         # The gen_kwargs keys must match the parameter names of `_generate_examples` below.
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TEST,
+                                     gen_kwargs={**data_dir, "examples_csv": downloaded_files["test"]}),
+             datasets.SplitGenerator(name=datasets.Split.TRAIN,
+                                     gen_kwargs={**data_dir, "examples_csv": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION,
+                                     gen_kwargs={**data_dir, "examples_csv": downloaded_files["dev"]}),
+         ]
+
+     # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
+     def _generate_examples(self, examples_csv, images_dir):
+         # The example key is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         df = pd.read_csv(examples_csv)
+
+         for r_idx, r in df.iterrows():
+             r_dict = r.to_dict()
+             # `candidates` is stored in the CSV as a JSON-encoded list of image ids;
+             # decode it and map each id to its path inside the extracted images archive.
+             r_dict['candidates'] = json.loads(r_dict['candidates'])
+             r_dict['candidates_images'] = [
+                 os.path.join(images_dir, "vasr_images", f"{x}.{self.IMAGE_EXTENSION}")
+                 for x in r_dict['candidates']
+             ]
+             yield r_idx, r_dict
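
For reference, a minimal usage sketch (not part of the commit). It assumes the script is served from the nlphuji/vasr hub repo, as the URLs above suggest, and that `label` indexes into `candidates`:

from datasets import load_dataset

# Load the gold test split through the script above (hub id assumed from _URL).
vasr_test = load_dataset("nlphuji/vasr", "TEST", split="test")

example = vasr_test[0]
print(example["A_img"], example["B_img"], example["C_img"])  # the analogy triplet
print(example["candidates"])         # candidate image ids (decoded from JSON)
print(example["candidates_images"])  # local paths to the candidate images
print(example["label"])              # presumably the gold candidate's index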