# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" VASR Loading Script """

import json
import os
import pandas as pd
import datasets
from huggingface_hub import hf_hub_url

# Citation for the dataset, e.g. from arXiv or the dataset repo/website.
_CITATION = """
"""

_DESCRIPTION = """\
VASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (~53% accuracy, compared to 90% human accuracy).
"""

_HOMEPAGE = "https://vasr-dataset.github.io/"

_LICENSE = "https://creativecommons.org/licenses/by/4.0/"

_URL = "https://huggingface.co/datasets/nlphuji/vasr/blob/main"
_URLS = {
    "train": os.path.join(_URL, "train_gold.csv"),
    "dev": os.path.join(_URL, "dev_gold.csv"),
    "test": os.path.join(_URL, "test_gold.csv"),
}

class Vasr(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")

    # If you need to make complex sub-parts in the datasets with configurable options
    # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
    # BUILDER_CONFIG_CLASS = MyBuilderConfig

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="TEST", version=VERSION, description="VASR gold-labeled dataset (train/dev/test splits)"),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "A_img": datasets.Value("string"),
                "B_img": datasets.Value("string"),
                "C_img": datasets.Value("string"),
                "candidates": [datasets.Value("string")],
                "candidates_images": [datasets.Value("string")],
                "label": datasets.Value("int64"),
                "D_img": datasets.Value("string"),
                "A_verb": datasets.Value("string"),
                "B_verb": datasets.Value("string"),
                "C_verb": datasets.Value("string"),
                "D_verb": datasets.Value("string"),
                "diff_item_A": datasets.Value("string"),
                "diff_item_A_str_first": datasets.Value("string"),
                "diff_item_B": datasets.Value("string"),
                "diff_item_B_str_first": datasets.Value("string"),
            }
        )
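        # A_img : B_img :: C_img : D_img form the analogy; `candidates` lists the answer
        # options (image file names) and `label` is presumably the index of the gold
        # answer (D_img) within `candidates`.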
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
        data_dir = dl_manager.download_and_extract({
            "images_dir": hf_hub_url(repo_id="nlphuji/vasr", repo_type='dataset', filename="vasr_images.zip")
        })
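        # download_and_extract returns the same dict shape, so data_dir is
        # {'images_dir': <local path to the extracted vasr_images.zip>}.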
        test_examples = hf_hub_url(repo_id="nlphuji/vasr", repo_type='dataset', filename="test_gold.csv")
        dev_examples = hf_hub_url(repo_id="nlphuji/vasr", repo_type='dataset', filename="dev_gold.csv")
        train_examples = hf_hub_url(repo_id="nlphuji/vasr", repo_type='dataset', filename="train_gold.csv")

        train_gen = datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={**data_dir, 'examples_csv': train_examples})
        dev_gen = datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={**data_dir, 'examples_csv': dev_examples})
        test_gen = datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={**data_dir, 'examples_csv': test_examples})

        return [train_gen, dev_gen, test_gen]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, examples_csv, images_dir):
        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.

        df = pd.read_csv(examples_csv)

        # Columns from the CSV that are exposed as dataset features (candidates_images is derived below).
        d_keys = [
            'A_img', 'B_img', 'C_img', 'candidates', 'label', 'D_img',
            'A_verb', 'B_verb', 'C_verb', 'D_verb',
            'diff_item_A', 'diff_item_A_str_first', 'diff_item_B', 'diff_item_B_str_first',
        ]

        for r_idx, r in df.iterrows():
            r_dict = r.to_dict()
            # The candidates column is stored as a JSON-encoded list of image file names.
            r_dict['candidates'] = json.loads(r_dict['candidates'])
            # Map each candidate file name to its path inside the extracted vasr_images archive.
            candidates_images = [os.path.join(images_dir, "vasr_images", x) for x in r_dict['candidates']]
            r_dict['candidates_images'] = candidates_images
            # Keep only the declared feature columns (plus the derived candidates_images).
            relevant_r_dict = {k: v for k, v in r_dict.items() if k in d_keys or k == 'candidates_images'}
            yield r_idx, relevant_r_dict
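

# Minimal usage sketch, assuming this script is hosted on the Hub as `nlphuji/vasr`
# and loaded with the "TEST" config defined above (recent `datasets` releases may
# also require trust_remote_code=True for script-based datasets).
if __name__ == "__main__":
    vasr = datasets.load_dataset("nlphuji/vasr", "TEST")
    # Each split exposes the features declared in _info(), e.g. the analogy images
    # and the local paths of the candidate images.
    example = vasr["test"][0]
    print(example["A_img"], example["B_img"], example["C_img"])
    print(example["candidates_images"])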