Commit 132a562
Parent(s): 3c4e300
Delete vasr.py
vasr.py DELETED
@@ -1,128 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" VASR Loading Script """
-
-import json
-import os
-import pandas as pd
-import datasets
-from huggingface_hub import hf_hub_url
-
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """
-"""
-
-_DESCRIPTION = """\
-VASR is a challenging dataset for evaluating computer vision commonsense reasoning abilities. Given a triplet of images, the task is to select an image candidate B' that completes the analogy (A to A' is like B to what?). Unlike previous work on visual analogy that focused on simple image transformations, we tackle complex analogies requiring understanding of scenes. Our experiments demonstrate that state-of-the-art models struggle with carefully chosen distractors (~53%, compared to 90% human accuracy).
|
28 |
-
"""
|
29 |
-
|
30 |
-
_HOMEPAGE = "https://vasr-dataset.github.io/"
|
31 |
-
|
32 |
-
_LICENSE = "https://creativecommons.org/licenses/by/4.0/"
|
33 |
-
|
34 |
-
_URL = "https://huggingface.co/datasets/nlphuji/vasr/blob/main"
-_URLS = {
-    "train": os.path.join(_URL, "train_gold.csv"),
-    "dev": os.path.join(_URL, "dev_gold.csv"),
-    "test": os.path.join(_URL, "test_gold.csv"),
-}
-
-
-class Vasr(datasets.GeneratorBasedBuilder):
-    VERSION = datasets.Version("1.1.0")
-
-    # If you need to make complex sub-parts in the dataset with configurable options,
-    # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-    # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-    # You will be able to load one or the other configuration in the following list with
-    # data = datasets.load_dataset('vasr', 'test')
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="plain_text", version=VERSION, description="vasr dataset")
-    ]
-    IMAGE_EXTENSION = "jpg"
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "A_img": [datasets.Value("string")],
-                "B_img": [datasets.Value("string")],
-                "C_img": [datasets.Value("string")],
-                "candidates": [datasets.Value("string")],
-                "label": datasets.Value("int64"),
-                "D_img": [datasets.Value("string")],
-                "A_verb": [datasets.Value("string")],
-                "B_verb": [datasets.Value("string")],
-                "C_verb": [datasets.Value("string")],
-                "D_verb": [datasets.Value("string")],
-                "diff_item_A": [datasets.Value("string")],
-                "diff_item_A_str_first": [datasets.Value("string")],
-                "diff_item_B": [datasets.Value("string")],
-                "diff_item_B_str_first": [datasets.Value("string")],
-            }
-        )
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # This defines the different columns of the dataset and their types
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-            # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-            # supervised_keys=("sentence", "label"),
-            # Homepage of the dataset for documentation
-            homepage=_HOMEPAGE,
-            # License for the dataset if available
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-        downloaded_files = dl_manager.download_and_extract(_URLS)
-
-        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
-        # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-        # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        data_dir = dl_manager.download_and_extract({
-            "images_dir": hf_hub_url("datasets/nlphuji/vasr", filename="vasr_images.zip")
-        })
-
-        # return [datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs=data_dir)]
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TEST,
-                                    gen_kwargs={**data_dir, **{'filepath': downloaded_files["test"]}}),
-            datasets.SplitGenerator(name=datasets.Split.TRAIN,
-                                    gen_kwargs={**data_dir, **{'filepath': downloaded_files["train"]}}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION,
-                                    gen_kwargs={**data_dir, **{'filepath': downloaded_files["dev"]}}),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, images_dir):
-        # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-
-        df = pd.read_csv(filepath)
-
-        # columns_to_serialize = ['candidates', 'associations']
-        # for c in columns_to_serialize:
-        #     df[c] = df[c].apply(json.loads)
-
-        for r_idx, r in df.iterrows():
-            r_dict = r.to_dict()
-            r_dict['candidates'] = json.loads(r_dict['candidates'])
-            candidates_images = [os.path.join(images_dir, "vasr_images", f"{x}.{self.IMAGE_EXTENSION}") for x in
-                                 r_dict['candidates']]
-            r_dict['candidates_images'] = candidates_images
-            yield r_idx, r_dict
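
For reference, a script-backed dataset like the one removed here is consumed through datasets.load_dataset. The sketch below is a minimal, hypothetical usage example, not part of this commit: split names follow _split_generators above, trust_remote_code is required by recent versions of the datasets library to execute repository scripts, and it assumes a loading script is still available in nlphuji/vasr.

from datasets import load_dataset

# Download the gold CSVs and the image archive via the repo's loading
# script, then build the train/validation/test splits defined in
# _split_generators (assumes a script-backed repo; see note above).
vasr = load_dataset("nlphuji/vasr", trust_remote_code=True)

example = vasr["test"][0]
print(example["candidates"])         # candidate image ids, decoded from JSON
print(example["candidates_images"])  # local paths built in _generate_examples
print(example["label"])              # index of the correct candidate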