# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Visual Question Answering (VQA) dataset preprocessed for LXMERT."""

import base64
import csv
import json
import os
import sys

import datasets
import numpy as np


# The serialized image features are very large, so raise the csv field size limit.
csv.field_size_limit(sys.maxsize)


_CITATION = """\
@inproceedings{antol2015vqa,
  title={Vqa: Visual question answering},
  author={Antol, Stanislaw and Agrawal, Aishwarya and Lu, Jiasen and Mitchell, Margaret and Batra, Dhruv and Zitnick, C Lawrence and Parikh, Devi},
  booktitle={Proceedings of the IEEE international conference on computer vision},
  pages={2425--2433},
  year={2015}
}
"""

_DESCRIPTION = """\
VQA is a new dataset containing open-ended questions about images. These questions require an understanding of vision,
language and commonsense knowledge to answer.
"""

_URLS = {
    "train": "https://nlp.cs.unc.edu/data/lxmert_data/vqa/train.json",
    "train_feat": "https://nlp.cs.unc.edu/data/lxmert_data/mscoco_imgfeat/train2014_obj36.zip",
    "valid": "https://nlp.cs.unc.edu/data/lxmert_data/vqa/valid.json",
    "valid_feat": "https://nlp.cs.unc.edu/data/lxmert_data/mscoco_imgfeat/val2014_obj36.zip",
    "test": "https://nlp.cs.unc.edu/data/lxmert_data/vqa/test.json",
    "test_feat": "https://nlp.cs.unc.edu/data/lxmert_data/mscoco_imgfeat/test2015_obj36.zip",
    "ans2label": "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_ans2label.json",
}

# Paths of the feature TSV files inside the extracted archives.
_TRAIN_FEAT_PATH = "train2014_obj36.tsv"
_VALID_FEAT_PATH = "mscoco_imgfeat/val2014_obj36.tsv"
_TEST_FEAT_PATH = "mscoco_imgfeat/test2015_obj36.tsv"

FIELDNAMES = [
    "img_id",
    "img_h",
    "img_w",
    "objects_id",
    "objects_conf",
    "attrs_id",
    "attrs_conf",
    "num_boxes",
    "boxes",
    "features",
]

# 36 object regions per image, each with a 2048-dim feature vector and a 4-dim box.
_SHAPE_FEATURES = (36, 2048)
_SHAPE_BOXES = (36, 4)


class VqaV2Lxmert(datasets.GeneratorBasedBuilder):
    """The VQA v2.0 dataset preprocessed for LXMERT, with the object features detected by a Faster R-CNN replacing
    the raw images."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="vqa", version=datasets.Version("2.0.0"), description="VQA version 2 dataset."),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "question": datasets.Value("string"),
                "question_type": datasets.Value("string"),
                "question_id": datasets.Value("int32"),
                "image_id": datasets.Value("string"),
                "features": datasets.Array2D(_SHAPE_FEATURES, dtype="float32"),
                "normalized_boxes": datasets.Array2D(_SHAPE_BOXES, dtype="float32"),
                "answer_type": datasets.Value("string"),
                "label": datasets.Sequence(
                    {
                        "ids": datasets.ClassLabel(num_classes=3129),
                        "weights": datasets.Value("float32"),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URLS)
        with open(dl_dir["ans2label"], encoding="utf-8") as f:
            self.ans2label = json.load(f)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": dl_dir["train"],
                    "imgfeat": os.path.join(dl_dir["train_feat"], _TRAIN_FEAT_PATH),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dl_dir["valid"],
                    "imgfeat": os.path.join(dl_dir["valid_feat"], _VALID_FEAT_PATH),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": dl_dir["test"],
                    "imgfeat": os.path.join(dl_dir["test_feat"], _TEST_FEAT_PATH),
                    "labeled": False,
                },
            ),
        ]

    def _load_features(self, filepath):
        """Returns a dictionary mapping an image id to the corresponding image's object features."""
        id2features = {}
        with open(filepath) as f:
            reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
            for item in reader:
                features = {}
                img_h = int(item["img_h"])
                img_w = int(item["img_w"])
                num_boxes = int(item["num_boxes"])
                # The features and boxes are stored as base64-encoded float32 buffers.
                features["features"] = np.frombuffer(base64.b64decode(item["features"]), dtype=np.float32).reshape(
                    (num_boxes, -1)
                )
                boxes = np.frombuffer(base64.b64decode(item["boxes"]), dtype=np.float32).reshape((num_boxes, 4))
                features["normalized_boxes"] = self._normalize_boxes(boxes, img_h, img_w)
                id2features[item["img_id"]] = features
        return id2features

    def _normalize_boxes(self, boxes, img_h, img_w):
        """Normalizes the input (x_min, y_min, x_max, y_max) boxes to [0, 1] given the original image size."""
        normalized_boxes = boxes.copy()
        normalized_boxes[:, (0, 2)] /= img_w
        normalized_boxes[:, (1, 3)] /= img_h
        return normalized_boxes

    def _generate_examples(self, filepath, imgfeat, labeled=True):
        """Yields examples as (key, example) tuples."""
        id2features = self._load_features(imgfeat)
        with open(filepath, encoding="utf-8") as f:
            vqa = json.load(f)
        if labeled:
            for id_, d in enumerate(vqa):
                img_features = id2features[d["img_id"]]
                # Map each answer string to its class id; keep the soft answer scores as weights.
                ids = [self.ans2label[x] for x in d["label"].keys()]
                weights = list(d["label"].values())
                yield id_, {
                    "question": d["sent"],
                    "question_type": d["question_type"],
                    "question_id": d["question_id"],
                    "image_id": d["img_id"],
                    "features": img_features["features"],
                    "normalized_boxes": img_features["normalized_boxes"],
                    "answer_type": d["answer_type"],
                    "label": {
                        "ids": ids,
                        "weights": weights,
                    },
                }
        else:
            # The test split has no labels: emit empty label fields.
            for id_, d in enumerate(vqa):
                img_features = id2features[d["img_id"]]
                yield id_, {
                    "question": d["sent"],
                    "question_type": "",
                    "question_id": d["question_id"],
                    "image_id": d["img_id"],
                    "features": img_features["features"],
                    "normalized_boxes": img_features["normalized_boxes"],
                    "answer_type": "",
                    "label": {
                        "ids": [],
                        "weights": [],
                    },
                }