# Copyright 2020 The HuggingFace Datasets Authors and the current dataset
# script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loading script for the RefCOCO, RefCOCO+, and RefCOCOg referring expression datasets."""
import os.path as osp

import datasets

from .refer import REFER
_CITATION = """\
@inproceedings{yu2016modeling,
    title={Modeling Context in Referring Expressions},
    author={Yu, Licheng and Poirson, Patrick and Yang, Shan and Berg, Alexander C. and Berg, Tamara L.},
    booktitle={ECCV},
    year={2016}
}
@inproceedings{mao2016generation,
    title={Generation and Comprehension of Unambiguous Object Descriptions},
    author={Mao, Junhua and Huang, Jonathan and Toshev, Alexander and Camburu, Oana and Yuille, Alan and Murphy, Kevin},
    booktitle={CVPR},
    year={2016}
}
"""
_DESCRIPTION = """\
Referring expression datasets RefCOCO, RefCOCO+, and RefCOCOg, loaded through
the REFER API. Each example pairs a COCO train2014 image region (bounding box
and segmentation) with the natural-language expressions that refer to it.
"""
_HOMEPAGE = "https://github.com/lichengunc/refer"

# TODO: Add the licence for the dataset here if you can find it.
_LICENSE = ""
# The HuggingFace Datasets library doesn't host the datasets but only points
# to the original files. `_URLS` can be an arbitrary nested dict/list of URLs
# (see the `_split_generators` method below); it is left empty here because
# this script reads local files instead of downloading.
_URLS = {}
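
# Note: this script performs no downloads. The REFER annotations and the COCO
# train2014 images must already be present under the directory passed as
# `data_dir` to `datasets.load_dataset` (see `_generate_examples` below).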
# Split names that can appear across the configurations below.
VALID_SPLIT_NAMES = ("train", "val", "test", "testA", "testB")
class ReferitBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for the REFER-based datasets.

    Args:
        name: Dataset name understood by REFER ("refcoco", "refcoco+", or "refcocog").
        split_by: Annotation partition understood by REFER ("unc" or "umd").
    """

    def __init__(self, name: str, split_by: str, **kwargs):
        super().__init__(name=name, **kwargs)
        self.split_by = split_by
class ReferitDataset(datasets.GeneratorBasedBuilder):
    """Referring expression datasets (RefCOCO, RefCOCO+, RefCOCOg) loaded via the REFER API."""
    VERSION = datasets.Version("0.0.1")

    # Each REFER dataset is exposed as its own configuration, carrying the
    # annotation partition (`split_by`) that the REFER API expects.
    BUILDER_CONFIG_CLASS = ReferitBuilderConfig

    # Load one of the configurations listed below with, e.g.:
    #   data = datasets.load_dataset('<path/to/this/script>', 'refcoco+',
    #                                data_dir=...)
    BUILDER_CONFIGS = [
        ReferitBuilderConfig(
            name="refcoco", split_by="unc",
            version=VERSION, description="refcoco"),
        ReferitBuilderConfig(
            name="refcoco+", split_by="unc",
            version=VERSION, description="refcoco+"),
        ReferitBuilderConfig(
            name="refcocog", split_by="umd",
            version=VERSION, description="refcocog"),
    ]
    # A default configuration is not mandatory; set one only if it makes sense.
    DEFAULT_CONFIG_NAME = "refcoco"
    def _info(self):
        self.config: ReferitBuilderConfig
        features = datasets.Features(
            {
                "ref_id": datasets.Value("int32"),
                "img_id": datasets.Value("int32"),
                "ann_id": datasets.Value("int32"),
                "file_name": datasets.Value("string"),
                "image_path": datasets.Value("string"),
                "height": datasets.Value("int32"),
                "width": datasets.Value("int32"),
                "coco_url": datasets.Value("string"),
                "sentences": [datasets.Value("string")],
                "segmentation": [[[datasets.Value("float")]]],
                "bbox": [[datasets.Value("float")]],
                "area": datasets.Value("float"),
                "iscrowd": datasets.Value("int32"),
                "category_id": datasets.Value("int32"),
            }
        )
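        # `segmentation` follows the COCO polygon convention (a list of
        # polygons, each a flat list of x/y coordinates), hence the triply
        # nested float lists; `bbox` wraps a single [x, y, w, h] box.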
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=features,
            # If there's a common (input, target) tuple from the features,
            # uncomment the supervised_keys line below and specify them.
            # They'll be used if as_supervised=True in builder.as_dataset.
            # supervised_keys=("sentence", "label"),
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        # The configuration selected by the user (one of BUILDER_CONFIGS) is
        # available as `self.config`. `dl_manager` can download and extract
        # any nested dict/list of URLs, returning the same structure with
        # each URL replaced by a path to the cached local file, e.g.:
        #   urls = _URLS[self.config.name]
        #   data_dir = dl_manager.download_and_extract(urls)
        # This script skips that step and reads directly from
        # `self.config.data_dir`.
        splits = []
        split_names = ("train", "val", "test")
        if self.config.name in ("refcoco", "refcoco+"):
            # Only the UNC-partitioned datasets define the testA (people) and
            # testB (other objects) subsets; refcocog (umd) does not.
            split_names += ("testA", "testB")
        for split in split_names:
            splits.append(datasets.SplitGenerator(
                name=datasets.NamedSplit(split),
                gen_kwargs={
                    "split": split,
                },
            ))
        return splits
    # Method parameters are unpacked from the `gen_kwargs` returned by
    # `_split_generators`.
    def _generate_examples(self, split: str):
        # Yields (key, example) tuples. The key exists for legacy (tfds)
        # reasons and is not important in itself, but it must be unique per
        # example; the REFER `ref_id` already satisfies that.
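        # REFER indexes the refs, annotations, and images found under
        # `data_root`; `splitBy` selects which annotation partition to load
        # (e.g. refs(unc).p vs. refs(umd).p).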
        refer = REFER(data_root=self.config.data_dir,
                      dataset=self.config.name,
                      splitBy=self.config.split_by)
        ref_ids = refer.getRefIds(split=split)
        for ref_id in ref_ids:
            ref = refer.loadRefs(ref_id)[0]
            ann_id = ref['ann_id']
            ann = refer.loadAnns(ann_id)[0]
            img_id = ann['image_id']
            img = refer.loadImgs(img_id)[0]
            file_name = img['file_name']
            # All three datasets draw their images from COCO train2014.
            image_path = osp.join(
                self.config.data_dir, "images", "train2014", file_name)
            descriptions = [r['raw'] for r in ref['sentences']]
            yield ref_id, {
                "ref_id": ref_id,
                "img_id": img_id,
                "ann_id": ann_id,
                "file_name": file_name,
                "image_path": image_path,
                "height": img['height'],
                "width": img['width'],
                "coco_url": img['coco_url'],
                "sentences": descriptions,
                "segmentation": [ann['segmentation']],
                "bbox": [ann['bbox']],
                "area": ann['area'],
                "iscrowd": ann['iscrowd'],
                "category_id": ann['category_id'],
            }
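
# Minimal usage sketch (untested; assumes the REFER annotations and COCO
# train2014 images are already laid out under <data_root> as described at the
# top of this file):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("<path/to/this/script>", "refcoco+",
#                     data_dir="<data_root>")
#   example = ds["train"][0]
#   print(example["sentences"], example["bbox"])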