# SPair-71k / SPair-71k.py
# Uploaded by JonasLoos — "Update SPair-71k.py", commit 15bf54e (verified)
"""SPair-71k: A Large-scale Benchmark for Semantic Correspondence"""
import json
import datasets
from datasets import BuilderConfig, Features, Value, SplitGenerator, Split, ClassLabel, Array2D, Sequence, Image
from pathlib import Path
_CITATION = """\
@article{min2019spair,
title={SPair-71k: A Large-scale Benchmark for Semantic Correspondence},
author={Juhong Min and Jongmin Lee and Jean Ponce and Minsu Cho},
journal={arXiv prepreint arXiv:1908.10543},
year={2019}
}
"""
_DESCRIPTION = """\
Establishing visual correspondences under large intra-class variations, which is often referred to as semantic correspondence or semantic matching, remains a challenging problem in computer vision. Despite its significance, however, most of the datasets for semantic correspondence are limited to a small amount of image pairs with similar viewpoints and scales. In this paper, we present a new large-scale benchmark dataset of semantically paired images, SPair-71k, which contains 70,958 image pairs with diverse variations in viewpoint and scale. Compared to previous datasets, it is significantly larger in number and contains more accurate and richer annotations. We believe this dataset will provide a reliable testbed to study the problem of semantic correspondence and will help to advance research in this area. We provide the results of recent methods on our new dataset as baselines for further research.
This huggingface version of the dataset is inofficial. It downloads the data from the original source and converts it to the huggingface format.
## Terms of Use
The SPair-71k data includes images and metadata obtained from the [PASCAL-VOC](http://host.robots.ox.ac.uk/pascal/VOC/) and [flickr](https://www.flickr.com/) website. Use of these images and metadata must respect the corresponding [terms of use](https://www.flickr.com/help/terms).
"""
_HOMEPAGE = "https://cvlab.postech.ac.kr/research/SPair-71k/"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""
_URL = "https://cvlab.postech.ac.kr/research/SPair-71k/data/SPair-71k.tar.gz"
class SPair71k(datasets.GeneratorBasedBuilder):
    """SPair-71k: A Large-scale Benchmark for Semantic Correspondence"""

    VERSION = datasets.Version("0.5.0")

    # Two views of the same download: "pairs" yields one example per annotated
    # image pair, "data" yields one example per individual image.
    BUILDER_CONFIGS = [
        BuilderConfig(name="pairs", version=VERSION, description="SPair-71k: dataset of image pairs"),
        BuilderConfig(name="data", version=VERSION, description="SPair-71k: dataset of image data"),
    ]
    DEFAULT_CONFIG_NAME = "pairs"

    def _info(self):
        """Build the feature schema (DatasetInfo) for the active configuration."""
        if self.config.name == "pairs":
            # One example per image pair; src_* / trg_* fields describe the
            # source / target image of the pair. *_data_index refers to the
            # example index of that image in the "data" configuration.
            features = Features(
                {
                    "pair_id": Value("uint32"),
                    "src_img": Image(),
                    "src_segmentation": Image(),
                    "src_data_index": Value("uint32"),
                    "src_name": Value("string"),  # "category/image-name" (no extension)
                    "src_imsize": Sequence(Value("uint32"), length=3),
                    "src_bndbox": Sequence(Value("uint32"), length=4),
                    "src_pose": ClassLabel(names=['Unspecified', 'Frontal', 'Left', 'Rear', 'Right']),
                    "src_kps": Array2D(dtype="uint32", shape=(None,2)),  # keypoint coordinates, variable count
                    "trg_img": Image(),
                    "trg_segmentation": Image(),
                    "trg_data_index": Value("uint32"),
                    "trg_name": Value("string"),
                    "trg_imsize": Sequence(Value("uint32"), length=3),
                    "trg_bndbox": Sequence(Value("uint32"), length=4),
                    "trg_pose": ClassLabel(names=['Unspecified', 'Frontal', 'Left', 'Rear', 'Right']),
                    "trg_kps": Array2D(dtype="uint32", shape=(None,2)),
                    "kps_ids": Sequence(Value("uint32")),
                    "category": ClassLabel(names=['cat', 'pottedplant', 'train', 'bicycle', 'car', 'bus', 'aeroplane', 'dog', 'bird', 'chair', 'motorbike', 'cow', 'bottle', 'person', 'boat', 'sheep', 'horse', 'tvmonitor']),
                    # Per-pair attributes copied straight from the pair-annotation JSON.
                    "viewpoint_variation": Value("uint8"),
                    "scale_variation": Value("uint8"),
                    "truncation": Value("uint8"),
                    "occlusion": Value("uint8"),
                }
            )
        elif self.config.name == "data":
            # One example per image; the raw per-image JSON annotation is
            # passed through unparsed as a string.
            features = Features(
                {
                    "img": Image(),
                    "segmentation": Image(),
                    "annotation": Value('string'),  # full json annotation
                    "name": Value('string'),  # "category/image-name" (no extension)
                }
            )
        else:
            raise ValueError(f"Unknown configuration name {self.config.name}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the splits for the config."""
        # make sure to download the data even when streaming
        # NOTE(review): swaps a streaming manager for a real DownloadManager via
        # the private `_dataset_name` attribute — confirm this survives
        # `datasets` library upgrades.
        if not isinstance(dl_manager, datasets.DownloadManager):
            dl_manager = datasets.DownloadManager(dl_manager._dataset_name)
        # download and extract tar.gz file
        data_path = dl_manager.download_and_extract(_URL)
        if self.config.name == "pairs":
            # Pair annotations ship pre-split on disk as "trn"/"val"/"test"
            # subfolders; `split` below selects the folder to read.
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "path": data_path,
                        "split": "trn",
                    },
                ),
                SplitGenerator(
                    name=Split.VALIDATION,
                    gen_kwargs={
                        "path": data_path,
                        "split": "val",
                    },
                ),
                SplitGenerator(
                    name=Split.TEST,
                    gen_kwargs={
                        "path": data_path,
                        "split": "test"
                    },
                ),
            ]
        elif self.config.name == "data":
            # the data is not split
            return [
                SplitGenerator(
                    name=Split.TRAIN,
                    gen_kwargs={
                        "path": data_path,
                        "split": "data",
                    },
                ),
            ]
        else:
            raise ValueError(f"Unknown configuration name {self.config.name}")

    def _generate_examples(self, path, split):
        """Yield (key, example) tuples for one split.

        Args:
            path: root of the extracted archive (contains the `SPair-71k/` folder).
            split: "trn"/"val"/"test" for the "pairs" config; "data" otherwise.
        """
        path = Path(path) / 'SPair-71k'
        # Stable index per image: maps 'category/name' to its position in the
        # list of (image name, category) tuples sorted by image name first.
        # This is also the emission order of the "data" config below, so the
        # *_data_index fields cross-reference the two configurations.
        image_indices = {f'{cat}/{name}': i for i, (name, cat) in enumerate(sorted((img_name.name[:-4], folder.name) for folder in (path / 'JPEGImages').glob('*') for img_name in folder.glob('*.jpg')))}
        if self.config.name == 'pairs':
            # Names of the declared features, used to forward only matching
            # keys from the raw pair-annotation JSON.
            features: list[str] = [x for x in self.info.features or []]
            for pair_file in (path / 'PairAnnotation' / split).glob('*.json'):
                with open(pair_file, 'r') as f:
                    data = json.load(f)
                cat = data['category']
                # src_imname/trg_imname carry a ".jpg" suffix; [:-4] strips it.
                src_name = f'{cat}/{data["src_imname"][:-4]}'
                trg_name = f'{cat}/{data["trg_imname"][:-4]}'
                yield data["pair_id"], {
                    "src_name": src_name,
                    "src_data_index": image_indices[src_name],
                    # "bytes": None → the Image feature loads from "path" lazily.
                    "src_img": {"path": str(path / 'JPEGImages' / f'{src_name}.jpg'), "bytes": None},
                    "src_segmentation": {"path": str(path / 'Segmentation' / f'{src_name}.png'), "bytes": None},
                    "trg_name": trg_name,
                    "trg_data_index": image_indices[trg_name],
                    "trg_img": {"path": str(path / 'JPEGImages' / f'{trg_name}.jpg'), "bytes": None},
                    "trg_segmentation": {"path": str(path / 'Segmentation' / f'{trg_name}.png'), "bytes": None},
                    # Forward every annotation field declared in the schema
                    # (pair_id, imsize, bndbox, pose, kps, category, ...).
                    **{k: data[k] for k in data if k in features},
                }
        elif self.config.name == 'data':
            # One example per image, keyed by its stable index.
            for img_name, i in image_indices.items():
                yield i, {
                    "img": {"path": str(path / 'JPEGImages' / f'{img_name}.jpg'), "bytes": None},
                    "segmentation": {"path": str(path / 'Segmentation' / f'{img_name}.png'), "bytes": None},
                    "annotation": (path / 'ImageAnnotation' / f'{img_name}.json').read_text(),
                    "name": img_name,
                }