"""
This is the huggingface data loader for TOPVIEWRS Benchmark.
"""
import json
import os
import shutil
import datasets
_CITATION = """
@misc{li2024topviewrs,
title={TopViewRS: Vision-Language Models as Top-View Spatial Reasoners},
author={Chengzu Li and Caiqi Zhang and Han Zhou and Nigel Collier and Anna Korhonen and Ivan Vulić},
year={2024},
eprint={2406.02537},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """
TopViewRS is a benchmark of 11,384 multiple-choice questions paired with either photo-realistic
or semantic top-view maps of real-world scenarios, built through a pipeline of automatic collection followed by human alignment.
"""
_HOMEPAGE = "https://topviewrs.github.io/"
_LICENSE = "MIT"
TASK_SPLIT = ['top_view_recognition', 'top_view_localization', 'static_spatial_reasoning', 'dynamic_spatial_reasoning']
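
# Data files expected to sit alongside this script in the dataset repository:
# one annotation JSON per map type plus a zip archive of the top-view map images
# (referenced by relative name, so `dl_manager` resolves them against the repo).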
_URLS = {
    "rgb_json": "released_realistic_datasets.json",
    "semantic_json": "released_semantic_datasets.json",
    "images": "released_data.zip",
}
class TOPVIEWRSConfig(datasets.BuilderConfig):
"""BuilderConfig for TOPVIEWRS."""
def __init__(self, task_split, map_type, **kwargs):
"""BuilderConfig for TOPVIEWRS.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(TOPVIEWRSConfig, self).__init__(**kwargs)
self.task_split = task_split
self.map_type = map_type
class TOPVIEWRS(datasets.GeneratorBasedBuilder):
"""TOPVIEWRS Dataset"""
BUILDER_CONFIG_CLASS = TOPVIEWRSConfig
BUILDER_CONFIGS = [
TOPVIEWRSConfig(
name="topviewrs",
version=datasets.Version("0.0.0"),
description=_DESCRIPTION,
task_split=None,
map_type=None,
)
]
DEFAULT_CONFIG_NAME = "topviewrs"
def _info(self):
        # Common schema shared by all task splits.
        feature_dict = {
            "index": datasets.Value("int32"),
            "scene_id": datasets.Value("string"),
            "question": datasets.Value("string"),
            "choices": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.Value("string")),
            "choice_type": datasets.Value("string"),
            "map_path": datasets.Value("string"),
            "question_ability": datasets.Value("string"),
        }
        if self.config.task_split == "dynamic_spatial_reasoning":
            # Dynamic spatial reasoning examples additionally carry a reference path,
            # stored as a nested sequence of integers.
            feature_dict["reference_path"] = datasets.Sequence(
                datasets.Sequence(datasets.Value("int32"))
            )
        features = datasets.Features(feature_dict)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
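        # Download the annotation JSONs and the image archive, unpack the archive,
        # and expose everything under a single 'val' split rooted at the download directory.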
downloaded_files = dl_manager.download_and_extract(_URLS)
for k, v in downloaded_files.items():
if v.endswith("zip"):
                cwd = os.getcwd()
                try:
                    shutil.unpack_archive(os.path.join(cwd, v))
                except Exception as exc:
                    # Surface a clearer message while keeping the original error chained.
                    raise FileNotFoundError(
                        f"Unpacking the image data.zip failed. Make sure that you have the zip file at {os.path.join(cwd, v)}."
                    ) from exc
base_file_dir = os.path.dirname(v)
return [
datasets.SplitGenerator(
name=datasets.Split('val'),
gen_kwargs={
"file_path": base_file_dir
},
)
]
def _generate_examples(self, file_path: str):
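        # Read the task-specific question list from the annotation JSON and yield one
        # example per entry, resolving map image paths relative to `file_path`.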
task = self.config.task_split
map_type = self.config.map_type
file_name = "RGB_datasets.json" if map_type.lower() == "realistic" else "semantic_datasets.json"
map_key = "rgb" if map_type.lower() == "realistic" else map_type
with open(os.path.join(file_path, file_name)) as f:
data_list = json.load(f)[task]
for idx, data_item in enumerate(data_list):
return_item = {
"index": idx,
"scene_id": data_item['scene_id'],
"question": data_item['question'],
"choices": data_item['choices'],
"labels": data_item['labels'],
"choice_type": str(data_item["question_meta_data"]["choices"]),
"map_path": os.path.join(file_path, data_item[f"{map_key}_map"]),
"question_ability": data_item['ability'],
}
if "reference_path" in data_item.keys():
return_item["reference_path"] = data_item["reference_path"]
yield idx, return_item
idx += 1
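
# Illustrative consumption sketch (assumptions: `ds` was loaded via
# `datasets.load_dataset` as in the header comment, and Pillow is installed --
# neither is provided by this script). `map_path` is a plain string, so the
# caller opens the image:
#
#   from PIL import Image
#   for example in ds:
#       top_view = Image.open(example["map_path"])
#       print(example["question"], example["choices"], example["labels"])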