#
# This file is part of the SMVB distribution (https://huggingface.co/datasets/ABC-iRobotics/SMVB).
# Copyright (c) 2023 ABC-iRobotics.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""SMVB dataset"""
import sys
import io
import numpy as np
if sys.version_info < (3, 9):
from typing import Sequence, Generator, Tuple
else:
from collections.abc import Sequence, Generator
Tuple = tuple
from typing import Optional, IO
import datasets
import itertools
# ---- Constants ----
_CITATION = """\
@INPROCEEDINGS{karoly2024synthetic,
author={Károly, Artúr I. and Nádas, Imre and Galambos, Péter},
booktitle={2024 IEEE 22nd World Symposium on Applied Machine Intelligence and Informatics (SAMI)},
title={Synthetic Multimodal Video Benchmark (SMVB): Utilizing Blender for rich dataset generation},
year={2024},
volume={},
number={},
pages={},
doi={}}
"""
_DESCRIPTION = """\
Amultimodal video benchmark for evaluating models in multi-task learning scenarios.
"""
_HOMEPAGE = "https://huggingface.co/ABC-iRobotics/SMVB"
_LICENSE = "GNU General Public License v3.0"
_BASE_URL = "https://huggingface.co/datasets/ABC-iRobotics/SMVB/resolve/main/data"
_VERSION = '1.0.0'
_SCENES = ['car']
# ---- SMVB dataset Configs ----
class SMVBDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the SMVB dataset.

    Args:
        name: Name of this configuration.
        data_urls: URLs of the ``.tar.gz`` archives that hold the data.
        version: Version string (e.g. ``"1.0.0"``) parsed into
            ``datasets.Version``. When ``None``, the base-class default
            version is kept (the original code would have crashed on
            ``datasets.Version(None)``).
        **kwargs: Forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, name: str, data_urls: Sequence[str], version: Optional[str] = None, **kwargs):
        # Only wrap the version when one was actually supplied; passing
        # None to datasets.Version() is invalid.
        if version is not None:
            kwargs["version"] = datasets.Version(version)
        super(SMVBDatasetConfig, self).__init__(name=name, **kwargs)
        self._data_urls = data_urls

    @property
    def features(self):
        """Feature schema: two images plus three flattened float32 arrays."""
        return datasets.Features(
            {
                "image": datasets.Image(),
                "mask": datasets.Image(),
                "depth": datasets.Sequence(datasets.Value("float32")),
                "flow": datasets.Sequence(datasets.Value("float32")),
                "normal": datasets.Sequence(datasets.Value("float32")),
            }
        )

    @property
    def keys(self):
        """Feature names, in the order the generator pairs them with files."""
        return ("image", "mask", "depth", "flow", "normal")
# ---- SMVB dataset Loader ----
class SMVBDataset(datasets.GeneratorBasedBuilder):
    """SMVB dataset builder.

    Streams groups of ``len(config.keys)`` consecutive files out of the
    downloaded tar archives; each group becomes one example.
    """

    BUILDER_CONFIG_CLASS = SMVBDatasetConfig
    BUILDER_CONFIGS = [
        SMVBDatasetConfig(
            name = "all",
            description = "Photorealistic synthetic images",
            data_urls = [_BASE_URL + '/' + s + '.tar.gz' for s in _SCENES],
            version = _VERSION
        ),
    ]
    # Examples carry large arrays; keep writer batches small to bound memory.
    DEFAULT_WRITER_BATCH_SIZE = 10

    def _info(self):
        """Return the DatasetInfo assembled from the module constants."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Download every configured archive and expose a single TRAIN split."""
        local_data_paths = dl_manager.download(self.config._data_urls)
        # Chain the per-archive (path, file-object) iterators into one stream.
        local_data_gen = itertools.chain.from_iterable(
            dl_manager.iter_archive(path) for path in local_data_paths
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data": local_data_gen
                }
            )
        ]

    def _generate_examples(
        self,
        data: Generator[Tuple[str, IO], None, None]
    ):
        """Yield ``(index, example)`` pairs from the archive file stream.

        Files are assumed to arrive in repeating groups matching
        ``self.config.keys`` ("image", "mask", "depth", "flow", "normal").
        The first two files of each group are stored as raw image bytes;
        the remaining three are ``.npy`` payloads decoded and flattened.

        BUG FIX: the original only emitted a group when the first file of
        the NEXT group arrived, so the final example of the stream was
        silently dropped. The group is now flushed as soon as it is full.
        """
        keys = self.config.keys
        file_infos = []
        example_idx = 0
        for i, (file_path, file_object) in enumerate(data):
            if i % len(keys) < 2:
                # "image" / "mask": keep the encoded bytes for datasets.Image.
                file_infos.append((file_path, file_object.read()))
            else:
                # "depth" / "flow" / "normal": decode the .npy and flatten
                # to match the Sequence(float32) feature.
                file_infos.append(
                    (file_path, np.load(io.BytesIO(file_object.read())).flatten())
                )
            if len(file_infos) == len(keys):
                img_features_dict = {
                    k: {'path': d[0], 'bytes': d[1]}
                    for k, d in zip(keys, file_infos) if k in ['image', 'mask']
                }
                array_features_dict = {
                    k: d[1]
                    for k, d in zip(keys, file_infos) if k not in ['image', 'mask']
                }
                yield example_idx, {**img_features_dict, **array_features_dict}
                example_idx += 1
                file_infos = []