#!/usr/bin/env python3
"""MultiTask Dataset module compatible with torch.utils.data.Dataset & DataLoader."""
from __future__ import annotations
from pathlib import Path
from typing import Dict, List, Tuple
from argparse import ArgumentParser
from pprint import pprint
from natsort import natsorted
from loggez import loggez_logger as logger
import torch as tr
import numpy as np
from torch.utils.data import Dataset, DataLoader
from lovely_tensors import monkey_patch
monkey_patch()
BuildDatasetTuple = Tuple[Dict[str, List[Path]], List[str]]
MultiTaskItem = Tuple[Dict[str, tr.Tensor], str | list[str], List[str]] # ({task: data}, name(s), [tasks])
class NpzRepresentation:
"""Generic Task with data read from/saved to npz files. Tries to read data as-is from disk and store it as well"""
def __init__(self, name: str):
self.name = name
def load_from_disk(self, path: Path) -> tr.Tensor:
"""Reads the npz data from the disk and transforms it properly"""
data = np.load(path, allow_pickle=False)
        data = data if isinstance(data, np.ndarray) else data["arr_0"] # .npz archives load as NpzFile; take "arr_0"
return tr.from_numpy(data) # can be uint8, float16, float32 etc.
    def save_to_disk(self, data: tr.Tensor, path: Path):
        """Stores this item to disk so that it can be loaded back via `load_from_disk`."""
        # np.savez stores the array under the "arr_0" key, which load_from_disk expects; plain
        # np.save would write .npy format and append a ".npy" suffix to paths not ending in it.
        np.savez(path, data.cpu().detach().numpy())
    def plot_fn(self, x: tr.Tensor) -> np.ndarray:
        """Very basic conversion of this representation to a viewable (H, W, 3) uint8 image. Override as needed."""
        assert isinstance(x, tr.Tensor), type(x)
        if len(x.shape) == 2: x = x.unsqueeze(-1)
        assert len(x.shape) == 3, x.shape # guaranteed to be (H, W, C) at this point
        if x.shape[-1] != 3: x = x[..., 0:1] # non-RGB multi-channel data: keep only the first channel
        if x.shape[-1] == 1: x = x.repeat(1, 1, 3)
        x = x.nan_to_num(0).cpu().detach().numpy() # guaranteed to be (H, W, 3) at this point
        if x.dtype != np.uint8: # min-max normalize non-uint8 data to [0, 255]
            _min, _max = x.min((0, 1), keepdims=True), x.max((0, 1), keepdims=True)
            x = np.nan_to_num((x - _min) / (_max - _min) * 255, nan=0).astype(np.uint8)
        return x
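    # Typical consumption of plot_fn (illustrative sketch; matplotlib is an assumption of
    # this example, not a dependency of this module):
    #   import matplotlib.pyplot as plt
    #   plt.imshow(task.plot_fn(tensor)) # tensor as returned by load_from_disk
    #   plt.show()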
def __repr__(self):
return str(self)
def __str__(self):
return f"{str(type(self)).split('.')[-1][0:-2]}({self.name})"
class MultiTaskDataset(Dataset):
"""
MultiTaskDataset implementation. Reads data from npz files and returns them as a dict.
Parameters:
- path: Path to the directory containing the npz files.
- task_names: List of tasks that are present in the dataset. If set to None, will infer from the files on disk.
    - handle_missing_data: Modes to handle missing data. Valid options are:
        - drop: Drop the data point if any of the representations is missing.
        - fill_none: Fill the missing data with Nones.
        - fill_zero: Fill the missing data with zeros.
        - fill_nan: Fill the missing data with NaNs.
Expected directory structure:
path/
- task_1/0.npz, ..., N.npz
- ...
- task_n/0.npz, ..., N.npz
Names can be in a different format (i.e. 2022-01-01.npz), but must be consistent and equal across all tasks.
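    Example (illustrative; "data/dronescapes" is a placeholder path):
        reader = MultiTaskDataset(Path("data/dronescapes"), handle_missing_data="fill_none")
        data, name, task_names = reader[0] # ({task: tensor or None}, file name, ordered task names)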
"""
def __init__(self, path: Path, task_names: list[str] | None = None, handle_missing_data: str = "fill_none",
files_suffix: str = "npz", task_types: dict[str, type] | None = None):
assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
f"Invalid handle_missing_data mode: {handle_missing_data}"
assert files_suffix == "npz", "Only npz supported right now (though trivial to update)"
self.path = Path(path).absolute()
self.handle_missing_data = handle_missing_data
self.suffix = files_suffix
self.all_files_per_repr = self._get_all_npz_files()
self.files_per_repr, self.file_names = self._build_dataset() # these are filtered by 'drop' or 'fill_none' logic
if task_types is None:
logger.debug("No explicit task types. Defaulting all of them to NpzRepresentation.")
task_types = {}
if task_names is None:
task_names = list(self.files_per_repr.keys())
logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).")
self.task_types = {k: task_types.get(k, NpzRepresentation) for k in task_names}
assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names)))
self.task_names = sorted(task_names)
        self._data_shape: dict[str, tuple[int, ...]] | None = None
self._tasks: list[NpzRepresentation] | None = None
self.name_to_task = {task.name: task for task in self.tasks}
logger.info(f"Tasks used in this dataset: {self.task_names}")
self._default_vals: dict[str, tr.Tensor] | None = None
# Public methods and properties
@property
def default_vals(self) -> dict[str, tr.Tensor]:
"""default values for __getitem__ if item is not on disk but we retrieve a full batch anyway"""
if self._default_vals is None:
_default_val = float("nan") if self.handle_missing_data == "fill_nan" else 0
self._default_vals = {task: None if self.handle_missing_data == "fill_none" else
tr.full(self.data_shape[task], _default_val) for task in self.task_names}
return self._default_vals
@property
    def data_shape(self) -> dict[str, tuple[int, ...]]:
        """Returns a {task: shape_tuple} for all representations. At least one npz file must exist for each."""
        if self._data_shape is None: # cached, since computing it reads one npz file per task from disk
            first_npz = {task: [_v for _v in files if _v is not None][0] for task, files in self.files_per_repr.items()}
            self._data_shape = {task: self.name_to_task[task].load_from_disk(first_npz[task]).shape
                                for task in self.task_names}
        return self._data_shape
@property
def tasks(self) -> list[NpzRepresentation]:
"""
    Returns a list of instantiated tasks in the same order as self.task_names. Override this to add
    new tasks and semantics (e.g. a custom plot_fn, or preprocessing after loading from disk for some tasks).
"""
if self._tasks is not None:
return self._tasks
self._tasks = []
for task_name in self.task_names:
t = self.task_types[task_name]
            try:
                t = t(task_name) # task_types may hold classes/callables rather than instances; instantiate here
            except Exception:
                pass # t was already an instantiated NpzRepresentation-like object
self._tasks.append(t)
assert all(t.name == t_n for t, t_n in zip(self._tasks, self.task_names)), (self.task_names, self._tasks)
return self._tasks
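    # Illustrative: custom task classes are injected via the `task_types` constructor argument,
    # e.g. (using the sketch subclass defined above):
    #   MultiTaskDataset(path, task_names=["depth"], task_types={"depth": DepthRepresentation})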
def collate_fn(self, items: list[MultiTaskItem]) -> MultiTaskItem:
"""
        Given a list of items (e.g. from a reader[n:n+k] call), return the items batched on the 1st dimension.
        Nones (missing data points) are turned into NaN tensors of that task's data shape.
"""
assert all(item[2] == self.task_names for item in items), ([item[2] for item in items], self.task_names)
items_name = [item[1] for item in items]
res = {k: tr.zeros(len(items), *self.data_shape[k]).float() for k in self.task_names} # float32 always
for i in range(len(items)):
for k in self.task_names:
res[k][i][:] = items[i][0][k] if items[i][0][k] is not None else float("nan")
return res, items_name, self.task_names
# Private methods
def _get_all_npz_files(self) -> dict[str, list[Path]]:
"""returns a dict of form: {"rgb": ["0.npz", "1.npz", ..., "N.npz"]}"""
in_files = {}
all_repr_dirs: list[str] = [x.name for x in self.path.iterdir() if x.is_dir()]
for repr_dir_name in all_repr_dirs:
dir_name = self.path / repr_dir_name
if all(f.is_dir() for f in dir_name.iterdir()): # dataset is stored as repr/part_x/0.npz, ..., part_k/n.npz
all_files = []
for part in dir_name.iterdir():
all_files.extend(part.glob(f"*.{self.suffix}"))
else: # dataset is stored as repr/0.npz, ..., repr/n.npz
all_files = dir_name.glob(f"*.{self.suffix}")
in_files[repr_dir_name] = natsorted(all_files, key=lambda x: x.name) # important: use natsorted() here
assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
return in_files
def _build_dataset_drop_missing(self) -> BuildDatasetTuple:
in_files = self.all_files_per_repr
name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()} # {node: {name: path}}
common = set(x.name for x in next(iter(in_files.values())))
for node in (nodes := in_files.keys()):
common = common.intersection([f.name for f in in_files[node]])
assert len(common) > 0, f"Node '{node}' made the intersection null"
common = natsorted(list(common))
logger.info(f"Found {len(common)} data points for each node ({len(nodes)} nodes).")
files_per_repr = {node: [name_to_node_path[node][x] for x in common] for node in nodes}
assert len(files_per_repr) > 0
return files_per_repr, common
def _build_dataset_fill_missing(self) -> BuildDatasetTuple:
in_files = self.all_files_per_repr
name_to_node_path = {k: {_v.name: _v for _v in v} for k, v in in_files.items()}
all_files = set(x.name for x in next(iter(in_files.values())))
for node in (nodes := in_files.keys()):
all_files = all_files.union([f.name for f in in_files[node]])
all_files = natsorted(list(all_files))
logger.info(f"Found {len(all_files)} data points as union of all nodes' data ({len(nodes)} nodes).")
files_per_repr = {node: [] for node in nodes}
for node in nodes:
for file_name in all_files:
file_path = name_to_node_path[node].get(file_name, None)
files_per_repr[node].append(file_path)
assert len(files_per_repr) > 0
return files_per_repr, all_files
def _build_dataset(self) -> BuildDatasetTuple:
logger.debug(f"Building dataset from: '{self.path}'")
if self.handle_missing_data == "drop":
return self._build_dataset_drop_missing()
else:
return self._build_dataset_fill_missing()
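    # Worked example: with task A holding {0.npz, 1.npz} and task B holding {1.npz, 2.npz},
    # "drop" keeps only the intersection {1.npz}, while the "fill_*" modes keep the union
    # {0.npz, 1.npz, 2.npz} and mark the holes with None at build time.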
# Python magic methods (pretty printing the reader object, reader[0], len(reader) etc.)
    def __getitem__(self, index: int | slice | list[int] | tuple | str) -> MultiTaskItem:
        """Reads the data of all the desired tasks at the given index (or indices, or file name)."""
assert isinstance(index, (int, slice, list, tuple, str)), type(index)
if isinstance(index, slice):
assert index.start is not None and index.stop is not None and index.step is None, "Only reader[l:r] allowed"
index = list(range(index.stop)[index])
if isinstance(index, (list, tuple)):
return self.collate_fn([self.__getitem__(ix) for ix in index])
if isinstance(index, str):
return self.__getitem__(self.file_names.index(index))
res = {}
item_name = self.file_names[index]
for task in self.tasks:
file_path = self.files_per_repr[task.name][index]
file_path = None if file_path is None or not (fpr := file_path.resolve()).exists() else fpr
res[task.name] = task.load_from_disk(file_path) if file_path is not None else self.default_vals[task.name]
return (res, item_name, self.task_names)
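    # Supported indexing (illustrative): reader[0] (int), reader[2:5] (slice, batched),
    # reader[[0, 2]] (list/tuple, batched via collate_fn), reader["1.npz"] (by file name).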
def __len__(self) -> int:
return len(self.files_per_repr[self.task_names[0]]) # all of them have the same number (filled with None or not)
def __str__(self):
f_str = f"[{str(type(self)).rsplit('.', maxsplit=1)[-1][0:-2]}]"
f_str += f"\n - Path: '{self.path}'"
f_str += f"\n - Tasks ({len(self.tasks)}): {self.tasks}"
f_str += f"\n - Length: {len(self)}"
f_str += f"\n - Handle missing data mode: '{self.handle_missing_data}'"
return f_str
def __repr__(self):
return str(self)
def main():
"""main fn"""
parser = ArgumentParser()
parser.add_argument("dataset_path", type=Path)
parser.add_argument("--handle_missing_data", choices=("drop", "fill_none"), default="fill_none")
args = parser.parse_args()
reader = MultiTaskDataset(args.dataset_path, task_names=None, handle_missing_data=args.handle_missing_data)
print(reader)
print(f"Shape: {reader.data_shape}")
rand_ix = np.random.randint(len(reader))
data, name, repr_names = reader[rand_ix] # get a random single data point
print(f"Name: {name}. Nodes: {repr_names}")
    pprint(data)
data, name, repr_names = reader[rand_ix: min(len(reader), rand_ix + 5)] # get a random batch
print(f"Name: {name}. Nodes: {repr_names}")
    pprint(data) # Nones are converted to NaNs automagically
loader = DataLoader(reader, collate_fn=reader.collate_fn, batch_size=5, shuffle=True)
data, name, repr_names = next(iter(loader)) # get a random batch using torch DataLoader
print(f"Name: {name}. Nodes: {repr_names}")
    pprint(data) # Nones are converted to NaNs automagically
if __name__ == "__main__":
main()