More fixes to the multitask dataset. Seems stable.
Browse files
dronescapes_reader/multitask_dataset.py
CHANGED
@@ -3,7 +3,7 @@
|
|
3 |
from __future__ import annotations
|
4 |
import os
|
5 |
from pathlib import Path
|
6 |
-
from typing import Dict, List, Tuple
|
7 |
from argparse import ArgumentParser
|
8 |
from pprint import pprint
|
9 |
from natsort import natsorted
|
@@ -56,7 +56,7 @@ class MultiTaskDataset(Dataset):
|
|
56 |
"""
|
57 |
|
58 |
def __init__(self, path: Path,
|
59 |
-
task_names: list[str]
|
60 |
task_types: dict[str, type],
|
61 |
normalization: str | None | dict[str],
|
62 |
handle_missing_data: str = "fill_none",
|
@@ -68,17 +68,14 @@ class MultiTaskDataset(Dataset):
|
|
68 |
assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
|
69 |
assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
|
70 |
f"Invalid handle_missing_data mode: {handle_missing_data}"
|
71 |
-
assert
|
72 |
self.path = Path(path).absolute()
|
73 |
self.handle_missing_data = handle_missing_data
|
74 |
self.suffix = files_suffix
|
75 |
-
self.files_per_repr, self.file_names = self._build_dataset(task_types, task_names) #
|
76 |
self.cache_task_stats = cache_task_stats
|
77 |
self.batch_size_stats = batch_size_stats
|
78 |
|
79 |
-
if task_names is None:
|
80 |
-
task_names = list(self.files_per_repr.keys())
|
81 |
-
logger.debug(f"No explicit tasks provided. Using all of them as read from the paths ({len(task_names)}).")
|
82 |
assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names)))
|
83 |
assert (diff := set(self.files_per_repr).difference(task_names)) == set(), f"Not all tasks in files: {diff}"
|
84 |
self.task_types = {k: v for k, v in task_types.items() if k in task_names} # all task_types must be provided!
|
@@ -210,6 +207,7 @@ class MultiTaskDataset(Dataset):
|
|
210 |
|
211 |
def _get_all_npz_files(self) -> dict[str, list[Path]]:
|
212 |
"""returns a dict of form: {"rgb": ["0.npz", "1.npz", ..., "N.npz"]}"""
|
|
|
213 |
in_files = {}
|
214 |
all_repr_dirs: list[str] = [x.name for x in self.path.iterdir() if x.is_dir()]
|
215 |
for repr_dir_name in all_repr_dirs:
|
@@ -224,12 +222,11 @@ class MultiTaskDataset(Dataset):
|
|
224 |
assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
|
225 |
return in_files
|
226 |
|
227 |
-
def _build_dataset(self, task_types: dict[str, NpzRepresentation],
|
228 |
-
task_names: list[str] | None) -> BuildDatasetTuple:
|
229 |
logger.debug(f"Building dataset from: '{self.path}'")
|
230 |
all_npz_files = self._get_all_npz_files()
|
231 |
-
all_files: dict[str, dict[str,
|
232 |
-
|
233 |
relevant_tasks_for_files = set() # hsv requires only rgb, so we look at dependencies later on
|
234 |
for task_name in task_names:
|
235 |
relevant_tasks_for_files.update(task_types[task_name].dependencies)
|
@@ -321,7 +318,8 @@ class MultiTaskDataset(Dataset):
|
|
321 |
return res
|
322 |
|
323 |
def _load_external_statistics(self, statistics: dict[str, TaskStatistics | list]) -> dict[str, TaskStatistics]:
|
324 |
-
|
|
|
325 |
res: dict[str, TaskStatistics] = {}
|
326 |
for k, v in statistics.items():
|
327 |
if k in self.task_names:
|
|
|
3 |
from __future__ import annotations
|
4 |
import os
|
5 |
from pathlib import Path
|
6 |
+
from typing import Dict, List, Tuple, Iterable
|
7 |
from argparse import ArgumentParser
|
8 |
from pprint import pprint
|
9 |
from natsort import natsorted
|
|
|
56 |
"""
|
57 |
|
58 |
def __init__(self, path: Path,
|
59 |
+
task_names: list[str],
|
60 |
task_types: dict[str, type],
|
61 |
normalization: str | None | dict[str],
|
62 |
handle_missing_data: str = "fill_none",
|
|
|
68 |
assert Path(path).exists(), f"Provided path '{path}' doesn't exist!"
|
69 |
assert handle_missing_data in ("drop", "fill_none", "fill_zero", "fill_nan"), \
|
70 |
f"Invalid handle_missing_data mode: {handle_missing_data}"
|
71 |
+
assert isinstance(task_names, Iterable), type(task_names)
|
72 |
self.path = Path(path).absolute()
|
73 |
self.handle_missing_data = handle_missing_data
|
74 |
self.suffix = files_suffix
|
75 |
+
self.files_per_repr, self.file_names = self._build_dataset(task_types, task_names) # + handle_missing_data
|
76 |
self.cache_task_stats = cache_task_stats
|
77 |
self.batch_size_stats = batch_size_stats
|
78 |
|
|
|
|
|
|
|
79 |
assert all(isinstance(x, str) for x in task_names), tuple(zip(task_names, (type(x) for x in task_names)))
|
80 |
assert (diff := set(self.files_per_repr).difference(task_names)) == set(), f"Not all tasks in files: {diff}"
|
81 |
self.task_types = {k: v for k, v in task_types.items() if k in task_names} # all task_types must be provided!
|
|
|
207 |
|
208 |
def _get_all_npz_files(self) -> dict[str, list[Path]]:
|
209 |
"""returns a dict of form: {"rgb": ["0.npz", "1.npz", ..., "N.npz"]}"""
|
210 |
+
assert self.suffix == "npz", f"Only npz supported right now (though trivial to update): {self.suffix}"
|
211 |
in_files = {}
|
212 |
all_repr_dirs: list[str] = [x.name for x in self.path.iterdir() if x.is_dir()]
|
213 |
for repr_dir_name in all_repr_dirs:
|
|
|
222 |
assert not any(len(x) == 0 for x in in_files.values()), f"{ [k for k, v in in_files.items() if len(v) == 0] }"
|
223 |
return in_files
|
224 |
|
225 |
+
def _build_dataset(self, task_types: dict[str, NpzRepresentation], task_names: list[str]) -> BuildDatasetTuple:
|
|
|
226 |
logger.debug(f"Building dataset from: '{self.path}'")
|
227 |
all_npz_files = self._get_all_npz_files()
|
228 |
+
all_files: dict[str, dict[str, Path]] = {k: {_v.name: _v for _v in v} for k, v in all_npz_files.items()}
|
229 |
+
|
230 |
relevant_tasks_for_files = set() # hsv requires only rgb, so we look at dependencies later on
|
231 |
for task_name in task_names:
|
232 |
relevant_tasks_for_files.update(task_types[task_name].dependencies)
|
|
|
318 |
return res
|
319 |
|
320 |
def _load_external_statistics(self, statistics: dict[str, TaskStatistics | list]) -> dict[str, TaskStatistics]:
|
321 |
+
tasks_no_classif = [t for t in set(self.task_names) if not self.name_to_task[t].is_classification]
|
322 |
+
assert (diff := set(tasks_no_classif).difference(statistics)) == set(), f"Missing tasks: {diff}"
|
323 |
res: dict[str, TaskStatistics] = {}
|
324 |
for k, v in statistics.items():
|
325 |
if k in self.task_names:
|
scripts/semantic_mapper.ipynb
CHANGED
The diff for this file is too large to render.
See raw diff
|
|