code stringlengths 17 6.64M |
|---|
def save(file: Path, **kwargs) -> None:
    """Save the given keyword arrays to a compressed `.npz` file at `file`."""
    print(f'\n-> Saving to "{file}"...')
    np.savez_compressed(file, **kwargs)
|
def export_sintel(mode, save_stem: str=None, overwrite: bool=False) -> None:
'Export the ground-truth synthetic depth images for Sintel.\n\n :param mode: (str) Split mode to use.\n :param save_stem: (str) Exported depth file stem (i.e. no suffix).\n :param overwrite: (bool) If `True`, overwrite existing ... |
def save(file: Path, **kwargs) -> None:
    """Save the given keyword arrays to a compressed `.npz` file at `file`."""
    print(f"\n-> Saving to '{file}'...")
    np.savez_compressed(file, **kwargs)
|
def export_tum(mode: str, save_stem: str, overwrite: bool=False) -> None:
'Export the ground-truth depth maps for TUM.\n\n :param mode: (str) Split mode to use.\n :param save_stem: (str) Exported depth file stem (i.e. no suffix).\n :param overwrite: (bool) If `True`, overwrite existing exported files.\n ... |
def process_dataset(src_dir: Path, dst_dir: Path, use_hints: bool=True, use_benchmark: bool=True, overwrite: bool=False) -> None:
'Process the entire Kitti Raw Sync dataset.'
(HINTS_DIR, BENCHMARK_DIR) = ('depth_hints', 'depth_benchmark')
if (not (path := (dst_dir / 'splits')).is_dir()):
shutil.co... |
def process_sequence(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
'Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26.'
print(f"-> Processing sequence '{src_dir}'")
for src_path in sorted(src_dir.iterdir()):
if src_path.is_file():
continue
dst_pa... |
def process_drive(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
'Process a full Kitti Raw sequence: e.g. kitti_raw_sync/2011_09_26/2011_09_26_drive_0005.'
print(f" -> Processing drive '{src_dir}'")
for src_path in sorted(src_dir.iterdir()):
dst_path = (dst_dir / src_path.name)
... |
def process_dir(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
'Processes a data directory within a given drive.\n\n Cases:\n - Base dataset: images_00, images_01, velodyne_points, oxts (/data & /timestamps for each)\n - Depth hints: images_02, images_03\n - Depth benchmark:... |
def export_calibration(src_seq: Path, dst_seq: Path, overwrite: bool=False) -> None:
'Exports sequence calibration information as a LabelDatabase of arrays.'
dst_dir = (dst_seq / 'calibration')
if ((not overwrite) and dst_dir.is_dir()):
print(f" -> Skipping calib '{dst_dir}'")
return
e... |
def export_images(src_dir: Path, dst_dir: Path) -> None:
    """Export all images found in `src_dir` as an ImageDatabase in `dst_dir`."""
    paths = {f.stem: f for f in sorted(src_dir.iterdir())}
    write_image_database(paths, dst_dir)
|
def export_oxts(src_dir: Path, dst_dir: Path) -> None:
    """Export OXTS dicts as a LabelDatabase."""
    labels = {}
    for f in sorted(src_dir.iterdir()):
        labels[f.stem] = kr.load_oxts(f)
    write_label_database(labels, dst_dir)
|
def export_velodyne(src_dir: Path, dst_dir: Path) -> None:
    """Export Velodyne pointclouds as a LabelDatabase of arrays."""
    points = {}
    for f in sorted(src_dir.iterdir()):
        points[f.stem] = kr.load_velo(f)
    write_label_database(points, dst_dir)
|
def export_hints(src_dir: Path, dst_dir: Path) -> None:
    """Export precomputed depth hints as a database of arrays."""
    arrs = {}
    for f in sorted(src_dir.iterdir()):
        arrs[f.stem] = np.load(f)
    write_array_database(arrs, dst_dir)
|
def process_dataset(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
'Process the entire MannequinChallenge dataset.'
print(f"-> Copying splits directory '{(dst_dir / 'splits')}'...")
shutil.copytree((src_dir / 'splits'), (dst_dir / 'splits'), dirs_exist_ok=True)
for mode in ('train', 'va... |
def process_mode(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
'Process a full MannequinChallenge mode, e.g. train or val.'
calibs = {d.stem: mc.load_info(dst_dir.stem, d.stem) for d in tqdm(src_dir.iterdir())}
export_intrinsics(src_dir, (dst_dir / 'intrinsics'), calibs, overwrite)
exp... |
def export_intrinsics(src_dir: Path, dst_dir: Path, calibs: dict[(str, dict)], overwrite: bool=False) -> None:
'Create camera intrinsics LMDB.'
if ((not overwrite) and dst_dir.is_dir()):
print(f"-> Intrinsics already exist for dir '{src_dir.stem}'")
return
all_Ks = {}
for (k, v) in tqd... |
def export_shapes(src_dir: Path, dst_dir: Path, calibs: dict[(str, dict)], overwrite: bool=False) -> None:
'Create image shapes LMDB.'
if ((not overwrite) and dst_dir.is_dir()):
print(f"-> Shapes already exist for dir '{src_dir.stem}'")
return
all_shapes = {}
for (k, v) in tqdm(calibs.... |
def export_poses(src_dir: Path, dst_dir: Path, calibs: dict[(str, dict)], overwrite: bool=False) -> None:
'Create camera poses LMDB.'
if ((not overwrite) and dst_dir.is_dir()):
print(f"-> Poses already exist for dir '{src_dir.stem}'")
return
print(f'-> Exporting poses for dir {src_dir.stem... |
def export_images(src_dir: Path, dst_dir: Path, overwrite: bool=False) -> None:
'Create images LMDB.'
if ((not overwrite) and dst_dir.is_dir()):
print(f"-> Images already exist for dir '{src_dir.stem}'")
return
print(f"-> Exporting images for dir '{src_dir.stem}'")
files = {f'{d.stem}/... |
def process_dataset(overwrite=False):
(src, dst) = (PATHS['slow_tv'], PATHS['slow_tv_lmdb'])
print(f"-> Copying splits directory '{(dst / 'splits')}'...")
shutil.copytree((src / 'splits'), (dst / 'splits'), dirs_exist_ok=True)
export_intrinsics(dst, overwrite)
args = [((src / seq), dst, overwrite)... |
def export_seq(path: Path, save_root: Path, overwrite: bool=False) -> None:
'Convert SlowTV video into an LMDB.'
seq = path.stem
out_dir = (save_root / seq)
if ((not overwrite) and out_dir.is_dir()):
print(f'-> Skipping directory "{out_dir}"...')
return
print(f'-> Export LMDB for d... |
def export_intrinsics(save_root: Path, overwrite: bool=False) -> None:
'Export SlowTV intrinsics as an LMDB.'
out_dir = (save_root / 'calibs')
if ((not overwrite) and out_dir.is_dir()):
print(f'-> Skipping LMDB calibrations...')
return
print(f"""-> Exporting intrinsics "{(save_root / '... |
def read_array(path):
with open(path, 'rb') as fid:
(width, height, channels) = np.genfromtxt(fid, delimiter='&', max_rows=1, usecols=(0, 1, 2), dtype=int)
fid.seek(0)
num_delimiter = 0
byte = fid.read(1)
while True:
if (byte == b'&'):
num_delimi... |
def export_split(split, src, dst, overwrite=False):
print(f'-> Exporting "{split}" split...')
dst = (dst / split)
io.mkdirs(dst)
seqs = io.get_dirs((src / split))
dsts = [(dst / s.stem) for s in seqs]
ovs = [overwrite for _ in seqs]
with Pool(8) as p:
for _ in tqdm(p.imap_unordered... |
def export_seq(args):
try:
(src, dst, overwrite) = args
depth_dir = (dst / 'depths')
if ((not overwrite) and depth_dir.is_dir()):
print(f'-> Skipping "{src.parent.stem}" sequence "{src.stem}"...')
return
print(f'-> Exporting "{src.parent.stem}" sequence "{sr... |
def main(root):
    """Export COLMAP data for each split under `root`, collecting per-split failures."""
    out_dir = root / 'colmap'
    io.mkdirs(out_dir)
    # Only the test split is exported here; failures are keyed by split name.
    fails = {split: export_split(split, root, out_dir, overwrite=False) for split in tqdm(['test'])}
    print(fails)
|
def main(src, dst):
TARGET_DIR = 'depth_benchmark'
(K_DEPTH, K_RAW) = (src, dst)
print(f'-> Exporting Kitti Benchmark from "{K_DEPTH}" to "{K_RAW}"...')
ROOT = (K_RAW / TARGET_DIR)
ROOT.mkdir(exist_ok=True)
for seq in kr.SEQS:
(ROOT / seq).mkdir(exist_ok=True)
for mode in ('train',... |
def loadmat(file):
    """Load a MATLAB v7.3 file as a dict of numpy arrays.

    NOTE: Uses `h5py` since `scipy.io.loadmat` conflicts with specific matfile versions.

    :param file: (Path|str) Path to the `.mat` file to load.
    :return: (dict[str, np.ndarray]) Loaded arrays keyed by variable name.
    """
    # FIX: open read-only via a context manager so the HDF5 handle is always released
    # (the original left the file open and relied on h5py's default mode).
    with h5py.File(file, 'r') as f:
        return {k: np.array(v) for k, v in f.items()}
|
def export_split(mode, idxs, data, dst):
img_dir = ((dst / mode) / 'rgb')
depth_dir = ((dst / mode) / 'depth')
split_file = ((dst / 'splits') / f'{mode}_files.txt')
io.mkdirs(img_dir, depth_dir, split_file.parent)
with open(split_file, 'w') as f:
for i in tqdm(idxs):
i -= 1
... |
def main(dst):
data_file = (dst / 'nyu_depth_v2_labeled.mat')
split_file = (dst / 'splits.mat')
data = loadmat(data_file)
splits = sio.loadmat(split_file)
export_split('train', splits['trainNdxs'].squeeze(), data, dst)
export_split('test', splits['testNdxs'].squeeze(), data, dst)
data_file... |
def save_settings(**kwargs):
    """Persist the given keyword settings as the SlowTV splits `config.yaml`."""
    cfg_file = PATHS['slow_tv'] / 'splits' / 'config.yaml'
    io.write_yaml(cfg_file, kwargs)
|
def export_scene(args):
(vid_file, cat) = args
seq = vid_file.stem
seq_dir = (PATHS['slow_tv'] / seq)
stv.extract_frames(vid_file, save_dir=seq_dir, fps=fps, trim_start=trim, n_keep=n_keep, per_interval=per_interval, overwrite=overwrite)
seeds = [42, 195, 335, 558, 724]
for seed in seeds:
... |
def main(args):
if write_settings:
save_settings(fps=fps, trim=trim, data_scale=data_scale, n_keep=n_keep, per_interval=per_interval, p_train=p_train, val_skip=val_skip, n_colmap_imgs=n_colmap_imgs, colmap_interval=colmap_interval)
cats = stv.load_categories(subcats=False)
video_files = io.get_fil... |
def main(dst):
    """Copy the repository split files into `dst`, then drop this script's own entry."""
    print(f'-> Copying splits to "{dst}"...')
    src = REPO_ROOT / 'api/data/splits'
    shutil.copytree(src, dst, dirs_exist_ok=True)
    # NOTE(review): presumably `FILE` names a file that must not ship with the splits — confirm.
    (dst / FILE.name).unlink()
|
def save_metrics(file: Path, metrics: ty.U[(Metrics, ty.S[Metrics])]):
    """Write the evaluation `metrics` to a YAML `file`, creating parent dirs as needed."""
    LOGGER.info(f'Saving results to "{file}"...')
    # NOTE(review): this mkdir looks redundant with `write_yaml(..., mkdir=True)` — confirm.
    file.parent.mkdir(exist_ok=True, parents=True)
    write_yaml(file, metrics, mkdir=True)
|
def compute_eval_metrics(preds: ty.A, cfg_file: Path, align_mode: ty.U[(str, float)], nproc: ty.N[int]=None, max_items: ty.N[int]=None) -> tuple[(Metrics, ty.S[Metrics])]:
'Compute evaluation metrics from scaleless network disparities (see `compute_eval_preds`).\n\n :param preds: (NDArray) (b, h, w) Precompute... |
def save_preds(file: Path, preds: ty.A) -> None:
    """Save network predictions to a compressed NPZ file (required for challenge submission)."""
    logging.info(f"Saving network predictions to '{file}'...")
    io.mkdirs(file.parent)
    np.savez_compressed(file, pred=preds)
|
def compute_preds(cfg: dict, ckpt: str, cfg_model: ty.N[list[Path]], device: ty.N[str], overwrite: bool) -> ty.A:
'Compute predictions for a given dataset and network cfg.\n\n `ckpt` can be provided as:\n - Path: Path to a pretrained checkpoint trained using the benchmark repository.\n - Name: Na... |
def get_models(root: Path, exp: str, dataset: str, ckpt: str='last', mode: str='*', res: str='results', models: ty.N[list[str]]=None, tag: str='') -> tuple[(dict[(str, list[Path])], list[str])]:
"Find all models and files associated with a particular experiment.\n NOTE: Parameters can use regex expressions, bu... |
def load_dfs(files: dict[(str, list[Path])]) -> pd.DataFrame:
'Load dict of YAML files into a single dataframe.\n\n :param files: (dict[str, list[Path]]) List of files for each model.\n :return: (DataFrame) Loaded dataframe, index based on the model key and a potential item number.\n '
dfs = [pd.json... |
def filter_df(df: pd.DataFrame) -> tuple[(pd.DataFrame, ty.S[int])]:
'Preprocess dataframe to include only AbsRel and (F-Score or delta) metrics.'
(metrics, metric_type) = (['AbsRel'], [(- 1)])
(delta, delta_legacy) = ('$\\delta_{.25}$', '$\\delta < 1.25$')
(f, f_legacy) = ('F-Score (10)', 'F-Score')
... |
def get_df_mean(df: pd.DataFrame, models: ty.S[str], name: str='Mean') -> tuple[(pd.DataFrame, pd.DataFrame)]:
'Compute the average metrics and stddev across all model seeds.'
df2 = df.groupby(level=0)
df_mean = df2.agg('mean').reindex(models)
df_std = df2.agg('std').reindex(models)
df_std.columns... |
def add_multitask_metrics(df: pd.DataFrame, metric_types: ty.S[int], ref_idx: int=None) -> tuple[(pd.DataFrame, ty.S[int])]:
'Prepend multi-task metrics computed across all metrics.'
rel = compute_rel_improvement(df, metric_types, ref=ref_idx)
df.insert(0, ('MT', '\\%'), rel)
metric_types.insert(0, 1)... |
def compute_rel_improvement(df: pd.DataFrame, metric_types: ty.S[int], ref: int=0) -> pd.Series:
'Compute average relative improvement w.r.t. a reference row index.\n\n :param df: (DataFrame) Input dataframe.\n :param metric_types: (list[int]) Metric type for each metric. {+1: Higher is better, -1: Lower is... |
def compute_mean_rank(df: pd.DataFrame, metric_types: ty.S[int]) -> pd.Series:
'Compute the average ranking position across all metrics for each model.\n\n :param df: (DataFrame) Input dataframe.\n :param metric_types: (list[int]) Metric type for each metric. {+1: Higher is better, -1: Lower is better}\n ... |
def main():
pd.set_option('display.max_rows', None, 'display.max_columns', None)
root = MODEL_ROOTS[(- 1)]
splits = ['kitti_eigen_benchmark', 'mc', 'ddad', 'diode_outdoor', 'sintel', 'syns_test', 'diode_indoors', 'nyud', 'tum']
ref = 0
(dfs, stds, metric_types) = ([], [], [])
for split in spli... |
def compute_preds(name: str, cfg: dict, ckpt: str, cfg_model: ty.N[list[Path]], device: ty.N[str], overwrite: bool) -> None:
'Compute predictions for a given dataset and network cfg.\n\n :param name: (str) Name used when saving predictions.\n :param cfg: (dict) Dataset cfg, following `MonoDepthModule` conve... |
def process_batch_preds(batch: ty.BatchData, preds: ty.A, name: str, pool: Pool) -> None:
'Align depth predictions and save files.'
m = batch[2]
files = [mfr.Item(*items).get_depth_file(name) for items in zip(m['mode'], m['scene'], m['seq'], m['stem'])]
(targets, preds) = ops.to_np([batch[1]['depth'].... |
def process_single_pred(args):
    """Upsample, align and save a single prediction (single-tuple arg for use with Pool.imap)."""
    target, pred, file = args
    aligned = align(upsample(pred, target), target)
    save_depth_image(file, aligned)
|
def upsample(pred: ty.A, target: ty.A) -> ty.A:
    """Bilinearly upsample `pred` to match `target`'s shape (no-op if shapes already match)."""
    if pred.shape != target.shape:
        h, w = target.shape
        # cv2.resize expects (width, height).
        pred = cv2.resize(pred, (w, h), interpolation=cv2.INTER_LINEAR)
    return pred
|
def align(pred: ty.A, target: ty.A) -> ty.A:
'Align predictions to ground-truth depth using least-squares and convert into depths.'
mask = ((target > 0) & (target < 100))
(scale, shift) = MonoDepthEvaluator._align_lsqr(pred[mask], to_inv(target[mask]))
pred = ((scale * pred) + shift)
pred = to_inv... |
def save_depth_image(path: str, depth: ty.A) -> None:
    """Save a depth map in MapFreeReloc format: uint16 PNG with depth in millimetres."""
    # NOTE(review): uint16 wraps for depths beyond ~65.5m — confirm inputs are clipped upstream.
    mm = (depth * 1000).astype(np.uint16)
    cv2.imwrite(str(path), mm)
|
def align_median(pred: np.ndarray, target: np.ndarray) -> float:
    """Return the scale factor that aligns the median of `pred` to the median of `target`."""
    t_med = np.median(target)
    p_med = np.median(pred)
    return t_med / p_med
|
def align_lsqr(pred: np.ndarray, target: np.ndarray) -> list[(float, float)]:
'Return scale & shift factor for least-squares alignment.'
A = np.array([[(pred ** 2).sum(), pred.sum()], [pred.sum(), pred.shape[0]]])
if (np.linalg.det(A) <= 0):
return (0, 0)
b = np.array([(pred * target).sum(), t... |
def main():
def to_inv(depth: np.ndarray, eps: float=1e-05) -> np.ndarray:
return ((depth > 0) / (depth + eps))
depth = np.load('.../kbr/file.npy')
lidar = np.load('.../lidar/file.npy')
valid = ((lidar > 0) & (lidar < 100))
(depth_mask, lidar_mask) = (depth[valid], lidar[valid])
(scal... |
def forward_beit(net, x):
    """Run a BEiT backbone on `x` via the adapted/unflattened feature-extraction path."""
    method = 'forward_features'
    return forward_adapted_unflatten(net, x, method)
|
def make_beitl16_512(pretrained, use_readout='ignore', hooks=(5, 11, 17, 23)):
    """Build a DPT backbone from timm's `beit_large_patch16_512`."""
    net = timm.create_model('beit_large_patch16_512', pretrained=pretrained)
    return _make_beit_backbone(
        net,
        features=[256, 512, 1024, 1024],
        size=[512, 512],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
|
def make_beitl16_384(pretrained, use_readout='ignore', hooks=(5, 11, 17, 23)):
    """Build a DPT backbone from timm's `beit_large_patch16_384`."""
    net = timm.create_model('beit_large_patch16_384', pretrained=pretrained)
    return _make_beit_backbone(
        net,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
|
def make_beitb16_384(pretrained, use_readout='ignore', hooks=(2, 5, 8, 11)):
    """Build a DPT backbone from timm's `beit_base_patch16_384`."""
    net = timm.create_model('beit_base_patch16_384', pretrained=pretrained)
    return _make_beit_backbone(
        net,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
    )
|
def _make_beit_backbone(model, features=(96, 192, 384, 768), size=(384, 384), hooks=(0, 4, 8, 11), vit_features=768, use_readout='ignore', start_index=1, start_index_readout=1):
backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index, start_index_readout)
backbone... |
def _patch_embed_forward(self, x):
    """Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes."""
    out = self.proj(x)
    if self.flatten:
        # (B, C, H, W) -> (B, H*W, C)
        out = out.flatten(2).transpose(1, 2)
    return self.norm(out)
|
def _beit_forward_features(self, x):
'Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes.'
resolution = x.shape[2:]
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], (- 1), (- 1)), x), dim=1)
if (self.pos_embed is not None):
... |
def _get_rel_pos_bias(self, window_size):
'Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.'
old_height = ((2 * self.window_size[0]) - 1)
old_width = ((2 * self.window_size[1]) - 1)
new_height = ((2 * window_size[0]) - 1)
new_width = ((2 * window_... |
def _attention_forward(self, x, resolution, shared_rel_pos_bias=None):
'Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes.'
(B, N, C) = x.shape
qkv_bias = (torch.cat((self.q_bias, self.k_bias, self.v_bias)) if (self.q_bias is not None) else None)
qkv = F.linear(i... |
def _block_forward(self, x, resolution, shared_rel_pos_bias=None):
'Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes.'
if (self.gamma_1 is None):
x = (x + self.drop_path(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias)))
x = (x +... |
class FeatureInfo():
'Encoder multi-scale feature information. Used for compatibility with `timm`.'
def __init__(self, n_ch: ty.S[int]):
self.n_ch = n_ch
self.red = [(32 // (2 ** i)) for i in range((len(self.n_ch) - 1), (- 1), (- 1))]
def channels(self) -> ty.S[int]:
return self.... |
class DptEncoder(nn.Module):
def __init__(self, enc_name: str, pretrained: bool=True, use_readout: str='project'):
super().__init__()
(n, pt, r) = (enc_name, pretrained, use_readout)
if (n == 'beitl16_512'):
self.net = make_beitl16_512(pt, hooks=[5, 11, 17, 23], use_readout=r)... |
def forward_swin(net, x):
    """Run a Swin backbone on `x` using the default forward pass."""
    out = forward_default(net, x)
    return out
|
def make_swinl12_384(pretrained, hooks=(1, 1, 17, 1)):
    """Build a backbone from timm's `swin_large_patch4_window12_384`."""
    net = timm.create_model('swin_large_patch4_window12_384', pretrained=pretrained)
    return _make_swin_backbone(net, hooks=hooks)
|
def make_swin2l24_384(pretrained, hooks=(1, 1, 17, 1)):
    """Build a backbone from timm's `swinv2_large_window12to24_192to384_22kft1k`."""
    net = timm.create_model('swinv2_large_window12to24_192to384_22kft1k', pretrained=pretrained)
    return _make_swin_backbone(net, hooks=hooks)
|
def make_swin2b24_384(pretrained, hooks=(1, 1, 17, 1)):
    """Build a backbone from timm's `swinv2_base_window12to24_192to384_22kft1k`."""
    net = timm.create_model('swinv2_base_window12to24_192to384_22kft1k', pretrained=pretrained)
    return _make_swin_backbone(net, hooks=hooks)
|
def make_swin2t16_256(pretrained, hooks=(1, 1, 17, 1)):
    """Build a backbone from timm's `swinv2_tiny_window16_256` (smaller 64x64 patch grid)."""
    net = timm.create_model('swinv2_tiny_window16_256', pretrained=pretrained)
    return _make_swin_backbone(net, hooks=hooks, patch_grid=[64, 64])
|
def _make_swin_backbone(model, hooks=(1, 1, 17, 1), patch_grid=(96, 96)):
net = nn.Module()
net.model = model
net.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation('1'))
net.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation('2'))
net.model.layers[2].blocks[h... |
class ResidualBlock(nn.Module):
'Residual convolution module.'
def __init__(self, ch: int, act: nn.Module, use_bn: bool=False):
super().__init__()
self.bn = use_bn
self.conv1 = nn.Conv2d(ch, ch, kernel_size=3, stride=1, padding=1, bias=True, groups=1)
self.conv2 = nn.Conv2d(ch... |
class FeatureFusionBlock(nn.Module):
'Feature fusion block.'
def __init__(self, ch: int, act: nn.Module, deconv: bool=False, use_bn: bool=False, expand: bool=False, align_corners: bool=True, size: Optional[tuple[(int, int)]]=None):
super().__init__()
self.deconv = deconv
self.align_co... |
class DptDecoder(nn.Module):
def __init__(self, num_ch_enc: list[int], enc_sc: list[int], upsample_mode: str='nearest', use_skip: bool=True, out_sc: list[int]=(0, 1, 2, 3), out_ch: int=1, out_act: str='relu'):
super().__init__()
self.num_ch_enc = num_ch_enc
self.enc_sc = enc_sc
se... |
class MonodepthDecoder(nn.Module):
"From Monodepth(2) (https://arxiv.org/abs/1806.01260)\n\n Generic convolutional decoder incorporating multi-scale predictions and skip connections.\n\n :param num_ch_enc: (list[int]) List of channels per encoder stage.\n :param enc_sc: (list[int]) List of downsampling f... |
def main():
parser = ArgumentParser(description='Monocular depth trainer.')
parser.add_argument('--cfg-files', '-c', type=Path, nargs='*', help='Path to YAML config files to load (default, override).')
parser.add_argument('--ckpt-dir', '-o', default=MODEL_ROOTS[(- 1)], type=Path, help='Root path to store ... |
def main():
parser = ArgumentParser(description='Monocular depth trainer.')
parser.add_argument('--cfg-files', '-c', type=Path, nargs='*', help='Path to YAML config files to load (default, override).')
parser.add_argument('--ckpt-dir', '-o', default=Path('/tmp'), type=Path, help='Root path to store checkp... |
def _num_pix(shape: ty.S[int]) -> int:
    """Return the number of pixels in a 2D image.

    :param shape: (Sequence[int]) Image shape as (height, width).
    :return: (int) Total number of pixels, i.e. h * w.
    :raises ValueError: If `shape` does not have exactly two entries.
    """
    # FIX: validate with an explicit raise instead of `assert`, which is stripped under `python -O`.
    if len(shape) != 2:
        raise ValueError(f'Expected a 2D shape (h, w), got {shape}.')
    return shape[0] * shape[1]
|
def _find_closest_multiple(i: ty.U[(int, float)], n: int=32) -> int:
    """Return the multiple of `n` closest to the input `i`.

    NOTE: Exact ties follow Python's banker's rounding (e.g. i=16, n=32 -> 0).
    """
    factor = round(i / n)
    return factor * n
|
@torch.no_grad()
def aspect_ratio_aug(batch: ty.BatchData, p: float=1.0, crop_min: float=0.5, crop_max: float=1.0, ref_shape: ty.N[ty.S[int]]=None) -> ty.BatchData:
'Augmentation to change the aspect ratio of the input images.\n\n NOTE: Augmentation happens in-place!\n NOTE: If available, ground-truth depth... |
def crop_aug(batch: ty.BatchData, min: float=0.5, max: float=1.0) -> ty.BatchData:
'Apply a centre crop with a random aspect ratio.\n\n :param batch: (BatchData) Input training batch.\n :param min: (float) Minimum relative size of the sampled crop [0, 1].\n :param max: (float) Maximum relative size of th... |
def sample_crop(shape: ty.S[int], min: float=0.5, max: float=1.0) -> tuple[(ty.S[int], float)]:
'Randomly sample a centre crop with a new aspect ratio.\n\n NOTE: In practice, we only guarantee that one of the dimensions will be between [min, max]. This is done to allow\n for additional flexibility when samp... |
def resize_aug(batch: ty.BatchData, ref_shape: ty.S[int], eps: float=0.8) -> ty.BatchData:
'Apply a resize augmentation to match the number of pixels in `ref_shape`.\n\n NOTE: Resizing depth maps (especially sparse LiDAR) is questionable and will likely lead to unreliable metrics.\n\n :param batch: (BatchDa... |
def sample_resize(shape: ty.S[int], ref_shape: ty.S[int], eps: float=0.8) -> ty.S[int]:
"Sample the resize shape for the new aspect ratio that provides the same number of pixels as `ref_shape`.\n\n NOTE: Sampled shape will always be a multiple of 32, as required by most networks. This also means the output sha... |
class BaseDataset(ABC, Dataset):
'Base dataset class that all others should inherit from.\n\n The idea is to provide a common structure and data format. Additionally, provide some nice functionality and\n automation for the more boring stuff. Datasets are defined as providing the following dicts for each it... |
class MdeBaseDataset(BaseDataset, retry_exc=ty.SuppImageNotFoundError):
'Base class used for Monocular Depth Estimation datasets.\n See the documentation from `BaseDataset` for additional information.\n\n Assumes most datasets provide:\n - Image: Target image from which to predict depth.\n - S... |
@register('ddad')
class DdadDataset(MdeBaseDataset):
'DDAD Dataset. From: https://arxiv.org/abs/1905.02693.\n\n This dataset is a simple wrapper over the official `SynchronizedSceneDataset` provided by the DGP repo\n (https://github.com/TRI-ML/dgp, downloaded to `/PATH/TO/ROOT/src/external_libs/dgp`).\n\n ... |
def validated_init(__init__: ty.Callable):
'Decorator to ensure a BaseDataset child always calls argument validation after init.'
@wraps(__init__)
def wrapper(self, *args, **kwargs) -> None:
self.logger.info(f"Creating '{self.__class__.__qualname__}'...")
__init__(self, *args, **kwargs)
... |
@opt_args_deco
def retry_new_on_error(__getitem__: ty.Callable, exc: ty.U[(BaseException, ty.S[BaseException])]=Exception, silent: bool=False, max: ty.N[int]=None, use_blacklist: bool=False) -> ty.Callable:
'Decorator to wrap a BaseDataset __getitem__ function and retry a different item if there is an error.\n\n ... |
@register('diode')
class DiodeDataset(MdeBaseDataset):
VALID_DATUM = 'image depth mask'
SHAPE = (768, 1024)
def __init__(self, scene: str, mode: str, datum='image depth mask', **kwargs):
super().__init__(datum=datum, **kwargs)
self.scene = scene
self.mode = mode
(self.spli... |
@register('kitti')
class KittiRawDataset(MdeBaseDataset):
'Kitti Raw dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (either monocular or stereo) used to compute photometric consistency losses.\n - Depth: Target ground-truth benchmark d... |
@register('kitti_lmdb')
class KittiRawLmdbDataset(KittiRawDataset):
'Kitti Raw dataset using LMDBs. See `KittiRawDataset` for additional details.'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.image_dbs = {}
self.depth_dbs = {}
self.poses_dbs = {}... |
@register('mannequin')
class MannequinDataset(MdeBaseDataset):
'Mannequin Challenge dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (monocular) used to compute photometric consistency losses.\n - Depth: Target ground-truth COLMAP depth.\n ... |
@register('mannequin_lmdb')
class MannequinLmdbDataset(MannequinDataset):
'Mannequin Challenge dataset using LMDBs. See `MannequinDataset` for additional details.'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.img_db = mc.load_imgs(self.mode)
self.depth_d... |
@register('mapfree')
class MapFreeRelocDataset(MdeBaseDataset):
'MapFreeReloc dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (monocular) used to compute photometric consistency losses.\n - Pose: Camera extrinsic parameters.\n - ... |
@register('nyud')
class NyudDataset(MdeBaseDataset):
VALID_DATUM = 'image depth'
SHAPE = (480, 640)
def __init__(self, mode: str, datum: ty.U[(str, ty.S[str])]='image depth', **kwargs):
super().__init__(datum=datum, **kwargs)
self.mode = mode
(self.split_file, self.items_data) = s... |
@register('sintel')
class SintelDataset(MdeBaseDataset):
VALID_DATUM = 'image depth K'
SHAPE = (436, 1024)
def __init__(self, mode: str, datum: ty.U[(str, ty.S[str])]='image depth K', **kwargs):
super().__init__(datum=datum, **kwargs)
self.mode = mode
(self.split_file, self.items_... |
@register('slow_tv')
class SlowTvDataset(MdeBaseDataset):
'SlowTV dataset.\n\n Datum:\n - Image: Target image from which to predict depth.\n - Support: Adjacent frames (monocular) used to compute photometric consistency losses.\n - K: Camera intrinsic parameters.\n\n See BaseDataset for... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.