{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# find the dataset definition by name, for example dtu_yao (dtu_yao.py)\n",
    "def find_dataset_def(dataset_name):\n",
    "    module_name = 'datasets.{}'.format(dataset_name)\n",
    "    module = importlib.import_module(module_name)\n",
    "    return getattr(module, \"MVSDataset\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "Implementation of Pytorch layer primitives, such as Conv+BN+ReLU, differentiable warping layers,\n",
    "and depth regression based upon expectation of an input probability distribution.\n",
    "\"\"\"\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "\n",
    "class ConvBnReLU(nn.Module):\n",
    "    \"\"\"Implements 2d Convolution + batch normalization + ReLU\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        in_channels: int,\n",
    "        out_channels: int,\n",
    "        kernel_size: int = 3,\n",
    "        stride: int = 1,\n",
    "        pad: int = 1,\n",
    "        dilation: int = 1,\n",
    "    ) -> None:\n",
    "        \"\"\"initialization method for convolution2D + batch normalization + relu module\n",
    "        Args:\n",
    "            in_channels: input channel number of convolution layer\n",
    "            out_channels: output channel number of convolution layer\n",
    "            kernel_size: kernel size of convolution layer\n",
    "            stride: stride of convolution layer\n",
    "            pad: pad of convolution layer\n",
    "            dilation: dilation of convolution layer\n",
    "        \"\"\"\n",
    "        super(ConvBnReLU, self).__init__()\n",
    "        self.conv = nn.Conv2d(\n",
    "            in_channels, out_channels, kernel_size, stride=stride, padding=pad, dilation=dilation, bias=False\n",
    "        )\n",
    "        self.bn = nn.BatchNorm2d(out_channels)\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"forward method\"\"\"\n",
    "        return F.relu(self.bn(self.conv(x)), inplace=True)\n",
    "\n",
    "\n",
    "class ConvBnReLU3D(nn.Module):\n",
    "    \"\"\"Implements of 3d convolution + batch normalization + ReLU.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        in_channels: int,\n",
    "        out_channels: int,\n",
    "        kernel_size: int = 3,\n",
    "        stride: int = 1,\n",
    "        pad: int = 1,\n",
    "        dilation: int = 1,\n",
    "    ) -> None:\n",
    "        \"\"\"initialization method for convolution3D + batch normalization + relu module\n",
    "        Args:\n",
    "            in_channels: input channel number of convolution layer\n",
    "            out_channels: output channel number of convolution layer\n",
    "            kernel_size: kernel size of convolution layer\n",
    "            stride: stride of convolution layer\n",
    "            pad: pad of convolution layer\n",
    "            dilation: dilation of convolution layer\n",
    "        \"\"\"\n",
    "        super(ConvBnReLU3D, self).__init__()\n",
    "        self.conv = nn.Conv3d(\n",
    "            in_channels, out_channels, kernel_size, stride=stride, padding=pad, dilation=dilation, bias=False\n",
    "        )\n",
    "        self.bn = nn.BatchNorm3d(out_channels)\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"forward method\"\"\"\n",
    "        return F.relu(self.bn(self.conv(x)), inplace=True)\n",
    "\n",
    "\n",
    "class ConvBnReLU1D(nn.Module):\n",
    "    \"\"\"Implements 1d Convolution + batch normalization + ReLU.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        in_channels: int,\n",
    "        out_channels: int,\n",
    "        kernel_size: int = 3,\n",
    "        stride: int = 1,\n",
    "        pad: int = 1,\n",
    "        dilation: int = 1,\n",
    "    ) -> None:\n",
    "        \"\"\"initialization method for convolution1D + batch normalization + relu module\n",
    "        Args:\n",
    "            in_channels: input channel number of convolution layer\n",
    "            out_channels: output channel number of convolution layer\n",
    "            kernel_size: kernel size of convolution layer\n",
    "            stride: stride of convolution layer\n",
    "            pad: pad of convolution layer\n",
    "            dilation: dilation of convolution layer\n",
    "        \"\"\"\n",
    "        super(ConvBnReLU1D, self).__init__()\n",
    "        self.conv = nn.Conv1d(\n",
    "            in_channels, out_channels, kernel_size, stride=stride, padding=pad, dilation=dilation, bias=False\n",
    "        )\n",
    "        self.bn = nn.BatchNorm1d(out_channels)\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"forward method\"\"\"\n",
    "        return F.relu(self.bn(self.conv(x)), inplace=True)\n",
    "\n",
    "\n",
    "class ConvBn(nn.Module):\n",
    "    \"\"\"Implements of 2d convolution + batch normalization.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, pad: int = 1\n",
    "    ) -> None:\n",
    "        \"\"\"initialization method for convolution2D + batch normalization + ReLU module\n",
    "        Args:\n",
    "            in_channels: input channel number of convolution layer\n",
    "            out_channels: output channel number of convolution layer\n",
    "            kernel_size: kernel size of convolution layer\n",
    "            stride: stride of convolution layer\n",
    "            pad: pad of convolution layer\n",
    "        \"\"\"\n",
    "        super(ConvBn, self).__init__()\n",
    "        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=pad, bias=False)\n",
    "        self.bn = nn.BatchNorm2d(out_channels)\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"forward method\"\"\"\n",
    "        return self.bn(self.conv(x))\n",
    "\n",
    "\n",
    "def differentiable_warping(\n",
    "    src_fea: torch.Tensor, src_proj: torch.Tensor, ref_proj: torch.Tensor, depth_samples: torch.Tensor\n",
    "):\n",
    "    \"\"\"Differentiable homography-based warping, implemented in Pytorch.\n",
    "\n",
    "    Args:\n",
    "        src_fea: [B, C, H, W] source features, for each source view in batch\n",
    "        src_proj: [B, 4, 4] source camera projection matrix, for each source view in batch\n",
    "        ref_proj: [B, 4, 4] reference camera projection matrix, for each ref view in batch\n",
    "        depth_samples: [B, Ndepth, H, W] virtual depth layers\n",
    "    Returns:\n",
    "        warped_src_fea: [B, C, Ndepth, H, W] features on depths after perspective transformation\n",
    "    \"\"\"\n",
    "\n",
    "    batch, channels, height, width = src_fea.shape\n",
    "    num_depth = depth_samples.shape[1]\n",
    "\n",
    "    with torch.no_grad():\n",
    "        proj = torch.matmul(src_proj, torch.inverse(ref_proj))\n",
    "        rot = proj[:, :3, :3]  # [B,3,3]\n",
    "        trans = proj[:, :3, 3:4]  # [B,3,1]\n",
    "\n",
    "        y, x = torch.meshgrid(\n",
    "            [\n",
    "                torch.arange(0, height, dtype=torch.float32, device=src_fea.device),\n",
    "                torch.arange(0, width, dtype=torch.float32, device=src_fea.device),\n",
    "            ]\n",
    "        )\n",
    "        y, x = y.contiguous(), x.contiguous()\n",
    "        y, x = y.view(height * width), x.view(height * width)\n",
    "        xyz = torch.stack((x, y, torch.ones_like(x)))  # [3, H*W]\n",
    "        xyz = torch.unsqueeze(xyz, 0).repeat(batch, 1, 1)  # [B, 3, H*W]\n",
    "        rot_xyz = torch.matmul(rot, xyz)  # [B, 3, H*W]\n",
    "\n",
    "        rot_depth_xyz = rot_xyz.unsqueeze(2).repeat(1, 1, num_depth, 1) * depth_samples.view(\n",
    "            batch, 1, num_depth, height * width\n",
    "        )  # [B, 3, Ndepth, H*W]\n",
    "        proj_xyz = rot_depth_xyz + trans.view(batch, 3, 1, 1)  # [B, 3, Ndepth, H*W]\n",
    "        # avoid negative depth\n",
    "        negative_depth_mask = proj_xyz[:, 2:] <= 1e-3\n",
    "        proj_xyz[:, 0:1][negative_depth_mask] = float(width)\n",
    "        proj_xyz[:, 1:2][negative_depth_mask] = float(height)\n",
    "        proj_xyz[:, 2:3][negative_depth_mask] = 1.0\n",
    "        proj_xy = proj_xyz[:, :2, :, :] / proj_xyz[:, 2:3, :, :]  # [B, 2, Ndepth, H*W]\n",
    "        proj_x_normalized = proj_xy[:, 0, :, :] / ((width - 1) / 2) - 1  # [B, Ndepth, H*W]\n",
    "        proj_y_normalized = proj_xy[:, 1, :, :] / ((height - 1) / 2) - 1\n",
    "        proj_xy = torch.stack((proj_x_normalized, proj_y_normalized), dim=3)  # [B, Ndepth, H*W, 2]\n",
    "        grid = proj_xy\n",
    "\n",
    "    warped_src_fea = F.grid_sample(\n",
    "        src_fea,\n",
    "        grid.view(batch, num_depth * height, width, 2),\n",
    "        mode=\"bilinear\",\n",
    "        padding_mode=\"zeros\",\n",
    "        align_corners=True,\n",
    "    )\n",
    "\n",
    "    return warped_src_fea.view(batch, channels, num_depth, height, width)\n",
    "\n",
    "\n",
    "def depth_regression(p: torch.Tensor, depth_values: torch.Tensor) -> torch.Tensor:\n",
    "    \"\"\"Implements per-pixel depth regression based upon a probability distribution per-pixel.\n",
    "\n",
    "    The regressed depth value D(p) at pixel p is found as the expectation w.r.t. P of the hypotheses.\n",
    "\n",
    "    Args:\n",
    "        p: probability volume [B, D, H, W]\n",
    "        depth_values: discrete depth values [B, D]\n",
    "    Returns:\n",
    "        result depth: expected value, soft argmin [B, 1, H, W]\n",
    "    \"\"\"\n",
    "\n",
    "    return torch.sum(p * depth_values.view(depth_values.shape[0], 1, 1), dim=1).unsqueeze(1)\n",
    "\n",
    "\n",
    "def is_empty(x: torch.Tensor) -> bool:\n",
    "    return x.numel() == 0\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Dict, List, Tuple\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from .module import ConvBnReLU, depth_regression\n",
    "from .patchmatch import PatchMatch\n",
    "\n",
    "\n",
    "class FeatureNet(nn.Module):\n",
    "    \"\"\"Feature Extraction Network: to extract features of original images from each view\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        \"\"\"Initialize different layers in the network\"\"\"\n",
    "\n",
    "        super(FeatureNet, self).__init__()\n",
    "\n",
    "        self.conv0 = ConvBnReLU(3, 8, 3, 1, 1)\n",
    "        # [B,8,H,W]\n",
    "        self.conv1 = ConvBnReLU(8, 8, 3, 1, 1)\n",
    "        # [B,16,H/2,W/2]\n",
    "        self.conv2 = ConvBnReLU(8, 16, 5, 2, 2)\n",
    "        self.conv3 = ConvBnReLU(16, 16, 3, 1, 1)\n",
    "        self.conv4 = ConvBnReLU(16, 16, 3, 1, 1)\n",
    "        # [B,32,H/4,W/4]\n",
    "        self.conv5 = ConvBnReLU(16, 32, 5, 2, 2)\n",
    "        self.conv6 = ConvBnReLU(32, 32, 3, 1, 1)\n",
    "        self.conv7 = ConvBnReLU(32, 32, 3, 1, 1)\n",
    "        # [B,64,H/8,W/8]\n",
    "        self.conv8 = ConvBnReLU(32, 64, 5, 2, 2)\n",
    "        self.conv9 = ConvBnReLU(64, 64, 3, 1, 1)\n",
    "        self.conv10 = ConvBnReLU(64, 64, 3, 1, 1)\n",
    "\n",
    "        self.output1 = nn.Conv2d(64, 64, 1, bias=False)\n",
    "        self.inner1 = nn.Conv2d(32, 64, 1, bias=True)\n",
    "        self.inner2 = nn.Conv2d(16, 64, 1, bias=True)\n",
    "        self.output2 = nn.Conv2d(64, 32, 1, bias=False)\n",
    "        self.output3 = nn.Conv2d(64, 16, 1, bias=False)\n",
    "\n",
    "    def forward(self, x: torch.Tensor) -> Dict[int, torch.Tensor]:\n",
    "        \"\"\"Forward method\n",
    "\n",
    "        Args:\n",
    "            x: images from a single view, in the shape of [B, C, H, W]. Generally, C=3\n",
    "\n",
    "        Returns:\n",
    "            output_feature: a python dictionary contains extracted features from stage 1 to stage 3\n",
    "                keys are 1, 2, and 3\n",
    "        \"\"\"\n",
    "        output_feature: Dict[int, torch.Tensor] = {}\n",
    "\n",
    "        conv1 = self.conv1(self.conv0(x))\n",
    "        conv4 = self.conv4(self.conv3(self.conv2(conv1)))\n",
    "\n",
    "        conv7 = self.conv7(self.conv6(self.conv5(conv4)))\n",
    "        conv10 = self.conv10(self.conv9(self.conv8(conv7)))\n",
    "\n",
    "        output_feature[3] = self.output1(conv10)\n",
    "        intra_feat = F.interpolate(conv10, scale_factor=2.0, mode=\"bilinear\", align_corners=False) + self.inner1(conv7)\n",
    "        del conv7\n",
    "        del conv10\n",
    "\n",
    "        output_feature[2] = self.output2(intra_feat)\n",
    "        intra_feat = F.interpolate(\n",
    "            intra_feat, scale_factor=2.0, mode=\"bilinear\", align_corners=False) + self.inner2(conv4)\n",
    "        del conv4\n",
    "\n",
    "        output_feature[1] = self.output3(intra_feat)\n",
    "        del intra_feat\n",
    "\n",
    "        return output_feature\n",
    "\n",
    "\n",
    "class Refinement(nn.Module):\n",
    "    \"\"\"Depth map refinement network\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        \"\"\"Initialize\"\"\"\n",
    "\n",
    "        super(Refinement, self).__init__()\n",
    "\n",
    "        # img: [B,3,H,W]\n",
    "        self.conv0 = ConvBnReLU(in_channels=3, out_channels=8)\n",
    "        # depth map:[B,1,H/2,W/2]\n",
    "        self.conv1 = ConvBnReLU(in_channels=1, out_channels=8)\n",
    "        self.conv2 = ConvBnReLU(in_channels=8, out_channels=8)\n",
    "        self.deconv = nn.ConvTranspose2d(\n",
    "            in_channels=8, out_channels=8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False\n",
    "        )\n",
    "\n",
    "        self.bn = nn.BatchNorm2d(8)\n",
    "        self.conv3 = ConvBnReLU(in_channels=16, out_channels=8)\n",
    "        self.res = nn.Conv2d(in_channels=8, out_channels=1, kernel_size=3, padding=1, bias=False)\n",
    "\n",
    "    def forward(\n",
    "        self, img: torch.Tensor, depth_0: torch.Tensor, depth_min: torch.Tensor, depth_max: torch.Tensor\n",
    "    ) -> torch.Tensor:\n",
    "        \"\"\"Forward method\n",
    "\n",
    "        Args:\n",
    "            img: input reference images (B, 3, H, W)\n",
    "            depth_0: current depth map (B, 1, H//2, W//2)\n",
    "            depth_min: pre-defined minimum depth (B, )\n",
    "            depth_max: pre-defined maximum depth (B, )\n",
    "\n",
    "        Returns:\n",
    "            depth: refined depth map (B, 1, H, W)\n",
    "        \"\"\"\n",
    "\n",
    "        batch_size = depth_min.size()[0]\n",
    "        # pre-scale the depth map into [0,1]\n",
    "        depth = (depth_0 - depth_min.view(batch_size, 1, 1, 1)) / (depth_max - depth_min).view(batch_size, 1, 1, 1)\n",
    "\n",
    "        conv0 = self.conv0(img)\n",
    "        deconv = F.relu(self.bn(self.deconv(self.conv2(self.conv1(depth)))), inplace=True)\n",
    "        # depth residual\n",
    "        res = self.res(self.conv3(torch.cat((deconv, conv0), dim=1)))\n",
    "        del conv0\n",
    "        del deconv\n",
    "\n",
    "        depth = F.interpolate(depth, scale_factor=2.0, mode=\"nearest\") + res\n",
    "        # convert the normalized depth back\n",
    "        return depth * (depth_max - depth_min).view(batch_size, 1, 1, 1) + depth_min.view(batch_size, 1, 1, 1)\n",
    "\n",
    "\n",
    "class PatchmatchNet(nn.Module):\n",
    "    \"\"\" Implementation of complete structure of PatchmatchNet\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        patchmatch_interval_scale: List[float] = [0.005, 0.0125, 0.025],\n",
    "        propagation_range: List[int] = [6, 4, 2],\n",
    "        patchmatch_iteration: List[int] = [1, 2, 2],\n",
    "        patchmatch_num_sample: List[int] = [8, 8, 16],\n",
    "        propagate_neighbors: List[int] = [0, 8, 16],\n",
    "        evaluate_neighbors: List[int] = [9, 9, 9],\n",
    "    ) -> None:\n",
    "        \"\"\"Initialize modules in PatchmatchNet\n",
    "\n",
    "        Args:\n",
    "            patchmatch_interval_scale: depth interval scale in patchmatch module\n",
    "            propagation_range: propagation range\n",
    "            patchmatch_iteration: patchmatch iteration number\n",
    "            patchmatch_num_sample: patchmatch number of samples\n",
    "            propagate_neighbors: number of propagation neighbors\n",
    "            evaluate_neighbors: number of propagation neighbors for evaluation\n",
    "        \"\"\"\n",
    "        super(PatchmatchNet, self).__init__()\n",
    "\n",
    "        self.stages = 4\n",
    "        self.feature = FeatureNet()\n",
    "        self.patchmatch_num_sample = patchmatch_num_sample\n",
    "\n",
    "        num_features = [16, 32, 64]\n",
    "\n",
    "        self.propagate_neighbors = propagate_neighbors\n",
    "        self.evaluate_neighbors = evaluate_neighbors\n",
    "        # number of groups for group-wise correlation\n",
    "        self.G = [4, 8, 8]\n",
    "\n",
    "        for i in range(self.stages - 1):\n",
    "            patchmatch = PatchMatch(\n",
    "                propagation_out_range=propagation_range[i],\n",
    "                patchmatch_iteration=patchmatch_iteration[i],\n",
    "                patchmatch_num_sample=patchmatch_num_sample[i],\n",
    "                patchmatch_interval_scale=patchmatch_interval_scale[i],\n",
    "                num_feature=num_features[i],\n",
    "                G=self.G[i],\n",
    "                propagate_neighbors=self.propagate_neighbors[i],\n",
    "                evaluate_neighbors=evaluate_neighbors[i],\n",
    "                stage=i + 1,\n",
    "            )\n",
    "            setattr(self, f\"patchmatch_{i+1}\", patchmatch)\n",
    "\n",
    "        self.upsample_net = Refinement()\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        images: Dict[str, torch.Tensor],\n",
    "        proj_matrices: Dict[str, torch.Tensor],\n",
    "        depth_min: torch.Tensor,\n",
    "        depth_max: torch.Tensor,\n",
    "    ) -> Tuple[torch.Tensor, torch.Tensor, Dict[int, List[torch.Tensor]]]:\n",
    "        \"\"\"Forward method for PatchMatchNet\n",
    "\n",
    "        Args:\n",
    "            images: different stages of images (B, 3, H, W) stored in the dictionary\n",
    "            proj_matrices: different stages of camera projection matrices (B, 4, 4) stored in the dictionary\n",
    "            depth_min: minimum virtual depth (B, )\n",
    "            depth_max: maximum virtual depth (B, )\n",
    "\n",
    "        Returns:\n",
    "            output tuple of PatchMatchNet, containing refined depthmap, depth patchmatch, and photometric confidence.\n",
    "        \"\"\"\n",
    "        imgs_0 = torch.unbind(images[\"stage_0\"], 1)\n",
    "        del images\n",
    "\n",
    "        ref_image = imgs_0[0]\n",
    "\n",
    "        proj_mtx = {\n",
    "            0: torch.unbind(proj_matrices[\"stage_0\"].float(), 1),\n",
    "            1: torch.unbind(proj_matrices[\"stage_1\"].float(), 1),\n",
    "            2: torch.unbind(proj_matrices[\"stage_2\"].float(), 1),\n",
    "            3: torch.unbind(proj_matrices[\"stage_3\"].float(), 1)\n",
    "        }\n",
    "        del proj_matrices\n",
    "\n",
    "        assert len(imgs_0) == len(proj_mtx[0]), \"Different number of images and projection matrices\"\n",
    "\n",
    "        # step 1. Multi-scale feature extraction\n",
    "        features: List[Dict[int, torch.Tensor]] = []\n",
    "        for img in imgs_0:\n",
    "            output_feature = self.feature(img)\n",
    "            features.append(output_feature)\n",
    "        del imgs_0\n",
    "        ref_feature, src_features = features[0], features[1:]\n",
    "\n",
    "        depth_min = depth_min.float()\n",
    "        depth_max = depth_max.float()\n",
    "\n",
    "        # step 2. Learning-based patchmatch\n",
    "        depth = torch.empty(0)\n",
    "        depths: List[torch.Tensor] = []\n",
    "        score = torch.empty(0)\n",
    "        view_weights = torch.empty(0)\n",
    "        depth_patchmatch: Dict[int, List[torch.Tensor]] = {}\n",
    "\n",
    "        for stage in range(self.stages - 1, 0, -1):\n",
    "            src_features_l = [src_fea[stage] for src_fea in src_features]\n",
    "            ref_proj, src_projs = proj_mtx[stage][0], proj_mtx[stage][1:]\n",
    "            # Need conditional since TorchScript only allows \"getattr\" access with string literals\n",
    "            if stage == 3:\n",
    "                depths, _, view_weights = self.patchmatch_3(\n",
    "                    ref_feature=ref_feature[stage],\n",
    "                    src_features=src_features_l,\n",
    "                    ref_proj=ref_proj,\n",
    "                    src_projs=src_projs,\n",
    "                    depth_min=depth_min,\n",
    "                    depth_max=depth_max,\n",
    "                    depth=depth,\n",
    "                    view_weights=view_weights,\n",
    "                )\n",
    "            elif stage == 2:\n",
    "                depths, _, view_weights = self.patchmatch_2(\n",
    "                    ref_feature=ref_feature[stage],\n",
    "                    src_features=src_features_l,\n",
    "                    ref_proj=ref_proj,\n",
    "                    src_projs=src_projs,\n",
    "                    depth_min=depth_min,\n",
    "                    depth_max=depth_max,\n",
    "                    depth=depth,\n",
    "                    view_weights=view_weights,\n",
    "                )\n",
    "            elif stage == 1:\n",
    "                depths, score, _ = self.patchmatch_1(\n",
    "                    ref_feature=ref_feature[stage],\n",
    "                    src_features=src_features_l,\n",
    "                    ref_proj=ref_proj,\n",
    "                    src_projs=src_projs,\n",
    "                    depth_min=depth_min,\n",
    "                    depth_max=depth_max,\n",
    "                    depth=depth,\n",
    "                    view_weights=view_weights,\n",
    "                )\n",
    "\n",
    "            depth_patchmatch[stage] = depths\n",
    "            depth = depths[-1].detach()\n",
    "\n",
    "            if stage > 1:\n",
    "                # upsampling the depth map and pixel-wise view weight for next stage\n",
    "                depth = F.interpolate(depth, scale_factor=2.0, mode=\"nearest\")\n",
    "                view_weights = F.interpolate(view_weights, scale_factor=2.0, mode=\"nearest\")\n",
    "\n",
    "        del ref_feature\n",
    "        del src_features\n",
    "\n",
    "        # step 3. Refinement\n",
    "        depth = self.upsample_net(ref_image, depth, depth_min, depth_max)\n",
    "\n",
    "        if self.training:\n",
    "            return depth, torch.empty(0), depth_patchmatch\n",
    "        else:\n",
    "            num_depth = self.patchmatch_num_sample[0]\n",
    "            score_sum4 = 4 * F.avg_pool3d(\n",
    "                F.pad(score.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1), stride=1, padding=0\n",
    "            ).squeeze(1)\n",
    "            # [B, 1, H, W]\n",
    "            depth_index = depth_regression(\n",
    "                score, depth_values=torch.arange(num_depth, device=score.device, dtype=torch.float)\n",
    "            ).long().clamp(0, num_depth - 1)\n",
    "            photometric_confidence = torch.gather(score_sum4, 1, depth_index)\n",
    "            photometric_confidence = F.interpolate(photometric_confidence, scale_factor=2.0, mode=\"nearest\").squeeze(1)\n",
    "\n",
    "            return depth, photometric_confidence, depth_patchmatch\n",
    "\n",
    "\n",
    "def patchmatchnet_loss(\n",
    "    depth_patchmatch: Dict[int, List[torch.Tensor]],\n",
    "    depth_gt: Dict[str, torch.Tensor],\n",
    "    mask: Dict[str, torch.Tensor],\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Patchmatch Net loss function\n",
    "\n",
    "    Args:\n",
    "        depth_patchmatch: depth map predicted by patchmatch net\n",
    "        depth_gt: ground truth depth map\n",
    "        mask: mask for filter valid points\n",
    "\n",
    "    Returns:\n",
    "        loss: result loss value\n",
    "    \"\"\"\n",
    "    loss = 0\n",
    "    for i in range(0, 4):\n",
    "        mask_i = mask[f\"stage_{i}\"] > 0.5\n",
    "        gt_depth = depth_gt[f\"stage_{i}\"][mask_i]\n",
    "        for depth in depth_patchmatch[i]:\n",
    "            loss = loss + F.smooth_l1_loss(depth[mask_i], gt_depth, reduction=\"mean\")\n",
    "\n",
    "    return loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "PatchmatchNet uses the following main steps:\n",
    "\n",
    "1. Initialization: generate random hypotheses;\n",
    "2. Propagation: propagate hypotheses to neighbors;\n",
    "3. Evaluation: compute the matching costs for all the hypotheses and choose best solutions.\n",
    "\"\"\"\n",
    "from typing import List, Tuple\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "from .module import ConvBnReLU3D, differentiable_warping, is_empty\n",
    "\n",
    "\n",
    "class DepthInitialization(nn.Module):\n",
    "    \"\"\"Initialization Stage Class\"\"\"\n",
    "\n",
    "    def __init__(self, patchmatch_num_sample: int = 1) -> None:\n",
    "        \"\"\"Initialize method\n",
    "\n",
    "        Args:\n",
    "            patchmatch_num_sample: number of samples used in patchmatch process\n",
    "        \"\"\"\n",
    "        super(DepthInitialization, self).__init__()\n",
    "        self.patchmatch_num_sample = patchmatch_num_sample\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        min_depth: torch.Tensor,\n",
    "        max_depth: torch.Tensor,\n",
    "        height: int,\n",
    "        width: int,\n",
    "        depth_interval_scale: float,\n",
    "        device: torch.device,\n",
    "        depth: torch.Tensor = torch.empty(0),\n",
    "    ) -> torch.Tensor:\n",
    "        \"\"\"Forward function for depth initialization\n",
    "\n",
    "        Args:\n",
    "            min_depth: minimum virtual depth, (B, )\n",
    "            max_depth: maximum virtual depth, (B, )\n",
    "            height: height of depth map\n",
    "            width: width of depth map\n",
    "            depth_interval_scale: depth interval scale\n",
    "            device: device on which to place tensor\n",
    "            depth: current depth (B, 1, H, W)\n",
    "\n",
    "        Returns:\n",
    "            depth_sample: initialized sample depth map by randomization or local perturbation (B, Ndepth, H, W)\n",
    "        \"\"\"\n",
    "        batch_size = min_depth.size()[0]\n",
    "        inverse_min_depth = 1.0 / min_depth\n",
    "        inverse_max_depth = 1.0 / max_depth\n",
    "        if is_empty(depth):\n",
    "            # first iteration of Patchmatch on stage 3, sample in the inverse depth range\n",
    "            # divide the range into several intervals and sample in each of them\n",
    "            patchmatch_num_sample = 48\n",
    "            # [B,Ndepth,H,W]\n",
    "            depth_sample = torch.rand(\n",
    "                size=(batch_size, patchmatch_num_sample, height, width), device=device\n",
    "            ) + torch.arange(start=0, end=patchmatch_num_sample, step=1, device=device).view(\n",
    "                1, patchmatch_num_sample, 1, 1\n",
    "            )\n",
    "\n",
    "            depth_sample = inverse_max_depth.view(batch_size, 1, 1, 1) + depth_sample / patchmatch_num_sample * (\n",
    "                inverse_min_depth.view(batch_size, 1, 1, 1) - inverse_max_depth.view(batch_size, 1, 1, 1)\n",
    "            )\n",
    "\n",
    "            return 1.0 / depth_sample\n",
    "\n",
    "        elif self.patchmatch_num_sample == 1:\n",
    "            return depth.detach()\n",
    "        else:\n",
    "            # other Patchmatch, local perturbation is performed based on previous result\n",
    "            # uniform samples in an inversed depth range\n",
    "            depth_sample = (\n",
    "                torch.arange(-self.patchmatch_num_sample // 2, self.patchmatch_num_sample // 2, 1, device=device)\n",
    "                .view(1, self.patchmatch_num_sample, 1, 1).repeat(batch_size, 1, height, width).float()\n",
    "            )\n",
    "            inverse_depth_interval = (inverse_min_depth - inverse_max_depth) * depth_interval_scale\n",
    "            inverse_depth_interval = inverse_depth_interval.view(batch_size, 1, 1, 1)\n",
    "\n",
    "            depth_sample = 1.0 / depth.detach() + inverse_depth_interval * depth_sample\n",
    "\n",
    "            depth_clamped = []\n",
    "            del depth\n",
    "            for k in range(batch_size):\n",
    "                depth_clamped.append(\n",
    "                    torch.clamp(depth_sample[k], min=inverse_max_depth[k], max=inverse_min_depth[k]).unsqueeze(0)\n",
    "                )\n",
    "\n",
    "            return 1.0 / torch.cat(depth_clamped, dim=0)\n",
    "\n",
    "\n",
    "class Propagation(nn.Module):\n",
    "    \"\"\" Propagation module implementation\"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        \"\"\"Initialize method\"\"\"\n",
    "        super(Propagation, self).__init__()\n",
    "\n",
    "    def forward(self, depth_sample: torch.Tensor, grid: torch.Tensor) -> torch.Tensor:\n",
    "        # [B,D,H,W]\n",
    "        \"\"\"Forward method of adaptive propagation\n",
    "\n",
    "        Args:\n",
    "            depth_sample: sample depth map, in shape of [batch, num_depth, height, width],\n",
    "            grid: 2D grid for bilinear gridding, in shape of [batch, neighbors*H, W, 2]\n",
    "\n",
    "        Returns:\n",
    "            propagate depth: sorted propagate depth map [batch, num_depth+num_neighbors, height, width]\n",
    "        \"\"\"\n",
    "        batch, num_depth, height, width = depth_sample.size()\n",
    "        num_neighbors = grid.size()[1] // height\n",
    "        propagate_depth_sample = F.grid_sample(\n",
    "            depth_sample[:, num_depth // 2, :, :].unsqueeze(1),\n",
    "            grid,\n",
    "            mode=\"bilinear\",\n",
    "            padding_mode=\"border\",\n",
    "            align_corners=False\n",
    "        ).view(batch, num_neighbors, height, width)\n",
    "        return torch.sort(torch.cat((depth_sample, propagate_depth_sample), dim=1), dim=1)[0]\n",
    "\n",
    "\n",
    "class Evaluation(nn.Module):\n",
    "    \"\"\"Evaluation module for adaptive evaluation step in Learning-based Patchmatch\n",
    "    Used to compute the matching costs for all the hypotheses and choose best solutions.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, G: int = 8) -> None:\n",
    "        \"\"\"Initialize method\n",
    "\n",
    "        Args:\n",
    "            G: the feature channels of input will be divided evenly into G groups\n",
    "        \"\"\"\n",
    "        super(Evaluation, self).__init__()\n",
    "\n",
    "        self.G = G\n",
    "        # Estimates the pixel-wise weight of each source view from group-wise similarity.\n",
    "        self.pixel_wise_net = PixelwiseNet(self.G)\n",
    "        # LogSoftmax + exp (applied in forward) is a numerically stable softmax over depth.\n",
    "        self.softmax = nn.LogSoftmax(dim=1)\n",
    "        self.similarity_net = SimilarityNet(self.G)\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        ref_feature: torch.Tensor,\n",
    "        src_features: List[torch.Tensor],\n",
    "        ref_proj: torch.Tensor,\n",
    "        src_projs: List[torch.Tensor],\n",
    "        depth_sample: torch.Tensor,\n",
    "        grid: torch.Tensor,\n",
    "        weight: torch.Tensor,\n",
    "        view_weights: torch.Tensor = torch.empty(0),\n",
    "        is_inverse: bool = False\n",
    "    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n",
    "        \"\"\"Forward method for adaptive evaluation\n",
    "\n",
    "        Args:\n",
    "            ref_feature: feature from reference view, (B, C, H, W)\n",
    "            src_features: features from (Nview-1) source views, (Nview-1) * (B, C, H, W), where Nview is the number of\n",
    "                input images (or views) of PatchmatchNet\n",
    "            ref_proj: projection matrix of reference view, (B, 4, 4)\n",
    "            src_projs: source matrices of source views, (Nview-1) * (B, 4, 4), where Nview is the number of input\n",
    "                images (or views) of PatchmatchNet\n",
    "            depth_sample: sample depth map, (B,Ndepth,H,W)\n",
    "            grid: grid, (B, evaluate_neighbors*H, W, 2)\n",
    "            weight: weight, (B,Ndepth,1,H,W)\n",
    "            view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W),\n",
    "                Nview-1 represents the number of source views; empty tensor means \"estimate them now\"\n",
    "            is_inverse: Flag for inverse depth regression\n",
    "\n",
    "        Returns:\n",
    "            depth_sample: expectation of depth sample, (B,H,W)\n",
    "            score: probability map, (B,Ndepth,H,W)\n",
    "            view_weights: optional, Tensor to store weights of source views, in shape of (B,Nview-1,H,W),\n",
    "                Nview-1 represents the number of source views\n",
    "        \"\"\"\n",
    "        batch, feature_channel, height, width = ref_feature.size()\n",
    "        device = ref_feature.device\n",
    "\n",
    "        num_depth = depth_sample.size()[1]\n",
    "        assert (\n",
    "            len(src_features) == len(src_projs)\n",
    "        ), \"Patchmatch Evaluation: Different number of images and projection matrices\"\n",
    "        if not is_empty(view_weights):\n",
    "            assert (\n",
    "                len(src_features) == view_weights.size()[1]\n",
    "            ), \"Patchmatch Evaluation: Different number of images and view weights\"\n",
    "\n",
    "        # Start from a small epsilon so the later normalization never divides by zero.\n",
    "        pixel_wise_weight_sum = 1e-5 * torch.ones((batch, 1, 1, height, width), dtype=torch.float32, device=device)\n",
    "        ref_feature = ref_feature.view(batch, self.G, feature_channel // self.G, 1, height, width)\n",
    "        similarity_sum = torch.zeros((batch, self.G, num_depth, height, width), dtype=torch.float32, device=device)\n",
    "\n",
    "        i = 0\n",
    "        view_weights_list = []\n",
    "        for src_feature, src_proj in zip(src_features, src_projs):\n",
    "            # Warp the source feature onto the reference view at every depth hypothesis.\n",
    "            warped_feature = differentiable_warping(\n",
    "                src_feature, src_proj, ref_proj, depth_sample\n",
    "            ).view(batch, self.G, feature_channel // self.G, num_depth, height, width)\n",
    "            # group-wise correlation\n",
    "            similarity = (warped_feature * ref_feature).mean(2)\n",
    "            # pixel-wise view weight\n",
    "            if is_empty(view_weights):\n",
    "                view_weight = self.pixel_wise_net(similarity)\n",
    "                view_weights_list.append(view_weight)\n",
    "            else:\n",
    "                # reuse the pixel-wise view weight from first iteration of Patchmatch on stage 3\n",
    "                view_weight = view_weights[:, i].unsqueeze(1)  # [B,1,H,W]\n",
    "                i = i + 1\n",
    "\n",
    "            similarity_sum += similarity * view_weight.unsqueeze(1)\n",
    "            pixel_wise_weight_sum += view_weight.unsqueeze(1)\n",
    "\n",
    "        # aggregated matching cost across all the source views\n",
    "        # (in-place division reuses the accumulator buffer)\n",
    "        similarity = similarity_sum.div_(pixel_wise_weight_sum)  # [B, G, Ndepth, H, W]\n",
    "        # adaptive spatial cost aggregation (collapses the group dimension)\n",
    "        score = self.similarity_net(similarity, grid, weight)  # [B,Ndepth,H,W]\n",
    "        # apply softmax to get probability\n",
    "        score = torch.exp(self.softmax(score))\n",
    "\n",
    "        if is_empty(view_weights):\n",
    "            view_weights = torch.cat(view_weights_list, dim=1)  # [B,Nview-1,H,W]\n",
    "\n",
    "        if is_inverse:\n",
    "            # depth regression: inverse depth regression\n",
    "            depth_index = torch.arange(0, num_depth, 1, device=device).view(1, num_depth, 1, 1)\n",
    "            depth_index = torch.sum(depth_index * score, dim=1)\n",
    "\n",
    "            inverse_min_depth = 1.0 / depth_sample[:, -1, :, :]\n",
    "            inverse_max_depth = 1.0 / depth_sample[:, 0, :, :]\n",
    "            # Interpolate linearly in inverse-depth space, then invert back to depth.\n",
    "            depth_sample = inverse_max_depth + depth_index / (num_depth - 1) * (inverse_min_depth - inverse_max_depth)\n",
    "            depth_sample = 1.0 / depth_sample\n",
    "        else:\n",
    "            # depth regression: expectation\n",
    "            depth_sample = torch.sum(depth_sample * score, dim=1)\n",
    "\n",
    "        return depth_sample, score, view_weights.detach()\n",
    "\n",
    "\n",
    "class PatchMatch(nn.Module):\n",
    "    \"\"\"Patchmatch module\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        propagation_out_range: int = 2,\n",
    "        patchmatch_iteration: int = 2,\n",
    "        patchmatch_num_sample: int = 16,\n",
    "        patchmatch_interval_scale: float = 0.025,\n",
    "        num_feature: int = 64,\n",
    "        G: int = 8,\n",
    "        propagate_neighbors: int = 16,\n",
    "        evaluate_neighbors: int = 9,\n",
    "        stage: int = 3,\n",
    "    ) -> None:\n",
    "        \"\"\"Initialize method\n",
    "\n",
    "        Args:\n",
    "            propagation_out_range: range of propagation out,\n",
    "            patchmatch_iteration: number of iterations in patchmatch,\n",
    "            patchmatch_num_sample: number of samples in patchmatch,\n",
    "            patchmatch_interval_scale: interval scale,\n",
    "            num_feature: number of features,\n",
    "            G: the feature channels of input will be divided evenly into G groups,\n",
    "            propagate_neighbors: number of neighbors to be sampled in propagation,\n",
    "            evaluate_neighbors: number of neighbors to be sampled in evaluation,\n",
    "            stage: number of stage,\n",
    "        \"\"\"\n",
    "        super(PatchMatch, self).__init__()\n",
    "        self.patchmatch_iteration = patchmatch_iteration\n",
    "        self.patchmatch_interval_scale = patchmatch_interval_scale\n",
    "        self.propa_num_feature = num_feature\n",
    "        # group wise correlation\n",
    "        self.G = G\n",
    "        self.stage = stage\n",
    "        self.dilation = propagation_out_range\n",
    "        self.propagate_neighbors = propagate_neighbors\n",
    "        self.evaluate_neighbors = evaluate_neighbors\n",
    "        # Using dictionary instead of Enum since TorchScript cannot recognize and export it correctly\n",
    "        self.grid_type = {\"propagation\": 1, \"evaluation\": 2}\n",
    "\n",
    "        self.depth_initialization = DepthInitialization(patchmatch_num_sample)\n",
    "        self.propagation = Propagation()\n",
    "        self.evaluation = Evaluation(self.G)\n",
    "        # adaptive propagation: last iteration on stage 1 does not have propagation,\n",
    "        # but we still define this for TorchScript export compatibility\n",
    "        self.propa_conv = nn.Conv2d(\n",
    "            in_channels=self.propa_num_feature,\n",
    "            out_channels=max(2 * self.propagate_neighbors, 1),\n",
    "            kernel_size=3,\n",
    "            stride=1,\n",
    "            padding=self.dilation,\n",
    "            dilation=self.dilation,\n",
    "            bias=True,\n",
    "        )\n",
    "        # Zero-init so the learned offsets start as a regular (un-deformed) neighborhood.\n",
    "        nn.init.constant_(self.propa_conv.weight, 0.0)\n",
    "        nn.init.constant_(self.propa_conv.bias, 0.0)\n",
    "\n",
    "        # adaptive spatial cost aggregation (adaptive evaluation)\n",
    "        self.eval_conv = nn.Conv2d(\n",
    "            in_channels=self.propa_num_feature,\n",
    "            out_channels=2 * self.evaluate_neighbors,\n",
    "            kernel_size=3,\n",
    "            stride=1,\n",
    "            padding=self.dilation,\n",
    "            dilation=self.dilation,\n",
    "            bias=True,\n",
    "        )\n",
    "        nn.init.constant_(self.eval_conv.weight, 0.0)\n",
    "        nn.init.constant_(self.eval_conv.bias, 0.0)\n",
    "        self.feature_weight_net = FeatureWeightNet(self.evaluate_neighbors, self.G)\n",
    "\n",
    "    def get_grid(\n",
    "        self, grid_type: int, batch: int, height: int, width: int, offset: torch.Tensor, device: torch.device\n",
    "    ) -> torch.Tensor:\n",
    "        \"\"\"Compute the offset for adaptive propagation or spatial cost aggregation in adaptive evaluation\n",
    "\n",
    "        Args:\n",
    "            grid_type: type of grid - propagation (1) or evaluation (2)\n",
    "            batch: batch size\n",
    "            height: grid height\n",
    "            width: grid width\n",
    "            offset: grid offset\n",
    "            device: device on which to place tensor\n",
    "\n",
    "        Returns:\n",
    "            generated grid: in the shape of [batch, num_neighbors*height, width, 2], where\n",
    "                num_neighbors is the neighbor count of the chosen grid type\n",
    "        \"\"\"\n",
    "\n",
    "        if grid_type == self.grid_type[\"propagation\"]:\n",
    "            if self.propagate_neighbors == 4:  # if 4 neighbors to be sampled in propagation\n",
    "                original_offset = [[-self.dilation, 0], [0, -self.dilation], [0, self.dilation], [self.dilation, 0]]\n",
    "            elif self.propagate_neighbors == 8:  # if 8 neighbors to be sampled in propagation\n",
    "                original_offset = [\n",
    "                    [-self.dilation, -self.dilation],\n",
    "                    [-self.dilation, 0],\n",
    "                    [-self.dilation, self.dilation],\n",
    "                    [0, -self.dilation],\n",
    "                    [0, self.dilation],\n",
    "                    [self.dilation, -self.dilation],\n",
    "                    [self.dilation, 0],\n",
    "                    [self.dilation, self.dilation],\n",
    "                ]\n",
    "            elif self.propagate_neighbors == 16:  # if 16 neighbors to be sampled in propagation\n",
    "                original_offset = [\n",
    "                    [-self.dilation, -self.dilation],\n",
    "                    [-self.dilation, 0],\n",
    "                    [-self.dilation, self.dilation],\n",
    "                    [0, -self.dilation],\n",
    "                    [0, self.dilation],\n",
    "                    [self.dilation, -self.dilation],\n",
    "                    [self.dilation, 0],\n",
    "                    [self.dilation, self.dilation],\n",
    "                ]\n",
    "                # Add a second ring at double the offset; range() snapshots the original\n",
    "                # length, so the appended entries are not themselves re-doubled.\n",
    "                for i in range(len(original_offset)):\n",
    "                    offset_x, offset_y = original_offset[i]\n",
    "                    original_offset.append([2 * offset_x, 2 * offset_y])\n",
    "            else:\n",
    "                raise NotImplementedError\n",
    "        elif grid_type == self.grid_type[\"evaluation\"]:\n",
    "            dilation = self.dilation - 1  # dilation of evaluation is a little smaller than propagation\n",
    "            if self.evaluate_neighbors == 9:  # if 9 neighbors to be sampled in evaluation\n",
    "                original_offset = [\n",
    "                    [-dilation, -dilation],\n",
    "                    [-dilation, 0],\n",
    "                    [-dilation, dilation],\n",
    "                    [0, -dilation],\n",
    "                    [0, 0],\n",
    "                    [0, dilation],\n",
    "                    [dilation, -dilation],\n",
    "                    [dilation, 0],\n",
    "                    [dilation, dilation],\n",
    "                ]\n",
    "            elif self.evaluate_neighbors == 17:  # if 17 neighbors to be sampled in evaluation\n",
    "                original_offset = [\n",
    "                    [-dilation, -dilation],\n",
    "                    [-dilation, 0],\n",
    "                    [-dilation, dilation],\n",
    "                    [0, -dilation],\n",
    "                    [0, 0],\n",
    "                    [0, dilation],\n",
    "                    [dilation, -dilation],\n",
    "                    [dilation, 0],\n",
    "                    [dilation, dilation],\n",
    "                ]\n",
    "                # Add a second ring at double the offset (the center entry is skipped).\n",
    "                for i in range(len(original_offset)):\n",
    "                    offset_x, offset_y = original_offset[i]\n",
    "                    if offset_x != 0 or offset_y != 0:\n",
    "                        original_offset.append([2 * offset_x, 2 * offset_y])\n",
    "            else:\n",
    "                raise NotImplementedError\n",
    "        else:\n",
    "            raise NotImplementedError\n",
    "\n",
    "        with torch.no_grad():\n",
    "            y_grid, x_grid = torch.meshgrid(\n",
    "                [\n",
    "                    torch.arange(0, height, dtype=torch.float32, device=device),\n",
    "                    torch.arange(0, width, dtype=torch.float32, device=device),\n",
    "                ]\n",
    "            )\n",
    "            y_grid, x_grid = y_grid.contiguous().view(height * width), x_grid.contiguous().view(height * width)\n",
    "            xy = torch.stack((x_grid, y_grid))  # [2, H*W]\n",
    "            xy = torch.unsqueeze(xy, 0).repeat(batch, 1, 1)  # [B, 2, H*W]\n",
    "\n",
    "        xy_list = []\n",
    "        for i in range(len(original_offset)):\n",
    "            original_offset_y, original_offset_x = original_offset[i]\n",
    "            # Fixed neighborhood offset plus the learned per-pixel offset.\n",
    "            offset_x = original_offset_x + offset[:, 2 * i, :].unsqueeze(1)\n",
    "            offset_y = original_offset_y + offset[:, 2 * i + 1, :].unsqueeze(1)\n",
    "            xy_list.append((xy + torch.cat((offset_x, offset_y), dim=1)).unsqueeze(2))\n",
    "\n",
    "        xy = torch.cat(xy_list, dim=2)  # [B, 2, num_neighbors, H*W]\n",
    "\n",
    "        del xy_list\n",
    "        del x_grid\n",
    "        del y_grid\n",
    "\n",
    "        # Normalize pixel coordinates to [-1, 1] as expected by F.grid_sample.\n",
    "        x_normalized = xy[:, 0, :, :] / ((width - 1) / 2) - 1\n",
    "        y_normalized = xy[:, 1, :, :] / ((height - 1) / 2) - 1\n",
    "        del xy\n",
    "        grid = torch.stack((x_normalized, y_normalized), dim=3)  # [B, num_neighbors, H*W, 2]\n",
    "        del x_normalized\n",
    "        del y_normalized\n",
    "        return grid.view(batch, len(original_offset) * height, width, 2)\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        ref_feature: torch.Tensor,\n",
    "        src_features: List[torch.Tensor],\n",
    "        ref_proj: torch.Tensor,\n",
    "        src_projs: List[torch.Tensor],\n",
    "        depth_min: torch.Tensor,\n",
    "        depth_max: torch.Tensor,\n",
    "        depth: torch.Tensor,\n",
    "        view_weights: torch.Tensor = torch.empty(0),\n",
    "    ) -> Tuple[List[torch.Tensor], torch.Tensor, torch.Tensor]:\n",
    "        \"\"\"Forward method for PatchMatch\n",
    "\n",
    "        Args:\n",
    "            ref_feature: feature from reference view, (B, C, H, W)\n",
    "            src_features: features from (Nview-1) source views, (Nview-1) * (B, C, H, W), where Nview is the number of\n",
    "                input images (or views) of PatchmatchNet\n",
    "            ref_proj: projection matrix of reference view, (B, 4, 4)\n",
    "            src_projs: source matrices of source views, (Nview-1) * (B, 4, 4), where Nview is the number of input\n",
    "                images (or views) of PatchmatchNet\n",
    "            depth_min: minimum virtual depth, (B,)\n",
    "            depth_max: maximum virtual depth, (B,)\n",
    "            depth: current depth map, (B,1,H,W) or None\n",
    "            view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W),\n",
    "                Nview-1 represents the number of source views\n",
    "\n",
    "        Returns:\n",
    "            depth_samples: list of depth maps from each patchmatch iteration, Niter * (B,1,H,W)\n",
    "            score: evaluated probabilities, (B,Ndepth,H,W)\n",
    "            view_weights: Tensor to store weights of source views, in shape of (B,Nview-1,H,W),\n",
    "                Nview-1 represents the number of source views\n",
    "        \"\"\"\n",
    "        score = torch.empty(0)\n",
    "        depth_samples = []\n",
    "\n",
    "        device = ref_feature.device\n",
    "        batch, _, height, width = ref_feature.size()\n",
    "\n",
    "        # the learned additional 2D offsets for adaptive propagation\n",
    "        propa_grid = torch.empty(0)\n",
    "        if self.propagate_neighbors > 0 and not (self.stage == 1 and self.patchmatch_iteration == 1):\n",
    "            # last iteration on stage 1 does not have propagation (photometric consistency filtering)\n",
    "            propa_offset = self.propa_conv(ref_feature).view(batch, 2 * self.propagate_neighbors, height * width)\n",
    "            propa_grid = self.get_grid(self.grid_type[\"propagation\"], batch, height, width, propa_offset, device)\n",
    "\n",
    "        # the learned additional 2D offsets for adaptive spatial cost aggregation (adaptive evaluation)\n",
    "        eval_offset = self.eval_conv(ref_feature).view(batch, 2 * self.evaluate_neighbors, height * width)\n",
    "        eval_grid = self.get_grid(self.grid_type[\"evaluation\"], batch, height, width, eval_offset, device)\n",
    "\n",
    "        # [B, evaluate_neighbors, H, W]\n",
    "        feature_weight = self.feature_weight_net(ref_feature.detach(), eval_grid)\n",
    "        depth_sample = depth\n",
    "        del depth\n",
    "\n",
    "        # NOTE(review): 'iter' shadows the Python builtin of the same name; harmless here.\n",
    "        for iter in range(1, self.patchmatch_iteration + 1):\n",
    "            is_inverse = self.stage == 1 and iter == self.patchmatch_iteration\n",
    "\n",
    "            # first iteration on stage 3, random initialization (depth is empty), no adaptive propagation\n",
    "            # subsequent iterations, local perturbation based on previous result, [B,Ndepth,H,W]\n",
    "            depth_sample = self.depth_initialization(\n",
    "                min_depth=depth_min,\n",
    "                max_depth=depth_max,\n",
    "                height=height,\n",
    "                width=width,\n",
    "                depth_interval_scale=self.patchmatch_interval_scale,\n",
    "                device=device,\n",
    "                depth=depth_sample\n",
    "            )\n",
    "\n",
    "            # adaptive propagation\n",
    "            if self.propagate_neighbors > 0 and not (self.stage == 1 and iter == self.patchmatch_iteration):\n",
    "                # last iteration on stage 1 does not have propagation (photometric consistency filtering)\n",
    "                depth_sample = self.propagation(depth_sample=depth_sample, grid=propa_grid)\n",
    "\n",
    "            # weights for adaptive spatial cost aggregation in adaptive evaluation, [B,Ndepth,N_neighbors_eval,H,W]\n",
    "            weight = depth_weight(\n",
    "                depth_sample=depth_sample.detach(),\n",
    "                depth_min=depth_min,\n",
    "                depth_max=depth_max,\n",
    "                grid=eval_grid.detach(),\n",
    "                patchmatch_interval_scale=self.patchmatch_interval_scale,\n",
    "                neighbors=self.evaluate_neighbors,\n",
    "            ) * feature_weight.unsqueeze(1)\n",
    "            weight = weight / torch.sum(weight, dim=2).unsqueeze(2)  # [B,Ndepth,1,H,W]\n",
    "\n",
    "            # evaluation, outputs regressed depth map and pixel-wise view weights which will\n",
    "            # be used for subsequent iterations\n",
    "            depth_sample, score, view_weights = self.evaluation(\n",
    "                ref_feature=ref_feature,\n",
    "                src_features=src_features,\n",
    "                ref_proj=ref_proj,\n",
    "                src_projs=src_projs,\n",
    "                depth_sample=depth_sample,\n",
    "                grid=eval_grid,\n",
    "                weight=weight,\n",
    "                view_weights=view_weights,\n",
    "                is_inverse=is_inverse,\n",
    "            )\n",
    "\n",
    "            depth_sample = depth_sample.unsqueeze(1)\n",
    "            depth_samples.append(depth_sample)\n",
    "\n",
    "        return depth_samples, score, view_weights\n",
    "\n",
    "\n",
    "class SimilarityNet(nn.Module):\n",
    "    \"\"\"Similarity Net, used in Evaluation module (adaptive evaluation step)\n",
    "    1. Do 1x1x1 convolution on aggregated cost [B, G, Ndepth, H, W] among all the source views,\n",
    "        where G is the number of groups\n",
    "    2. Perform adaptive spatial cost aggregation to get final cost (scores)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, G: int) -> None:\n",
    "        \"\"\"Initialize method\n",
    "\n",
    "        Args:\n",
    "            G: the feature channels of input will be divided evenly into G groups\n",
    "        \"\"\"\n",
    "        super(SimilarityNet, self).__init__()\n",
    "\n",
    "        # 1x1x1 convolution tower that collapses the G similarity groups to one cost channel.\n",
    "        self.conv0 = ConvBnReLU3D(in_channels=G, out_channels=16, kernel_size=1, stride=1, pad=0)\n",
    "        self.conv1 = ConvBnReLU3D(in_channels=16, out_channels=8, kernel_size=1, stride=1, pad=0)\n",
    "        self.similarity = nn.Conv3d(in_channels=8, out_channels=1, kernel_size=1, stride=1, padding=0)\n",
    "\n",
    "    def forward(self, x1: torch.Tensor, grid: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"Forward method for SimilarityNet\n",
    "\n",
    "        Args:\n",
    "            x1: [B, G, Ndepth, H, W], where G is the number of groups, aggregated cost among all the source views with\n",
    "                pixel-wise view weight\n",
    "            grid: position of sampling points in adaptive spatial cost aggregation, (B, evaluate_neighbors*H, W, 2)\n",
    "            weight: weight of sampling points in adaptive spatial cost aggregation, combination of\n",
    "                feature weight and depth weight, [B,Ndepth,1,H,W]\n",
    "\n",
    "        Returns:\n",
    "            final cost: in the shape of [B,Ndepth,H,W]\n",
    "        \"\"\"\n",
    "\n",
    "        batch, G, num_depth, height, width = x1.size()\n",
    "        num_neighbors = grid.size()[1] // height\n",
    "\n",
    "        # Reduce groups to a single cost channel, then gather it at each neighbor position.\n",
    "        # [B,Ndepth,num_neighbors,H,W]\n",
    "        x1 = F.grid_sample(\n",
    "            input=self.similarity(self.conv1(self.conv0(x1))).squeeze(1),\n",
    "            grid=grid,\n",
    "            mode=\"bilinear\",\n",
    "            padding_mode=\"border\",\n",
    "            align_corners=False\n",
    "        ).view(batch, num_depth, num_neighbors, height, width)\n",
    "\n",
    "        # Weighted sum over the sampled neighbors yields the aggregated score.\n",
    "        return torch.sum(x1 * weight, dim=2)\n",
    "\n",
    "\n",
    "class FeatureWeightNet(nn.Module):\n",
    "    \"\"\"FeatureWeight Net: Called at the beginning of patchmatch, to calculate feature weights based on similarity of\n",
    "    features of sampling points and center pixel. The feature weights are used to implement adaptive spatial\n",
    "    cost aggregation.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, neighbors: int = 9, G: int = 8) -> None:\n",
    "        \"\"\"Initialize method\n",
    "\n",
    "        Args:\n",
    "            neighbors: number of neighbors to be sampled\n",
    "            G: the feature channels of input will be divided evenly into G groups\n",
    "        \"\"\"\n",
    "        super(FeatureWeightNet, self).__init__()\n",
    "        self.neighbors = neighbors\n",
    "        self.G = G\n",
    "\n",
    "        # 1x1x1 convolution tower collapsing the G correlation groups to a single channel.\n",
    "        self.conv0 = ConvBnReLU3D(in_channels=G, out_channels=16, kernel_size=1, stride=1, pad=0)\n",
    "        self.conv1 = ConvBnReLU3D(in_channels=16, out_channels=8, kernel_size=1, stride=1, pad=0)\n",
    "        self.similarity = nn.Conv3d(in_channels=8, out_channels=1, kernel_size=1, stride=1, padding=0)\n",
    "\n",
    "        self.output = nn.Sigmoid()\n",
    "\n",
    "    def forward(self, ref_feature: torch.Tensor, grid: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"Forward method for FeatureWeightNet\n",
    "\n",
    "        Args:\n",
    "            ref_feature: reference feature map, [B,C,H,W]\n",
    "            grid: position of sampling points in adaptive spatial cost aggregation, (B, evaluate_neighbors*H, W, 2)\n",
    "\n",
    "        Returns:\n",
    "            weight based on similarity of features of sampling points and center pixel, [B,Neighbor,H,W]\n",
    "        \"\"\"\n",
    "        batch, feature_channel, height, width = ref_feature.size()\n",
    "\n",
    "        # Sample the reference feature at every neighbor position and split channels into groups.\n",
    "        weight = F.grid_sample(\n",
    "            ref_feature, grid, mode=\"bilinear\", padding_mode=\"border\", align_corners=False\n",
    "        ).view(batch, self.G, feature_channel // self.G, self.neighbors, height, width)\n",
    "\n",
    "        # [B,G,C//G,1,H,W]\n",
    "        ref_feature = ref_feature.view(batch, self.G, feature_channel // self.G, height, width).unsqueeze(3)\n",
    "        # Group-wise correlation between each neighbor and the center pixel, [B,G,Neighbor,H,W]\n",
    "        weight = (weight * ref_feature).mean(2)\n",
    "        # [B,Neighbor,H,W]\n",
    "        return self.output(self.similarity(self.conv1(self.conv0(weight))).squeeze(1))\n",
    "\n",
    "\n",
    "def depth_weight(\n",
    "    depth_sample: torch.Tensor,\n",
    "    depth_min: torch.Tensor,\n",
    "    depth_max: torch.Tensor,\n",
    "    grid: torch.Tensor,\n",
    "    patchmatch_interval_scale: float,\n",
    "    neighbors: int,\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Calculate depth weight\n",
    "    1. Adaptive spatial cost aggregation\n",
    "    2. Weight based on depth difference of sampling points and center pixel\n",
    "\n",
    "    Args:\n",
    "        depth_sample: sample depth map, (B,Ndepth,H,W)\n",
    "        depth_min: minimum virtual depth, (B,)\n",
    "        depth_max: maximum virtual depth, (B,)\n",
    "        grid: position of sampling points in adaptive spatial cost aggregation, (B, neighbors*H, W, 2)\n",
    "        patchmatch_interval_scale: patchmatch interval scale,\n",
    "        neighbors: number of neighbors to be sampled in evaluation\n",
    "\n",
    "    Returns:\n",
    "        depth weight\n",
    "    \"\"\"\n",
    "    batch, num_depth, height, width = depth_sample.size()\n",
    "    inverse_depth_min = 1.0 / depth_min\n",
    "    inverse_depth_max = 1.0 / depth_max\n",
    "\n",
    "    # normalization\n",
    "    x = 1.0 / depth_sample\n",
    "    del depth_sample\n",
    "    x = (x - inverse_depth_max.view(batch, 1, 1, 1)) / (inverse_depth_min - inverse_depth_max).view(batch, 1, 1, 1)\n",
    "\n",
    "    x1 = F.grid_sample(\n",
    "        x, grid, mode=\"bilinear\", padding_mode=\"border\", align_corners=False\n",
    "    ).view(batch, num_depth, neighbors, height, width)\n",
    "    del grid\n",
    "\n",
    "    # [B,Ndepth,N_neighbors,H,W]\n",
    "    x1 = torch.abs(x1 - x.unsqueeze(2)) / patchmatch_interval_scale\n",
    "    del x\n",
    "\n",
    "    # sigmoid output approximate to 1 when x=4\n",
    "    return torch.sigmoid(4.0 - 2.0 * x1.clamp(min=0, max=4)).detach()\n",
    "\n",
    "\n",
    "class PixelwiseNet(nn.Module):\n",
    "    \"\"\"Pixel-wise view weight network.\n",
    "\n",
    "    A small tower of 1x1x1 convolutions with a sigmoid output that turns the initial\n",
    "    group-wise similarity volume into a single confidence value in (0, 1) per pixel.\n",
    "\n",
    "    1. Used in the adaptive evaluation step.\n",
    "    2. The similarity input is computed from the reference feature and each source\n",
    "       feature warped by differentiable_warping.\n",
    "    3. The weight is estimated in the first iteration of Patchmatch and then kept\n",
    "       fixed in the matching cost computation.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, G: int) -> None:\n",
    "        \"\"\"Initialize method\n",
    "\n",
    "        Args:\n",
    "            G: the feature channels of input will be divided evenly into G groups\n",
    "        \"\"\"\n",
    "        super(PixelwiseNet, self).__init__()\n",
    "        self.conv0 = ConvBnReLU3D(in_channels=G, out_channels=16, kernel_size=1, stride=1, pad=0)\n",
    "        self.conv1 = ConvBnReLU3D(in_channels=16, out_channels=8, kernel_size=1, stride=1, pad=0)\n",
    "        self.conv2 = nn.Conv3d(in_channels=8, out_channels=1, kernel_size=1, stride=1, padding=0)\n",
    "        self.output = nn.Sigmoid()\n",
    "\n",
    "    def forward(self, x1: torch.Tensor) -> torch.Tensor:\n",
    "        \"\"\"Forward method for PixelwiseNet\n",
    "\n",
    "        Args:\n",
    "            x1: group-wise similarity volume, [B, G, Ndepth, H, W], where G is the number of groups\n",
    "\n",
    "        Returns:\n",
    "            pixel-wise view weight in (0, 1), [B, 1, H, W]\n",
    "        \"\"\"\n",
    "        # Collapse the G groups to one channel, then drop it: [B, Ndepth, H, W].\n",
    "        cost = self.conv2(self.conv1(self.conv0(x1))).squeeze(1)\n",
    "        # Probability per depth hypothesis, keep the most confident one: [B, 1, H, W].\n",
    "        return self.output(cost).max(dim=1)[0].unsqueeze(1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "from typing import Any, Callable, Union, Dict\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "import torchvision.utils as vutils\n",
    "import torch\n",
    "import torch.utils.tensorboard as tb\n",
    "\n",
    "\n",
    "def print_args(args: Any) -> None:\n",
    "    \"\"\"Utility to print parsed arguments, one \"name / value / type\" row per argument\n",
    "\n",
    "    Args:\n",
    "        args: arguments namespace to print out\n",
    "    \"\"\"\n",
    "    print(\"################################  args  ################################\")\n",
    "    for k, v in args.__dict__.items():\n",
    "        print(\"{0: <10}\\t{1: <30}\\t{2: <20}\".format(k, str(v), str(type(v))))\n",
    "    print(\"########################################################################\")\n",
    "\n",
    "\n",
    "def make_nograd_func(func: Callable) -> Callable:\n",
    "    \"\"\"Utilities to make function no gradient\n",
    "\n",
    "    Args:\n",
    "        func: input function\n",
    "\n",
    "    Returns:\n",
    "        no gradient function wrapper for input function\n",
    "    \"\"\"\n",
    "\n",
    "    def wrapper(*f_args, **f_kwargs):\n",
    "        with torch.no_grad():\n",
    "            ret = func(*f_args, **f_kwargs)\n",
    "        return ret\n",
    "\n",
    "    return wrapper\n",
    "\n",
    "\n",
    "def make_recursive_func(func: Callable) -> Callable:\n",
    "    \"\"\"Convert a function into recursive style to handle nested dict/list/tuple variables\n",
    "\n",
    "    Args:\n",
    "        func: input function\n",
    "\n",
    "    Returns:\n",
    "        recursive style function\n",
    "    \"\"\"\n",
    "\n",
    "    def wrapper(vars):\n",
    "        if isinstance(vars, list):\n",
    "            return [wrapper(x) for x in vars]\n",
    "        elif isinstance(vars, tuple):\n",
    "            return tuple([wrapper(x) for x in vars])\n",
    "        elif isinstance(vars, dict):\n",
    "            return {k: wrapper(v) for k, v in vars.items()}\n",
    "        else:\n",
    "            return func(vars)\n",
    "\n",
    "    return wrapper\n",
    "\n",
    "\n",
    "@make_recursive_func\n",
    "def tensor2float(vars: Any) -> float:\n",
    "    \"\"\"Convert tensor to float\"\"\"\n",
    "    if isinstance(vars, torch.Tensor):\n",
    "        # scalar tensor -> native Python float\n",
    "        return vars.data.item()\n",
    "    if isinstance(vars, float):\n",
    "        return vars\n",
    "    raise NotImplementedError(\"invalid input type {} for tensor2float\".format(type(vars)))\n",
    "\n",
    "\n",
    "@make_recursive_func\n",
    "def tensor2numpy(vars: Any) -> np.ndarray:\n",
    "    \"\"\"Convert tensor to numpy array\"\"\"\n",
    "    if isinstance(vars, torch.Tensor):\n",
    "        # detach from the graph and copy so the result does not alias tensor memory\n",
    "        return vars.detach().cpu().numpy().copy()\n",
    "    if isinstance(vars, np.ndarray):\n",
    "        return vars\n",
    "    raise NotImplementedError(\"invalid input type {} for tensor2numpy\".format(type(vars)))\n",
    "\n",
    "\n",
    "@make_recursive_func\n",
    "def tocuda(vars: Any) -> Union[str, torch.Tensor]:\n",
    "    \"\"\"Move tensors to the compute device (currently the CPU; see note below)\"\"\"\n",
    "    # NOTE(review): despite the name, this calls vars.cpu() -- the notebook appears\n",
    "    # to have been ported to CPU-only execution (model.cpu(), map_location='cpu').\n",
    "    # Confirm this is intentional before renaming or reverting to .cuda().\n",
    "    if isinstance(vars, torch.Tensor):\n",
    "        return vars.cpu()\n",
    "    elif isinstance(vars, str):\n",
    "        # strings (e.g. filenames) pass through unchanged\n",
    "        return vars\n",
    "    else:\n",
    "        raise NotImplementedError(\"invalid input type {} for tocuda\".format(type(vars)))\n",
    "\n",
    "\n",
    "def save_scalars(logger: tb.SummaryWriter, mode: str, scalar_dict: Dict[str, Any], global_step: int) -> None:\n",
    "    \"\"\"Log values stored in the scalar dictionary\n",
    "\n",
    "    Args:\n",
    "        logger: tensorboard summary writer\n",
    "        mode: mode name used in writing summaries\n",
    "        scalar_dict: python dictionary stores the key and value pairs to be recorded\n",
    "        global_step: step index where the logger should write\n",
    "    \"\"\"\n",
    "    # convert tensors to plain floats before logging\n",
    "    for key, value in tensor2float(scalar_dict).items():\n",
    "        if isinstance(value, (list, tuple)):\n",
    "            # sequences get one scalar entry per element, suffixed with its index\n",
    "            for idx, item in enumerate(value):\n",
    "                logger.add_scalar(\"{}/{}_{}\".format(mode, key, idx), item, global_step)\n",
    "        else:\n",
    "            logger.add_scalar(\"{}/{}\".format(mode, key), value, global_step)\n",
    "\n",
    "\n",
    "def save_images(logger: tb.SummaryWriter, mode: str, images_dict: Dict[str, Any], global_step: int) -> None:\n",
    "    \"\"\"Log images stored in the image dictionary\n",
    "\n",
    "    Args:\n",
    "        logger: tensorboard summary writer\n",
    "        mode: mode name used in writing summaries\n",
    "        images_dict: python dictionary stores the key and image pairs to be recorded\n",
    "        global_step: step index where the logger should write\n",
    "    \"\"\"\n",
    "    images_dict = tensor2numpy(images_dict)\n",
    "\n",
    "    def preprocess(name, img):\n",
    "        if len(img.shape) not in (3, 4):\n",
    "            raise NotImplementedError(\"invalid img shape {}:{} in save_images\".format(name, img.shape))\n",
    "        if len(img.shape) == 3:\n",
    "            # insert a channel axis for single-channel maps\n",
    "            img = img[:, np.newaxis, :, :]\n",
    "        # keep only the first sample of the batch for display\n",
    "        img = torch.from_numpy(img[:1])\n",
    "        return vutils.make_grid(img, padding=0, nrow=1, normalize=True, scale_each=True)\n",
    "\n",
    "    for key, value in images_dict.items():\n",
    "        if isinstance(value, (list, tuple)):\n",
    "            for idx, item in enumerate(value):\n",
    "                name = \"{}/{}_{}\".format(mode, key, idx)\n",
    "                logger.add_image(name, preprocess(name, item), global_step)\n",
    "        else:\n",
    "            name = \"{}/{}\".format(mode, key)\n",
    "            logger.add_image(name, preprocess(name, value), global_step)\n",
    "\n",
    "\n",
    "class DictAverageMeter:\n",
    "    \"\"\"Wrapper class for dictionary variables that require the average value\"\"\"\n",
    "\n",
    "    def __init__(self) -> None:\n",
    "        \"\"\"Initialization method\"\"\"\n",
    "        self.data: Dict[Any, float] = {}\n",
    "        self.count = 0\n",
    "\n",
    "    def update(self, new_input: Dict[Any, float]) -> None:\n",
    "        \"\"\"Update the stored dictionary with new input data\n",
    "\n",
    "        Args:\n",
    "            new_input: new data to update self.data\n",
    "        \"\"\"\n",
    "        self.count += 1\n",
    "        if len(self.data) == 0:\n",
    "            for k, v in new_input.items():\n",
    "                if not isinstance(v, float):\n",
    "                    raise NotImplementedError(\"invalid data {}: {}\".format(k, type(v)))\n",
    "                self.data[k] = v\n",
    "        else:\n",
    "            for k, v in new_input.items():\n",
    "                if not isinstance(v, float):\n",
    "                    raise NotImplementedError(\"invalid data {}: {}\".format(k, type(v)))\n",
    "                self.data[k] += v\n",
    "\n",
    "    def mean(self) -> Any:\n",
    "        \"\"\"Return the average value of values stored in self.data\"\"\"\n",
    "        return {k: v / self.count for k, v in self.data.items()}\n",
    "\n",
    "\n",
    "def compute_metrics_for_each_image(metric_func: Callable) -> Callable:\n",
    "    \"\"\"A wrapper to compute metrics for each image individually\"\"\"\n",
    "\n",
    "    def wrapper(depth_est, depth_gt, mask, *args):\n",
    "        batch_size = depth_gt.shape[0]\n",
    "        print(batch_size)\n",
    "        # if batch_size < BATCH_SIZE:\n",
    "        #     break\n",
    "        results = []\n",
    "        # compute result one by one\n",
    "        for idx in range(batch_size):\n",
    "            ret = metric_func(depth_est[idx], depth_gt[idx], mask[idx], *args)\n",
    "            results.append(ret)\n",
    "        return torch.stack(results).mean()\n",
    "\n",
    "    return wrapper\n",
    "\n",
    "\n",
    "@make_nograd_func\n",
    "@compute_metrics_for_each_image\n",
    "def Thres_metrics(\n",
    "    depth_est: torch.Tensor, depth_gt: torch.Tensor, mask: torch.Tensor, thres: Union[int, float]\n",
    ") -> torch.Tensor:\n",
    "    \"\"\"Return error rate for where absolute error is larger than threshold.\n",
    "\n",
    "    Args:\n",
    "        depth_est: estimated depth map\n",
    "        depth_gt: ground truth depth map\n",
    "        mask: mask\n",
    "        thres: threshold\n",
    "\n",
    "    Returns:\n",
    "        error rate: error rate of the depth map\n",
    "    \"\"\"\n",
    "    assert isinstance(thres, (int, float))\n",
    "    # restrict the comparison to valid (masked) pixels only\n",
    "    masked_est = depth_est[mask]\n",
    "    masked_gt = depth_gt[mask]\n",
    "    # fraction of pixels whose absolute error exceeds the threshold\n",
    "    exceeds = (masked_est - masked_gt).abs() > thres\n",
    "    return exceeds.float().mean()\n",
    "\n",
    "\n",
    "# NOTE: please do not use this to build up training loss\n",
    "@make_nograd_func\n",
    "@compute_metrics_for_each_image\n",
    "def AbsDepthError_metrics(depth_est: torch.Tensor, depth_gt: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n",
    "    \"\"\"Calculate average absolute depth error\n",
    "\n",
    "    Args:\n",
    "        depth_est: estimated depth map\n",
    "        depth_gt: ground truth depth map\n",
    "        mask: mask\n",
    "    \"\"\"\n",
    "    # mean absolute error over valid (masked) pixels only\n",
    "    valid_est = depth_est[mask]\n",
    "    valid_gt = depth_gt[mask]\n",
    "    return (valid_est - valid_gt).abs().mean()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Utilities for reading and writing images, depth maps, and auxiliary data (cams, pairs) from/to disk.\"\"\"\n",
    "\n",
    "import re\n",
    "import struct\n",
    "import sys\n",
    "from typing import Dict, List, Tuple\n",
    "\n",
    "import cv2\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "\n",
    "\n",
    "def scale_to_max_dim(image: np.ndarray, max_dim: int) -> Tuple[np.ndarray, int, int]:\n",
    "    \"\"\"Scale image to specified max dimension\n",
    "\n",
    "    Args:\n",
    "        image: the input image in original size\n",
    "        max_dim: the max dimension to scale the image down to if smaller than the actual max dimension\n",
    "\n",
    "    Returns:\n",
    "        Tuple of scaled image along with original image height and width\n",
    "    \"\"\"\n",
    "    original_height = image.shape[0]\n",
    "    original_width = image.shape[1]\n",
    "    scale = max_dim / max(original_height, original_width)\n",
    "    if 0 < scale < 1:\n",
    "        width = int(scale * original_width)\n",
    "        height = int(scale * original_height)\n",
    "        image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR)\n",
    "\n",
    "    return image, original_height, original_width\n",
    "\n",
    "\n",
    "def read_image(filename: str, max_dim: int = -1) -> Tuple[np.ndarray, int, int]:\n",
    "    \"\"\"Read image and rescale to specified max dimension (if exists)\n",
    "\n",
    "    Args:\n",
    "        filename: image input file path string\n",
    "        max_dim: max dimension to scale down the image; keep original size if -1\n",
    "\n",
    "    Returns:\n",
    "        Tuple of scaled image along with original image height and width\n",
    "    \"\"\"\n",
    "    # normalize 8-bit pixel values from 0~255 into 0~1 floats\n",
    "    np_image = np.array(Image.open(filename), dtype=np.float32) / 255.0\n",
    "    return scale_to_max_dim(np_image, max_dim)\n",
    "\n",
    "\n",
    "def save_image(filename: str, image: np.ndarray) -> None:\n",
    "    \"\"\"Save images including binary mask (bool), float (0<= val <= 1), or int (as-is)\n",
    "\n",
    "    Args:\n",
    "        filename: image output file path string\n",
    "        image: output image array\n",
    "    \"\"\"\n",
    "    if image.dtype == bool:\n",
    "        # boolean masks become black/white 8-bit images\n",
    "        out = image.astype(np.uint8) * 255\n",
    "    elif image.dtype in (np.float32, np.float64):\n",
    "        # floats are assumed to lie in [0, 1]; rescale to 8-bit\n",
    "        out = (image * 255).astype(np.uint8)\n",
    "    else:\n",
    "        out = image.astype(np.uint8)\n",
    "    Image.fromarray(out).save(filename)\n",
    "\n",
    "\n",
    "def read_image_dictionary(filename: str) -> Dict[int, str]:\n",
    "    \"\"\"Create image dictionary from file; useful for ETH3D dataset reading and conversion.\n",
    "\n",
    "    Args:\n",
    "        filename: input dictionary text file path\n",
    "\n",
    "    Returns:\n",
    "        Dictionary of image id (int) and corresponding image file name (string)\n",
    "    \"\"\"\n",
    "    image_dict: Dict[int, str] = {}\n",
    "    with open(filename) as f:\n",
    "        num_entries = int(f.readline().strip())\n",
    "        for _ in range(num_entries):\n",
    "            parts = f.readline().strip().split(' ')\n",
    "            image_dict[int(parts[0].strip())] = parts[1].strip()\n",
    "    return image_dict\n",
    "\n",
    "\n",
    "def read_cam_file(filename: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n",
    "    \"\"\"Read camera intrinsics, extrinsics, and depth values (min, max) from text file\n",
    "\n",
    "    Args:\n",
    "        filename: cam text file path string\n",
    "\n",
    "    Returns:\n",
    "        Tuple with intrinsics matrix (3x3), extrinsics matrix (4x4), and depth params vector (min and max) if exists\n",
    "    \"\"\"\n",
    "    with open(filename) as f:\n",
    "        lines = [line.rstrip() for line in f.readlines()]\n",
    "    # extrinsics: line [1,5), 4x4 matrix\n",
    "    extrinsics = np.fromstring(' '.join(lines[1:5]), dtype=np.float32, sep=' ').reshape((4, 4))\n",
    "    # intrinsics: line [7-10), 3x3 matrix\n",
    "    intrinsics = np.fromstring(' '.join(lines[7:10]), dtype=np.float32, sep=' ').reshape((3, 3))\n",
    "    # depth min and max: line 11\n",
    "    if len(lines) >= 12:\n",
    "        depth_params = np.fromstring(lines[11], dtype=np.float32, sep=' ')\n",
    "    else:\n",
    "        depth_params = np.empty(0)\n",
    "\n",
    "    return intrinsics, extrinsics, depth_params\n",
    "\n",
    "\n",
    "def read_pair_file(filename: str) -> List[Tuple[int, List[int]]]:\n",
    "    \"\"\"Read image pairs from text file and output a list of tuples each containing the reference image ID and a list of\n",
    "    source image IDs\n",
    "\n",
    "    Args:\n",
    "        filename: pair text file path string\n",
    "\n",
    "    Returns:\n",
    "        List of tuples with reference ID and list of source IDs\n",
    "    \"\"\"\n",
    "    data = []\n",
    "    with open(filename) as f:\n",
    "        num_viewpoint = int(f.readline())\n",
    "        for _ in range(num_viewpoint):\n",
    "            # ref_view = int(f.readline().rstrip())\n",
    "            ref_view = int(f.readline().rstrip())\n",
    "            # print(ref_view)\n",
    "            # src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]\n",
    "            src_views = [int(x) for x in f.readline().rstrip().split()[1::2]]\n",
    "            # print(src_views)\n",
    "            view_ids = [ref_view] + src_views[:2]\n",
    "            # print(view_ids)\n",
    "            if len(src_views) != 0:\n",
    "                data.append((ref_view, src_views))\n",
    "    return data\n",
    "\n",
    "\n",
    "def read_map(path: str, max_dim: int = -1) -> np.ndarray:\n",
    "    \"\"\" Read a binary depth map from either PFM or Colmap (bin) format determined by the file extension and also scale\n",
    "    the map to the max dim if given\n",
    "\n",
    "    Args:\n",
    "        path: input depth map file path string\n",
    "        max_dim: max dimension to scale down the map; keep original size if -1\n",
    "\n",
    "    Returns:\n",
    "        Array of depth map values\n",
    "    \"\"\"\n",
    "    if path.endswith('.pfm'):\n",
    "        in_map, _ = read_pfm(path)\n",
    "    elif path.endswith('.bin'):\n",
    "        in_map = read_bin(path)\n",
    "    else:\n",
    "        raise Exception('Invalid input format; only pfm and bin are supported')\n",
    "    # keep the (possibly rescaled) map; drop the original height/width\n",
    "    return scale_to_max_dim(in_map, max_dim)[0]\n",
    "\n",
    "\n",
    "def save_map(path: str, data: np.ndarray) -> None:\n",
    "    \"\"\"Save binary depth or confidence maps in PFM or Colmap (bin) format determined by the file extension\n",
    "\n",
    "    Args:\n",
    "        path: output map file path string\n",
    "        data: map data array\n",
    "    \"\"\"\n",
    "    if path.endswith('.pfm'):\n",
    "        save_pfm(path, data)\n",
    "    elif path.endswith('.bin'):\n",
    "        save_bin(path, data)\n",
    "    else:\n",
    "        raise Exception('Invalid input format; only pfm and bin are supported')\n",
    "\n",
    "\n",
    "def read_bin(path: str) -> np.ndarray:\n",
    "    \"\"\"Read a depth map from a Colmap .bin file\n",
    "\n",
    "    Args:\n",
    "        path: .pfm file path string\n",
    "\n",
    "    Returns:\n",
    "        data: array of shape (H, W, C) representing loaded depth map\n",
    "    \"\"\"\n",
    "    with open(path, 'rb') as fid:\n",
    "        width, height, channels = np.genfromtxt(fid, delimiter='&', max_rows=1,\n",
    "                                                usecols=(0, 1, 2), dtype=int)\n",
    "        fid.seek(0)\n",
    "        num_delimiter = 0\n",
    "        byte = fid.read(1)\n",
    "        while True:\n",
    "            if byte == b'&':\n",
    "                num_delimiter += 1\n",
    "                if num_delimiter >= 3:\n",
    "                    break\n",
    "            byte = fid.read(1)\n",
    "        data = np.fromfile(fid, np.float32)\n",
    "    data = data.reshape((width, height, channels), order='F')\n",
    "    data = np.transpose(data, (1, 0, 2))\n",
    "    return data\n",
    "\n",
    "\n",
    "def save_bin(filename: str, data: np.ndarray):\n",
    "    \"\"\"Save a depth map to a Colmap .bin file\n",
    "\n",
    "    Args:\n",
    "        filename: output .pfm file path string,\n",
    "        data: depth map to save, of shape (H,W) or (H,W,C)\n",
    "    \"\"\"\n",
    "    if data.dtype != np.float32:\n",
    "        raise Exception('Image data type must be float32.')\n",
    "\n",
    "    if len(data.shape) == 2:\n",
    "        height, width = data.shape\n",
    "        channels = 1\n",
    "    elif len(data.shape) == 3 and (data.shape[2] == 3 or data.shape[2] == 1):\n",
    "        height, width, channels = data.shape\n",
    "    else:\n",
    "        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')\n",
    "\n",
    "    with open(filename, 'w') as fid:\n",
    "        fid.write(str(width) + '&' + str(height) + '&' + str(channels) + '&')\n",
    "\n",
    "    with open(filename, 'ab') as fid:\n",
    "        if len(data.shape) == 2:\n",
    "            image_trans = np.transpose(data, (1, 0))\n",
    "        else:\n",
    "            image_trans = np.transpose(data, (1, 0, 2))\n",
    "        data_1d = image_trans.reshape(-1, order='F')\n",
    "        data_list = data_1d.tolist()\n",
    "        endian_character = '<'\n",
    "        format_char_sequence = ''.join(['f'] * len(data_list))\n",
    "        byte_data = struct.pack(endian_character + format_char_sequence, *data_list)\n",
    "        fid.write(byte_data)\n",
    "\n",
    "\n",
    "def read_pfm(filename: str) -> Tuple[np.ndarray, float]:\n",
    "    \"\"\"Read a depth map from a .pfm file\n",
    "\n",
    "    Args:\n",
    "        filename: .pfm file path string\n",
    "\n",
    "    Returns:\n",
    "        data: array of shape (H, W, C) representing loaded depth map\n",
    "        scale: float to recover actual depth map pixel values\n",
    "    \"\"\"\n",
    "    file = open(filename, \"rb\")  # treat as binary and read-only\n",
    "\n",
    "    header = file.readline().decode(\"utf-8\").rstrip()\n",
    "    if header == \"PF\":\n",
    "        color = True\n",
    "    elif header == \"Pf\": # depth is Pf\n",
    "        color = False\n",
    "    else:\n",
    "        raise Exception(\"Not a PFM file.\")\n",
    "\n",
    "    dim_match = re.match(r\"^(\\d+)\\s(\\d+)\\s$\", file.readline().decode(\"utf-8\"))\n",
    "    if dim_match:\n",
    "        width, height = map(int, dim_match.groups())\n",
    "    else:\n",
    "        raise Exception(\"Malformed PFM header.\")\n",
    "\n",
    "    scale = float(file.readline().rstrip())\n",
    "    if scale < 0:  # little-endian\n",
    "        endian = \"<\"\n",
    "        scale = -scale\n",
    "    else:\n",
    "        endian = \">\"  # big-endian\n",
    "\n",
    "    data = np.fromfile(file, endian + \"f\")\n",
    "    shape = (height, width, 3) if color else (height, width, 1)\n",
    "\n",
    "    data = np.reshape(data, shape)\n",
    "    data = np.flipud(data)\n",
    "    file.close()\n",
    "    return data, scale\n",
    "\n",
    "\n",
    "def save_pfm(filename: str, image: np.ndarray, scale: float = 1) -> None:\n",
    "    \"\"\"Save a depth map to a .pfm file\n",
    "\n",
    "    Args:\n",
    "        filename: output .pfm file path string,\n",
    "        image: depth map to save, of shape (H,W) or (H,W,C)\n",
    "        scale: scale parameter to save\n",
    "    \"\"\"\n",
    "    file = open(filename, \"wb\")\n",
    "    color = None\n",
    "\n",
    "    image = np.flipud(image)\n",
    "\n",
    "    if image.dtype.name != \"float32\":\n",
    "        raise Exception(\"Image dtype must be float32.\")\n",
    "\n",
    "    if len(image.shape) == 3 and image.shape[2] == 3:  # color image\n",
    "        color = True\n",
    "    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale\n",
    "        color = False\n",
    "    else:\n",
    "        raise Exception(\"Image must have H x W x 3, H x W x 1 or H x W dimensions.\")\n",
    "\n",
    "    file.write(\"PF\\n\".encode(\"utf-8\") if color else \"Pf\\n\".encode(\"utf-8\"))\n",
    "    file.write(\"{} {}\\n\".format(image.shape[1], image.shape[0]).encode(\"utf-8\"))\n",
    "\n",
    "    endian = image.dtype.byteorder\n",
    "\n",
    "    if endian == \"<\" or endian == \"=\" and sys.byteorder == \"little\":\n",
    "        scale = -scale\n",
    "\n",
    "    file.write((\"%f\\n\" % scale).encode(\"utf-8\"))\n",
    "\n",
    "    image.tofile(file)\n",
    "    file.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import torch.nn as nn\n",
    "import torch.nn.parallel\n",
    "import torch.backends.cudnn as cudnn\n",
    "from torch.utils.data import DataLoader\n",
    "import time\n",
    "# from datasets import find_dataset_def\n",
    "# from models import *\n",
    "# from utils import *\n",
    "import sys\n",
    "# from datasets.data_io import read_cam_file, read_pair_file, read_image, read_map, save_image, save_map\n",
    "import cv2\n",
    "from plyfile import PlyData, PlyElement\n",
    "\n",
    "# let cudnn auto-tune convolution algorithms for fixed-size inputs\n",
    "cudnn.benchmark = True\n",
    "\n",
    "parser = argparse.ArgumentParser(description='Predict depth, filter, and fuse')\n",
    "parser.add_argument('--model', default='PatchmatchNet', help='select model')\n",
    "\n",
    "# dataset / input-output options\n",
    "parser.add_argument('--dataset', default='eth3d', help='select dataset')\n",
    "parser.add_argument('--testpath', help='testing data path')\n",
    "parser.add_argument('--testlist', help='testing scan list')\n",
    "parser.add_argument('--split', default='test', help='select data')\n",
    "\n",
    "parser.add_argument('--batch_size', type=int, default=1, help='testing batch size')\n",
    "parser.add_argument('--n_views', type=int, default=5, help='num of view')\n",
    "\n",
    "\n",
    "parser.add_argument('--loadckpt', default=None, help='load a specific checkpoint')\n",
    "parser.add_argument('--outdir', default='./outputs', help='output dir')\n",
    "parser.add_argument('--display', action='store_true', help='display depth images and masks')\n",
    "\n",
    "# PatchmatchNet hyper-parameters, one value per stage (1, 2, 3)\n",
    "parser.add_argument('--patchmatch_iteration', nargs='+', type=int, default=[1, 2, 2],\n",
    "                    help='num of iteration of patchmatch on stages 1,2,3')\n",
    "parser.add_argument('--patchmatch_num_sample', nargs='+', type=int, default=[8, 8, 16],\n",
    "                    help='num of generated samples in local perturbation on stages 1,2,3')\n",
    "parser.add_argument('--patchmatch_interval_scale', nargs='+', type=float, default=[0.005, 0.0125, 0.025], \n",
    "                    help='normalized interval in inverse depth range to generate samples in local perturbation')\n",
    "parser.add_argument('--patchmatch_range', nargs='+', type=int, default=[6, 4, 2],\n",
    "                    help='fixed offset of sampling points for propogation of patchmatch on stages 1,2,3')\n",
    "parser.add_argument('--propagate_neighbors', nargs='+', type=int, default=[0, 8, 16],\n",
    "                    help='num of neighbors for adaptive propagation on stages 1,2,3')\n",
    "parser.add_argument('--evaluate_neighbors', nargs='+', type=int, default=[9, 9, 9],\n",
    "                    help='num of neighbors for adaptive matching cost aggregation of adaptive evaluation on stages 1,2,3')\n",
    "\n",
    "# geometric / photometric consistency thresholds used by depth-map fusion\n",
    "parser.add_argument('--geo_pixel_thres', type=float, default=1,\n",
    "                    help='pixel threshold for geometric consistency filtering')\n",
    "parser.add_argument('--geo_depth_thres', type=float, default=0.01,\n",
    "                    help='depth threshold for geometric consistency filtering')\n",
    "parser.add_argument('--photo_thres', type=float, default=0.8, help='threshold for photometric consistency filtering')\n",
    "\n",
    "# parse arguments and check\n",
    "# NOTE(review): parse_args() typically fails inside a Jupyter kernel because the\n",
    "# kernel injects its own argv (e.g. -f <connection file>); consider\n",
    "# parser.parse_known_args() when running interactively -- confirm intended usage.\n",
    "args = parser.parse_args()\n",
    "print(\"argv:\", sys.argv[1:])\n",
    "print_args(args)\n",
    "\n",
    "\n",
    "# run MVS model to save depth maps\n",
    "def save_depth():\n",
    "    \"\"\"Run the MVS model over the test set and write per-view depth and confidence maps.\n",
    "\n",
    "    Reads configuration from the module-level `args` namespace and writes\n",
    "    'depth_est/*.pfm' and 'confidence/*.pfm' under args.outdir.\n",
    "    \"\"\"\n",
    "    # dataset, dataloader\n",
    "    mvs_dataset = find_dataset_def(args.dataset)\n",
    "    test_dataset = mvs_dataset(args.testpath, args.n_views)\n",
    "    image_loader = DataLoader(test_dataset, args.batch_size, shuffle=False, num_workers=4, drop_last=False)\n",
    "\n",
    "    # model\n",
    "    model = PatchmatchNet(\n",
    "        patchmatch_interval_scale=args.patchmatch_interval_scale,\n",
    "        propagation_range=args.patchmatch_range,\n",
    "        patchmatch_iteration=args.patchmatch_iteration,\n",
    "        patchmatch_num_sample=args.patchmatch_num_sample,\n",
    "        propagate_neighbors=args.propagate_neighbors,\n",
    "        evaluate_neighbors=args.evaluate_neighbors\n",
    "    )\n",
    "    model = nn.DataParallel(model)\n",
    "    # NOTE(review): CPU-only execution, consistent with tocuda() mapping tensors to CPU\n",
    "    model.cpu()\n",
    "\n",
    "    # load checkpoint file specified by args.loadckpt\n",
    "    print(\"loading model {}\".format(args.loadckpt))\n",
    "    state_dict = torch.load(args.loadckpt, map_location=torch.device('cpu'))\n",
    "    model.load_state_dict(state_dict['model'], strict=False)\n",
    "    model.eval()\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for batch_idx, sample in enumerate(image_loader):\n",
    "            start_time = time.time()\n",
    "            sample_cuda = tocuda(sample)\n",
    "            refined_depth, confidence, _ = model(sample_cuda[\"imgs\"], sample_cuda[\"proj_matrices\"],\n",
    "                                                 sample_cuda[\"depth_min\"], sample_cuda[\"depth_max\"])\n",
    "            refined_depth = tensor2numpy(refined_depth)\n",
    "            confidence = tensor2numpy(confidence)\n",
    "\n",
    "            # release the batch tensors before the (slow) disk writes\n",
    "            del sample_cuda\n",
    "            print('Iter {}/{}, time = {:.3f}'.format(batch_idx, len(image_loader), time.time() - start_time))\n",
    "            filenames = sample[\"filename\"]\n",
    "\n",
    "            # save depth maps and confidence maps\n",
    "            for filename, depth_est, photometric_confidence in zip(filenames, refined_depth, confidence):\n",
    "                depth_filename = os.path.join(args.outdir, filename.format('depth_est', '.pfm'))\n",
    "                confidence_filename = os.path.join(args.outdir, filename.format('confidence', '.pfm'))\n",
    "                os.makedirs(depth_filename.rsplit('/', 1)[0], exist_ok=True)\n",
    "                os.makedirs(confidence_filename.rsplit('/', 1)[0], exist_ok=True)\n",
    "                # drop the leading batch axis before saving\n",
    "                depth_est = np.squeeze(depth_est, 0)\n",
    "                save_map(depth_filename, depth_est)\n",
    "                save_map(confidence_filename, photometric_confidence)\n",
    "                \n",
    "\n",
    "# project the reference point cloud into the source view, then project back\n",
    "def reproject_with_depth(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src):\n",
    "    \"\"\"Project reference pixels into the source view, then re-project them back using the\n",
    "    sampled source depth.\n",
    "\n",
    "    Args:\n",
    "        depth_ref: reference depth map of shape (H, W)\n",
    "        intrinsics_ref, extrinsics_ref: reference camera matrices (3x3 and 4x4)\n",
    "        depth_src: source depth map\n",
    "        intrinsics_src, extrinsics_src: source camera matrices\n",
    "\n",
    "    Returns:\n",
    "        depth_reprojected: depth of re-projected points in the reference view\n",
    "        x_reprojected, y_reprojected: re-projected pixel coordinates in the reference view\n",
    "        x_src, y_src: reference pixel coordinates projected into the source view\n",
    "    \"\"\"\n",
    "    width, height = depth_ref.shape[1], depth_ref.shape[0]\n",
    "    # step1. project reference pixels to the source view\n",
    "    # reference view x, y\n",
    "    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))\n",
    "    x_ref, y_ref = x_ref.reshape([-1]), y_ref.reshape([-1])\n",
    "    # reference 3D space: back-project pixels with the reference depth\n",
    "    xyz_ref = np.matmul(np.linalg.inv(intrinsics_ref),\n",
    "                        np.vstack((x_ref, y_ref, np.ones_like(x_ref))) * depth_ref.reshape([-1]))\n",
    "    # source 3D space: ref-to-src rigid transform\n",
    "    xyz_src = np.matmul(np.matmul(extrinsics_src, np.linalg.inv(extrinsics_ref)),\n",
    "                        np.vstack((xyz_ref, np.ones_like(x_ref))))[:3]\n",
    "    # source view x, y (perspective division)\n",
    "    k_xyz_src = np.matmul(intrinsics_src, xyz_src)\n",
    "    xy_src = k_xyz_src[:2] / k_xyz_src[2:3]\n",
    "\n",
    "    # step2. reproject the source view points with source view depth estimation\n",
    "    # find the depth estimation of the source view\n",
    "    x_src = xy_src[0].reshape([height, width]).astype(np.float32)\n",
    "    y_src = xy_src[1].reshape([height, width]).astype(np.float32)\n",
    "    # bilinearly sample the source depth map at the projected locations\n",
    "    sampled_depth_src = cv2.remap(depth_src, x_src, y_src, interpolation=cv2.INTER_LINEAR)\n",
    "    # mask = sampled_depth_src > 0\n",
    "\n",
    "    # source 3D space\n",
    "    # NOTE that we should use sampled source-view depth_here to project back\n",
    "    xyz_src = np.matmul(np.linalg.inv(intrinsics_src),\n",
    "                        np.vstack((xy_src, np.ones_like(x_ref))) * sampled_depth_src.reshape([-1]))\n",
    "    # reference 3D space: src-to-ref rigid transform\n",
    "    xyz_reprojected = np.matmul(np.matmul(extrinsics_ref, np.linalg.inv(extrinsics_src)),\n",
    "                                np.vstack((xyz_src, np.ones_like(x_ref))))[:3]\n",
    "    # source view x, y, depth\n",
    "    depth_reprojected = xyz_reprojected[2].reshape([height, width]).astype(np.float32)\n",
    "    k_xyz_reprojected = np.matmul(intrinsics_ref, xyz_reprojected)\n",
    "    xy_reprojected = k_xyz_reprojected[:2] / k_xyz_reprojected[2:3]\n",
    "    x_reprojected = xy_reprojected[0].reshape([height, width]).astype(np.float32)\n",
    "    y_reprojected = xy_reprojected[1].reshape([height, width]).astype(np.float32)\n",
    "\n",
    "    return depth_reprojected, x_reprojected, y_reprojected, x_src, y_src\n",
    "\n",
    "\n",
    "def check_geometric_consistency(depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src,\n",
    "                                geo_pixel_thres, geo_depth_thres):\n",
    "    \"\"\"Geometric consistency check between a reference depth map and one source depth map.\n",
    "\n",
    "    A reference pixel is consistent when its reprojection error is below geo_pixel_thres\n",
    "    pixels AND its relative depth error is below geo_depth_thres.\n",
    "\n",
    "    Returns:\n",
    "        mask: boolean map of geometrically consistent pixels\n",
    "        depth_reprojected: re-projected depth with inconsistent pixels zeroed out\n",
    "        x2d_src, y2d_src: reference pixel coordinates projected into the source view\n",
    "    \"\"\"\n",
    "    height, width = depth_ref.shape[0], depth_ref.shape[1]\n",
    "    x_ref, y_ref = np.meshgrid(np.arange(0, width), np.arange(0, height))\n",
    "    depth_reprojected, x2d_reprojected, y2d_reprojected, x2d_src, y2d_src = reproject_with_depth(\n",
    "        depth_ref, intrinsics_ref, extrinsics_ref, depth_src, intrinsics_src, extrinsics_src)\n",
    "\n",
    "    # pixel reprojection error: |p_reproj - p_ref|\n",
    "    dist = np.sqrt((x2d_reprojected - x_ref) ** 2 + (y2d_reprojected - y_ref) ** 2)\n",
    "\n",
    "    # relative depth error: |d_reproj - d_ref| / d_ref\n",
    "    depth_diff = np.abs(depth_reprojected - depth_ref)\n",
    "    relative_depth_diff = depth_diff / depth_ref\n",
    "\n",
    "    mask = np.logical_and(dist < geo_pixel_thres, relative_depth_diff < geo_depth_thres)\n",
    "    # zero out re-projected depths that failed the consistency test\n",
    "    depth_reprojected[~mask] = 0\n",
    "\n",
    "    return mask, depth_reprojected, x2d_src, y2d_src\n",
    "\n",
    "\n",
    "def filter_depth(\n",
    "        scan_folder, out_folder, plyfilename, geo_pixel_thres, geo_depth_thres, photo_thres, img_wh, geo_mask_thres):\n",
    "    \"\"\"Fuse per-view depth maps into a single PLY point cloud.\n",
    "\n",
    "    For every reference view listed in pair.txt, pixels are kept only when\n",
    "    (a) the network confidence exceeds photo_thres, and (b) at least\n",
    "    geo_mask_thres source views are geometrically consistent (reprojection\n",
    "    error < geo_pixel_thres pixels, relative depth diff < geo_depth_thres).\n",
    "\n",
    "    Args:\n",
    "        scan_folder: scan directory containing pair.txt, images/ and cams/\n",
    "        out_folder: directory with depth_est/ and confidence/ maps; per-view masks are written back here\n",
    "        plyfilename: output path for the fused point cloud\n",
    "        geo_pixel_thres: geometric consistency threshold in pixels\n",
    "        geo_depth_thres: geometric consistency threshold on relative depth\n",
    "        photo_thres: photometric (confidence) threshold\n",
    "        img_wh: (width, height) the images were resized to for inference\n",
    "        geo_mask_thres: minimum number of geometrically consistent source views\n",
    "    \"\"\"\n",
    "    # the pair file\n",
    "    pair_file = os.path.join(scan_folder, \"pair.txt\")\n",
    "    # accumulators for the final point cloud\n",
    "    vertexs = []\n",
    "    vertex_colors = []\n",
    "\n",
    "    pair_data = read_pair_file(pair_file)\n",
    "\n",
    "    # for each reference view and the corresponding source views\n",
    "    for ref_view, src_views in pair_data:\n",
    "\n",
    "        # load the reference image\n",
    "        ref_img, original_h, original_w = read_image(\n",
    "            os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(ref_view)), max(img_wh))\n",
    "        # BUGFIX: the [0:2] slice has exactly two elements, so unpack two\n",
    "        # targets (the original three-target unpacking raised ValueError)\n",
    "        ref_intrinsics, ref_extrinsics = read_cam_file(\n",
    "            os.path.join(scan_folder, 'cams/{:0>8}_cam.txt'.format(ref_view)))[0:2]\n",
    "        # rescale intrinsics from the original image size to img_wh\n",
    "        ref_intrinsics[0] *= img_wh[0]/original_w\n",
    "        ref_intrinsics[1] *= img_wh[1]/original_h\n",
    "\n",
    "        # load the estimated depth of the reference view\n",
    "        ref_depth_est = read_map(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(ref_view)))\n",
    "        ref_depth_est = np.squeeze(ref_depth_est, 2)\n",
    "        # load the photometric confidence of the reference view\n",
    "        confidence = read_map(os.path.join(out_folder, 'confidence/{:0>8}.pfm'.format(ref_view)))\n",
    "\n",
    "        photo_mask = confidence > photo_thres\n",
    "        photo_mask = np.squeeze(photo_mask, 2)\n",
    "\n",
    "        all_srcview_depth_ests = []\n",
    "        # per-pixel count of source views that agree geometrically\n",
    "        geo_mask_sum = 0\n",
    "        for src_view in src_views:\n",
    "            # camera parameters of the source view\n",
    "            _, original_h, original_w = read_image(\n",
    "                os.path.join(scan_folder, 'images/{:0>8}.jpg'.format(src_view)), max(img_wh))\n",
    "            # BUGFIX: same two-element slice / two-target unpacking as above\n",
    "            src_intrinsics, src_extrinsics = read_cam_file(\n",
    "                os.path.join(scan_folder, 'cams/{:0>8}_cam.txt'.format(src_view)))[0:2]\n",
    "            src_intrinsics[0] *= img_wh[0]/original_w\n",
    "            src_intrinsics[1] *= img_wh[1]/original_h\n",
    "\n",
    "            # the estimated depth of the source view\n",
    "            src_depth_est = read_map(os.path.join(out_folder, 'depth_est/{:0>8}.pfm'.format(src_view)))\n",
    "\n",
    "            geo_mask, depth_reprojected, _, _ = check_geometric_consistency(\n",
    "                ref_depth_est, ref_intrinsics, ref_extrinsics, src_depth_est, src_intrinsics, src_extrinsics,\n",
    "                geo_pixel_thres, geo_depth_thres)\n",
    "            geo_mask_sum += geo_mask.astype(np.int32)\n",
    "            all_srcview_depth_ests.append(depth_reprojected)\n",
    "\n",
    "        # average the reference depth with all consistent source reprojections\n",
    "        # (inconsistent reprojections were zeroed, so they only affect the +1 denominator term)\n",
    "        depth_est_averaged = (sum(all_srcview_depth_ests) + ref_depth_est) / (geo_mask_sum + 1)\n",
    "        geo_mask = geo_mask_sum >= geo_mask_thres\n",
    "        final_mask = np.logical_and(photo_mask, geo_mask)\n",
    "\n",
    "        os.makedirs(os.path.join(out_folder, \"mask\"), exist_ok=True)\n",
    "        save_image(os.path.join(out_folder, \"mask/{:0>8}_photo.png\".format(ref_view)), photo_mask)\n",
    "        save_image(os.path.join(out_folder, \"mask/{:0>8}_geo.png\".format(ref_view)), geo_mask)\n",
    "        save_image(os.path.join(out_folder, \"mask/{:0>8}_final.png\".format(ref_view)), final_mask)\n",
    "\n",
    "        # BUGFIX: '{:.3f}' (was '{:3f}') so mask ratios print with three decimals\n",
    "        print(\"processing {}, ref-view{:0>2}, geo_mask:{:.3f} photo_mask:{:.3f} final_mask: {:.3f}\".format(\n",
    "            scan_folder, ref_view, geo_mask.mean(), photo_mask.mean(), final_mask.mean()))\n",
    "\n",
    "        # NOTE(review): relies on a global `args` from an earlier cell\n",
    "        if args.display:\n",
    "            cv2.imshow('ref_img', ref_img[:, :, ::-1])\n",
    "            cv2.imshow('ref_depth', ref_depth_est)\n",
    "            cv2.imshow('ref_depth * photo_mask', ref_depth_est * photo_mask.astype(np.float32))\n",
    "            cv2.imshow('ref_depth * geo_mask', ref_depth_est * geo_mask.astype(np.float32))\n",
    "            cv2.imshow('ref_depth * mask', ref_depth_est * final_mask.astype(np.float32))\n",
    "            cv2.waitKey(1)\n",
    "\n",
    "        height, width = depth_est_averaged.shape[:2]\n",
    "        x, y = np.meshgrid(np.arange(0, width), np.arange(0, height))\n",
    "\n",
    "        valid_points = final_mask\n",
    "\n",
    "        x, y, depth = x[valid_points], y[valid_points], depth_est_averaged[valid_points]\n",
    "\n",
    "        # back-project the surviving pixels to world coordinates\n",
    "        color = ref_img[valid_points]\n",
    "        xyz_ref = np.matmul(np.linalg.inv(ref_intrinsics), np.vstack((x, y, np.ones_like(x))) * depth)\n",
    "        xyz_world = np.matmul(np.linalg.inv(ref_extrinsics), np.vstack((xyz_ref, np.ones_like(x))))[:3]\n",
    "        vertexs.append(xyz_world.transpose((1, 0)))\n",
    "        # assumes ref_img is float in [0, 1] -- TODO confirm read_image's scaling\n",
    "        vertex_colors.append((color * 255).astype(np.uint8))\n",
    "\n",
    "    vertexs = np.concatenate(vertexs, axis=0)\n",
    "    vertex_colors = np.concatenate(vertex_colors, axis=0)\n",
    "    vertexs = np.array([tuple(v) for v in vertexs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])\n",
    "    vertex_colors = np.array([tuple(v) for v in vertex_colors], dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])\n",
    "\n",
    "    # merge position and color fields into one structured array for PLY export\n",
    "    vertex_all = np.empty(len(vertexs), vertexs.dtype.descr + vertex_colors.dtype.descr)\n",
    "    for prop in vertexs.dtype.names:\n",
    "        vertex_all[prop] = vertexs[prop]\n",
    "    for prop in vertex_colors.dtype.names:\n",
    "        vertex_all[prop] = vertex_colors[prop]\n",
    "\n",
    "    el = PlyElement.describe(vertex_all, 'vertex')\n",
    "    PlyData([el]).write(plyfilename)\n",
    "    print(\"saving the final model to\", plyfilename)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # NOTE(review): depends on `save_depth`, `filter_depth`, `args` and `os` defined in earlier cells\n",
    "    # step1. save all the depth maps and the masks in outputs directory\n",
    "    save_depth()\n",
    "    # the size of image input for PatchmatchNet, maybe downsampled\n",
    "    img_wh = (640, 480)\n",
    "    # minimum number of source views that must be geometrically consistent for a pixel to survive filtering\n",
    "    geo_mask_thres = 2\n",
    "\n",
    "    # step2. filter the saved depth maps per view and fuse the survivors into custom.ply\n",
    "    filter_depth(args.testpath, args.outdir, os.path.join(args.outdir, 'custom.ply'),  args.geo_pixel_thres,\n",
    "                 args.geo_depth_thres, args.photo_thres, img_wh, geo_mask_thres)\n"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
