{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h2><center>Evaluation of whole dataset</center></h2>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "(c) DI Dominik Hirner BSc. \n",
    "Institute of Computer Graphics and Vision (ICG)\n",
    "Graz University of Technology, Austria\n",
    "E-mail: dominik.hirner@tugraz.at"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### This function is meant to be used if you want to predict and evaluate all images (train or test) for a dataset. This is useful for a submission or to calculate an overall error."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import numpy as np\n",
    "import os\n",
    "import glob\n",
    "import cv2\n",
    "import re\n",
    "import time\n",
    "import numpy.matlib\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "from PIL import Image\n",
    "from typing import Tuple\n",
    "import torch.nn.functional as F\n",
    "from guided_filter_pytorch.guided_filter import GuidedFilter\n",
    "import argparse\n",
    "import skimage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pin CUDA device enumeration to the PCI bus order and expose only GPU 0,\n",
    "# so every .cuda() call below lands on one predictable device.\n",
    "os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of feature maps produced by each convolution of the Siamese branch;\n",
    "# must match the value the loaded weights were trained with.\n",
    "num_conv_feature_maps = 64"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#This is the filename used for the output as specified by http://vision.middlebury.edu/stereo/submit3/upload-format.html\n",
    "#(the Middlebury submission embeds the algorithm name in the result file names)\n",
    "algo_name = 'FC-DNN-fst+Watershed'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): debug_mode is not referenced in the cells shown here -- confirm it is still used\n",
    "debug_mode = True\n",
    "# run the left-right consistency check in TestImage\n",
    "lr_check = True\n",
    "# fill LR-inconsistent (NaN) pixels afterwards -- presumably via FillIncons; confirm in the driver loop\n",
    "fill_incons = True\n",
    "# load ground truth and compute an end-point error (training splits only)\n",
    "getEPE = False\n",
    "# smooth the cost volume (median + guided filter) -- presumably passed as 'filtered' to TestImage; confirm\n",
    "is_filtered = True\n",
    "\n",
    "#The naming and structure must be as in http://vision.middlebury.edu/stereo/submit3/zip/MiddEval3-data-H.zip\n",
    "#for MB\n",
    "#MB,MBTest, ETH, ETHTest (only with getEPE = False), KITTI2012, KITTI2012Test, KITTI2015,KITTI2015Test\n",
    "#the output images for KITTI will be written in the uint16 format\n",
    "dataset = 'MB'\n",
    "\n",
    "# all predicted disparity maps are written below this directory\n",
    "output_folder = '/media/HDD/FC-DCNN2/FC-DCNN2/t/'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'MB'):\n",
    "    \n",
    "    # Middlebury half-resolution training scenes (hard-coded absolute path --\n",
    "    # adapt input_folder and weight to the local machine before running)\n",
    "    input_folder = '/media/HDD/TrainingsData/MB_H/trainingHDisp/*/'\n",
    "    weight = '../weights/mb'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'MBTest'):\n",
    "    \n",
    "    # Middlebury half-resolution test scenes; note that 'weight' points at an\n",
    "    # experiment checkpoint here, not at the released ../weights/mb file\n",
    "    input_folder = '/media/HDD/TrainingsData/MB_H/testH/*/'\n",
    "    weight = '/media/HDD/FC-DCNNSegTest/weights/new/mb_64f_best0000e31.717011'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'ETHTest'):\n",
    "    \n",
    "    # ETH3D two-view test scenes (no ground truth available -- use getEPE = False)\n",
    "    input_folder = '/media/HDD/TrainingsData/ETH3D/two_view_test/*/'\n",
    "    weight = '../weights/eth3d'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'ETH'):\n",
    "    \n",
    "    # ETH3D two-view training scenes\n",
    "    input_folder = '/media/HDD/TrainingsData/ETH3D/two_view_training/*/'\n",
    "    weight = '../weights/eth3d'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'KITTI2012'):\n",
    "\n",
    "    weight = '../weights/kitti'\n",
    "\n",
    "    #Kitti folder\n",
    "    input_folder = '/media/HDD/TrainingsData/kitti2012/training'\n",
    "\n",
    "    left_folder = input_folder + '/colored_0/'\n",
    "    right_folder = input_folder + '/colored_1/'\n",
    "\n",
    "    left_list = glob.glob(input_folder + '/colored_0/*.png')\n",
    "    right_list = glob.glob(input_folder + '/colored_1/*.png')\n",
    "\n",
    "    # keep only the file names, so left/right (and GT) frames can be matched\n",
    "    left_elem_list = sorted(p.split('/')[-1] for p in left_list)\n",
    "    right_elem_list = sorted(p.split('/')[-1] for p in right_list)\n",
    "\n",
    "    if(getEPE):\n",
    "        gt_folder = input_folder + '/disp_noc/'\n",
    "        gt_list = glob.glob(gt_folder + '*.png')\n",
    "        gt_elem_list = sorted(p.split('/')[-1] for p in gt_list)\n",
    "        inters_list = set(left_elem_list) & set(right_elem_list) & set(gt_elem_list)\n",
    "    else:\n",
    "        inters_list = set(left_elem_list) & set(right_elem_list)\n",
    "\n",
    "    # the evaluation loop expects a sorted list of the common frame names\n",
    "    inters_list = sorted(inters_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'KITTI2012Test'):\n",
    "\n",
    "    weight = '/media/HDD/FC-DCNNSegTest/weights/kitti2012'\n",
    "\n",
    "    #Kitti folder\n",
    "    input_folder = '/media/HDD/TrainingsData/kitti2012/testing'\n",
    "\n",
    "    left_folder = input_folder + '/colored_0/'\n",
    "    right_folder = input_folder + '/colored_1/'\n",
    "\n",
    "    left_list = glob.glob(input_folder + '/colored_0/*.png')\n",
    "    right_list = glob.glob(input_folder + '/colored_1/*.png')\n",
    "\n",
    "    # file names only, so both sides can be intersected by frame name\n",
    "    left_elem_list = sorted(p.split('/')[-1] for p in left_list)\n",
    "    right_elem_list = sorted(p.split('/')[-1] for p in right_list)\n",
    "\n",
    "    inters_list = sorted(set(left_elem_list) & set(right_elem_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'KITTI2015'):\n",
    "\n",
    "    weight = '../weights/kitti'\n",
    "\n",
    "    #Kitti folder\n",
    "    input_folder = '/media/HDD/TrainingsData/kitti2015'\n",
    "\n",
    "    left_list = glob.glob(input_folder + '/testing/image_2/*png')\n",
    "    right_list = glob.glob(input_folder + '/testing/image_3/*png')\n",
    "\n",
    "    # keep only the file names, so left/right (and GT) frames can be matched\n",
    "    left_elem_list = sorted(p.split('/')[-1] for p in left_list)\n",
    "    right_elem_list = sorted(p.split('/')[-1] for p in right_list)\n",
    "\n",
    "    if(getEPE):\n",
    "        gt_folder = input_folder + '/testing/disp_noc_0/'\n",
    "        gt_list = glob.glob(gt_folder + '*.png')\n",
    "        gt_elem_list = sorted(p.split('/')[-1] for p in gt_list)\n",
    "        inters_list = set(left_elem_list) & set(right_elem_list) & set(gt_elem_list)\n",
    "    else:\n",
    "        inters_list = set(left_elem_list) & set(right_elem_list)\n",
    "\n",
    "    inters_list = sorted(inters_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'KITTI2015Test'):\n",
    "\n",
    "    weight = '/media/HDD/FC-DCNNSegTest/weights/kitti2015'\n",
    "\n",
    "    #Kitti folder\n",
    "    input_folder = '/media/HDD/TrainingsData/kitti2015/testing'\n",
    "\n",
    "    left_list = glob.glob(input_folder + '/image_2/*png')\n",
    "    right_list = glob.glob(input_folder + '/image_3/*png')\n",
    "\n",
    "    # file names only, so both sides can be intersected by frame name\n",
    "    left_elem_list = sorted(p.split('/')[-1] for p in left_list)\n",
    "    right_elem_list = sorted(p.split('/')[-1] for p in right_list)\n",
    "\n",
    "    inters_list = sorted(set(left_elem_list) & set(right_elem_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "##python3 version!!!!\n",
    "def readPFM(file):\n",
    "    \"\"\"Read a PFM file ('PF' = colour, 'Pf' = greyscale).\n",
    "\n",
    "    Returns (data, scale): data is a numpy array of shape (H, W, 3) for\n",
    "    colour or (H, W) for greyscale images; scale is the header scale factor.\n",
    "    Raises Exception on a malformed file.\n",
    "    \"\"\"\n",
    "    # context manager closes the handle even on error paths\n",
    "    # (the original never closed the file)\n",
    "    with open(file, 'rb') as fh:\n",
    "\n",
    "        header = fh.readline().decode('utf-8').rstrip()\n",
    "        if header == 'PF':\n",
    "            color = True\n",
    "        elif header == 'Pf':\n",
    "            color = False\n",
    "        else:\n",
    "            raise Exception('Not a PFM file.')\n",
    "\n",
    "        dim_match = re.match(r'^(\\d+)\\s(\\d+)\\s$', fh.readline().decode('utf-8'))\n",
    "        if dim_match:\n",
    "            width, height = map(int, dim_match.groups())\n",
    "        else:\n",
    "            raise Exception('Malformed PFM header.')\n",
    "\n",
    "        # the sign of the scale encodes the endianness of the pixel data\n",
    "        scale = float(fh.readline().decode('utf-8').rstrip())\n",
    "        if scale < 0:  # little-endian\n",
    "            endian = '<'\n",
    "            scale = -scale\n",
    "        else:\n",
    "            endian = '>'  # big-endian\n",
    "\n",
    "        data = np.fromfile(fh, endian + 'f')\n",
    "\n",
    "    shape = (height, width, 3) if color else (height, width)\n",
    "\n",
    "    # PFM stores scanlines bottom-to-top, so flip vertically\n",
    "    data = np.reshape(data, shape)\n",
    "    data = np.flipud(data)\n",
    "    return data, scale"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def writePFM(file, image, scale=1):\n",
    "    \"\"\"Write a float32 numpy image to 'file' in PFM format.\n",
    "\n",
    "    image must be H x W x 3 (colour), H x W x 1 or H x W (greyscale);\n",
    "    the sign of the scale written to the header encodes endianness.\n",
    "    Raises Exception for a wrong dtype or an unsupported shape.\n",
    "    \"\"\"\n",
    "    # validate before opening, so a bad call no longer truncates the target\n",
    "    if image.dtype.name != 'float32':\n",
    "        raise Exception('Image dtype must be float32.')\n",
    "\n",
    "    # PFM stores scanlines bottom-to-top\n",
    "    image = np.flipud(image)\n",
    "\n",
    "    if len(image.shape) == 3 and image.shape[2] == 3:  # color image\n",
    "        color = True\n",
    "    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale\n",
    "        color = False\n",
    "    else:\n",
    "        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')\n",
    "\n",
    "    endian = image.dtype.byteorder\n",
    "\n",
    "    # a negative scale in the header marks little-endian pixel data\n",
    "    if endian == '<' or (endian == '=' and sys.byteorder == 'little'):\n",
    "        scale = -scale\n",
    "\n",
    "    # context manager guarantees flush + close (the original leaked the handle)\n",
    "    with open(file, 'wb') as fh:\n",
    "        fh.write('PF\\n'.encode() if color else 'Pf\\n'.encode())\n",
    "        fh.write('%d %d\\n'.encode() % (image.shape[1], image.shape[0]))\n",
    "        fh.write('%f\\n'.encode() % scale)\n",
    "        image.tofile(fh)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SiameseBranch(nn.Module):\n",
    "    \"\"\"Fully-convolutional Siamese feature extractor with dense skips.\n",
    "\n",
    "    Both stereo views are passed through this same branch; each Conv layer\n",
    "    consumes the concatenation of all previous activations. The attribute\n",
    "    names Conv1..Conv5 must not change, otherwise the released state_dicts\n",
    "    no longer load.\n",
    "    \"\"\"\n",
    "    def __init__(self,img_ch=3):\n",
    "        super(SiameseBranch,self).__init__()\n",
    "        \n",
    "        self.Tanh = nn.Tanh()        \n",
    "        self.Conv1 = nn.Conv2d(img_ch, num_conv_feature_maps, kernel_size = 3,stride=1,padding = 1,dilation = 1, bias=True)\n",
    "        self.Conv2 = nn.Conv2d(num_conv_feature_maps, num_conv_feature_maps, kernel_size=3,stride=1,padding = 1,dilation = 1, bias=True)\n",
    "        self.Conv3 = nn.Conv2d(2*num_conv_feature_maps, num_conv_feature_maps, kernel_size=3,stride=1,padding = 1,dilation = 1, bias=True)\n",
    "        self.Conv4 = nn.Conv2d(3*num_conv_feature_maps, num_conv_feature_maps, kernel_size=3,stride=1,padding = 1,dilation = 1,bias=True)\n",
    "        self.Conv5 = nn.Conv2d(4*num_conv_feature_maps, num_conv_feature_maps, kernel_size=3,stride=1,padding = 1,dilation = 1, bias=True)\n",
    "        \n",
    "        \n",
    "    def forward(self,x_in):\n",
    "        \"\"\"Map (B, img_ch, H, W) images to (B, num_conv_feature_maps, H, W) features.\"\"\"\n",
    "        \n",
    "        x1 = self.Conv1(x_in) \n",
    "        x1 = self.Tanh(x1)\n",
    "                \n",
    "        x2 = self.Conv2(x1) \n",
    "        x2 = self.Tanh(x2)\n",
    "        \n",
    "        d2 = torch.cat((x1,x2),dim=1)\n",
    "        \n",
    "        x3 = self.Conv3(d2) \n",
    "        x3 = self.Tanh(x3)\n",
    "        \n",
    "        d3 = torch.cat((x1,x2,x3),dim=1)\n",
    "        \n",
    "        x4 = self.Conv4(d3)\n",
    "        x4 = self.Tanh(x4)\n",
    "        \n",
    "        d4 = torch.cat((x1,x2,x3,x4),dim=1)\n",
    "        \n",
    "        # last layer without Tanh: raw features for the cosine similarity\n",
    "        x5 = self.Conv5(d4)\n",
    "\n",
    "        return x5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the feature extraction branch on the GPU and restore the\n",
    "# checkpoint selected by the dataset cell above ('weight' must be defined).\n",
    "branch = SiameseBranch()\n",
    "branch = branch.cuda()\n",
    "\n",
    "branch.load_state_dict(torch.load(weight))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shorthand for CUDA float tensors and the cosine-similarity matching cost\n",
    "# (computed over dim=1, the feature dimension, by default).\n",
    "Tensor = torch.cuda.FloatTensor\n",
    "cos = torch.nn.CosineSimilarity()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total number of trainable parameters of the network (for reporting).\n",
    "pytorch_total_params = sum(p.numel() for p in branch.parameters() if p.requires_grad)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Display the parameter count computed above.\n",
    "print(pytorch_total_params)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def createCostVol(left_im,right_im,max_disp):\n",
    "    \"\"\"Build the left-to-right matching cost volume.\n",
    "\n",
    "    For every disparity in [0, max_disp] the right feature map is shifted\n",
    "    right by that many pixels (zero padded on the left) and compared with\n",
    "    the left feature map via cosine similarity, giving a\n",
    "    (max_disp+1, H, W) CUDA tensor. Uses the notebook globals branch, cos\n",
    "    and Tensor.\n",
    "\n",
    "    NOTE(review): assumes left_im/right_im are equally sized H x W x C\n",
    "    arrays (as returned by cv2.imread) -- confirm at the call sites.\n",
    "    \"\"\"\n",
    "    a_h, a_w,c = left_im.shape\n",
    "    # to NCHW layout expected by the network\n",
    "    left_im = np.transpose(left_im, (2,0,1)).astype(np.uint8)\n",
    "    right_im = np.transpose(right_im, (2,0,1)).astype(np.uint8)\n",
    "    \n",
    "    left_im = np.reshape(left_im, [1,c,a_h,a_w])\n",
    "    right_im = np.reshape(right_im, [1,c,a_h,a_w])\n",
    "        \n",
    "    with torch.no_grad():\n",
    "\n",
    "        left_imT = Variable(Tensor(left_im))\n",
    "        right_imT = Variable(Tensor(right_im))\n",
    "\n",
    "        left_feat = branch(left_imT)\n",
    "        right_feat = branch(right_imT)\n",
    "        \n",
    "        _,f,h,w = left_feat.shape\n",
    "        \n",
    "        cost_vol = np.zeros((max_disp+1,a_h,a_w))\n",
    "        cost_volT = Variable(Tensor(cost_vol))\n",
    "\n",
    "        #0 => max_disp => one less disp!\n",
    "        for disp in range(0,max_disp+1):\n",
    "\n",
    "            if(disp == 0):\n",
    "                sim_score = cos(left_feat, right_feat)\n",
    "                cost_volT[disp,:,:] = torch.squeeze(sim_score)\n",
    "                \n",
    "            else:\n",
    "                # shift right features 'disp' px to the right by prepending\n",
    "                # a zero block and cropping the same amount on the right\n",
    "                right_shifted = torch.cuda.FloatTensor(1,f,h,w).fill_(0)                      \n",
    "                right_shift = torch.cuda.FloatTensor(1,f,h,disp).fill_(0)  \n",
    "                right_appended = torch.cat([right_shift,right_feat],3)\n",
    "\n",
    "                _,f,h_ap,w_ap = right_appended.shape\n",
    "                right_shifted[:,:,:,:] = right_appended[:,:,:,:(w_ap-disp)]\n",
    "                sim_score = cos(left_feat, right_shifted)\n",
    "                cost_volT[disp,:,:] = torch.squeeze(sim_score)              \n",
    "    \n",
    "    return cost_volT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def createCostVolRL(left_im,right_im,max_disp):\n",
    "    \"\"\"Build the right-to-left matching cost volume.\n",
    "\n",
    "    Mirror of createCostVol: for every disparity the LEFT feature map is\n",
    "    shifted left (zero padded on the right) and compared with the right\n",
    "    feature map via cosine similarity. Used for the left-right check.\n",
    "    \"\"\"\n",
    "\n",
    "    a_h, a_w,c = left_im.shape\n",
    "    # to NCHW layout expected by the network\n",
    "    left_im = np.transpose(left_im, (2,0,1)).astype(np.uint8)\n",
    "    right_im = np.transpose(right_im, (2,0,1)).astype(np.uint8)\n",
    "    \n",
    "    left_im = np.reshape(left_im, [1,c,a_h,a_w])\n",
    "    right_im = np.reshape(right_im, [1,c,a_h,a_w])\n",
    "\n",
    "    with torch.no_grad():\n",
    "        \n",
    "        left_imT = Variable(Tensor(left_im))\n",
    "        right_imT = Variable(Tensor(right_im))\n",
    "\n",
    "        left_feat = branch(left_imT)\n",
    "        right_feat = branch(right_imT)\n",
    "\n",
    "\n",
    "        _,f,h,w = left_feat.shape\n",
    "        cost_vol = np.zeros((max_disp+1,a_h,a_w))\n",
    "        \n",
    "        cost_volT = Variable(Tensor(cost_vol))\n",
    "\n",
    "        for disp in range(0,max_disp+1):\n",
    "\n",
    "            if(disp == 0):\n",
    "                sim_score = cos(right_feat, left_feat)\n",
    "                cost_volT[disp,:,:] = torch.squeeze(sim_score)\n",
    "                \n",
    "            else:    \n",
    "                # shift left features 'disp' px to the left by appending a\n",
    "                # zero block and cropping the same amount on the left\n",
    "                left_shifted = torch.cuda.FloatTensor(1,f,h,w).fill_(0)\n",
    "                left_shift = torch.cuda.FloatTensor(1,f,h,disp).fill_(0)\n",
    "                left_appended = torch.cat([left_feat,left_shift],3)\n",
    "\n",
    "                _,f,h_ap,w_ap = left_appended.shape\n",
    "                left_shifted[:,:,:,:] = left_appended[:,:,:,disp:w_ap]\n",
    "\n",
    "                sim_score = cos(right_feat, left_shifted)\n",
    "                cost_volT[disp,:,:] = torch.squeeze(sim_score)\n",
    "                \n",
    "    return cost_volT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE: Tuple, torch, nn and F are already imported in the first cell of\n",
    "# this notebook -- the duplicated imports were removed so that all\n",
    "# dependencies are declared in one place.\n",
    "\n",
    "def _compute_binary_kernel(window_size: Tuple[int, int]) -> torch.Tensor:\n",
    "    r\"\"\"Creates a binary kernel to extract the patches. If the window size\n",
    "    is HxW will create a (H*W)xHxW kernel.\n",
    "    \"\"\"\n",
    "    window_range: int = window_size[0] * window_size[1]\n",
    "    kernel: torch.Tensor = torch.zeros(window_range, window_range)\n",
    "    for i in range(window_range):\n",
    "        kernel[i, i] += 1.0\n",
    "    return kernel.view(window_range, 1, window_size[0], window_size[1])\n",
    "\n",
    "\n",
    "def _compute_zero_padding(kernel_size: Tuple[int, int]) -> Tuple[int, int]:\n",
    "    r\"\"\"Utility function that computes zero padding tuple.\"\"\"\n",
    "    computed: Tuple[int, ...] = tuple([(k - 1) // 2 for k in kernel_size])\n",
    "    return computed[0], computed[1]\n",
    "\n",
    "\n",
    "class MedianBlur(nn.Module):\n",
    "    r\"\"\"Blurs an image using the median filter.\n",
    "\n",
    "    Args:\n",
    "        kernel_size (Tuple[int, int]): the blurring kernel size.\n",
    "\n",
    "    Returns:\n",
    "        torch.Tensor: the blurred input tensor.\n",
    "\n",
    "    Shape:\n",
    "        - Input: :math:`(B, C, H, W)`\n",
    "        - Output: :math:`(B, C, H, W)`\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, kernel_size: Tuple[int, int]) -> None:\n",
    "        super(MedianBlur, self).__init__()\n",
    "        self.kernel: torch.Tensor = _compute_binary_kernel(kernel_size)\n",
    "        self.padding: Tuple[int, int] = _compute_zero_padding(kernel_size)\n",
    "\n",
    "    def forward(self, input: torch.Tensor):  # type: ignore\n",
    "        if not torch.is_tensor(input):\n",
    "            raise TypeError(\"Input type is not a torch.Tensor. Got {}\"\n",
    "                            .format(type(input)))\n",
    "        if not len(input.shape) == 4:\n",
    "            raise ValueError(\"Invalid input shape, we expect BxCxHxW. Got: {}\"\n",
    "                             .format(input.shape))\n",
    "        # prepare kernel\n",
    "        b, c, h, w = input.shape\n",
    "        tmp_kernel: torch.Tensor = self.kernel.to(input.device).to(input.dtype)\n",
    "        kernel: torch.Tensor = tmp_kernel.repeat(c, 1, 1, 1)\n",
    "\n",
    "        # map the local window to single vector\n",
    "        features: torch.Tensor = F.conv2d(\n",
    "            input, kernel, padding=self.padding, stride=1, groups=c)\n",
    "        features = features.view(b, c, -1, h, w)  # BxCx(K_h * K_w)xHxW\n",
    "\n",
    "        # compute the median along the feature axis\n",
    "        median: torch.Tensor = torch.median(features, dim=2)[0]\n",
    "        return median\n",
    "\n",
    "\n",
    "# functional api\n",
    "def median_blur(input: torch.Tensor,\n",
    "                kernel_size: Tuple[int, int]) -> torch.Tensor:\n",
    "    r\"\"\"Blurs an image using the median filter.\n",
    "\n",
    "    See :class:`MedianBlur` for details. Note: constructs a fresh\n",
    "    MedianBlur module on every call; hoist it if profiling shows overhead.\n",
    "    \"\"\"\n",
    "    return MedianBlur(kernel_size)(input)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def filterCostVolMedianPyt(cost_vol):\n",
    "    \"\"\"Run a 5x5 median filter over every disparity slice of the volume.\n",
    "\n",
    "    The slices are smoothed in place; the (D, H, W) result is returned.\n",
    "    \"\"\"\n",
    "    num_disps = cost_vol.shape[0]\n",
    "    volume = cost_vol.unsqueeze(0)  # batch axis for median_blur: (1, D, H, W)\n",
    "\n",
    "    for d in range(num_disps):\n",
    "        slice_4d = volume[:, d, :, :].unsqueeze(0)  # (1, 1, H, W)\n",
    "        volume[:, d, :, :] = median_blur(slice_4d, (5, 5))\n",
    "\n",
    "    return torch.squeeze(volume)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# GuidedFilter is already imported in the first cell; the duplicated\n",
    "# import and the unused 'import math' were removed.\n",
    "def filterCostVolBilatpyt(cost_vol,left):\n",
    "    \"\"\"Edge-aware smoothing of each disparity slice with a guided filter.\n",
    "\n",
    "    cost_vol: (D, H, W) CUDA tensor, modified in place and returned.\n",
    "    left: colour guidance image (H, W, 3); averaged to greyscale.\n",
    "    \"\"\"\n",
    "    left = np.mean(left,axis=2)\n",
    "    leftT = Variable(Tensor(left))\n",
    "    leftT = leftT.unsqueeze(0).unsqueeze(0)\n",
    "\n",
    "    d,h,w = cost_vol.shape\n",
    "\n",
    "    f = GuidedFilter(8,10).cuda()  # radius 8, eps 10 (0.001 was tried before)\n",
    "\n",
    "    for disp in range(d):\n",
    "        cur_slice = cost_vol[disp,:,:]\n",
    "        cur_slice = cur_slice.unsqueeze(0).unsqueeze(0)\n",
    "\n",
    "        # torch.squeeze instead of np.squeeze: the result is a CUDA tensor,\n",
    "        # which numpy only handled through duck typing\n",
    "        cost_vol[disp,:,:] = torch.squeeze(f(leftT, cur_slice))\n",
    "\n",
    "    return cost_vol"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext cython"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def writePFMcyt(file, image, scale=1):\n",
    "    \"\"\"Write an image to 'file' in PFM format (little-endian variant).\n",
    "\n",
    "    Unlike writePFM this skips the float32 dtype check and always writes a\n",
    "    negative scale, i.e. it assumes little-endian float pixel data.\n",
    "    \"\"\"\n",
    "    # PFM stores scanlines bottom-to-top\n",
    "    image = np.flipud(image)\n",
    "\n",
    "    if len(image.shape) == 3 and image.shape[2] == 3:  # color image\n",
    "        color = True\n",
    "    elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1:  # greyscale\n",
    "        color = False\n",
    "    else:\n",
    "        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')\n",
    "\n",
    "    # a negative scale in the header marks little-endian pixel data\n",
    "    scale = -scale\n",
    "\n",
    "    # context manager flushes and closes the handle (the original leaked it);\n",
    "    # the unused 'endian' local was removed\n",
    "    with open(file, 'wb') as fh:\n",
    "        fh.write('PF\\n'.encode() if color else 'Pf\\n'.encode())\n",
    "        fh.write('%d %d\\n'.encode() % (image.shape[1], image.shape[0]))\n",
    "        fh.write('%f\\n'.encode() % scale)\n",
    "        image.tofile(fh)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def TestImage(fn_left, fn_right, max_disp, im_to_save, filtered = True, lr_check = True):\n",
    "    \"\"\"Predict the disparity map for one rectified stereo pair.\n",
    "\n",
    "    fn_left/fn_right: image paths, read with cv2.imread.\n",
    "    max_disp: largest disparity searched (inclusive).\n",
    "    im_to_save: unused here; kept so existing call sites stay valid.\n",
    "    filtered: median + guided-filter the cost volumes before the argmax.\n",
    "    lr_check: also build the right-to-left volume and mask inconsistencies.\n",
    "\n",
    "    Returns (disp_map, disp): disp is the raw argmax disparity; disp_map is\n",
    "    the LR-checked map. NOTE(review): when lr_check is False, disp_map is\n",
    "    returned as the initial empty list -- callers must use disp then.\n",
    "    \"\"\"\n",
    "    left = cv2.imread(fn_left)\n",
    "    right = cv2.imread(fn_right)\n",
    "    disp_map = []\n",
    "    \n",
    "    if(filtered):\n",
    "        \n",
    "        cost_vol = createCostVol(left,right,max_disp)\n",
    "        cost_vol = filterCostVolMedianPyt(cost_vol) \n",
    "        \n",
    "        cost_vol_filteredn = filterCostVolBilatpyt(cost_vol,left)\n",
    "        cost_vol_filteredn = np.squeeze(cost_vol_filteredn.cpu().data.numpy())        \n",
    "        # winner-takes-all along the disparity axis\n",
    "        disp = np.argmax(cost_vol_filteredn, axis=0)         \n",
    "        \n",
    "        if(lr_check):\n",
    "            cost_vol_RL = createCostVolRL(left,right,max_disp)\n",
    "            cost_vol_RL = filterCostVolMedianPyt(cost_vol_RL)\n",
    "            \n",
    "            cost_vol_RL_fn = filterCostVolBilatpyt(cost_vol_RL,right)\n",
    "            cost_vol_RL_fn = np.squeeze(cost_vol_RL_fn.cpu().data.numpy())        \n",
    "            \n",
    "            disp_map_RL = np.argmax(cost_vol_RL_fn, axis=0)  \n",
    "            disp_map = LR_Check(disp.astype(np.float32), disp_map_RL.astype(np.float32))\n",
    "        \n",
    "    else:\n",
    "        \n",
    "        cost_vol = createCostVol(left,right,max_disp)\n",
    "        \n",
    "        cost_vol = np.squeeze(cost_vol.cpu().data.numpy())\n",
    "        disp = np.argmax(cost_vol, axis=0)        \n",
    "        \n",
    "        if(lr_check):\n",
    "            \n",
    "            cost_vol_RL = createCostVolRL(left,right,max_disp)\n",
    "            cost_vol_RL = np.squeeze(cost_vol_RL.cpu().data.numpy())\n",
    "            disp_map_RL = np.argmax(cost_vol_RL, axis=0) \n",
    "                        \n",
    "            disp_map = LR_Check(disp.astype(np.float32), disp_map_RL.astype(np.float32))\n",
    "                    \n",
    "    return disp_map, disp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%cython -a\n",
    "import numpy as np\n",
    "import cython\n",
    "# Fill NaN (LR-inconsistent) disparities in two passes:\n",
    "#  pass 1: background pixels (segmentation mask == 0) are filled by scanning\n",
    "#          the row for the next valid disparity (to the right if everything\n",
    "#          left of the pixel is NaN, otherwise to the left);\n",
    "#  pass 2: remaining (foreground) pixels take the median of the nearest\n",
    "#          valid disparity found in 8 directions, each search capped at\n",
    "#          max_search pixels.\n",
    "# NOTE(review): wraparound is disabled but some searches index with\n",
    "# curnanw-counter, which can reach negative values before the bound check\n",
    "# triggers -- confirm the mask/disp extents guarantee this stays in range.\n",
    "@cython.wraparound(False)\n",
    "cpdef float[:, :] FillIncons(unsigned char[:, :] mask, float[:, :] disp):\n",
    "\n",
    "    cpdef int curnan, curnanh, curnanw,curw, w, h ,left, right, above, under, r_above, l_above, r_under, l_under\n",
    "    cpdef float fill  \n",
    "    cpdef int max_search\n",
    "    \n",
    "    max_search = 300\n",
    "    \n",
    "    w = mask.shape[1]\n",
    "    h = mask.shape[0] \n",
    "    \n",
    "    # pass 1: background (mask == 0) pixels, filled along the scanline\n",
    "    idc = np.argwhere(np.isnan(disp))    \n",
    "    for curnan in range(len(idc)):\n",
    "        \n",
    "        curnanh = idc[curnan][0]\n",
    "        curnanw = idc[curnan][1]        \n",
    "        if(mask[curnanh,curnanw] == 0):\n",
    "            \n",
    "            #whole scanline is nan => disp is 0\n",
    "            if(all(np.isnan(disp[curnanh,:]))):\n",
    "                #hole line set to 0!\n",
    "                disp[curnanh,:] = 0.0\n",
    "                \n",
    "            #all px to the left are NaN\n",
    "            if(all(np.isnan(disp[curnanh,0:curnanw]))):\n",
    "                #go to the right\n",
    "                curw = curnanw\n",
    "                fill = 0\n",
    "                while(np.isnan(disp[curnanh,curw]) and mask[curnanh,curnanw] == 0):\n",
    "                    curw = curw +1\n",
    "                    fill = disp[curnanh,curw]\n",
    "                disp[curnanh,curnanw] = fill\n",
    "                \n",
    "            #else go left\n",
    "            else:\n",
    "                curw = curnanw\n",
    "                fill = 0\n",
    "                while(np.isnan(disp[curnanh,curw]) and mask[curnanh,curnanw] == 0):\n",
    "                    curw = curw -1\n",
    "                    fill = disp[curnanh,curw]\n",
    "                disp[curnanh,curnanw] = fill \n",
    "    \n",
    "    #FG\n",
    "    # pass 2: remaining pixels get the median of the nearest valid value\n",
    "    # in the 8 compass directions (0 when the search hits a border/cap)\n",
    "    idcFG = np.argwhere(np.isnan(disp))\n",
    "    for curnan in range(len(idcFG)):\n",
    "        \n",
    "        curnanh = idcFG[curnan][0]\n",
    "        curnanw = idcFG[curnan][1]\n",
    "      \n",
    "        left = 0\n",
    "        right = 0\n",
    "        above = 0\n",
    "        under = 0\n",
    "\n",
    "        r_above = 0\n",
    "        l_above = 0\n",
    "        r_under = 0\n",
    "        l_under = 0      \n",
    "        \n",
    "        \n",
    "        if(curnanw == 0):\n",
    "            left = 0\n",
    "        else:\n",
    "            left = int(disp[curnanh,curnanw-1])\n",
    "            \n",
    "        counter = 0                                    \n",
    "        while(np.isnan(disp[curnanh,curnanw+counter])):\n",
    "            counter = counter +1                       \n",
    "            if((curnanw+counter) >= w or counter >= max_search):\n",
    "                right = 0\n",
    "                break\n",
    "            right = int(disp[curnanh,curnanw+counter])\n",
    "        \n",
    "        counter = 0                                    \n",
    "        while(np.isnan(disp[curnanh+counter,curnanw])):\n",
    "            counter = counter +1                       \n",
    "            if((curnanh+counter) >= h or counter >= max_search):\n",
    "                above = 0\n",
    "                break       \n",
    "            above = int(disp[curnanh+counter,curnanw])\n",
    "             \n",
    "        if(curnanh == 0):\n",
    "            under = 0\n",
    "        else:\n",
    "            under = int(disp[curnanh-1,curnanw])\n",
    "        \n",
    "        \n",
    "        counter = 0                                    \n",
    "        while(np.isnan(disp[curnanh+counter,curnanw+counter])):\n",
    "            counter = counter +1\n",
    "            if((curnanh+counter) >= h or counter >= max_search):\n",
    "                r_above = 0\n",
    "                break\n",
    "            if((curnanw+counter) >= w):\n",
    "                r_above = 0\n",
    "                break                        \n",
    "            r_above = int(disp[curnanh+counter,curnanw+counter])     \n",
    "        \n",
    "        if(curnanh == 0 or curnanw == 0):\n",
    "            l_under = 0\n",
    "        else:\n",
    "            l_under = int(disp[curnanh-1,curnanw-1])\n",
    "             \n",
    "        \n",
    "        counter = 0      \n",
    "        while(np.isnan(disp[curnanh+counter,curnanw-counter])):\n",
    "            counter = counter +1\n",
    "            if((curnanh+counter) >= h):\n",
    "                l_above = 0\n",
    "                break\n",
    "            if((curnanw-counter) <= 0 or counter >= max_search):\n",
    "                l_above = 0\n",
    "                break\n",
    "            l_above = int(disp[curnanh+counter,curnanw-counter])\n",
    "\n",
    "        if(curnanh == 0 or curnanw >= w-1):\n",
    "            r_under = 0\n",
    "        else:\n",
    "            r_under = int(disp[curnanh-1,curnanw+1])\n",
    "        \n",
    "        \n",
    "        fill = np.median([left,right,above,under,r_above,l_above,r_under,l_under])\n",
    "        disp[curnanh,curnanw] = fill\n",
    "\n",
    "    return disp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#even further improve this by using pytorch!\n",
    "def LR_Check(first_output, second_output):    \n",
    "    \n",
    "    h,w = first_output.shape\n",
    "        \n",
    "    line = np.array(range(0, w))\n",
    "    idx_arr = np.matlib.repmat(line,h,1)    \n",
    "    \n",
    "    dif = idx_arr - first_output\n",
    "    \n",
    "    first_output[np.where(dif <= 0)] = 0\n",
    "    \n",
    "    first_output = first_output.astype(np.int)\n",
    "    second_output = second_output.astype(np.int)\n",
    "    dif = dif.astype(np.int)\n",
    "    \n",
    "    second_arr_reordered = np.array(list(map(lambda x, y: y[x], dif, second_output)))\n",
    "    \n",
    "    dif_LR = np.abs(second_arr_reordered - first_output)\n",
    "    first_output[np.where(dif_LR >= 1.1)] = 0\n",
    "    \n",
    "    first_output = first_output.astype(np.float32)\n",
    "    first_output[np.where(first_output == 0.0)] = np.nan\n",
    "    \n",
    "    #only for MB!\n",
    "    if(dataset == 'MB'):\n",
    "        first_output[np.where(first_output <= 18)] = np.nan\n",
    "    if(dataset == 'KITTI2012'):\n",
    "        first_output[np.where(first_output <= 2)] = np.nan\n",
    "        \n",
    "    return first_output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calcEPE(disp, gt_fn):\n",
    "    \n",
    "    gt = gt_fn\n",
    "\n",
    "    gt[np.where(gt == np.inf)] = -100   \n",
    "    \n",
    "    mask = gt > 0\n",
    "\n",
    "    disp = disp[mask]\n",
    "    gt = gt[mask]        \n",
    "\n",
    "    nr_px = len(gt)\n",
    "\n",
    "\n",
    "    abs_error_im = np.abs(disp - gt)\n",
    "\n",
    "    five_pe = (float(np.count_nonzero(abs_error_im >= 5.0) ) / nr_px) * 100.0  \n",
    "    four_pe = (float(np.count_nonzero(abs_error_im >= 4.0) ) / nr_px) * 100.0  \n",
    "    three_pe = (float(np.count_nonzero(abs_error_im >= 3.0) ) / nr_px) * 100.0  \n",
    "    two_pe = (float(np.count_nonzero(abs_error_im >= 2.0) ) / nr_px) * 100.0        \n",
    "    one_pe = (float(np.count_nonzero(abs_error_im >= 1.0) ) / nr_px) * 100.0        \n",
    "    pf_pe = (float(np.count_nonzero(abs_error_im >= 0.5) ) / nr_px) * 100.0  \n",
    "        \n",
    "    return five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "s_count = 1\n",
    "t_count = 0.0\n",
    "\n",
    "if(getEPE):\n",
    "    avg_five_pe = 0.0\n",
    "    avg_four_pe = 0.0 \n",
    "    avg_three_pe = 0.0 \n",
    "    avg_two_pe = 0.0\n",
    "    avg_one_pe = 0.0\n",
    "    avg_pf_pe = 0.0\n",
    "\n",
    "\n",
    "if(dataset == 'MB'):\n",
    "\n",
    "    nr_samples = len(glob.glob(input_folder))\n",
    "    for samples in glob.glob(input_folder):\n",
    "        \n",
    "        if(getEPE):\n",
    "            gt,_ = readPFM(samples + 'disp0GT.pfm')\n",
    "\n",
    "        f = open(samples + 'calib.txt','r')\n",
    "        calib = f.read()\n",
    "        max_disp = int(calib.split('\\n')[6].split(\"=\")[1])\n",
    "\n",
    "        print(\"Now processing: {} {} of {}\".format(samples.split('/')[-2],s_count, len(glob.glob('/home/dominik/tensorflow-mc-cnn/MiddEval3/*/*/'))))\n",
    "\n",
    "        t = time.time()\n",
    "        \n",
    "        name = samples.split('/')[-2]\n",
    "        disp_name = output_folder + '/disp0'+algo_name + name + '.pfm'\n",
    "        disp_name_s = output_folder + '/disp0'+algo_name + name + '_s.pfm'\n",
    "        disp_name_filled = output_folder + '/disp0'+algo_name + name + '_filled.pfm'\n",
    "\n",
    "        disp = None\n",
    "        disp_s = None\n",
    "        \n",
    "        if(lr_check):\n",
    "            disp_s,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = True)\n",
    "        else:\n",
    "            _,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = False)\n",
    "\n",
    "        if(not fill_incons):\n",
    "            s_count = s_count + 1\n",
    "            elapsed = time.time() - t\n",
    "\n",
    "        \n",
    "        folder = samples\n",
    "\n",
    "        \n",
    "        if(fill_incons):\n",
    "            \n",
    "            #do it dynamically\n",
    "            disp_s_arr = np.array(disp_s)\n",
    "            im_disp = Image.fromarray(disp_s_arr) \n",
    "            im_disp = np.dstack((im_disp, im_disp, im_disp)).astype(np.uint8)    \n",
    "\n",
    "            h,w = disp_s.shape\n",
    "            shifted = cv2.pyrMeanShiftFiltering(im_disp, 7, 7)\n",
    "\n",
    "            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\n",
    "            thresh = cv2.threshold(gray, 0, 1,\n",
    "                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n",
    "            \n",
    "            if(debug_mode):\n",
    "                cv2.imwrite(output_folder + name + 'bilat_and_med_mask.png',thresh * 255)            \n",
    "            \n",
    "            \n",
    "            disp_filled  = FillIncons(thresh, disp_s_arr)\n",
    "            s_count = s_count + 1\n",
    "            elapsed = time.time() - t\n",
    "\n",
    "            \n",
    "        if(fill_incons):\n",
    "            disp = np.array(disp)\n",
    "            disp_filled = np.array(disp_filled)\n",
    "        else:\n",
    "            disp = np.array(disp)\n",
    "\n",
    "\n",
    "        if(getEPE):\n",
    "        \n",
    "            gt = np.array(gt)\n",
    "            if(fill_incons):\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp_filled, gt)    \n",
    "            else:\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp, gt)\n",
    "            \n",
    "            \n",
    "            avg_five_pe = avg_five_pe + five_pe\n",
    "            avg_four_pe = avg_four_pe +  four_pe\n",
    "            avg_three_pe = avg_three_pe + three_pe\n",
    "            avg_two_pe = avg_two_pe + two_pe\n",
    "            avg_one_pe = avg_one_pe + one_pe\n",
    "            avg_pf_pe = avg_pf_pe + pf_pe\n",
    "            \n",
    "            \n",
    "        if(debug_mode):\n",
    "            writePFMcyt(disp_name,disp_filled) \n",
    "        \n",
    "        else:\n",
    "            \n",
    "            if(fill_incons):\n",
    "                \n",
    "                writePFMcyt(disp_name,disp.astype(np.float32))                 \n",
    "                writePFMcyt(disp_name_filled,disp_filled.astype(np.float32)) \n",
    "                writePFMcyt(disp_name_s,disp_s)\n",
    "            else:\n",
    "                writePFMcyt(disp_name,disp.astype(np.float32))\n",
    "                if(lr_check):\n",
    "                    writePFMcyt(disp_name_s,disp_s) \n",
    "\n",
    "        text_file = open(output_folder + '/time'+algo_name + name + '.txt', \"w\")\n",
    "        n = text_file.write(str(elapsed))\n",
    "        text_file.close()\n",
    "        t_count = t_count + elapsed\n",
    "\n",
    "    print(\"Average execution time: {}\".format(t_count / nr_samples)) \n",
    "    \n",
    "    if(getEPE):\n",
    "        print(\"Average 5-PE: {}\".format(avg_five_pe / nr_samples))\n",
    "        print(\"Average 4-PE: {}\".format(avg_four_pe / nr_samples))\n",
    "        print(\"Average 3-PE: {}\".format(avg_three_pe / nr_samples))\n",
    "        print(\"Average 2-PE: {}\".format(avg_two_pe / nr_samples))\n",
    "        print(\"Average 1-PE: {}\".format(avg_one_pe / nr_samples))\n",
    "        print(\"Average 0.5-PE: {}\".format(avg_pf_pe / nr_samples))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "s_count = 1\n",
    "t_count = 0.0\n",
    "\n",
    "if(getEPE):\n",
    "    avg_five_pe = 0.0\n",
    "    avg_four_pe = 0.0 \n",
    "    avg_three_pe = 0.0 \n",
    "    avg_two_pe = 0.0\n",
    "    avg_one_pe = 0.0\n",
    "    avg_pf_pe = 0.0\n",
    "\n",
    "if(dataset == 'MBTest'):\n",
    "    nr_samples = len(glob.glob(input_folder))\n",
    "    for samples in glob.glob(input_folder):\n",
    "        \n",
    "        f = open(samples + 'calib.txt','r')\n",
    "        calib = f.read()\n",
    "        max_disp = int(calib.split('\\n')[6].split(\"=\")[1])\n",
    "\n",
    "        print(\"Now processing: {} {} of {}\".format(samples.split('/')[-2],s_count, len(glob.glob('/home/dominik/tensorflow-mc-cnn/MiddEval3/*/*/'))))\n",
    "\n",
    "        t = time.time()\n",
    "        \n",
    "        name = samples.split('/')[-2]\n",
    "        disp_name = output_folder + '/disp0'+algo_name + name + '.pfm'\n",
    "        disp_name_s = output_folder + '/disp0'+algo_name + name + '_s.pfm'\n",
    "        disp_name_filled = output_folder + '/disp0'+algo_name + name + '_filled.pfm'\n",
    "\n",
    "        disp = None\n",
    "        disp_s = None\n",
    "        \n",
    "        if(lr_check):\n",
    "            disp_s,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = True)\n",
    "        else:\n",
    "            _,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = False)\n",
    "\n",
    "        if(not fill_incons):\n",
    "            s_count = s_count + 1\n",
    "            elapsed = time.time() - t\n",
    "\n",
    "        \n",
    "        folder = samples\n",
    "\n",
    "        \n",
    "        if(fill_incons):\n",
    "            \n",
    "            #do it dynamically\n",
    "            disp_s_arr = np.array(disp_s)\n",
    "            im_disp = Image.fromarray(disp_s_arr) \n",
    "            im_disp = np.dstack((im_disp, im_disp, im_disp)).astype(np.uint8)    \n",
    "\n",
    "            h,w = disp_s.shape\n",
    "            shifted = cv2.pyrMeanShiftFiltering(im_disp, 7, 7)\n",
    "\n",
    "            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\n",
    "            thresh = cv2.threshold(gray, 0, 1,\n",
    "                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]            \n",
    "            \n",
    "            disp_filled  = FillIncons(thresh, disp_s_arr)\n",
    "            s_count = s_count + 1\n",
    "            elapsed = time.time() - t\n",
    "\n",
    "            \n",
    "        if(fill_incons):\n",
    "            disp = np.array(disp)\n",
    "            disp_filled = np.array(disp_filled)\n",
    "        else:\n",
    "            disp = np.array(disp)\n",
    "\n",
    "\n",
    "            \n",
    "        if(fill_incons):\n",
    "\n",
    "            writePFMcyt(disp_name,disp.astype(np.float32))                 \n",
    "            writePFMcyt(disp_name_filled,disp_filled.astype(np.float32)) \n",
    "            writePFMcyt(disp_name_s,disp_s)\n",
    "        else:\n",
    "            writePFMcyt(disp_name,disp.astype(np.float32))\n",
    "            if(lr_check):\n",
    "                writePFMcyt(disp_name_s,disp_s) \n",
    "\n",
    "        text_file = open(output_folder + '/time'+algo_name + name + '.txt', \"w\")\n",
    "        n = text_file.write(str(elapsed))\n",
    "        text_file.close()\n",
    "        t_count = t_count + elapsed\n",
    "\n",
    "    print(\"Average execution time: {}\".format(t_count / nr_samples)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#saves the results as 16bit png as well as pfm\n",
    "#if a submission is wanted comment out pfm saving\n",
    "s_count = 1\n",
    "t_count = 0.0\n",
    "\n",
    "\n",
    "if(getEPE):\n",
    "    avg_five_pe = 0.0\n",
    "    avg_four_pe = 0.0 \n",
    "    avg_three_pe = 0.0 \n",
    "    avg_two_pe = 0.0\n",
    "    avg_one_pe = 0.0\n",
    "    avg_pf_pe = 0.0    \n",
    "    \n",
    "if(dataset == 'KITTI2015' or dataset =='KITTI2015Test'):\n",
    "    \n",
    "    os.system('mkdir ' + output_folder + '/disp_0')\n",
    "    \n",
    "    for samples in range(len(inters_list)):\n",
    "\n",
    "        left = input_folder + '/image_2/' + inters_list[samples]\n",
    "        right = input_folder + '/image_3/' + inters_list[samples]\n",
    "\n",
    "        max_disp = 228\n",
    "\n",
    "        print(\"Now processing: {} {} of {}\".format(inters_list[samples],samples,len(inters_list)))\n",
    "\n",
    "        \n",
    "        disp_name = output_folder + inters_list[samples]\n",
    "\n",
    "        t = time.time()\n",
    "        disp = None\n",
    "        if(lr_check):\n",
    "            disp_s,disp = TestImage(left, right, max_disp, disp_name, filtered = is_filtered, lr_check = True)            \n",
    "        else:\n",
    "            _,disp = TestImage(left, right,  max_disp, disp_name, filtered = is_filtered, lr_check = False)\n",
    "            torch.cuda.empty_cache()\n",
    "\n",
    "        if(fill_incons):\n",
    "            disp_s = np.array(disp_s)\n",
    "            im_disp = Image.fromarray(disp_s) \n",
    "            im_disp = np.dstack((im_disp, im_disp, im_disp)).astype(np.uint8)    \n",
    "\n",
    "            h,w = disp_s.shape\n",
    "\n",
    "            shifted = cv2.pyrMeanShiftFiltering(im_disp, 7, 7)\n",
    "\n",
    "            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\n",
    "            thresh = cv2.threshold(gray, 0, 1,\n",
    "                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n",
    "\n",
    "            kernel = np.ones((5,5), np.uint8)\n",
    "\n",
    "            dilation = cv2.dilate(thresh,kernel,iterations = 2)\n",
    "            mask = cv2.erode(dilation, kernel, iterations=2)    \n",
    "\n",
    "            if(debug_mode):\n",
    "                cv2.imwrite(output_folder + inters_list[samples] + 'bilat_and_med_mask.png',mask * 255)\n",
    "\n",
    "            disp_filled = FillIncons(mask, disp_s)\n",
    "            \n",
    "            \n",
    "        elapsed = time.time() - t\n",
    "        t_count = t_count + elapsed\n",
    "\n",
    "        \n",
    "        if(fill_incons):\n",
    "            disp_filled = np.asarray(disp_filled)\n",
    "        else:\n",
    "            disp = np.asarray(disp)\n",
    "            \n",
    "        \n",
    "        if(getEPE):\n",
    "            gt = gt_folder + inters_list[samples]\n",
    "            gt_fn = cv2.imread(gt)\n",
    "            gt_fn = np.mean(gt_fn, axis=2)\n",
    "            if(fill_incons):\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp_filled, gt_fn)\n",
    "            else:\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp, gt_fn)\n",
    "                \n",
    "            avg_five_pe = avg_five_pe + five_pe\n",
    "            avg_four_pe = avg_four_pe +  four_pe\n",
    "            avg_three_pe = avg_three_pe + three_pe\n",
    "            avg_two_pe = avg_two_pe + two_pe\n",
    "            avg_one_pe = avg_one_pe + one_pe\n",
    "            avg_pf_pe = avg_pf_pe + pf_pe   \n",
    "        \n",
    "        s_count = s_count + 1\n",
    "        \n",
    "        if(fill_incons):\n",
    "            skimage.io.imsave(output_folder + '/disp_0/' + inters_list[samples],(disp_filled*256).astype('uint16'))\n",
    "        else:\n",
    "            skimage.io.imsave(output_folder + inters_list[samples],(disp*256).astype('uint16'))\n",
    "\n",
    "    print(\"Average execution time: {}\".format(t_count / len(inters_list))) \n",
    "    \n",
    "    if(getEPE):\n",
    "        print(\"Average 5-PE: {}\".format(avg_five_pe / len(inters_list)))\n",
    "        print(\"Average 4-PE: {}\".format(avg_four_pe / len(inters_list)))\n",
    "        print(\"Average 3-PE: {}\".format(avg_three_pe / len(inters_list)))\n",
    "        print(\"Average 2-PE: {}\".format(avg_two_pe / len(inters_list)))\n",
    "        print(\"Average 1-PE: {}\".format(avg_one_pe / len(inters_list)))\n",
    "        print(\"Average 0.5-PE: {}\".format(avg_pf_pe / len(inters_list)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "#saves the results as 16bit png as well as pfm\n",
    "#if a submission is wanted comment out pfm saving\n",
    "s_count = 1\n",
    "t_count = 0.0\n",
    "\n",
    "\n",
    "if(getEPE):\n",
    "    avg_five_pe = 0.0\n",
    "    avg_four_pe = 0.0 \n",
    "    avg_three_pe = 0.0 \n",
    "    avg_two_pe = 0.0\n",
    "    avg_one_pe = 0.0\n",
    "    avg_pf_pe = 0.0    \n",
    "    \n",
    "if(dataset == 'KITTI2012' or dataset == 'KITTI2012Test'):\n",
    "    \n",
    "    for samples in range(len(inters_list)):\n",
    "\n",
    "        left = input_folder + '/colored_0/' + inters_list[samples]\n",
    "        right = input_folder + '/colored_1/' + inters_list[samples]\n",
    "        max_disp = 230\n",
    "        \n",
    "        print(\"Now processing: {} {} of {}\".format(inters_list[samples],samples,len(inters_list)))\n",
    "        disp_name = output_folder + inters_list[samples]\n",
    "\n",
    "        t = time.time()\n",
    "        disp = None\n",
    "        if(lr_check):\n",
    "            disp_s,disp = TestImage(left, right, max_disp, disp_name, filtered = is_filtered, lr_check = True)\n",
    "        else:\n",
    "            _,disp = TestImage(left, right,  max_disp, disp_name, filtered = is_filtered, lr_check = False)\n",
    "        if(fill_incons):\n",
    "            disp_s = np.array(disp_s)\n",
    "            im_disp = Image.fromarray(disp_s) \n",
    "            im_disp = np.dstack((im_disp, im_disp, im_disp)).astype(np.uint8)    \n",
    "\n",
    "            h,w = disp_s.shape\n",
    "\n",
    "            shifted = cv2.pyrMeanShiftFiltering(im_disp, 5, 5)\n",
    "\n",
    "            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\n",
    "            thresh = cv2.threshold(gray, 0, 1,\n",
    "                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n",
    "\n",
    "            kernel = np.ones((5,5), np.uint8)\n",
    "\n",
    "            dilation = cv2.dilate(thresh,kernel,iterations = 3)\n",
    "            mask = cv2.erode(dilation, kernel, iterations=2)\n",
    "            disp_filled = FillIncons(mask, disp_s)\n",
    "            \n",
    "\n",
    "        elapsed = time.time() - t\n",
    "        t_count = t_count + elapsed\n",
    "  \n",
    "        if(fill_incons):\n",
    "            disp_filled = np.asarray(disp_filled)\n",
    "        else:\n",
    "            disp = np.asarray(disp)      \n",
    "        \n",
    "        if(getEPE):\n",
    "            gt = gt_folder + inters_list[samples]\n",
    "            gt_fn = cv2.imread(gt)\n",
    "            gt_fn = np.mean(gt_fn, axis=2)\n",
    "\n",
    "            if(fill_incons):\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp_filled, gt_fn)\n",
    "            else:\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp, gt_fn)\n",
    "                \n",
    "            avg_five_pe = avg_five_pe + five_pe\n",
    "            avg_four_pe = avg_four_pe +  four_pe\n",
    "            avg_three_pe = avg_three_pe + three_pe\n",
    "            avg_two_pe = avg_two_pe + two_pe\n",
    "            avg_one_pe = avg_one_pe + one_pe\n",
    "            avg_pf_pe = avg_pf_pe + pf_pe   \n",
    "        \n",
    "        s_count = s_count + 1\n",
    "        \n",
    "        if(fill_incons):\n",
    "            skimage.io.imsave(output_folder + inters_list[samples],(disp_filled*256).astype('uint16'))    \n",
    "        else:\n",
    "            skimage.io.imsave(output_folder + inters_list[samples],(disp*256).astype('uint16'))\n",
    "\n",
    "    print(\"Average execution time: {}\".format(t_count / len(inters_list))) \n",
    "    \n",
    "    if(getEPE):\n",
    "        print(\"Average 5-PE: {}\".format(avg_five_pe / len(inters_list)))\n",
    "        print(\"Average 4-PE: {}\".format(avg_four_pe / len(inters_list)))\n",
    "        print(\"Average 3-PE: {}\".format(avg_three_pe / len(inters_list)))\n",
    "        print(\"Average 2-PE: {}\".format(avg_two_pe / len(inters_list)))\n",
    "        print(\"Average 1-PE: {}\".format(avg_one_pe / len(inters_list)))\n",
    "        print(\"Average 0.5-PE: {}\".format(avg_pf_pe / len(inters_list)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "if(dataset == 'ETH'):\n",
    "    \n",
    "    \n",
    "    if(getEPE):\n",
    "        avg_five_pe = 0.0\n",
    "        avg_four_pe = 0.0 \n",
    "        avg_three_pe = 0.0 \n",
    "        avg_two_pe = 0.0\n",
    "        avg_one_pe = 0.0\n",
    "        avg_pf_pe = 0.0    \n",
    "    \n",
    "    nr_samples = len(glob.glob(input_folder))\n",
    "    for samples in glob.glob(input_folder):\n",
    "\n",
    "        \n",
    "        gt,_ = readPFM(samples + 'disp0GT.pfm')\n",
    "        \n",
    "        max_disp =  int(np.ceil(gt[np.isfinite(gt)].max())) + 1\n",
    "        s_name = samples.split('/')[-2]\n",
    "        \n",
    "\n",
    "        print(\"Now processing: {} {} of {}\".format(samples.split('/')[-2],s_count, nr_samples))\n",
    "\n",
    "        t = time.time()\n",
    "\n",
    "        disp_name = output_folder + s_name + '.pfm'\n",
    "\n",
    "        disp = None\n",
    "        if(lr_check):\n",
    "            disp_s,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = True)        \n",
    "        else:\n",
    "            _,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = False)\n",
    "\n",
    "        folder = samples\n",
    "        \n",
    "        if(fill_incons):\n",
    "            disp_s_arr = np.array(disp_s)\n",
    "            im_disp = Image.fromarray(disp_s_arr) \n",
    "            im_disp = np.dstack((im_disp, im_disp, im_disp)).astype(np.uint8)    \n",
    "\n",
    "            h,w = disp_s.shape\n",
    "            \n",
    "            shifted = cv2.pyrMeanShiftFiltering(im_disp, 3, 3)\n",
    "\n",
    "            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\n",
    "            thresh = cv2.threshold(gray, 0, 1,\n",
    "                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n",
    "\n",
    "            kernel = np.ones((5,5), np.uint8)\n",
    "\n",
    "            dilation = cv2.dilate(thresh,kernel,iterations = 3)\n",
    "            mask = cv2.erode(dilation, kernel, iterations=2)         \n",
    "\n",
    "            if(debug_mode):\n",
    "                cv2.imwrite(output_folder + s_name + 'bilat_and_med_mask.png',mask * 255)\n",
    "\n",
    "            disp_filled = FillIncons(mask, disp_s_arr)\n",
    "\n",
    "        s_count = s_count + 1\n",
    "        elapsed = time.time() - t\n",
    "    \n",
    "        if(getEPE):\n",
    "            \n",
    "            gt = np.array(gt)\n",
    "            if(fill_incons):\n",
    "                disp_filled = np.array(disp_filled)\n",
    "            else:\n",
    "                disp = np.array(disp)\n",
    "\n",
    "            if(fill_incons):\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp_filled, gt)\n",
    "            else:\n",
    "                five_pe, four_pe, three_pe, two_pe, one_pe, pf_pe = calcEPE(disp, gt)\n",
    "            \n",
    "            print(\"2-PE: {} \".format(two_pe))\n",
    "    \n",
    "            avg_five_pe = avg_five_pe + five_pe\n",
    "            avg_four_pe = avg_four_pe +  four_pe\n",
    "            avg_three_pe = avg_three_pe + three_pe\n",
    "            avg_two_pe = avg_two_pe + two_pe\n",
    "            avg_one_pe = avg_one_pe + one_pe\n",
    "            avg_pf_pe = avg_pf_pe + pf_pe        \n",
    "\n",
    "        if(debug_mode):\n",
    "            if(fill_incons):\n",
    "                if(lr_check):\n",
    "                    writePFMcyt(output_folder + s_name + '_s.pfm',disp_s) \n",
    "                writePFMcyt(output_folder + s_name + '_filled.pfm',disp_filled.astype(np.float32)) \n",
    "                writePFMcyt(output_folder + s_name + '.pfm',disp.astype(np.float32)) \n",
    "                    \n",
    "        else:\n",
    "            if(fill_incons):\n",
    "                \n",
    "                if(lr_check):\n",
    "                    writePFMcyt(output_folder + s_name + '.pfm',disp_filled.astype(np.float32)) \n",
    "            else:\n",
    "                    writePFMcyt(output_folder + s_name + '.pfm',disp.astype(np.float32)) \n",
    "                \n",
    "        text_file = open(output_folder + s_name + \".txt\", \"w\")\n",
    "        n = text_file.write('runtime '+str(elapsed))\n",
    "        text_file.close()\n",
    "        t_count = t_count + elapsed\n",
    "\n",
    "    print(\"Average execution time: {}\".format(t_count / nr_samples))\n",
    "    \n",
    "    if(getEPE):\n",
    "        print(\"Average 5-PE: {}\".format(avg_five_pe / nr_samples))\n",
    "        print(\"Average 4-PE: {}\".format(avg_four_pe / nr_samples))\n",
    "        print(\"Average 3-PE: {}\".format(avg_three_pe / nr_samples))\n",
    "        print(\"Average 2-PE: {}\".format(avg_two_pe / nr_samples))\n",
    "        print(\"Average 1-PE: {}\".format(avg_one_pe / nr_samples))\n",
    "        print(\"Average 0.5-PE: {}\".format(avg_pf_pe / nr_samples))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if(dataset == 'ETHTest'):\n",
    "    \n",
    "        \n",
    "    nr_samples = len(glob.glob(input_folder))\n",
    "    for samples in glob.glob(input_folder):\n",
    "        \n",
    "        f = open(samples + 'calib.txt','r')\n",
    "        calib = f.read()\n",
    "        max_disp = int(calib.split('\\n')[6].split(\"=\")[1])\n",
    "        s_name = samples.split('/')[-2]\n",
    "\n",
    "        print(\"Now processing: {} {} of {}\".format(samples.split('/')[-2],s_count, len(glob.glob('/home/dominik/tensorflow-mc-cnn/MiddEval3/*/*/'))))\n",
    "\n",
    "        t = time.time()\n",
    "\n",
    "        disp_name = output_folder + s_name + '.pfm'\n",
    "        disp = None\n",
    "        if(lr_check):\n",
    "            disp_s,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = True)        \n",
    "        else:\n",
    "            _,disp = TestImage(samples + '/im0.png', samples + '/im1.png', max_disp, disp_name, filtered = is_filtered, lr_check = False)\n",
    "\n",
    "        folder = samples\n",
    "        \n",
    "        if(fill_incons):                        \n",
    "            disp_s_arr = np.array(disp_s)\n",
    "            im_disp = Image.fromarray(disp_s_arr) \n",
    "            im_disp = np.dstack((im_disp, im_disp, im_disp)).astype(np.uint8)    \n",
    "\n",
    "            h,w = disp_s.shape\n",
    "            \n",
    "            #RUN AGAIN!!\n",
    "            shifted = cv2.pyrMeanShiftFiltering(im_disp, 3, 3)\n",
    "\n",
    "            gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)\n",
    "            thresh = cv2.threshold(gray, 0, 1,\n",
    "                cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n",
    "\n",
    "            kernel = np.ones((5,5), np.uint8)\n",
    "\n",
    "            dilation = cv2.dilate(thresh,kernel,iterations = 3)\n",
    "            mask = cv2.erode(dilation, kernel, iterations=2)         \n",
    "\n",
    "            if(debug_mode):\n",
    "                cv2.imwrite(output_folder + s_name + 'bilat_and_med_mask.png',mask * 255)\n",
    "\n",
    "            disp_filled = FillIncons(mask, disp_s_arr)\n",
    "\n",
    "        s_count = s_count + 1\n",
    "        elapsed = time.time() - t\n",
    "        \n",
    "        if(fill_incons):\n",
    "            disp = np.array(disp)            \n",
    "            disp_filled = np.array(disp_filled)\n",
    "        else:\n",
    "            disp = np.array(disp)\n",
    "        \n",
    "        if(debug_mode):\n",
    "            if(fill_incons):\n",
    "                if(lr_check):\n",
    "                    writePFMcyt(output_folder + s_name + '_s.pfm',disp_s) \n",
    "                writePFMcyt(output_folder + s_name + '_filled.pfm',disp_filled.astype(np.float32)) \n",
    "                writePFMcyt(output_folder + s_name + '.pfm',disp.astype(np.float32)) \n",
    "                    \n",
    "        else:\n",
    "            if(fill_incons):\n",
    "                \n",
    "                if(lr_check):\n",
    "                    disp_filled = np.asarray(disp_filled)\n",
    "                    writePFMcyt(output_folder + s_name + '.pfm',disp_filled.astype(np.float32)) \n",
    "                    writePFMcyt(output_folder + s_name + '_s.pfm',disp_s.astype(np.float32))                 \n",
    "            else:\n",
    "                    writePFMcyt(output_folder + s_name + '.pfm',disp.astype(np.float32)) \n",
    "                    writePFMcyt(output_folder + s_name + '_s.pfm',disp_s.astype(np.float32))                 \n",
    "                \n",
    "        text_file = open(output_folder + s_name + \".txt\", \"w\")\n",
    "        n = text_file.write('runtime '+str(elapsed))\n",
    "        text_file.close()\n",
    "        t_count = t_count + elapsed\n",
    "\n",
    "    print(\"Average execution time: {}\".format(t_count / nr_samples))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
