{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import models\n",
    "import torch as t\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "# import torchvision.models as models\n",
    "from torchvision import transforms as T\n",
    "from torch.utils import data\n",
    "from PIL import Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Dataset layout: <base_root>/<scene>/jpg_rgb/*.jpg and <scene>/high_res_depth/*.png\n",
     "# NOTE(review): absolute local path - consider a configurable DATA_DIR for portability\n",
     "base_root = '/home/vivien/code/data/ActiveVisionDataset/'\n",
     "dataset_name = 'Home_001_1'\n",
     "rgb_prefix = 'jpg_rgb'\n",
     "depth_prefix = 'high_res_depth'\n",
     "# File-name templates: the {:0>5} field is the zero-padded frame index;\n",
     "# suffix 0101 = RGB jpg, 0103 = depth png (AVD naming scheme - confirm)\n",
     "jpg_sample_name = '000110{:0>5}0101.jpg'\n",
     "depth_sample_name = '000110{:0>5}0103.png'\n",
     "# Concrete paths for frame 1 plus the scene's camera-metadata .mat file\n",
     "rgb_file_name = os.path.join(base_root,dataset_name,rgb_prefix,jpg_sample_name.format(1))\n",
     "depth_file_name = os.path.join(base_root,dataset_name,depth_prefix,depth_sample_name.format(1))\n",
     "mat_file_name = os.path.join(base_root,dataset_name,'image_structs.mat')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Load the RGB frame and its aligned high-resolution depth map (PIL lazy-loads)\n",
     "rgb = Image.open(rgb_file_name)\n",
     "depth = Image.open(depth_file_name)\n",
     "# Raw depth values; presumably uint16 raw units divided by `scale` later - confirm\n",
     "depth_array = np.array(depth)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "resnet50 = models.resnet50(pretrained=True)\n",
    "\n",
    "transforms = T.Compose([T.ToTensor()])\n",
    "data = transforms(rgb)\n",
    "batch_data = data.unsqueeze(0)\n",
    "img_feature = resnet50.middle(batch_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "# image_structs.mat holds per-image camera metadata for the scene\n",
     "mat_data = sio.loadmat(mat_file_name)\n",
     "image_structs = mat_data['image_structs']\n",
     "# Scalar converting raw depth units to world units - TODO confirm exact semantics\n",
     "scale = mat_data['scale'].squeeze()\n",
     "# Second struct in the array; field 2 is presumably the 3x3 intrinsics matrix K -\n",
     "# verify the field ordering against the dataset documentation\n",
     "struct1 = image_structs[0,1]\n",
     "K = np.array(struct1[2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "S = 10.5\n",
    "array = [[(i,j) for i in range(depth_array.shape[1])] for j in range(depth_array.shape[0])]\n",
    "loc_array = np.array(array)\n",
    "depth_expand = np.expand_dims(depth_array,2)\n",
    "data = np.concatenate((loc_array,depth_expand),axis=2)\n",
    "data[:,:,2] = data[:,:,2] / scale\n",
    "data = np.expand_dims(data,3)\n",
    "projected_p = np.matmul(K,data)\n",
    "projected_p = projected_p / scale\n",
    "projected_p = projected_p.squeeze(3)\n",
    "projected_p[:,:,0] = projected_p[:,:,0]*(S-1)/2 + (S+1)/2\n",
    "projected_p[:,:,2] = projected_p[:,:,2]*(S-1)/2 + (S+1)/2\n",
    "projected_p[:,:,0] = np.ceil(projected_p[:,:,0])\n",
    "projected_p[:,:,2] = np.ceil(projected_p[:,:,2])\n",
    "projected_p = projected_p.astype(np.int)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "o_t = np.ceil(projected_p[:,:,0].max()).astype(np.int).tolist()\n",
    "o_k = np.ceil(projected_p[:,:,2].max()).astype(np.int).tolist()\n",
    "o_size = max(o_t, o_k)\n",
    "o_feature = t.zeros((img_feature.shape[1], o_size, o_size))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "for i in range(o_size):\n",
    "    for j in range(o_size):\n",
    "        select_projected_p = np.logical_and(projected_p[:,:,0]==i,projected_p[:,:,2]==j)\n",
    "        loc_image_1,loc_image_2 = np.where(select_projected_p==True)\n",
    "        loc_feature_1 = (loc_image_1/8).astype(np.int)\n",
    "        loc_feature_2 = (loc_image_2/8).astype(np.int)\n",
    "        tuple_loc = set(zip(loc_feature_1,loc_feature_2))\n",
    "        list_loc = list(tuple_loc)\n",
    "        list_loc = np.array(list_loc)\n",
    "        if list_loc.shape[0] == 0:\n",
    "            continue\n",
    "        loc_feature_1 = list_loc[:,0]\n",
    "        loc_feature_2 = list_loc[:,1]\n",
    "        feature_selected = img_feature[:,:,loc_feature_1,loc_feature_2]\n",
    "        feature_selected = feature_selected.max(2)[0]\n",
    "        o_feature[:,i,j] = feature_selected"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Add a batch dimension: (C, H, W) -> (1, C, H, W).\n",
     "# NOTE(review): not idempotent - re-running this cell stacks extra dimensions\n",
     "o_feature = o_feature.unsqueeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "\n",
    "class GroundCNN(nn.Module):\n",
    "\n",
    "    def __init__(self, inplanes, planes):\n",
    "        super().__init__()\n",
    "        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, padding=1)\n",
    "        self.bn1 = nn.BatchNorm2d(planes)\n",
    "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,padding=1)\n",
    "        self.bn2 = nn.BatchNorm2d(planes)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = self.relu(out)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Project the grid features (o_feature channel count) down to 64 channels\n",
     "ground_cnn = GroundCNN(o_feature.shape[1],64)\n",
     "observation = ground_cnn(o_feature)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
     "import torch.nn.functional as F\n",
     "import math\n",
     "\n",
     "# Build 2x3 affine matrices for a translate -> rotate -> translate-back experiment.\n",
     "# NOTE(review): F.affine_grid operates in normalized [-1, 1] coordinates, so the\n",
     "# pixel-sized offsets (shape/2) in grid_offset/grid_offset2 look far too large;\n",
     "# only the pure rotation g2 is actually used below - confirm before reusing g1/g3.\n",
     "grid_offset = t.tensor([[[1,0,-observation.shape[2]/2],[0,1,-observation.shape[3]/2]]])\n",
     "# 90-degree rotation angle in radians (direction/sign in grid coords - verify)\n",
     "rotate_alpha = math.pi*90/180\n",
     "grid_rotate = t.tensor([[[math.cos(rotate_alpha),math.sin(rotate_alpha),0],[-math.sin(rotate_alpha),math.cos(rotate_alpha),0]]])\n",
     "grid_offset2 = t.tensor([[[1,0,observation.shape[2]/2],[0,1,observation.shape[3]/2]]])\n",
     "g1 = F.affine_grid(grid_offset, observation.size())\n",
     "g2 = F.affine_grid(grid_rotate, observation.size())\n",
     "g3 = F.affine_grid(grid_offset2, observation.size())\n",
     "# o_rotate = F.grid_sample(observation, g1)\n",
     "o_rotate = F.grid_sample(observation, g2)\n",
     "# o_rotate = F.grid_sample(observation, g3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0379,  0.0320,  0.3708,  0.0000,  0.0000,  0.0404,  0.0000,\n",
       "          0.0101,  0.0000,  0.0000,  0.2295,  0.0109,  0.0000,  0.0000,\n",
       "          0.0834,  0.0000,  0.0000,  0.0405,  0.0230,  0.0000,  0.0184,\n",
       "          0.0000,  0.0500,  0.0000,  0.1284,  0.2103,  0.0000,  0.0000,\n",
       "          0.0000,  0.1711,  0.0000,  0.0151,  0.0000,  0.0000,  0.0316,\n",
       "          0.0000,  0.0306,  0.1904,  0.0000,  0.0000,  0.0000,  0.0000,\n",
       "          0.0325,  0.0454,  0.0000,  0.0000,  0.0014,  0.0000,  0.0000,\n",
       "          0.0000,  0.0708,  0.1758,  0.0000,  0.0000,  0.1591,  0.0000,\n",
       "          0.0091,  0.3002,  0.0451,  0.0000,  0.0193,  0.0890,  0.0000,\n",
       "          0.0000]])"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Spot-check: rotated feature vector at grid cell (5, 5)\n",
     "o_rotate[:,:,5,5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[[0.00818993, 0.00835242, 0.00835242],\n",
       "        [0.02678652, 0.02731799, 0.02731799],\n",
       "        [0.02900326, 0.03661798, 0.03661798],\n",
       "        ...,\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ]],\n",
       "\n",
       "       [[0.0339862 , 0.03741692, 0.03741692],\n",
       "        [0.0153896 , 0.03443521, 0.03443521],\n",
       "        [0.03743571, 0.06118733, 0.06118733],\n",
       "        ...,\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ]],\n",
       "\n",
       "       [[0.00177595, 0.03225245, 0.03225245],\n",
       "        [0.19635007, 0.18522452, 0.18522452],\n",
       "        [0.41343112, 0.35466246, 0.35466246],\n",
       "        ...,\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ]],\n",
       "\n",
       "       ...,\n",
       "\n",
       "       [[0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        ...,\n",
       "        [0.0012684 , 0.08471181, 0.08471181],\n",
       "        [0.01455024, 0.04023202, 0.04023202],\n",
       "        [0.02529528, 0.        , 0.        ]],\n",
       "\n",
       "       [[0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        ...,\n",
       "        [0.02427323, 0.00767056, 0.00767056],\n",
       "        [0.0155723 , 0.        , 0.        ],\n",
       "        [0.00229046, 0.        , 0.        ]],\n",
       "\n",
       "       [[0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        ...,\n",
       "        [0.00584932, 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ],\n",
       "        [0.        , 0.        , 0.        ]]])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# NOTE(review): `transform` is never imported (skimage.transform?) - this cell\n",
     "# raises NameError on a fresh kernel; add the import or remove the cell.\n",
     "transform.rotate(observation[0,:,:,:3].detach().numpy(),30)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
