{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Populating the interactive namespace from numpy and matplotlib\n"
     ]
    }
   ],
   "source": [
    "%pylab inline\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.utils.data.dataloader as dataloader\n",
    "import torch.optim as optim\n",
    "\n",
    "from torch.utils.data import TensorDataset\n",
    "from torch.autograd import Variable\n",
    "from torchvision import transforms\n",
    "from torchvision.datasets import MNIST, CIFAR10\n",
    "from tqdm import tqdm\n",
    "from time import sleep\n",
    "import sys, os\n",
    "import pickle\n",
    "import cv2\n",
    "\n",
    "SEED = 1\n",
    "\n",
    "# CUDA?\n",
    "cuda = torch.cuda.is_available()\n",
    "\n",
    "# For reproducibility\n",
    "torch.manual_seed(SEED)\n",
    "\n",
    "if cuda:\n",
    "    torch.cuda.manual_seed(SEED)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "class number: 10\n",
      "image size: 32\n"
     ]
    }
   ],
   "source": [
     "# Create DataLoader\n",
     "# NOTE(review): normalization is left commented out below -- presumably so the\n",
     "# zero padding used by later cells matches the ToTensor [0, 1] black background; confirm.\n",
     "# transform = transforms.Compose(\n",
     "#     [transforms.ToTensor(),\n",
     "#      transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])\n",
     "transform = transforms.ToTensor()\n",
     "\n",
     "train = CIFAR10('./data', train=True, download=True, transform=transform)\n",
     "test = CIFAR10('./data', train=False, download=True, transform=transform)\n",
     "\n",
     "# Larger batches + pinned memory + workers only when a GPU is available.\n",
     "dataloader_args = dict(batch_size=256,num_workers=4, \n",
     "                       pin_memory=True) if cuda else dict(batch_size=64)\n",
     "train_loader = dataloader.DataLoader(train, shuffle=True, **dataloader_args)\n",
     "test_loader = dataloader.DataLoader(test, shuffle=False, **dataloader_args)\n",
     "\n",
     "classes = ('plane', 'car', 'bird', 'cat',\n",
     "           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n",
     "num_cls = len(classes)\n",
     "# p = spatial size of one image (last dim of a [B, C, H, W] batch; CIFAR-10: 32).\n",
     "# It is also passed to Model(p) below as a channel width.\n",
     "p = next(iter(train_loader))[0].shape[-1]\n",
     "print('class number: {}'.format(num_cls))\n",
     "print('image size: {}'.format(p))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Model(nn.Module):\n",
     "    \"\"\"Small VGG-style CNN for 32x32 RGB images (CIFAR-10, 10 classes).\n",
     "\n",
     "    Args:\n",
     "        p: number of output channels of the first conv layer. The notebook\n",
     "           passes the image size (32) here, so channel width and image size\n",
     "           coincide -- NOTE(review): confirm this reuse of `p` is intentional.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, p):\n",
     "        super(Model, self).__init__()\n",
     "\n",
     "        self.conv1 = nn.Sequential(\n",
     "            # Conv Layer block 1\n",
     "            nn.Conv2d(in_channels=3, out_channels=p, kernel_size=3, padding=1),\n",
     "            nn.BatchNorm2d(p),\n",
     "            nn.ReLU(inplace=True),\n",
     "            nn.Conv2d(in_channels=p, out_channels=64, kernel_size=3, padding=1),\n",
     "            nn.ReLU(inplace=True)\n",
     "        )\n",
     "        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)\n",
     "\n",
     "        self.conv2 = nn.Sequential(\n",
     "            # Conv Layer block 2\n",
     "            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),\n",
     "            nn.BatchNorm2d(128),\n",
     "            nn.ReLU(inplace=True),\n",
     "            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),\n",
     "            nn.ReLU(inplace=True)\n",
     "        )\n",
     "        self.pool2 = nn.Sequential(\n",
     "            nn.MaxPool2d(kernel_size=2, stride=2),\n",
     "            nn.Dropout2d(p=0.05)\n",
     "        )\n",
     "\n",
     "        self.conv3 = nn.Sequential(\n",
     "            # Conv Layer block 3\n",
     "            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),\n",
     "            nn.BatchNorm2d(256),\n",
     "            nn.ReLU(inplace=True),\n",
     "            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),\n",
     "            nn.ReLU(inplace=True)\n",
     "        )\n",
     "        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)\n",
     "\n",
     "        # 4096 = 256 channels * 4 * 4 spatial, i.e. a 32x32 input after three\n",
     "        # 2x2 max-poolings; other input sizes would break this Linear layer.\n",
     "        self.fc_layer1 = nn.Sequential(\n",
     "            nn.Dropout(p=0.1),\n",
     "            nn.Linear(4096, 1024),\n",
     "            nn.ReLU(inplace=True),\n",
     "            nn.Linear(1024, 512),\n",
     "            nn.ReLU(inplace=True)\n",
     "        )\n",
     "        self.fc_layer2 = nn.Sequential(\n",
     "            nn.Dropout(p=0.1),\n",
     "            nn.Linear(512, 10)\n",
     "        )\n",
     "\n",
     "\n",
     "    def forward(self, x):\n",
     "        \"\"\"Forward pass.\n",
     "\n",
     "        Returns a 5-tuple (logits, fc1 features, conv3, conv2, conv1\n",
     "        activations); downstream cells use only element 0 (the logits).\n",
     "        \"\"\"\n",
     "        \n",
     "        # conv layers\n",
     "        x_conv1 = self.conv1(x)\n",
     "        x_conv2 = self.conv2(self.pool1(x_conv1))\n",
     "        x_conv3 = self.conv3(self.pool2(x_conv2))\n",
     "        x1 = self.pool3(x_conv3)\n",
     "        \n",
     "        # flatten\n",
     "        x2 = x1.view(x1.size(0), -1)\n",
     "        \n",
     "        # fc layer\n",
     "        x3 = self.fc_layer1(x2)\n",
     "        x4 = self.fc_layer2(x3)\n",
     "\n",
     "        return x4, x3, x_conv3, x_conv2, x_conv1\n",
     "      \n",
     "model = Model(p)\n",
     "if cuda:\n",
     "    model.cuda()\n",
     "optimizer = optim.Adam(model.parameters(), lr=1e-3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Checkpoint directory for the training cells below.\n",
     "# NOTE(review): torch.save does not create directories -- this path is assumed to exist.\n",
     "save_path = 'cache/models'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "IncompatibleKeys(missing_keys=[], unexpected_keys=[])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load pre-trained model\n",
     "# NOTE(review): 'epoch_7.pth' is assumed to come from an earlier baseline training\n",
     "# run not shown in this notebook -- confirm it exists before Run-All from scratch.\n",
     "bst_mdl = save_path+'/epoch_7.pth'\n",
     "model.load_state_dict(torch.load(bst_mdl)['model'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Translation invariance test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0it [00:00, ?it/s]\n",
      "Test accuracy:0.8204, padded accuracy:0.2043, padded control accuracy:0.2025\n"
     ]
    }
   ],
   "source": [
    "# move input data to upper left, pad with 0\n",
    "# move input to center, pad with 0 (as the control group, since resolution drops) ==> also testing scale\n",
    "_loader = test_loader # train_loader\n",
    "hit = 0\n",
    "hit_padded = 0\n",
    "hit_padded_ctrl = 0\n",
    "total = 0\n",
    "with torch.no_grad():\n",
    "    with tqdm(len(_loader), file=sys.stdout) as pbar:\n",
    "        for batch_idx, (data, target) in enumerate(_loader):\n",
    "            padded_data = []\n",
    "            padded_data_ctrl = []\n",
    "            for i in range(data.shape[0]):\n",
    "                cur_data = data[i].permute(1,2,0)\n",
    "                data_img = cv2.resize(cur_data.numpy(), (p//2, p//2))\n",
    "                padded_img = np.zeros_like(cur_data)\n",
    "                padded_img[:p//2, :p//2] = data_img\n",
    "                padded_data.append(torch.from_numpy(padded_img.transpose(2, 0, 1)[None, ...]))\n",
    "                padded_img_ctrl = np.zeros_like(cur_data)\n",
    "                padded_img_ctrl[p//4:3*p//4, p//4:3*p//4] = data_img\n",
    "                padded_data_ctrl.append(torch.from_numpy(padded_img_ctrl.transpose(2, 0, 1)[None, ...]))\n",
    "                \n",
    "#                 plt.imshow(cur_data)\n",
    "#                 plt.show()\n",
    "#                 plt.imshow(padded_img)\n",
    "#                 plt.show()\n",
    "#                 plt.imshow(padded_img_ctrl)\n",
    "#                 plt.show()\n",
    "            \n",
    "            padded_data = torch.cat(padded_data)\n",
    "            padded_data_ctrl = torch.cat(padded_data_ctrl)\n",
    "            if cuda:\n",
    "                data, padded_data, padded_data_ctrl, target = data.cuda(), \\\n",
    "                padded_data.cuda(), padded_data_ctrl.cuda(), target.cuda()\n",
    "            pred = model(data)[0].max(1)[1]\n",
    "            pred_padded = model(padded_data)[0].max(1)[1]\n",
    "            pred_padded_ctrl = model(padded_data_ctrl)[0].max(1)[1]\n",
    "            \n",
    "            hit += pred.eq(target).cpu().sum()\n",
    "            hit_padded += pred_padded.eq(target).cpu().sum()\n",
    "            hit_padded_ctrl += pred_padded_ctrl.eq(target).cpu().sum()\n",
    "            total += len(target)\n",
    "\n",
    "acc = hit.type(dtype=torch.float64)/total\n",
    "acc_padded = hit_padded.type(dtype=torch.float64)/total\n",
    "acc_padded_ctrl = hit_padded_ctrl.type(dtype=torch.float64)/total\n",
    "\n",
    "print('Test accuracy:{}, padded accuracy:{}, padded control accuracy:{}'.format(acc, acc_padded, acc_padded_ctrl))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " Best model saved. [25856/50000 (52%)]\tLoss: 1.621167\n",
      " Train Epoch: 1/15 [50000/50000 (20%)]\tLoss: 1.409732\t Upper-Left Accuracy: 47.7200% \t             Center Accuracy: 19.2300% \t Lower-right Accuracy: 10.0500%\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 1.291767\n",
      " Train Epoch: 2/15 [50000/50000 (20%)]\tLoss: 1.286268\t Upper-Left Accuracy: 54.4700% \t             Center Accuracy: 14.7600% \t Lower-right Accuracy: 12.0400%\n",
      " Train Epoch: 3/15 [50000/50000 (20%)]\tLoss: 0.986637\t Upper-Left Accuracy: 53.3100% \t             Center Accuracy: 12.2100% \t Lower-right Accuracy: 11.6600%\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.996821\n",
      " Train Epoch: 4/15 [50000/50000 (20%)]\tLoss: 0.818752\t Upper-Left Accuracy: 67.8800% \t             Center Accuracy: 13.5400% \t Lower-right Accuracy: 10.4000%\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.847410\n",
      " Train Epoch: 5/15 [50000/50000 (20%)]\tLoss: 0.846342\t Upper-Left Accuracy: 68.7400% \t             Center Accuracy: 12.7900% \t Lower-right Accuracy: 11.5700%\n",
      " Train Epoch: 6/15 [50000/50000 (20%)]\tLoss: 0.849288\t Upper-Left Accuracy: 65.8800% \t             Center Accuracy: 12.7800% \t Lower-right Accuracy: 10.1700%\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.638393\n",
      " Train Epoch: 7/15 [50000/50000 (20%)]\tLoss: 0.642567\t Upper-Left Accuracy: 71.1700% \t             Center Accuracy: 12.4300% \t Lower-right Accuracy: 10.1600%\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.651127\n",
      " Train Epoch: 8/15 [50000/50000 (20%)]\tLoss: 0.563692\t Upper-Left Accuracy: 71.6000% \t             Center Accuracy: 10.4700% \t Lower-right Accuracy: 12.7500%\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.470319\n",
      " Train Epoch: 9/15 [50000/50000 (20%)]\tLoss: 0.554540\t Upper-Left Accuracy: 73.8300% \t             Center Accuracy: 11.2600% \t Lower-right Accuracy: 12.8300%\n",
      " Train Epoch: 10/15 [50000/50000 (20%)]\tLoss: 0.630406\t Upper-Left Accuracy: 72.4800% \t             Center Accuracy: 10.7200% \t Lower-right Accuracy: 10.8000%\n",
      " Train Epoch: 11/15 [50000/50000 (20%)]\tLoss: 0.453944\t Upper-Left Accuracy: 71.6700% \t             Center Accuracy: 11.9500% \t Lower-right Accuracy: 10.2800%\n",
      " Train Epoch: 12/15 [50000/50000 (20%)]\tLoss: 0.321227\t Upper-Left Accuracy: 73.6900% \t             Center Accuracy: 10.8800% \t Lower-right Accuracy: 9.6800%\n",
      " Train Epoch: 13/15 [50000/50000 (20%)]\tLoss: 0.344391\t Upper-Left Accuracy: 73.1600% \t             Center Accuracy: 10.4200% \t Lower-right Accuracy: 8.7200%\n",
      " Train Epoch: 14/15 [50000/50000 (20%)]\tLoss: 0.260058\t Upper-Left Accuracy: 71.8500% \t             Center Accuracy: 7.2900% \t Lower-right Accuracy: 9.3400%\n",
      " Train Epoch: 15/15 [50000/50000 (20%)]\tLoss: 0.282833\t Upper-Left Accuracy: 73.2600% \t             Center Accuracy: 7.9300% \t Lower-right Accuracy: 11.8400%\n"
     ]
    }
   ],
   "source": [
    "# training the model using images being put to upper left\n",
    "# test using upper-left, center, and lower-right\n",
    "\n",
    "##### \n",
    "# re-run model definition block if want to train from scratch\n",
    "#####\n",
    "\n",
    "EPOCHS = 15\n",
    "losses = []\n",
    "\n",
    "best_acc = 0\n",
    "for epoch in range(EPOCHS):\n",
    "    model.train()\n",
    "    for batch_idx, (data, target) in enumerate(train_loader):\n",
    "        padded_data = []\n",
    "        for i in range(data.shape[0]):\n",
    "            cur_data = data[i].permute(1,2,0)\n",
    "            data_img = cv2.resize(cur_data.numpy(), (p//2, p//2))\n",
    "            padded_img = np.zeros_like(cur_data)\n",
    "            padded_img[:p//2, :p//2] = data_img\n",
    "            padded_data.append(torch.from_numpy(padded_img.transpose(2, 0, 1)[None, ...]))\n",
    "        padded_data = torch.cat(padded_data)\n",
    "        if cuda:\n",
    "            padded_data, target = padded_data.cuda(), target.cuda()\n",
    "        \n",
    "        optimizer.zero_grad()\n",
    "        # Predict\n",
    "        y_pred = model(padded_data)[0]\n",
    "\n",
    "        # Calculate loss\n",
    "        loss = F.cross_entropy(y_pred, target)\n",
    "        losses.append(loss.cpu().data)      \n",
    "        # Backpropagation\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        \n",
    "        # Display\n",
    "        if batch_idx % 100 == 1:\n",
    "            print('\\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
    "                  epoch+1,\n",
    "                  EPOCHS,\n",
    "                  batch_idx * len(data),\n",
    "                  len(train_loader.dataset),\n",
    "                  100. * batch_idx / len(train_loader), \n",
    "                  loss.cpu().data), \n",
    "                  end='')\n",
    "    # Eval\n",
    "    model.eval()\n",
    "    hit_ul = 0\n",
    "    hit_ctr = 0\n",
    "    hit_lr = 0\n",
    "    total = 0\n",
    "    with torch.no_grad():\n",
    "        for batch_idx, (data, target) in enumerate(test_loader):\n",
    "            padded_data_ul = []\n",
    "            padded_data_ctr = []\n",
    "            padded_data_lr = []\n",
    "            for i in range(data.shape[0]):\n",
    "                cur_data = data[i].permute(1,2,0)\n",
    "                data_img = cv2.resize(cur_data.numpy(), (p//2, p//2))\n",
    "                padded_img_ul = np.zeros_like(cur_data)\n",
    "                padded_img_ul[:p//2, :p//2] = data_img\n",
    "                padded_data_ul.append(torch.from_numpy(padded_img_ul.transpose(2, 0, 1)[None, ...]))\n",
    "                padded_img_ctr = np.zeros_like(cur_data)\n",
    "                padded_img_ctr[p//4:3*p//4, p//4:3*p//4] = data_img\n",
    "                padded_data_ctr.append(torch.from_numpy(padded_img_ctr.transpose(2, 0, 1)[None, ...]))\n",
    "                padded_img_lr = np.zeros_like(cur_data)\n",
    "                padded_img_lr[p//2:, p//2:] = data_img\n",
    "                padded_data_lr.append(torch.from_numpy(padded_img_lr.transpose(2, 0, 1)[None, ...]))\n",
    "                \n",
    "#                 plt.imshow(padded_img_ul)\n",
    "#                 plt.show()\n",
    "#                 plt.imshow(padded_img_ctr)\n",
    "#                 plt.show()\n",
    "#                 plt.imshow(padded_img_lr)\n",
    "#                 plt.show()\n",
    "#                 import pdb; pdb.set_trace()\n",
    "\n",
    "            padded_data_ul = torch.cat(padded_data_ul)\n",
    "            padded_data_ctr = torch.cat(padded_data_ctr)\n",
    "            padded_data_lr = torch.cat(padded_data_lr)\n",
    "            if cuda:\n",
    "                padded_data_ul, padded_data_ctr, padded_data_lr, target = \\\n",
    "                padded_data_ul.cuda(), padded_data_ctr.cuda(), padded_data_lr.cuda(), target.cuda()\n",
    "                \n",
    "            pred_ul = model(padded_data_ul)[0].max(1)[1]\n",
    "            pred_ctr = model(padded_data_ctr)[0].max(1)[1]\n",
    "            pred_lr = model(padded_data_lr)[0].max(1)[1]\n",
    "            \n",
    "            hit_ul += pred_ul.eq(target).cpu().sum()\n",
    "            hit_ctr += pred_ctr.eq(target).cpu().sum()\n",
    "            hit_lr += pred_lr.eq(target).cpu().sum()\n",
    "            total += len(target)\n",
    "            \n",
    "    accuracy_ul = hit_ul.type(dtype=torch.float64)/total\n",
    "    accuracy_ctr = hit_ctr.type(dtype=torch.float64)/total\n",
    "    accuracy_lr = hit_lr.type(dtype=torch.float64)/total\n",
    "\n",
    "            \n",
    "    # save best\n",
    "    if accuracy_ul > best_acc:\n",
    "        best_acc = accuracy_ul\n",
    "        torch.save({'epoch': epoch,\n",
    "                  'model': model.state_dict(),\n",
    "                  'optimizer': optimizer.state_dict()\n",
    "                 }, '{}/padded_epoch_{}.pth'.format(save_path, epoch))\n",
    "        print('\\r Best model saved.\\r')\n",
    "      \n",
    "    print('\\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\t Upper-Left Accuracy: {:.4f}% \\t \\\n",
    "            Center Accuracy: {:.4f}% \\t Lower-right Accuracy: {:.4f}%'.format(\n",
    "        epoch+1,\n",
    "        EPOCHS,\n",
    "        len(train_loader.dataset), \n",
    "        len(train_loader.dataset),\n",
    "        100. * batch_idx / len(train_loader), \n",
    "        loss.cpu().data,\n",
    "        accuracy_ul*100,\n",
    "        accuracy_ctr*100,\n",
    "        accuracy_lr*100,\n",
    "        end=''))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Rotation invariance test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0it [00:00, ?it/s]\n",
      "Test accuracy:0.8194, rotate 90 accuracy:0.3076, rotate 180 accuracy:0.3185, rotate 270 accuracy:0.2925\n"
     ]
    }
   ],
   "source": [
    "# test using roatated images (90/180/270 degrees)\n",
    "\n",
    "_loader = test_loader # train_loader\n",
    "hit = 0\n",
    "hit_90 = 0\n",
    "hit_180 = 0\n",
    "hit_270 = 0\n",
    "total = 0\n",
    "with torch.no_grad():\n",
    "    with tqdm(len(_loader), file=sys.stdout) as pbar:\n",
    "        for batch_idx, (data, target) in enumerate(_loader):\n",
    "            data_90 = data.transpose(2, 3).flip(2)\n",
    "            data_180 = data.flip(2).flip(3)\n",
    "            data_270 = data.transpose(2, 3).flip(3)\n",
    "            \n",
    "#             plt.imshow(data[0].numpy().transpose(1,2,0))\n",
    "#             plt.show()\n",
    "#             plt.imshow(data_90[0].numpy().transpose(1,2,0))\n",
    "#             plt.show()\n",
    "#             plt.imshow(data_180[0].numpy().transpose(1,2,0))\n",
    "#             plt.show()\n",
    "#             plt.imshow(data_270[0].numpy().transpose(1,2,0))\n",
    "#             plt.show()\n",
    "#             import pdb; pdb.set_trace()\n",
    "            \n",
    "            if cuda:\n",
    "                data, data_90, data_180, data_270, target = data.cuda(), \\\n",
    "                data_90.cuda(), data_180.cuda(),data_270.cuda(), target.cuda()\n",
    "            pred = model(data)[0].max(1)[1]\n",
    "            pred_90 = model(data_90)[0].max(1)[1]\n",
    "            pred_180 = model(data_180)[0].max(1)[1]\n",
    "            pred_270 = model(data_270)[0].max(1)[1]\n",
    "            \n",
    "            hit += pred.eq(target).cpu().sum()\n",
    "            hit_90 += pred_90.eq(target).cpu().sum()\n",
    "            hit_180 += pred_180.eq(target).cpu().sum()\n",
    "            hit_270 += pred_270.eq(target).cpu().sum()\n",
    "            total += len(target)\n",
    "\n",
    "acc = hit.type(dtype=torch.float64)/total\n",
    "acc_90 = hit_90.type(dtype=torch.float64)/total\n",
    "acc_180 = hit_180.type(dtype=torch.float64)/total\n",
    "acc_270 = hit_270.type(dtype=torch.float64)/total\n",
    "\n",
    "print('Test accuracy:{}, rotate 90 accuracy:{}, rotate 180 accuracy:{}, rotate 270 accuracy:{}'.format(\n",
    "    acc, acc_90, acc_180, acc_270))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Scale invariance test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0it [00:00, ?it/s]\n",
      "Test accuracy:0.8134, zoomed out accuracy:0.3774\n"
     ]
    }
   ],
   "source": [
    "# zoom original images to 64*64, and use the center part\n",
    "\n",
    "_loader = test_loader # train_loader\n",
    "hit = 0\n",
    "hit_crop = 0\n",
    "total = 0\n",
    "with torch.no_grad():\n",
    "    with tqdm(len(_loader), file=sys.stdout) as pbar:\n",
    "        for batch_idx, (data, target) in enumerate(_loader):\n",
    "            crop_data = []\n",
    "            for i in range(data.shape[0]):\n",
    "                cur_data = data[i].permute(1,2,0)\n",
    "                data_img = cv2.resize(cur_data.numpy(), (int(p*2), int(p*2)))\n",
    "                crop_img = data_img[p//2:3*p//2, p//2:3*p//2]\n",
    "                crop_data.append(torch.from_numpy(crop_img.transpose(2, 0, 1)[None, ...]))\n",
    "                \n",
    "#                 plt.imshow(cur_data)\n",
    "#                 plt.show()\n",
    "#                 plt.imshow(crop_img)\n",
    "#                 plt.show()\n",
    "#                 import pdb; pdb.set_trace()\n",
    "            \n",
    "            crop_data = torch.cat(crop_data)\n",
    "            if cuda:\n",
    "                data, crop_data, target = data.cuda(), crop_data.cuda(), target.cuda()\n",
    "            pred = model(data)[0].max(1)[1]\n",
    "            pred_crop = model(crop_data)[0].max(1)[1]\n",
    "            \n",
    "            hit += pred.eq(target).cpu().sum()\n",
    "            hit_crop += pred_crop.eq(target).cpu().sum()\n",
    "            total += len(target)\n",
    "\n",
    "acc = hit.type(dtype=torch.float64)/total\n",
    "acc_crop = hit_crop.type(dtype=torch.float64)/total\n",
    "\n",
    "print('Test accuracy:{}, zoomed out accuracy:{}'.format(acc, acc_crop))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " Best model saved. [25856/50000 (52%)]\tLoss: 1.191429\n",
      " Train Epoch: 1/15 [50000/50000 (20%)]\tLoss: 1.025013\t Accuracy: 58.1100% \t             Scaled-up Accuracy: 32.6300% \t\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 1.052014\n",
      " Train Epoch: 2/15 [50000/50000 (20%)]\tLoss: 1.018951\t Accuracy: 59.8900% \t             Scaled-up Accuracy: 38.2600% \t\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 1.042470\n",
      " Train Epoch: 3/15 [50000/50000 (20%)]\tLoss: 0.918776\t Accuracy: 64.5500% \t             Scaled-up Accuracy: 42.4400% \t\n",
      " Train Epoch: 4/15 [50000/50000 (20%)]\tLoss: 0.715079\t Accuracy: 62.5000% \t             Scaled-up Accuracy: 40.8100% \t\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.759676\n",
      " Train Epoch: 5/15 [50000/50000 (20%)]\tLoss: 0.691271\t Accuracy: 68.3500% \t             Scaled-up Accuracy: 41.1100% \t\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.688234\n",
      " Train Epoch: 6/15 [50000/50000 (20%)]\tLoss: 0.841605\t Accuracy: 70.4800% \t             Scaled-up Accuracy: 39.0900% \t\n",
      " Best model saved. [25856/50000 (52%)]\tLoss: 0.601093\n",
      " Train Epoch: 7/15 [50000/50000 (20%)]\tLoss: 0.905031\t Accuracy: 72.6000% \t             Scaled-up Accuracy: 41.9400% \t\n",
      " Train Epoch: 8/15 [50000/50000 (20%)]\tLoss: 0.596615\t Accuracy: 72.3100% \t             Scaled-up Accuracy: 40.3300% \t\n",
      " Train Epoch: 9/15 [50000/50000 (20%)]\tLoss: 0.574426\t Accuracy: 72.1100% \t             Scaled-up Accuracy: 35.8300% \t\n",
      " Best model saved.5 [25856/50000 (52%)]\tLoss: 0.471950\n",
      " Train Epoch: 10/15 [50000/50000 (20%)]\tLoss: 0.362183\t Accuracy: 73.1400% \t             Scaled-up Accuracy: 35.1200% \t\n",
      " Train Epoch: 11/15 [50000/50000 (20%)]\tLoss: 0.548658\t Accuracy: 72.3200% \t             Scaled-up Accuracy: 33.1500% \t\n",
      " Train Epoch: 12/15 [50000/50000 (20%)]\tLoss: 0.388898\t Accuracy: 72.9000% \t             Scaled-up Accuracy: 39.3000% \t\n",
      " Best model saved.5 [25856/50000 (52%)]\tLoss: 0.301367\n",
      " Train Epoch: 13/15 [50000/50000 (20%)]\tLoss: 0.242607\t Accuracy: 73.9900% \t             Scaled-up Accuracy: 40.5800% \t\n",
      " Train Epoch: 14/15 [50000/50000 (20%)]\tLoss: 0.248111\t Accuracy: 72.6900% \t             Scaled-up Accuracy: 38.7200% \t\n",
      " Train Epoch: 15/15 [50000/50000 (20%)]\tLoss: 0.465826\t Accuracy: 72.3600% \t             Scaled-up Accuracy: 37.8700% \t\n"
     ]
    }
   ],
   "source": [
    "# train on center 16*16, test on 32*32 original\n",
    "\n",
    "##### \n",
    "# re-run model definition block if want to train from scratch\n",
    "#####\n",
    "\n",
    "EPOCHS = 15\n",
    "losses = []\n",
    "\n",
    "best_acc = 0\n",
    "for epoch in range(EPOCHS):\n",
    "    model.train()\n",
    "    for batch_idx, (data, target) in enumerate(train_loader):\n",
    "        padded_data = []\n",
    "        for i in range(data.shape[0]):\n",
    "            cur_data = data[i].permute(1,2,0)\n",
    "            data_img = cv2.resize(cur_data.numpy(), (p//2, p//2))\n",
    "            padded_img = np.zeros_like(cur_data)\n",
    "            padded_img[p//4:3*p//4, p//4:3*p//4] = data_img\n",
    "            padded_data.append(torch.from_numpy(padded_img.transpose(2, 0, 1)[None, ...]))\n",
    "            \n",
    "#             plt.imshow(cur_data)\n",
    "#             plt.show()\n",
    "#             plt.imshow(padded_img)\n",
    "#             plt.show()\n",
    "#             import pdb; pdb.set_trace()\n",
    "            \n",
    "        padded_data = torch.cat(padded_data)\n",
    "        if cuda:\n",
    "            padded_data, target = padded_data.cuda(), target.cuda()\n",
    "        \n",
    "        optimizer.zero_grad()\n",
    "        # Predict\n",
    "        y_pred = model(padded_data)[0]\n",
    "\n",
    "        # Calculate loss\n",
    "        loss = F.cross_entropy(y_pred, target)\n",
    "        losses.append(loss.cpu().data)      \n",
    "        # Backpropagation\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        \n",
    "        # Display\n",
    "        if batch_idx % 100 == 1:\n",
    "            print('\\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
    "                  epoch+1,\n",
    "                  EPOCHS,\n",
    "                  batch_idx * len(data),\n",
    "                  len(train_loader.dataset),\n",
    "                  100. * batch_idx / len(train_loader), \n",
    "                  loss.cpu().data), \n",
    "                  end='')\n",
    "    # Eval\n",
    "    model.eval()\n",
    "    hit = 0\n",
    "    hit_padded = 0\n",
    "    total = 0\n",
    "    with torch.no_grad():\n",
    "        for batch_idx, (data, target) in enumerate(test_loader):\n",
    "            padded_data = []\n",
    "            for i in range(data.shape[0]):\n",
    "                cur_data = data[i].permute(1,2,0)\n",
    "                data_img = cv2.resize(cur_data.numpy(), (p//2, p//2))\n",
    "                padded_img = np.zeros_like(cur_data)\n",
    "                padded_img[p//4:3*p//4, p//4:3*p//4] = data_img\n",
    "                padded_data.append(torch.from_numpy(padded_img.transpose(2, 0, 1)[None, ...]))\n",
    "\n",
    "            padded_data = torch.cat(padded_data)\n",
    "            if cuda:\n",
    "                data, padded_data, target = data.cuda(), padded_data.cuda(), target.cuda()\n",
    "                \n",
    "            pred = model(data)[0].max(1)[1]\n",
    "            pred_padded = model(padded_data)[0].max(1)[1]\n",
    "            \n",
    "            hit += pred.eq(target).cpu().sum()\n",
    "            hit_padded += pred_padded.eq(target).cpu().sum()\n",
    "            total += len(target)\n",
    "            \n",
    "    accuracy = hit.type(dtype=torch.float64)/total\n",
    "    accuracy_padded = hit_padded.type(dtype=torch.float64)/total\n",
    "\n",
    "            \n",
    "    # save best\n",
    "    if accuracy_padded > best_acc:\n",
    "        best_acc = accuracy_padded\n",
    "        torch.save({'epoch': epoch,\n",
    "                  'model': model.state_dict(),\n",
    "                  'optimizer': optimizer.state_dict()\n",
    "                 }, '{}/scale_epoch_{}.pth'.format(save_path, epoch))\n",
    "        print('\\r Best model saved.\\r')\n",
    "      \n",
    "    print('\\r Train Epoch: {}/{} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\t Accuracy: {:.4f}% \\t \\\n",
    "            Scaled-up Accuracy: {:.4f}% \\t'.format(\n",
    "        epoch+1,\n",
    "        EPOCHS,\n",
    "        len(train_loader.dataset), \n",
    "        len(train_loader.dataset),\n",
    "        100. * batch_idx / len(train_loader), \n",
    "        loss.cpu().data,\n",
    "        accuracy_padded*100,\n",
    "        accuracy*100,\n",
    "        end=''))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
