{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Neural Color Transfer (Multi-Reference Style Transfer)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Library"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Append 'src' direc to import modules from notebooks directory#\n",
    "##################################################\n",
    "#import os,sys\n",
    "#src_dir=os.path.join(os.getcwd(),os.pardir)\n",
    "#sys.path.append(src_dir)\n",
    "#################################################"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE: %pylab injects numpy and matplotlib.pyplot names (e.g. `imshow`) into\n",
     "# the notebook namespace; later cells call `imshow` directly, so this magic is\n",
     "# load-bearing even though %pylab is deprecated in favour of explicit imports.\n",
     "%pylab inline\n",
     "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib as plt\n",
    "import numpy as np\n",
    "import torch\n",
    "\n",
    "from PIL import Image\n",
    "from skimage import color\n",
    "from sklearn.cluster import KMeans\n",
    "from sklearn.neighbors import NearestNeighbors\n",
    "from torch import nn, optim\n",
    "from torchvision import models, transforms, utils"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Input Source & Reference Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "USE_CUDA = True  # or False if you don't have CUDA\n",
     "# Indices into the FeatureExtractor built below; a later cell prints the layer\n",
     "# names these resolve to (presumably one ReLU per VGG19 conv block - see that\n",
     "# cell's output to confirm).\n",
     "FEATURE_IDS = [1, 6, 11, 20, 29]\n",
     "# Axis permutations used with numpy transpose throughout the notebook:\n",
     "LEFT_SHIFT = (1, 2, 0)   # (C, H, W) -> (H, W, C), e.g. for imshow\n",
     "RIGHT_SHIFT = (2, 0, 1)  # (H, W, C) -> (C, H, W)\n",
     "\n",
     "# Source image (to be recolored) and reference image (color donor).\n",
     "imgS_path = './image/0_Source.jpg'\n",
     "imgR_path = './image/0_Reference.jpg'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Checking\n",
    "origS = Image.open(imgS_path).convert(\"RGB\")\n",
    "imshow(origS)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "origR = Image.open(imgR_path).convert(\"RGB\")\n",
    "imshow(origR)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Image Loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def image_loader(img_path, flip=False):\n",
     "    \"\"\"Load an RGB image as a normalized (1, 3, H, W) float tensor.\n",
     "\n",
     "    Args:\n",
     "        img_path: path to the image file.\n",
     "        flip: if True, mirror the image left-right before transforming.\n",
     "    \"\"\"\n",
     "    img = Image.open(img_path).convert(\"RGB\")\n",
     "    if flip:\n",
     "        img = img.transpose(Image.FLIP_LEFT_RIGHT)\n",
     "\n",
     "    data_transforms = transforms.Compose([\n",
     "        transforms.ToTensor(),\n",
     "        # Mean/std used by torchvision's ImageNet-pretrained models (VGG19 below).\n",
     "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
     "    ])\n",
     "    img_tensor = data_transforms(img)\n",
     "    img_tensor = img_tensor.unsqueeze(0)  # add batch dimension\n",
     "    return img_tensor\n",
     "\n",
     "\n",
     "def tsshow(img_tensor):\n",
     "    \"\"\"Display a (1, C, H, W) or (C, H, W) CPU tensor via imshow as (H, W, C).\"\"\"\n",
     "    img_np = img_tensor.squeeze().numpy().transpose(LEFT_SHIFT)\n",
     "    imshow(img_np)  # , interpolation = 'nearest')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "imgS = image_loader(imgS_path, flip=False)\n",
    "imgR = image_loader(imgR_path, flip=False)\n",
    "\n",
    "imgS_np = imgS.squeeze().numpy().transpose(LEFT_SHIFT)\n",
    "imgR_np = imgR.squeeze().numpy().transpose(LEFT_SHIFT)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(imgS.size())  # (1, 3, SHeight, SWidth)\n",
    "print(imgR.size())  # (1, 3, RHeight, RWidth)\n",
    "print(imgS.dtype, imgR.dtype, \"\\n\")  # torch.float32\n",
    "\n",
    "print(imgS_np.shape)  # (SHeight, SWidth, 3)\n",
    "print(imgR_np.shape, \"\\n\")  # (RHeight, RWidth, 3)\n",
    "\n",
    "# Verifying normalization\n",
    "print(\"Original S's mean:\", np.asarray(origS).mean(axis=(0, 1)))\n",
    "print(\"Original S's std:\", np.asarray(origS).std(axis=(0, 1)))\n",
    "print(\"Normalized S's mean:\", imgS_np.mean(axis=(0, 1)))\n",
    "print(\"Normalized S's std:\", imgS_np.std(axis=(0, 1)), \"\\n\")\n",
    "\n",
    "print(\"Original R's mean:\", np.asarray(origR).mean(axis=(0, 1)))\n",
    "print(\"Original R's std:\", np.asarray(origR).std(axis=(0, 1)))\n",
    "print(\"Normalized R's mean:\", imgR_np.mean(axis=(0, 1)))\n",
    "print(\"Normalized R's std:\", imgR_np.std(axis=(0, 1)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "tsshow(imgS)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "tsshow(imgR)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Process\n",
    "From S_6 = S_(L+1) to S_1(Final Result)\n",
    "\n",
    "Feature Domain\n",
    "    > FeatureExtractor\n",
    "    > PatchMatch\n",
    "    > BDS Voting\n",
    "\n",
    "Image Domain\n",
    "    > Resolution equal\n",
    "    > Local Color Transfer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Feature Extractor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class FeatureExtractor(nn.Sequential):\n",
     "    \"\"\"Sequential wrapper whose forward runs only up to a requested layer index.\"\"\"\n",
     "\n",
     "    def __init__(self):\n",
     "        super(FeatureExtractor, self).__init__()\n",
     "\n",
     "    def add_layer(self, name, layer):\n",
     "        # Register a submodule under a human-readable name (e.g. \"relu_3_2\").\n",
     "        self.add_module(name, layer)\n",
     "\n",
     "    def forward(self, x, feature_id):\n",
     "        \"\"\"Return the activation of the `feature_id`-th registered layer.\n",
     "\n",
     "        NOTE(review): implicitly returns None when feature_id is out of range\n",
     "        (>= number of registered layers).\n",
     "        \"\"\"\n",
     "        for idx, module in enumerate(self._modules):\n",
     "            x = self._modules[module](x)\n",
     "            if idx == feature_id:\n",
     "                return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
     "# Rebuild VGG19's feature layers under readable names, swapping MaxPool for AvgPool.\n",
     "vgg_temp = models.vgg19(pretrained=True).features\n",
     "model = FeatureExtractor()  # The new Feature Extractor module network\n",
     "\n",
     "# Counters used to name layers as <type>_<block>_<index>, e.g. conv_3_2.\n",
     "conv_counter = 1\n",
     "relu_counter = 1\n",
     "block_counter = 1\n",
     "\n",
     "for i, layer in enumerate(list(vgg_temp)):\n",
     "    if isinstance(layer, nn.Conv2d):\n",
     "        name = \"conv_\" + str(block_counter) + \"_\" + str(conv_counter)\n",
     "        conv_counter += 1\n",
     "        model.add_layer(name, layer)\n",
     "\n",
     "    if isinstance(layer, nn.ReLU):\n",
     "        name = \"relu_\" + str(block_counter) + \"_\" + str(relu_counter)\n",
     "        relu_counter += 1\n",
     "        model.add_layer(name, layer)\n",
     "\n",
     "    if isinstance(layer, nn.MaxPool2d):\n",
     "        name = \"pool_\" + str(block_counter) \n",
     "        relu_counter = conv_counter = 1\n",
     "        block_counter += 1\n",
     "        model.add_layer(name, nn.AvgPool2d(2, 2))  # Is nn.AvgPool2d(2, 2) better than nn.MaxPool2d?\n",
     "\n",
     "if USE_CUDA:\n",
     "    # NOTE(review): GPU index 3 is hardcoded here and in several later cells;\n",
     "    # consider a single DEVICE constant in the config cell.\n",
     "    model.cuda('cuda:3')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(model)\n",
    "print([list(model._modules)[idx] for idx in FEATURE_IDS])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_feature(img_tensor, feature_id):\n",
    "    if USE_CUDA:\n",
    "        img_tensor = img_tensor.cuda('cuda:3')\n",
    "\n",
    "    feature_tensor = model(img_tensor, feature_id)\n",
    "    feature = feature_tensor.data.squeeze().cpu().numpy().transpose(LEFT_SHIFT)\n",
    "    return feature\n",
    "\n",
    "\n",
    "def normalize(feature):\n",
    "    return feature / np.linalg.norm(feature, ord=2, axis=2, keepdims=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "feat5S = get_feature(imgS, FEATURE_IDS[4])\n",
    "feat5R = get_feature(imgR, FEATURE_IDS[4])\n",
    "feat5S_norm = normalize(feat5S)\n",
    "feat5R_norm = normalize(feat5R)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(feat5S.shape)\n",
    "print(feat5R.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# (IN PROGRESS)\n",
     "# EXPERIMENTAL\n",
     "class DeepDream:\n",
     "    \"\"\"\n",
     "        Gradient-ascent visualization: optimizes the input image so that the\n",
     "        mean activation of one filter in one layer grows (the loss is the\n",
     "        negated mean, so minimizing the loss maximizes the activation).\n",
     "    \"\"\"\n",
     "    def __init__(self, model, selected_layer, selected_filter, im_path):\n",
     "        self.model = model\n",
     "        self.selected_layer = selected_layer\n",
     "        self.selected_filter = selected_filter\n",
     "        self.conv_output = 0\n",
     "        self.image = image_loader(im_path, flip=False)\n",
     "\n",
     "        if USE_CUDA:\n",
     "            self.model.cuda('cuda:3')\n",
     "            self.image = self.image.cuda('cuda:3')\n",
     "\n",
     "        self.image.requires_grad_()\n",
     "        # Hook the layers to get result of the convolution\n",
     "        self.hook_layer()\n",
     "\n",
     "    def hook_layer(self):\n",
     "        def hook_function(module, grad_in, grad_out):\n",
     "            # Get the conv output of the selected filter (from selected layer)\n",
     "            self.conv_output = grad_out[0, self.selected_filter]\n",
     "\n",
     "        # Hook the selected layer\n",
     "        self.model[self.selected_layer].register_forward_hook(hook_function)\n",
     "\n",
     "    def dream(self):\n",
     "        # Define optimizer for the image\n",
     "        # Earlier layers need higher learning rates to visualize whereas lower layers need less\n",
     "        optimizer = optim.SGD([self.image], lr=12, weight_decay=1e-4)\n",
     "        # optimizer = optim.Adam([self.image], lr=0.1, weight_decay=1e-6)\n",
     "        for i in range(1, 251):\n",
     "            optimizer.zero_grad()\n",
     "            # Assign image to a variable to move forward in the model\n",
     "            x = self.image\n",
     "            for index, layer in enumerate(self.model):\n",
     "                # Forward\n",
     "                x = layer(x)\n",
     "                # Only need to forward until the selected layer is reached\n",
     "                if index == self.selected_layer:\n",
     "                    break\n",
     "            # Negated mean: gradient descent on -mean == gradient ascent on the\n",
     "            # selected filter's mean activation (captured by the forward hook).\n",
     "            loss = -torch.mean(self.conv_output)\n",
     "            if i % 10 == 0:\n",
     "                print(\"Iteration:\", str(i) + \"/250\", \"Loss: {0:.2f}\".format(loss.data))\n",
     "            # Backward\n",
     "            loss.backward()\n",
     "            # Update image\n",
     "            optimizer.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# (IN PROGRESS)\n",
    "# EXPERIMENTAL\n",
    "cnn_layer = FEATURE_IDS[4]\n",
    "filter_pos = 94\n",
    "dd = DeepDream(vgg_temp, cnn_layer, filter_pos, imgS_path)\n",
    "# This operation can also be done without Pytorch hooks\n",
    "# See layer visualisation for the implementation without hooks\n",
    "dd.dream()\n",
    "\n",
    "dd_image_np = dd.image.data.squeeze().cpu().numpy().transpose(LEFT_SHIFT)\n",
    "imshow(dd_image_np * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "## PatchMatch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PatchMatch: \n",
    "    def __init__(self, a, b, patch_size=3):\n",
    "        self.a = a\n",
    "        self.b = b\n",
    "        self.ah = a.shape[0]\n",
    "        self.aw = a.shape[1]\n",
    "        self.bh = b.shape[0]\n",
    "        self.bw = b.shape[1]\n",
    "        self.patch_size = patch_size\n",
    "\n",
    "        self.nnf = np.zeros((self.ah, self.aw, 2)).astype(np.int)  # The NNF\n",
    "        self.nnd = np.zeros((self.ah, self.aw))  # The NNF distance map\n",
    "        self.init_nnf()\n",
    "\n",
    "    def init_nnf(self):\n",
    "        for ay in range(self.ah):\n",
    "            for ax in range(self.aw):\n",
    "                by = np.random.randint(self.bh)\n",
    "                bx = np.random.randint(self.bw)\n",
    "                self.nnf[ay, ax] = [by, bx]\n",
    "                self.nnd[ay, ax] = self.calc_dist(ay, ax, by, bx)\n",
    "\n",
    "    def calc_dist(self, ay, ax, by, bx):\n",
    "        \"\"\"\n",
    "            Measure distance between 2 patches across all channels\n",
    "            ay : y coordinate of a patch in a\n",
    "            ax : x coordinate of a patch in a\n",
    "            by : y coordinate of a patch in b\n",
    "            bx : x coordinate of a patch in b\n",
    "        \"\"\"\n",
    "        dy0 = dx0 = self.patch_size // 2\n",
    "        dy1 = dx1 = self.patch_size // 2 + 1\n",
    "        dy0 = min(ay, by, dy0)\n",
    "        dy1 = min(self.ah - ay, self.bh - by, dy1)\n",
    "        dx0 = min(ax, bx, dx0)\n",
    "        dx1 = min(self.aw - ax, self.bw - bx, dx1)\n",
    "\n",
    "        dist = np.sum(np.square(self.a[ay - dy0:ay + dy1, ax - dx0:ax + dx1] - self.b[by - dy0:by + dy1, bx - dx0:bx + dx1]))\n",
    "        dist /= ((dy0 + dy1) * (dx0 + dx1))\n",
    "        return dist\n",
    "\n",
    "    def improve_guess(self, ay, ax, by, bx, ybest, xbest, dbest):\n",
    "        d = self.calc_dist(ay, ax, by, bx)\n",
    "        if d < dbest:\n",
    "            ybest, xbest, dbest = by, bx, d\n",
    "        return ybest, xbest, dbest\n",
    "\n",
    "    def improve_nnf(self, total_iter=5):\n",
    "        for iter in range(total_iter):\n",
    "            if iter % 2:\n",
    "                ystart, yend, ychange = self.ah - 1, -1, -1\n",
    "                xstart, xend, xchange = self.aw - 1, -1, -1\n",
    "            else:\n",
    "                ystart, yend, ychange = 0, self.ah, 1\n",
    "                xstart, xend, xchange = 0, self.aw, 1\n",
    "\n",
    "            for ay in range(ystart, yend, ychange):\n",
    "                for ax in range(xstart, xend, xchange):\n",
    "                    ybest, xbest = self.nnf[ay, ax]\n",
    "                    dbest = self.nnd[ay, ax]\n",
    "\n",
    "                    # Propagation\n",
    "                    if 0 <= (ay - ychange) < self.ah:\n",
    "                        yp, xp = self.nnf[ay - ychange, ax]\n",
    "                        yp += ychange\n",
    "                        if 0 <= yp < self.bh:\n",
    "                            ybest, xbest, dbest = self.improve_guess(ay, ax, yp, xp, ybest, xbest, dbest)\n",
    "                    if 0 <= (ax - xchange) < self.aw:\n",
    "                        yp, xp = self.nnf[ay, ax - xchange]\n",
    "                        xp += xchange\n",
    "                        if 0 <= xp < self.bw:\n",
    "                            ybest, xbest, dbest = self.improve_guess(ay, ax, yp, xp, ybest, xbest, dbest)\n",
    "\n",
    "                    # Random search\n",
    "                    rand_d = max(self.bh, self.bw)\n",
    "                    while rand_d >= 1:\n",
    "                        ymin, ymax = max(ybest - rand_d, 0), min(ybest + rand_d, self.bh)\n",
    "                        xmin, xmax = max(xbest - rand_d, 0), min(xbest + rand_d, self.bw)\n",
    "                        yp = np.random.randint(ymin, ymax)\n",
    "                        xp = np.random.randint(xmin, xmax)\n",
    "                        ybest, xbest, dbest = self.improve_guess(ay, ax, yp, xp, ybest, xbest, dbest)\n",
    "                        rand_d = rand_d // 2\n",
    "\n",
    "                    self.nnf[ay, ax] = [ybest, xbest]\n",
    "                    self.nnd[ay, ax] = dbest\n",
    "            print(\"iteration:\", str(iter + 1) + \"/\" + str(total_iter))\n",
    "\n",
    "    def solve(self):\n",
    "        self.improve_nnf(total_iter=8)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "map5SR = PatchMatch(feat5S_norm, feat5R_norm)  # S -> R\n",
    "map5RS = PatchMatch(feat5R_norm, feat5S_norm)  # R -> S\n",
    "map5SR.solve()\n",
    "print()\n",
    "map5RS.solve()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(map5SR.nnf.shape)\n",
    "print(map5SR.nnd.shape, \"\\n\")\n",
    "\n",
    "print(map5RS.nnf.shape)\n",
    "print(map5RS.nnd.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Image Resizing"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def image_to_tensor(img, img_transforms=None):\n",
     "    \"\"\"Convert a PIL image to a (C, H, W) float tensor, applying optional\n",
     "    extra transforms first (no mean/std normalization, unlike image_loader).\"\"\"\n",
     "    if img_transforms is None:\n",
     "        img_transforms = list()\n",
     "    data_transforms = transforms.Compose(img_transforms + [\n",
     "        transforms.ToTensor(),\n",
     "    ])\n",
     "    img_tensor = data_transforms(img)\n",
     "    return img_tensor\n",
     "\n",
     "\n",
     "def resize_img(img, size):\n",
     "    \"\"\"Resize a PIL image to `size` and return it as a (C, H, W) tensor.\"\"\"\n",
     "    return image_to_tensor(img, [transforms.Resize(size)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "imgS_resized = resize_img(origS, feat5S.shape[:2])\n",
    "imgR_resized = resize_img(origR, feat5R.shape[:2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(imgS_resized.size())\n",
    "print(imgR_resized.size())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Checking\n",
    "tsshow(imgS_resized)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "tsshow(imgR_resized)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## BDS Voting (Reconstruct)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "def bds_vote(ref, nnf_sr, nnf_rs, patch_size=3):\n",
     "    \"\"\"\n",
     "    Reconstruct an image or feature map in the source geometry by\n",
     "    bidirectional similarity (BDS) voting.\n",
     "\n",
     "    Args:\n",
     "        ref: reference data, channel-first (C, ref_height, ref_width).\n",
     "        nnf_sr: forward NNF (src_height, src_width, 2) of [y, x] coords into ref.\n",
     "        nnf_rs: backward NNF (ref_height, ref_width, 2) of [y, x] coords into src.\n",
     "        patch_size: side length of the square voting patch.\n",
     "\n",
     "    Returns:\n",
     "        Array of shape (C, src_height, src_width): weighted average of votes.\n",
     "    \"\"\"\n",
     "\n",
     "    src_height = nnf_sr.shape[0]\n",
     "    src_width = nnf_sr.shape[1]\n",
     "    ref_height = nnf_rs.shape[0]\n",
     "    ref_width = nnf_rs.shape[1]\n",
     "    channel = ref.shape[0]\n",
     "\n",
     "    guide = np.zeros((channel, src_height, src_width))\n",
     "    weight = np.zeros((src_height, src_width))\n",
     "    # Uniform per-pixel vote weights so the two directions balance overall.\n",
     "    ws = 1 / (src_height * src_width)\n",
     "    wr = 1 / (ref_height * ref_width)\n",
     "\n",
     "    # coherence\n",
     "    # The S->R forward NNF enforces coherence\n",
     "    for sy in range(src_height):\n",
     "        for sx in range(src_width):\n",
     "            ry, rx = nnf_sr[sy, sx]\n",
     "\n",
     "            # Clamp the patch extent so it lies inside both source and reference.\n",
     "            dy0 = dx0 = patch_size // 2\n",
     "            dy1 = dx1 = patch_size // 2 + 1\n",
     "            dy0 = min(sy, ry, dy0)\n",
     "            dy1 = min(src_height - sy, ref_height - ry, dy1)\n",
     "            dx0 = min(sx, rx, dx0)\n",
     "            dx1 = min(src_width - sx, ref_width - rx, dx1)\n",
     "\n",
     "            guide[:, sy - dy0:sy + dy1, sx - dx0:sx + dx1] += ws * ref[:, ry - dy0:ry + dy1, rx - dx0:rx + dx1]\n",
     "            weight[sy - dy0:sy + dy1, sx - dx0:sx + dx1] += ws\n",
     "\n",
     "    # completeness\n",
     "    # The R->S backward NNF enforces completeness\n",
     "    for ry in range(ref_height):\n",
     "        for rx in range(ref_width):\n",
     "            sy, sx = nnf_rs[ry, rx]\n",
     "\n",
     "            dy0 = dx0 = patch_size // 2\n",
     "            dy1 = dx1 = patch_size // 2 + 1\n",
     "            dy0 = min(sy, ry, dy0)\n",
     "            dy1 = min(src_height - sy, ref_height - ry, dy1)\n",
     "            dx0 = min(sx, rx, dx0)\n",
     "            dx1 = min(src_width - sx, ref_width - rx, dx1)\n",
     "\n",
     "            guide[:, sy - dy0:sy + dy1, sx - dx0:sx + dx1] += wr * ref[:, ry - dy0:ry + dy1, rx - dx0:rx + dx1]\n",
     "            weight[sy - dy0:sy + dy1, sx - dx0:sx + dx1] += wr\n",
     "\n",
     "    # Avoid 0/0 at pixels no patch voted for; they keep their zero guide value.\n",
     "    weight[weight == 0] = 1\n",
     "    guide /= weight\n",
     "    return guide"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "imgG = bds_vote(imgR_resized, map5SR.nnf, map5RS.nnf)\n",
    "feat5G = bds_vote(feat5R.transpose(RIGHT_SHIFT), map5SR.nnf, map5RS.nnf).transpose(LEFT_SHIFT)\n",
    "feat5G_norm = normalize(feat5G)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(imgG.shape, \"==\", imgR_resized.size()[0], map5SR.nnf.shape[:2])\n",
    "print(feat5G.shape, \"==\", map5SR.nnf.shape[:2], feat5R.shape[2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "imshow(imgG.transpose(LEFT_SHIFT))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
     "## Affine Transform"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "kmeans = KMeans(n_clusters=5, n_jobs=1).fit(feat5S.reshape(-1, feat5S.shape[2]))\n",
    "kmeans_labels = kmeans.labels_.reshape(feat5S.shape[:2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "labS = color.rgb2lab(imgS_resized.numpy().transpose(LEFT_SHIFT))\n",
    "labG = color.rgb2lab(imgG.transpose(LEFT_SHIFT))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Checking\n",
    "print(labS.shape)\n",
    "print(labG.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LocalColorTransfer:\n",
    "    def __init__(self, s, g, featS_norm, featG_norm, kmeans_ratio=1, patch_size=3):\n",
    "        self.source = torch.from_numpy(s).float()\n",
    "        self.guide = torch.from_numpy(g).float()\n",
    "        self.featS_norm = torch.from_numpy(featS_norm).float()\n",
    "        self.featG_norm = torch.from_numpy(featG_norm).float()\n",
    "        self.height = s.shape[0]\n",
    "        self.width = s.shape[1]\n",
    "        self.channel = s.shape[2]\n",
    "        self.patch_size = patch_size\n",
    "\n",
    "        self.paramA = torch.zeros(s.shape)\n",
    "        self.paramB = torch.zeros(s.shape)\n",
    "        self.sub = torch.ones(*s.shape[:2], 1)\n",
    "\n",
    "        self.kmeans_labels = np.zeros(s.shape[:2]).astype(np.int32)\n",
    "        self.kmeans_ratio = kmeans_ratio\n",
    "\n",
    "        if USE_CUDA:\n",
    "            self.source = self.source.cuda('cuda:3')\n",
    "            self.guide = self.guide.cuda('cuda:3')\n",
    "            self.featS_norm = self.featS_norm.cuda('cuda:3')\n",
    "            self.featG_norm = self.featG_norm.cuda('cuda:3')\n",
    "            self.paramA = self.paramA.cuda('cuda:3')\n",
    "            self.paramB = self.paramB.cuda('cuda:3')\n",
    "            self.sub = self.sub.cuda('cuda:3')\n",
    "        self.init_params()\n",
    "\n",
    "    def init_params(self):\n",
    "        \"\"\"\n",
    "            Initialize a and b from source and guidance using mean and std\n",
    "        \"\"\"\n",
    "        eps = 0.002\n",
    "        for y in range(self.height):\n",
    "            for x in range(self.width):\n",
    "                dy0 = dx0 = self.patch_size // 2\n",
    "                dy1 = dx1 = self.patch_size // 2 + 1\n",
    "                dy0 = min(y, dy0)\n",
    "                dy1 = min(self.height - y, dy1)\n",
    "                dx0 = min(x, dx0)\n",
    "                dx1 = min(self.width - x, dx1)\n",
    "\n",
    "                patchS = self.source[y - dy0:y + dy1, x - dx0:x + dx1].reshape(-1, self.channel)\n",
    "                patchG = self.guide[y - dy0:y + dy1, x - dx0:x + dx1].reshape(-1, self.channel)\n",
    "                self.paramA[y, x] = patchG.std(0) / (patchS.std(0) + eps)\n",
    "                self.paramB[y, x] = patchG.mean(0) - self.paramA[y, x] * patchS.mean(0)\n",
    "                self.sub[y, x, 0] += self.patch_size ** 2 - (dy0 + dy1) * (dx0 + dx1)\n",
    "\n",
    "                y_adj = min(y // self.kmeans_ratio, kmeans_labels.shape[0] - 1)\n",
    "                x_adj = min(x // self.kmeans_ratio, kmeans_labels.shape[1] - 1)\n",
    "                self.kmeans_labels[y, x] = kmeans_labels[y_adj, x_adj]\n",
    "        self.paramA.requires_grad_()\n",
    "        self.paramB.requires_grad_()\n",
    "\n",
    "    def visualize(self):\n",
    "        transfered = self.paramA * self.source + self.paramB\n",
    "        imshow(transfered.data.cpu().numpy().astype(np.float64))\n",
    "        # imshow(color.lab2rgb(transfered.data.cpu().numpy().astype(np.float64)))\n",
    "\n",
    "    def loss_d(self):\n",
    "        error = torch.pow(self.featS_norm - self.featG_norm, 2).sum(2)\n",
    "        transfered = self.paramA * self.source + self.paramB\n",
    "        term1 = 1 - error / 4\n",
    "        term2 = torch.pow(transfered - self.guide, 2).sum(2)\n",
    "        loss_d = torch.mean(term1 * term2)\n",
    "\n",
    "        return loss_d\n",
    "\n",
    "    def loss_l(self):\n",
    "        patchS_stack = self.source.unsqueeze(2).repeat(1, 1, self.patch_size ** 2, 1)  # (self.height, self.width, 9, self.channel)\n",
    "        patchA_stack = self.paramA.unsqueeze(2).repeat(1, 1, self.patch_size ** 2, 1)\n",
    "        patchB_stack = self.paramB.unsqueeze(2).repeat(1, 1, self.patch_size ** 2, 1)\n",
    "        for y in range(self.height):\n",
    "            for x in range(self.width):\n",
    "                dy0 = dx0 = self.patch_size // 2\n",
    "                dy1 = dx1 = self.patch_size // 2 + 1\n",
    "                dy0 = min(y, dy0)\n",
    "                dy1 = min(self.height - y, dy1)\n",
    "                dx0 = min(x, dx0)\n",
    "                dx1 = min(self.width - x, dx1)\n",
    "\n",
    "                patchS_stack[y, x, :((dy0 + dy1) * (dx0 + dx1))] = self.source[y - dy0:y + dy1, x - dx0:x + dx1].reshape(-1, self.channel)\n",
    "                patchA_stack[y, x, :((dy0 + dy1) * (dx0 + dx1))] = self.paramA[y - dy0:y + dy1, x - dx0:x + dx1].reshape(-1, self.channel)\n",
    "                patchB_stack[y, x, :((dy0 + dy1) * (dx0 + dx1))] = self.paramB[y - dy0:y + dy1, x - dx0:x + dx1].reshape(-1, self.channel)\n",
    "\n",
    "        patchSD = torch.norm(self.source.unsqueeze(2) - patchS_stack, 2, 3).exp()\n",
    "        wgt = patchSD / (patchSD.sum(2, keepdim=True) - self.sub)\n",
    "        # Getting norm term\n",
    "        term1 = torch.pow(self.paramA.unsqueeze(2) - patchA_stack, 2).sum(3)\n",
    "        term2 = torch.pow(self.paramB.unsqueeze(2) - patchB_stack, 2).sum(3)\n",
    "        term3 = term1 + term2\n",
    "        loss_l = torch.sum(wgt * term3, 2).mean()\n",
    "\n",
    "        return loss_l\n",
    "\n",
    "        \"\"\"\n",
    "            if y == 0:\n",
    "                if x == 0:\n",
    "                    allA = patchA[0 ,0].view(3, 1, 1)  # left up corner\n",
    "                elif x == self.width - 1:\n",
    "                    allA = patchA[0, 1].view(3, 1, 1)  # right up corner\n",
    "                else:\n",
    "                    allA = patchA[0, 1].view(3, 1, 1)\n",
    "            elif y == self.height - 1:\n",
    "                if x == 0:\n",
    "                    allA = patchA[1, 0].view(3, 1, 1)  # left down corner\n",
    "                elif x == self.width - 1:\n",
    "                    allA = patchA[1, 1].view(3, 1, 1)  # right down corner\n",
    "                else:\n",
    "                    allA = patchA[1, 1].view(3, 1, 1)\n",
    "            else:\n",
    "                if x == 0:\n",
    "                    allA = patchA[1, 0].view(3, 1, 1)  # left middle\n",
    "                elif x == self.width - 1:\n",
    "                    allA = patchA[1, 1].view(3, 1, 1)  # right middle\n",
    "                else:\n",
    "                    allA = patchA[1, 1].view(3, 1, 1)  # middle\n",
    "\n",
    "            멍청한 내 자신의 노가다;\n",
    "        \"\"\"\n",
    "\n",
    "    def loss_nl(self):\n",
    "        patchS_stack = list()\n",
    "        patchA_stack = list()\n",
    "        patchB_stack = list()\n",
    "        mixedS = list()\n",
    "        mixedA = list()\n",
    "        mixedB = list()\n",
    "\n",
    "        index_map = np.zeros((2, self.height, self.width)).astype(np.int32)\n",
    "        index_map[0] = np.arange(self.height)[:, np.newaxis] + np.zeros(self.width).astype(np.int32)\n",
    "        index_map[1] = np.zeros(self.height).astype(np.int32)[:, np.newaxis] + np.arange(self.width)\n",
    "\n",
    "        for i in range(5):\n",
    "            index_map_cluster = index_map[:, self.kmeans_labels == i]\n",
    "            source_cluster = self.source[index_map_cluster[0], index_map_cluster[1]]\n",
    "            paramA_cluster = self.paramA[index_map_cluster[0], index_map_cluster[1]]\n",
    "            paramB_cluster = self.paramB[index_map_cluster[0], index_map_cluster[1]]\n",
    "\n",
    "            nbrs = NearestNeighbors(n_neighbors=9, n_jobs=1).fit(source_cluster)\n",
    "            indices = nbrs.kneighbors(source_cluster, return_distance=False)\n",
    "\n",
    "            patchS_stack.append(source_cluster[indices[:, 1:].reshape(-1)].reshape(-1, 8, self.channel))\n",
    "            patchA_stack.append(paramA_cluster[indices[:, 1:].reshape(-1)].reshape(-1, 8, self.channel))\n",
    "            patchB_stack.append(paramB_cluster[indices[:, 1:].reshape(-1)].reshape(-1, 8, self.channel))\n",
    "            mixedS.append(source_cluster.unsqueeze(1))\n",
    "            mixedA.append(paramA_cluster.unsqueeze(1))\n",
    "            mixedB.append(paramB_cluster.unsqueeze(1))\n",
    "\n",
    "        patchS_stack = torch.cat(patchS_stack)\n",
    "        patchA_stack = torch.cat(patchA_stack)\n",
    "        patchB_stack = torch.cat(patchB_stack)\n",
    "        mixedS = torch.cat(mixedS)\n",
    "        mixedA = torch.cat(mixedA)\n",
    "        mixedB = torch.cat(mixedB)\n",
    "\n",
    "        mixedT = mixedA * mixedS + mixedB\n",
    "        patchT_stack = patchA_stack * patchS_stack + patchB_stack\n",
    "        patchSD = torch.norm(mixedS - patchS_stack, 2, 2).exp()\n",
    "        wgt = patchSD / patchSD.sum(1, keepdim=True)\n",
    "        term1 = torch.pow(mixedT - patchT_stack, 2).sum(2)\n",
    "        loss_nl = torch.sum(wgt * term1, 1).mean()\n",
    "\n",
    "        return loss_nl\n",
    "\n",
    "    def train(self, total_iter=250):\n",
    "        \"\"\"Optimize the per-pixel affine color parameters (paramA, paramB).\n",
    "\n",
    "        Minimizes loss_d + hyper_l * loss_l + hyper_nl * loss_nl with Adam.\n",
    "\n",
    "        total_iter: number of optimization steps (default 250).\n",
    "        \"\"\"\n",
    "        optimizer = optim.Adam([self.paramA, self.paramB], lr=0.1, weight_decay=0)\n",
    "        hyper_l = 0.005   # weight of the local loss term\n",
    "        hyper_nl = 0.5    # weight of the non-local (patch consistency) loss term\n",
    "        for step in range(total_iter):  # 'step' instead of 'iter' to avoid shadowing the builtin\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            loss_d = self.loss_d()\n",
    "            loss_l = self.loss_l()\n",
    "            loss_nl = self.loss_nl()\n",
    "            loss = loss_d + hyper_l * loss_l + hyper_nl * loss_nl\n",
    "\n",
    "            # Log every 10 iterations only, instead of every step, so the\n",
    "            # notebook output is not flooded with hundreds of lines.\n",
    "            if (step + 1) % 10 == 0:\n",
    "                print(\"Loss_d: {0:.4f}, Loss_l: {1:.4f}, loss_nl: {2:.4f}\".format(loss_d.data, loss_l.data, loss_nl.data))\n",
    "                print(\"Iteration:\", str(step + 1) + \"/\" + str(total_iter), \"Loss: {0:.4f}\".format(loss.data))\n",
    "            loss.backward()\n",
    "            optimizer.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Pyramid level 5 (coarsest): fit the local color transfer on the resized\n",
    "# source against the BDS-voted guidance image imgG, and save the guidance.\n",
    "lct = LocalColorTransfer(imgS_resized.numpy().transpose(LEFT_SHIFT), imgG.transpose(LEFT_SHIFT), feat5S_norm, feat5G_norm, kmeans_ratio=1)\n",
    "save = torch.from_numpy(imgG).float()\n",
    "utils.save_image(save, 'results/img5G.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Inspect the transfer result before optimization.\n",
    "lct.visualize()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Optimize the affine color parameters, then visualize the trained result.\n",
    "lct.train()\n",
    "lct.visualize()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## WLS Filter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "from guided_filter_pytorch.guided_filter import FastGuidedFilter\n",
    "# labOrigS = torch.from_numpy(color.rgb2lab(np.array(origS)).transpose(RIGHT_SHIFT)).float()\n",
    "# Full-resolution RGB source, used as the guidance image for upsampling.\n",
    "rgbOrigS = transforms.ToTensor()(origS)\n",
    "# Upsample the low-resolution affine parameters paramA / paramB to the\n",
    "# original image size with a fast guided filter, guided by the source image\n",
    "# (low-res input: lct.source; low-res signal: the parameter map).\n",
    "a_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramA.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "b_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramB.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Low-resolution scale map paramA, displayed as an image.\n",
    "tsshow(lct.paramA.data.permute(RIGHT_SHIFT).cpu())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Guided-filter upsampled scale map at full resolution.\n",
    "tsshow(a_upsampled.data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Low-resolution offset map paramB, displayed as an image.\n",
    "tsshow(lct.paramB.data.permute(RIGHT_SHIFT).cpu())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Guided-filter upsampled offset map at full resolution.\n",
    "tsshow(b_upsampled.data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Apply the upsampled affine transform at full resolution: T = a * S + b.\n",
    "img5S = a_upsampled * rgbOrigS + b_upsampled\n",
    "# NOTE(review): a * S + b may produce values slightly outside [0, 1];\n",
    "# imshow clips them with a warning -- consider np.clip here. TODO confirm.\n",
    "img5S = img5S.data.numpy().transpose(LEFT_SHIFT).astype(np.float64)\n",
    "# img5S = color.lab2rgb(img5S.data.numpy().transpose(LEFT_SHIFT).astype(np.float64))\n",
    "imshow(img5S)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the level-5 result, then normalize with ImageNet statistics so it can\n",
    "# be fed back into VGG for the next (finer) pyramid level.\n",
    "img5S = torch.from_numpy(img5S.transpose(RIGHT_SHIFT)).float()\n",
    "utils.save_image(img5S, 'results/img5S.png')\n",
    "# Bug fix: transforms.Normalize is out-of-place in modern torchvision, so the\n",
    "# result must be assigned back (previously the normalized tensor was discarded).\n",
    "img5S = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img5S)\n",
    "img5S = img5S.unsqueeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Pyramid level 4: re-run feature matching and color transfer, using the\n",
    "# level-5 output (img5S) as the new source image.\n",
    "feat4S = get_feature(img5S, FEATURE_IDS[3])\n",
    "feat4R = get_feature(imgR, FEATURE_IDS[3])\n",
    "feat4S_norm = normalize(feat4S)\n",
    "feat4R_norm = normalize(feat4R)\n",
    "\n",
    "# Bidirectional PatchMatch between source and reference VGG features.\n",
    "map4SR = PatchMatch(feat4S_norm, feat4R_norm) #S -> R\n",
    "map4RS = PatchMatch(feat4R_norm, feat4S_norm) #R -> S\n",
    "map4SR.solve()\n",
    "print()\n",
    "map4RS.solve()\n",
    "\n",
    "imgS_resized = resize_img(origS, feat4S.shape[:2])\n",
    "imgR_resized = resize_img(origR, feat4R.shape[:2])\n",
    "\n",
    "# Bidirectional-similarity voting builds the guidance image and features.\n",
    "imgG = bds_vote(imgR_resized, map4SR.nnf, map4RS.nnf)\n",
    "feat4G = bds_vote(feat4R.transpose(RIGHT_SHIFT), map4SR.nnf, map4RS.nnf).transpose(LEFT_SHIFT)\n",
    "feat4G_norm = normalize(feat4G)\n",
    "\n",
    "# NOTE(review): labS/labG do not appear to be read below -- possibly dead;\n",
    "# confirm LocalColorTransfer does not use them as globals before removing.\n",
    "labS = color.rgb2lab(imgS_resized.numpy().transpose(LEFT_SHIFT))\n",
    "labG = color.rgb2lab(imgG.transpose(LEFT_SHIFT))\n",
    "\n",
    "lct = LocalColorTransfer(imgS_resized.numpy().transpose(LEFT_SHIFT), imgG.transpose(LEFT_SHIFT), feat4S_norm, feat4G_norm, kmeans_ratio=2)\n",
    "save = torch.from_numpy(imgG).float()\n",
    "utils.save_image(save, 'results/img4G.png')\n",
    "lct.train()\n",
    "\n",
    "# Upsample the fitted affine parameters to full resolution (guided filter).\n",
    "a_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramA.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "b_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramB.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "\n",
    "# Compose the level-4 result at full resolution: T = a * S + b.\n",
    "img4S = a_upsampled * rgbOrigS + b_upsampled\n",
    "img4S = img4S.data.numpy().transpose(LEFT_SHIFT)\n",
    "imshow(img4S)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the level-4 result, then normalize with ImageNet statistics for VGG.\n",
    "img4S = torch.from_numpy(img4S.transpose(RIGHT_SHIFT)).float()\n",
    "utils.save_image(img4S, 'results/img4S.png')\n",
    "# Bug fix: transforms.Normalize is out-of-place in modern torchvision, so the\n",
    "# result must be assigned back (previously the normalized tensor was discarded).\n",
    "img4S = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img4S)\n",
    "img4S = img4S.unsqueeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Pyramid level 3: re-run feature matching and color transfer, using the\n",
    "# level-4 output (img4S) as the new source image.\n",
    "feat3S = get_feature(img4S, FEATURE_IDS[2])\n",
    "feat3R = get_feature(imgR, FEATURE_IDS[2])\n",
    "feat3S_norm = normalize(feat3S)\n",
    "feat3R_norm = normalize(feat3R)\n",
    "\n",
    "# Bidirectional PatchMatch between source and reference VGG features.\n",
    "map3SR = PatchMatch(feat3S_norm, feat3R_norm) #S -> R\n",
    "map3RS = PatchMatch(feat3R_norm, feat3S_norm) #R -> S\n",
    "map3SR.solve()\n",
    "print()\n",
    "map3RS.solve()\n",
    "\n",
    "imgS_resized = resize_img(origS, feat3S.shape[:2])\n",
    "imgR_resized = resize_img(origR, feat3R.shape[:2])\n",
    "\n",
    "# Bidirectional-similarity voting builds the guidance image and features.\n",
    "imgG = bds_vote(imgR_resized, map3SR.nnf, map3RS.nnf)\n",
    "feat3G = bds_vote(feat3R.transpose(RIGHT_SHIFT), map3SR.nnf, map3RS.nnf).transpose(LEFT_SHIFT)\n",
    "feat3G_norm = normalize(feat3G)\n",
    "\n",
    "# NOTE(review): labS/labG do not appear to be read below -- possibly dead;\n",
    "# confirm LocalColorTransfer does not use them as globals before removing.\n",
    "labS = color.rgb2lab(imgS_resized.numpy().transpose(LEFT_SHIFT))\n",
    "labG = color.rgb2lab(imgG.transpose(LEFT_SHIFT))\n",
    "\n",
    "lct = LocalColorTransfer(imgS_resized.numpy().transpose(LEFT_SHIFT), imgG.transpose(LEFT_SHIFT), feat3S_norm, feat3G_norm, kmeans_ratio=4)\n",
    "save = torch.from_numpy(imgG).float()\n",
    "utils.save_image(save, 'results/img3G.png')\n",
    "lct.train()\n",
    "\n",
    "# Upsample the fitted affine parameters to full resolution (guided filter).\n",
    "a_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramA.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "b_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramB.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "\n",
    "# Compose the level-3 result at full resolution: T = a * S + b.\n",
    "img3S = a_upsampled * rgbOrigS + b_upsampled\n",
    "img3S = img3S.data.numpy().transpose(LEFT_SHIFT)\n",
    "imshow(img3S)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the level-3 result, then normalize with ImageNet statistics for VGG.\n",
    "img3S = torch.from_numpy(img3S.transpose(RIGHT_SHIFT)).float()\n",
    "utils.save_image(img3S, 'results/img3S.png')\n",
    "# Bug fix: transforms.Normalize is out-of-place in modern torchvision, so the\n",
    "# result must be assigned back (previously the normalized tensor was discarded).\n",
    "img3S = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img3S)\n",
    "img3S = img3S.unsqueeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Pyramid level 2: re-run feature matching and color transfer, using the\n",
    "# level-3 output (img3S) as the new source image.\n",
    "feat2S = get_feature(img3S, FEATURE_IDS[1])\n",
    "feat2R = get_feature(imgR, FEATURE_IDS[1])\n",
    "feat2S_norm = normalize(feat2S)\n",
    "feat2R_norm = normalize(feat2R)\n",
    "\n",
    "# Bidirectional PatchMatch between source and reference VGG features.\n",
    "map2SR = PatchMatch(feat2S_norm, feat2R_norm) #S -> R\n",
    "map2RS = PatchMatch(feat2R_norm, feat2S_norm) #R -> S\n",
    "map2SR.solve()\n",
    "print()\n",
    "map2RS.solve()\n",
    "\n",
    "imgS_resized = resize_img(origS, feat2S.shape[:2])\n",
    "imgR_resized = resize_img(origR, feat2R.shape[:2])\n",
    "\n",
    "# Bidirectional-similarity voting builds the guidance image and features.\n",
    "imgG = bds_vote(imgR_resized, map2SR.nnf, map2RS.nnf)\n",
    "feat2G = bds_vote(feat2R.transpose(RIGHT_SHIFT), map2SR.nnf, map2RS.nnf).transpose(LEFT_SHIFT)\n",
    "feat2G_norm = normalize(feat2G)\n",
    "\n",
    "# NOTE(review): labS/labG do not appear to be read below -- possibly dead;\n",
    "# confirm LocalColorTransfer does not use them as globals before removing.\n",
    "labS = color.rgb2lab(imgS_resized.numpy().transpose(LEFT_SHIFT))\n",
    "labG = color.rgb2lab(imgG.transpose(LEFT_SHIFT))\n",
    "\n",
    "lct = LocalColorTransfer(imgS_resized.numpy().transpose(LEFT_SHIFT), imgG.transpose(LEFT_SHIFT), feat2S_norm, feat2G_norm, kmeans_ratio=8)\n",
    "save = torch.from_numpy(imgG).float()\n",
    "utils.save_image(save, 'results/img2G.png')\n",
    "lct.train()\n",
    "\n",
    "# Upsample the fitted affine parameters to full resolution (guided filter).\n",
    "a_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramA.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "b_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramB.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "\n",
    "# Compose the level-2 result at full resolution: T = a * S + b.\n",
    "img2S = a_upsampled * rgbOrigS + b_upsampled\n",
    "img2S = img2S.data.numpy().transpose(LEFT_SHIFT)\n",
    "imshow(img2S)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the level-2 result, then normalize with ImageNet statistics for VGG.\n",
    "img2S = torch.from_numpy(img2S.transpose(RIGHT_SHIFT)).float()\n",
    "utils.save_image(img2S, 'results/img2S.png')\n",
    "# Bug fix: transforms.Normalize is out-of-place in modern torchvision, so the\n",
    "# result must be assigned back (previously the normalized tensor was discarded).\n",
    "img2S = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])(img2S)\n",
    "img2S = img2S.unsqueeze(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Pyramid level 1 (finest): final feature matching and color transfer pass,\n",
    "# using the level-2 output (img2S) as the source image.\n",
    "feat1S = get_feature(img2S, FEATURE_IDS[0])\n",
    "feat1R = get_feature(imgR, FEATURE_IDS[0])\n",
    "feat1S_norm = normalize(feat1S)\n",
    "feat1R_norm = normalize(feat1R)\n",
    "\n",
    "# Bidirectional PatchMatch between source and reference VGG features.\n",
    "map1SR = PatchMatch(feat1S_norm, feat1R_norm) #S -> R\n",
    "map1RS = PatchMatch(feat1R_norm, feat1S_norm) #R -> S\n",
    "map1SR.solve()\n",
    "print()\n",
    "map1RS.solve()\n",
    "\n",
    "imgS_resized = resize_img(origS, feat1S.shape[:2])\n",
    "imgR_resized = resize_img(origR, feat1R.shape[:2])\n",
    "\n",
    "# Bidirectional-similarity voting builds the guidance image and features.\n",
    "imgG = bds_vote(imgR_resized, map1SR.nnf, map1RS.nnf)\n",
    "feat1G = bds_vote(feat1R.transpose(RIGHT_SHIFT), map1SR.nnf, map1RS.nnf).transpose(LEFT_SHIFT)\n",
    "feat1G_norm = normalize(feat1G)\n",
    "\n",
    "# NOTE(review): labS/labG do not appear to be read below -- possibly dead;\n",
    "# confirm LocalColorTransfer does not use them as globals before removing.\n",
    "labS = color.rgb2lab(imgS_resized.numpy().transpose(LEFT_SHIFT))\n",
    "labG = color.rgb2lab(imgG.transpose(LEFT_SHIFT))\n",
    "\n",
    "lct = LocalColorTransfer(imgS_resized.numpy().transpose(LEFT_SHIFT), imgG.transpose(LEFT_SHIFT), feat1S_norm, feat1G_norm, kmeans_ratio=16)\n",
    "save = torch.from_numpy(imgG).float()\n",
    "utils.save_image(save, 'results/img1G.png')\n",
    "lct.train()\n",
    "\n",
    "# Upsample the fitted affine parameters to full resolution (guided filter).\n",
    "a_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramA.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "b_upsampled = FastGuidedFilter(1, eps=1e-08)(lct.source.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             lct.paramB.permute(RIGHT_SHIFT).unsqueeze(0).cpu(),\n",
    "                                             rgbOrigS.unsqueeze(0)).squeeze()\n",
    "\n",
    "# Compose the final result at full resolution: T = a * S + b.\n",
    "img1S = a_upsampled * rgbOrigS + b_upsampled\n",
    "img1S = img1S.data.numpy().transpose(LEFT_SHIFT)\n",
    "imshow(img1S)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the finest-level output -- the final color-transferred image.\n",
    "img1S = torch.from_numpy(img1S.transpose(RIGHT_SHIFT)).float()\n",
    "utils.save_image(img1S, 'results/img1S.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
