{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "from __future__ import print_function, division\n",
    "\n",
    "from keras.models import Sequential, Model\n",
    "from keras.layers import *\n",
    "from keras.layers.advanced_activations import LeakyReLU\n",
    "from keras.activations import relu\n",
    "from keras.initializers import RandomNormal\n",
    "from keras.applications import *\n",
    "import keras.backend as K\n",
    "from tensorflow.contrib.distributions import Beta\n",
    "import tensorflow as tf\n",
    "from keras.optimizers import Adam\n",
    "from keras import losses\n",
    "from keras.layers.merge import _Merge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from image_augmentation import random_transform\n",
    "from image_augmentation import random_warp\n",
    "from utils import get_image_paths, load_images, stack_images\n",
    "from pixel_shuffler import PixelShuffler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import cv2\n",
    "import glob\n",
    "from random import randint, shuffle\n",
    "from IPython.display import clear_output\n",
    "from IPython.display import display\n",
    "import matplotlib.pyplot as plt\n",
    "from functools import partial\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Code borrowed from [eriklindernoren](https://github.com/eriklindernoren), [fchollet](https://github.com/fchollet) and [keras-contrib](https://github.com/keras-team/keras-contrib)\n",
    "\n",
    "https://github.com/eriklindernoren/Keras-GAN/blob/master/aae/adversarial_autoencoder.py\n",
    "\n",
    "https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/8.5-introduction-to-gans.ipynb\n",
    "\n",
    "https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FaceSwapGAN():\n",
    "    \"\"\"Face-swap GAN: a shared-encoder pair of autoencoders (netGA/netGB)\n",
    "    trained adversarially against two WGAN-style discriminators.\n",
    "\n",
    "    Each generator outputs [alpha, rgb]: a blending mask plus a reconstructed\n",
    "    face, composited as alpha * rgb + (1 - alpha) * input.\n",
    "    \"\"\"\n",
    "    def __init__(self, batch_size=8, img_dirA='./faceA/*.*', img_dirB='./faceB/*.*', use_mixup=False):\n",
    "        self.img_size = 64 \n",
    "        self.channels = 3\n",
    "        self.img_shape = (self.img_size, self.img_size, self.channels)\n",
    "        self.batch_size = batch_size\n",
    "        self.img_dirA = img_dirA\n",
    "        self.img_dirB = img_dirB\n",
    "        self.random_transform_args = {\n",
    "            'rotation_range': 20,\n",
    "            'zoom_range': 0.05,\n",
    "            'shift_range': 0.05,\n",
    "            'random_flip': 0.5,\n",
    "            }\n",
    "        self.use_mixup = use_mixup \n",
    "        self.mixup_alpha = 0.2        \n",
    "        self.n_critic = 5  # discriminator updates per generator update\n",
    "        # Without mixup, real and fake samples are stacked along the batch\n",
    "        # axis at train time, so the random-average layer sees a doubled batch.\n",
    "        if self.use_mixup:\n",
    "            rand_wavg_batch_size = self.batch_size\n",
    "        else:\n",
    "            rand_wavg_batch_size = self.batch_size * 2\n",
    "        optimizer = Adam(1e-4, 0.5, 0.9)\n",
    "        \n",
    "        def wasserstein_loss(y_true, y_pred):\n",
    "            # y_true is +1/-1, so this is the signed mean critic score.\n",
    "            return K.mean(y_true * y_pred)\n",
    "\n",
    "        def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):\n",
    "            # WGAN-GP penalty: push the critic's gradient norm (w.r.t. points\n",
    "            # interpolated between real and fake samples) towards 1.\n",
    "            # NOTE(review): the L2 norm here is taken over the entire batch,\n",
    "            # not per sample as in the WGAN-GP paper - confirm intended.\n",
    "            gradients = K.gradients(K.sum(y_pred), averaged_samples)\n",
    "            gradient_l2_norm = K.sqrt(K.sum(K.square(gradients)))\n",
    "            gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)\n",
    "            return gradient_penalty\n",
    "    \n",
    "        class RandomWeightedAverage(_Merge):\n",
    "            # Random convex combination of two inputs (for the GP term).\n",
    "            def _merge_function(self, inputs):\n",
    "                weights = K.random_uniform((rand_wavg_batch_size, 1, 1, 1))\n",
    "                return (weights * inputs[0]) + ((1 - weights) * inputs[1])\n",
    "\n",
    "        # Build and compile the discriminators\n",
    "        self.netDA, self.netDB = self.build_discriminator()\n",
    "        self.netDA.compile(loss=wasserstein_loss, optimizer=optimizer, metrics=['accuracy'])\n",
    "        self.netDB.compile(loss=wasserstein_loss, optimizer=optimizer, metrics=['accuracy'])\n",
    "\n",
    "        # Build the generators; resume from saved weights when available.\n",
    "        self.netGA, self.netGB = self.build_generator()\n",
    "        try:\n",
    "            self.netGA.load_weights(\"models/netGA.h5\")\n",
    "            self.netGB.load_weights(\"models/netGB.h5\")\n",
    "            print (\"Generator models loaded.\")\n",
    "        except Exception:  # missing/unreadable weight files: train from scratch\n",
    "            print (\"Generator weights files not found.\")\n",
    "        self.netGA.compile(loss=['mae', wasserstein_loss], optimizer=optimizer)\n",
    "        self.netGB.compile(loss=['mae', wasserstein_loss], optimizer=optimizer)  \n",
    "\n",
    "        warped_img = Input(shape=self.img_shape)\n",
    "        real_img = Input(shape=self.img_shape)\n",
    "        alphaA, reconstructed_imgA = self.netGA(warped_img)\n",
    "        alphaB, reconstructed_imgB = self.netGB(warped_img)             \n",
    "\n",
    "        # For the adversarial_autoencoder model we will only train the generator\n",
    "        self.netDA.trainable = False\n",
    "        self.netDB.trainable = False\n",
    "\n",
    "        def one_minus(x): return 1 - x\n",
    "        # masked_img = alpha * reconstructed_img + (1 - alpha) * img\n",
    "        masked_imgA = add([multiply([alphaA, reconstructed_imgA]), \n",
    "                           multiply([Lambda(one_minus)(alphaA), warped_img])])\n",
    "        masked_imgB = add([multiply([alphaB, reconstructed_imgB]), \n",
    "                           multiply([Lambda(one_minus)(alphaB), warped_img])])\n",
    "        # Discriminators are conditioned on the warped input (channel concat).\n",
    "        out_discriminatorA = self.netDA(concatenate([masked_imgA, warped_img], axis=-1))\n",
    "        out_discriminatorB = self.netDB(concatenate([masked_imgB, warped_img], axis=-1))\n",
    "\n",
    "        # The adversarial_autoencoder model (stacked generator and discriminator)\n",
    "        # takes an image, reconstructs it, and scores the composited result.\n",
    "        self.adversarial_autoencoderA = Model(warped_img, [reconstructed_imgA, out_discriminatorA, alphaA])\n",
    "        self.adversarial_autoencoderB = Model(warped_img, [reconstructed_imgB, out_discriminatorB, alphaB])\n",
    "        # Losses: L1 reconstruction, adversarial, and a small L2 term that\n",
    "        # pulls the alpha mask towards zero (regularization).\n",
    "        self.adversarial_autoencoderA.compile(loss=['mae', wasserstein_loss, 'mse'],\n",
    "                                              loss_weights=[1, .5, 3e-3],\n",
    "                                              optimizer=optimizer)\n",
    "        self.adversarial_autoencoderB.compile(loss=['mae', wasserstein_loss, 'mse'],\n",
    "                                              loss_weights=[1, .5, 3e-3],\n",
    "                                              optimizer=optimizer)\n",
    "        \n",
    "        # Setting trainable=True for discriminators\n",
    "        # If not set, keras will throw error: Non type not supported (due to missing gradient from discriminator)\n",
    "        self.netDA.trainable = True\n",
    "        self.netDB.trainable = True\n",
    "        self.netGA.trainable = False\n",
    "        self.netGB.trainable = False\n",
    "        averaged_imgA = RandomWeightedAverage()([real_img, reconstructed_imgA])\n",
    "        averaged_imgB = RandomWeightedAverage()([real_img, reconstructed_imgB])\n",
    "        averaged_outA = self.netDA(concatenate([averaged_imgA, warped_img], axis=-1))\n",
    "        averaged_outB = self.netDB(concatenate([averaged_imgB, warped_img], axis=-1))\n",
    "        partial_gp_lossA = partial(gradient_penalty_loss, \n",
    "                                   averaged_samples=averaged_imgA, \n",
    "                                   gradient_penalty_weight=10.)\n",
    "        partial_gp_lossA.__name__ = 'gradient_penaltyA'\n",
    "        partial_gp_lossB = partial(gradient_penalty_loss, \n",
    "                                   averaged_samples=averaged_imgB, \n",
    "                                   gradient_penalty_weight=10.)\n",
    "        partial_gp_lossB.__name__ = 'gradient_penaltyB'\n",
    "        # Wrap the discriminators so they also emit the critic output on the\n",
    "        # interpolated samples (needed by the gradient-penalty loss above).\n",
    "        self.netDA = Model(inputs=[self.netDA.inputs[0], real_img, warped_img],\n",
    "                           outputs=[self.netDA.outputs[0], averaged_outA])\n",
    "        self.netDB = Model(inputs=[self.netDB.inputs[0], real_img, warped_img],\n",
    "                           outputs=[self.netDB.outputs[0], averaged_outB])\n",
    "        try:\n",
    "            self.netDA.load_weights(\"models/netDA.h5\") \n",
    "            self.netDB.load_weights(\"models/netDB.h5\") \n",
    "            print (\"Discriminator models loaded.\")\n",
    "        except Exception:  # missing/unreadable weight files: train from scratch\n",
    "            print (\"Discriminator weights files not found.\")\n",
    "        self.netDA.compile(optimizer=optimizer, loss=[wasserstein_loss, partial_gp_lossA])\n",
    "        self.netDB.compile(optimizer=optimizer, loss=[wasserstein_loss, partial_gp_lossB]) \n",
    "        \n",
    "\n",
    "    def build_generator(self):\n",
    "        \"\"\"Build generators netGA and netGB.\n",
    "\n",
    "        Both share one Encoder instance (a common face representation);\n",
    "        each identity gets its own Decoder. Each generator maps an image\n",
    "        to [alpha, rgb]: a sigmoid blending mask and a tanh reconstruction.\n",
    "        Layer topology must stay unchanged so models/netG*.h5 keep loading.\n",
    "        \"\"\"\n",
    "        # Downsampling step: strided conv + LeakyReLU.\n",
    "        def conv_block(input_tensor, f):\n",
    "            x = input_tensor\n",
    "            x = Conv2D(f, kernel_size=3, strides=2, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            return x\n",
    "\n",
    "        # Residual block with identity skip connection.\n",
    "        def res_block(input_tensor, f):\n",
    "            x = input_tensor\n",
    "            x = Conv2D(f, kernel_size=3, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            x = Conv2D(f, kernel_size=3, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = add([x, input_tensor])\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            return x\n",
    "\n",
    "        # 2x upsampling via sub-pixel convolution (PixelShuffler).\n",
    "        # NOTE(review): use_norm is accepted but currently unused.\n",
    "        def upscale_ps(filters, use_norm=True):\n",
    "            def block(x):\n",
    "                x = Conv2D(filters*4, kernel_size=3, use_bias=False, \n",
    "                           kernel_initializer=RandomNormal(0, 0.02), padding='same' )(x)\n",
    "                x = LeakyReLU(0.1)(x)\n",
    "                x = PixelShuffler()(x)\n",
    "                return x\n",
    "            return block\n",
    "\n",
    "        def Encoder(img_shape):\n",
    "            # 64x64x3 -> 4x4x1024 conv stack with a dense bottleneck, then\n",
    "            # upscaled once to an 8x8x512 latent feature map.\n",
    "            inp = Input(shape=img_shape)\n",
    "            x = Conv2D(64, kernel_size=5, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(inp)\n",
    "            x = conv_block(x,128)\n",
    "            x = conv_block(x,256)\n",
    "            x = conv_block(x,512) \n",
    "            x = conv_block(x,1024)\n",
    "            x = Dense(1024)(Flatten()(x))\n",
    "            x = Dense(4*4*1024)(x)\n",
    "            x = Reshape((4, 4, 1024))(x)\n",
    "            out = upscale_ps(512)(x)\n",
    "            return Model(inputs=inp, outputs=out)\n",
    "\n",
    "        def Decoder_ps(img_shape):\n",
    "            # 8x8x512 latent -> 64x64 outputs: sigmoid alpha mask + tanh rgb.\n",
    "            nc_in = 512\n",
    "            input_size = img_shape[0]//8\n",
    "            inp = Input(shape=(input_size, input_size, nc_in))\n",
    "            x = inp\n",
    "            x = upscale_ps(256)(x)\n",
    "            x = upscale_ps(128)(x)\n",
    "            x = upscale_ps(64)(x)\n",
    "            x = res_block(x, 64)\n",
    "            x = res_block(x, 64)\n",
    "            alpha = Conv2D(1, kernel_size=5, padding='same', activation=\"sigmoid\")(x)\n",
    "            rgb = Conv2D(3, kernel_size=5, padding='same', activation=\"tanh\")(x)\n",
    "            return Model(inp, [alpha, rgb])\n",
    "        \n",
    "        # One shared encoder, two identity-specific decoders.\n",
    "        encoder = Encoder(self.img_shape)\n",
    "        decoder_A = Decoder_ps(self.img_shape)\n",
    "        decoder_B = Decoder_ps(self.img_shape)    \n",
    "        x = Input(shape=self.img_shape)\n",
    "        netGA = Model(x, decoder_A(encoder(x)))\n",
    "        netGB = Model(x, decoder_B(encoder(x)))         \n",
    "        return netGA, netGB, \n",
    "\n",
    "    def build_discriminator(self):  \n",
    "        \"\"\"Build the two conditional discriminators (WGAN critics).\n",
    "\n",
    "        Input is the candidate image concatenated channel-wise with the\n",
    "        warped source image (6 channels); output is a patch map of\n",
    "        unbounded critic scores. Returns (netDA, netDB).\n",
    "        \"\"\"\n",
    "        def conv_block_d(input_tensor, f, use_instance_norm=True):\n",
    "            # Strided conv + LeakyReLU downsampling step.\n",
    "            # NOTE(review): use_instance_norm is accepted but currently ignored.\n",
    "            x = input_tensor\n",
    "            x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            return x   \n",
    "        def Discriminator(img_shape):\n",
    "            inp = Input(shape=(img_shape[0], img_shape[1], img_shape[2]*2))\n",
    "            x = conv_block_d(inp, 64, False)\n",
    "            x = conv_block_d(x, 128, False)\n",
    "            x = conv_block_d(x, 256, False)\n",
    "            # Linear (no activation) output: a critic trained with the\n",
    "            # Wasserstein loss + gradient penalty must be unbounded; the\n",
    "            # previous sigmoid squashed scores into (0, 1).\n",
    "            out = Conv2D(1, kernel_size=4, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                         use_bias=False, padding=\"same\")(x)   \n",
    "            return Model(inputs=[inp], outputs=out) \n",
    "        \n",
    "        netDA = Discriminator(self.img_shape)\n",
    "        netDB = Discriminator(self.img_shape)       \n",
    "        return netDA, netDB    \n",
    "\n",
    "\n",
    "    def train(self, max_iters, save_interval=50):        \n",
    "        \"\"\"Run the adversarial training loop.\n",
    "\n",
    "        max_iters: total number of discriminator iterations.\n",
    "        save_interval: save weights and display previews every N iterations.\n",
    "        \"\"\"\n",
    "        def load_data(file_pattern):\n",
    "            return glob.glob(file_pattern)\n",
    "        \n",
    "        def read_image(fn, random_transform_args=self.random_transform_args):\n",
    "            # Load, scale to [-1, 1], augment, and derive a (warped, target) pair.\n",
    "            image = cv2.imread(fn)\n",
    "            image = cv2.resize(image, (256,256)) / 255 * 2 - 1\n",
    "            image = random_transform(image, **random_transform_args )\n",
    "            warped_img, target_img = random_warp(image)\n",
    "            return warped_img, target_img\n",
    "\n",
    "        def minibatch(data, batchsize):\n",
    "            # Endless (epoch, warped_batch, target_batch) generator; the next\n",
    "            # batch size can be overridden through send().\n",
    "            length = len(data)\n",
    "            epoch = i = 0\n",
    "            tmpsize = None  \n",
    "            shuffle(data)\n",
    "            while True:\n",
    "                size = tmpsize if tmpsize else batchsize\n",
    "                if i+size > length:\n",
    "                    shuffle(data)\n",
    "                    i = 0\n",
    "                    epoch+=1        \n",
    "                rtn = np.float32([read_image(data[j]) for j in range(i,i+size)])\n",
    "                i+=size\n",
    "                tmpsize = yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:]       \n",
    "\n",
    "        def minibatchAB(dataA, batchsize):\n",
    "            batchA = minibatch(dataA, batchsize)\n",
    "            tmpsize = None    \n",
    "            while True:        \n",
    "                ep1, warped_img, target_img = batchA.send(tmpsize)\n",
    "                tmpsize = yield ep1, warped_img, target_img\n",
    "\n",
    "        batch_size = self.batch_size    \n",
    "            \n",
    "        # Load the dataset\n",
    "        train_A = load_data(self.img_dirA)\n",
    "        train_B = load_data(self.img_dirB)        \n",
    "        # Fix: these previously referenced the undefined names img_dirA / img_dirB.\n",
    "        assert len(train_A), \"No image found in \" + str(self.img_dirA) + \".\"\n",
    "        assert len(train_B), \"No image found in \" + str(self.img_dirB) + \".\"\n",
    "        train_batchA = minibatchAB(train_A, batch_size)\n",
    "        train_batchB = minibatchAB(train_B, batch_size)\n",
    "\n",
    "        print (\"Training starts...\")\n",
    "        t0 = time.time()\n",
    "        gen_iterations = 0\n",
    "        # Pre-defined so the progress report cannot raise NameError when\n",
    "        # save_interval < n_critic (generator not yet updated).\n",
    "        g_lossA = g_lossB = [0.0]\n",
    "        while gen_iterations < max_iters:\n",
    "\n",
    "            # ---------------------\n",
    "            #  Train Discriminators\n",
    "            # ---------------------\n",
    "\n",
    "            # Select a random half batch of images\n",
    "            epoch, warped_A, target_A = next(train_batchA) \n",
    "            epoch, warped_B, target_B = next(train_batchB) \n",
    "\n",
    "            # Generate a half batch of new images\n",
    "            gen_alphasA, gen_imgsA = self.netGA.predict(warped_A)\n",
    "            gen_alphasB, gen_imgsB = self.netGB.predict(warped_B)\n",
    "            # Composite: alpha * generated + (1 - alpha) * warped input.\n",
    "            gen_masked_imgsA = np.array([gen_alphasA[i] * gen_imgsA[i] + (1 - gen_alphasA[i]) * warped_A[i] \n",
    "                                         for i in range(batch_size)])\n",
    "            gen_masked_imgsB = np.array([gen_alphasB[i] * gen_imgsB[i] + (1 - gen_alphasB[i]) * warped_B[i]\n",
    "                                         for i in range (batch_size)])\n",
    "\n",
    "            # WGAN labels: +1 for real, -1 for fake; zeros feed the GP output.\n",
    "            positive_y = np.ones((batch_size, ) + self.netDA.output_shape[0][1:])\n",
    "            negative_y = -positive_y\n",
    "            gp_loss_zeros = np.zeros((batch_size, ) + self.netDA.output_shape[0][1:])\n",
    "            \n",
    "            # Discriminator input: candidate image stacked with its warped source.\n",
    "            concat_real_inputA = np.array([np.concatenate([target_A[i], warped_A[i]], axis=-1) \n",
    "                                           for i in range(batch_size)])\n",
    "            concat_real_inputB = np.array([np.concatenate([target_B[i], warped_B[i]], axis=-1) \n",
    "                                           for i in range(batch_size)])\n",
    "            concat_fake_inputA = np.array([np.concatenate([gen_masked_imgsA[i], warped_A[i]], axis=-1) \n",
    "                                           for i in range(batch_size)])\n",
    "            concat_fake_inputB = np.array([np.concatenate([gen_masked_imgsB[i], warped_B[i]], axis=-1) \n",
    "                                           for i in range(batch_size)])\n",
    "            if self.use_mixup:\n",
    "                # mixup: train on a random convex blend of real/fake pairs with\n",
    "                # correspondingly blended labels.\n",
    "                lam = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n",
    "                mixup_A = lam * concat_real_inputA + (1 - lam) * concat_fake_inputA\n",
    "                mixup_B = lam * concat_real_inputB + (1 - lam) * concat_fake_inputB\n",
    "                mixup_label = lam * positive_y + (1 - lam) * negative_y\n",
    "\n",
    "            # Train the discriminators\n",
    "            if self.use_mixup:\n",
    "                d_lossA = self.netDA.train_on_batch([mixup_A, target_A, warped_A], [mixup_label, gp_loss_zeros])\n",
    "                d_lossB = self.netDB.train_on_batch([mixup_B, target_B, warped_B], [mixup_label, gp_loss_zeros])\n",
    "            else:\n",
    "                # Real and fake are stacked along the batch axis in one update.\n",
    "                d_lossA = self.netDA.train_on_batch([np.concatenate([concat_real_inputA, concat_fake_inputA], axis=0),\n",
    "                                                     np.concatenate([target_A, target_A], axis=0), \n",
    "                                                     np.concatenate([warped_A, warped_A], axis=0)], \n",
    "                                                    [np.concatenate([positive_y, negative_y], axis=0), \n",
    "                                                     np.concatenate([gp_loss_zeros, gp_loss_zeros], axis=0)])\n",
    "                d_lossB = self.netDB.train_on_batch([np.concatenate([concat_real_inputB, concat_fake_inputB], axis=0),\n",
    "                                                     np.concatenate([target_B, target_B], axis=0), \n",
    "                                                     np.concatenate([warped_B, warped_B], axis=0)], \n",
    "                                                    [np.concatenate([positive_y, negative_y], axis=0), \n",
    "                                                     np.concatenate([gp_loss_zeros, gp_loss_zeros], axis=0)])\n",
    "\n",
    "\n",
    "            # ---------------------\n",
    "            #  Train Generators\n",
    "            # ---------------------\n",
    "\n",
    "            # One generator update per n_critic discriminator updates.\n",
    "            if (gen_iterations + 1) % self.n_critic == 0:\n",
    "                mask_regularizationA = np.zeros((batch_size, ) + self.netGA.output_shape[0][1:])\n",
    "                mask_regularizationB = np.zeros((batch_size, ) + self.netGB.output_shape[0][1:])\n",
    "                g_lossA = self.adversarial_autoencoderA.train_on_batch(warped_A, \n",
    "                                                                       [target_A, positive_y, mask_regularizationA])\n",
    "                g_lossB = self.adversarial_autoencoderB.train_on_batch(warped_B, \n",
    "                                                                       [target_B, positive_y, mask_regularizationB])           \n",
    "            gen_iterations += 1             \n",
    "\n",
    "            # If at save interval => save models & show results\n",
    "            if (gen_iterations) % save_interval == 0:\n",
    "                clear_output()\n",
    "                # Plot the progress\n",
    "                print('[%d/%s][%d] Loss_DA: %f Loss_DB: %f Loss_GA: %f Loss_GB: %f time: %f'\n",
    "                      % (epoch, \"num_epochs\", gen_iterations, d_lossA[0], \n",
    "                         d_lossB[0], g_lossA[0], g_lossB[0], time.time()-t0)) \n",
    "                \n",
    "                # Save models\n",
    "                self.netGA.save_weights(\"models/netGA.h5\")\n",
    "                self.netGB.save_weights(\"models/netGB.h5\" )\n",
    "                self.netDA.save_weights(\"models/netDA.h5\")\n",
    "                self.netDB.save_weights(\"models/netDB.h5\")\n",
    "                print (\"Models saved.\")\n",
    "                \n",
    "                # Show results (14 samples per side -> 4x7 preview grid)\n",
    "                _, wA, tA = train_batchA.send(14)  \n",
    "                _, wB, tB = train_batchB.send(14)\n",
    "                self.showG(tA, tB)\n",
    "            \n",
    "    def showG(self, test_A, test_B):      \n",
    "        \"\"\"Display previews: masked swaps, raw decoder outputs, alpha masks.\"\"\"\n",
    "        def render(fig_A, fig_B):\n",
    "            # Tile both columns into a 4x7 grid and show as one RGB image.\n",
    "            grid = np.concatenate([fig_A, fig_B], axis=0 )\n",
    "            grid = grid.reshape((4,7) + grid.shape[1:])\n",
    "            grid = stack_images(grid)\n",
    "            grid = np.clip((grid + 1) * 255 / 2, 0, 255).astype('uint8')\n",
    "            display(Image.fromarray(cv2.cvtColor(grid, cv2.COLOR_BGR2RGB)))\n",
    "\n",
    "        # alpha_XY / rgb_XY: generator Y applied to test set X.\n",
    "        alpha_AA, rgb_AA = self.netGA.predict(test_A)\n",
    "        alpha_AB, rgb_AB = self.netGB.predict(test_A)\n",
    "        alpha_BA, rgb_BA = self.netGA.predict(test_B)\n",
    "        alpha_BB, rgb_BB = self.netGB.predict(test_B)\n",
    "\n",
    "        def blend(alpha, rgb, src):\n",
    "            # Composite: alpha * generated + (1 - alpha) * source.\n",
    "            return alpha * rgb + (1 - alpha) * src\n",
    "\n",
    "        print (\"Masked results:\")\n",
    "        render(np.stack([test_A, blend(alpha_AA, rgb_AA, test_A), blend(alpha_AB, rgb_AB, test_A)], axis=1),\n",
    "               np.stack([test_B, blend(alpha_BB, rgb_BB, test_B), blend(alpha_BA, rgb_BA, test_B)], axis=1))\n",
    "\n",
    "        print (\"Raw results:\")\n",
    "        render(np.stack([test_A, rgb_AA, rgb_AB], axis=1),\n",
    "               np.stack([test_B, rgb_BB, rgb_BA], axis=1))\n",
    "\n",
    "        print (\"Alpha masks:\")\n",
    "        render(np.stack([test_A, np.tile(alpha_AA, 3) * 2 - 1, np.tile(alpha_AB, 3) * 2 - 1], axis=1),\n",
    "               np.stack([test_B, np.tile(alpha_BB, 3) * 2 - 1, np.tile(alpha_BA, 3) * 2 - 1], axis=1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "mkdir: cannot create directory ‘models’: File exists\r\n"
     ]
    }
   ],
   "source": [
    "# -p avoids the error shown on re-run when models/ already exists\n",
    "!mkdir -p models"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Generator models loaded.\n",
      "Discriminator models loaded.\n"
     ]
    }
   ],
   "source": [
    "# Builds and compiles all models; loads weights from models/ when present.\n",
    "gan = FaceSwapGAN()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use an integer iteration count (10e4 is a float literal).\n",
    "gan.train(max_iters=100000, save_interval=500)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Video"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "import face_recognition\n",
    "from moviepy.editor import VideoFileClip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "use_smoothed_mask = True  # feather the pasted face with a Gaussian-blurred mask\n",
    "use_smoothed_bbox = True  # temporally smooth the detected face bounding box\n",
    "\n",
    "\n",
    "def get_smoothed_coord(x0, x1, y0, y1):\n",
    "    global prev_x0, prev_x1, prev_y0, prev_y1\n",
    "    x0 = int(0.65*prev_x0 + 0.35*x0)\n",
    "    x1 = int(0.65*prev_x1 + 0.35*x1)\n",
    "    y1 = int(0.65*prev_y1 + 0.35*y1)\n",
    "    y0 = int(0.65*prev_y0 + 0.35*y0)\n",
    "    return x0, x1, y0, y1    \n",
    "    \n",
    "def set_global_coord(x0, x1, y0, y1):\n",
    "    global prev_x0, prev_x1, prev_y0, prev_y1\n",
    "    prev_x0 = x0\n",
    "    prev_x1 = x1\n",
    "    prev_y1 = y1\n",
    "    prev_y0 = y0\n",
    "\n",
    "def process_video(input_img):   \n",
    "    \"\"\"Per-frame callback for moviepy: detect a face, run it through netGA,\n",
    "    and return a 3-panel frame (original | swapped | alpha-mask map).\n",
    "    Relies on module globals: prev_x0/prev_x1/prev_y0/prev_y1 and frames.\n",
    "    \"\"\"\n",
    "    #input_img = input_img[:, input_img.shape[1]//3:2*input_img.shape[1]//3,:]\n",
    "    input_img = input_img[:, :640,:]\n",
    "    image = input_img\n",
    "    # NOTE(review): boxes are unpacked below as (x0, y1, x1, y0) with x = row\n",
    "    # and y = column - confirm against the face_recognition docs.\n",
    "    faces = face_recognition.face_locations(image, model=\"cnn\")\n",
    "    \n",
    "    # No face detected: pass the frame through unchanged (mask panel dimmed).\n",
    "    if len(faces) == 0:\n",
    "        comb_img = np.zeros([input_img.shape[0], input_img.shape[1]*2,input_img.shape[2]])\n",
    "        comb_img[:, :input_img.shape[1], :] = input_img\n",
    "        comb_img[:, input_img.shape[1]:, :] = input_img\n",
    "        triple_img = np.zeros([input_img.shape[0], input_img.shape[1]*3,input_img.shape[2]])\n",
    "        triple_img[:, :input_img.shape[1], :] = input_img\n",
    "        triple_img[:, input_img.shape[1]:input_img.shape[1]*2, :] = input_img      \n",
    "        triple_img[:, input_img.shape[1]*2:, :] = (input_img * .15).astype('uint8')\n",
    "    \n",
    "    mask_map = np.zeros_like(image)\n",
    "    \n",
    "    global prev_x0, prev_x1, prev_y0, prev_y1\n",
    "    global frames    \n",
    "    for (x0, y1, x1, y0) in faces:\n",
    "        h = x1 - x0\n",
    "        w = y1 - y0\n",
    "        \n",
    "        # smoothing bounding box (exponential blend with the previous frame)\n",
    "        if use_smoothed_bbox:\n",
    "            if frames != 0:\n",
    "                x0, x1, y0, y1 = get_smoothed_coord(x0, x1, y0, y1)\n",
    "                set_global_coord(x0, x1, y0, y1)\n",
    "            else:\n",
    "                set_global_coord(x0, x1, y0, y1)\n",
    "                frames += 1\n",
    "            \n",
    "        # Crop the face ROI (shrunk by ~1/15 per side), in BGR for the model.\n",
    "        cv2_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n",
    "        roi_image = cv2_img[x0+h//15:x1-h//15,y0+w//15:y1-w//15,:]\n",
    "        roi_size = roi_image.shape  \n",
    "        \n",
    "        # smoothing mask: blurred box used to feather the paste-back edges\n",
    "        if use_smoothed_mask:\n",
    "            mask = np.zeros_like(roi_image)\n",
    "            mask[h//15:-h//15,w//15:-w//15,:] = 255\n",
    "            mask = cv2.GaussianBlur(mask,(15,15),10)\n",
    "            orig_img = cv2.cvtColor(roi_image, cv2.COLOR_BGR2RGB)\n",
    "        \n",
    "        # Run the generator on the 64x64 ROI scaled to [-1, 1].\n",
    "        ae_input = cv2.resize(roi_image, (64,64))/255. * 2 - 1        \n",
    "        result = gan.netGA.predict(np.array([ae_input])) # Change path_A/path_B here\n",
    "        # result[0]: alpha mask in [0, 1]; result[1]: rgb output in [-1, 1].\n",
    "        result_a = result[0][0] * 255\n",
    "        result_bgr = np.clip( (result[1][0] + 1) * 255 / 2, 0, 255 )\n",
    "        result_a = cv2.GaussianBlur(result_a ,(7,7),6)\n",
    "        result_a = np.expand_dims(result_a, axis=2)\n",
    "        # Alpha-composite the generated face over the rescaled input ROI.\n",
    "        result = (result_a/255 * result_bgr + (1 - result_a/255) * ((ae_input + 1) * 255 / 2)).astype('uint8')\n",
    "        result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)\n",
    "        \n",
    "        # Third panel: the alpha mask drawn over a dimmed copy of the frame.\n",
    "        mask_map[x0+h//15:x1-h//15, y0+w//15:y1-w//15,:] = np.expand_dims(cv2.resize(result_a, (roi_size[1],roi_size[0])), axis=2)\n",
    "        mask_map = np.clip(mask_map + .15 * input_img, 0, 255 )\n",
    "        \n",
    "        result = cv2.resize(result, (roi_size[1],roi_size[0]))\n",
    "        # Second panel: original frame with the swapped face pasted back in.\n",
    "        comb_img = np.zeros([input_img.shape[0], input_img.shape[1]*2,input_img.shape[2]])\n",
    "        comb_img[:, :input_img.shape[1], :] = input_img\n",
    "        comb_img[:, input_img.shape[1]:, :] = input_img\n",
    "        \n",
    "        if use_smoothed_mask:\n",
    "            comb_img[x0+h//15:x1-h//15, input_img.shape[1]+y0+w//15:input_img.shape[1]+y1-w//15,:] = mask/255*result + (1-mask/255)*orig_img\n",
    "        else:\n",
    "            comb_img[x0+h//15:x1-h//15, input_img.shape[1]+y0+w//15:input_img.shape[1]+y1-w//15,:] = result\n",
    "            \n",
    "        triple_img = np.zeros([input_img.shape[0], input_img.shape[1]*3,input_img.shape[2]])\n",
    "        triple_img[:, :input_img.shape[1]*2, :] = comb_img\n",
    "        triple_img[:, input_img.shape[1]*2:, :] = mask_map\n",
    "    \n",
    "    return triple_img#comb_img"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Variables for smoothing the bounding box: module-level state read and\n",
    "# updated by process_video / get_smoothed_coord / set_global_coord.\n",
    "# ('global' statements at module scope are no-ops, so plain assignment suffices.)\n",
    "prev_x0 = prev_x1 = prev_y0 = prev_y1 = 0\n",
    "frames = 0\n",
    "\n",
    "output = 'OUTPUT_VIDEO.mp4'\n",
    "clip1 = VideoFileClip(\"INPUT_VIDEO.mp4\")\n",
    "clip = clip1.fl_image(process_video)#.subclip(11, 13) #NOTE: this function expects color images!!\n",
    "%time clip.write_videofile(output, audio=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Neptune",
   "language": "",
   "name": "neptune-kernel"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
