{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "from __future__ import print_function, division\n",
    "\n",
    "from keras.models import Sequential, Model\n",
    "from keras.layers import *\n",
    "from keras.layers.advanced_activations import LeakyReLU\n",
    "from keras.activations import relu\n",
    "from keras.initializers import RandomNormal\n",
    "from keras.applications import *\n",
    "import keras.backend as K\n",
    "from tensorflow.contrib.distributions import Beta\n",
    "import tensorflow as tf\n",
    "from keras.optimizers import Adam\n",
    "from keras import losses\n",
    "from keras.utils import to_categorical"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from image_augmentation import random_transform\n",
    "from image_augmentation import random_warp\n",
    "from utils import get_image_paths, load_images, stack_images\n",
    "from pixel_shuffler import PixelShuffler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "import cv2\n",
    "import glob\n",
    "from random import randint, shuffle\n",
    "from IPython.display import clear_output\n",
    "from IPython.display import display\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Code borrowed from [eriklindernoren](https://github.com/eriklindernoren) and [fchollet](https://github.com/fchollet)\n",
    "\n",
    "https://github.com/eriklindernoren/Keras-GAN/blob/master/aae/adversarial_autoencoder.py\n",
    "\n",
    "https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/8.5-introduction-to-gans.ipynb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "class GANModel():\n",
     "    \"\"\"Dual adversarial autoencoder (face-swap GAN).\n",
     "\n",
     "    Holds two generators netGA/netGB (one per identity, sharing an encoder)\n",
     "    and two discriminators netDA/netDB, plus the stacked\n",
     "    generator+discriminator models used to train the generators adversarially.\n",
     "    \"\"\"\n",
     "    # All models consume/produce img_size x img_size, 3-channel images.\n",
     "    img_size = 64 \n",
     "    channels = 3\n",
     "    img_shape = (img_size, img_size, channels)\n",
     "    encoded_dim = 1024\n",
     "    \n",
     "    def __init__(self):\n",
     "        \"\"\"Build and compile all networks; weights are restored inside the builders.\"\"\"\n",
     "        optimizer = Adam(1e-4, 0.5)\n",
     "\n",
     "        # Build and compile the discriminator\n",
     "        # Discriminators are trained with an MSE loss.\n",
     "        self.netDA, self.netDB = self.build_discriminator()\n",
     "        self.netDA.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n",
     "        self.netDB.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])\n",
     "\n",
     "        # Build and compile the generator\n",
     "        # Each generator maps an image to [alpha mask, rgb reconstruction].\n",
     "        self.netGA, self.netGB = self.build_generator()\n",
     "        self.netGA.compile(loss=['mae', 'mse'], optimizer=optimizer)\n",
     "        self.netGB.compile(loss=['mae', 'mse'], optimizer=optimizer)\n",
     "\n",
     "        img = Input(shape=self.img_shape)\n",
     "        alphaA, reconstructed_imgA = self.netGA(img)\n",
     "        alphaB, reconstructed_imgB = self.netGB(img)\n",
     "\n",
     "        # For the adversarial_autoencoder model we will only train the generator:\n",
     "        # trainable is set to False before the stacked models are compiled below,\n",
     "        # while the discriminators themselves were compiled as trainable above.\n",
     "        self.netDA.trainable = False\n",
     "        self.netDB.trainable = False\n",
     "\n",
     "        def one_minus(x): return 1 - x\n",
     "        # masked_img = alpha * reconstructed_img + (1 - alpha) * img\n",
     "        # i.e. alpha-blend the reconstruction over the input image.\n",
     "        masked_imgA = add([multiply([alphaA, reconstructed_imgA]), multiply([Lambda(one_minus)(alphaA), img])])\n",
     "        masked_imgB = add([multiply([alphaB, reconstructed_imgB]), multiply([Lambda(one_minus)(alphaB), img])])\n",
     "        # Discriminators see the blended output concatenated with the input image.\n",
     "        out_discriminatorA = self.netDA(concatenate([masked_imgA, img], axis=-1))\n",
     "        out_discriminatorB = self.netDB(concatenate([masked_imgB, img], axis=-1))\n",
     "\n",
     "        # The adversarial_autoencoder model  (stacked generator and discriminator) takes\n",
     "        # img as input => generates encoded representation and reconstructed image => determines validity \n",
     "        self.adversarial_autoencoderA = Model(img, [reconstructed_imgA, out_discriminatorA])\n",
     "        self.adversarial_autoencoderB = Model(img, [reconstructed_imgB, out_discriminatorB])\n",
     "        # MAE reconstruction loss + MSE adversarial loss, weighted 1 : 0.5.\n",
     "        self.adversarial_autoencoderA.compile(loss=['mae', 'mse'],\n",
     "                                              loss_weights=[1, 0.5],\n",
     "                                              optimizer=optimizer)\n",
     "        self.adversarial_autoencoderB.compile(loss=['mae', 'mse'],\n",
     "                                              loss_weights=[1, 0.5],\n",
     "                                              optimizer=optimizer)\n",
     "\n",
    "    def build_generator(self):\n",
    "        def conv_block(input_tensor, f):\n",
    "            x = input_tensor\n",
    "            x = Conv2D(f, kernel_size=3, strides=2, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            return x\n",
    "\n",
    "        def res_block(input_tensor, f):\n",
    "            x = input_tensor\n",
    "            x = Conv2D(f, kernel_size=3, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            x = Conv2D(f, kernel_size=3, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = add([x, input_tensor])\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            return x\n",
    "\n",
    "        def upscale_ps(filters, use_norm=True):\n",
    "            def block(x):\n",
    "                x = Conv2D(filters*4, kernel_size=3, use_bias=False, \n",
    "                           kernel_initializer=RandomNormal(0, 0.02), padding='same' )(x)\n",
    "                x = LeakyReLU(0.1)(x)\n",
    "                x = PixelShuffler()(x)\n",
    "                return x\n",
    "            return block\n",
    "\n",
    "        def Encoder(img_shape):\n",
    "            inp = Input(shape=img_shape)\n",
    "            x = Conv2D(64, kernel_size=5, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(inp)\n",
    "            x = conv_block(x,128)\n",
    "            x = conv_block(x,256)\n",
    "            x = conv_block(x,512) \n",
    "            x = conv_block(x,1024)\n",
    "            x = Dense(1024)(Flatten()(x))\n",
    "            x = Dense(4*4*1024)(x)\n",
    "            x = Reshape((4, 4, 1024))(x)\n",
    "            out = upscale_ps(512)(x)\n",
    "            return Model(inputs=inp, outputs=out)\n",
    "\n",
    "        def Decoder_ps(img_shape):\n",
    "            nc_in = 512\n",
    "            input_size = img_shape[0]//8\n",
    "            inp = Input(shape=(input_size, input_size, nc_in))\n",
    "            x = inp\n",
    "            x = upscale_ps(256)(x)\n",
    "            x = upscale_ps(128)(x)\n",
    "            x = upscale_ps(64)(x)\n",
    "            x = res_block(x, 64)\n",
    "            x = res_block(x, 64)\n",
    "            alpha = Conv2D(1, kernel_size=5, padding='same', activation=\"sigmoid\")(x)\n",
    "            rgb = Conv2D(3, kernel_size=5, padding='same', activation=\"tanh\")(x)\n",
    "            return Model(inp, [alpha, rgb])\n",
    "        \n",
    "        encoder = Encoder(self.img_shape)\n",
    "        decoder_A = Decoder_ps(self.img_shape)\n",
    "        decoder_B = Decoder_ps(self.img_shape)    \n",
    "        x = Input(shape=self.img_shape)\n",
    "        netGA = Model(x, decoder_A(encoder(x)))\n",
    "        netGB = Model(x, decoder_B(encoder(x)))           \n",
    "        try:\n",
    "            netGA.load_weights(\"models/netGA.h5\")\n",
    "            netGB.load_weights(\"models/netGB.h5\")\n",
    "            print (\"Generator models loaded.\")\n",
    "        except:\n",
    "            print (\"Generator weights files not found.\")\n",
    "            pass\n",
    "        return netGA, netGB, \n",
    "\n",
    "    def build_discriminator(self):  \n",
    "        def conv_block_d(input_tensor, f, use_instance_norm=True):\n",
    "            x = input_tensor\n",
    "            x = Conv2D(f, kernel_size=4, strides=2, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                       use_bias=False, padding=\"same\")(x)\n",
    "            x = LeakyReLU(alpha=0.2)(x)\n",
    "            return x   \n",
    "        def Discriminator(img_shape):\n",
    "            inp = Input(shape=(img_shape[0], img_shape[1], img_shape[2]*2))\n",
    "            x = conv_block_d(inp, 64, False)\n",
    "            x = conv_block_d(x, 128, False)\n",
    "            x = conv_block_d(x, 256, False)\n",
    "            out = Conv2D(1, kernel_size=4, kernel_initializer=RandomNormal(0, 0.02), \n",
    "                         use_bias=False, padding=\"same\", activation=\"sigmoid\")(x)   \n",
    "            return Model(inputs=[inp], outputs=out) \n",
    "        \n",
    "        netDA = Discriminator(self.img_shape)\n",
    "        netDB = Discriminator(self.img_shape)        \n",
    "        try:\n",
    "            netDA.load_weights(\"models/netDA.h5\") \n",
    "            netDB.load_weights(\"models/netDB.h5\") \n",
    "            print (\"Discriminator models loaded.\")\n",
    "        except:\n",
    "            print (\"Discriminator weights files not found.\")\n",
    "            pass\n",
    "        return netDA, netDB    \n",
    "    \n",
     "    def load(self, swapped):\n",
     "        \"\"\"Interface stub: weight swapping is not supported for the GAN model.\"\"\"\n",
     "        if swapped:\n",
     "            print(\"swapping not supported on GAN\")\n",
     "        pass\n",
     "    \n",
     "    def save_weights(self):\n",
     "        \"\"\"Persist the weights of all four networks under models/.\"\"\"\n",
     "        self.netGA.save_weights(\"models/netGA.h5\")\n",
     "        self.netGB.save_weights(\"models/netGB.h5\" )\n",
     "        self.netDA.save_weights(\"models/netDA.h5\")\n",
     "        self.netDB.save_weights(\"models/netDB.h5\")\n",
     "        print (\"Models saved.\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Train():\n",
     "    \"\"\"Training-loop helper: owns the data generators and runs per-step updates.\"\"\"\n",
     "    # Augmentation parameters forwarded to random_transform for every image.\n",
     "    random_transform_args = {\n",
     "        'rotation_range': 20,\n",
     "        'zoom_range': 0.05,\n",
     "        'shift_range': 0.05,\n",
     "        'random_flip': 0.5,\n",
     "        }\n",
     "    def __init__(self, model, fn_A, fn_B, batch_size=8):\n",
     "        \"\"\"Store the GANModel and create one minibatch generator per identity.\n",
     "\n",
     "        Args:\n",
     "            model: a GANModel instance.\n",
     "            fn_A, fn_B: lists of image file paths for identities A and B.\n",
     "            batch_size: number of images per training batch.\n",
     "        \"\"\"\n",
     "        self.model = model\n",
     "        self.train_batchA = minibatchAB(fn_A, batch_size, self.random_transform_args)\n",
     "        self.train_batchB = minibatchAB(fn_B, batch_size, self.random_transform_args)\n",
     "        self.batch_size = batch_size\n",
     "        \n",
     "        # mixup: train the discriminators on random convex blends of\n",
     "        # real/fake inputs, with Beta(mixup_alpha, mixup_alpha) weights.\n",
     "        self.use_mixup = True\n",
     "        self.mixup_alpha = 0.2\n",
    "    \n",
     "    def train_one_step(self, gen_iter, t0):\n",
     "        \"\"\"Run one training step: update both discriminators, then both generators.\n",
     "\n",
     "        Args:\n",
     "            gen_iter: current iteration index (used for logging only).\n",
     "            t0: wall-clock start time of training (used for the log line).\n",
     "        \"\"\"\n",
     "        # ---------------------\n",
     "        #  Train Discriminators\n",
     "        # ---------------------\n",
     "\n",
     "        # Select a random half batch of images\n",
     "        epoch, warped_A, target_A = next(self.train_batchA) \n",
     "        epoch, warped_B, target_B = next(self.train_batchB) \n",
     "\n",
     "        # Generate a half batch of new images\n",
     "        # Each generator returns (alpha masks, raw reconstructions).\n",
     "        gen_alphasA, gen_imgsA = self.model.netGA.predict(warped_A)\n",
     "        gen_alphasB, gen_imgsB = self.model.netGB.predict(warped_B)\n",
     "        #gen_masked_imgsA = gen_alphasA * gen_imgsA + (1 - gen_alphasA) * warped_A\n",
     "        #gen_masked_imgsB = gen_alphasB * gen_imgsB + (1 - gen_alphasB) * warped_B\n",
     "        # Alpha-blend each reconstruction over its own input image.\n",
     "        gen_masked_imgsA = np.array([gen_alphasA[i] * gen_imgsA[i] + (1 - gen_alphasA[i]) * warped_A[i] \n",
     "                                     for i in range(self.batch_size)])\n",
     "        gen_masked_imgsB = np.array([gen_alphasB[i] * gen_imgsB[i] + (1 - gen_alphasB[i]) * warped_B[i]\n",
     "                                     for i in range (self.batch_size)])\n",
     "\n",
     "        # Discriminator targets: all-ones (real) / all-zeros (fake) maps.\n",
     "        valid = np.ones((self.batch_size, ) + self.model.netDA.output_shape[1:])\n",
     "        fake = np.zeros((self.batch_size, ) + self.model.netDA.output_shape[1:])\n",
     "\n",
     "        # Discriminator inputs: candidate image stacked with its source image\n",
     "        # on the channel axis (matches the concatenate in GANModel.__init__).\n",
     "        concat_real_inputA = np.array([np.concatenate([target_A[i], warped_A[i]], axis=-1) \n",
     "                                       for i in range(self.batch_size)])\n",
     "        concat_real_inputB = np.array([np.concatenate([target_B[i], warped_B[i]], axis=-1) \n",
     "                                       for i in range(self.batch_size)])\n",
     "        concat_fake_inputA = np.array([np.concatenate([gen_masked_imgsA[i], warped_A[i]], axis=-1) \n",
     "                                       for i in range(self.batch_size)])\n",
     "        concat_fake_inputB = np.array([np.concatenate([gen_masked_imgsB[i], warped_B[i]], axis=-1) \n",
     "                                       for i in range(self.batch_size)])\n",
     "        if self.use_mixup:\n",
     "            # mixup: blend real and fake inputs with a Beta-distributed weight\n",
     "            # and train D on the correspondingly blended target (lam * valid).\n",
     "            lam = np.random.beta(self.mixup_alpha, self.mixup_alpha)\n",
     "            mixup_A = lam * concat_real_inputA + (1 - lam) * concat_fake_inputA\n",
     "            mixup_B = lam * concat_real_inputB + (1 - lam) * concat_fake_inputB\n",
     "\n",
     "        # Train the discriminators\n",
     "        #print (\"Train the discriminators.\")\n",
     "        if self.use_mixup:\n",
     "            d_lossA = self.model.netDA.train_on_batch(mixup_A, lam * valid)\n",
     "            d_lossB = self.model.netDB.train_on_batch(mixup_B, lam * valid)\n",
     "        else:\n",
     "            d_lossA = self.model.netDA.train_on_batch(np.concatenate([concat_real_inputA, concat_fake_inputA], axis=0), \n",
     "                                                np.concatenate([valid, fake], axis=0))\n",
     "            d_lossB = self.model.netDB.train_on_batch(np.concatenate([concat_real_inputB, concat_fake_inputB], axis=0),\n",
     "                                                np.concatenate([valid, fake], axis=0))\n",
     "\n",
     "\n",
     "        # ---------------------\n",
     "        #  Train Generators\n",
     "        # ---------------------\n",
     "\n",
     "        # Train the generators\n",
     "        #print (\"Train the generators.\")\n",
     "        # Generator targets: reconstruct the target image (MAE) and make the\n",
     "        # (frozen) discriminator output 'valid' (MSE).\n",
     "        g_lossA = self.model.adversarial_autoencoderA.train_on_batch(warped_A, [target_A, valid])\n",
     "        g_lossB = self.model.adversarial_autoencoderB.train_on_batch(warped_B, [target_B, valid])            \n",
     "        \n",
     "        # NOTE(review): \"num_epochs\" below is a literal placeholder string,\n",
     "        # not a variable; the total epoch count is not tracked here.\n",
     "        print('[%d/%s][%d] Loss_DA: %f Loss_DB: %f Loss_GA: %f Loss_GB: %f time: %f'\n",
     "              % (epoch, \"num_epochs\", gen_iter, d_lossA[0], \n",
     "                 d_lossB[0], g_lossA[0], g_lossB[0], time.time()-t0)) \n",
     "        \n",
     "        \n",
     "        return None\n",
    "    \n",
     "    def show_sample(self):\n",
     "        \"\"\"Fetch a 14-image batch per identity and display generator output.\n",
     "\n",
     "        The size 14 is required by showG, whose display grid is 4x7 and so\n",
     "        expects 2 * 14 = 28 stacked rows. .send(14) overrides the minibatch\n",
     "        generator's batch size for this single draw only.\n",
     "        \"\"\"\n",
     "        _, wA, tA = self.train_batchA.send(14)  \n",
     "        _, wB, tB = self.train_batchB.send(14)\n",
     "        self.showG(tA, tB)\n",
    "\n",
     "    def showG(self, test_A, test_B):      \n",
     "        \"\"\"Display masked outputs, raw outputs and alpha masks for both identities.\n",
     "\n",
     "        Expects 14 images per identity: each figure pair concatenates to 28\n",
     "        entries, which display_fig lays out on a fixed 4x7 grid.\n",
     "        \"\"\"\n",
     "        def display_fig(figure_A, figure_B):\n",
     "            # Stack A over B, tile into a 4x7 grid, rescale [-1, 1] -> [0, 255],\n",
     "            # then convert BGR -> RGB for notebook display.\n",
     "            figure = np.concatenate([figure_A, figure_B], axis=0 )\n",
     "            figure = figure.reshape((4,7) + figure.shape[1:])\n",
     "            figure = stack_images(figure)\n",
     "            figure = np.clip((figure + 1) * 255 / 2, 0, 255).astype('uint8')\n",
     "            figure = cv2.cvtColor(figure, cv2.COLOR_BGR2RGB)\n",
     "            display(Image.fromarray(figure)) \n",
     "\n",
     "        # Each generator's output is [alpha mask, rgb reconstruction].\n",
     "        out_test_A_netGA = self.model.netGA.predict(test_A)\n",
     "        out_test_A_netGB = self.model.netGB.predict(test_A)\n",
     "        out_test_B_netGA = self.model.netGA.predict(test_B)\n",
     "        out_test_B_netGB = self.model.netGB.predict(test_B)\n",
     "\n",
     "        # Row 1: input; row 2: same-identity blend; row 3: swapped-identity blend.\n",
     "        figure_A = np.stack([\n",
     "            test_A,\n",
     "            out_test_A_netGA[0] * out_test_A_netGA[1] + (1 - out_test_A_netGA[0]) * test_A,\n",
     "            out_test_A_netGB[0] * out_test_A_netGB[1] + (1 - out_test_A_netGB[0]) * test_A,\n",
     "            ], axis=1 )\n",
     "        figure_B = np.stack([\n",
     "            test_B,\n",
     "            out_test_B_netGB[0] * out_test_B_netGB[1] + (1 - out_test_B_netGB[0]) * test_B,\n",
     "            out_test_B_netGA[0] * out_test_B_netGA[1] + (1 - out_test_B_netGA[0]) * test_B,\n",
     "            ], axis=1 )\n",
     "        \n",
     "        print (\"Masked results:\")\n",
     "        display_fig(figure_A, figure_B)   \n",
     "\n",
     "        # Raw reconstructions, without alpha blending.\n",
     "        figure_A = np.stack([\n",
     "            test_A,\n",
     "            out_test_A_netGA[1],\n",
     "            out_test_A_netGB[1],\n",
     "            ], axis=1 )\n",
     "        figure_B = np.stack([\n",
     "            test_B,\n",
     "            out_test_B_netGB[1],\n",
     "            out_test_B_netGA[1],\n",
     "            ], axis=1 )\n",
     "        \n",
     "        print (\"Raw results:\")\n",
     "        display_fig(figure_A, figure_B)       \n",
     "\n",
     "        # Alpha masks, tiled to 3 channels and mapped [0, 1] -> [-1, 1] so that\n",
     "        # display_fig's rescale renders them as grayscale.\n",
     "        figure_A = np.stack([\n",
     "            test_A,\n",
     "            np.tile(out_test_A_netGA[0],3) * 2 - 1,\n",
     "            np.tile(out_test_A_netGB[0],3) * 2 - 1,\n",
     "            ], axis=1 )\n",
     "        figure_B = np.stack([\n",
     "            test_B,\n",
     "            np.tile(out_test_B_netGB[0],3) * 2 - 1,\n",
     "            np.tile(out_test_B_netGA[0],3) * 2 - 1,\n",
     "            ], axis=1 )\n",
     "        print (\"Alpha masks:\")\n",
     "\n",
     "        display_fig(figure_A, figure_B)        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "img_dirA = './faceA/*.*'\n",
    "img_dirB = './faceB/*.*'\n",
    "\n",
    "def read_image(fn, random_transform_args):\n",
    "    image = cv2.imread(fn)\n",
    "    image = cv2.resize(image, (256,256)) / 255 * 2 - 1\n",
    "    image = random_transform(image, **random_transform_args )\n",
    "    warped_img, target_img = random_warp(image)\n",
    "    return warped_img, target_img\n",
    "\n",
     "def minibatch(data, batchsize, args):\n",
     "    \"\"\"Infinite generator of (epoch, warped_batch, target_batch) arrays.\n",
     "\n",
     "    A value passed in via .send(n) overrides the batch size for that single\n",
     "    yield; sending None (a plain next()) uses `batchsize`.\n",
     "    NOTE(review): `data` is shuffled in place, mutating the caller's list.\n",
     "    \"\"\"\n",
     "    length = len(data)\n",
     "    epoch = i = 0\n",
     "    tmpsize = None  \n",
     "    shuffle(data)\n",
     "    while True:\n",
     "        size = tmpsize if tmpsize else batchsize\n",
     "        if i+size > length:\n",
     "            # Not enough images left in this pass: reshuffle, start new epoch.\n",
     "            shuffle(data)\n",
     "            i = 0\n",
     "            epoch+=1        \n",
     "        # rtn has shape (size, 2, H, W, C): axis 1 holds (warped, target).\n",
     "        rtn = np.float32([read_image(data[j], args) for j in range(i,i+size)])\n",
     "        i+=size\n",
     "        tmpsize = yield epoch, rtn[:,0,:,:,:], rtn[:,1,:,:,:]       \n",
    "\n",
    "def minibatchAB(dataA, batchsize, args):\n",
    "    batchA = minibatch(dataA, batchsize, args)\n",
    "    tmpsize = None    \n",
    "    while True:        \n",
    "        ep1, warped_img, target_img = batchA.send(tmpsize)\n",
    "        tmpsize = yield ep1, warped_img, target_img\n",
    "\n",
    "def load_data(file_pattern):\n",
    "    return glob.glob(file_pattern)\n",
    "\n",
     "def launch_training(max_iters, batch_size=8, save_interval=100):\n",
     "    \"\"\"Top-level training entry point.\n",
     "\n",
     "    Args:\n",
     "        max_iters: total number of generator iterations to run.\n",
     "        batch_size: images per batch, for both identities.\n",
     "        save_interval: save weights and display samples every N iterations.\n",
     "    \"\"\"\n",
     "    train_A = load_data(img_dirA)\n",
     "    train_B = load_data(img_dirB)        \n",
     "    assert len(train_A), \"No image found in \" + str(img_dirA) + \".\"\n",
     "    assert len(train_B), \"No image found in \" + str(img_dirB) + \".\"\n",
     "\n",
     "    gan = GANModel()\n",
     "    trainer = Train(gan, train_A, train_B, batch_size)\n",
     "\n",
     "    print (\"Training starts...\")\n",
     "    t0 = time.time()\n",
     "    gen_iterations = 0\n",
     "\n",
     "    while gen_iterations < max_iters:\n",
     "        #print (\"iter: \" + str(gen_iterations))\n",
     "\n",
     "        _ = trainer.train_one_step(gen_iterations, t0)\n",
     "        gen_iterations += 1           \n",
     "\n",
     "        # If at save interval => save models & show results\n",
     "        if (gen_iterations) % save_interval == 0:\n",
     "            clear_output()\n",
     "\n",
     "            # Save models\n",
     "            gan.save_weights()\n",
     "\n",
     "            # Show results\n",
     "            trainer.show_sample()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "launch_training(max_iters=2e4, batch_size=8, save_interval=20)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Neptune",
   "language": "",
   "name": "neptune-kernel"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
