{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# DCGAN人脸生成实验\n",
    "* 实验使用 CelebA 数据集的部分数据\n",
    "* 实验数据下载\n",
    ">https://pan.baidu.com/s/14d4pXYORYhddhmMurcxbwA  \n",
    ">提取码：acfe\n",
    "\n",
    "![DCGAN](pic/DCGAN.png)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import math\n",
    "from glob import glob\n",
    "from matplotlib import pyplot\n",
    "from PIL import Image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "%matplotlib inline\n",
    "# Directory containing the CelebA face images (*.jpg) downloaded from the link above.\n",
    "data_dir = './data/facedata/'"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据探索\n",
    "* 定义数据探索函数\n",
    "* 定义图片查看函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_image(image_path, width, height, mode):\n",
    "    \"\"\"Load one image, center-crop a 108x108 face region and resize to (width, height).\n",
    "\n",
    "    Returns a numpy array in the requested PIL color mode (e.g. 'RGB' or 'L').\n",
    "    \"\"\"\n",
    "    image = Image.open(image_path)\n",
    "    if image.size != (width, height):\n",
    "        # Take a central 108x108 crop around the face before downscaling.\n",
    "        face_width = face_height = 108\n",
    "        j = (image.size[0] - face_width) // 2\n",
    "        i = (image.size[1] - face_height) // 2\n",
    "        image = image.crop([j, i, j + face_width, i + face_height])\n",
    "        image = image.resize([width, height], Image.BILINEAR)\n",
    "    return np.array(image.convert(mode))\n",
    "\n",
    "def search(image_files, width, height, mode):\n",
    "    \"\"\"Load a list of image files into one float32 batch of shape (N, H, W, C).\"\"\"\n",
    "    data_batch = np.array(\n",
    "        [get_image(sample_file, width, height, mode) for sample_file in image_files]).astype(np.float32)\n",
    "    # Grayscale images come back as (N, H, W); append a trailing channel axis.\n",
    "    if len(data_batch.shape) < 4:\n",
    "        data_batch = data_batch.reshape(data_batch.shape + (1,))\n",
    "    return data_batch\n",
    "    \n",
    "def img_show(images, mode):\n",
    "    \"\"\"Tile a batch of images into one square PIL image for display.\n",
    "\n",
    "    Pixel values are min-max rescaled to [0, 255]; images beyond the largest\n",
    "    full square (save_size**2) are dropped.\n",
    "    \"\"\"\n",
    "    save_size = math.floor(np.sqrt(images.shape[0]))\n",
    "    # NOTE(review): divides by (max - min) -- a constant batch would yield\n",
    "    # NaN/inf values here; fine for real photographs.\n",
    "    images = (((images - images.min()) * 255) / (images.max() - images.min())).astype(np.uint8)\n",
    "    images_in_square = np.reshape(\n",
    "            images[:save_size*save_size],\n",
    "            (save_size, save_size, images.shape[1], images.shape[2], images.shape[3]))\n",
    "    if mode == 'L':\n",
    "        # PIL expects 2-D arrays for grayscale; drop the channel axis.\n",
    "        images_in_square = np.squeeze(images_in_square, 4)\n",
    "    new_im = Image.new(mode, (images.shape[1] * save_size, images.shape[2] * save_size))\n",
    "    for col_i, col_images in enumerate(images_in_square):\n",
    "        for image_i, image in enumerate(col_images):\n",
    "            im = Image.fromarray(image, mode)\n",
    "            new_im.paste(im, (col_i * images.shape[1], image_i * images.shape[2]))\n",
    "    return new_im"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### CelebA 数据探索"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "# Preview the first 9 CelebA images as a 3x3 grid of 28x28 RGB thumbnails.\n",
    "celeb_images = search(glob(os.path.join(data_dir, '*.jpg'))[:9],28,28,'RGB')\n",
    "pyplot.imshow(img_show(celeb_images, 'RGB'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据处理\n",
    "* 建立数据类\n",
    "* 设定 batch 方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dataset():\n",
    "    \"\"\"Wraps a list of image file paths and serves normalized mini-batches.\"\"\"\n",
    "\n",
    "    def __init__(self, data_files):\n",
    "        # Target size of every served image; matches the 28x28 network input.\n",
    "        IMAGE_WIDTH = 28\n",
    "        IMAGE_HEIGHT = 28\n",
    "        self.image_mode = 'RGB'\n",
    "        image_channels = 3\n",
    "        self.data_files = data_files\n",
    "        # (num_images, width, height, channels)\n",
    "        self.shape = len(data_files), IMAGE_WIDTH, IMAGE_HEIGHT, image_channels\n",
    "        \n",
    "    def get_batches(self, batch_size):\n",
    "        \"\"\"Yield image batches scaled to [-0.5, 0.5]; a trailing partial batch is dropped.\"\"\"\n",
    "        IMAGE_MAX_VALUE = 255\n",
    "        current_index = 0\n",
    "        while current_index + batch_size <= self.shape[0]:\n",
    "            data_batch = search(\n",
    "                self.data_files[current_index:current_index+batch_size],\n",
    "                *self.shape[1:3],\n",
    "                self.image_mode)\n",
    "            current_index += batch_size\n",
    "            # [0, 255] -> [-0.5, 0.5]; train() later doubles this to [-1, 1] for tanh.\n",
    "            yield data_batch / IMAGE_MAX_VALUE - 0.5"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 建立DCGAN网络\n",
    "* 如果有GPU加速，可以运行下列代码检查GPU配置与状态"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Report whether TensorFlow can see a GPU device.\n",
    "gpu_name = tf.test.gpu_device_name()\n",
    "if gpu_name:\n",
    "    print(\"正在使用GPU：{}\".format(gpu_name))\n",
    "else:\n",
    "    print(\"无GPU\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 数据输入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_inputs(image_width, image_height, image_channels, z_dim):\n",
    "    \"\"\"Create the graph's input placeholders.\n",
    "\n",
    "    Returns (input_real, input_z, learning_rate): placeholders for real image\n",
    "    batches, latent vectors, and a scalar learning rate respectively.\n",
    "    \"\"\"\n",
    "    real_shape = (None, image_width, image_height, image_channels)\n",
    "    input_real = tf.placeholder(tf.float32, shape=real_shape, name='input_real')\n",
    "    input_z = tf.placeholder(tf.float32, shape=(None, z_dim), name='input_z')\n",
    "    learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n",
    "    return input_real, input_z, learning_rate"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Generator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def generator(z, out_channel_dim, reuse=False, alpha=0.2):\n",
    "    \"\"\"Map latent vectors z to 28x28 images via three transposed convolutions.\n",
    "\n",
    "    Leaky-ReLU activations with slope `alpha`; output is tanh-scaled to [-1, 1].\n",
    "    NOTE(review): batch norm runs in training mode only while reuse is False,\n",
    "    so reused (sampling) calls rely on the moving averages.\n",
    "    \"\"\"\n",
    "    with tf.variable_scope('generator', reuse=reuse):\n",
    "        # Project and reshape: z -> 7x7x256 feature map.\n",
    "        h = tf.layers.dense(z, 7*7*256)\n",
    "        h = tf.reshape(h, (-1, 7, 7, 256))\n",
    "        h = tf.layers.batch_normalization(h, training=not reuse)\n",
    "        h = tf.maximum(alpha * h, h)\n",
    "\n",
    "        # Upsample 7x7 -> 14x14.\n",
    "        h = tf.layers.conv2d_transpose(h, 128, 5, strides=2, padding='SAME')\n",
    "        h = tf.layers.batch_normalization(h, training=not reuse)\n",
    "        h = tf.maximum(alpha * h, h)\n",
    "\n",
    "        # Upsample 14x14 -> 28x28.\n",
    "        h = tf.layers.conv2d_transpose(h, 64, 5, strides=2, padding='SAME')\n",
    "        h = tf.layers.batch_normalization(h, training=not reuse)\n",
    "        h = tf.maximum(alpha * h, h)\n",
    "\n",
    "        # Final stride-1 projection to the requested channel count.\n",
    "        logits = tf.layers.conv2d_transpose(h, out_channel_dim, 5, strides=1, padding='SAME')\n",
    "        out = tf.tanh(logits)\n",
    "\n",
    "    return out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Discriminator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def discriminator(images, reuse=False, alpha=0.2):\n",
    "    \"\"\"Classify 28x28 images as real vs. fake.\n",
    "\n",
    "    Three stride-2 convolutions (28 -> 14 -> 7 -> 4 with SAME padding) with\n",
    "    leaky-ReLU activations, flattened into a single-unit dense layer.\n",
    "    Returns (sigmoid output, raw logits).\n",
    "    \"\"\"\n",
    "    with tf.variable_scope('discriminator', reuse=reuse):\n",
    "        dl1 = tf.layers.conv2d(images, 64, 5, strides=2, padding='SAME', kernel_initializer=tf.random_normal_initializer(stddev=0.02))\n",
    "        dl_relu1 = tf.maximum(alpha * dl1, dl1)\n",
    "        \n",
    "        # BUG FIX: each conv must consume the previous activation, not the raw\n",
    "        # `images`; also batch_normalization takes `training=`, not `trainable=`\n",
    "        # (the latter left the layers permanently in inference mode).\n",
    "        dl2 = tf.layers.conv2d(dl_relu1, 128, 5, strides=2, padding='SAME', kernel_initializer=tf.random_normal_initializer(stddev=0.02))\n",
    "        dl2 = tf.layers.batch_normalization(dl2, training=True)\n",
    "        dl_relu2 = tf.maximum(alpha * dl2, dl2)\n",
    "        \n",
    "        dl3 = tf.layers.conv2d(dl_relu2, 256, 5, strides=2, padding='SAME', kernel_initializer=tf.random_normal_initializer(stddev=0.02))\n",
    "        dl3 = tf.layers.batch_normalization(dl3, training=True)\n",
    "        dl_relu3 = tf.maximum(alpha * dl3, dl3)\n",
    "        \n",
    "        # 28x28 input halved three times (SAME padding) -> 4x4x256 features.\n",
    "        flat = tf.reshape(dl_relu3, (-1, 4*4*256))\n",
    "        logits = tf.layers.dense(flat, 1)\n",
    "        out = tf.sigmoid(logits)\n",
    "        \n",
    "    return out, logits"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_loss(input_real, input_z, out_channel_dim):\n",
    "    \"\"\"Build the discriminator and generator losses.\n",
    "\n",
    "    Real labels are smoothed to (1 - smooth) = 0.9 so the discriminator does\n",
    "    not become over-confident. Returns (d_loss, g_loss).\n",
    "    \"\"\"\n",
    "    smooth = 0.1\n",
    "\n",
    "    g_model = generator(input_z, out_channel_dim)\n",
    "    d_real_model, d_real_logits = discriminator(input_real)\n",
    "    d_fake_model, d_fake_logits = discriminator(g_model, reuse=True)\n",
    "\n",
    "    def mean_sigmoid_xent(logits, labels):\n",
    "        # Shared helper: mean sigmoid cross-entropy against fixed labels.\n",
    "        return tf.reduce_mean(\n",
    "            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\n",
    "\n",
    "    d_real_loss = mean_sigmoid_xent(d_real_logits, tf.ones_like(d_real_logits) * (1 - smooth))\n",
    "    d_fake_loss = mean_sigmoid_xent(d_fake_logits, tf.zeros_like(d_fake_logits))\n",
    "    d_loss = d_real_loss + d_fake_loss\n",
    "    g_loss = mean_sigmoid_xent(d_fake_logits, tf.ones_like(d_fake_logits))\n",
    "\n",
    "    return d_loss, g_loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 优化函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_opt(d_loss, g_loss, learning_rate, beta1):\n",
    "    \"\"\"Create Adam training ops for the discriminator and the generator.\n",
    "\n",
    "    Each optimizer updates only its own variable scope, and each train op\n",
    "    depends on that scope's batch-norm moving-average update ops.\n",
    "    Returns (d_train_opt, g_train_opt).\n",
    "    \"\"\"\n",
    "    t_vars = tf.trainable_variables()\n",
    "    g_vars = [var for var in t_vars if var.name.startswith('generator')]\n",
    "    d_vars = [var for var in t_vars if var.name.startswith('discriminator')]\n",
    "    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n",
    "    # BUG FIX: the discriminator's batch-norm update ops were never attached,\n",
    "    # so its moving averages stayed at their initial values.\n",
    "    d_updates = [opt for opt in update_ops if opt.name.startswith('discriminator')]\n",
    "    with tf.control_dependencies(d_updates):\n",
    "        d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1).minimize(d_loss, var_list=d_vars)\n",
    "    g_updates = [opt for opt in update_ops if opt.name.startswith('generator')]\n",
    "    with tf.control_dependencies(g_updates):\n",
    "        g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1).minimize(g_loss, var_list=g_vars)\n",
    "    return d_train_opt, g_train_opt"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 输出可视化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):\n",
    "    \"\"\"Sample n_images from the generator and display them as one image grid.\n",
    "\n",
    "    NOTE(review): this re-invokes generator() (reuse=True) on every call,\n",
    "    which adds new ops to the graph each time it runs.\n",
    "    \"\"\"\n",
    "    cmap = None if image_mode == 'RGB' else 'gray'\n",
    "    z_dim = input_z.get_shape().as_list()[-1]\n",
    "    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])\n",
    "    generated = generator(input_z, out_channel_dim, True)\n",
    "    samples = sess.run(generated, feed_dict={input_z: example_z})\n",
    "    pyplot.imshow(img_show(samples, image_mode), cmap=cmap)\n",
    "    pyplot.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 开始训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode):\n",
    "    \"\"\"Run the DCGAN training loop.\n",
    "\n",
    "    Logs discriminator/generator losses every `print_every` steps and shows a\n",
    "    grid of generated samples every 100 steps.\n",
    "    \"\"\"\n",
    "    _, image_width, image_height, image_channel = data_shape\n",
    "    input_real, input_z, _learning_rate = model_inputs(image_width, image_height, image_channel, z_dim)\n",
    "    d_loss, g_loss = model_loss(input_real, input_z, image_channel)\n",
    "    d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)\n",
    "    \n",
    "    steps = 0\n",
    "    print_every = 10\n",
    "    img_show_num = 9\n",
    "    \n",
    "    with tf.Session() as sess:\n",
    "        sess.run(tf.global_variables_initializer())\n",
    "        for epoch_i in range(epoch_count):\n",
    "            for batch_images in get_batches(batch_size):\n",
    "                # Batches arrive in [-0.5, 0.5]; scale to [-1, 1] to match tanh output.\n",
    "                batch_images = batch_images*2.0\n",
    "                batch_z = np.random.uniform(-1,1,size=(batch_size, z_dim))\n",
    "                \n",
    "                _ = sess.run(d_train_opt, feed_dict={input_real:batch_images, input_z:batch_z})\n",
    "                _ = sess.run(g_train_opt, feed_dict={input_z:batch_z})\n",
    "                \n",
    "                steps += 1\n",
    "                if steps % print_every == 0:\n",
    "                    d_train_loss = sess.run(d_loss, {input_z:batch_z, input_real:batch_images})\n",
    "                    g_train_loss = g_loss.eval({input_z:batch_z})\n",
    "                    \n",
    "                    # BUG FIX: formatted the notebook-global `epochs` instead of\n",
    "                    # the `epoch_count` parameter.\n",
    "                    print(\"Epoch {}/{}...\".format(epoch_i+1, epoch_count),\n",
    "                          \"Discriminator Loss: {:.4f}...\".format(d_train_loss),\n",
    "                          \"Generator Loss: {:.4f}...\".format(g_train_loss))\n",
    "                    \n",
    "                if (steps % 100 == 0):\n",
    "                    show_generator_output(sess, img_show_num, input_z, image_channel, data_image_mode)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### CelebA 数据集训练\n",
    "* 设定超参\n",
    "* 进行训练\n",
    "* 观看结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Hyperparameters for the CelebA training run.\n",
    "batch_size = 128\n",
    "z_dim = 100\n",
    "learning_rate = 0.001\n",
    "beta1 = 0.5\n",
    "epochs = 5\n",
    "\n",
    "celeba_dataset = Dataset(glob(os.path.join(data_dir, '*.jpg')))\n",
    "# Build and train inside a fresh graph so the cell can be re-run safely.\n",
    "with tf.Graph().as_default():\n",
    "    train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
