{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# coding: utf-8\n",
    "\n",
    "# In[1]:\n",
    "\n",
    "\n",
    "# CIFAR-10 CNN example using the TensorFlow 1.x graph API with TensorLayer.\n",
    "import time\n",
    "\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "import tensorlayer as tl\n",
    "from tensorlayer.layers import *\n",
    "\n",
    "# Interactive session so later cells can call sess.run() directly.\n",
    "sess = tf.InteractiveSession()\n",
    "\n",
    "# Load CIFAR-10 as NHWC arrays: images shaped (-1, 32, 32, 3), labels are class ids.\n",
    "X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)\n",
    "\n",
    "\n",
    "\n",
    "# In[2]:\n",
    "\n",
    "\n",
    "def model(x, y_, reuse, is_train, batch_norm=False):\n",
    "    \"\"\"Build the CIFAR-10 CNN graph: two conv/pool blocks plus an MLP head.\n",
    "\n",
    "    Args:\n",
    "        x: image placeholder, NHWC float32 (the caller feeds 24x24x3 crops).\n",
    "        y_: int64 class-label placeholder.\n",
    "        reuse: reuse variables of a previously built copy of this graph.\n",
    "        is_train: training-mode flag, forwarded to BatchNormLayer.\n",
    "        batch_norm: if True use batch normalization after each conv,\n",
    "            otherwise use local response normalization.\n",
    "\n",
    "    Returns:\n",
    "        (net, cost, acc): the TensorLayer network, the total loss\n",
    "        (cross entropy + L2 weight decay), and the accuracy tensor.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Parameter initializers (conv weights, dense weights, dense biases).\n",
    "    W_init = tf.truncated_normal_initializer(stddev=5e-2)\n",
    "    W_init2 = tf.truncated_normal_initializer(stddev=0.04)\n",
    "    b_init2 = tf.constant_initializer(value=0.1)\n",
    "    \n",
    "    \n",
    "    # Define the model together with its loss function and accuracy.\n",
    "    with tf.variable_scope(\"model\", reuse=reuse):\n",
    "        tl.layers.set_name_reuse(reuse)\n",
    "        net = InputLayer(x, name='input')\n",
    "        \n",
    "        \n",
    "        \n",
    "        # Conv block 1\n",
    "        # Conv:\n",
    "        #  act=tf.nn.relu\n",
    "        #  shape=[5, 5, 3, 64],\n",
    "        #  strides=[1, 1, 1, 1]\n",
    "        # MaxPool:\n",
    "        #  ksize=[1, 3, 3, 1]\n",
    "        #  strides=[1, 2, 2, 1]\n",
    "        \n",
    "        if batch_norm:\n",
    "            # Use batch normalization (conv bias disabled via b_init=None).\n",
    "            net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')\n",
    "            net = BatchNormLayer(net, is_train, act=tf.nn.relu, name=\"batch1\")\n",
    "            net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')\n",
    "\n",
    "        else:\n",
    "            # Use local response normalization instead.\n",
    "            net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn1')\n",
    "            net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')\n",
    "            net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')\n",
    "\n",
    "        \n",
    "        # Conv block 2\n",
    "        # Conv:\n",
    "        #  shape=[5, 5, 64, 64],\n",
    "        #  strides=[1, 1, 1, 1], \n",
    "        # MaxPool:\n",
    "        #  ksize=[1, 3, 3, 1]\n",
    "        #  strides=[1, 2, 2, 1]\n",
    "        \n",
    "        if batch_norm:\n",
    "            net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')\n",
    "            net = BatchNormLayer(net, is_train, act=tf.nn.relu, name=\"batch2\")\n",
    "        else:\n",
    "            net = Conv2d(net, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', W_init=W_init, name='cnn2')\n",
    "            net = LocalResponseNormLayer(net, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')\n",
    "        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')\n",
    "\n",
    "        \n",
    "        # MLP classifier head.\n",
    "        net = FlattenLayer(net, name='flatten')  # output: (batch_size, 2304)\n",
    "        net = DenseLayer(net, n_units=384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')  # output: (batch_size, 384)\n",
    "        net = DenseLayer(net, n_units=192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')  # output: (batch_size, 192)\n",
    "        net = DenseLayer(net, n_units=10, act=tf.identity, W_init=tf.truncated_normal_initializer(stddev=1 / 192.0), name='output')  # output: (batch_size, 10)\n",
    "        y = net.outputs\n",
    "\n",
    "        \n",
    "        # Cross-entropy loss.\n",
    "        ce = tl.cost.cross_entropy(y, y_, name='cost')\n",
    "        \n",
    "        # L2 weight decay on weights whose variable name matches 'relu/W'\n",
    "        # (i.e. the d1relu/d2relu dense layers above).\n",
    "        L2 = 0\n",
    "        for p in tl.layers.get_variables_with_name('relu/W', True, True):\n",
    "            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)\n",
    "        cost = ce + L2\n",
    "\n",
    "        # Accuracy: fraction of argmax predictions equal to the labels.\n",
    "        correct_prediction = tf.equal(tf.argmax(y, 1), y_)\n",
    "        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "        return net, cost, acc\n",
    "\n",
    "\n",
    "\n",
    "# In[3]:\n",
    "\n",
    "\n",
    "# Data augmentation / preprocessing for a single image.\n",
    "def distort_fn(x, is_train=False):\n",
    "    \"\"\"Crop to 24x24 (random crop in training, central crop otherwise),\n",
    "    optionally apply random flip and brightness jitter, then standardize.\"\"\"\n",
    "    img = tl.prepro.crop(x, 24, 24, is_random=is_train)\n",
    "    if is_train:\n",
    "        img = tl.prepro.flip_axis(img, axis=1, is_random=True)\n",
    "        img = tl.prepro.brightness(img, gamma=0.1, gain=1, is_random=True)\n",
    "    centered = img - np.mean(img)\n",
    "    scale = max(np.std(img), 1e-5)  # avoid division by zero on constant images\n",
    "    return centered / scale\n",
    "\n",
    "\n",
    "# In[4]:\n",
    "\n",
    "\n",
    "def train():\n",
    "    \"\"\"Build the train/eval graphs and run the training loop.\n",
    "\n",
    "    Trains with Adam on augmented 24x24 crops and evaluates loss/accuracy\n",
    "    on the (centrally cropped) test set every `print_freq` epochs.\n",
    "    Uses the module-level `sess`, `model`, `distort_fn` and CIFAR-10 arrays.\n",
    "    \"\"\"\n",
    "    x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')\n",
    "    y_ = tf.placeholder(\n",
    "        tf.int64, shape=[\n",
    "            None,\n",
    "        ], name='y_')\n",
    "\n",
    "    # Two copies of the graph sharing variables: one for training, one for eval.\n",
    "    network, cost, _ = model(x, y_, False, is_train=True)\n",
    "    _, cost_test, acc = model(x, y_, True, is_train=False)\n",
    "\n",
    "    # Training hyperparameters.\n",
    "    n_epoch = 50000\n",
    "    learning_rate = 0.0001\n",
    "    print_freq = 1\n",
    "    batch_size = 128\n",
    "\n",
    "    train_params = network.all_params\n",
    "    train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False).minimize(cost, var_list=train_params)\n",
    "\n",
    "    tl.layers.initialize_global_variables(sess)\n",
    "\n",
    "    network.print_params(False)\n",
    "    network.print_layers()\n",
    "\n",
    "    print('   learning_rate: %f' % learning_rate)\n",
    "    print('   batch_size: %d' % batch_size)\n",
    "\n",
    "    for epoch in range(n_epoch):\n",
    "        start_time = time.time()\n",
    "        for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train, batch_size, shuffle=True):\n",
    "            # Pin augmentation to the CPU so it does not compete for the GPU.\n",
    "            # Fix: the original `with tf.device(\"cpu:0\")` was missing the\n",
    "            # trailing colon, which made this cell a SyntaxError.\n",
    "            with tf.device(\"cpu:0\"):\n",
    "                X_train_a = tl.prepro.threading_data(X_train_a, fn=distort_fn, is_train=True)  # data augmentation for training\n",
    "\n",
    "            sess.run(train_op, feed_dict={x: X_train_a, y_: y_train_a})\n",
    "\n",
    "        if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:\n",
    "            print(\"Epoch %d of %d took %fs\" % (epoch + 1, n_epoch, time.time() - start_time))\n",
    "            test_loss, test_acc, n_batch = 0, 0, 0\n",
    "            for X_test_a, y_test_a in tl.iterate.minibatches(X_test, y_test, batch_size, shuffle=False):\n",
    "                X_test_a = tl.prepro.threading_data(X_test_a, fn=distort_fn, is_train=False)  # central crop\n",
    "                err, ac = sess.run([cost_test, acc], feed_dict={x: X_test_a, y_: y_test_a})\n",
    "                test_loss += err\n",
    "                test_acc += ac\n",
    "                n_batch += 1\n",
    "            print(\"   test loss: %f\" % (test_loss / n_batch))\n",
    "            print(\"   test acc: %f\" % (test_acc / n_batch))\n",
    "\n",
    "\n",
    "# In[5]:\n",
    "\n",
    "\n",
    "# Run training. NOTE: n_epoch is 50000, so this effectively runs until\n",
    "# interrupted; stop the kernel manually when satisfied with the accuracy.\n",
    "train()\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
