{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# cnn-mnist\n",
    "使用tensorflow，构造并训练一个神经网络，在测试集上达到超过98%的准确率。 \n",
    "作业详情：https://gitee.com/ai100/quiz-w7-1-cnn-mnist "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "# Standard-library imports.\n",
    "# NOTE(review): argparse, sys and advanced_activations are never used in this\n",
    "# notebook -- confirm before removing.\n",
    "import argparse\n",
    "import sys\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "\n",
    "# MNIST download/loader helper shipped with the TF 1.x tutorials package.\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "# Keras layers are used below as callables that build ops on raw TF tensors.\n",
    "from keras.layers.core import Dense, Flatten, Dropout\n",
    "from keras.layers.convolutional import Conv2D\n",
    "from keras.layers.pooling import MaxPooling2D\n",
    "from keras import initializers\n",
    "from keras.layers import advanced_activations\n",
    "\n",
    "from keras import backend as K\n",
    "\n",
    "# Displays the Keras image data format; the model below assumes\n",
    "# 'channels_last', i.e. input_shape = [height, width, channels].\n",
    "K.image_data_format() "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Import data: downloads MNIST into data_dir on first run, then reuses the\n",
    "# cached files. one_hot=True yields 10-dim one-hot label vectors matching y_.\n",
    "# NOTE(review): hardcoded absolute path -- change for your environment.\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model inputs: x holds flattened 28x28 images (784 floats per example),\n",
    "# y_ holds one-hot labels. Batch dimension is None so any batch size works.\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "# Learning rate is a placeholder so the training loop can decay it per step.\n",
    "learning_rate = tf.placeholder(tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reshape flat 784-vectors back to [batch, height, width, channels] images.\n",
    "with tf.name_scope('reshape'):\n",
    "  x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "\n",
    "# Conv2D(filters, kernel_size, strides=(1,1), padding='valid', activation=None,\n",
    "#        use_bias=True, kernel_initializer=..., bias_initializer=..., ...)\n",
    "# filters: number of output channels; kernel_size/strides: spatial dimensions;\n",
    "# padding: 'same' keeps the spatial size, 'valid' shrinks it.\n",
    "\n",
    "# Conv layer 1: 16 filters of 7x7, stride 1, SELU activation.\n",
    "net = Conv2D(16, kernel_size=[7,7], strides=[1,1], activation='selu', padding='same',\n",
    "             kernel_initializer = 'glorot_normal',  # Glorot/Xavier normal initialization\n",
    "             bias_initializer = initializers.Constant(value=0.01), \n",
    "             input_shape=[28,28,1])(x_image)  \n",
    "# The first layer must declare input_shape = [height, width, channels].\n",
    "# Keras layers are callables, so the input tensor x_image is applied directly.\n",
    "\n",
    "\n",
    "# MaxPooling2D(pool_size=(2,2), strides=None, padding='valid', data_format=None)\n",
    "# pool_size=(2,2) halves both spatial dimensions.\n",
    "\n",
    "# Pooling layer 1: 28x28 -> 14x14.\n",
    "net = MaxPooling2D(pool_size=[2,2])(net) # max pooling\n",
    "\n",
    "# Conv layer 2: 128 filters of 3x3.\n",
    "net = Conv2D(128, kernel_size=[3,3], strides=[1,1],activation='selu', padding='same', \n",
    "             kernel_initializer = 'glorot_normal',  # Glorot/Xavier normal initialization\n",
    "             bias_initializer = initializers.Constant(value=0.01))(net)\n",
    "\n",
    "# Pooling layer 2: 14x14 -> 7x7.\n",
    "net = MaxPooling2D(pool_size=[2,2])(net)\n",
    "\n",
    "# Flatten the conv feature map to a vector for the dense layers\n",
    "# (does not affect the batch dimension).\n",
    "net = Flatten()(net) \n",
    "\n",
    "# Dropout: in Keras the argument is the fraction DROPPED, so 0.7 drops 70%.\n",
    "# NOTE(review): the Keras learning phase is never fed in the training loop;\n",
    "# depending on the Keras version, dropout may not switch between train and\n",
    "# eval behavior as intended -- confirm with K.learning_phase().\n",
    "net = Dropout(0.7)(net)\n",
    "\n",
    "# Dense(units, activation=None, use_bias=True, ...): fully connected layer\n",
    "# producing `units` outputs; no activation means linear a(x)=x.\n",
    "\n",
    "# Fully connected layer 1.\n",
    "net = Dense(1000, activation='selu')(net) # hidden dense layer\n",
    "\n",
    "# Output layer: 10-way softmax over the digit classes.\n",
    "net = Dense(10,activation='softmax')(net)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cross-entropy between one-hot labels y_ and softmax predictions net,\n",
    "# averaged over the batch.\n",
    "# NOTE(review): this import lives here instead of the top import cell.\n",
    "from keras.objectives import categorical_crossentropy\n",
    "cross_entropy = tf.reduce_mean(categorical_crossentropy(y_, net)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# L2 penalty summed over ALL trainable variables.\n",
    "# NOTE(review): this includes bias vectors as well as kernels -- usually only\n",
    "# kernels are regularized; confirm this is intended.\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "# Regularization strength (penalty factor).\n",
    "regularization_rate = 1e-4\n",
    "total_loss = cross_entropy + regularization_rate * l2_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plain SGD on the regularized loss; learning_rate is fed on each step.\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "sess = tf.Session()\n",
    "# Register the session with Keras so its layers share this graph/session.\n",
    "K.set_session(sess)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "batch_size = 100\n",
    "# Initial learning rate.\n",
    "lr = 0.10\n",
    "# Multiplicative learning-rate decay factor.\n",
    "lr_decay = 0.87\n",
    "acc_train_mat = []\n",
    "acc_test_mat = []\n",
    "loss_mat = []\n",
    "l2_loss_mat = []\n",
    "total_loss_mat = []\n",
    "# Build the evaluation ops ONCE, outside the loop. (The original created\n",
    "# tf.equal/tf.reduce_mean inside the loop every 100 steps, which adds new\n",
    "# nodes to the graph on every evaluation -- unbounded graph growth and a\n",
    "# progressively slower run.)\n",
    "correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "# Training loop.\n",
    "for step in range(8000):   \n",
    "    # Once the training loss plateaus (around step 5000), decay the\n",
    "    # learning rate every 500 steps.\n",
    "    if (step+1) >= 5000 and (step+1) % 500 == 0:\n",
    "        lr *= lr_decay\n",
    "        \n",
    "    batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr})\n",
    "    if (step+1) % 100 == 0: \n",
    "        # Log progress on the current mini-batch.\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f, lr: %f, accuracy: %f' % \n",
    "                (step+1, loss, l2_loss_value, total_loss_value, lr, sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys})))\n",
    "        loss_mat.append(loss)\n",
    "        l2_loss_mat.append(l2_loss_value)\n",
    "        total_loss_mat.append(total_loss_value)\n",
    "        \n",
    "        if (step+1) % 500 == 0:\n",
    "            batch_size_pre = 1000\n",
    "            # Training accuracy, evaluated in chunks (GPU memory is too\n",
    "            # small for the full set at once) and averaged.\n",
    "            train_size = mnist.train.images.shape\n",
    "            acc = []\n",
    "            step_time = (int)(train_size[0]/batch_size_pre)\n",
    "            for step_train in range(step_time):\n",
    "                batch_xs, batch_ys = mnist.train.next_batch(batch_size_pre)\n",
    "                acc.append(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys}))\n",
    "            step_acc_train = np.mean(acc)\n",
    "            print('step %d, 训练正确率:  %f' % (step+1, step_acc_train))\n",
    "            acc_train_mat.append(step_acc_train)\n",
    "\n",
    "            # Test accuracy, evaluated the same chunked way.\n",
    "            test_size = mnist.test.images.shape\n",
    "            acc = []\n",
    "            step_time = (int)(test_size[0]/batch_size_pre)\n",
    "            for step_test in range(step_time):\n",
    "                batch_xs_test, batch_ys_test = mnist.test.next_batch(batch_size_pre)\n",
    "                acc.append(sess.run(accuracy, feed_dict={x: batch_xs_test, y_: batch_ys_test}))\n",
    "            step_acc_test = np.mean(acc)\n",
    "            print('step %d, 测试正确率:  %f' % (step+1, step_acc_test))\n",
    "            acc_test_mat.append(step_acc_test)\n",
    "            \n",
    "            # Early stop once test accuracy reaches the target.\n",
    "            if step_acc_test >= 0.993:\n",
    "                break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Visualization of the metrics collected during training.\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "# Mini-batch cross-entropy loss (sampled every 100 steps).\n",
    "plt.plot(range(len(loss_mat)), loss_mat) \n",
    "plt.xlabel('steps')\n",
    "plt.ylabel('steps_entropy_loss')\n",
    "plt.show() \n",
    "\n",
    "# L2 penalty term.\n",
    "plt.plot(range(len(l2_loss_mat)), l2_loss_mat) \n",
    "plt.xlabel('steps')\n",
    "plt.ylabel('steps_l2_loss')\n",
    "plt.show()\n",
    "\n",
    "# Total (regularized) loss.\n",
    "plt.plot(range(len(total_loss_mat)), total_loss_mat) \n",
    "plt.xlabel('steps')\n",
    "plt.ylabel('steps_total_loss')\n",
    "plt.show()\n",
    "\n",
    "# Train vs. test accuracy (sampled every 500 steps).\n",
    "plt.plot(range(len(acc_train_mat)), acc_train_mat, label='Train Accuracy') \n",
    "plt.plot(range(len(acc_test_mat)), acc_test_mat, label='Test Accuracy')\n",
    "plt.legend()\n",
    "plt.xlabel('steps')\n",
    "plt.ylabel('accuracy')\n",
    "plt.show() "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "毫无疑问，最初的基础模型是一个非常简陋、性能也不理想的模型，只能达到92%左右的准确率。\n",
    "接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。\n",
    "Hint：\n",
    "- 卷积\n",
    "- 池化\n",
    "- 激活函数\n",
    "- 正则化\n",
    "- 初始化\n",
    "- 摸索一下各个超参数\n",
    "  - 卷积kernel size\n",
    "  - 卷积kernel 数量\n",
    "  - 学习率\n",
    "  - 正则化惩罚因子\n",
    "  - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
