{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 尝试类似LeNet-5模型实现mnist手写体数字识别"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LeNet-5模型架构，卷积层-->池化层-->卷积层-->池化层-->全连接层-->全连接层-->全连接层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use the retry module or similar alternatives.\n"
     ]
    }
   ],
   "source": [
    "#导入工具包\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "#argparse 解析命令行的模块\n",
    "import argparse\n",
    "import sys\n",
    "#导入tf样本input_data\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# MNIST dataset parameters\n",
    "INPUT_NODE = 784 # pixels per MNIST image (28*28); number of input-layer nodes\n",
    "OUTPUT_NODE = 10 # number of output-layer nodes; equals the number of classes\n",
    "\n",
    "# Neural-network hyperparameters\n",
    "BATCH_SIZE = 100 # samples per training batch; smaller -> closer to stochastic gradient descent, larger -> closer to full-batch gradient descent\n",
    "\n",
    "LEARNING_RATE_BASE = 0.003 # initial learning rate\n",
    "LEARNING_RATE_DECAY = 0.99 # decay rate of the learning rate\n",
    "REGULARIZATION_RATE = 0.000001 # weight of the L2 regularization (model-complexity) term in the loss\n",
    "TRAINING_STEPS = 20000 # number of training iterations\n",
    "MOVING_AVERAGE_DECAY = 0.99 # decay for the exponential moving average of the weights\n",
    "\n",
    "IMAGE_SIZE = 28 # image height and width\n",
    "NUM_CHANNELS = 1 # grayscale images have a single channel\n",
    "NUM_LABELS = 10 # digit classes 0-9\n",
    "\n",
    "# First convolutional layer: kernel size and depth\n",
    "CONV1_DEEP = 32\n",
    "CONV1_SIZE = 5\n",
    "\n",
    "# Second convolutional layer: kernel size and depth\n",
    "CONV2_DEEP = 64\n",
    "CONV2_SIZE = 5\n",
    "\n",
    "# Number of nodes in the hidden fully connected layer\n",
    "FC_SIZE = 512"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def inference(input_tensor, regularizer):\n",
    "    \"\"\"Forward pass of the LeNet-5-style network.\n",
    "\n",
    "    Args:\n",
    "        input_tensor: batch of images, shape [batch, 28, 28, 1].\n",
    "        regularizer: callable applied to the fully connected weights to\n",
    "            produce a regularization loss term, or None to disable it.\n",
    "\n",
    "    Returns:\n",
    "        Unscaled logits tensor of shape [batch, NUM_LABELS].\n",
    "    \"\"\"\n",
    "    # Layer 1: convolution. Input 28x28x1; 5x5 kernels, depth 32, stride 1,\n",
    "    # SAME padding -> output 28x28x32.\n",
    "    with tf.variable_scope('layer1-conv1'):\n",
    "        conv1_weights = tf.get_variable(\"weight\", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],\n",
    "                                        initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "        conv1_biases = tf.get_variable(\"bias\", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))\n",
    "        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')\n",
    "        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))\n",
    "\n",
    "    # Layer 2: max pooling. 2x2 window, stride 2 -> output 14x14x32\n",
    "    # (pooling changes the spatial size only, not the depth).\n",
    "    with tf.name_scope('layer2-pool1'):\n",
    "        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
    "\n",
    "    # Layer 3: convolution. 5x5 kernels, depth 64, stride 1, SAME padding\n",
    "    # -> output 14x14x64.\n",
    "    with tf.variable_scope('layer3-conv2'):\n",
    "        conv2_weights = tf.get_variable(\"weight\", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],\n",
    "                                        initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "        conv2_biases = tf.get_variable(\"bias\", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))\n",
    "        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')\n",
    "        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))\n",
    "\n",
    "    # Layer 4: max pooling. Same 2x2 window / stride 2 as layer 2\n",
    "    # -> output 7x7x64.\n",
    "    with tf.name_scope('layer4-pool2'):\n",
    "        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
    "\n",
    "    # Flatten the 7x7x64 feature map into a vector for the FC layers.\n",
    "    # pool_shape[0] is the batch size, so the vector length is the product\n",
    "    # of the remaining dimensions.\n",
    "    pool_shape = pool2.get_shape().as_list()\n",
    "    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]\n",
    "    reshaped = tf.layers.flatten(pool2)\n",
    "\n",
    "    # Layer 5: fully connected, nodes -> FC_SIZE.\n",
    "    with tf.variable_scope('layer5-fc1'):\n",
    "        fc1_weights = tf.get_variable(\"weight\", [nodes, FC_SIZE],\n",
    "                                      initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "        # Only the fully connected weights receive L2 regularization.\n",
    "        if regularizer is not None:\n",
    "            tf.add_to_collection('losses', regularizer(fc1_weights))\n",
    "        fc1_biases = tf.get_variable(\"bias\", [FC_SIZE], initializer=tf.constant_initializer(0.1))\n",
    "        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)\n",
    "        #fc1 = tf.nn.dropout(fc1, 0.8)\n",
    "\n",
    "    # Layer 6: fully connected, FC_SIZE -> NUM_LABELS. No activation here:\n",
    "    # the softmax is applied by the loss function downstream.\n",
    "    with tf.variable_scope('layer6-fc2'):\n",
    "        fc2_weights = tf.get_variable(\"weight\", [FC_SIZE, NUM_LABELS],\n",
    "                                      initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "        if regularizer is not None:\n",
    "            tf.add_to_collection('losses', regularizer(fc2_weights))\n",
    "        fc2_biases = tf.get_variable(\"bias\", [NUM_LABELS], initializer=tf.constant_initializer(0.1))\n",
    "        logit = tf.matmul(fc1, fc2_weights) + fc2_biases\n",
    "\n",
    "    # Return the layer-6 logits.\n",
    "    return logit\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training procedure.\n",
    "def train(mnist):\n",
    "    \"\"\"Build the LeNet-5-style graph and train it on the given MNIST dataset.\n",
    "\n",
    "    Args:\n",
    "        mnist: a tensorflow.examples.tutorials.mnist DataSets object with\n",
    "            train / validation / test splits (one-hot labels expected).\n",
    "    \"\"\"\n",
    "    # Placeholder for flattened input images (784 = 28*28 pixels).\n",
    "    x = tf.placeholder(tf.float32,[None, 784], name='x-input')\n",
    "    # Placeholder for the one-hot ground-truth labels.\n",
    "    y_ = tf.placeholder(tf.float32,[None, OUTPUT_NODE], name='y_input')\n",
    "    \n",
    "    # Reshape the flat vectors into the NHWC format the conv layers expect.\n",
    "    reshaped_x = tf.reshape(x, [-1, 28, 28, 1] )\n",
    "    \n",
    "    # L2 regularizer applied to the fully connected weights inside inference().\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
    "    \n",
    "    # Forward pass with the current parameters. No moving-average class is\n",
    "    # passed here; averaged parameters are normally only used at prediction\n",
    "    # time, so this graph uses the raw (non-averaged) variables.\n",
    "    y = inference(reshaped_x, regularizer)\n",
    "    \n",
    "    # Variable holding the current training step. It needs no moving average,\n",
    "    # so it is marked non-trainable (trainable=False) — the usual TensorFlow\n",
    "    # convention for step counters.\n",
    "    global_step = tf.Variable(0, trainable=False)\n",
    "    \n",
    "    # Moving-average class driven by the decay rate and the step counter.\n",
    "    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n",
    "    \n",
    "    # Maintain a shadow moving average for every network parameter;\n",
    "    # tf.trainable_variables() returns all trainable variables in the graph.\n",
    "    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n",
    "    \n",
    "    # Cross-entropy via sparse_softmax_cross_entropy_with_logits, which takes\n",
    "    # a single class index per example (not a one-hot vector), hence the\n",
    "    # tf.argmax over the one-hot labels.\n",
    "    cross_entropy = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits= y, labels=tf.argmax(y_, 1)))\n",
    "    \n",
    "    \n",
    "    \n",
    "    # Total loss = cross-entropy + the collected regularization terms.\n",
    "    loss = cross_entropy + tf.add_n(tf.get_collection('losses'))\n",
    "    \n",
    "    # Exponentially decaying learning rate: fast progress early, smaller\n",
    "    # steps later to avoid oscillation.\n",
    "    learning_rate = tf.train.exponential_decay(\n",
    "        LEARNING_RATE_BASE,    # base learning rate, decayed as iteration proceeds\n",
    "        global_step, # current training step\n",
    "        mnist.train.num_examples / BATCH_SIZE,# steps needed to see the whole training set once\n",
    "        LEARNING_RATE_DECAY) # decay rate of the learning rate \n",
    "\n",
    "    # Minimize the total loss with the Adam optimizer.\n",
    "    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "    # Each training step must both apply the gradients and refresh the moving\n",
    "    # averages; tf.control_dependencies / tf.group bundle multiple ops so a\n",
    "    # single run() call executes them all.\n",
    "    with tf.control_dependencies([train_step, variables_averages_op]):\n",
    "        train_op = tf.no_op(name='train')  # tf.no_op does nothing itself; it just forces its control dependencies to run\n",
    "    \n",
    "    # Per-example correctness: tf.argmax(., 1) yields the predicted / true\n",
    "    # class index, and tf.equal compares them element-wise (True/False).\n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "\n",
    "    # correct_prediction is boolean; cast to float and take the mean —\n",
    "    # tf.reduce_mean over all elements gives the accuracy on the fed data.\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction ,tf.float32))\n",
    "\n",
    "    # Create a session and run the training loop.\n",
    "    with tf.Session() as sess:\n",
    "        # Initialize all global variables.\n",
    "        tf.global_variables_initializer().run()\n",
    "        # Feed dict for the validation split.\n",
    "        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n",
    "        # Feed dict for the test split.\n",
    "        test_feed = {x: mnist.test.images, y_: mnist.test.labels}\n",
    "        \n",
    "        \n",
    "        # Iteratively train the network.\n",
    "        for i in range(TRAINING_STEPS):\n",
    "            # Every 1000 steps, report accuracy on the validation set.\n",
    "            if i % 1000 == 0:\n",
    "                validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n",
    "                print (\"After %d training steps, validation accuracy \"\n",
    "                       \"using average model is %g \" % (i, validate_acc))\n",
    "            # Fetch the next training batch and run one optimization step.\n",
    "            xs, ys = mnist.train.next_batch(BATCH_SIZE)\n",
    "            sess.run(train_op, feed_dict={x: xs, y_: ys})\n",
    "        # After training finishes, evaluate final accuracy on the test set.\n",
    "        test_acc= sess.run(accuracy, feed_dict=test_feed)\n",
    "        print (\"After %d training steps, test accuracy using average \"\n",
    "               \"model is %g \" % (TRAINING_STEPS, test_acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-5-16716e72cc9c>:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting C:/Users/dell/Desktop/ai/seven-week/homework\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting C:/Users/dell/Desktop/ai/seven-week/homework\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting C:/Users/dell/Desktop/ai/seven-week/homework\\t10k-images-idx3-ubyte.gz\n",
      "Extracting C:/Users/dell/Desktop/ai/seven-week/homework\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "After 0 training steps, validation accuracy using average model is 0.1358 \n",
      "After 1000 training steps, validation accuracy using average model is 0.9876 \n",
      "After 2000 training steps, validation accuracy using average model is 0.9884 \n",
      "After 3000 training steps, validation accuracy using average model is 0.992 \n",
      "After 4000 training steps, validation accuracy using average model is 0.9906 \n",
      "After 5000 training steps, validation accuracy using average model is 0.99 \n",
      "After 6000 training steps, validation accuracy using average model is 0.9876 \n",
      "After 7000 training steps, validation accuracy using average model is 0.9912 \n",
      "After 8000 training steps, validation accuracy using average model is 0.9896 \n",
      "After 9000 training steps, validation accuracy using average model is 0.991 \n",
      "After 10000 training steps, validation accuracy using average model is 0.9922 \n",
      "After 11000 training steps, validation accuracy using average model is 0.9888 \n",
      "After 12000 training steps, validation accuracy using average model is 0.9892 \n",
      "After 13000 training steps, validation accuracy using average model is 0.9906 \n",
      "After 14000 training steps, validation accuracy using average model is 0.988 \n",
      "After 15000 training steps, validation accuracy using average model is 0.9894 \n",
      "After 16000 training steps, validation accuracy using average model is 0.9924 \n",
      "After 17000 training steps, validation accuracy using average model is 0.9892 \n",
      "After 18000 training steps, validation accuracy using average model is 0.9896 \n",
      "After 19000 training steps, validation accuracy using average model is 0.9884 \n",
      "After 20000 training steps, test accuracy using average model is 0.9919 \n"
     ]
    },
    {
     "ename": "SystemExit",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "An exception has occurred, use %tb to see the full traceback.\n",
      "\u001b[1;31mSystemExit\u001b[0m\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\IPython\\core\\interactiveshell.py:2971: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\n",
      "  warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\n"
     ]
    }
   ],
   "source": [
    "##### Program entry point.\n",
    "def main(argv=None):\n",
    "    \"\"\"Load the MNIST dataset and start training.\n",
    "\n",
    "    Args:\n",
    "        argv: command-line arguments forwarded by tf.app.run (unused).\n",
    "    \"\"\"\n",
    "    # NOTE(review): hardcoded absolute Windows path — consider making the\n",
    "    # data directory configurable (e.g. via argparse/FLAGS) instead.\n",
    "    mnist = input_data.read_data_sets('C:/Users/dell/Desktop/ai/seven-week/homework',one_hot=True)\n",
    "    train(mnist)\n",
    "\n",
    "# Hand control to tf.app.run, which parses flags and calls main().\n",
    "if __name__ == '__main__':\n",
    "    tf.app.run()\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 实验心得\n",
    "\n",
    "TensorFlow 先前是 CPU 版本的，跑得很慢，卸载重装 GPU 版本之后速度快了很多。\n",
    "因为作业的结构和老师的示例代码有点出入，遇到不少坑，耽误了不少时间。\n",
    "使用 dropout 的话，会在测试集上同时生效（所以就把 dropout 去掉了）。\n",
    "使用随机梯度下降算法调了两个晚上，最高也就 98.4%：学习率调得低了，下降太慢；\n",
    "换成 AdamOptimizer 优化算法之后马上达到 99%。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
