{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./MNIST/train-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST/train-labels-idx1-ubyte.gz\n",
      "Extracting ./MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Load the MNIST dataset (downloaded into ./MNIST on first run); labels are one-hot encoded.\n",
    "data_dir = './MNIST'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the graph inputs.\n",
    "x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 = 784-pixel input images\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels for the 10 digits 0-9\n",
    "learning_rate = tf.placeholder(tf.float32)    # optimizer learning rate, fed at each sess.run (original comment wrongly described this as the dropout value)\n",
    "\n",
    "with tf.name_scope('reshape'):\n",
    "# -1 lets TF infer the batch dimension; the trailing 1 is the channel count — the images are grayscale. An RGB image would use 3 channels.\n",
    "  x_image = tf.reshape(x, [-1, 28, 28, 1])    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convolution layer builder.\n",
    "def conv_op(input_op, filter_size, channel_out, step, name):\n",
    "    \"\"\"Build a conv2d + ReLU layer.\n",
    "\n",
    "    Args:\n",
    "        input_op: 4-D input tensor [batch, height, width, channels].\n",
    "        filter_size: side length of the square convolution kernel.\n",
    "        channel_out: number of output feature maps.\n",
    "        step: stride used in both spatial dimensions.\n",
    "        name: name scope for this layer's ops.\n",
    "\n",
    "    Returns:\n",
    "        ReLU-activated convolution output; 'SAME' padding keeps the\n",
    "        spatial size at ceil(input_size / step).\n",
    "    \"\"\"\n",
    "    # Infer the input channel count from the incoming tensor's static shape.\n",
    "    channel_in = input_op.get_shape()[-1].value\n",
    "    with tf.name_scope(name) as scope:\n",
    "        # Weights ~ N(0, 0.1^2); registered in the custom 'WEIGHTS' collection\n",
    "        # so the L2 regularizer cell can sum over them later. Named 'weights'\n",
    "        # for consistency with the 'biases' variable below (the original left\n",
    "        # it anonymous).\n",
    "        weights = tf.Variable(tf.truncated_normal([filter_size, filter_size, channel_in, channel_out], mean=0,\n",
    "                                                  dtype=tf.float32, stddev=0.1),\n",
    "                              name='weights',\n",
    "                              collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])\n",
    "        # Biases start at 0.0, one per output channel.\n",
    "        biases = tf.Variable(tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32),\n",
    "                             trainable=True, name='biases')\n",
    "        # Convolve, then add the per-channel bias.\n",
    "        conv = tf.nn.conv2d(input_op, weights, strides=[1, step, step, 1], padding='SAME') + biases\n",
    "        # ReLU activation, named after the enclosing scope.\n",
    "        conv = tf.nn.relu(conv, name=scope)\n",
    "        return conv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Max-pooling layer.\n",
    "def maxPool_op(input_op, filter_size, step, name):\n",
    "    \"\"\"Max-pool input_op with a square window of side filter_size,\n",
    "    stride `step` in both spatial dimensions, VALID padding.\"\"\"\n",
    "    window = [1, filter_size, filter_size, 1]\n",
    "    strides = [1, step, step, 1]\n",
    "    return tf.nn.max_pool(input_op, ksize=window, strides=strides,\n",
    "                          padding='VALID', name=name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\ndef full_connection(input_op, channel_out, name):\\n    channel_in = input_op.get_shape()[-1].value\\n    with tf.name_scope(name) as scope:\\n        weight = tf.Variable(tf.truncated_normal([channel_in, channel_out],mean=0,\\n                                                  dtype=tf.float32, stddev=0.1),\\n                                                  collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\n        #weight = tf.get_variable(shape=[channel_in, channel_out], dtype=tf.float32,\\n        #                         initializer=xavier_initializer_conv2d(), name=scope + 'weight')\\n        bias = tf.Variable(tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32), name='bias')\\n        input_op_reshape = tf.reshape(input_op, [-1, 7 * 7 * 64])\\n        fc = tf.nn.relu(tf.matmul(input_op_reshape, weight) + bias)\\n        return fc\\n\""
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Fully connected layer — dead code kept as a triple-quoted string literal.\n",
    "# The echoed string is this cell's recorded output, so the body is left untouched;\n",
    "# the live fc layers are built explicitly in a later cell.\n",
    "'''\n",
    "def full_connection(input_op, channel_out, name):\n",
    "    channel_in = input_op.get_shape()[-1].value\n",
    "    with tf.name_scope(name) as scope:\n",
    "        weight = tf.Variable(tf.truncated_normal([channel_in, channel_out],mean=0,\n",
    "                                                  dtype=tf.float32, stddev=0.1),\n",
    "                                                  collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "        #weight = tf.get_variable(shape=[channel_in, channel_out], dtype=tf.float32,\n",
    "        #                         initializer=xavier_initializer_conv2d(), name=scope + 'weight')\n",
    "        bias = tf.Variable(tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32), name='bias')\n",
    "        input_op_reshape = tf.reshape(input_op, [-1, 7 * 7 * 64])\n",
    "        fc = tf.nn.relu(tf.matmul(input_op_reshape, weight) + bias)\n",
    "        return fc\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Conv layer 1: 5x5 kernel, 32 channels, stride 1 -> output 28*28*32 (SAME padding)\n",
    "conv1=conv_op(x_image,filter_size=5,channel_out=32,step=1,name='conv1')\n",
    "# Pool layer 1: 2x2 window, stride 2 -> output 14*14*32 (original comment said 14*14*28, which was wrong)\n",
    "pool1=maxPool_op(conv1,filter_size=2,step=2,name='pool1')\n",
    "# Conv layer 2: 5x5 kernel, 64 channels, stride 1 -> output 14*14*64 (original comment said 28*28*64, which was wrong)\n",
    "conv2=conv_op(pool1,filter_size=5,channel_out=64,step=1,name='conv2')\n",
    "# Pool layer 2: 2x2 window, stride 2 -> output 7*7*64\n",
    "pool2=maxPool_op(conv2,filter_size=2,step=2,name='pool2')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fully connected layer 1: maps the 7*7*64 pooled feature maps to 1024 features.\n",
    "with tf.name_scope('fc1'):\n",
    "  W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1),                      \n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "  b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))\n",
    "# Flatten pool2's output to [batch, 7*7*64] before the matmul.\n",
    "  h_pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n",
    "  h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "# Dropout - controls the complexity of the model, prevents co-adaptation of\n",
    "# features.\n",
    "with tf.name_scope('dropout'):\n",
    "  keep_prob = tf.placeholder(tf.float32)\n",
    "  h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "# Map the 1024 features to 10 classes, one for each digit\n",
    "# Same pattern as fc1; note there is deliberately no activation here —\n",
    "# softmax is applied inside the cross-entropy loss op in the training cell.\n",
    "with tf.name_scope('fc2'):\n",
    "  W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1),\n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "  b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))\n",
    "  y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configure L2 regularization.\n",
    "REGULARIZATION_RATE = 0.0001 # empirically reasonable penalty weight\n",
    "\n",
    "regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # L2 regularization loss function\n",
    "#regularization = regularizer(weights1) + regularizer(weights2)  # manual per-weight alternative, superseded by summing over the 'WEIGHTS' collection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 0.792896, l2_loss: 1.265902, total loss: 2.058797\n",
      "0.69\n",
      "step 200, entropy loss: 0.515871, l2_loss: 1.265673, total loss: 1.781544\n",
      "0.88\n",
      "step 300, entropy loss: 0.611102, l2_loss: 1.265435, total loss: 1.876537\n",
      "0.86\n",
      "step 400, entropy loss: 0.510548, l2_loss: 1.265196, total loss: 1.775743\n",
      "0.9\n",
      "step 500, entropy loss: 0.270574, l2_loss: 1.264953, total loss: 1.535527\n",
      "0.88\n",
      "step 600, entropy loss: 0.347528, l2_loss: 1.264710, total loss: 1.612238\n",
      "0.92\n",
      "step 700, entropy loss: 0.244095, l2_loss: 1.264466, total loss: 1.508561\n",
      "0.97\n",
      "step 800, entropy loss: 0.164760, l2_loss: 1.264219, total loss: 1.428979\n",
      "0.93\n",
      "step 900, entropy loss: 0.253192, l2_loss: 1.263977, total loss: 1.517169\n",
      "0.96\n",
      "step 1000, entropy loss: 0.240088, l2_loss: 1.263731, total loss: 1.503819\n",
      "0.9\n",
      "0.9382\n",
      "step 1100, entropy loss: 0.201718, l2_loss: 1.263484, total loss: 1.465202\n",
      "0.91\n",
      "step 1200, entropy loss: 0.255347, l2_loss: 1.263239, total loss: 1.518586\n",
      "0.93\n",
      "step 1300, entropy loss: 0.085614, l2_loss: 1.262994, total loss: 1.348608\n",
      "0.96\n",
      "step 1400, entropy loss: 0.227837, l2_loss: 1.262747, total loss: 1.490584\n",
      "0.92\n",
      "step 1500, entropy loss: 0.095459, l2_loss: 1.262499, total loss: 1.357958\n",
      "0.96\n",
      "step 1600, entropy loss: 0.144603, l2_loss: 1.262251, total loss: 1.406853\n",
      "0.95\n",
      "step 1700, entropy loss: 0.121043, l2_loss: 1.262004, total loss: 1.383047\n",
      "0.95\n",
      "step 1800, entropy loss: 0.194946, l2_loss: 1.261759, total loss: 1.456705\n",
      "0.94\n",
      "step 1900, entropy loss: 0.085889, l2_loss: 1.261511, total loss: 1.347400\n",
      "0.94\n",
      "step 2000, entropy loss: 0.096265, l2_loss: 1.261266, total loss: 1.357531\n",
      "1.0\n",
      "0.9543\n",
      "step 2100, entropy loss: 0.314426, l2_loss: 1.261019, total loss: 1.575445\n",
      "0.93\n",
      "step 2200, entropy loss: 0.133978, l2_loss: 1.260772, total loss: 1.394749\n",
      "0.98\n",
      "step 2300, entropy loss: 0.239320, l2_loss: 1.260523, total loss: 1.499844\n",
      "0.94\n",
      "step 2400, entropy loss: 0.216153, l2_loss: 1.260276, total loss: 1.476428\n",
      "0.95\n",
      "step 2500, entropy loss: 0.048578, l2_loss: 1.260026, total loss: 1.308604\n",
      "0.99\n",
      "step 2600, entropy loss: 0.070589, l2_loss: 1.259782, total loss: 1.330371\n",
      "0.97\n",
      "step 2700, entropy loss: 0.108212, l2_loss: 1.259536, total loss: 1.367747\n",
      "0.97\n",
      "step 2800, entropy loss: 0.067973, l2_loss: 1.259287, total loss: 1.327261\n",
      "0.96\n",
      "step 2900, entropy loss: 0.103888, l2_loss: 1.259040, total loss: 1.362927\n",
      "0.95\n",
      "step 3000, entropy loss: 0.049602, l2_loss: 1.258794, total loss: 1.308396\n",
      "1.0\n",
      "0.9641\n",
      "step 3100, entropy loss: 0.116952, l2_loss: 1.258547, total loss: 1.375499\n",
      "0.97\n",
      "step 3200, entropy loss: 0.148510, l2_loss: 1.258299, total loss: 1.406809\n",
      "0.93\n",
      "step 3300, entropy loss: 0.063093, l2_loss: 1.258051, total loss: 1.321144\n",
      "0.99\n",
      "step 3400, entropy loss: 0.188302, l2_loss: 1.257805, total loss: 1.446107\n",
      "0.97\n",
      "step 3500, entropy loss: 0.126167, l2_loss: 1.257559, total loss: 1.383726\n",
      "0.97\n",
      "step 3600, entropy loss: 0.096384, l2_loss: 1.257312, total loss: 1.353696\n",
      "0.97\n",
      "step 3700, entropy loss: 0.071560, l2_loss: 1.257065, total loss: 1.328625\n",
      "0.99\n",
      "step 3800, entropy loss: 0.071299, l2_loss: 1.256816, total loss: 1.328116\n",
      "0.97\n",
      "step 3900, entropy loss: 0.096551, l2_loss: 1.256570, total loss: 1.353122\n",
      "0.98\n",
      "step 4000, entropy loss: 0.033932, l2_loss: 1.256324, total loss: 1.290256\n",
      "0.99\n",
      "0.9724\n",
      "step 4100, entropy loss: 0.176011, l2_loss: 1.256078, total loss: 1.432089\n",
      "0.97\n",
      "step 4200, entropy loss: 0.078993, l2_loss: 1.255829, total loss: 1.334822\n",
      "0.98\n",
      "step 4300, entropy loss: 0.121104, l2_loss: 1.255584, total loss: 1.376688\n",
      "0.95\n",
      "step 4400, entropy loss: 0.051040, l2_loss: 1.255337, total loss: 1.306377\n",
      "0.98\n",
      "step 4500, entropy loss: 0.138347, l2_loss: 1.255090, total loss: 1.393437\n",
      "0.99\n",
      "step 4600, entropy loss: 0.095584, l2_loss: 1.254842, total loss: 1.350426\n",
      "0.96\n",
      "step 4700, entropy loss: 0.111830, l2_loss: 1.254594, total loss: 1.366424\n",
      "0.96\n",
      "step 4800, entropy loss: 0.034035, l2_loss: 1.254346, total loss: 1.288381\n",
      "1.0\n",
      "step 4900, entropy loss: 0.103972, l2_loss: 1.254102, total loss: 1.358074\n",
      "0.97\n",
      "step 5000, entropy loss: 0.046576, l2_loss: 1.253856, total loss: 1.300432\n",
      "0.97\n",
      "0.9733\n",
      "step 5100, entropy loss: 0.018847, l2_loss: 1.253614, total loss: 1.272461\n",
      "0.99\n",
      "step 5200, entropy loss: 0.185401, l2_loss: 1.253365, total loss: 1.438766\n",
      "0.95\n",
      "step 5300, entropy loss: 0.055189, l2_loss: 1.253120, total loss: 1.308308\n",
      "0.98\n",
      "step 5400, entropy loss: 0.058004, l2_loss: 1.252874, total loss: 1.310878\n",
      "0.97\n",
      "step 5500, entropy loss: 0.076415, l2_loss: 1.252624, total loss: 1.329039\n",
      "0.97\n",
      "step 5600, entropy loss: 0.158700, l2_loss: 1.252380, total loss: 1.411080\n",
      "0.96\n",
      "step 5700, entropy loss: 0.025736, l2_loss: 1.252135, total loss: 1.277871\n",
      "1.0\n",
      "step 5800, entropy loss: 0.065188, l2_loss: 1.251888, total loss: 1.317076\n",
      "0.96\n",
      "step 5900, entropy loss: 0.115090, l2_loss: 1.251641, total loss: 1.366731\n",
      "0.98\n",
      "step 6000, entropy loss: 0.041581, l2_loss: 1.251394, total loss: 1.292975\n",
      "0.97\n",
      "0.9754\n",
      "step 6100, entropy loss: 0.068004, l2_loss: 1.251150, total loss: 1.319154\n",
      "0.98\n",
      "step 6200, entropy loss: 0.158556, l2_loss: 1.250906, total loss: 1.409461\n",
      "0.97\n",
      "step 6300, entropy loss: 0.045907, l2_loss: 1.250659, total loss: 1.296565\n",
      "0.98\n",
      "step 6400, entropy loss: 0.056972, l2_loss: 1.250412, total loss: 1.307384\n",
      "0.96\n",
      "step 6500, entropy loss: 0.127241, l2_loss: 1.250164, total loss: 1.377405\n",
      "0.96\n",
      "step 6600, entropy loss: 0.060519, l2_loss: 1.249918, total loss: 1.310436\n",
      "0.98\n",
      "step 6700, entropy loss: 0.057529, l2_loss: 1.249673, total loss: 1.307203\n",
      "0.99\n",
      "step 6800, entropy loss: 0.084604, l2_loss: 1.249427, total loss: 1.334031\n",
      "0.97\n",
      "step 6900, entropy loss: 0.060129, l2_loss: 1.249183, total loss: 1.309312\n",
      "0.97\n",
      "step 7000, entropy loss: 0.052583, l2_loss: 1.248938, total loss: 1.301520\n",
      "0.99\n",
      "0.9789\n",
      "step 7100, entropy loss: 0.191953, l2_loss: 1.248690, total loss: 1.440643\n",
      "0.97\n",
      "step 7200, entropy loss: 0.032243, l2_loss: 1.248447, total loss: 1.280690\n",
      "0.99\n",
      "step 7300, entropy loss: 0.129411, l2_loss: 1.248201, total loss: 1.377612\n",
      "0.95\n",
      "step 7400, entropy loss: 0.101038, l2_loss: 1.247955, total loss: 1.348993\n",
      "0.98\n",
      "step 7500, entropy loss: 0.041962, l2_loss: 1.247710, total loss: 1.289672\n",
      "1.0\n",
      "step 7600, entropy loss: 0.052809, l2_loss: 1.247465, total loss: 1.300274\n",
      "0.98\n",
      "step 7700, entropy loss: 0.027452, l2_loss: 1.247220, total loss: 1.274672\n",
      "0.99\n",
      "step 7800, entropy loss: 0.029444, l2_loss: 1.246976, total loss: 1.276420\n",
      "0.98\n",
      "step 7900, entropy loss: 0.077849, l2_loss: 1.246731, total loss: 1.324580\n",
      "0.97\n",
      "step 8000, entropy loss: 0.082025, l2_loss: 1.246486, total loss: 1.328511\n",
      "0.97\n",
      "0.98\n",
      "step 8100, entropy loss: 0.107061, l2_loss: 1.246240, total loss: 1.353301\n",
      "0.97\n",
      "step 8200, entropy loss: 0.080967, l2_loss: 1.245996, total loss: 1.326962\n",
      "0.94\n",
      "step 8300, entropy loss: 0.069680, l2_loss: 1.245747, total loss: 1.315427\n",
      "0.96\n",
      "step 8400, entropy loss: 0.115319, l2_loss: 1.245502, total loss: 1.360820\n",
      "0.95\n",
      "step 8500, entropy loss: 0.109377, l2_loss: 1.245257, total loss: 1.354634\n",
      "0.96\n",
      "step 8600, entropy loss: 0.046945, l2_loss: 1.245010, total loss: 1.291955\n",
      "0.97\n",
      "step 8700, entropy loss: 0.080181, l2_loss: 1.244762, total loss: 1.324943\n",
      "0.97\n",
      "step 8800, entropy loss: 0.133952, l2_loss: 1.244517, total loss: 1.378469\n",
      "0.99\n",
      "step 8900, entropy loss: 0.057536, l2_loss: 1.244272, total loss: 1.301809\n",
      "0.98\n",
      "step 9000, entropy loss: 0.049372, l2_loss: 1.244030, total loss: 1.293402\n",
      "0.97\n",
      "0.9815\n",
      "step 9100, entropy loss: 0.057130, l2_loss: 1.243784, total loss: 1.300915\n",
      "0.98\n",
      "step 9200, entropy loss: 0.032342, l2_loss: 1.243540, total loss: 1.275882\n",
      "1.0\n",
      "step 9300, entropy loss: 0.103621, l2_loss: 1.243292, total loss: 1.346914\n",
      "0.98\n",
      "step 9400, entropy loss: 0.037994, l2_loss: 1.243047, total loss: 1.281042\n",
      "0.97\n",
      "step 9500, entropy loss: 0.029367, l2_loss: 1.242803, total loss: 1.272169\n",
      "0.98\n",
      "step 9600, entropy loss: 0.029749, l2_loss: 1.242559, total loss: 1.272308\n",
      "0.98\n",
      "step 9700, entropy loss: 0.052286, l2_loss: 1.242313, total loss: 1.294599\n",
      "0.98\n",
      "step 9800, entropy loss: 0.011681, l2_loss: 1.242069, total loss: 1.253750\n",
      "1.0\n",
      "step 9900, entropy loss: 0.058598, l2_loss: 1.241824, total loss: 1.300422\n",
      "0.99\n",
      "step 10000, entropy loss: 0.097026, l2_loss: 1.241579, total loss: 1.338605\n",
      "0.97\n",
      "0.9818\n"
     ]
    }
   ],
   "source": [
    "# Cross-entropy loss; softmax is applied internally to the raw logits y.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "# Sum the L2 penalty over every variable registered in the 'WEIGHTS' collection.\n",
    "regularization = 0.0\n",
    "for w in tf.get_collection('WEIGHTS'):\n",
    "    regularization = regularization + regularizer(w)\n",
    "l2_loss = regularization\n",
    "total_loss = cross_entropy + l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "# Build the evaluation ops ONCE, before the loop. (Bug fix: the original\n",
    "# created tf.equal/tf.reduce_mean inside the loop, adding new nodes to the\n",
    "# graph every 100 steps.)\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "# Train: 10000 mini-batches of 100 with a fixed learning rate of 0.01.\n",
    "for step in range(10000):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "  lr = 0.01\n",
    "  # keep_prob=0.5 enables dropout during training only.\n",
    "  _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss],\n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate: lr, keep_prob: 0.5})\n",
    "\n",
    "  if (step+1) % 100 == 0:\n",
    "    print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %\n",
    "            (step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Bug fix: evaluate with dropout disabled (keep_prob=1.0). The original\n",
    "    # fed 0.5, randomly dropping units at test time and skewing accuracy.\n",
    "    print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 1.0}))\n",
    "  if (step+1) % 1000 == 0:\n",
    "    print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels, keep_prob: 1.0}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "心得与小结:\n",
    "本小节使用了两个卷积层，两个池化层，两个全连接层，对权重参数设置了L2正则，设置正则参数为0.0001，设置学习率为0.01，设置Dropout的概率为0.5，这里可以看到输出结果收敛比较慢，在step=8000的时候，在测试集上的准确率才达到98%，后续调整相关参数，寻找合适的参数设置。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
