{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./MNIST_data\\train-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting ./MNIST_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST_data\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "#导入tensorflow 与 MNIST数据集\n",
    "import tensorflow as tf\n",
    "import tensorflow.examples.tutorials.mnist.input_data as input_data\n",
    "data_dir = './MNIST_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_images: (55000, 784) \n",
      "train_labels: (55000, 10) \n",
      "validation_images: (5000, 784) \n",
      "validation_labels: (5000, 10) \n",
      "test_images: (10000, 784) \n",
      "test_labels: (10000, 10)\n"
     ]
    }
   ],
   "source": [
     "# Quick sanity check: print the shapes of the train / validation / test splits.\n",
     "print(\"train_images:\", mnist.train.images.shape, \"\\ntrain_labels:\", mnist.train.labels.shape, \"\\nvalidation_images:\", mnist.validation.images.shape, \"\\nvalidation_labels:\", mnist.validation.labels.shape, \"\\ntest_images:\", mnist.test.images.shape, \"\\ntest_labels:\", mnist.test.labels.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Two weight initializers with different distributions. Each variable is also\n",
     "# registered in a custom 'WEIGHTS' collection so the L2 regularization terms\n",
     "# can gather all weights later via tf.get_collection('WEIGHTS').\n",
     "def init1_weight(shape):\n",
     "    # Truncated-normal initialization, stddev 0.1.\n",
     "    return tf.Variable(tf.truncated_normal(shape, stddev = 0.1), collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
     "\n",
     "def init2_weight(shape):\n",
     "    # Uniform initialization on [-1, 1).\n",
     "     return tf.Variable(tf.random_uniform(shape, minval = -1, maxval = 1), collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
     "\n",
     "# Bias helper: constant 0.1 of the given shape.\n",
     "def bias(shape):\n",
     "    return tf.Variable(tf.constant(0.1, shape = shape), name = \"b\")\n",
     "\n",
     "# 2-D convolution, stride 1, 'SAME' (zero) padding — output keeps spatial size.\n",
     "def conv2d(x, W):\n",
     "    return tf.nn.conv2d(x, W, strides = [1,1,1,1], padding = 'SAME')\n",
     "\n",
     "# 2x2 max pooling with stride 2 ('SAME' padding) — halves each spatial dim.\n",
     "def max_pool_2x2(x):\n",
     "    return tf.nn.max_pool(x, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Build the graph, grouping ops into name scopes (one per network stage)\n",
     "# so the TensorBoard graph stays readable. Two parallel conv branches share\n",
     "# the dense layers (W3/b3, W4/b4).\n",
     "\n",
     "with tf.name_scope(\"Input_Layer\"):\n",
     "    x = tf.placeholder(\"float\", shape = [None, 784])\n",
     "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
     "\n",
     "# First conv layer: two branches with different kernel sizes and counts.\n",
     "with tf.name_scope(\"C1_Conv\"):\n",
     "    W1_1 = init1_weight([12,12,1,36]) # branch 1: 12x12 kernels, 36 filters\n",
     "    W1_2 = init2_weight([8,8,1,20]) # branch 2: 8x8 kernels, 20 filters\n",
     "    b1_1 = bias([36]) \n",
     "    b1_2 = bias([20])\n",
     "    Conv1_1 = conv2d(x_image, W1_1) + b1_1 \n",
     "    Conv1_2 = conv2d(x_image, W1_2) + b1_2\n",
     "    C1_Conv_1 = tf.nn.relu(Conv1_1)\n",
     "    C1_Conv_2 = tf.nn.relu(Conv1_2)\n",
     "\n",
     "# First pooling: max-pool each branch's ReLU feature maps (28x28 -> 14x14).\n",
     "with tf.name_scope(\"C1_Pool\"):\n",
     "    C1_Pool_1 = max_pool_2x2(C1_Conv_1)\n",
     "    C1_Pool_2 = max_pool_2x2(C1_Conv_2)\n",
     "\n",
     "# Second conv layer: same kernel size in both branches; two variables are\n",
     "# needed only because the input depths differ (36 vs 20) after layer 1.\n",
     "with tf.name_scope(\"C2_Conv\"):\n",
     "    W2_1 = init1_weight([5, 5, 36, 18]) # 5x5 kernels, 18 filters\n",
     "    W2_2 = init2_weight([5, 5, 20, 18]) # 5x5 kernels, 18 filters\n",
     "    b2 = bias([18])\n",
     "    Conv2_1 = conv2d(C1_Pool_1, W2_1) + b2\n",
     "    Conv2_2 = conv2d(C1_Pool_2, W2_2) + b2\n",
     "    C2_Conv_1 = tf.nn.relu(Conv2_1)\n",
     "    C2_Conv_2 = tf.nn.relu(Conv2_2)\n",
     "\n",
     "# Second pooling (14x14 -> 7x7).\n",
     "with tf.name_scope(\"C2_Pool\"):\n",
     "    C2_Pool_1 = max_pool_2x2(C2_Conv_1)\n",
     "    C2_Pool_2 = max_pool_2x2(C2_Conv_2)\n",
     "\n",
     "# Flatten: 7 * 7 * 18 = 882 features per example after the two poolings.\n",
     "with tf.name_scope(\"D_Flat\"):\n",
     "    D_Flat_1 = tf.reshape(C2_Pool_1, [-1, 882])\n",
     "    D_Flat_2 = tf.reshape(C2_Pool_2, [-1, 882])\n",
     "\n",
     "# Fully connected layer; W3/b3 are shared by both branches.\n",
     "with tf.name_scope(\"D_Hidden_Layer\"):\n",
     "    W3 = init1_weight([882, 128])\n",
     "    b3 = bias([128])\n",
     "    D_Hidden_1 = tf.nn.relu(tf.matmul(D_Flat_1, W3) + b3)\n",
     "    D_Hidden_2 = tf.nn.relu(tf.matmul(D_Flat_2, W3) + b3)\n",
     "    D_Hidden_Dropout_1 = tf.nn.dropout(D_Hidden_1, keep_prob = 0.7) # keep rate 0.7 (0.5 is more common); NOTE(review): fixed keep_prob means dropout also fires at evaluation time\n",
     "    D_Hidden_Dropout_2 = tf.nn.dropout(D_Hidden_2, keep_prob = 0.7) # keep rate 0.7 (0.5 is more common); NOTE(review): fixed keep_prob means dropout also fires at evaluation time\n",
     "\n",
     "# Output layer (logits); W4/b4 shared by both branches.\n",
     "with tf.name_scope(\"Output_Layer\"):\n",
     "    W4 = init1_weight([128, 10])\n",
     "    b4 = bias([10])\n",
     "    y_predict_1 = tf.matmul(D_Hidden_Dropout_1, W4) + b4\n",
     "    y_predict_2 = tf.matmul(D_Hidden_Dropout_2, W4) + b4\n",
     "\n",
     "# Losses and optimizers for the nodes above.\n",
     "with tf.name_scope(\"Optimizer\"):\n",
     "    y_label = tf.placeholder(\"float32\", [None, 10], name = \"y_label\")\n",
     "    loss_function_1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = y_predict_1, labels = y_label)) # cross-entropy on branch-1 logits\n",
     "    loss_function_2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = y_predict_2, labels = y_label)) # cross-entropy on branch-2 logits\n",
     "    l2_loss_1 = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] ) # L2 penalty over every variable in the 'WEIGHTS' collection\n",
     "    l2_loss_2 = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] ) # NOTE(review): identical expression to l2_loss_1 — both sum the whole collection, including the other branch's weights\n",
     "    total_loss_1_1 = loss_function_1 + 5e-5*l2_loss_1 # four total losses pairing the two data losses with different L2 factors;\n",
     "    total_loss_1_2 = loss_function_1 + 1e-5*l2_loss_1 # downstream code must keep the chosen loss paired with its y_predict branch.\n",
     "    total_loss_2_1 = loss_function_2 + 7e-5*l2_loss_2\n",
     "    total_loss_2_2 = loss_function_2 + 1e-5*l2_loss_2\n",
     "    optimizer_1 = tf.train.AdamOptimizer(learning_rate = 0.0001).minimize(total_loss_1_1) # five optimizers over different learning rates / total losses; keep optimizer, loss and accuracy paired when training\n",
     "    optimizer_2 = tf.train.AdamOptimizer(learning_rate = 0.0001).minimize(total_loss_1_2)\n",
     "    optimizer_3 = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(total_loss_1_1)\n",
     "    optimizer_4 = tf.train.AdamOptimizer(learning_rate = 0.0005).minimize(total_loss_2_1)\n",
     "    optimizer_5 = tf.train.AdamOptimizer(learning_rate = 0.0005).minimize(total_loss_2_2)\n",
     "\n",
     "# Evaluation metric: classification accuracy, one per branch.\n",
     "with tf.name_scope(\"Model_Evaluation\"):\n",
     "    correct_prediction_1 = tf.equal(tf.argmax(y_predict_1, 1), tf.argmax(y_label, 1))\n",
     "    correct_prediction_2 = tf.equal(tf.argmax(y_predict_2, 1), tf.argmax(y_label, 1))\n",
     "    accuracy_1 = tf.reduce_mean(tf.cast(correct_prediction_1, \"float\"))\n",
     "    accuracy_2 = tf.reduce_mean(tf.cast(correct_prediction_2, \"float\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The results are based on optimizer_1, and accuracy_1\n",
      "Train Epoch:  05 \n",
      "Loss= 0.171269447   Accuracy= 0.9684 \n",
      "=======================================\n",
      "Train Epoch:  10 \n",
      "Loss= 0.110592857   Accuracy= 0.978 \n",
      "=======================================\n",
      "Train Epoch:  15 \n",
      "Loss= 0.088352799   Accuracy= 0.9836 \n",
      "=======================================\n",
      "Train Epoch:  20 \n",
      "Loss= 0.072316125   Accuracy= 0.9874 \n",
      "=======================================\n",
      "Train Epoch:  25 \n",
      "Loss= 0.068444304   Accuracy= 0.9878 \n",
      "=======================================\n",
      "Train Epoch:  30 \n",
      "Loss= 0.063741088   Accuracy= 0.9892 \n",
      "=======================================\n",
      "\n",
      "Accuracy for test datasets with optimizer_1: 0.9886\n"
     ]
    }
   ],
   "source": [
     "# Training run 1: 30 epochs, batch size 100.\n",
     "trainEpoch = 30\n",
     "batchSize = 100\n",
     "totalBatchs = int(mnist.train.num_examples/batchSize)\n",
     "loss_list = []\n",
     "accuracy_list = []\n",
     "epoch_list = []\n",
     "\n",
     "# The first run trains with optimizer_1 and evaluates with accuracy_1\n",
     "# (both belong to branch 1, matching total_loss_1_1).\n",
     "print(\"The results are based on optimizer_1, and accuracy_1\")\n",
     "\n",
     "with tf.Session() as sess:\n",
     "    sess.run(tf.global_variables_initializer())\n",
     "\n",
     "\n",
     "    for epoch in range(trainEpoch):\n",
     "        for i in range(totalBatchs):\n",
     "            batch_xs, batch_ys = mnist.train.next_batch(batchSize)\n",
     "            sess.run(optimizer_1, feed_dict = {x: batch_xs, y_label: batch_ys}) # train on mini-batches\n",
     "        loss, acc = sess.run([total_loss_1_1, accuracy_1], feed_dict = {x: mnist.validation.images, y_label: mnist.validation.labels}) # evaluate the current model on the validation set\n",
     "        epoch_list.append(epoch)\n",
     "        loss_list.append(loss)\n",
     "        accuracy_list.append(acc)\n",
     "        if (epoch+1)%5 == 0: # report validation loss/accuracy every 5 epochs\n",
     "            print(\"Train Epoch: \", \"%02d\" %(epoch+1), \"\\nLoss=\", \"{:.9f}\".format(loss), \"  Accuracy=\", acc, \"\\n=======================================\")\n",
     "\n",
     "    print(\"\\nAccuracy for test datasets with optimizer_1:\", sess.run(accuracy_1, feed_dict = {x: mnist.test.images, y_label: mnist.test.labels}))        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "第一个模型在测试集上得到的正确率为98.86%，其第一层使用的卷积核尺寸为12x12，卷积核数量为36；第二层使用的卷积核尺寸为5x5，卷积核数量为18；两层卷积使用的池化均为2x2，非线性转换均为relu函数，在全连接层使用了保留率为0.7的Dropout，l2正则化设定的因子为 5e-5，训练的学习率为0.0001."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The results are based on optimizer_5, and accuracy_2\n",
      "Train Epoch:  05 \n",
      "Loss= 0.205536664   Accuracy= 0.937 \n",
      "=======================================\n",
      "Train Epoch:  10 \n",
      "Loss= 0.127845809   Accuracy= 0.9666 \n",
      "=======================================\n",
      "Train Epoch:  15 \n",
      "Loss= 0.109259069   Accuracy= 0.9758 \n",
      "=======================================\n",
      "Train Epoch:  20 \n",
      "Loss= 0.104023442   Accuracy= 0.979 \n",
      "=======================================\n",
      "Train Epoch:  25 \n",
      "Loss= 0.105037309   Accuracy= 0.9804 \n",
      "=======================================\n",
      "Train Epoch:  30 \n",
      "Loss= 0.097015247   Accuracy= 0.982 \n",
      "=======================================\n",
      "\n",
      "Accuracy for test datasets with optimizer_5: 0.9836\n"
     ]
    }
   ],
   "source": [
    "#设置周期为30，batchSize为100的训练\n",
    "trainEpoch = 30\n",
    "batchSize = 100\n",
    "totalBatchs = int(mnist.train.num_examples/batchSize)\n",
    "loss_list = []\n",
    "accuracy_list = []\n",
    "epoch_list = []\n",
    "\n",
    "# 第二个训练是基于基于optimizer_5优化器的，并使用accuracy_2作为评估指标的模型\n",
    "print(\"The results are based on optimizer_5, and accuracy_2\")\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "\n",
    "\n",
    "    for epoch in range(trainEpoch):\n",
    "        for i in range(totalBatchs):\n",
    "            batch_xs, batch_ys = mnist.train.next_batch(batchSize)\n",
    "            sess.run(optimizer_5, feed_dict = {x: batch_xs, y_label: batch_ys}) #使用训练样本训练模型\n",
    "        loss, acc = sess.run([total_loss_2_2, accuracy_2], feed_dict = {x: mnist.validation.images, y_label: mnist.validation.labels}) #设置周期为30，batchSize为100的训练\n",
    "        if (epoch+1)%5 == 0: #每5个周期的训练结束后，打印出模型在验证集上的损失与正确率\n",
    "            print(\"Train Epoch: \", \"%02d\" %(epoch+1), \"\\nLoss=\", \"{:.9f}\".format(loss), \"  Accuracy=\", acc, \"\\n=======================================\")\n",
    "\n",
    "    print(\"\\nAccuracy for test datasets with optimizer_5:\", sess.run(accuracy_2, feed_dict = {x: mnist.test.images, y_label: mnist.test.labels}))        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "第二个模型在测试集上得到的正确率为98.36%，与第一个模型的性能相差不大。\n",
    "\n",
    "与第一个模型设置的参数不同的有：\n",
    "1. 第一层使用的卷积核尺寸改为8x8，卷积数量改为20；\n",
    "2. 第一层使用的初始化分布函数不同；\n",
    "3. l2正则化设定的因子改为 1e-5；\n",
    "4. 学习率改为0.0005；\n",
    "\n",
    "与第一个模型保持相同的参数有：\n",
    "1. 第二层的卷积核尺寸为5x5，卷积核数量为18；\n",
    "2. 两层卷积使用的池化参数均为2x2；\n",
    "3. 非线性转换均为relu函数；\n",
    "4. 在全连接层使用为保留率为0.7的Dropout；"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The results are based on optimizer_4, and accuracy_2\n",
      "Train Epoch:  05 \n",
      "Loss= 0.325660080   Accuracy= 0.95 \n",
      "=======================================\n",
      "Train Epoch:  10 \n",
      "Loss= 0.241151780   Accuracy= 0.9712 \n",
      "=======================================\n",
      "Train Epoch:  15 \n",
      "Loss= 0.210552841   Accuracy= 0.9802 \n",
      "=======================================\n",
      "Train Epoch:  20 \n",
      "Loss= 0.203144327   Accuracy= 0.9806 \n",
      "=======================================\n",
      "Train Epoch:  25 \n",
      "Loss= 0.188240707   Accuracy= 0.9832 \n",
      "=======================================\n",
      "Train Epoch:  30 \n",
      "Loss= 0.175604194   Accuracy= 0.985 \n",
      "=======================================\n",
      "\n",
      "Accuracy for test datasets with optimizer_4: 0.0974\n"
     ]
    }
   ],
   "source": [
    "#设置周期为30，batchSize为100的训练\n",
    "trainEpoch = 30\n",
    "batchSize = 100\n",
    "totalBatchs = int(mnist.train.num_examples/batchSize)\n",
    "loss_list = []\n",
    "accuracy_list = []\n",
    "epoch_list = []\n",
    "\n",
    "# 第三个训练是基于基于optimizer_4优化器的，并使用accuracy_2作为评估指标的模型\n",
    "print(\"The results are based on optimizer_4, and accuracy_2\")\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "\n",
    "\n",
    "    for epoch in range(trainEpoch):\n",
    "        for i in range(totalBatchs):\n",
    "            batch_xs, batch_ys = mnist.train.next_batch(batchSize)\n",
    "            sess.run(optimizer_4, feed_dict = {x: batch_xs, y_label: batch_ys}) #使用训练样本训练模型\n",
    "        loss, acc = sess.run([total_loss_2_1, accuracy_2], feed_dict = {x: mnist.validation.images, y_label: mnist.validation.labels})\n",
    "        epoch_list.append(epoch)\n",
    "        loss_list.append(loss)\n",
    "        accuracy_list.append(acc)\n",
    "        if (epoch+1)%5 == 0: #每5个周期的训练结束后，打印出模型在验证集上的损失与正确率\n",
    "            print(\"Train Epoch: \", \"%02d\" %(epoch+1), \"\\nLoss=\", \"{:.9f}\".format(loss), \"  Accuracy=\", acc, \"\\n=======================================\")\n",
    "\n",
    "    print(\"\\nAccuracy for test datasets with optimizer_4:\", sess.run(accuracy_1, feed_dict = {x: mnist.test.images, y_label: mnist.test.labels}))        "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "第三个模型打印的测试集正确率为0.0974（约为随机猜测水平），但这并不代表模型性能：测试时误用了accuracy_1，而optimizer_4只训练了分支2（accuracy_2对应的分支），分支1的权重仍为初始值。从验证集的结果（约98.5%）看，该模型的实际性能与前两个模型相近；改用accuracy_2重新评估即可得到正确的测试集正确率。\n",
    "\n",
    "与第一个模型设置不同的包括（不仅限于）：\n",
    "\n",
    "1. 第一层使用的卷积核尺寸改为8x8，卷积数量改为20；\n",
    "2. 第一层使用的初始化分布函数不同；\n",
    "3. l2正则化设定的因子改为 7e-5；\n",
    "4. 学习率改为0.0005；\n",
    "\n",
    "与第一个模型保持相同的包括（但不仅限于）：\n",
    "1. 第二层的卷积核尺寸为5x5，卷积核数量为18；\n",
    "2. 两层卷积使用的池化参数均为2x2；\n",
    "3. 非线性转换均为relu函数；\n",
    "4. 在全连接层使用为保留率为0.7的Dropout；"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The results are based on optimizer_new, and accuracy_new\n",
      "Train Epoch:  05 \n",
      "Loss= 0.146868527   Accuracy= 0.9734 \n",
      "=======================================\n",
      "Train Epoch:  10 \n",
      "Loss= 0.323647261   Accuracy= 0.9684 \n",
      "=======================================\n",
      "Train Epoch:  15 \n",
      "Loss= 0.359443218   Accuracy= 0.9706 \n",
      "=======================================\n",
      "Train Epoch:  20 \n",
      "Loss= 0.418189943   Accuracy= 0.9502 \n",
      "=======================================\n",
      "Train Epoch:  25 \n",
      "Loss= 0.512638390   Accuracy= 0.8998 \n",
      "=======================================\n",
      "Train Epoch:  30 \n",
      "Loss= 0.694482386   Accuracy= 0.9078 \n",
      "=======================================\n",
      "\n",
      "Accuracy for test datasets with optimizer_new: 0.9056\n"
     ]
    }
   ],
   "source": [
     "\n",
     "\"\"\"The models above varied kernel size/count, learning rate, L2 factor and\n",
     "weight initialization, but all used stride-1 convolutions. This model tries\n",
     "stride-2 convolutions instead, and drops the bias terms.\"\"\"\n",
     "\n",
     "x_new = tf.placeholder(\"float\", shape = [None, 784])\n",
     "x_image_new = tf.reshape(x_new, [-1, 28, 28, 1])\n",
     "\n",
     "def conv2d_new(x_new, W): # unlike the three models above, convolution stride is 2 here\n",
     "    return tf.nn.conv2d(x_new, W, strides = [1,2,2,1], padding = 'SAME')\n",
     "\n",
     "y_label_new = tf.placeholder(\"float32\", [None, 10], name = \"y_label_new\")\n",
     "\n",
     "W1_new = init1_weight([5, 5, 1, 64]) # layer 1: 64 kernels of size 5x5\n",
     "Conv1_new = conv2d_new(x_image_new, W1_new) # layer-1 convolution (28x28 -> 14x14)\n",
     "C1_Conv_new = tf.nn.relu(Conv1_new) # layer-1 non-linearity\n",
     "C1_Pool_new = max_pool_2x2(C1_Conv_new) # layer-1 pooling (14x14 -> 7x7)\n",
     "\n",
     "W2_new = init1_weight([3, 3, 64, 36]) # layer 2: 36 kernels of size 3x3\n",
     "Conv2_new = conv2d_new(C1_Pool_new, W2_new) # layer-2 convolution (7x7 -> 4x4)\n",
     "C2_Conv_new = tf.nn.relu(Conv2_new) # layer-2 non-linearity\n",
     "C2_Pool_new = max_pool_2x2(C2_Conv_new) # layer-2 pooling (4x4 -> 2x2)\n",
     "\n",
     "D_Flatten_new = tf.reshape(C2_Pool_new, [-1, 144]) # 2 * 2 * 36 = 144 features after the stride-2 convs + poolings\n",
     "W_Hidden_new = init1_weight([144, 300]) # fully connected layer weights (no bias in this model)\n",
     "D_Hidden_Relu = tf.nn.relu(tf.matmul(D_Flatten_new, W_Hidden_new))\n",
     "D_Hidden_Dropout_new = tf.nn.dropout(D_Hidden_Relu, keep_prob = 0.5) # keep rate 0.5; NOTE(review): fixed keep_prob also applies at evaluation time\n",
     "\n",
     "W3_new = init1_weight([300,10])\n",
     "y_predict_new = tf.matmul(D_Hidden_Dropout_new, W3_new)\n",
     "\n",
     "loss_function_new = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = y_predict_new, labels = y_label_new))\n",
     "optimizer_new = tf.train.RMSPropOptimizer(learning_rate = 0.007, decay = 0.8).minimize(loss_function_new) # RMSProp this time, unlike the Adam runs above\n",
     "\n",
     "correct_prediction_new = tf.equal(tf.argmax(y_predict_new, 1), tf.argmax(y_label_new, 1))\n",
     "accuracy_new = tf.reduce_mean(tf.cast(correct_prediction_new, \"float\"))\n",
     "\n",
     "\n",
     "trainEpoch = 30\n",
     "batchSize = 100\n",
     "totalBatchs = int(mnist.train.num_examples/batchSize)\n",
     "loss_list = []\n",
     "accuracy_list = []\n",
     "epoch_list = []\n",
     "\n",
     "# The fourth run trains with optimizer_new and evaluates with accuracy_new.\n",
     "print(\"The results are based on optimizer_new, and accuracy_new\")\n",
     "\n",
     "with tf.Session() as sess:\n",
     "    sess.run(tf.global_variables_initializer())\n",
     "\n",
     "\n",
     "    for epoch in range(trainEpoch):\n",
     "        for i in range(totalBatchs):\n",
     "            batch_xs, batch_ys = mnist.train.next_batch(batchSize)\n",
     "            sess.run(optimizer_new, feed_dict = {x_new: batch_xs, y_label_new: batch_ys})\n",
     "        loss, acc = sess.run([loss_function_new, accuracy_new], feed_dict = {x_new: mnist.validation.images, y_label_new: mnist.validation.labels})\n",
     "        epoch_list.append(epoch)\n",
     "        loss_list.append(loss)\n",
     "        accuracy_list.append(acc)\n",
     "        if (epoch+1)%5 == 0:\n",
     "            print(\"Train Epoch: \", \"%02d\" %(epoch+1), \"\\nLoss=\", \"{:.9f}\".format(loss), \"  Accuracy=\", acc, \"\\n=======================================\")\n",
     "\n",
     "    print(\"\\nAccuracy for test datasets with optimizer_new:\", sess.run(accuracy_new, feed_dict = {x_new: mnist.test.images, y_label_new: mnist.test.labels}))        \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
     "第四个模型在测试集上得到的正确率为90.56%，可以看出该模型的预测正确率不高。\n",
    "\n",
    "与第一个模型设置不同的包括（不仅限于）：\n",
    "\n",
    "1. 第一层使用的卷积核尺寸改为5x5，卷积核数量改为64；\n",
    "2. 第二层使用的卷积核尺寸改为3x3, 卷积核数量改为36；\n",
    "3. 卷积操作的步长由1改为2；\n",
    "4. 全连接层的神经元个数改为300；\n",
    "5. 未使用偏置；\n",
    "6. 未使用w权重的正则，仅使用用Dropout来减小过拟合的可能，Dropout的保留率改为0.5；\n",
    "7. 使用了优化器RMSPropOptimizer，并设置了不同的学习率（起始）；\n",
    "\n",
    "\n",
    "与第一个模型保持相同的包括（但不仅限于）：\n",
    "1. 两层卷积使用的池化参数均为2x2；\n",
    "2. 非线性转换均为relu函数；\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "总结：从以上的四个模型的运行结果来看，因为各参数的设置不同，测试集上的正确率依次表现为98.86%、98.36%、0.0974（第三个模型的该数值源于误用accuracy_1评估了未经optimizer_4训练的分支，按验证集结果其实际正确率约为98.5%）、90.56%。其中值得注意的是第四个模型，随着训练周期的增加，模型在验证集上的正确率反而越来越低，这可能与未施加w的正则化有关（即使dropout的保留率降低），产生了过拟合（应当与学习率设置和采取的strides = 2 无关，因为从验证集的结果来看，在训练周期较少时，正确率其实是还可以的）。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
