{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-1-de18f4ae6f37>:14: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\ProgramData\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From <ipython-input-1-de18f4ae6f37>:79: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n",
      "\n",
      "step 100, entropy loss: 1.012305, l2_loss: 6329.482422, total loss: 1.455369\n",
      "0.67\n",
      "step 200, entropy loss: 0.625805, l2_loss: 6329.083496, total loss: 1.068841\n",
      "0.85\n",
      "step 300, entropy loss: 0.525067, l2_loss: 6328.485352, total loss: 0.968061\n",
      "0.83\n",
      "step 400, entropy loss: 0.347307, l2_loss: 6327.820312, total loss: 0.790254\n",
      "0.94\n",
      "step 500, entropy loss: 0.270749, l2_loss: 6327.108398, total loss: 0.713647\n",
      "0.91\n",
      "step 600, entropy loss: 0.334790, l2_loss: 6326.377441, total loss: 0.777637\n",
      "0.9\n",
      "step 700, entropy loss: 0.215766, l2_loss: 6325.626953, total loss: 0.658560\n",
      "0.96\n",
      "step 800, entropy loss: 0.283655, l2_loss: 6324.804199, total loss: 0.726391\n",
      "0.88\n",
      "step 900, entropy loss: 0.211987, l2_loss: 6324.035156, total loss: 0.654669\n",
      "0.92\n",
      "step 1000, entropy loss: 0.172298, l2_loss: 6323.221680, total loss: 0.614924\n",
      "0.97\n",
      "0.935\n",
      "step 1100, entropy loss: 0.226575, l2_loss: 6322.381348, total loss: 0.669142\n",
      "0.94\n",
      "step 1200, entropy loss: 0.119138, l2_loss: 6321.589844, total loss: 0.561650\n",
      "0.94\n",
      "step 1300, entropy loss: 0.140114, l2_loss: 6320.774902, total loss: 0.582568\n",
      "0.96\n",
      "step 1400, entropy loss: 0.189316, l2_loss: 6319.952637, total loss: 0.631712\n",
      "0.95\n",
      "step 1500, entropy loss: 0.280893, l2_loss: 6319.139648, total loss: 0.723232\n",
      "0.96\n",
      "step 1600, entropy loss: 0.163105, l2_loss: 6318.334473, total loss: 0.605389\n",
      "0.94\n",
      "step 1700, entropy loss: 0.112542, l2_loss: 6317.507324, total loss: 0.554768\n",
      "1.0\n",
      "step 1800, entropy loss: 0.187421, l2_loss: 6316.696289, total loss: 0.629589\n",
      "0.94\n",
      "step 1900, entropy loss: 0.141465, l2_loss: 6315.880371, total loss: 0.583576\n",
      "0.95\n",
      "step 2000, entropy loss: 0.151281, l2_loss: 6315.065430, total loss: 0.593336\n",
      "0.94\n",
      "0.9546\n",
      "step 2100, entropy loss: 0.063866, l2_loss: 6314.211914, total loss: 0.505861\n",
      "0.99\n",
      "step 2200, entropy loss: 0.134219, l2_loss: 6313.367188, total loss: 0.576155\n",
      "0.97\n",
      "step 2300, entropy loss: 0.064980, l2_loss: 6312.511719, total loss: 0.506856\n",
      "0.98\n",
      "step 2400, entropy loss: 0.098646, l2_loss: 6311.680176, total loss: 0.540464\n",
      "0.99\n",
      "step 2500, entropy loss: 0.107326, l2_loss: 6310.873535, total loss: 0.549087\n",
      "0.95\n",
      "step 2600, entropy loss: 0.136091, l2_loss: 6310.042969, total loss: 0.577794\n",
      "0.96\n",
      "step 2700, entropy loss: 0.254039, l2_loss: 6309.193359, total loss: 0.695683\n",
      "0.96\n",
      "step 2800, entropy loss: 0.119049, l2_loss: 6308.368652, total loss: 0.560635\n",
      "0.96\n",
      "step 2900, entropy loss: 0.196392, l2_loss: 6307.526367, total loss: 0.637919\n",
      "0.97\n",
      "step 3000, entropy loss: 0.100638, l2_loss: 6306.690918, total loss: 0.542106\n",
      "0.99\n",
      "0.9623\n"
     ]
    }
   ],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "#构造一个reshape函数，重构输入图片数据大小\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "#第一个卷积层\n",
    "with tf.name_scope('conv1'):\n",
    "    shape = [6, 6, 1, 16]  \n",
    "    W_conv1 = tf.Variable(tf.truncated_normal(shape, stddev=0.1),\n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    shape = [16]\n",
    "    b_conv1 = tf.Variable(tf.constant(0.1, shape=shape))\n",
    "    l_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv1\n",
    "    h_conv1 = tf.nn.relu(l_conv1)\n",
    "\n",
    "with tf.name_scope('pool1'):\n",
    "    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('conv2'):\n",
    "    W_conv2 = tf.Variable(tf.truncated_normal([6,6 , 16, 32], stddev=0.1),\n",
    "                        \n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_conv2 = tf.Variable(tf.constant(0.1, shape=[32]))\n",
    "    l_conv2 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv2\n",
    "    h_conv2 = tf.nn.relu(l_conv2)\n",
    "\n",
    "\n",
    "with tf.name_scope('pool2'):\n",
    "    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('fc1'):\n",
    "    W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 32, 1024], stddev=0.1),\n",
    "                      \n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))\n",
    "\n",
    "    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 32])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "\n",
    "with tf.name_scope('dropout'):\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "\n",
    "with tf.name_scope('fc2'):\n",
    "    W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1),\n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))\n",
    "\n",
    "    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "\n",
    "\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "\n",
    "\n",
    "for step in range(3000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    lr = 0.01\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:0.5})\n",
    "  \n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "            (step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Test trained model\n",
    "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
    "    if (step+1) % 1000 == 0:\n",
    "        print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels, keep_prob:0.5}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "较少卷积核的数量效果并不是特别好"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来将第一层卷积层的卷积核数量调整为64，第二层神经元数量调整为32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "step 100, entropy loss: 0.889768, l2_loss: 12864.052734, total loss: 1.790252\n",
      "0.76\n",
      "step 200, entropy loss: 0.603610, l2_loss: 12862.458984, total loss: 1.503983\n",
      "0.83\n",
      "step 300, entropy loss: 0.422788, l2_loss: 12860.855469, total loss: 1.323048\n",
      "0.89\n",
      "step 400, entropy loss: 0.284134, l2_loss: 12859.197266, total loss: 1.184278\n",
      "0.91\n",
      "step 500, entropy loss: 0.240793, l2_loss: 12857.453125, total loss: 1.140815\n",
      "0.85\n",
      "step 600, entropy loss: 0.190240, l2_loss: 12855.747070, total loss: 1.090143\n",
      "0.94\n",
      "step 700, entropy loss: 0.350080, l2_loss: 12854.035156, total loss: 1.249862\n",
      "0.89\n",
      "step 800, entropy loss: 0.276901, l2_loss: 12852.297852, total loss: 1.176561\n",
      "0.94\n",
      "step 900, entropy loss: 0.217364, l2_loss: 12850.582031, total loss: 1.116905\n",
      "0.96\n",
      "step 1000, entropy loss: 0.201245, l2_loss: 12848.855469, total loss: 1.100665\n",
      "0.95\n",
      "0.9334\n",
      "step 1100, entropy loss: 0.233857, l2_loss: 12847.132812, total loss: 1.133157\n",
      "0.96\n",
      "step 1200, entropy loss: 0.221753, l2_loss: 12845.381836, total loss: 1.120930\n",
      "0.95\n",
      "step 1300, entropy loss: 0.104781, l2_loss: 12843.656250, total loss: 1.003837\n",
      "0.97\n",
      "step 1400, entropy loss: 0.101460, l2_loss: 12841.934570, total loss: 1.000395\n",
      "0.97\n",
      "step 1500, entropy loss: 0.143408, l2_loss: 12840.171875, total loss: 1.042220\n",
      "0.97\n",
      "step 1600, entropy loss: 0.151075, l2_loss: 12838.433594, total loss: 1.049766\n",
      "0.96\n",
      "step 1700, entropy loss: 0.156229, l2_loss: 12836.686523, total loss: 1.054797\n",
      "0.95\n",
      "step 1800, entropy loss: 0.084805, l2_loss: 12834.931641, total loss: 0.983250\n",
      "0.98\n",
      "step 1900, entropy loss: 0.113783, l2_loss: 12833.170898, total loss: 1.012105\n",
      "0.94\n",
      "step 2000, entropy loss: 0.078770, l2_loss: 12831.443359, total loss: 0.976971\n",
      "0.92\n",
      "0.9557\n",
      "step 2100, entropy loss: 0.130768, l2_loss: 12829.694336, total loss: 1.028846\n",
      "0.96\n",
      "step 2200, entropy loss: 0.167531, l2_loss: 12827.918945, total loss: 1.065485\n",
      "0.97\n",
      "step 2300, entropy loss: 0.118453, l2_loss: 12826.187500, total loss: 1.016286\n",
      "0.94\n",
      "step 2400, entropy loss: 0.171935, l2_loss: 12824.453125, total loss: 1.069646\n",
      "0.95\n",
      "step 2500, entropy loss: 0.147631, l2_loss: 12822.717773, total loss: 1.045221\n",
      "0.97\n",
      "step 2600, entropy loss: 0.101364, l2_loss: 12820.966797, total loss: 0.998832\n",
      "0.95\n",
      "step 2700, entropy loss: 0.194994, l2_loss: 12819.212891, total loss: 1.092339\n",
      "0.97\n",
      "step 2800, entropy loss: 0.202591, l2_loss: 12817.483398, total loss: 1.099815\n",
      "0.94\n",
      "step 2900, entropy loss: 0.041511, l2_loss: 12815.720703, total loss: 0.938611\n",
      "0.99\n",
      "step 3000, entropy loss: 0.104845, l2_loss: 12813.968750, total loss: 1.001822\n",
      "0.98\n",
      "0.9636\n"
     ]
    }
   ],
   "source": [
    "\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "#构造一个reshape函数，重构输入图片数据大小\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "#第一个卷积层\n",
    "with tf.name_scope('conv1'):\n",
    "    shape = [6, 6, 1, 64]  \n",
    "    W_conv1 = tf.Variable(tf.truncated_normal(shape, stddev=0.1),\n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    shape = [64]\n",
    "    b_conv1 = tf.Variable(tf.constant(0.1, shape=shape))\n",
    "    l_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv1\n",
    "    h_conv1 = tf.nn.relu(l_conv1)\n",
    "\n",
    "with tf.name_scope('pool1'):\n",
    "    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('conv2'):\n",
    "    W_conv2 = tf.Variable(tf.truncated_normal([6,6 , 64, 32], stddev=0.1),\n",
    "                        \n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_conv2 = tf.Variable(tf.constant(0.1, shape=[32]))\n",
    "    l_conv2 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv2\n",
    "    h_conv2 = tf.nn.relu(l_conv2)\n",
    "\n",
    "\n",
    "with tf.name_scope('pool2'):\n",
    "    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('fc1'):\n",
    "    W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 32, 1024], stddev=0.1),\n",
    "                      \n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))\n",
    "\n",
    "    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 32])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "\n",
    "with tf.name_scope('dropout'):\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "\n",
    "with tf.name_scope('fc2'):\n",
    "    W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1),\n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))\n",
    "\n",
    "    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "\n",
    "\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "\n",
    "\n",
    "for step in range(3000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    lr = 0.01\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:0.5})\n",
    "  \n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "            (step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Test trained model\n",
    "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
    "    if (step+1) % 1000 == 0:\n",
    "        print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels, keep_prob:0.5}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "效果也不好，如果两层卷积核数量均为64如何？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "step 100, entropy loss: 0.750156, l2_loss: 25897.203125, total loss: 2.562960\n",
      "0.74\n",
      "step 200, entropy loss: 0.450031, l2_loss: 25893.597656, total loss: 2.262583\n",
      "0.85\n",
      "step 300, entropy loss: 0.528245, l2_loss: 25890.000000, total loss: 2.340545\n",
      "0.88\n",
      "step 400, entropy loss: 0.380238, l2_loss: 25886.410156, total loss: 2.192287\n",
      "0.89\n",
      "step 500, entropy loss: 0.308016, l2_loss: 25882.775391, total loss: 2.119810\n",
      "0.89\n",
      "step 600, entropy loss: 0.401105, l2_loss: 25879.142578, total loss: 2.212645\n",
      "0.87\n",
      "step 700, entropy loss: 0.298286, l2_loss: 25875.542969, total loss: 2.109574\n",
      "0.95\n",
      "step 800, entropy loss: 0.322712, l2_loss: 25871.923828, total loss: 2.133747\n",
      "0.9\n",
      "step 900, entropy loss: 0.265335, l2_loss: 25868.316406, total loss: 2.076117\n",
      "0.94\n",
      "step 1000, entropy loss: 0.218749, l2_loss: 25864.720703, total loss: 2.029280\n",
      "0.95\n",
      "0.9427\n",
      "step 1100, entropy loss: 0.149232, l2_loss: 25861.121094, total loss: 1.959510\n",
      "0.96\n",
      "step 1200, entropy loss: 0.173387, l2_loss: 25857.542969, total loss: 1.983415\n",
      "0.95\n",
      "step 1300, entropy loss: 0.121947, l2_loss: 25853.916016, total loss: 1.931721\n",
      "0.95\n",
      "step 1400, entropy loss: 0.187638, l2_loss: 25850.312500, total loss: 1.997160\n",
      "0.97\n",
      "step 1500, entropy loss: 0.163661, l2_loss: 25846.726562, total loss: 1.972932\n",
      "0.96\n",
      "step 1600, entropy loss: 0.035119, l2_loss: 25843.105469, total loss: 1.844136\n",
      "1.0\n",
      "step 1700, entropy loss: 0.256719, l2_loss: 25839.525391, total loss: 2.065486\n",
      "0.96\n",
      "step 1800, entropy loss: 0.130133, l2_loss: 25835.945312, total loss: 1.938650\n",
      "0.93\n",
      "step 1900, entropy loss: 0.267433, l2_loss: 25832.365234, total loss: 2.075699\n",
      "0.94\n",
      "step 2000, entropy loss: 0.060845, l2_loss: 25828.767578, total loss: 1.868859\n",
      "0.98\n",
      "0.9562\n",
      "step 2100, entropy loss: 0.047278, l2_loss: 25825.175781, total loss: 1.855040\n",
      "0.96\n",
      "step 2200, entropy loss: 0.114761, l2_loss: 25821.587891, total loss: 1.922272\n",
      "0.97\n",
      "step 2300, entropy loss: 0.104227, l2_loss: 25818.011719, total loss: 1.911488\n",
      "0.99\n",
      "step 2400, entropy loss: 0.079915, l2_loss: 25814.417969, total loss: 1.886924\n",
      "0.97\n",
      "step 2500, entropy loss: 0.160339, l2_loss: 25810.853516, total loss: 1.967099\n",
      "0.95\n",
      "step 2600, entropy loss: 0.171368, l2_loss: 25807.277344, total loss: 1.977878\n",
      "0.97\n",
      "step 2700, entropy loss: 0.098830, l2_loss: 25803.677734, total loss: 1.905088\n",
      "0.96\n",
      "step 2800, entropy loss: 0.092404, l2_loss: 25800.097656, total loss: 1.898411\n",
      "0.97\n",
      "step 2900, entropy loss: 0.096362, l2_loss: 25796.548828, total loss: 1.902121\n",
      "0.94\n",
      "step 3000, entropy loss: 0.106537, l2_loss: 25792.972656, total loss: 1.912045\n",
      "0.99\n",
      "0.9668\n"
     ]
    }
   ],
   "source": [
    "\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "#构造一个reshape函数，重构输入图片数据大小\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "#第一个卷积层\n",
    "with tf.name_scope('conv1'):\n",
    "    shape = [6, 6, 1, 64]  \n",
    "    W_conv1 = tf.Variable(tf.truncated_normal(shape, stddev=0.1),\n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    shape = [64]\n",
    "    b_conv1 = tf.Variable(tf.constant(0.1, shape=shape))\n",
    "    l_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv1\n",
    "    h_conv1 = tf.nn.relu(l_conv1)\n",
    "\n",
    "with tf.name_scope('pool1'):\n",
    "    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('conv2'):\n",
    "    W_conv2 = tf.Variable(tf.truncated_normal([6,6 , 64, 64], stddev=0.1),\n",
    "                        \n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))\n",
    "    l_conv2 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv2\n",
    "    h_conv2 = tf.nn.relu(l_conv2)\n",
    "\n",
    "\n",
    "with tf.name_scope('pool2'):\n",
    "    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('fc1'):\n",
    "    W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1024], stddev=0.1),\n",
    "                      \n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))\n",
    "\n",
    "    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "\n",
    "with tf.name_scope('dropout'):\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "\n",
    "with tf.name_scope('fc2'):\n",
    "    W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1),\n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))\n",
    "\n",
    "    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "\n",
    "\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "\n",
    "\n",
    "for step in range(3000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    lr = 0.01\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:0.5})\n",
    "  \n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "            (step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Test trained model\n",
    "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
    "    if (step+1) % 1000 == 0:\n",
    "        print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels, keep_prob:0.5}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "结果相较于初始卷积核数量并没有太大的改进"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "再将两层卷积核军调整为32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 1.184723, l2_loss: 38715.769531, total loss: 3.894827\n",
      "0.68\n",
      "step 200, entropy loss: 0.513702, l2_loss: 38710.675781, total loss: 3.223449\n",
      "0.82\n",
      "step 300, entropy loss: 0.519051, l2_loss: 38705.468750, total loss: 3.228434\n",
      "0.88\n",
      "step 400, entropy loss: 0.293234, l2_loss: 38700.218750, total loss: 3.002249\n",
      "0.92\n",
      "step 500, entropy loss: 0.257291, l2_loss: 38694.972656, total loss: 2.965939\n",
      "0.92\n",
      "step 600, entropy loss: 0.210056, l2_loss: 38689.667969, total loss: 2.918333\n",
      "0.94\n",
      "step 700, entropy loss: 0.276487, l2_loss: 38684.394531, total loss: 2.984395\n",
      "0.96\n",
      "step 800, entropy loss: 0.229309, l2_loss: 38679.035156, total loss: 2.936842\n",
      "0.94\n",
      "step 900, entropy loss: 0.210562, l2_loss: 38673.695312, total loss: 2.917721\n",
      "0.95\n",
      "step 1000, entropy loss: 0.322034, l2_loss: 38668.382812, total loss: 3.028821\n",
      "0.92\n",
      "0.9351\n",
      "step 1100, entropy loss: 0.127375, l2_loss: 38663.039062, total loss: 2.833788\n",
      "0.96\n",
      "step 1200, entropy loss: 0.239356, l2_loss: 38657.730469, total loss: 2.945397\n",
      "0.9\n",
      "step 1300, entropy loss: 0.259308, l2_loss: 38652.378906, total loss: 2.964974\n",
      "0.94\n",
      "step 1400, entropy loss: 0.251650, l2_loss: 38647.039062, total loss: 2.956943\n",
      "0.94\n",
      "step 1500, entropy loss: 0.150819, l2_loss: 38641.695312, total loss: 2.855738\n",
      "0.96\n",
      "step 1600, entropy loss: 0.153542, l2_loss: 38636.390625, total loss: 2.858089\n",
      "0.98\n",
      "step 1700, entropy loss: 0.155189, l2_loss: 38631.046875, total loss: 2.859362\n",
      "0.98\n",
      "step 1800, entropy loss: 0.173372, l2_loss: 38625.703125, total loss: 2.877171\n",
      "0.96\n",
      "step 1900, entropy loss: 0.256503, l2_loss: 38620.351562, total loss: 2.959928\n",
      "0.94\n",
      "step 2000, entropy loss: 0.048627, l2_loss: 38614.996094, total loss: 2.751677\n",
      "0.98\n",
      "0.9559\n",
      "step 2100, entropy loss: 0.096969, l2_loss: 38609.632812, total loss: 2.799644\n",
      "0.98\n",
      "step 2200, entropy loss: 0.333557, l2_loss: 38604.296875, total loss: 3.035858\n",
      "0.91\n",
      "step 2300, entropy loss: 0.230957, l2_loss: 38598.960938, total loss: 2.932884\n",
      "0.96\n",
      "step 2400, entropy loss: 0.115208, l2_loss: 38593.597656, total loss: 2.816760\n",
      "0.95\n",
      "step 2500, entropy loss: 0.060336, l2_loss: 38588.257812, total loss: 2.761515\n",
      "0.98\n",
      "step 2600, entropy loss: 0.210237, l2_loss: 38582.917969, total loss: 2.911041\n",
      "0.96\n",
      "step 2700, entropy loss: 0.114272, l2_loss: 38577.578125, total loss: 2.814703\n",
      "0.99\n",
      "step 2800, entropy loss: 0.145875, l2_loss: 38572.250000, total loss: 2.845933\n",
      "0.96\n",
      "step 2900, entropy loss: 0.125527, l2_loss: 38566.906250, total loss: 2.825210\n",
      "0.98\n",
      "step 3000, entropy loss: 0.065481, l2_loss: 38561.574219, total loss: 2.764791\n",
      "0.98\n",
      "0.9633\n"
     ]
    }
   ],
   "source": [
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "#构造一个reshape函数，重构输入图片数据大小\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "#第一个卷积层\n",
    "with tf.name_scope('conv1'):\n",
    "    shape = [6, 6, 1, 32]  \n",
    "    W_conv1 = tf.Variable(tf.truncated_normal(shape, stddev=0.1),\n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    shape = [32]\n",
    "    b_conv1 = tf.Variable(tf.constant(0.1, shape=shape))\n",
    "    l_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv1\n",
    "    h_conv1 = tf.nn.relu(l_conv1)\n",
    "\n",
    "with tf.name_scope('pool1'):\n",
    "    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('conv2'):\n",
    "    W_conv2 = tf.Variable(tf.truncated_normal([6,6 , 32, 32], stddev=0.1),\n",
    "                        \n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_conv2 = tf.Variable(tf.constant(0.1, shape=[32]))\n",
    "    l_conv2 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], \n",
    "                         padding='SAME') + b_conv2\n",
    "    h_conv2 = tf.nn.relu(l_conv2)\n",
    "\n",
    "\n",
    "with tf.name_scope('pool2'):\n",
    "    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "\n",
    "with tf.name_scope('fc1'):\n",
    "    W_fc1 = tf.Variable(tf.truncated_normal([7 * 7 * 32, 1024], stddev=0.1),\n",
    "                      \n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))\n",
    "\n",
    "    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 32])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "\n",
    "with tf.name_scope('dropout'):\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "\n",
    "with tf.name_scope('fc2'):\n",
    "    W_fc2 = tf.Variable(tf.truncated_normal([1024, 10], stddev=0.1),\n",
    "                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))\n",
    "\n",
    "    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "\n",
    "\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "\n",
    "\n",
    "for step in range(3000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    lr = 0.01\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:0.5})\n",
    "  \n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "            (step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Test trained model\n",
    "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
    "    if (step+1) % 1000 == 0:\n",
    "        print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels, keep_prob:0.5}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "结果仍然一般，显然，64+32的卷积核数量性能已经还算不错了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
