{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "tensorflow_home2.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "collapsed_sections": [
        "LXEKnReiwi_9",
        "K8CxyGIwxPP0",
        "sNdB_WTfjl8y",
        "G6kKxPf3BdLT"
      ]
    },
    "kernelspec": {
      "name": "python2",
      "display_name": "Python 2"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "metadata": {
        "id": "4o7b4VTDswyu",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "# mnist\n",
        "\n",
        "并探索如下超参数设置：\n",
        "- 卷积kernel size\n",
        "- 卷积kernel 数量\n",
        "- 学习率\n",
        "- 正则化因子\n",
        "- 权重初始化分布参数\n",
        "\n",
        "## 评价标准\n",
        "\n",
        "- 准确度达到98%或者以上60分，作为及格标准，未达到者本作业不及格，不予打分。\n",
        "- 使用了正则化因子或文档中给出描述：10分。\n",
        "- 手动初始化参数或文档中给出描述：10分，不设置初始化参数的，只使用默认初始化认为学员没考虑到初始化问题，不给分。\n",
        "- 学习率调整：10分，需要文档中给出描述。\n",
        "- 卷积kernel size和数量调整：10分，需要文档中给出描述。\n"
      ]
    },
    {
      "metadata": {
        "id": "cMp2gQd2pv5_",
        "colab_type": "code",
        "outputId": "e351891c-fc85-41cd-b354-a67468da16bd",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 565
        }
      },
      "cell_type": "code",
      "source": [
        "from __future__ import absolute_import\n",
        "from __future__ import division\n",
        "from __future__ import print_function\n",
        "\n",
        "import argparse\n",
        "import sys\n",
        "import tensorflow as tf\n",
        "from tensorflow.examples.tutorials.mnist import input_data\n",
        "import numpy as np\n",
        "\n",
        "def reset_graph(seed=42):\n",
        "    tf.reset_default_graph()\n",
        "    tf.set_random_seed(seed)\n",
        "    np.random.seed(seed)\n",
        "\n",
        "FLAGS = None\n",
        "\n",
        "mnist = input_data.read_data_sets('/tmp/tensorflow/input_data', one_hot=True)"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "WARNING:tensorflow:From <ipython-input-1-bd059af1de1e>:18: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
            "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please write your own downloading logic.\n",
            "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/base.py:252: wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use urllib or similar directly.\n",
            "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n",
            "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use tf.data to implement this functionality.\n",
            "Extracting /tmp/tensorflow/input_data/train-images-idx3-ubyte.gz\n",
            "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n",
            "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use tf.data to implement this functionality.\n",
            "Extracting /tmp/tensorflow/input_data/train-labels-idx1-ubyte.gz\n",
            "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use tf.one_hot on tensors.\n",
            "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n",
            "Extracting /tmp/tensorflow/input_data/t10k-images-idx3-ubyte.gz\n",
            "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n",
            "Extracting /tmp/tensorflow/input_data/t10k-labels-idx1-ubyte.gz\n",
            "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: __init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
            "Instructions for updating:\n",
            "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "3wcK19sYTycV",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "# 基本的神经网络"
      ]
    },
    {
      "metadata": {
        "id": "nj3iY1-7rNKi",
        "colab_type": "code",
        "outputId": "5bd39801-d071-4405-b5de-dba30309e09d",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 54
        }
      },
      "cell_type": "code",
      "source": [
        "!ls /tmp/tensorflow/input_data"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "t10k-images-idx3-ubyte.gz  train-images-idx3-ubyte.gz\n",
            "t10k-labels-idx1-ubyte.gz  train-labels-idx1-ubyte.gz\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "1uZJ1pZaszZg",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 35
        },
        "outputId": "ed1b9833-b22d-413e-b2d1-566ca231952a"
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "W = tf.Variable(tf.zeros([784, 10]))\n",
        "b = tf.Variable(tf.zeros([10]))\n",
        "y = tf.matmul(x, W) + b\n",
        "\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "\n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
        "\n",
        "sess = tf.Session()\n",
        "\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for _ in range(3000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
        "    \n",
        "    \n",
        "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "\n",
        "print(sess.run(accuracy, feed_dict={x: mnist.test.images, \n",
        "                                    y_: mnist.test.labels}))\n",
        "\n",
        "sess.close()"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "0.9202\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "LXEKnReiwi_9",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "# 深度神经网络"
      ]
    },
    {
      "metadata": {
        "id": "ft-H81FhUmK9",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "reset_graph()\n",
        "\n",
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "W1 = tf.get_variable('w11_scale', [784, 500], initializer=tf.contrib.layers.variance_scaling_initializer())\n",
        "b1 = tf.Variable(tf.zeros([500]) + 0.1)\n",
        "L1 = tf.nn.relu(tf.matmul(x, W1) + b1)\n",
        "\n",
        "W2 = tf.get_variable('w12_scale', [500, 10], initializer=tf.contrib.layers.variance_scaling_initializer())\n",
        "b2 = tf.Variable(tf.zeros([10]) + 0.1)\n",
        "y = tf.matmul(L1, W2) + b2\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "\n",
        "\n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "global_step = tf.Variable(0, trainable=False)\n",
        "starter_learning_rate = 0.5  #初始学习速率\n",
        "learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 100, 0.96, staircase=False)\n",
        "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for _ in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
        "    \n",
        "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "print(sess.run(accuracy, feed_dict={x: mnist.test.images,  y_: mnist.test.labels}))\n",
        "\n",
        "sess.close()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "MUHWxkEGUt4x",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "# 卷积神经网络"
      ]
    },
    {
      "metadata": {
        "id": "NodPSvOPi2Am",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "learning_rate = tf.placeholder(tf.float32)\n",
        "x_image = tf.reshape(x, [-1, 28, 28, 1])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "lOdJAxKv4GJJ",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 权重初始化"
      ]
    },
    {
      "metadata": {
        "id": "tldqGQLpXciq",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "这里我们先使用tf.truncated_normal初始化权重，stddev设置为0.1"
      ]
    },
    {
      "metadata": {
        "id": "Em7NSKyiVtXi",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def weight_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
        "\n",
        "def bias_variable(shape):\n",
        "    init = tf.constant(0.1, shape=shape)\n",
        "    return tf.Variable(init) "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "nlkUnRsWw-bD",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "##  激活函数"
      ]
    },
    {
      "metadata": {
        "id": "-ndabB7TYjhk",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### 1. relu\n",
        "\n",
        "### 2.  relu6\n",
        "\n",
        "### 3. swish"
      ]
    },
    {
      "metadata": {
        "id": "ozO5A-I5FT-8",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def swish(x, b = 1):\n",
        "    return x * tf.nn.sigmoid(b * x)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "ss5eENSM6qp9",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 卷积\n",
        "\n",
        "这里我们的卷积使用步长为1，0填充，因此输入和输出的大小是一样的"
      ]
    },
    {
      "metadata": {
        "id": "b5xVb2S1Y_yL",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "# def conv2d(x, W):\n",
        "#     return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n",
        "\n",
        "def conv2d(x, W, stride=1):\n",
        "    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "jawCM528uRNB",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 池化\n",
        "\n",
        "这里我们的池化使用的是最大池化2x2块"
      ]
    },
    {
      "metadata": {
        "id": "sPxxsTrwbV9X",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "def max_pool(x):\n",
        "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "K8CxyGIwxPP0",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 正则化\n",
        "\n",
        "这里我们采用L2正则\n",
        "\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )"
      ]
    },
    {
      "metadata": {
        "id": "sNdB_WTfjl8y",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 学习率\n",
        "\n",
        "自适应学习速率\n",
        "\n",
        "global_step = tf.Variable(0, trainable=False)\n",
        "\n",
        "starter_learning_rate = 1.0  #初始学习速率\n",
        "\n",
        "learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, \n",
        "                                           mnist.train.num_examples / 100, 0.96, staircase=False)"
      ]
    },
    {
      "metadata": {
        "id": "N2XsLucKh2L2",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 第一层卷积"
      ]
    },
    {
      "metadata": {
        "id": "zO19zENbhyfb",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#28X28X1 -> 28X28X32 -> 14X14X32\n",
        "with tf.name_scope('conv1'):\n",
        "    shape = [5, 5, 1, 32]\n",
        "    Weight_conv1 = weight_variable(shape)\n",
        "    bias_conv1 = bias_variable([32])\n",
        "    relu_conv1 = tf.nn.relu(conv2d(x_image, Weight_conv1) + bias_conv1)  \n",
        "    pool_conv1 = max_pool(relu_conv1)\n",
        "    "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "MbghK_kajJca",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 第二层卷积"
      ]
    },
    {
      "metadata": {
        "id": "o4R-F-cxjLzA",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "#14X14X32 -> 14X14X64 -> 7X7X64\n",
        "with tf.name_scope('conv2'):\n",
        "    shape = [5, 5, 32, 64]\n",
        "    Weight_conv2 = weight_variable(shape)\n",
        "    bias_conv2 = bias_variable([64])\n",
        "    relu_conv2 = tf.nn.relu(conv2d(pool_conv1, Weight_conv2) + bias_conv2)\n",
        "    pool_conv2 = max_pool(relu_conv2)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "uvypnnWWjcCr",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 全连接层"
      ]
    },
    {
      "metadata": {
        "id": "Ag37H1ZRjd-4",
        "colab_type": "code",
        "colab": {}
      },
      "cell_type": "code",
      "source": [
        "### 7X7X64 -> 1024\n",
        "with tf.name_scope('fc1'):\n",
        "    Weight_fc1 = weight_variable([7*7*64, 1024])\n",
        "    bias_fc1 = bias_variable([1024])\n",
        "    pool_flat_fc1 = tf.reshape(pool_conv2, [-1, 7*7*64])\n",
        "    h_fc1 = tf.nn.relu(tf.matmul(pool_flat_fc1, Weight_fc1) + bias_fc1)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout'):\n",
        "    keep_prob = tf.placeholder(tf.float32)\n",
        "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
        "\n",
        "# 1024 -> 10\n",
        "with tf.name_scope('fc2'):\n",
        "    Weight_fc2 = weight_variable([1024, 10])\n",
        "    bias_fc2 = bias_variable([10])\n",
        "    y = tf.matmul(h_fc1_drop, Weight_fc2) + bias_fc2"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "metadata": {
        "id": "0Hdw3kIsXFPn",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 最后"
      ]
    },
    {
      "metadata": {
        "id": "AS7n95__sRsX",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### 基本的卷积神经网络"
      ]
    },
    {
      "metadata": {
        "id": "ssF3gJsknBVf",
        "colab_type": "code",
        "outputId": "7798da8a-9feb-4fb5-ae81-65c4fc2f47aa",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        }
      },
      "cell_type": "code",
      "source": [
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, loss = sess.run([train_step, cross_entropy], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f' % (step+1, loss))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:1.0}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:1.0}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 29,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.171281\n",
            "0.97\n",
            "step 10000, entropy loss: 0.103083\n",
            "0.97\n",
            "0.9828\n",
            "step 15000, entropy loss: 0.039121\n",
            "0.99\n",
            "step 20000, entropy loss: 0.074265\n",
            "0.99\n",
            "0.9862\n",
            "step 25000, entropy loss: 0.022181\n",
            "1.0\n",
            "step 30000, entropy loss: 0.053179\n",
            "0.99\n",
            "0.9863\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "jjVKW1pNooqs",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "准确率已经达到98.63%\n"
      ]
    },
    {
      "metadata": {
        "id": "NXb0eZzGsf-7",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### 增加L2正则"
      ]
    },
    {
      "metadata": {
        "id": "vWUMTHaAs1Ne",
        "colab_type": "code",
        "outputId": "2990dee0-e44d-41bf-fc4e-2d0a7073a538",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 90
        }
      },
      "cell_type": "code",
      "source": [
        "tf.get_collection('WEIGHTS')"
      ],
      "execution_count": 31,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[<tf.Variable 'conv1/Variable:0' shape=(5, 5, 1, 32) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2/Variable:0' shape=(5, 5, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'fc1/Variable:0' shape=(3136, 1024) dtype=float32_ref>,\n",
              " <tf.Variable 'fc1_1/Variable:0' shape=(1024, 10) dtype=float32_ref>]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 31
        }
      ]
    },
    {
      "metadata": {
        "id": "LhI_OKnhopEW",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "f1006ff0-3fda-4091-f054-0d118b609f1e"
      },
      "cell_type": "code",
      "source": [
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, total_loss_value = sess.run([train_step, cross_entropy, total_loss], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, total_loss: %f' % (step+1, cross_entropy_loss, total_loss_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:1.0}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:1.0}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.149273, total_loss: 1.029179\n",
            "0.97\n",
            "step 10000, entropy loss: 0.082128, total_loss: 0.956037\n",
            "0.97\n",
            "0.9805\n",
            "step 15000, entropy loss: 0.088382, total_loss: 0.956328\n",
            "0.99\n",
            "step 20000, entropy loss: 0.057932, total_loss: 0.919951\n",
            "0.98\n",
            "0.9847\n",
            "step 25000, entropy loss: 0.043772, total_loss: 0.899896\n",
            "1.0\n",
            "step 30000, entropy loss: 0.027689, total_loss: 0.877956\n",
            "1.0\n",
            "0.9862\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "LFeJ9uID0Bpx",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "准确率达到98.62%"
      ]
    },
    {
      "metadata": {
        "id": "gsum7uUisqQ_",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### 改变学习速率"
      ]
    },
    {
      "metadata": {
        "id": "neomLp2w4z2l",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "e5c83780-e4ee-46b8-e8bd-ae28288a5516"
      },
      "cell_type": "code",
      "source": [
        "# 自适应学习速率\n",
        "global_step = tf.Variable(0, trainable=False)\n",
        "starter_learning_rate = 0.01\n",
        "learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, \n",
        "                                           mnist.train.num_examples / 100, 0.96, staircase=False)\n",
        "\n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step=global_step)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, learning_rate_value = sess.run([train_step, cross_entropy, learning_rate], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, learning rate: %f' % (step+1, cross_entropy_loss, learning_rate_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:1.0}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:1.0}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 32,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.082237, learning rate: 0.010000\n",
            "0.97\n",
            "step 10000, entropy loss: 0.134571, learning rate: 0.010000\n",
            "0.98\n",
            "0.982\n",
            "step 15000, entropy loss: 0.088873, learning rate: 0.010000\n",
            "0.97\n",
            "step 20000, entropy loss: 0.014359, learning rate: 0.010000\n",
            "0.99\n",
            "0.9866\n",
            "step 25000, entropy loss: 0.006385, learning rate: 0.010000\n",
            "1.0\n",
            "step 30000, entropy loss: 0.063617, learning rate: 0.010000\n",
            "0.99\n",
            "0.988\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "XDX4xgUV4tdb",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "使用自适应学习速率,准确率能达到98.8%"
      ]
    },
    {
      "metadata": {
        "id": "KLrjSOYi4aB5",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 使用更复杂的AdamOptimizer优化器替换GradientDescentOptimizer优化器"
      ]
    },
    {
      "metadata": {
        "id": "D13rdy5P5Qy_",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "94cd57ac-e316-4f21-ede6-69db1c3c066e"
      },
      "cell_type": "code",
      "source": [
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, total_loss_value = sess.run([train_step, cross_entropy, total_loss], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, total_loss: %f' % (step+1, cross_entropy_loss, total_loss_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:1.0}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:1.0}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 26,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.029717, total_loss: 0.652172\n",
            "0.99\n",
            "step 10000, entropy loss: 0.009926, total_loss: 0.415160\n",
            "0.99\n",
            "0.9872\n",
            "step 15000, entropy loss: 0.001915, total_loss: 0.249239\n",
            "1.0\n",
            "step 20000, entropy loss: 0.008723, total_loss: 0.162707\n",
            "1.0\n",
            "0.9893\n",
            "step 25000, entropy loss: 0.000784, total_loss: 0.101584\n",
            "1.0\n",
            "step 30000, entropy loss: 0.000663, total_loss: 0.070713\n",
            "1.0\n",
            "0.9902\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "Xh_VeNGfLKoY",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "使用AdamOptimizer后，准确率达到99.02%"
      ]
    },
    {
      "metadata": {
        "id": "dbs9j8uTNQfG",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "## 卷积kernel size和数量调整"
      ]
    },
    {
      "metadata": {
        "id": "nCb6LY8XBBBa",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### 卷积kernel size改成3X3"
      ]
    },
    {
      "metadata": {
        "id": "_xyiDwQs1_16",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "#### 1.0"
      ]
    },
    {
      "metadata": {
        "id": "l52rSp8SNWpx",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "f74a9725-68ba-4b7f-95d7-ee53b0becc18"
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "learning_rate = tf.placeholder(tf.float32)\n",
        "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
        "\n",
        "\n",
        "def weight_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
        "\n",
        "def bias_variable(shape):\n",
        "    init = tf.constant(0.1, shape=shape)\n",
        "    return tf.Variable(init) \n",
        "\n",
        "def conv2d(x, W, stride=1):\n",
        "    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')\n",
        "  \n",
        "def max_pool(x):\n",
        "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
        "\n",
        "#28X28X1 -> 28X28X32 -> 14X14X32\n",
        "with tf.name_scope('conv1_2'):\n",
        "    shape = [3, 3, 1, 32]\n",
        "    Weight_conv1 = weight_variable(shape)\n",
        "    bias_conv1 = bias_variable([32])\n",
        "    relu_conv1 = tf.nn.relu(conv2d(x_image, Weight_conv1) + bias_conv1)  \n",
        "    pool_conv1 = max_pool(relu_conv1)\n",
        "\n",
        "#14X14X32 -> 14X14X64 -> 7X7X64\n",
        "with tf.name_scope('conv2_2'):\n",
        "    shape = [3, 3, 32, 64]\n",
        "    Weight_conv2 = weight_variable(shape)\n",
        "    bias_conv2 = bias_variable([64])\n",
        "    relu_conv2 = tf.nn.relu(conv2d(pool_conv1, Weight_conv2) + bias_conv2)\n",
        "    pool_conv2 = max_pool(relu_conv2)\n",
        "\n",
        "### 7X7X64 -> 1024\n",
        "with tf.name_scope('fc1_2'):\n",
        "    Weight_fc1 = weight_variable([7*7*64, 1024])\n",
        "    bias_fc1 = bias_variable([1024])\n",
        "    pool_flat_fc1 = tf.reshape(pool_conv2, [-1, 7*7*64])\n",
        "    h_fc1 = tf.nn.relu(tf.matmul(pool_flat_fc1, Weight_fc1) + bias_fc1)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    keep_prob = tf.placeholder(tf.float32)\n",
        "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
        "\n",
        "# 1024 -> 10\n",
        "with tf.name_scope('fc2_2'):\n",
        "    Weight_fc2 = weight_variable([1024, 10])\n",
        "    bias_fc2 = bias_variable([10])\n",
        "    y = tf.matmul(h_fc1_drop, Weight_fc2) + bias_fc2\n",
        "    \n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, total_loss_value = sess.run([train_step, cross_entropy, total_loss], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, total_loss: %f' % (step+1, cross_entropy_loss, total_loss_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:0.5}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 35,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.065255, total_loss: 0.605431\n",
            "0.98\n",
            "step 10000, entropy loss: 0.029008, total_loss: 0.346657\n",
            "0.99\n",
            "0.9874\n",
            "step 15000, entropy loss: 0.014787, total_loss: 0.205699\n",
            "1.0\n",
            "step 20000, entropy loss: 0.009378, total_loss: 0.135076\n",
            "1.0\n",
            "0.9885\n",
            "step 25000, entropy loss: 0.002474, total_loss: 0.090672\n",
            "1.0\n",
            "step 30000, entropy loss: 0.001839, total_loss: 0.067473\n",
            "1.0\n",
            "0.9897\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "Cpy9R0m3kUbs",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "准确率达到98.97%"
      ]
    },
    {
      "metadata": {
        "id": "pmrOfbWskdFS",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "#### 2.0 全连接增加一层"
      ]
    },
    {
      "metadata": {
        "id": "e4QiamPopdR9",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 108
        },
        "outputId": "ec2bffd2-2cfc-4372-a694-e6419bf16ff1"
      },
      "cell_type": "code",
      "source": [
        "tf.get_collection('WEIGHTS')"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[<tf.Variable 'conv1_2/Variable:0' shape=(3, 3, 1, 32) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_2/Variable:0' shape=(3, 3, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'fc1_2/Variable:0' shape=(3136, 1024) dtype=float32_ref>,\n",
              " <tf.Variable 'fc2_2/Variable:0' shape=(1024, 128) dtype=float32_ref>,\n",
              " <tf.Variable 'fc3_2/Variable:0' shape=(128, 10) dtype=float32_ref>]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 3
        }
      ]
    },
    {
      "metadata": {
        "id": "3PWkD8lrc9Pj",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "c6fe1ed9-5d38-4fe5-805c-094f5e7f0fe7"
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "learning_rate = tf.placeholder(tf.float32)\n",
        "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
        "keep_prob = tf.placeholder(tf.float32)\n",
        "\n",
        "\n",
        "def weight_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
        "\n",
        "def bias_variable(shape):\n",
        "    init = tf.constant(0.1, shape=shape)\n",
        "    return tf.Variable(init) \n",
        "\n",
        "def conv2d(x, W, stride=1):\n",
        "    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')\n",
        "  \n",
        "def max_pool(x):\n",
        "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
        "\n",
        "#28X28X1 -> 28X28X32 -> 14X14X32\n",
        "with tf.name_scope('conv1_2'):\n",
        "    shape = [3, 3, 1, 32]\n",
        "    Weight_conv1 = weight_variable(shape)\n",
        "    bias_conv1 = bias_variable([32])\n",
        "    relu_conv1 = tf.nn.relu(conv2d(x_image, Weight_conv1) + bias_conv1)  \n",
        "    pool_conv1 = max_pool(relu_conv1)\n",
        "\n",
        "#14X14X32 -> 14X14X64 -> 7X7X64\n",
        "with tf.name_scope('conv2_2'):\n",
        "    shape = [3, 3, 32, 64]\n",
        "    Weight_conv2 = weight_variable(shape)\n",
        "    bias_conv2 = bias_variable([64])\n",
        "    relu_conv2 = tf.nn.relu(conv2d(pool_conv1, Weight_conv2) + bias_conv2)\n",
        "    pool_conv2 = max_pool(relu_conv2)\n",
        "\n",
        "### 7X7X64 -> 1024\n",
        "with tf.name_scope('fc1_2'):\n",
        "    Weight_fc1 = weight_variable([7*7*64, 1024])\n",
        "    bias_fc1 = bias_variable([1024])\n",
        "    pool_flat_fc1 = tf.reshape(pool_conv2, [-1, 7*7*64])\n",
        "    h_fc1 = tf.nn.relu(tf.matmul(pool_flat_fc1, Weight_fc1) + bias_fc1)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
        "\n",
        "# 1024 -> 128\n",
        "with tf.name_scope('fc2_2'):\n",
        "    Weight_fc2 = weight_variable([1024, 128])\n",
        "    bias_fc2 = bias_variable([128])\n",
        "    relu_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, Weight_fc2) + bias_fc2)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc2_drop = tf.nn.dropout(relu_fc2, keep_prob)\n",
        "    \n",
        "# 128 -> 10\n",
        "with tf.name_scope('fc3_2'):\n",
        "    Weight_fc3 = weight_variable([128, 10])\n",
        "    bias_fc3 = bias_variable([10])\n",
        "    y = tf.matmul(h_fc2_drop, Weight_fc3) + bias_fc3\n",
        "    \n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, total_loss_value = sess.run([train_step, cross_entropy, total_loss], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, total_loss: %f' % (step+1, cross_entropy_loss, total_loss_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:0.5}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.073040, total_loss: 0.627549\n",
            "0.97\n",
            "step 10000, entropy loss: 0.011128, total_loss: 0.363452\n",
            "0.99\n",
            "0.9859\n",
            "step 15000, entropy loss: 0.003952, total_loss: 0.237915\n",
            "1.0\n",
            "step 20000, entropy loss: 0.001094, total_loss: 0.168112\n",
            "0.99\n",
            "0.9902\n",
            "step 25000, entropy loss: 0.026915, total_loss: 0.152247\n",
            "1.0\n",
            "step 30000, entropy loss: 0.001301, total_loss: 0.098386\n",
            "1.0\n",
            "0.9908\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "QZBoriUfpVnk",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "准确率有99.08%"
      ]
    },
    {
      "metadata": {
        "id": "dXInPwu7yimY",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "#### 3.0 再增加一层卷积层"
      ]
    },
    {
      "metadata": {
        "id": "DCV6iLyAypoC",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "e38ecabe-cbb2-41b0-982e-9d1849f1c4a5"
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "learning_rate = tf.placeholder(tf.float32)\n",
        "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
        "keep_prob = tf.placeholder(tf.float32)\n",
        "\n",
        "\n",
        "def weight_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
        "\n",
        "def bias_variable(shape):\n",
        "    init = tf.constant(0.1, shape=shape)\n",
        "    return tf.Variable(init) \n",
        "\n",
        "def conv2d(x, W, stride=1):\n",
        "    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')\n",
        "  \n",
        "def max_pool(x):\n",
        "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
        "\n",
        "#28X28X1 -> 28X28X32 -> 14X14X32\n",
        "with tf.name_scope('conv1_2'):\n",
        "    shape = [3, 3, 1, 32]\n",
        "    Weight_conv1 = weight_variable(shape)\n",
        "    bias_conv1 = bias_variable([32])\n",
        "    relu_conv1 = tf.nn.relu(conv2d(x_image, Weight_conv1) + bias_conv1)  \n",
        "    pool_conv1 = max_pool(relu_conv1)\n",
        "\n",
        "#14X14X32 -> 14X14X64\n",
        "with tf.name_scope('conv2_2'):\n",
        "    shape = [3, 3, 32, 64]\n",
        "    Weight_conv2 = weight_variable(shape)\n",
        "    bias_conv2 = bias_variable([64])\n",
        "    relu_conv2 = tf.nn.relu(conv2d(pool_conv1, Weight_conv2) + bias_conv2)\n",
        "\n",
        "#14X14X64 -> 14X14X64 -> 7X7X64\n",
        "with tf.name_scope('conv2_3'):\n",
        "    shape = [3, 3, 64, 64]\n",
        "    Weight_conv3 = weight_variable(shape)\n",
        "    bias_conv3 = bias_variable([64])\n",
        "    relu_conv3 = tf.nn.relu(conv2d(relu_conv2, Weight_conv3) + bias_conv3)\n",
        "    pool_conv2 = max_pool(relu_conv2)\n",
        "    \n",
        "\n",
        "### 7X7X64 -> 1024\n",
        "with tf.name_scope('fc1_2'):\n",
        "    Weight_fc1 = weight_variable([7*7*64, 1024])\n",
        "    bias_fc1 = bias_variable([1024])\n",
        "    pool_flat_fc1 = tf.reshape(pool_conv2, [-1, 7*7*64])\n",
        "    h_fc1 = tf.nn.relu(tf.matmul(pool_flat_fc1, Weight_fc1) + bias_fc1)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
        "\n",
        "# 1024 -> 128\n",
        "with tf.name_scope('fc2_2'):\n",
        "    Weight_fc2 = weight_variable([1024, 128])\n",
        "    bias_fc2 = bias_variable([128])\n",
        "    relu_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, Weight_fc2) + bias_fc2)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc2_drop = tf.nn.dropout(relu_fc2, keep_prob)\n",
        "    \n",
        "# 128 -> 10\n",
        "with tf.name_scope('fc3_2'):\n",
        "    Weight_fc3 = weight_variable([128, 10])\n",
        "    bias_fc3 = bias_variable([10])\n",
        "    y = tf.matmul(h_fc2_drop, Weight_fc3) + bias_fc3\n",
        "    \n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, total_loss_value = sess.run([train_step, cross_entropy, total_loss], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, total_loss: %f' % (step+1, cross_entropy_loss, total_loss_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:0.5}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.056357, total_loss: 0.638354\n",
            "1.0\n",
            "step 10000, entropy loss: 0.064400, total_loss: 0.435460\n",
            "0.99\n",
            "0.9856\n",
            "step 15000, entropy loss: 0.018165, total_loss: 0.265352\n",
            "0.97\n",
            "step 20000, entropy loss: 0.005329, total_loss: 0.180509\n",
            "0.99\n",
            "0.9894\n",
            "step 25000, entropy loss: 0.001364, total_loss: 0.131682\n",
            "1.0\n",
            "step 30000, entropy loss: 0.006206, total_loss: 0.106408\n",
            "0.98\n",
            "0.9911\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "Ob6nai4hA1x5",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "准确率达到99.11%"
      ]
    },
    {
      "metadata": {
        "id": "64IkKe1c-iAT",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 235
        },
        "outputId": "17e8d2d4-3baa-458b-9b3f-a39058778d32"
      },
      "cell_type": "code",
      "source": [
        " tf.get_collection('WEIGHTS')"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "[<tf.Variable 'conv1_2/Variable:0' shape=(3, 3, 1, 32) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_2/Variable:0' shape=(3, 3, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_3/Variable:0' shape=(3, 3, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'conv1_2_1/Variable:0' shape=(3, 3, 1, 32) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_2_1/Variable:0' shape=(3, 3, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_3_1/Variable:0' shape=(3, 3, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'conv1_2_2/Variable:0' shape=(3, 3, 1, 32) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_2_2/Variable:0' shape=(3, 3, 32, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'conv2_3_2/Variable:0' shape=(3, 3, 64, 64) dtype=float32_ref>,\n",
              " <tf.Variable 'fc1_2/Variable:0' shape=(3136, 1024) dtype=float32_ref>,\n",
              " <tf.Variable 'fc2_2/Variable:0' shape=(1024, 128) dtype=float32_ref>,\n",
              " <tf.Variable 'fc3_2/Variable:0' shape=(128, 10) dtype=float32_ref>]"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 5
        }
      ]
    },
    {
      "metadata": {
        "id": "_xTuxux0sdAs",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "#### 4.0 尝试五层卷积，不考虑L2正则"
      ]
    },
    {
      "metadata": {
        "id": "rItvs5eDsysi",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "1ebcad40-bbd6-4254-9cc0-73271f2d208f"
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "learning_rate = tf.placeholder(tf.float32)\n",
        "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
        "keep_prob = tf.placeholder(tf.float32)\n",
        "\n",
        "\n",
        "def weight_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
        "\n",
        "def bias_variable(shape):\n",
        "    init = tf.constant(0.1, shape=shape)\n",
        "    return tf.Variable(init) \n",
        "\n",
        "def conv2d(x, W, stride=1, padding='SAME'):\n",
        "    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)\n",
        "  \n",
        "def max_pool(x):\n",
        "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
        "\n",
        "#28X28X1 -> 28X28X32\n",
        "with tf.name_scope('conv1_2'):\n",
        "    shape = [3, 3, 1, 32]\n",
        "    Weight_conv1 = weight_variable(shape)\n",
        "    bias_conv1 = bias_variable([32])\n",
        "    relu_conv1 = tf.nn.relu(conv2d(x_image, Weight_conv1) + bias_conv1)  \n",
        "\n",
        "\n",
        "#28X28X32 -> 14X14X32\n",
        "with tf.name_scope('conv2_2'):\n",
        "    shape = [3, 3, 32, 32]\n",
        "    Weight_conv2 = weight_variable(shape)\n",
        "    bias_conv2 = bias_variable([32])\n",
        "    relu_conv2 = tf.nn.relu(conv2d(relu_conv1, Weight_conv2) + bias_conv2)\n",
        "    pool_conv2 = max_pool(relu_conv2)\n",
        "    \n",
        "\n",
        "#使用dropout\n",
        "with tf.name_scope('dropout2_1'):\n",
        "    dropout2_1 = tf.nn.dropout(pool_conv2, keep_prob)\n",
        "\n",
        "    \n",
        "#14X14X32 -> 14X14X64\n",
        "with tf.name_scope('conv2_3'):\n",
        "    shape = [3, 3, 32, 64]\n",
        "    Weight_conv3 = weight_variable(shape)\n",
        "    bias_conv3 = bias_variable([64])\n",
        "    relu_conv3 = tf.nn.relu(conv2d(dropout2_1, Weight_conv3) + bias_conv3)\n",
        "    \n",
        "\n",
        "#14X14X64 -> 7X7X64\n",
        "with tf.name_scope('conv2_4'):\n",
        "    shape = [3, 3, 64, 64]\n",
        "    Weight_conv4 = weight_variable(shape)\n",
        "    bias_conv4 = bias_variable([64])\n",
        "    relu_conv4 = tf.nn.relu(conv2d(relu_conv3, Weight_conv4) + bias_conv4)\n",
        "    pool_conv4 = max_pool(relu_conv4)\n",
        "    \n",
        "\n",
        "#使用dropout\n",
        "with tf.name_scope('dropout2_2'):\n",
        "    dropout2_2 = tf.nn.dropout(pool_conv4, keep_prob)\n",
        "\n",
        "    \n",
        "### 7X7X64 -> 1024\n",
        "with tf.name_scope('fc2_1'):\n",
        "    Weight_fc1 = weight_variable([7*7*64, 1024])\n",
        "    bias_fc1 = bias_variable([1024])\n",
        "    pool_flat_fc1 = tf.reshape(dropout2_2, [-1, 7*7*64])\n",
        "    h_fc1 = tf.nn.relu(tf.matmul(pool_flat_fc1, Weight_fc1) + bias_fc1)\n",
        "\n",
        "# 1024 -> 128\n",
        "with tf.name_scope('fc2_2'):\n",
        "    Weight_fc2 = weight_variable([1024, 128])\n",
        "    bias_fc2 = bias_variable([128])\n",
        "    relu_fc2 = tf.nn.relu(tf.matmul(h_fc1, Weight_fc2) + bias_fc2)\n",
        "\n",
        "    \n",
        "#使用dropout\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc2_drop = tf.nn.dropout(relu_fc2, keep_prob)\n",
        "\n",
        "    \n",
        "# 128 -> 10\n",
        "with tf.name_scope('fc2_3'):\n",
        "    Weight_fc3 = weight_variable([128, 10])\n",
        "    bias_fc3 = bias_variable([10])\n",
        "    y = tf.matmul(h_fc2_drop, Weight_fc3) + bias_fc3\n",
        "    \n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "# l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "# total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss = sess.run([train_step, cross_entropy], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f' % (step+1, cross_entropy_loss))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:0.5}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.104473\n",
            "0.97\n",
            "step 10000, entropy loss: 0.144040\n",
            "0.94\n",
            "0.9784\n",
            "step 15000, entropy loss: 0.011284\n",
            "0.98\n",
            "step 20000, entropy loss: 0.015941\n",
            "1.0\n",
            "0.9874\n",
            "step 25000, entropy loss: 0.012486\n",
            "1.0\n",
            "step 30000, entropy loss: 0.022286\n",
            "0.99\n",
            "0.9892\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "metadata": {
        "id": "G6kKxPf3BdLT",
        "colab_type": "text"
      },
      "cell_type": "markdown",
      "source": [
        "### 5X5"
      ]
    },
    {
      "metadata": {
        "id": "-Kn1XuMQBe30",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 290
        },
        "outputId": "7bbcac9d-1196-4fbb-ad19-d493917e0704"
      },
      "cell_type": "code",
      "source": [
        "x = tf.placeholder(tf.float32, [None, 784])\n",
        "y_ = tf.placeholder(tf.float32, [None, 10])\n",
        "learning_rate = tf.placeholder(tf.float32)\n",
        "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
        "keep_prob = tf.placeholder(tf.float32)\n",
        "\n",
        "\n",
        "def weight_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
        "\n",
        "def weight2_variable(shape):\n",
        "    init = tf.truncated_normal(shape, stddev=0.1)\n",
        "    return tf.Variable(init)\n",
        "\n",
        "def bias_variable(shape):\n",
        "    init = tf.constant(0.1, shape=shape)\n",
        "    return tf.Variable(init) \n",
        "\n",
        "def conv2d(x, W, stride=1):\n",
        "    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')\n",
        "  \n",
        "def max_pool(x):\n",
        "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n",
        "\n",
        "#28X28X1 -> 28X28X32 -> 14X14X32\n",
        "with tf.name_scope('conv1_2'):\n",
        "    shape = [3, 3, 1, 32]\n",
        "    Weight_conv1 = weight_variable(shape)\n",
        "    bias_conv1 = bias_variable([32])\n",
        "    relu_conv1 = tf.nn.relu(conv2d(x_image, Weight_conv1) + bias_conv1)  \n",
        "    pool_conv1 = max_pool(relu_conv1)\n",
        "\n",
        "#14X14X32 -> 14X14X64\n",
        "with tf.name_scope('conv2_2'):\n",
        "    shape = [5, 5, 32, 64]\n",
        "    Weight_conv2 = weight_variable(shape)\n",
        "    bias_conv2 = bias_variable([64])\n",
        "    relu_conv2 = tf.nn.relu(conv2d(pool_conv1, Weight_conv2) + bias_conv2)\n",
        "\n",
        "#14X14X64 -> 14X14X64 -> 7X7X64\n",
        "with tf.name_scope('conv2_3'):\n",
        "    shape = [5, 5, 64, 64]\n",
        "    Weight_conv3 = weight_variable(shape)\n",
        "    bias_conv3 = bias_variable([64])\n",
        "    relu_conv3 = tf.nn.relu(conv2d(relu_conv2, Weight_conv3) + bias_conv3)\n",
        "    pool_conv2 = max_pool(relu_conv2)\n",
        "    \n",
        "\n",
        "### 7X7X64 -> 1024\n",
        "with tf.name_scope('fc1_2'):\n",
        "    Weight_fc1 = weight2_variable([7*7*64, 1024])\n",
        "    bias_fc1 = bias_variable([1024])\n",
        "    pool_flat_fc1 = tf.reshape(pool_conv2, [-1, 7*7*64])\n",
        "    h_fc1 = tf.nn.relu(tf.matmul(pool_flat_fc1, Weight_fc1) + bias_fc1)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
        "\n",
        "# 1024 -> 128\n",
        "with tf.name_scope('fc2_2'):\n",
        "    Weight_fc2 = weight2_variable([1024, 128])\n",
        "    bias_fc2 = bias_variable([128])\n",
        "    relu_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, Weight_fc2) + bias_fc2)\n",
        "    \n",
        "# 为了降低过拟合，使用dropout, 一般取0.5\n",
        "# 0.5的时候dropout随机生成的网络结构最多\n",
        "with tf.name_scope('dropout_2'):\n",
        "    h_fc2_drop = tf.nn.dropout(relu_fc2, keep_prob)\n",
        "    \n",
        "# 128 -> 10\n",
        "with tf.name_scope('fc3_2'):\n",
        "    Weight_fc3 = weight2_variable([128, 10])\n",
        "    bias_fc3 = bias_variable([10])\n",
        "    y = tf.matmul(h_fc2_drop, Weight_fc3) + bias_fc3\n",
        "    \n",
        "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
        "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
        "total_loss = cross_entropy + 7e-5*l2_loss\n",
        "train_step = tf.train.AdamOptimizer(1e-4).minimize(total_loss)\n",
        "\n",
        "sess = tf.Session()\n",
        "init_op = tf.global_variables_initializer()\n",
        "sess.run(init_op)\n",
        "\n",
        "for step in range(30000):\n",
        "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
        "    _, cross_entropy_loss, total_loss_value = sess.run([train_step, cross_entropy, total_loss], feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
        "  \n",
        "    if (step+1) % 5000 == 0:\n",
        "        print('step %d, entropy loss: %f, total_loss: %f' % (step+1, cross_entropy_loss, total_loss_value))\n",
        "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
        "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
        "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
        "    \n",
        "    if (step+1) % 10000 == 0:\n",
        "        print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob:0.5}))\n",
        "    \n",
        "sess.close()"
      ],
      "execution_count": 2,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "step 5000, entropy loss: 0.011767, total_loss: 0.025594\n",
            "1.0\n",
            "step 10000, entropy loss: 0.053173, total_loss: 0.066842\n",
            "1.0\n",
            "0.9853\n",
            "step 15000, entropy loss: 0.023236, total_loss: 0.036665\n",
            "0.98\n",
            "step 20000, entropy loss: 0.005418, total_loss: 0.018512\n",
            "0.99\n",
            "0.9882\n",
            "step 25000, entropy loss: 0.011896, total_loss: 0.024589\n",
            "1.0\n",
            "step 30000, entropy loss: 0.005618, total_loss: 0.017846\n",
            "0.99\n",
            "0.99\n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}