{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 载入必要的库\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "import time\n",
    "import gc\n",
    "import math\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "import numpy as np\n",
    "from scipy import ndimage\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "import tensorflow as tf\n",
    "import tensorflow.contrib.slim as slim\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.8.0\n"
     ]
    }
   ],
   "source": [
    "# Check the installed TensorFlow version (notebook was run with 1.8.0)\n",
    "print(tf.__version__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-3-36c26dd26cf8>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /home/fei/.local/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /home/fei/.local/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./input_data/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /home/fei/.local/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./input_data/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/fei/.local/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ./input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./input_data/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/fei/.local/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Load the MNIST dataset (downloaded into data_dir on first run).\n",
    "# one_hot=True returns labels as 10-dimensional one-hot vectors.\n",
    "data_dir = './input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 分析样本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 训练样本数： 55000\n",
      " 测试样本数： 10000\n",
      " 训练集的shape (55000, 784)\n",
      " 训练集的标签的shape (55000, 10)\n",
      " 测试集的shape' is (10000, 784)\n",
      " 测试集的标签的shape (10000, 10)\n"
     ]
    }
   ],
   "source": [
    "# Dataset statistics: sample counts and tensor shapes.\n",
    "print (\" 训练样本数： %d\" % (mnist.train.num_examples))\n",
    "print (\" 测试样本数： %d\" % (mnist.test.num_examples))\n",
    "trainimg   = mnist.train.images\n",
    "trainlabel = mnist.train.labels\n",
    "testimg    = mnist.test.images\n",
    "testlabel  = mnist.test.labels\n",
    "\n",
    "print (\" 训练集的shape %s\"   % (trainimg.shape,))\n",
    "print (\" 训练集的标签的shape %s\" % (trainlabel.shape,))\n",
    "# Fixed: the original message had a stray \"' is\" that the sibling lines lack.\n",
    "print (\" 测试集的shape %s\"    % (testimg.shape,))\n",
    "print (\" 测试集的标签的shape %s\"  % (testlabel.shape,))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1th 训练数据 标签是 3\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAADxxJREFUeJzt3X+QVfV5x/HPw7osCQQUTClBEvwBaRCmWDfYRppYiamaGExTjbbj0Bnqmox2zEymo7WdCU5mGmITrdMakzVQsWMNnSSOlJioRaZMokUWg4CuDehAYeWHhiSAsbjLPv1jj5mN7vne673n3nPZ5/2a2dm757lnzzMXPnvuvd/7PV9zdwGIZ0zZDQAoB+EHgiL8QFCEHwiK8ANBEX4gKMIPBEX4gaAIPxDUSc082Fjr8HEa38xDAqH8n17V637MqrlvXeE3s4sl3SmpTdK33H156v7jNF7n2aJ6DgkgYaOvq/q+NT/tN7M2SXdJukTSHElXm9mcWn8fgOaq5zX/Akk73f1Fd39d0rclLS6mLQCNVk/4p0vaM+znvdm232BmXWbWY2Y9/TpWx+EAFKnh7/a7e7e7d7p7Z7s6Gn04AFWqJ/x9kmYM+/m0bBuAE0A94d8kaZaZnW5mYyVdJWlNMW0BaLSah/rcfcDMbpD0iIaG+la6+7OFdQagoeoa53f3hyU9XFAvAJqIj/cCQRF+ICjCDwRF+IGgCD8QFOEHgiL8QFCEHwiK8ANBEX4gKMIPBEX4gaAIPxAU4QeCIvxAUIQfCIrwA0ERfiAowg8ERfiBoAg/EBThB4Ii/EBQhB8IivADQRF+ICjCDwRF+IGgCD8QVF2r9JrZLklHJB2XNODunUU0heZpmzM7WX/+c6ck6zv+5O5kfVCeWxsjS+779V+cnqyvuv3SZH3KiieT9ejqCn/mj9z9lQJ+D4Am4mk/EFS94XdJj5rZZjPrKqIhAM1R79P+he7eZ2a/JekxM3ve3TcMv0P2R6FLksbpnXUeDkBR6jrzu3tf9v2gpAclLRjhPt3u3unune3qqOdwAApUc/jNbLyZveuN25I+Jml7UY0BaKx6nvZPlfSgmb3xe/7N3X9YSFcAGs7c88dhizbRJvt5tqhpx4vipBmn5dae++JvJ/d94MJvJuvndAwm62MqPHkcVP7+9ewrSWtfnZKsr7zwD3NrA3v7kvueqDb6Oh32Q+kPUGQY6gOCIvxAUIQfCIrwA0ERfiAowg8EVcSsPjTYi7f9QbL+/J/flVtLTamVKk+rHaxwfvj+ryYl608dPSNZTzl3/K5k/dMTDifrLz2S/5mztWenpypHwJkfCIrwA0ERfiAowg8ERfiBoAg/EBThB4JinP8EcMVFP07WU2P5labFVvr7f9cvzkzWH/vjs5P1eqbO/viyq5L1T34jfdnwrpN35tbW6oM19TSacOYHgiL8QFCEHwiK8ANBEX4gKMIPBEX4gaAY528FC+Yly5+dkh7P/v6v8i/PXWk+/fbD70nWj/31u5P1F25rS9Znfyl/ibbjvTuS+477j6eS9fZvpo/dn7iUQd9NH0ruO/0rTyTrowFnfiAowg8ERfiBoAg/EBThB4Ii/EBQhB8IquI4v5mtlPQJSQfdfW62bbKk1ZJmStol6Up3/3nj2hzlntqWLHd9+nPJetu+Q7m1yvPp9yerfTelPyfQ+5F/StYvuefa3Fpbb3JX/Wxper2Cft+crKeuZfC++3cn9x1IVkeHas7890q6+E3bbpa0zt1nSVqX/QzgBFIx/O6+QdKbTy2LJa3Kbq+SdHnBfQFosFpf8091933Z7f2SphbUD4AmqfsNP3d3Kf8icmbWZWY9ZtbTr2P1Hg5AQWoN/wEzmyZJ2feDeXd0925373T3znZ11Hg4AEWrNfxrJC3Jbi+R9FAx7QBolorhN7MHJD0p6f1mttfMlkpaLukiM9sh6aPZzwBOIBXH
+d396pzSooJ7QQ7flP4cQCPHpMe9kpgUL6n7lzOT9bEHjubWXrw1Paf+3mvSnyEYI0vWNx/LP7fVs57AaMEn/ICgCD8QFOEHgiL8QFCEHwiK8ANBcenuUeC1xQtya4d+J/1PXGkob8q2/KE6SeqatCtZn782f+rsgo70sSstL74pMZQnSX+3NDGdWE8n942AMz8QFOEHgiL8QFCEHwiK8ANBEX4gKMIPBMU4/yjw0mdez631fiS9vHelabGD+Vdoq2r/1Fh+PVNyJema79yQrJ+x/slkPTrO/EBQhB8IivADQRF+ICjCDwRF+IGgCD8QFOP8o1ylOfGV/v43cv+uPRcm993zN7OSdcbx68OZHwiK8ANBEX4gKMIPBEX4gaAIPxAU4QeCqjjOb2YrJX1C0kF3n5ttWybpWkkvZ3e7xd0fblSTSHvP6rG5tSumX5bcd+7El5L1z055Ilmf3vbOZD11fnnhyx9I7vmO9U9V+N2oRzVn/nslXTzC9jvcfX72RfCBE0zF8Lv7BkmHmtALgCaq5zX/DWa21cxWmtkphXUEoClqDf/dks6UNF/SPklfy7ujmXWZWY+Z9fTrWI2HA1C0msLv7gfc/bi7D0q6R1LuSpHu3u3une7e2a6OWvsEULCawm9m04b9+ClJ24tpB0CzVDPU94CkCySdamZ7JX1R0gVmNl+SS9ol6boG9gigAcw9fV32Ik20yX6eLWra8VA/++C8ZP3Il15N1h+ftzq3duvBc5P7PnPZjGR9YG9fsh7RRl+nw34ovSBChk/4AUERfiAowg8ERfiBoAg/EBThB4Li0t1VOmnGabm1gT17m9hJc/mmbcn6hJHmew5zxX/lTyl+8Kz0ZNC5f7kwWX/vMob66sGZHwiK8ANBEX4gKMIPBEX4gaAIPxAU4QeCYpw/89ri3IsRSZIWLvvv3Nra3Wcn9512eW9NPY0Gv/zqe3Nrg99ITyfvn/Va0e1gGM78QFCEHwiK8ANBEX4gKMIPBEX4gaAIPxBUmHH+1Hx8SfrMl3+QrPccnplbizyO33bypGT9T5c/klsbo6quMI0G4cwPBEX4gaAIPxAU4QeCIvxAUIQfCIrwA0FVHOc3sxmS7pM0VZJL6nb3O81ssqTVkmZK2iXpSnf/eeNarc/uP8ufVy5JXZMeStbv+MlHc2tn6ic19XRCWJBeovuSf9mQrHedvDO3Nljh3NP+03ck66hPNWf+AUlfcPc5kn5f0vVmNkfSzZLWufssSeuynwGcICqG3933ufvT2e0jknolTZe0WNKq7G6rJF3eqCYBFO9tveY3s5mSzpG0UdJUd9+XlfZr6GUBgBNE1eE3swmSvivp8+5+eHjN3V1D7weMtF+XmfWYWU+/jtXVLIDiVBV+M2vXUPDvd/fvZZsPmNm0rD5N0sGR9nX3bnfvdPfOdnUU0TOAAlQMv5mZpBWSet399mGlNZKWZLeXSEq/XQ6gpVQzpfd8SddI2mZmW7Jtt0haLunfzWyppN2SrmxMi8WYvv5Ist5+Y1uyfuP8x3NrK/7q48l9pzybfrlz0uObk/VK2ubMzq29tOjU5L4TPr4/WV8/795kvdK03NRw3uwfXJfcd/atTyTrqE/F8Lv7j6Tcf+FFxbYDoFn4hB8QFOEHgiL8QFCEHwiK8ANBEX4gKBv6ZG5zTLTJfp615ujg0R+ekaw/Pm91bm1Mhb+hgxpM1m89eG6yXsknJ+VPKT6nI33senuvtP/7v3N9bu0D/7Anue/A3r5kHW+10dfpsB+q6pronPmBoAg/EBThB4Ii/EBQhB8IivADQRF+ICjG+TOVlvD+3TX/m1v7+6lbk/v2+/FkvfKc+PS/UWr/SvseOP5asv71n30oWX/0n89P1qeseDJZR7EY5wdQEeEHgiL8QFCEHwiK8ANBEX4gKMIPBFXNdftDGNizN1l/5rIZubWzvlLffPzeC76VrH94a3pJhJcPTaz52Gf940Cy7pu2JetTxDj+iYozPxAU
4QeCIvxAUIQfCIrwA0ERfiAowg8EVXE+v5nNkHSfpKmSXFK3u99pZsskXSvp5eyut7j7w6nf1crz+YHR4O3M56/mQz4Dkr7g7k+b2bskbTazx7LaHe7+1VobBVCeiuF3932S9mW3j5hZr6TpjW4MQGO9rdf8ZjZT0jmSNmabbjCzrWa20sxOydmny8x6zKynX8fqahZAcaoOv5lNkPRdSZ9398OS7pZ0pqT5Gnpm8LWR9nP3bnfvdPfOdnUU0DKAIlQVfjNr11Dw73f370mSux9w9+PuPijpHkkLGtcmgKJVDL+ZmaQVknrd/fZh26cNu9unJG0vvj0AjVLNu/3nS7pG0jYz25Jtu0XS1WY2X0PDf7skXdeQDgE0RDXv9v9IGvHC8MkxfQCtjU/4AUERfiAowg8ERfiBoAg/EBThB4Ii/EBQhB8IivADQRF+ICjCDwRF+IGgCD8QFOEHgqp46e5CD2b2sqTdwzadKumVpjXw9rRqb63al0RvtSqyt/e5+7uruWNTw/+Wg5v1uHtnaQ0ktGpvrdqXRG+1Kqs3nvYDQRF+IKiyw99d8vFTWrW3Vu1LordaldJbqa/5AZSn7DM/gJKUEn4zu9jM/sfMdprZzWX0kMfMdpnZNjPbYmY9Jfey0swOmtn2Ydsmm9ljZrYj+z7iMmkl9bbMzPqyx26LmV1aUm8zzGy9mT1nZs+a2Y3Z9lIfu0RfpTxuTX/ab2Ztkn4q6SJJeyVtknS1uz/X1EZymNkuSZ3uXvqYsJl9WNJRSfe5+9xs222SDrn78uwP5ynuflOL9LZM0tGyV27OFpSZNnxlaUmXS/oLlfjYJfq6UiU8bmWc+RdI2unuL7r765K+LWlxCX20PHffIOnQmzYvlrQqu71KQ/95mi6nt5bg7vvc/ens9hFJb6wsXepjl+irFGWEf7qkPcN+3qvWWvLbJT1qZpvNrKvsZkYwNVs2XZL2S5paZjMjqLhyczO9aWXplnnsalnxumi84fdWC9399yRdIun67OltS/Kh12ytNFxT1crNzTLCytK/VuZjV+uK10UrI/x9kmYM+/m0bFtLcPe+7PtBSQ+q9VYfPvDGIqnZ94Ml9/NrrbRy80grS6sFHrtWWvG6jPBvkjTLzE43s7GSrpK0poQ+3sLMxmdvxMjMxkv6mFpv9eE1kpZkt5dIeqjEXn5Dq6zcnLeytEp+7FpuxWt3b/qXpEs19I7/C5L+towecvo6Q9Iz2dezZfcm6QENPQ3s19B7I0slTZG0TtIOSf8paXIL9favkrZJ2qqhoE0rqbeFGnpKv1XSluzr0rIfu0RfpTxufMIPCIo3/ICgCD8QFOEHgiL8QFCEHwiK8ANBEX4gKMIPBPX/EhqoeSQulYEAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Display one training image and print its label\n",
    "\n",
    "n = 1\n",
    "curr_img   = np.reshape(trainimg[n, :], (28, 28)) # 28 by 28 matrix \n",
    "curr_label = np.argmax(trainlabel[n, :] ) # index of the one-hot label vector\n",
    "plt.imshow(curr_img)\n",
    "print (\"\" + str(n) + \"th 训练数据 \" + \"标签是 \" + str(curr_label))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型建立"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "采用 slim 建立模型\n",
    "    -- 常用设置参数：\n",
    "* padding : 补零的方式，例如'SAME'\n",
    "* activation_fn : 激活函数，默认是nn.relu\n",
    "* normalizer_fn : 正则化函数，默认为None，这里可以设置为batch normalization，函数用slim.batch_norm\n",
    "* normalizer_params : slim.batch_norm中的参数，以字典形式表示\n",
    "* weights_initializer : 权重的初始化器，initializers.xavier_initializer()\n",
    "* weights_regularizer : 权重的正则化器，一般不怎么用到\n",
    "* biases_initializer : 如果之前有batch norm，那么这个及下面一个就不用管了\n",
    "* biases_regularizer : \n",
    "* trainable : 参数是否可训练，默认为True\n",
     "* scope : 你绘制的网络结构图中它属于哪个范围内"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model hyper-parameters, shared with train() which overrides some of them.\n",
    "# NOTE: the original cell declared `global learing_rate, _padding, ...` at\n",
    "# module level, which is a no-op in Python (and `learing_rate` was a typo,\n",
    "# never used); plain module-level assignments are sufficient.\n",
    "_padding = 'SAME'\n",
    "_num_outputs = 32                 # number of filters in the first conv layer\n",
    "_kernel_size = [5, 5]             # conv filter spatial size\n",
    "_activation_fn = tf.nn.relu       # activation function\n",
    "_normalizer_fn = slim.batch_norm  # normalizer applied inside the conv layers\n",
    "_normalizer_params = {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
    "\n",
    "def build_tf_model(x, keep_prob):\n",
    "    \"\"\"Build a two-conv-layer CNN for MNIST classification.\n",
    "\n",
    "    Args:\n",
    "        x: float32 tensor of shape [None, 784], flattened 28x28 images.\n",
    "        keep_prob: scalar placeholder, dropout keep probability.\n",
    "\n",
    "    Returns:\n",
    "        Logits tensor of shape [batch, 10] (no softmax applied).\n",
    "    \"\"\"\n",
    "    # Reshape the flat 784-vector back into a 28x28 single-channel image.\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "    print('x_image shape = ', x_image.shape)\n",
    "\n",
    "    print('====第一层卷积====')\n",
    "    print('采用卷积核： %d*%d*%d ' % (_kernel_size[0],_kernel_size[1],_num_outputs))\n",
    "    print('激活函数：%s' % _activation_fn.__name__)\n",
    "    print('正则化因子：%s ==> %s' % (_normalizer_fn.__name__, _normalizer_params))\n",
    "\n",
    "    h_conv1 = slim.conv2d(x_image, _num_outputs, _kernel_size,\n",
    "                            padding=_padding,\n",
    "                            activation_fn=_activation_fn,\n",
    "                            normalizer_fn=_normalizer_fn,\n",
    "                            normalizer_params=_normalizer_params,\n",
    "                            scope='conv1')\n",
    "    print('====第一层池化====')\n",
    "    print('采用最大值池化')\n",
    "    print('池化核 2 X 2')\n",
    "    print('步长采用默认值 2')\n",
    "    print('补齐采用默认值 VALID')\n",
    "    h_pool1 = slim.max_pool2d(h_conv1, [2,2], stride=2, padding='VALID',scope='pool1')\n",
    "\n",
    "    # Second conv layer doubles the filter count.\n",
    "    _num_outputs_2 = _num_outputs * 2\n",
    "    print('====第二层卷积====')\n",
    "    print('采用卷积核： %d*%d*%d' % (_kernel_size[0],_kernel_size[1],_num_outputs_2))\n",
    "    print('其他参数同上层')\n",
    "    h_conv2 = slim.conv2d(h_pool1, _num_outputs_2, _kernel_size,\n",
    "                            padding=_padding,\n",
    "                            activation_fn=_activation_fn,\n",
    "                            normalizer_fn=_normalizer_fn,\n",
    "                            normalizer_params=_normalizer_params,\n",
    "                            scope='conv2')\n",
    "    print('====第二层池化====')\n",
    "    print('参数和上层一样')\n",
    "    # Use the `slim` alias consistently (original mixed in tf.contrib.slim.*).\n",
    "    h_pool2 = slim.max_pool2d(h_conv2, [2,2], stride=2, padding='VALID',scope='pool2')\n",
    "\n",
    "    # Average-pool over the full remaining spatial extent (global avg pooling).\n",
    "    h_pool2_flat = slim.avg_pool2d(h_pool2, h_pool2.shape[1:3],\n",
    "                        stride=[1, 1], padding='VALID')\n",
    "    # 1x1 convolutions act as fully connected layers on the 1x1 feature maps.\n",
    "    h_fc1 = slim.conv2d(h_pool2_flat, 1024, [1,1], activation_fn=tf.nn.relu,scope='fc1')\n",
    "\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "    # Map the 1024 features to 10 classes, one for each digit\n",
    "    y = tf.squeeze(slim.conv2d(h_fc1_drop, 10, [1,1], activation_fn=None,scope='fc2'))\n",
    "    return y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(lr=0.01, num_outputs=32, kernel_size=[5,5], batch = 3000):\n",
    "    \"\"\"Build the model graph and run SGD training on MNIST.\n",
    "\n",
    "    Args:\n",
    "        lr: learning rate fed to the GradientDescentOptimizer.\n",
    "        num_outputs: filter count for the first conv layer (copied into the\n",
    "            module globals consumed by build_tf_model).\n",
    "        kernel_size: [h, w] conv kernel size (likewise copied into globals).\n",
    "        batch: number of training steps; mini-batch size is fixed at 100.\n",
    "    \"\"\"\n",
    "    learning_rate = tf.placeholder(tf.float32)\n",
    "    x = tf.placeholder(tf.float32, [None, 784])\n",
    "    y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "    keep_prob = tf.placeholder('float')\n",
    "\n",
    "    # Propagate the requested conv settings to build_tf_model's globals.\n",
    "    # (_kernel_size is mutated in place, so no `global` needed for it.)\n",
    "    _kernel_size[0] = kernel_size[0]\n",
    "    _kernel_size[1] = kernel_size[1]\n",
    "    global _num_outputs\n",
    "    _num_outputs = num_outputs\n",
    "    y = build_tf_model(x, keep_prob)\n",
    "\n",
    "    cross_entropy = tf.reduce_mean(\n",
    "        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
    "\n",
    "    # Small L2 penalty over every trainable variable.\n",
    "    l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "    total_loss = cross_entropy + 7e-5*l2_loss\n",
    "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "    # Build the evaluation ops ONCE, before the loop. The original created\n",
    "    # new tf.equal/tf.reduce_mean nodes on every iteration, growing the graph\n",
    "    # and slowing training, and also ran `accuracy` each step discarding the\n",
    "    # result.\n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "    sess = tf.Session()\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "\n",
    "    # Track wall-clock training time.\n",
    "    start = time.time()\n",
    "    for step in range(batch):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "        _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "            [train_step, cross_entropy, l2_loss, total_loss], \n",
    "            feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:1.0})\n",
    "        if (step+1) % 100 == 0:\n",
    "            print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "                  (step+1, loss, l2_loss_value, total_loss_value))\n",
    "\n",
    "    # Evaluate once on the full test set after training.\n",
    "    test_accuracy = sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                  y_: mnist.test.labels, keep_prob:1.0})\n",
    "    end = time.time()\n",
    "    print(\"Training Used %d sec.\" % (end-start))\n",
    "    print('test accuracy is: ', test_accuracy)\n",
    "    sess.close()  # release session/graph resources"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 2.110455, l2_loss: 92.201408, total loss: 2.116909\n",
      "step 200, entropy loss: 1.990993, l2_loss: 92.606613, total loss: 1.997475\n",
      "step 300, entropy loss: 1.743042, l2_loss: 93.193092, total loss: 1.749565\n",
      "step 400, entropy loss: 1.549403, l2_loss: 93.889687, total loss: 1.555975\n",
      "step 500, entropy loss: 1.515466, l2_loss: 94.642006, total loss: 1.522091\n",
      "step 600, entropy loss: 1.416823, l2_loss: 95.458488, total loss: 1.423505\n",
      "step 700, entropy loss: 1.173006, l2_loss: 96.291832, total loss: 1.179746\n",
      "step 800, entropy loss: 1.153850, l2_loss: 97.140129, total loss: 1.160650\n",
      "step 900, entropy loss: 1.151383, l2_loss: 98.011253, total loss: 1.158244\n",
      "step 1000, entropy loss: 1.097961, l2_loss: 98.884575, total loss: 1.104883\n",
      "step 1100, entropy loss: 0.883353, l2_loss: 99.756256, total loss: 0.890336\n",
      "step 1200, entropy loss: 0.887016, l2_loss: 100.645859, total loss: 0.894061\n",
      "step 1300, entropy loss: 0.872283, l2_loss: 101.555939, total loss: 0.879392\n",
      "step 1400, entropy loss: 0.797418, l2_loss: 102.462708, total loss: 0.804591\n",
      "step 1500, entropy loss: 0.642883, l2_loss: 103.363434, total loss: 0.650118\n",
      "step 1600, entropy loss: 0.613567, l2_loss: 104.243263, total loss: 0.620864\n",
      "step 1700, entropy loss: 0.716784, l2_loss: 105.117752, total loss: 0.724143\n",
      "step 1800, entropy loss: 0.630758, l2_loss: 105.972748, total loss: 0.638176\n",
      "step 1900, entropy loss: 0.501444, l2_loss: 106.797386, total loss: 0.508919\n",
      "step 2000, entropy loss: 0.521766, l2_loss: 107.603088, total loss: 0.529298\n",
      "step 2100, entropy loss: 0.489896, l2_loss: 108.389420, total loss: 0.497483\n",
      "step 2200, entropy loss: 0.487697, l2_loss: 109.161659, total loss: 0.495338\n",
      "step 2300, entropy loss: 0.445097, l2_loss: 109.907150, total loss: 0.452790\n",
      "step 2400, entropy loss: 0.440263, l2_loss: 110.636597, total loss: 0.448008\n",
      "step 2500, entropy loss: 0.386304, l2_loss: 111.329994, total loss: 0.394097\n",
      "step 2600, entropy loss: 0.368885, l2_loss: 112.006172, total loss: 0.376726\n",
      "step 2700, entropy loss: 0.400818, l2_loss: 112.668823, total loss: 0.408705\n",
      "step 2800, entropy loss: 0.351824, l2_loss: 113.314659, total loss: 0.359756\n",
      "step 2900, entropy loss: 0.392797, l2_loss: 113.915733, total loss: 0.400771\n",
      "step 3000, entropy loss: 0.399264, l2_loss: 114.521362, total loss: 0.407281\n",
      "Training Used 1535 sec.\n",
      "test accuracy is:  0.9428\n"
     ]
    }
   ],
   "source": [
    "# Baseline run: learning rate 0.01 (train() feeds keep_prob=1.0, i.e. no dropout)\n",
    "lr = 0.01\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],3000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 2.328616, l2_loss: 92.454453, total loss: 2.335088\n",
      "step 200, entropy loss: 2.370786, l2_loss: 92.452827, total loss: 2.377257\n",
      "step 300, entropy loss: 2.334073, l2_loss: 92.451248, total loss: 2.340544\n",
      "step 400, entropy loss: 2.293372, l2_loss: 92.449692, total loss: 2.299844\n",
      "step 500, entropy loss: 2.329888, l2_loss: 92.448364, total loss: 2.336360\n",
      "step 600, entropy loss: 2.328345, l2_loss: 92.447090, total loss: 2.334816\n",
      "step 700, entropy loss: 2.343710, l2_loss: 92.445923, total loss: 2.350182\n",
      "step 800, entropy loss: 2.323253, l2_loss: 92.444847, total loss: 2.329725\n",
      "step 900, entropy loss: 2.331797, l2_loss: 92.443825, total loss: 2.338268\n",
      "step 1000, entropy loss: 2.285145, l2_loss: 92.442787, total loss: 2.291615\n",
      "step 1100, entropy loss: 2.305106, l2_loss: 92.441811, total loss: 2.311577\n",
      "step 1200, entropy loss: 2.326123, l2_loss: 92.440910, total loss: 2.332593\n",
      "step 1300, entropy loss: 2.319935, l2_loss: 92.440056, total loss: 2.326406\n",
      "step 1400, entropy loss: 2.297013, l2_loss: 92.439278, total loss: 2.303484\n",
      "step 1500, entropy loss: 2.301268, l2_loss: 92.438568, total loss: 2.307739\n",
      "step 1600, entropy loss: 2.296845, l2_loss: 92.437881, total loss: 2.303315\n",
      "step 1700, entropy loss: 2.333186, l2_loss: 92.437202, total loss: 2.339657\n",
      "step 1800, entropy loss: 2.296329, l2_loss: 92.436630, total loss: 2.302800\n",
      "step 1900, entropy loss: 2.292596, l2_loss: 92.436096, total loss: 2.299067\n",
      "step 2000, entropy loss: 2.309192, l2_loss: 92.435463, total loss: 2.315662\n",
      "step 2100, entropy loss: 2.332289, l2_loss: 92.434868, total loss: 2.338759\n",
      "step 2200, entropy loss: 2.260415, l2_loss: 92.434258, total loss: 2.266886\n",
      "step 2300, entropy loss: 2.292287, l2_loss: 92.433792, total loss: 2.298758\n",
      "step 2400, entropy loss: 2.306202, l2_loss: 92.433380, total loss: 2.312672\n",
      "step 2500, entropy loss: 2.309990, l2_loss: 92.432846, total loss: 2.316460\n",
      "step 2600, entropy loss: 2.300002, l2_loss: 92.432571, total loss: 2.306473\n",
      "step 2700, entropy loss: 2.260899, l2_loss: 92.432274, total loss: 2.267370\n",
      "step 2800, entropy loss: 2.308982, l2_loss: 92.431885, total loss: 2.315452\n",
      "step 2900, entropy loss: 2.247967, l2_loss: 92.431648, total loss: 2.254437\n",
      "step 3000, entropy loss: 2.252434, l2_loss: 92.431297, total loss: 2.258904\n",
      "Training Used 1443 sec.\n",
      "test accuracy is:  0.1392\n"
     ]
    }
   ],
   "source": [
    "# Learning rate 0.0001 — too small; training barely progresses (13.9% test acc).\n",
    "# NOTE(review): the original comment said \"keep_prob 0.1\", but train() always\n",
    "# feeds keep_prob=1.0, so no dropout is actually applied.\n",
    "lr = 0.0001\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],3000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 2.301980, l2_loss: 92.334869, total loss: 2.308444\n",
      "step 200, entropy loss: 2.307870, l2_loss: 92.327446, total loss: 2.314333\n",
      "step 300, entropy loss: 2.298083, l2_loss: 92.323418, total loss: 2.304545\n",
      "step 400, entropy loss: 2.261318, l2_loss: 92.322830, total loss: 2.267781\n",
      "step 500, entropy loss: 2.231346, l2_loss: 92.326782, total loss: 2.237809\n",
      "step 600, entropy loss: 2.244617, l2_loss: 92.334343, total loss: 2.251081\n",
      "step 700, entropy loss: 2.182878, l2_loss: 92.343636, total loss: 2.189342\n",
      "step 800, entropy loss: 2.205385, l2_loss: 92.356369, total loss: 2.211850\n",
      "step 900, entropy loss: 2.148927, l2_loss: 92.372185, total loss: 2.155393\n",
      "step 1000, entropy loss: 2.093712, l2_loss: 92.389084, total loss: 2.100179\n",
      "step 1100, entropy loss: 2.176708, l2_loss: 92.409355, total loss: 2.183177\n",
      "step 1200, entropy loss: 2.119422, l2_loss: 92.432739, total loss: 2.125892\n",
      "step 1300, entropy loss: 2.093223, l2_loss: 92.458084, total loss: 2.099695\n",
      "step 1400, entropy loss: 2.145795, l2_loss: 92.485748, total loss: 2.152269\n",
      "step 1500, entropy loss: 2.080028, l2_loss: 92.515472, total loss: 2.086504\n",
      "step 1600, entropy loss: 2.064231, l2_loss: 92.547951, total loss: 2.070709\n",
      "step 1700, entropy loss: 2.020343, l2_loss: 92.583099, total loss: 2.026824\n",
      "step 1800, entropy loss: 2.073867, l2_loss: 92.618660, total loss: 2.080351\n",
      "step 1900, entropy loss: 1.909743, l2_loss: 92.655533, total loss: 1.916229\n",
      "step 2000, entropy loss: 2.017369, l2_loss: 92.696228, total loss: 2.023858\n",
      "step 2100, entropy loss: 1.982638, l2_loss: 92.736160, total loss: 1.989129\n",
      "step 2200, entropy loss: 2.024822, l2_loss: 92.779015, total loss: 2.031317\n",
      "step 2300, entropy loss: 1.856440, l2_loss: 92.821815, total loss: 1.862938\n",
      "step 2400, entropy loss: 1.878605, l2_loss: 92.867035, total loss: 1.885106\n",
      "step 2500, entropy loss: 1.835922, l2_loss: 92.912796, total loss: 1.842426\n",
      "step 2600, entropy loss: 1.844263, l2_loss: 92.960526, total loss: 1.850770\n",
      "step 2700, entropy loss: 1.896237, l2_loss: 93.010307, total loss: 1.902748\n",
      "step 2800, entropy loss: 1.981426, l2_loss: 93.059380, total loss: 1.987940\n",
      "step 2900, entropy loss: 1.737025, l2_loss: 93.108856, total loss: 1.743542\n",
      "step 3000, entropy loss: 1.914548, l2_loss: 93.160019, total loss: 1.921070\n",
      "Training Used 1483 sec.\n",
      "test accuracy is:  0.3625\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception ignored in: <bound method BaseSession.__del__ of <tensorflow.python.client.session.Session object at 0x7ff2ca88ba20>>\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/fei/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\", line 707, in __del__\n",
      "    tf_session.TF_DeleteSession(self._session)\n",
      "KeyboardInterrupt: \n"
     ]
    }
   ],
   "source": [
    "# Learning rate 0.001 — still too small for 3000 steps (36% test acc).\n",
    "# NOTE(review): the original comment said \"keep_prob 0.1\", but train() always\n",
    "# feeds keep_prob=1.0, so no dropout is actually applied.\n",
    "lr = 0.001\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],3000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 1.224363, l2_loss: 98.049553, total loss: 1.231226\n",
      "step 200, entropy loss: 0.754319, l2_loss: 106.141495, total loss: 0.761748\n",
      "step 300, entropy loss: 0.597242, l2_loss: 113.612045, total loss: 0.605195\n",
      "step 400, entropy loss: 0.425028, l2_loss: 120.217667, total loss: 0.433443\n",
      "step 500, entropy loss: 0.283513, l2_loss: 125.807785, total loss: 0.292320\n",
      "step 600, entropy loss: 0.237633, l2_loss: 130.411453, total loss: 0.246762\n",
      "step 700, entropy loss: 0.208391, l2_loss: 134.441696, total loss: 0.217802\n",
      "step 800, entropy loss: 0.291376, l2_loss: 137.902939, total loss: 0.301029\n",
      "step 900, entropy loss: 0.103964, l2_loss: 140.769760, total loss: 0.113818\n",
      "step 1000, entropy loss: 0.131030, l2_loss: 143.547028, total loss: 0.141078\n",
      "step 1100, entropy loss: 0.137400, l2_loss: 146.020569, total loss: 0.147622\n",
      "step 1200, entropy loss: 0.253882, l2_loss: 148.454803, total loss: 0.264274\n",
      "step 1300, entropy loss: 0.133665, l2_loss: 150.763809, total loss: 0.144218\n",
      "step 1400, entropy loss: 0.104953, l2_loss: 152.718033, total loss: 0.115643\n",
      "step 1500, entropy loss: 0.151749, l2_loss: 154.663361, total loss: 0.162576\n",
      "step 1600, entropy loss: 0.103185, l2_loss: 156.451630, total loss: 0.114136\n",
      "step 1700, entropy loss: 0.102914, l2_loss: 158.240585, total loss: 0.113991\n",
      "step 1800, entropy loss: 0.189673, l2_loss: 159.947800, total loss: 0.200869\n",
      "step 1900, entropy loss: 0.137893, l2_loss: 161.536942, total loss: 0.149201\n",
      "step 2000, entropy loss: 0.135292, l2_loss: 163.083267, total loss: 0.146708\n",
      "step 2100, entropy loss: 0.155817, l2_loss: 164.546616, total loss: 0.167335\n",
      "step 2200, entropy loss: 0.092595, l2_loss: 166.076218, total loss: 0.104221\n",
      "step 2300, entropy loss: 0.107898, l2_loss: 167.448349, total loss: 0.119620\n",
      "step 2400, entropy loss: 0.040225, l2_loss: 168.824615, total loss: 0.052042\n",
      "step 2500, entropy loss: 0.050173, l2_loss: 170.119095, total loss: 0.062081\n",
      "step 2600, entropy loss: 0.087911, l2_loss: 171.368927, total loss: 0.099907\n",
      "step 2700, entropy loss: 0.074098, l2_loss: 172.551025, total loss: 0.086176\n",
      "step 2800, entropy loss: 0.083671, l2_loss: 173.794708, total loss: 0.095837\n",
      "step 2900, entropy loss: 0.041279, l2_loss: 175.021652, total loss: 0.053530\n",
      "step 3000, entropy loss: 0.028976, l2_loss: 176.167267, total loss: 0.041308\n",
      "Training Used 1424 sec.\n",
      "test accuracy is:  0.9755\n"
     ]
    }
   ],
   "source": [
    "# Learning rate 0.1 — best result in this sweep (97.55% test acc).\n",
    "# NOTE(review): the original comment said \"keep_prob 0.1\", but train() always\n",
    "# feeds keep_prob=1.0, so no dropout is actually applied.\n",
    "lr = 0.1\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],3000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 1.261891, l2_loss: 98.257385, total loss: 1.268769\n",
      "step 200, entropy loss: 0.637752, l2_loss: 106.671547, total loss: 0.645219\n",
      "step 300, entropy loss: 0.648672, l2_loss: 114.528450, total loss: 0.656689\n",
      "step 400, entropy loss: 0.311058, l2_loss: 121.153397, total loss: 0.319539\n",
      "step 500, entropy loss: 0.217598, l2_loss: 126.649269, total loss: 0.226464\n",
      "step 600, entropy loss: 0.280603, l2_loss: 131.085403, total loss: 0.289779\n",
      "step 700, entropy loss: 0.121107, l2_loss: 134.765671, total loss: 0.130541\n",
      "step 800, entropy loss: 0.176346, l2_loss: 138.111877, total loss: 0.186013\n",
      "step 900, entropy loss: 0.151843, l2_loss: 141.130768, total loss: 0.161722\n",
      "step 1000, entropy loss: 0.311328, l2_loss: 143.792877, total loss: 0.321393\n",
      "step 1100, entropy loss: 0.219907, l2_loss: 146.302979, total loss: 0.230148\n",
      "step 1200, entropy loss: 0.123239, l2_loss: 148.661392, total loss: 0.133646\n",
      "step 1300, entropy loss: 0.122952, l2_loss: 150.862717, total loss: 0.133512\n",
      "step 1400, entropy loss: 0.184463, l2_loss: 152.965103, total loss: 0.195170\n",
      "step 1500, entropy loss: 0.063199, l2_loss: 154.883957, total loss: 0.074041\n",
      "step 1600, entropy loss: 0.121646, l2_loss: 156.704376, total loss: 0.132616\n",
      "step 1700, entropy loss: 0.118751, l2_loss: 158.497971, total loss: 0.129846\n",
      "step 1800, entropy loss: 0.120546, l2_loss: 160.271973, total loss: 0.131765\n",
      "step 1900, entropy loss: 0.067336, l2_loss: 161.872147, total loss: 0.078667\n",
      "step 2000, entropy loss: 0.036294, l2_loss: 163.500275, total loss: 0.047739\n",
      "step 2100, entropy loss: 0.050606, l2_loss: 165.025223, total loss: 0.062158\n",
      "step 2200, entropy loss: 0.050449, l2_loss: 166.418045, total loss: 0.062098\n",
      "step 2300, entropy loss: 0.142434, l2_loss: 167.875885, total loss: 0.154186\n",
      "step 2400, entropy loss: 0.055594, l2_loss: 169.284866, total loss: 0.067444\n",
      "step 2500, entropy loss: 0.061096, l2_loss: 170.555466, total loss: 0.073035\n",
      "step 2600, entropy loss: 0.134873, l2_loss: 171.827835, total loss: 0.146901\n",
      "step 2700, entropy loss: 0.044360, l2_loss: 173.111649, total loss: 0.056478\n",
      "step 2800, entropy loss: 0.212401, l2_loss: 174.422302, total loss: 0.224610\n",
      "step 2900, entropy loss: 0.117035, l2_loss: 175.595428, total loss: 0.129327\n",
      "step 3000, entropy loss: 0.040303, l2_loss: 176.748596, total loss: 0.052676\n",
      "step 3100, entropy loss: 0.045907, l2_loss: 177.860367, total loss: 0.058357\n",
      "step 3200, entropy loss: 0.088479, l2_loss: 178.967285, total loss: 0.101006\n",
      "step 3300, entropy loss: 0.065206, l2_loss: 180.058685, total loss: 0.077810\n",
      "step 3400, entropy loss: 0.079080, l2_loss: 181.168991, total loss: 0.091762\n",
      "step 3500, entropy loss: 0.109684, l2_loss: 182.199692, total loss: 0.122438\n",
      "step 3600, entropy loss: 0.044117, l2_loss: 183.279785, total loss: 0.056946\n",
      "step 3700, entropy loss: 0.041220, l2_loss: 184.255081, total loss: 0.054118\n",
      "step 3800, entropy loss: 0.077822, l2_loss: 185.195938, total loss: 0.090786\n",
      "step 3900, entropy loss: 0.103737, l2_loss: 186.030136, total loss: 0.116759\n",
      "step 4000, entropy loss: 0.010447, l2_loss: 186.977081, total loss: 0.023536\n",
      "Training Used 2319 sec.\n",
      "test accuracy is:  0.9834\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception ignored in: <bound method BaseSession.__del__ of <tensorflow.python.client.session.Session object at 0x7ff3274d0fd0>>\n",
      "Traceback (most recent call last):\n",
      "  File \"/home/fei/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\", line 707, in __del__\n",
      "    tf_session.TF_DeleteSession(self._session)\n",
      "KeyboardInterrupt: \n"
     ]
    }
   ],
   "source": [
    "#keep_prob 0.1\n",
    "lr = 0.1\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],4000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 1.698270, l2_loss: 94.402977, total loss: 1.704878\n",
      "step 200, entropy loss: 1.313770, l2_loss: 98.165131, total loss: 1.320642\n",
      "step 300, entropy loss: 0.772692, l2_loss: 102.149277, total loss: 0.779842\n",
      "step 400, entropy loss: 0.772021, l2_loss: 106.291725, total loss: 0.779461\n",
      "step 500, entropy loss: 0.626804, l2_loss: 110.351898, total loss: 0.634529\n",
      "step 600, entropy loss: 0.547885, l2_loss: 114.081459, total loss: 0.555870\n",
      "step 700, entropy loss: 0.393907, l2_loss: 117.410362, total loss: 0.402126\n",
      "step 800, entropy loss: 0.340416, l2_loss: 120.418442, total loss: 0.348845\n",
      "step 900, entropy loss: 0.342473, l2_loss: 123.079605, total loss: 0.351089\n",
      "step 1000, entropy loss: 0.331377, l2_loss: 125.481087, total loss: 0.340161\n",
      "step 1100, entropy loss: 0.174858, l2_loss: 127.712509, total loss: 0.183798\n",
      "step 1200, entropy loss: 0.242595, l2_loss: 129.724960, total loss: 0.251675\n",
      "step 1300, entropy loss: 0.268842, l2_loss: 131.554230, total loss: 0.278051\n",
      "step 1400, entropy loss: 0.158754, l2_loss: 133.327423, total loss: 0.168087\n",
      "step 1500, entropy loss: 0.128664, l2_loss: 134.931931, total loss: 0.138110\n",
      "step 1600, entropy loss: 0.227570, l2_loss: 136.447174, total loss: 0.237122\n",
      "step 1700, entropy loss: 0.237511, l2_loss: 137.885223, total loss: 0.247163\n",
      "step 1800, entropy loss: 0.119502, l2_loss: 139.237076, total loss: 0.129248\n",
      "step 1900, entropy loss: 0.165394, l2_loss: 140.533844, total loss: 0.175232\n",
      "step 2000, entropy loss: 0.268249, l2_loss: 141.810486, total loss: 0.278176\n",
      "step 2100, entropy loss: 0.129731, l2_loss: 142.974930, total loss: 0.139739\n",
      "step 2200, entropy loss: 0.095363, l2_loss: 144.169373, total loss: 0.105455\n",
      "step 2300, entropy loss: 0.116298, l2_loss: 145.256699, total loss: 0.126466\n",
      "step 2400, entropy loss: 0.133806, l2_loss: 146.360214, total loss: 0.144051\n",
      "step 2500, entropy loss: 0.111020, l2_loss: 147.349976, total loss: 0.121334\n",
      "step 2600, entropy loss: 0.247563, l2_loss: 148.352264, total loss: 0.257947\n",
      "step 2700, entropy loss: 0.129320, l2_loss: 149.323700, total loss: 0.139772\n",
      "step 2800, entropy loss: 0.123399, l2_loss: 150.236221, total loss: 0.133915\n",
      "step 2900, entropy loss: 0.108318, l2_loss: 151.142105, total loss: 0.118898\n",
      "step 3000, entropy loss: 0.088073, l2_loss: 152.019623, total loss: 0.098714\n",
      "step 3100, entropy loss: 0.083234, l2_loss: 152.874100, total loss: 0.093935\n",
      "step 3200, entropy loss: 0.191462, l2_loss: 153.732880, total loss: 0.202224\n",
      "step 3300, entropy loss: 0.115813, l2_loss: 154.553986, total loss: 0.126632\n",
      "step 3400, entropy loss: 0.077230, l2_loss: 155.293488, total loss: 0.088100\n",
      "step 3500, entropy loss: 0.071794, l2_loss: 156.031326, total loss: 0.082717\n",
      "step 3600, entropy loss: 0.100265, l2_loss: 156.831238, total loss: 0.111243\n",
      "step 3700, entropy loss: 0.137586, l2_loss: 157.577713, total loss: 0.148617\n",
      "step 3800, entropy loss: 0.118345, l2_loss: 158.300919, total loss: 0.129426\n",
      "step 3900, entropy loss: 0.102000, l2_loss: 159.004303, total loss: 0.113131\n",
      "step 4000, entropy loss: 0.061439, l2_loss: 159.733521, total loss: 0.072620\n",
      "Training Used 2290 sec.\n",
      "test accuracy is:  0.9785\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 0.1\n",
    "lr = 0.05\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],4000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 1.268941, l2_loss: 97.670380, total loss: 1.275778\n",
      "step 200, entropy loss: 0.928815, l2_loss: 106.108353, total loss: 0.936242\n",
      "step 300, entropy loss: 0.538459, l2_loss: 114.246521, total loss: 0.546456\n",
      "step 400, entropy loss: 0.409055, l2_loss: 120.986549, total loss: 0.417524\n",
      "step 500, entropy loss: 0.456124, l2_loss: 126.408813, total loss: 0.464973\n",
      "step 600, entropy loss: 0.235623, l2_loss: 130.873199, total loss: 0.244784\n",
      "step 700, entropy loss: 0.283143, l2_loss: 134.936707, total loss: 0.292589\n",
      "step 800, entropy loss: 0.193856, l2_loss: 138.246796, total loss: 0.203534\n",
      "step 900, entropy loss: 0.199787, l2_loss: 141.206573, total loss: 0.209672\n",
      "step 1000, entropy loss: 0.156996, l2_loss: 143.897049, total loss: 0.167068\n",
      "step 1100, entropy loss: 0.225358, l2_loss: 146.439285, total loss: 0.235609\n",
      "step 1200, entropy loss: 0.113479, l2_loss: 148.775253, total loss: 0.123893\n",
      "step 1300, entropy loss: 0.162066, l2_loss: 150.984924, total loss: 0.172635\n",
      "step 1400, entropy loss: 0.126600, l2_loss: 153.062363, total loss: 0.137314\n",
      "step 1500, entropy loss: 0.156408, l2_loss: 154.917679, total loss: 0.167252\n",
      "step 1600, entropy loss: 0.112664, l2_loss: 156.759323, total loss: 0.123637\n",
      "step 1700, entropy loss: 0.077832, l2_loss: 158.571625, total loss: 0.088932\n",
      "step 1800, entropy loss: 0.141274, l2_loss: 160.227203, total loss: 0.152490\n",
      "step 1900, entropy loss: 0.063546, l2_loss: 161.860062, total loss: 0.074876\n",
      "step 2000, entropy loss: 0.068466, l2_loss: 163.400101, total loss: 0.079904\n",
      "step 2100, entropy loss: 0.090096, l2_loss: 164.877625, total loss: 0.101638\n",
      "step 2200, entropy loss: 0.099454, l2_loss: 166.340469, total loss: 0.111098\n",
      "step 2300, entropy loss: 0.114023, l2_loss: 167.725143, total loss: 0.125764\n",
      "step 2400, entropy loss: 0.084494, l2_loss: 169.197769, total loss: 0.096337\n",
      "step 2500, entropy loss: 0.077277, l2_loss: 170.517014, total loss: 0.089213\n",
      "step 2600, entropy loss: 0.085327, l2_loss: 171.764420, total loss: 0.097351\n",
      "step 2700, entropy loss: 0.062839, l2_loss: 172.995148, total loss: 0.074949\n",
      "step 2800, entropy loss: 0.105463, l2_loss: 174.282288, total loss: 0.117663\n",
      "step 2900, entropy loss: 0.078577, l2_loss: 175.486633, total loss: 0.090861\n",
      "step 3000, entropy loss: 0.100315, l2_loss: 176.581497, total loss: 0.112676\n",
      "step 3100, entropy loss: 0.069298, l2_loss: 177.798904, total loss: 0.081744\n",
      "step 3200, entropy loss: 0.035971, l2_loss: 178.896301, total loss: 0.048494\n",
      "step 3300, entropy loss: 0.033365, l2_loss: 179.959351, total loss: 0.045962\n",
      "step 3400, entropy loss: 0.051392, l2_loss: 181.064743, total loss: 0.064066\n",
      "step 3500, entropy loss: 0.043895, l2_loss: 182.164352, total loss: 0.056646\n",
      "step 3600, entropy loss: 0.071725, l2_loss: 183.173538, total loss: 0.084548\n",
      "step 3700, entropy loss: 0.134900, l2_loss: 184.196869, total loss: 0.147794\n",
      "step 3800, entropy loss: 0.076503, l2_loss: 185.163086, total loss: 0.089465\n",
      "step 3900, entropy loss: 0.048134, l2_loss: 186.125610, total loss: 0.061163\n",
      "step 4000, entropy loss: 0.037087, l2_loss: 187.093246, total loss: 0.050184\n",
      "step 4100, entropy loss: 0.183264, l2_loss: 187.993500, total loss: 0.196423\n",
      "step 4200, entropy loss: 0.094070, l2_loss: 188.886246, total loss: 0.107292\n",
      "step 4300, entropy loss: 0.095693, l2_loss: 189.751694, total loss: 0.108976\n",
      "step 4400, entropy loss: 0.015606, l2_loss: 190.581543, total loss: 0.028946\n",
      "step 4500, entropy loss: 0.025362, l2_loss: 191.357086, total loss: 0.038757\n",
      "step 4600, entropy loss: 0.092352, l2_loss: 192.207733, total loss: 0.105806\n",
      "step 4700, entropy loss: 0.032150, l2_loss: 193.006409, total loss: 0.045660\n",
      "step 4800, entropy loss: 0.045591, l2_loss: 193.847717, total loss: 0.059161\n",
      "step 4900, entropy loss: 0.030151, l2_loss: 194.767075, total loss: 0.043785\n",
      "step 5000, entropy loss: 0.027262, l2_loss: 195.501022, total loss: 0.040947\n",
      "Training Used 3357 sec.\n",
      "test accuracy is:  0.9833\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 0.1\n",
    "lr = 0.1\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],5000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 1.013283, l2_loss: 123.228699, total loss: 1.021909\n",
      "step 200, entropy loss: 0.460989, l2_loss: 159.757828, total loss: 0.472172\n",
      "step 300, entropy loss: 0.267629, l2_loss: 186.343124, total loss: 0.280673\n",
      "step 400, entropy loss: 0.145122, l2_loss: 201.603775, total loss: 0.159234\n",
      "step 500, entropy loss: 0.284881, l2_loss: 213.914764, total loss: 0.299855\n",
      "step 600, entropy loss: 0.631478, l2_loss: 224.235611, total loss: 0.647175\n",
      "step 700, entropy loss: 0.153863, l2_loss: 233.174850, total loss: 0.170186\n",
      "step 800, entropy loss: 0.023899, l2_loss: 241.039764, total loss: 0.040772\n",
      "step 900, entropy loss: 0.043324, l2_loss: 247.288712, total loss: 0.060634\n",
      "step 1000, entropy loss: 0.203003, l2_loss: 253.491699, total loss: 0.220748\n",
      "step 1100, entropy loss: 0.040199, l2_loss: 258.581329, total loss: 0.058300\n",
      "step 1200, entropy loss: 0.135701, l2_loss: 264.114624, total loss: 0.154189\n",
      "step 1300, entropy loss: 0.039419, l2_loss: 268.957214, total loss: 0.058246\n",
      "step 1400, entropy loss: 0.105678, l2_loss: 273.202911, total loss: 0.124803\n",
      "step 1500, entropy loss: 0.031062, l2_loss: 277.645203, total loss: 0.050497\n",
      "step 1600, entropy loss: 0.109140, l2_loss: 282.148407, total loss: 0.128890\n",
      "step 1700, entropy loss: 0.056718, l2_loss: 285.857635, total loss: 0.076728\n",
      "step 1800, entropy loss: 0.057238, l2_loss: 289.707458, total loss: 0.077518\n",
      "step 1900, entropy loss: 0.024654, l2_loss: 293.234131, total loss: 0.045181\n",
      "step 2000, entropy loss: 0.019613, l2_loss: 296.603424, total loss: 0.040376\n",
      "step 2100, entropy loss: 0.012790, l2_loss: 299.567749, total loss: 0.033760\n",
      "step 2200, entropy loss: 0.064518, l2_loss: 302.140106, total loss: 0.085668\n",
      "step 2300, entropy loss: 0.030149, l2_loss: 305.183197, total loss: 0.051512\n",
      "step 2400, entropy loss: 0.056321, l2_loss: 307.405273, total loss: 0.077840\n",
      "step 2500, entropy loss: 0.055234, l2_loss: 310.064453, total loss: 0.076939\n",
      "step 2600, entropy loss: 0.086264, l2_loss: 312.699097, total loss: 0.108153\n",
      "step 2700, entropy loss: 0.077690, l2_loss: 315.100464, total loss: 0.099748\n",
      "step 2800, entropy loss: 0.013274, l2_loss: 317.524017, total loss: 0.035501\n",
      "step 2900, entropy loss: 0.035449, l2_loss: 319.397858, total loss: 0.057806\n",
      "step 3000, entropy loss: 0.095856, l2_loss: 321.858765, total loss: 0.118386\n",
      "step 3100, entropy loss: 0.020343, l2_loss: 324.612457, total loss: 0.043066\n",
      "step 3200, entropy loss: 0.052614, l2_loss: 326.346191, total loss: 0.075459\n",
      "step 3300, entropy loss: 0.096663, l2_loss: 328.446411, total loss: 0.119654\n",
      "step 3400, entropy loss: 0.015032, l2_loss: 330.362244, total loss: 0.038158\n",
      "step 3500, entropy loss: 0.005754, l2_loss: 331.851318, total loss: 0.028984\n",
      "step 3600, entropy loss: 0.049263, l2_loss: 333.565277, total loss: 0.072612\n",
      "step 3700, entropy loss: 0.042610, l2_loss: 335.605194, total loss: 0.066103\n",
      "step 3800, entropy loss: 0.019437, l2_loss: 336.788879, total loss: 0.043012\n",
      "step 3900, entropy loss: 0.010435, l2_loss: 338.109528, total loss: 0.034103\n",
      "step 4000, entropy loss: 0.022411, l2_loss: 339.492889, total loss: 0.046176\n",
      "Training Used 2267 sec.\n",
      "test accuracy is:  0.9847\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 0.1\n",
    "lr = 0.5\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],4000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 0.973416, l2_loss: 122.457230, total loss: 0.981988\n",
      "step 200, entropy loss: 0.342297, l2_loss: 156.019180, total loss: 0.353218\n",
      "step 300, entropy loss: 0.219823, l2_loss: 179.431152, total loss: 0.232383\n",
      "step 400, entropy loss: 0.132986, l2_loss: 194.579498, total loss: 0.146607\n",
      "step 500, entropy loss: 0.124225, l2_loss: 203.504227, total loss: 0.138470\n",
      "step 600, entropy loss: 0.123420, l2_loss: 211.969528, total loss: 0.138258\n",
      "step 700, entropy loss: 0.049377, l2_loss: 219.144699, total loss: 0.064717\n",
      "step 800, entropy loss: 0.052906, l2_loss: 225.418365, total loss: 0.068686\n",
      "step 900, entropy loss: 0.078082, l2_loss: 231.007004, total loss: 0.094253\n",
      "step 1000, entropy loss: 0.098491, l2_loss: 236.310211, total loss: 0.115033\n",
      "step 1100, entropy loss: 0.098546, l2_loss: 241.236679, total loss: 0.115432\n",
      "step 1200, entropy loss: 0.108878, l2_loss: 246.197769, total loss: 0.126112\n",
      "step 1300, entropy loss: 0.033985, l2_loss: 250.187912, total loss: 0.051498\n",
      "step 1400, entropy loss: 0.087530, l2_loss: 254.505066, total loss: 0.105345\n",
      "step 1500, entropy loss: 0.038387, l2_loss: 257.781799, total loss: 0.056432\n",
      "step 1600, entropy loss: 0.044288, l2_loss: 261.507385, total loss: 0.062593\n",
      "step 1700, entropy loss: 0.026966, l2_loss: 264.569397, total loss: 0.045486\n",
      "step 1800, entropy loss: 0.035447, l2_loss: 267.299194, total loss: 0.054158\n",
      "step 1900, entropy loss: 0.038862, l2_loss: 270.125580, total loss: 0.057770\n",
      "step 2000, entropy loss: 0.025353, l2_loss: 272.655212, total loss: 0.044439\n",
      "step 2100, entropy loss: 0.086235, l2_loss: 278.281342, total loss: 0.105715\n",
      "step 2200, entropy loss: 0.014740, l2_loss: 280.795929, total loss: 0.034396\n",
      "step 2300, entropy loss: 0.032149, l2_loss: 283.141235, total loss: 0.051969\n",
      "step 2400, entropy loss: 0.025496, l2_loss: 285.551758, total loss: 0.045484\n",
      "step 2500, entropy loss: 0.081605, l2_loss: 287.940735, total loss: 0.101761\n",
      "step 2600, entropy loss: 0.020494, l2_loss: 289.906525, total loss: 0.040787\n",
      "step 2700, entropy loss: 0.013477, l2_loss: 292.002228, total loss: 0.033917\n",
      "step 2800, entropy loss: 0.081811, l2_loss: 293.568970, total loss: 0.102361\n",
      "step 2900, entropy loss: 0.037346, l2_loss: 295.176331, total loss: 0.058008\n",
      "step 3000, entropy loss: 0.045998, l2_loss: 296.962891, total loss: 0.066786\n",
      "step 3100, entropy loss: 0.034177, l2_loss: 298.738159, total loss: 0.055089\n",
      "step 3200, entropy loss: 0.002867, l2_loss: 299.856049, total loss: 0.023857\n",
      "step 3300, entropy loss: 0.018391, l2_loss: 301.737213, total loss: 0.039513\n",
      "step 3400, entropy loss: 0.024621, l2_loss: 303.094940, total loss: 0.045838\n",
      "step 3500, entropy loss: 0.034424, l2_loss: 304.412994, total loss: 0.055733\n",
      "step 3600, entropy loss: 0.014826, l2_loss: 305.624207, total loss: 0.036220\n",
      "step 3700, entropy loss: 0.027288, l2_loss: 306.976807, total loss: 0.048776\n",
      "step 3800, entropy loss: 0.012000, l2_loss: 308.515045, total loss: 0.033596\n",
      "step 3900, entropy loss: 0.031094, l2_loss: 309.521667, total loss: 0.052760\n",
      "step 4000, entropy loss: 0.013483, l2_loss: 310.691589, total loss: 0.035231\n",
      "Training Used 2454 sec.\n",
      "test accuracy is:  0.9905\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 1.0\n",
    "lr = 0.5\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[5,5],4000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 9*9*32 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 9*9*64\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 0.313116, l2_loss: 127.227898, total loss: 0.322022\n",
      "step 200, entropy loss: 0.285770, l2_loss: 142.672302, total loss: 0.295757\n",
      "step 300, entropy loss: 0.110500, l2_loss: 152.439316, total loss: 0.121171\n",
      "step 400, entropy loss: 0.119805, l2_loss: 160.329071, total loss: 0.131028\n",
      "step 500, entropy loss: 0.129280, l2_loss: 166.929977, total loss: 0.140965\n",
      "step 600, entropy loss: 0.032490, l2_loss: 172.700531, total loss: 0.044579\n",
      "step 700, entropy loss: 0.049235, l2_loss: 177.459381, total loss: 0.061657\n",
      "step 800, entropy loss: 0.058724, l2_loss: 181.755753, total loss: 0.071447\n",
      "step 900, entropy loss: 0.069609, l2_loss: 185.812592, total loss: 0.082616\n",
      "step 1000, entropy loss: 0.036488, l2_loss: 189.482788, total loss: 0.049752\n",
      "step 1100, entropy loss: 0.030538, l2_loss: 192.950867, total loss: 0.044044\n",
      "step 1200, entropy loss: 0.145030, l2_loss: 195.964767, total loss: 0.158748\n",
      "step 1300, entropy loss: 0.027165, l2_loss: 198.755905, total loss: 0.041078\n",
      "step 1400, entropy loss: 0.012684, l2_loss: 201.260925, total loss: 0.026772\n",
      "step 1500, entropy loss: 0.039725, l2_loss: 204.000793, total loss: 0.054005\n",
      "step 1600, entropy loss: 0.047379, l2_loss: 206.363647, total loss: 0.061824\n",
      "step 1700, entropy loss: 0.022362, l2_loss: 209.095963, total loss: 0.036999\n",
      "step 1800, entropy loss: 0.021627, l2_loss: 211.420120, total loss: 0.036426\n",
      "step 1900, entropy loss: 0.019034, l2_loss: 213.088516, total loss: 0.033950\n",
      "step 2000, entropy loss: 0.025358, l2_loss: 215.036179, total loss: 0.040410\n",
      "step 2100, entropy loss: 0.015295, l2_loss: 216.978653, total loss: 0.030484\n",
      "step 2200, entropy loss: 0.027211, l2_loss: 218.850861, total loss: 0.042531\n",
      "step 2300, entropy loss: 0.041338, l2_loss: 220.415466, total loss: 0.056767\n",
      "step 2400, entropy loss: 0.024608, l2_loss: 221.958664, total loss: 0.040145\n",
      "step 2500, entropy loss: 0.083373, l2_loss: 223.182098, total loss: 0.098996\n",
      "step 2600, entropy loss: 0.040252, l2_loss: 224.956940, total loss: 0.055999\n",
      "step 2700, entropy loss: 0.012741, l2_loss: 225.919098, total loss: 0.028556\n",
      "step 2800, entropy loss: 0.039309, l2_loss: 227.366852, total loss: 0.055224\n",
      "step 2900, entropy loss: 0.011761, l2_loss: 228.207214, total loss: 0.027735\n",
      "step 3000, entropy loss: 0.007347, l2_loss: 229.118271, total loss: 0.023385\n",
      "step 3100, entropy loss: 0.023591, l2_loss: 230.463303, total loss: 0.039724\n",
      "step 3200, entropy loss: 0.011839, l2_loss: 231.802414, total loss: 0.028065\n",
      "step 3300, entropy loss: 0.011939, l2_loss: 232.804199, total loss: 0.028235\n",
      "step 3400, entropy loss: 0.016642, l2_loss: 233.800247, total loss: 0.033008\n",
      "step 3500, entropy loss: 0.011124, l2_loss: 234.423508, total loss: 0.027534\n",
      "step 3600, entropy loss: 0.017732, l2_loss: 235.508270, total loss: 0.034218\n",
      "step 3700, entropy loss: 0.019585, l2_loss: 236.252548, total loss: 0.036123\n",
      "step 3800, entropy loss: 0.006240, l2_loss: 237.472626, total loss: 0.022863\n",
      "step 3900, entropy loss: 0.010803, l2_loss: 238.279556, total loss: 0.027483\n",
      "step 4000, entropy loss: 0.018090, l2_loss: 238.933807, total loss: 0.034815\n",
      "Training Used 2978 sec.\n",
      "test accuracy is:  0.9917\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 1.0, 改变 kernel size 为 9*9，数量不变\n",
    "lr = 0.5\n",
    "tf.reset_default_graph()\n",
    "train(lr,32,[9,9],4000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 9*9*16 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 9*9*32\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 0.382905, l2_loss: 88.393387, total loss: 0.389093\n",
      "step 200, entropy loss: 0.120477, l2_loss: 107.900810, total loss: 0.128030\n",
      "step 300, entropy loss: 0.094607, l2_loss: 119.314957, total loss: 0.102959\n",
      "step 400, entropy loss: 0.035635, l2_loss: 128.799408, total loss: 0.044651\n",
      "step 500, entropy loss: 0.203909, l2_loss: 136.336105, total loss: 0.213452\n",
      "step 600, entropy loss: 0.086409, l2_loss: 142.961472, total loss: 0.096416\n",
      "step 700, entropy loss: 0.172048, l2_loss: 148.686295, total loss: 0.182456\n",
      "step 800, entropy loss: 0.049744, l2_loss: 153.685547, total loss: 0.060502\n",
      "step 900, entropy loss: 0.060055, l2_loss: 158.183731, total loss: 0.071128\n",
      "step 1000, entropy loss: 0.045502, l2_loss: 162.412811, total loss: 0.056871\n",
      "step 1100, entropy loss: 0.045231, l2_loss: 166.468063, total loss: 0.056884\n",
      "step 1200, entropy loss: 0.059737, l2_loss: 169.917633, total loss: 0.071631\n",
      "step 1300, entropy loss: 0.066080, l2_loss: 173.705795, total loss: 0.078239\n",
      "step 1400, entropy loss: 0.065566, l2_loss: 177.541565, total loss: 0.077994\n",
      "step 1500, entropy loss: 0.057269, l2_loss: 181.223221, total loss: 0.069955\n",
      "step 1600, entropy loss: 0.029130, l2_loss: 184.136688, total loss: 0.042020\n",
      "step 1700, entropy loss: 0.022876, l2_loss: 187.081070, total loss: 0.035971\n",
      "step 1800, entropy loss: 0.012719, l2_loss: 189.668396, total loss: 0.025995\n",
      "step 1900, entropy loss: 0.022216, l2_loss: 192.500000, total loss: 0.035691\n",
      "step 2000, entropy loss: 0.019767, l2_loss: 195.318588, total loss: 0.033439\n",
      "step 2100, entropy loss: 0.064261, l2_loss: 197.463669, total loss: 0.078083\n",
      "step 2200, entropy loss: 0.009199, l2_loss: 199.388580, total loss: 0.023157\n",
      "step 2300, entropy loss: 0.107499, l2_loss: 201.535446, total loss: 0.121607\n",
      "step 2400, entropy loss: 0.022729, l2_loss: 203.813553, total loss: 0.036996\n",
      "step 2500, entropy loss: 0.061770, l2_loss: 205.886444, total loss: 0.076182\n",
      "step 2600, entropy loss: 0.049649, l2_loss: 208.035492, total loss: 0.064211\n",
      "step 2700, entropy loss: 0.043247, l2_loss: 209.752701, total loss: 0.057930\n",
      "step 2800, entropy loss: 0.010571, l2_loss: 211.993561, total loss: 0.025410\n",
      "step 2900, entropy loss: 0.011819, l2_loss: 214.061249, total loss: 0.026803\n",
      "step 3000, entropy loss: 0.010811, l2_loss: 215.464294, total loss: 0.025894\n",
      "step 3100, entropy loss: 0.007489, l2_loss: 216.748001, total loss: 0.022662\n",
      "step 3200, entropy loss: 0.041357, l2_loss: 218.157196, total loss: 0.056628\n",
      "step 3300, entropy loss: 0.043333, l2_loss: 219.478516, total loss: 0.058696\n",
      "step 3400, entropy loss: 0.010250, l2_loss: 220.997421, total loss: 0.025719\n",
      "step 3500, entropy loss: 0.059390, l2_loss: 222.406723, total loss: 0.074958\n",
      "step 3600, entropy loss: 0.042112, l2_loss: 223.905365, total loss: 0.057785\n",
      "step 3700, entropy loss: 0.050109, l2_loss: 225.232071, total loss: 0.065875\n",
      "step 3800, entropy loss: 0.015549, l2_loss: 226.804504, total loss: 0.031426\n",
      "step 3900, entropy loss: 0.030135, l2_loss: 228.364487, total loss: 0.046121\n",
      "step 4000, entropy loss: 0.007492, l2_loss: 229.507141, total loss: 0.023557\n",
      "Training Used 2358 sec.\n",
      "test accuracy is:  0.9912\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 1.0, 改变 kernel 数量 16 ,size 不变\n",
    "lr = 0.5\n",
    "tf.reset_default_graph()\n",
    "train(lr,16,[9,9],4000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x_image shape =  (?, 28, 28, 1)\n",
      "====第一层卷积====\n",
      "采用卷积核： 5*5*64 \n",
      "激活函数：relu\n",
      "正则化因子：batch_norm ==> {'is_training': True, 'decay': 0.9, 'updates_collections': None}\n",
      "====第一层池化====\n",
      "采用最大值池化\n",
      "池化核 2 X 2\n",
      "步长采用默认值 2\n",
      "补齐采用默认值 VALID\n",
      "====第二层卷积====\n",
      "采用卷积核： 5*5*128\n",
      "其他参数同上层\n",
      "====第二层池化====\n",
      "参数和上层一样\n",
      "step 100, entropy loss: 1.334015, l2_loss: 195.789673, total loss: 1.347721\n",
      "step 200, entropy loss: 0.525501, l2_loss: 232.647430, total loss: 0.541786\n",
      "step 300, entropy loss: 0.325427, l2_loss: 262.708893, total loss: 0.343817\n",
      "step 400, entropy loss: 0.224845, l2_loss: 277.984863, total loss: 0.244304\n",
      "step 500, entropy loss: 0.293779, l2_loss: 288.531738, total loss: 0.313976\n",
      "step 600, entropy loss: 0.129326, l2_loss: 296.620514, total loss: 0.150089\n",
      "step 700, entropy loss: 0.161666, l2_loss: 304.424561, total loss: 0.182976\n",
      "step 800, entropy loss: 0.076524, l2_loss: 310.002991, total loss: 0.098224\n",
      "step 900, entropy loss: 0.112227, l2_loss: 315.013367, total loss: 0.134278\n",
      "step 1000, entropy loss: 0.134561, l2_loss: 319.202209, total loss: 0.156905\n",
      "step 1100, entropy loss: 0.048484, l2_loss: 324.907349, total loss: 0.071227\n",
      "step 1200, entropy loss: 0.024790, l2_loss: 328.747955, total loss: 0.047802\n",
      "step 1300, entropy loss: 0.083130, l2_loss: 331.924500, total loss: 0.106365\n",
      "step 1400, entropy loss: 0.105634, l2_loss: 334.766937, total loss: 0.129068\n",
      "step 1500, entropy loss: 0.021661, l2_loss: 337.356659, total loss: 0.045276\n",
      "step 1600, entropy loss: 0.023057, l2_loss: 340.309418, total loss: 0.046879\n",
      "step 1700, entropy loss: 0.030547, l2_loss: 342.771851, total loss: 0.054541\n",
      "step 1800, entropy loss: 0.050077, l2_loss: 345.255829, total loss: 0.074244\n",
      "step 1900, entropy loss: 0.030109, l2_loss: 347.807159, total loss: 0.054455\n",
      "step 2000, entropy loss: 0.084887, l2_loss: 349.497192, total loss: 0.109352\n",
      "step 2100, entropy loss: 0.033677, l2_loss: 350.814362, total loss: 0.058234\n",
      "step 2200, entropy loss: 0.095981, l2_loss: 353.074280, total loss: 0.120696\n",
      "step 2300, entropy loss: 0.010577, l2_loss: 354.985870, total loss: 0.035426\n",
      "step 2400, entropy loss: 0.063107, l2_loss: 356.233734, total loss: 0.088044\n",
      "step 2500, entropy loss: 0.020161, l2_loss: 357.235718, total loss: 0.045167\n",
      "step 2600, entropy loss: 0.025951, l2_loss: 358.848206, total loss: 0.051070\n",
      "step 2700, entropy loss: 0.024542, l2_loss: 360.041565, total loss: 0.049745\n",
      "step 2800, entropy loss: 0.013642, l2_loss: 361.059235, total loss: 0.038917\n",
      "step 2900, entropy loss: 0.012662, l2_loss: 363.271393, total loss: 0.038091\n",
      "step 3000, entropy loss: 0.003240, l2_loss: 364.887817, total loss: 0.028782\n",
      "step 3100, entropy loss: 0.008838, l2_loss: 366.073059, total loss: 0.034463\n",
      "step 3200, entropy loss: 0.004608, l2_loss: 366.708313, total loss: 0.030277\n",
      "step 3300, entropy loss: 0.037637, l2_loss: 367.350525, total loss: 0.063352\n",
      "step 3400, entropy loss: 0.028381, l2_loss: 368.183960, total loss: 0.054154\n",
      "step 3500, entropy loss: 0.022866, l2_loss: 369.048340, total loss: 0.048699\n",
      "step 3600, entropy loss: 0.037047, l2_loss: 369.983765, total loss: 0.062946\n",
      "step 3700, entropy loss: 0.007999, l2_loss: 370.812622, total loss: 0.033956\n",
      "step 3800, entropy loss: 0.028683, l2_loss: 371.035156, total loss: 0.054656\n",
      "step 3900, entropy loss: 0.072247, l2_loss: 371.769562, total loss: 0.098271\n",
      "step 4000, entropy loss: 0.006490, l2_loss: 372.414673, total loss: 0.032559\n",
      "Training Used 3359 sec.\n",
      "test accuracy is:  0.9895\n"
     ]
    }
   ],
   "source": [
    "#keep_prob 1.0, 改变 kernel 数量 64 ,size 5*5\n",
    "lr = 0.5\n",
    "tf.reset_default_graph()\n",
    "train(lr,64,[5,5],4000)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "作业小节：\n",
    "最终结果：\n",
    "\n",
    "1. 4层神经网络\n",
    "2. 学习率在 0.5 时效果最好\n",
    "3. 卷积核 9*9*16\n",
    "4. Dropout 的 keep_prob 取值越大，准确率越高\n",
    "5. 正确率为 0.9912\n",
    "    \n",
    "* 学习率调试总结：\n",
    "* 先后调试过 0.1、0.05、0.001、0.0001，学习率越小模型训练效果越差，可能是梯度下降陷入了一个小低谷，较大的学习率可以跨过这个低谷，因此收敛更快，训练的正确率也更高。\n",
    "    \n",
    "* 卷积核调试总结：\n",
    "* 核数量一样时，9*9 的卷积核比 3*3 的有更高的正确率，但从训练时间上看耗时更长，这应该是 9*9 的核带来的运算量更大，学习的特征点更多。\n",
    "* 9*9*16 和 9*9*32 的卷积核正确率都差不多，但考虑到模型的复杂度，选择 9*9*16 更合适。\n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
