{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\ProgramData\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'channels_last'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Imports: TensorFlow core plus the Keras layers used to build the CNN.\n",
    "# NOTE(review): argparse and keras.initializers are imported but never used below.\n",
    "import argparse\n",
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "from keras.objectives import categorical_crossentropy\n",
    "\n",
    "from keras.layers.core import Dense, Flatten\n",
    "from keras.layers.convolutional import Conv2D\n",
    "from keras.layers.pooling import MaxPooling2D\n",
    "from keras import initializers\n",
    "from keras import backend as K\n",
    "from keras.layers.normalization import BatchNormalization\n",
    "\n",
    "\n",
    "# Echo the Keras image data format; the recorded output is 'channels_last',\n",
    "# i.e. tensors are laid out as [batch, height, width, channels].\n",
    "K.image_data_format() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /data/train-images-idx3-ubyte.gz\n",
      "Extracting /data/train-labels-idx1-ubyte.gz\n",
      "Extracting /data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Load MNIST (downloaded/extracted under data_dir); one_hot=True makes the\n",
    "# labels 10-dimensional one-hot vectors to match the softmax output layer.\n",
    "# NOTE(review): '/data/' is an absolute path - a relative, configurable path\n",
    "# would make the notebook portable across machines.\n",
    "data_dir = '/data/'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the input placeholders (the loss and optimizer are built in later cells):\n",
    "# x: flattened 28x28 grayscale images, y_: one-hot labels,\n",
    "# learning_rate: fed at train time so it could be decayed without rebuilding the graph.\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "with tf.name_scope('reshape'):\n",
    "    # Reshape to NHWC [batch, 28, 28, 1] to match the channels_last conv layers.\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 0\\n# 老师给出的参数\\nkernel_size_c1 = [5,5] # 第一个卷积层的kernel size\\nn_kernel_c1 = 32 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [5,5] # 第二个卷积层的kernel size\\nn_kernel_c2 = 64 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.01 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.070404, l2_loss: 797.948853, total loss: 0.126260\\n# batch accuracy: 0.99\\n# test accuracy: 0.977\\n# 尝试调整卷积核初始化分布参数\\n\""
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 0\n",
    "# 老师给出的参数\n",
    "kernel_size_c1 = [5,5] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 32 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [5,5] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 64 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.01 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.070404, l2_loss: 797.948853, total loss: 0.126260\n",
    "# batch accuracy: 0.99\n",
    "# test accuracy: 0.977\n",
    "# 尝试调整卷积核初始化分布参数\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 1\\n# 卷积核初始化设置了he_normal（msra）\\nkernel_size_c1 = [5,5] # 第一个卷积层的kernel size\\nn_kernel_c1 = 32 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [5,5] # 第二个卷积层的kernel size\\nn_kernel_c2 = 64 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.01 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.031255, l2_loss: 848.309143, total loss: 0.090637\\n# batch accuracy: 1.0\\n# test accuracy: 0.9825\\n# 交叉熵loss减小了，l2 loss增大了，准确率有所提高。准备尝试调整kernel size，\\n# 同时由于环境限制（训练次数多了总死机，tinymind算到一半也总断开），减少训练次数为2400\\n\""
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 1\n",
    "# 卷积核初始化设置了he_normal（msra）\n",
    "kernel_size_c1 = [5,5] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 32 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [5,5] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 64 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.01 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.031255, l2_loss: 848.309143, total loss: 0.090637\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9825\n",
    "# 交叉熵loss减小了，l2 loss增大了，准确率有所提高。准备尝试调整kernel size，\n",
    "# 同时由于环境限制（训练次数多了总死机，tinymind算到一半也总断开），减少训练次数为2400\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 2\\n# 第一个卷积层kernel size 由[5,5]改为[4,4]\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 32 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [5,5] # 第二个卷积层的kernel size\\nn_kernel_c2 = 64 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.01 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 2400 # 训练次数\\n\\n# final result\\n# step 2400, entropy loss: 0.113354, l2_loss: 848.669250, total loss: 0.172761\\n# batch accuracy: 0.980000\\n# test accuracy: 0.980200\\n# 交叉熵loss 变大，l2 loss没有什么变化，准确率稍有降低，可能是迭代次数减少的缘故\\n\""
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 2\n",
    "# 第一个卷积层kernel size 由[5,5]改为[4,4]\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 32 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [5,5] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 64 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.01 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 2400 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 2400, entropy loss: 0.113354, l2_loss: 848.669250, total loss: 0.172761\n",
    "# batch accuracy: 0.980000\n",
    "# test accuracy: 0.980200\n",
    "# 交叉熵loss 变大，l2 loss没有什么变化，准确率稍有降低，可能是迭代次数减少的缘故\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 3\\n# 第二个卷积层kernel size 由[5,5]改为[4,4]\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 32 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 64 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.01 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 2400 # 训练次数\\n\\n# final result\\n# step 2400, entropy loss: 0.087702, l2_loss: 850.670593, total loss: 0.147249\\n# batch accuracy: 0.980000\\n# test accuracy: 0.977500\\n# 交叉熵loss 更小，l2 loss稍大，准确率又低了点；loss上下浮动比较大，可能需要加入学习率衰减？先试试调整kernel数目吧\\n\""
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 3\n",
    "# 第二个卷积层kernel size 由[5,5]改为[4,4]\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 32 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 64 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.01 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 2400 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 2400, entropy loss: 0.087702, l2_loss: 850.670593, total loss: 0.147249\n",
    "# batch accuracy: 0.980000\n",
    "# test accuracy: 0.977500\n",
    "# 交叉熵loss 更小，l2 loss稍大，准确率又低了点；loss上下浮动比较大，可能需要加入学习率衰减？先试试调整kernel数目吧\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 4\\n# 第一个卷积层kernel数目由32改为 64\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 64 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.01 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 2400 # 训练次数\\n\\n# final result\\n# step 2400, entropy loss: 0.045341, l2_loss: 873.644226, total loss: 0.106496\\n# batch accuracy: 1.0\\n# test accuracy: 0.9792\\n# 后两个epoch里的交叉熵loss在很小的范围内波动，l2 loss基本没什么变化，batch准确率基本在0.99-1之间；\\n# test准确率变高，但是相比起来还是kernel size更大的时候准确率更高些\\n\""
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 4\n",
    "# 第一个卷积层kernel数目由32改为 64\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 64 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.01 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 2400 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 2400, entropy loss: 0.045341, l2_loss: 873.644226, total loss: 0.106496\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9792\n",
    "# 后两个epoch里的交叉熵loss在很小的范围内波动，l2 loss基本没什么变化，batch准确率基本在0.99-1之间；\n",
    "# test准确率变高，但是相比起来还是kernel size更大的时候准确率更高些\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 5\\n# 第二个卷积层kernel数目由64改为 32\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.01 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 2400 # 训练次数\\n\\n# final result\\n# step 2400, entropy loss: 0.028412, l2_loss: 699.221619, total loss: 0.077357\\n# batch accuracy: 1.0\\n# test accuracy: 0.9808\\n# loss 低了不少，准确率也高了些\\n\""
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 5\n",
    "# 第二个卷积层kernel数目由64改为 32\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.01 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 2400 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 2400, entropy loss: 0.028412, l2_loss: 699.221619, total loss: 0.077357\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9808\n",
    "# loss 低了不少，准确率也高了些\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 6\\n# 学习率由0.01调整为0.1\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.1 # 学习率\\nlambd = 7e-5 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 2400 # 训练次数\\n\\n# final result\\n# step 2400, entropy loss: 0.011938, l2_loss: 694.598206, total loss: 0.060560\\n# batch accuracy: 1.0\\n# test accuracy: 0.992\\n# 收敛很快，第一个epoch就达到了98%的准确率；\\n# 之后loss就开始有较大的波动，有点overshooting的感觉（0.062409 - 0.149980 - 0.014855 - 0.126124 - 0.026414）\\n# 第三个epoch开始稳定在0.02 - 0.05之间了，batch accuracy 一直是100%\\n# 最后取得了目前为止的最高test准确率（0.9912）。batch一直100%拟合，不知道算不算过拟合了，尝试增大正则参数\\n\""
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 6\n",
    "# 学习率由0.01调整为0.1\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.1 # 学习率\n",
    "lambd = 7e-5 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 2400 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 2400, entropy loss: 0.011938, l2_loss: 694.598206, total loss: 0.060560\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.992\n",
    "# 收敛很快，第一个epoch就达到了98%的准确率；\n",
    "# 之后loss就开始有较大的波动，有点overshooting的感觉（0.062409 - 0.149980 - 0.014855 - 0.126124 - 0.026414）\n",
    "# 第三个epoch开始稳定在0.02 - 0.05之间了，batch accuracy 一直是100%\n",
    "# 最后取得了目前为止的最高test准确率（0.9912）。batch一直100%拟合，不知道算不算过拟合了，尝试增大正则参数\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 7\\n# 正则参数由7e-5(0.00007)调整为 7e-4(0.0007)\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.1 # 学习率\\nlambd = 7e-4 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.016270, l2_loss: 485.613647, total loss: 0.356200\\n# batch accuracy: 1.0\\n# test accuracy: 0.9905\\n# 略微加大了对权重的限制，l2 loss 减小，total loss 增大，准确率下降了一点点（0.002），batch准确率依然在100%。\\n\""
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 7\n",
    "# 正则参数由7e-5(0.00007)调整为 7e-4(0.0007)\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.1 # 学习率\n",
    "lambd = 7e-4 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.016270, l2_loss: 485.613647, total loss: 0.356200\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9905\n",
    "# 略微加大了对权重的限制，l2 loss 减小，total loss 增大，准确率下降了一点点（0.002），batch准确率依然在100%。\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 8\\n# 正则参数由7e-4(0.0007)调整为 7e-3(0.007)\\nkernel_size_c1 = [4,4] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.1 # 学习率\\nlambd = 7e-3 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.077588, l2_loss: 29.955635, total loss: 0.287277\\n# batch accuracy: 1.0\\n# test accuracy: 0.9792\\n# l2 loss 缩减到了很小，准确率明显下降，batch准确率依然在100%，感觉这次数值可能有点过大了。\\n\""
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 8\n",
    "# 正则参数由7e-4(0.0007)调整为 7e-3(0.007)\n",
    "kernel_size_c1 = [4,4] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.1 # 学习率\n",
    "lambd = 7e-3 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.077588, l2_loss: 29.955635, total loss: 0.287277\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9792\n",
    "# l2 loss 缩减到了很小，准确率明显下降，batch准确率依然在100%，感觉这次数值可能有点过大了。\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 9\\n# 正则参数由7e-3(0.007)恢复为 7e-4(0.0007)，尝试调大第一个卷积层的kernel size为[6,6]\\nkernel_size_c1 = [6,6] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [4,4] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.1 # 学习率\\nlr_decay = 1 # 每个epoch的学习率衰减\\nlambd = 7e-4 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.025499, l2_loss: 482.746582, total loss: 0.363422\\n# batch accuracy: 1.0\\n# test accuracy: 0.9893\\n# 感觉结果跟之前没什么变化，交叉熵loss很快就维持在一个比较低的范围里了，后面都是l2 loss 在不断减小\\n\""
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 9\n",
    "# 正则参数由7e-3(0.007)恢复为 7e-4(0.0007)，尝试调大第一个卷积层的kernel size为[6,6]\n",
    "kernel_size_c1 = [6,6] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [4,4] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.1 # 学习率\n",
    "lr_decay = 1 # 每个epoch的学习率衰减\n",
    "lambd = 7e-4 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.025499, l2_loss: 482.746582, total loss: 0.363422\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9893\n",
    "# 感觉结果跟之前没什么变化，交叉熵loss很快就维持在一个比较低的范围里了，后面都是l2 loss 在不断减小\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 10\\n# 尝试调大第二个卷积层的kernel size为[6,6]\\nkernel_size_c1 = [6,6] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [6,6] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.1 # 学习率\\nlr_decay = 1 # 每个epoch的学习率衰减\\nlambd = 7e-4 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.009202, l2_loss: 485.285919, total loss: 0.348903\\n# batch accuracy: 1.0\\n# test accuracy: 0.9903\\n# 依然没什么明显变化...可能需要跑更多的epoch？？\\n\""
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 10\n",
    "# 尝试调大第二个卷积层的kernel size为[6,6]\n",
    "kernel_size_c1 = [6,6] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [6,6] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.1 # 学习率\n",
    "lr_decay = 1 # 每个epoch的学习率衰减\n",
    "lambd = 7e-4 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.009202, l2_loss: 485.285919, total loss: 0.348903\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.9903\n",
    "# 依然没什么明显变化...可能需要跑更多的epoch？？\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n# 设置参数 - 11\\n# 尝试在每个卷积层和池化之间加入batch normalization，其他参数无变化\\nkernel_size_c1 = [6,6] # 第一个卷积层的kernel size\\nn_kernel_c1 = 64 # 第一个卷积层的kernel数目\\n\\npool_size_p1 = [2,2] # 第一个池化层的size\\n\\nkernel_size_c2 = [6,6] # 第二个卷积层的kernel size\\nn_kernel_c2 = 32 # 第二个卷积层的kernel数目\\n\\npool_size_p2 = [2,2] # 第二个池化层的size\\n\\nactivation = 'relu' # 卷积层激活函数\\n\\ndense_out = 1000 # 第一个dense层输出维度\\n\\nlr = 0.1 # 学习率\\nlr_decay = 1 # 每个epoch的学习率衰减\\nlambd = 7e-4 # 正则化参数\\n\\nkernel_initializer = 'he_normal' # 卷积核初始化方式\\n#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\\n\\nsteps = 3000 # 训练次数\\n\\n# final result\\n# step 3000, entropy loss: 0.014939, l2_loss: 511.173553, total loss: 0.372761\\n# batch accuracy: 1.0\\n# test accuracy: 0.992\\n# 训练速度慢了很多，机器感觉快炸了。。收敛确实变快了一些，准确率也达到了99.2，试试把权重初始化换成xavier\\n\""
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "'''\n",
    "# 设置参数 - 11\n",
    "# 尝试在每个卷积层和池化之间加入batch normalization，其他参数无变化\n",
    "kernel_size_c1 = [6,6] # 第一个卷积层的kernel size\n",
    "n_kernel_c1 = 64 # 第一个卷积层的kernel数目\n",
    "\n",
    "pool_size_p1 = [2,2] # 第一个池化层的size\n",
    "\n",
    "kernel_size_c2 = [6,6] # 第二个卷积层的kernel size\n",
    "n_kernel_c2 = 32 # 第二个卷积层的kernel数目\n",
    "\n",
    "pool_size_p2 = [2,2] # 第二个池化层的size\n",
    "\n",
    "activation = 'relu' # 卷积层激活函数\n",
    "\n",
    "dense_out = 1000 # 第一个dense层输出维度\n",
    "\n",
    "lr = 0.1 # 学习率\n",
    "lr_decay = 1 # 每个epoch的学习率衰减\n",
    "lambd = 7e-4 # 正则化参数\n",
    "\n",
    "kernel_initializer = 'he_normal' # 卷积核初始化方式\n",
    "#bias_initializer = 'random_normal' # 卷积层偏置初始化方式\n",
    "\n",
    "steps = 3000 # 训练次数\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.014939, l2_loss: 511.173553, total loss: 0.372761\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.992\n",
    "# 训练速度慢了很多，机器感觉快炸了。。收敛确实变快了一些，准确率也达到了99.2，试试把权重初始化换成xavier\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parameter set 12\n",
    "# Kernel initialization changed from msra (he_normal) to xavier (glorot_normal)\n",
    "kernel_size_c1 = [6,6] # kernel size of conv layer 1\n",
    "n_kernel_c1 = 64 # number of kernels in conv layer 1\n",
    "\n",
    "pool_size_p1 = [2,2] # pool size of pooling layer 1\n",
    "\n",
    "kernel_size_c2 = [6,6] # kernel size of conv layer 2\n",
    "n_kernel_c2 = 32 # number of kernels in conv layer 2\n",
    "\n",
    "pool_size_p2 = [2,2] # pool size of pooling layer 2\n",
    "\n",
    "activation = 'relu' # activation function of the conv layers\n",
    "\n",
    "dense_out = 1000 # output dimension of the first dense layer\n",
    "\n",
    "lr = 0.1 # learning rate\n",
    "lr_decay = 1 # per-epoch learning rate decay (1 = no decay; the decay line in the training loop is commented out)\n",
    "lambd = 7e-4 # L2 regularization coefficient\n",
    "\n",
    "kernel_initializer = 'glorot_normal' # conv kernel initializer (xavier)\n",
    "#bias_initializer = 'random_normal' # conv bias initializer\n",
    "\n",
    "steps = 3000 # number of training steps\n",
    "\n",
    "# final result\n",
    "# step 3000, entropy loss: 0.014939, l2_loss: 511.173553, total loss: 0.372761\n",
    "# batch accuracy: 1.0\n",
    "# test accuracy: 0.992\n",
    "# Results are close to those with msra (he_normal) initialization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the CNN with the Keras functional API on top of the TF placeholder.\n",
    "# The data format is channels_last (NHWC) - see the K.image_data_format()\n",
    "# check in the first cell - so the feature axis of every 4-D tensor is the last one.\n",
    "# c1: first conv layer; 'same' padding keeps the 28x28 spatial size\n",
    "net = Conv2D(n_kernel_c1, kernel_size = kernel_size_c1, strides=[1,1],\n",
    "                 activation = activation,\n",
    "                 kernel_initializer = kernel_initializer,\n",
    "                 #bias_initializer = bias_initializer,\n",
    "                 padding = 'same',\n",
    "                 input_shape = [28,28,1])(x_image)\n",
    "# bn1 - BUGFIX: with channels_last data the channel axis is -1;\n",
    "# axis=1 would normalize over image rows instead of over channels.\n",
    "net = BatchNormalization(axis = -1)(net)\n",
    "\n",
    "# p1: 2x2 max pooling, 28x28 -> 14x14\n",
    "net = MaxPooling2D(pool_size = pool_size_p1)(net)\n",
    "\n",
    "# c2: second conv layer\n",
    "net = Conv2D(n_kernel_c2, kernel_size = kernel_size_c2, strides = [1,1],\n",
    "                 activation = activation,\n",
    "                 kernel_initializer = kernel_initializer,\n",
    "                 #bias_initializer = bias_initializer, \n",
    "                 padding = 'same')(net)\n",
    "\n",
    "# bn2 - same channel-axis fix as bn1\n",
    "net = BatchNormalization(axis = -1)(net)\n",
    "\n",
    "# p2: 14x14 -> 7x7\n",
    "net = MaxPooling2D(pool_size = pool_size_p2)(net)\n",
    "\n",
    "net = Flatten()(net)\n",
    "\n",
    "# fc1: fully connected hidden layer\n",
    "net = Dense(dense_out, activation = 'relu')(net)\n",
    "\n",
    "# fc2: 10-way softmax output\n",
    "net = Dense(10,activation = 'softmax')(net)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Loss: mean categorical cross-entropy between one-hot labels and the softmax output.\n",
    "cross_entropy = tf.reduce_mean(categorical_crossentropy(y_, net))\n",
    "\n",
    "# L2 penalty summed over ALL trainable variables - note this includes biases\n",
    "# and the BatchNorm gamma/beta parameters, not just the conv/dense kernels.\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "\n",
    "# Total objective = data loss + lambda * weight penalty.\n",
    "total_loss = cross_entropy + lambd * l2_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training op: plain SGD on the regularized loss; the learning rate comes in\n",
    "# through the placeholder fed at each sess.run call.\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "# Evaluation: accuracy = fraction of samples whose argmax prediction matches the label.\n",
    "correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the TF session and register it with Keras so the Keras-built layers\n",
    "# (Conv2D, BatchNormalization, ...) run in this same graph and session.\n",
    "sess = tf.Session()\n",
    "K.set_session(sess)\n",
    "\n",
    "# Initialize all graph variables before training starts.\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 0.353592, l2_loss: 678.174377, total loss: 0.828314\n",
      "batch accuracy: 0.93\n",
      "step 200, entropy loss: 0.110535, l2_loss: 671.479980, total loss: 0.580571\n",
      "batch accuracy: 1.0\n",
      "step 300, entropy loss: 0.190639, l2_loss: 664.103088, total loss: 0.655512\n",
      "batch accuracy: 0.96\n",
      "step 400, entropy loss: 0.105984, l2_loss: 656.313782, total loss: 0.565404\n",
      "batch accuracy: 1.0\n",
      "step 500, entropy loss: 0.036837, l2_loss: 648.576477, total loss: 0.490841\n",
      "batch accuracy: 1.0\n",
      "step 600, entropy loss: 0.071590, l2_loss: 640.744385, total loss: 0.520111\n",
      "batch accuracy: 1.0\n",
      "test accuracy: 0.982\n",
      "step 700, entropy loss: 0.071592, l2_loss: 633.002136, total loss: 0.514693\n",
      "batch accuracy: 0.99\n",
      "step 800, entropy loss: 0.041884, l2_loss: 625.314087, total loss: 0.479604\n",
      "batch accuracy: 0.99\n",
      "step 900, entropy loss: 0.032972, l2_loss: 617.558289, total loss: 0.465263\n",
      "batch accuracy: 1.0\n",
      "step 1000, entropy loss: 0.018616, l2_loss: 609.742493, total loss: 0.445435\n",
      "batch accuracy: 1.0\n",
      "step 1100, entropy loss: 0.069632, l2_loss: 602.120483, total loss: 0.491116\n",
      "batch accuracy: 1.0\n",
      "step 1200, entropy loss: 0.037281, l2_loss: 594.762878, total loss: 0.453615\n",
      "batch accuracy: 1.0\n",
      "test accuracy: 0.9877\n",
      "step 1300, entropy loss: 0.018322, l2_loss: 587.272217, total loss: 0.429413\n",
      "batch accuracy: 1.0\n",
      "step 1400, entropy loss: 0.052656, l2_loss: 579.817688, total loss: 0.458529\n",
      "batch accuracy: 1.0\n",
      "step 1500, entropy loss: 0.013929, l2_loss: 572.678406, total loss: 0.414804\n",
      "batch accuracy: 1.0\n",
      "step 1600, entropy loss: 0.044862, l2_loss: 565.462280, total loss: 0.440686\n",
      "batch accuracy: 1.0\n",
      "step 1700, entropy loss: 0.010824, l2_loss: 558.476807, total loss: 0.401758\n",
      "batch accuracy: 1.0\n",
      "step 1800, entropy loss: 0.028382, l2_loss: 551.417480, total loss: 0.414375\n",
      "batch accuracy: 1.0\n",
      "test accuracy: 0.9897\n",
      "step 1900, entropy loss: 0.022133, l2_loss: 544.503540, total loss: 0.403285\n",
      "batch accuracy: 1.0\n",
      "step 2000, entropy loss: 0.035450, l2_loss: 537.759888, total loss: 0.411882\n",
      "batch accuracy: 1.0\n",
      "step 2100, entropy loss: 0.057969, l2_loss: 530.796875, total loss: 0.429527\n",
      "batch accuracy: 1.0\n",
      "step 2200, entropy loss: 0.004815, l2_loss: 524.101440, total loss: 0.371686\n",
      "batch accuracy: 1.0\n",
      "step 2300, entropy loss: 0.005124, l2_loss: 517.582764, total loss: 0.367432\n",
      "batch accuracy: 1.0\n",
      "step 2400, entropy loss: 0.008036, l2_loss: 511.031921, total loss: 0.365758\n",
      "batch accuracy: 1.0\n",
      "test accuracy: 0.991\n",
      "step 2500, entropy loss: 0.005520, l2_loss: 504.571899, total loss: 0.358721\n",
      "batch accuracy: 1.0\n",
      "step 2600, entropy loss: 0.016952, l2_loss: 498.294800, total loss: 0.365759\n",
      "batch accuracy: 1.0\n",
      "step 2700, entropy loss: 0.020166, l2_loss: 491.974396, total loss: 0.364548\n",
      "batch accuracy: 1.0\n",
      "step 2800, entropy loss: 0.013944, l2_loss: 485.850647, total loss: 0.354039\n",
      "batch accuracy: 1.0\n",
      "step 2900, entropy loss: 0.002185, l2_loss: 479.718445, total loss: 0.337988\n",
      "batch accuracy: 1.0\n",
      "step 3000, entropy loss: 0.005521, l2_loss: 473.718170, total loss: 0.337124\n",
      "batch accuracy: 1.0\n",
      "test accuracy: 0.9917\n"
     ]
    }
   ],
   "source": [
    "# Training loop: mini-batches of 100 samples, `steps` iterations in total.\n",
    "for step in range(steps):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr})\n",
    "    if (step + 1) % 100 == 0:\n",
    "        # Every 100 steps: report the three loss terms and the current-batch accuracy.\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "            (step + 1, loss, l2_loss_value, total_loss_value))\n",
    "        print('batch accuracy:', sess.run(accuracy, feed_dict = {x: batch_xs, y_: batch_ys}))\n",
    "    if (step + 1) % 600 == 0: # print test accuracy once per epoch (approximate: 600 steps x 100 samples)\n",
    "        #lr = lr * lr_decay\n",
    "        print('test accuracy:', sess.run(accuracy, feed_dict = {x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
