{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-1-292d285a4f34>:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def weight(shape):\n",
    "    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name ='W')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def bias(shape):\n",
    "    return tf.Variable(tf.zeros(shape), name = 'b')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def conv2d(x, W):\n",
    "    return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def max_pool_2x2(x, pool_size):\n",
    "    return tf.nn.max_pool(x, ksize = pool_size, strides=[1,2,2,1], padding='SAME')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def convlayer(x_in, conv_size, pool_ksize):\n",
    "    #初始化参数\n",
    "    W = weight(conv_size)\n",
    "    b = bias([conv_size[3]])\n",
    "    Conv=conv2d(x_in, W)+ b\n",
    "    C_Conv = tf.nn.relu(Conv)\n",
    "    C_Pool = max_pool_2x2(C_Conv, pool_ksize)\n",
    "    \n",
    "    return C_Pool, W"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def hidelayer(layer_in, layer_in_node, layer_out_node, drop_out):\n",
    "    W= weight([layer_in_node, layer_out_node])\n",
    "    b= bias([layer_out_node])\n",
    "    hide_layer = tf.nn.relu(tf.matmul(layer_in, W) + b)\n",
    "    hide_layer_Dropout= tf.nn.dropout(hide_layer, keep_prob = drop_out)\n",
    "   \n",
    "    return hide_layer_Dropout, W"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(conv_ksize_list, pool_ksize, layer_node, drop_out, learning_rate, regularization_rate, training_steps, bath_size):\n",
    "    \n",
    "    x = tf.placeholder(\"float\",shape=[None, 784])    \n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "    \n",
    "    Conv_in = x_image   \n",
    "    W_list = [] \n",
    "    #设置卷积层\n",
    "    for i in range(len(conv_ksize_list)):  \n",
    "        Conv_out, W = convlayer(Conv_in, conv_ksize_list[i], pool_ksize)     \n",
    "        W_list.append(W)   \n",
    "        Conv_in = Conv_out\n",
    "    \n",
    "    num = int(Conv_out.shape[1] * Conv_out.shape[2] * Conv_out.shape[3])\n",
    "    D_Flat = tf.reshape(Conv_out, [-1, num])#7*7*36=1764\n",
    "    \n",
    "    layer_in = D_Flat\n",
    "    layer_in_node = num\n",
    "    #设置全连接层\n",
    "    for i in range(len(layer_node)):\n",
    "        hide_layer_out, W = hidelayer(layer_in, layer_in_node, layer_node[i], drop_out)\n",
    "        layer_in = hide_layer_out\n",
    "        layer_in_node = layer_node[i]\n",
    "        W_list.append(W)\n",
    "\n",
    "    W6 = weight([layer_node[i],10])\n",
    "    b6 = bias([10])\n",
    "    y_predict= tf.nn.softmax(tf.matmul(hide_layer_out, W6)+b6)\n",
    "    W_list.append(W6)\n",
    "    \n",
    "    y_label = tf.placeholder(\"float\", shape=[None, 10], name=\"y_label\")\n",
    "    \n",
    "    cross_entropy_mean = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = y_predict ,labels = y_label))\n",
    "    \n",
    "    #正则化因子\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)\n",
    "    regularization = 0\n",
    "    for i in range(len(W_list)):\n",
    "        regularization = regularizer(W_list[i])\n",
    "    \n",
    "    loss_function = cross_entropy_mean + regularization\n",
    "    \n",
    "    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss_function)\n",
    "    \n",
    "    correct_prediction = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y_label, 1))\n",
    "    \n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
    "    \n",
    "    with tf.Session() as sess:\n",
    "        \n",
    "        tf.global_variables_initializer().run()\n",
    "\n",
    "        validate_data = {x: mnist.validation.images, y_label:mnist.validation.labels}\n",
    "\n",
    "        test_data = {x:mnist.test.images, y_label:mnist.test.labels}\n",
    "\n",
    "        for i in range(training_steps):\n",
    "            \n",
    "            if i%1000==0:\n",
    "                loss, validate_acc = sess.run([loss_function,accuracy], feed_dict = validate_data)\n",
    "                print(\"After %d training steps, loss is %f, validation accuracy using average model is %g\" %(i, loss, validate_acc))\n",
    "\n",
    "            xs, ys = mnist.train.next_batch(BATCH_SIZE)\n",
    "            sess.run(optimizer, feed_dict={x:xs, y_label:ys})\n",
    "\n",
    "        test_acc = sess.run(accuracy, feed_dict=test_data)\n",
    "        print(\"After %d training steps, loss is %f, test accuracy using average model is %g\" %(TRAINING_STEPS, loss, validate_acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 卷积核size为5x5，数量为3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.354217, validation accuracy using average model is 0.1108\n",
      "After 1000 training steps, loss is 1.687906, validation accuracy using average model is 0.7858\n",
      "After 2000 training steps, loss is 1.501568, validation accuracy using average model is 0.9744\n",
      "After 3000 training steps, loss is 1.492647, validation accuracy using average model is 0.9818\n",
      "After 4000 training steps, loss is 1.486226, validation accuracy using average model is 0.9872\n",
      "After 5000 training steps, loss is 1.487042, validation accuracy using average model is 0.9842\n",
      "After 6000 training steps, loss is 1.483541, validation accuracy using average model is 0.9878\n",
      "After 7000 training steps, loss is 1.482023, validation accuracy using average model is 0.9884\n",
      "After 8000 training steps, loss is 1.479643, validation accuracy using average model is 0.99\n",
      "After 9000 training steps, loss is 1.479842, validation accuracy using average model is 0.989\n",
      "After 10000 training steps, loss is 1.478591, validation accuracy using average model is 0.9892\n",
      "After 11000 training steps, loss is 1.475982, validation accuracy using average model is 0.9916\n",
      "After 12000 training steps, loss is 1.475945, validation accuracy using average model is 0.9906\n",
      "After 13000 training steps, loss is 1.474205, validation accuracy using average model is 0.9922\n",
      "After 14000 training steps, loss is 1.474274, validation accuracy using average model is 0.9918\n",
      "After 15000 training steps, loss is 1.474419, validation accuracy using average model is 0.9906\n",
      "After 16000 training steps, loss is 1.473564, validation accuracy using average model is 0.9906\n",
      "After 17000 training steps, loss is 1.473068, validation accuracy using average model is 0.9912\n",
      "After 18000 training steps, loss is 1.471867, validation accuracy using average model is 0.992\n",
      "After 19000 training steps, loss is 1.471041, validation accuracy using average model is 0.9932\n",
      "After 20000 training steps, loss is 1.471321, validation accuracy using average model is 0.9918\n",
      "After 21000 training steps, loss is 1.471732, validation accuracy using average model is 0.992\n",
      "After 22000 training steps, loss is 1.470736, validation accuracy using average model is 0.992\n",
      "After 23000 training steps, loss is 1.471055, validation accuracy using average model is 0.9922\n",
      "After 24000 training steps, loss is 1.472383, validation accuracy using average model is 0.9902\n",
      "After 25000 training steps, loss is 1.470611, validation accuracy using average model is 0.9918\n",
      "After 26000 training steps, loss is 1.470031, validation accuracy using average model is 0.993\n",
      "After 27000 training steps, loss is 1.469817, validation accuracy using average model is 0.9926\n",
      "After 28000 training steps, loss is 1.469463, validation accuracy using average model is 0.993\n",
      "After 29000 training steps, loss is 1.468872, validation accuracy using average model is 0.9938\n",
      "After 30000 training steps, loss is 1.469870, validation accuracy using average model is 0.9926\n",
      "After 31000 training steps, loss is 1.470246, validation accuracy using average model is 0.992\n",
      "After 32000 training steps, loss is 1.471255, validation accuracy using average model is 0.991\n",
      "After 33000 training steps, loss is 1.469041, validation accuracy using average model is 0.9932\n",
      "After 34000 training steps, loss is 1.469861, validation accuracy using average model is 0.992\n",
      "After 35000 training steps, loss is 1.467875, validation accuracy using average model is 0.9948\n",
      "After 36000 training steps, loss is 1.470677, validation accuracy using average model is 0.9914\n",
      "After 37000 training steps, loss is 1.469807, validation accuracy using average model is 0.9918\n",
      "After 38000 training steps, loss is 1.469100, validation accuracy using average model is 0.9926\n",
      "After 39000 training steps, loss is 1.468440, validation accuracy using average model is 0.9932\n",
      "After 40000 training steps, loss is 1.471381, validation accuracy using average model is 0.9898\n",
      "After 41000 training steps, loss is 1.468963, validation accuracy using average model is 0.993\n",
      "After 42000 training steps, loss is 1.468819, validation accuracy using average model is 0.9926\n",
      "After 43000 training steps, loss is 1.469968, validation accuracy using average model is 0.9918\n",
      "After 44000 training steps, loss is 1.470862, validation accuracy using average model is 0.9912\n",
      "After 45000 training steps, loss is 1.469551, validation accuracy using average model is 0.9924\n",
      "After 46000 training steps, loss is 1.468355, validation accuracy using average model is 0.9938\n",
      "After 47000 training steps, loss is 1.467802, validation accuracy using average model is 0.9942\n",
      "After 48000 training steps, loss is 1.468264, validation accuracy using average model is 0.9934\n",
      "After 49000 training steps, loss is 1.469593, validation accuracy using average model is 0.9924\n",
      "After 50000 training steps, loss is 1.469593, test accuracy using average model is 0.9924\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[5,5,1,16],[5,5,16,32], [5,5,32,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0001\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#卷积核size为5x5，数量为3\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_List, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 卷积核size为3x3，数量为3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.329642, validation accuracy using average model is 0.0926\n",
      "After 1000 training steps, loss is 1.604223, validation accuracy using average model is 0.8742\n",
      "After 2000 training steps, loss is 1.497591, validation accuracy using average model is 0.979\n",
      "After 3000 training steps, loss is 1.493467, validation accuracy using average model is 0.9822\n",
      "After 4000 training steps, loss is 1.491351, validation accuracy using average model is 0.9826\n",
      "After 5000 training steps, loss is 1.486376, validation accuracy using average model is 0.9872\n",
      "After 6000 training steps, loss is 1.483412, validation accuracy using average model is 0.9882\n",
      "After 7000 training steps, loss is 1.482341, validation accuracy using average model is 0.9888\n",
      "After 8000 training steps, loss is 1.482137, validation accuracy using average model is 0.988\n",
      "After 9000 training steps, loss is 1.481902, validation accuracy using average model is 0.9876\n",
      "After 10000 training steps, loss is 1.478789, validation accuracy using average model is 0.99\n",
      "After 11000 training steps, loss is 1.477385, validation accuracy using average model is 0.991\n",
      "After 12000 training steps, loss is 1.477367, validation accuracy using average model is 0.9896\n",
      "After 13000 training steps, loss is 1.476415, validation accuracy using average model is 0.9904\n",
      "After 14000 training steps, loss is 1.475293, validation accuracy using average model is 0.9912\n",
      "After 15000 training steps, loss is 1.474628, validation accuracy using average model is 0.9906\n",
      "After 16000 training steps, loss is 1.474523, validation accuracy using average model is 0.9912\n",
      "After 17000 training steps, loss is 1.474014, validation accuracy using average model is 0.992\n",
      "After 18000 training steps, loss is 1.473468, validation accuracy using average model is 0.9912\n",
      "After 19000 training steps, loss is 1.473423, validation accuracy using average model is 0.9906\n",
      "After 20000 training steps, loss is 1.472923, validation accuracy using average model is 0.9912\n",
      "After 21000 training steps, loss is 1.471853, validation accuracy using average model is 0.9926\n",
      "After 22000 training steps, loss is 1.471897, validation accuracy using average model is 0.9926\n",
      "After 23000 training steps, loss is 1.471043, validation accuracy using average model is 0.9924\n",
      "After 24000 training steps, loss is 1.471696, validation accuracy using average model is 0.9922\n",
      "After 25000 training steps, loss is 1.472875, validation accuracy using average model is 0.9906\n",
      "After 26000 training steps, loss is 1.471023, validation accuracy using average model is 0.9926\n",
      "After 27000 training steps, loss is 1.472754, validation accuracy using average model is 0.9906\n",
      "After 28000 training steps, loss is 1.470623, validation accuracy using average model is 0.992\n",
      "After 29000 training steps, loss is 1.470165, validation accuracy using average model is 0.9924\n",
      "After 30000 training steps, loss is 1.472263, validation accuracy using average model is 0.9908\n",
      "After 31000 training steps, loss is 1.469400, validation accuracy using average model is 0.9932\n",
      "After 32000 training steps, loss is 1.470827, validation accuracy using average model is 0.9916\n",
      "After 33000 training steps, loss is 1.470059, validation accuracy using average model is 0.9924\n",
      "After 34000 training steps, loss is 1.470927, validation accuracy using average model is 0.9918\n",
      "After 35000 training steps, loss is 1.470092, validation accuracy using average model is 0.9924\n",
      "After 36000 training steps, loss is 1.469853, validation accuracy using average model is 0.9924\n",
      "After 37000 training steps, loss is 1.469860, validation accuracy using average model is 0.9922\n",
      "After 38000 training steps, loss is 1.469303, validation accuracy using average model is 0.9932\n",
      "After 39000 training steps, loss is 1.468577, validation accuracy using average model is 0.9936\n",
      "After 40000 training steps, loss is 1.469008, validation accuracy using average model is 0.9932\n",
      "After 41000 training steps, loss is 1.468802, validation accuracy using average model is 0.9938\n",
      "After 42000 training steps, loss is 1.469234, validation accuracy using average model is 0.9922\n",
      "After 43000 training steps, loss is 1.468825, validation accuracy using average model is 0.993\n",
      "After 44000 training steps, loss is 1.471462, validation accuracy using average model is 0.9908\n",
      "After 45000 training steps, loss is 1.468320, validation accuracy using average model is 0.9938\n",
      "After 46000 training steps, loss is 1.469707, validation accuracy using average model is 0.9924\n",
      "After 47000 training steps, loss is 1.469992, validation accuracy using average model is 0.9916\n",
      "After 48000 training steps, loss is 1.471455, validation accuracy using average model is 0.9908\n",
      "After 49000 training steps, loss is 1.469260, validation accuracy using average model is 0.9926\n",
      "After 50000 training steps, loss is 1.469260, test accuracy using average model is 0.9926\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[3,3,1,16],[3,3,16,32],[3,3,32,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0001\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#卷积核size为3x3，数量为3\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 卷积核size为8x8，数量为3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.382609, validation accuracy using average model is 0.0976\n",
      "After 1000 training steps, loss is 1.699841, validation accuracy using average model is 0.7752\n",
      "After 2000 training steps, loss is 1.606537, validation accuracy using average model is 0.867\n",
      "After 3000 training steps, loss is 1.505151, validation accuracy using average model is 0.9686\n",
      "After 4000 training steps, loss is 1.491459, validation accuracy using average model is 0.9822\n",
      "After 5000 training steps, loss is 1.491813, validation accuracy using average model is 0.9818\n",
      "After 6000 training steps, loss is 1.494109, validation accuracy using average model is 0.9774\n",
      "After 7000 training steps, loss is 1.487621, validation accuracy using average model is 0.9832\n",
      "After 8000 training steps, loss is 1.487856, validation accuracy using average model is 0.9824\n",
      "After 9000 training steps, loss is 1.485517, validation accuracy using average model is 0.9842\n",
      "After 10000 training steps, loss is 1.481724, validation accuracy using average model is 0.9876\n",
      "After 11000 training steps, loss is 1.480545, validation accuracy using average model is 0.988\n",
      "After 12000 training steps, loss is 1.482798, validation accuracy using average model is 0.9848\n",
      "After 13000 training steps, loss is 1.480777, validation accuracy using average model is 0.9868\n",
      "After 14000 training steps, loss is 1.479201, validation accuracy using average model is 0.9866\n",
      "After 15000 training steps, loss is 1.478624, validation accuracy using average model is 0.9876\n",
      "After 16000 training steps, loss is 1.477787, validation accuracy using average model is 0.9878\n",
      "After 17000 training steps, loss is 1.478337, validation accuracy using average model is 0.9866\n",
      "After 18000 training steps, loss is 1.476232, validation accuracy using average model is 0.9882\n",
      "After 19000 training steps, loss is 1.475495, validation accuracy using average model is 0.9894\n",
      "After 20000 training steps, loss is 1.475940, validation accuracy using average model is 0.989\n",
      "After 21000 training steps, loss is 1.476090, validation accuracy using average model is 0.9888\n",
      "After 22000 training steps, loss is 1.474922, validation accuracy using average model is 0.9894\n",
      "After 23000 training steps, loss is 1.476015, validation accuracy using average model is 0.9874\n",
      "After 24000 training steps, loss is 1.474137, validation accuracy using average model is 0.9894\n",
      "After 25000 training steps, loss is 1.472508, validation accuracy using average model is 0.9908\n",
      "After 26000 training steps, loss is 1.478103, validation accuracy using average model is 0.9856\n",
      "After 27000 training steps, loss is 1.474826, validation accuracy using average model is 0.9882\n",
      "After 28000 training steps, loss is 1.475518, validation accuracy using average model is 0.9872\n",
      "After 29000 training steps, loss is 1.472793, validation accuracy using average model is 0.9894\n",
      "After 30000 training steps, loss is 1.472824, validation accuracy using average model is 0.99\n",
      "After 31000 training steps, loss is 1.471808, validation accuracy using average model is 0.9908\n",
      "After 32000 training steps, loss is 1.471278, validation accuracy using average model is 0.9912\n",
      "After 33000 training steps, loss is 1.471218, validation accuracy using average model is 0.9912\n",
      "After 34000 training steps, loss is 1.470246, validation accuracy using average model is 0.9926\n",
      "After 35000 training steps, loss is 1.470808, validation accuracy using average model is 0.9916\n",
      "After 36000 training steps, loss is 1.474985, validation accuracy using average model is 0.987\n",
      "After 37000 training steps, loss is 1.471154, validation accuracy using average model is 0.9916\n",
      "After 38000 training steps, loss is 1.471468, validation accuracy using average model is 0.9908\n",
      "After 39000 training steps, loss is 1.471316, validation accuracy using average model is 0.9906\n",
      "After 40000 training steps, loss is 1.471096, validation accuracy using average model is 0.991\n",
      "After 41000 training steps, loss is 1.471050, validation accuracy using average model is 0.9908\n",
      "After 42000 training steps, loss is 1.470406, validation accuracy using average model is 0.991\n",
      "After 43000 training steps, loss is 1.473215, validation accuracy using average model is 0.9882\n",
      "After 44000 training steps, loss is 1.470618, validation accuracy using average model is 0.9914\n",
      "After 45000 training steps, loss is 1.472304, validation accuracy using average model is 0.9888\n",
      "After 46000 training steps, loss is 1.470876, validation accuracy using average model is 0.9914\n",
      "After 47000 training steps, loss is 1.471066, validation accuracy using average model is 0.991\n",
      "After 48000 training steps, loss is 1.472842, validation accuracy using average model is 0.989\n",
      "After 49000 training steps, loss is 1.470083, validation accuracy using average model is 0.992\n",
      "After 50000 training steps, loss is 1.470083, test accuracy using average model is 0.992\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[8,8,1,16],[8,8,16,32],[8,8,32,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0001\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#卷积核size为8x8，数量为3\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 卷积核size为10x10，数量为3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.350495, validation accuracy using average model is 0.122\n",
      "After 1000 training steps, loss is 1.880032, validation accuracy using average model is 0.5882\n",
      "After 2000 training steps, loss is 1.503925, validation accuracy using average model is 0.9678\n",
      "After 3000 training steps, loss is 1.491909, validation accuracy using average model is 0.9796\n",
      "After 4000 training steps, loss is 1.490112, validation accuracy using average model is 0.98\n",
      "After 5000 training steps, loss is 1.488024, validation accuracy using average model is 0.9818\n",
      "After 6000 training steps, loss is 1.486060, validation accuracy using average model is 0.9834\n",
      "After 7000 training steps, loss is 1.482316, validation accuracy using average model is 0.987\n",
      "After 8000 training steps, loss is 1.481620, validation accuracy using average model is 0.9872\n",
      "After 9000 training steps, loss is 1.482924, validation accuracy using average model is 0.9858\n",
      "After 10000 training steps, loss is 1.481179, validation accuracy using average model is 0.9866\n",
      "After 11000 training steps, loss is 1.480245, validation accuracy using average model is 0.987\n",
      "After 12000 training steps, loss is 1.477525, validation accuracy using average model is 0.989\n",
      "After 13000 training steps, loss is 1.480024, validation accuracy using average model is 0.9858\n",
      "After 14000 training steps, loss is 1.476504, validation accuracy using average model is 0.99\n",
      "After 15000 training steps, loss is 1.477112, validation accuracy using average model is 0.9892\n",
      "After 16000 training steps, loss is 1.477113, validation accuracy using average model is 0.988\n",
      "After 17000 training steps, loss is 1.478605, validation accuracy using average model is 0.9866\n",
      "After 18000 training steps, loss is 1.475211, validation accuracy using average model is 0.9892\n",
      "After 19000 training steps, loss is 1.474204, validation accuracy using average model is 0.9904\n",
      "After 20000 training steps, loss is 1.474050, validation accuracy using average model is 0.9898\n",
      "After 21000 training steps, loss is 1.473048, validation accuracy using average model is 0.9916\n",
      "After 22000 training steps, loss is 1.480273, validation accuracy using average model is 0.9842\n",
      "After 23000 training steps, loss is 1.474753, validation accuracy using average model is 0.9898\n",
      "After 24000 training steps, loss is 1.473285, validation accuracy using average model is 0.9898\n",
      "After 25000 training steps, loss is 1.472562, validation accuracy using average model is 0.9912\n",
      "After 26000 training steps, loss is 1.472563, validation accuracy using average model is 0.9906\n",
      "After 27000 training steps, loss is 1.472567, validation accuracy using average model is 0.99\n",
      "After 28000 training steps, loss is 1.474050, validation accuracy using average model is 0.989\n",
      "After 29000 training steps, loss is 1.471870, validation accuracy using average model is 0.9912\n",
      "After 30000 training steps, loss is 1.472393, validation accuracy using average model is 0.9896\n",
      "After 31000 training steps, loss is 1.472430, validation accuracy using average model is 0.9896\n",
      "After 32000 training steps, loss is 1.472356, validation accuracy using average model is 0.9906\n",
      "After 33000 training steps, loss is 1.472686, validation accuracy using average model is 0.9896\n",
      "After 34000 training steps, loss is 1.471989, validation accuracy using average model is 0.9904\n",
      "After 35000 training steps, loss is 1.471546, validation accuracy using average model is 0.9904\n",
      "After 36000 training steps, loss is 1.471071, validation accuracy using average model is 0.9912\n",
      "After 37000 training steps, loss is 1.471313, validation accuracy using average model is 0.9908\n",
      "After 38000 training steps, loss is 1.471251, validation accuracy using average model is 0.991\n",
      "After 39000 training steps, loss is 1.471312, validation accuracy using average model is 0.9906\n",
      "After 40000 training steps, loss is 1.471524, validation accuracy using average model is 0.9902\n",
      "After 41000 training steps, loss is 1.470764, validation accuracy using average model is 0.9916\n",
      "After 42000 training steps, loss is 1.470917, validation accuracy using average model is 0.9912\n",
      "After 43000 training steps, loss is 1.470029, validation accuracy using average model is 0.9922\n",
      "After 44000 training steps, loss is 1.469716, validation accuracy using average model is 0.993\n",
      "After 45000 training steps, loss is 1.469677, validation accuracy using average model is 0.9926\n",
      "After 46000 training steps, loss is 1.471244, validation accuracy using average model is 0.991\n",
      "After 47000 training steps, loss is 1.469613, validation accuracy using average model is 0.9922\n",
      "After 48000 training steps, loss is 1.470094, validation accuracy using average model is 0.9914\n",
      "After 49000 training steps, loss is 1.469706, validation accuracy using average model is 0.9922\n",
      "After 50000 training steps, loss is 1.469706, test accuracy using average model is 0.9922\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[10,10,1,16],[10,10,16,32],[10,10,32,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0001\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#卷积核size为10x10，数量为3\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 卷积核size为5x5，数量为5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.346924, validation accuracy using average model is 0.0974\n",
      "After 1000 training steps, loss is 1.519304, validation accuracy using average model is 0.9584\n",
      "After 2000 training steps, loss is 1.506116, validation accuracy using average model is 0.9692\n",
      "After 3000 training steps, loss is 1.497159, validation accuracy using average model is 0.9766\n",
      "After 4000 training steps, loss is 1.493586, validation accuracy using average model is 0.9788\n",
      "After 5000 training steps, loss is 1.490952, validation accuracy using average model is 0.9812\n",
      "After 6000 training steps, loss is 1.485426, validation accuracy using average model is 0.9854\n",
      "After 7000 training steps, loss is 1.485524, validation accuracy using average model is 0.9844\n",
      "After 8000 training steps, loss is 1.483510, validation accuracy using average model is 0.9848\n",
      "After 9000 training steps, loss is 1.488010, validation accuracy using average model is 0.98\n",
      "After 10000 training steps, loss is 1.481975, validation accuracy using average model is 0.9854\n",
      "After 11000 training steps, loss is 1.481336, validation accuracy using average model is 0.9854\n",
      "After 12000 training steps, loss is 1.478643, validation accuracy using average model is 0.9878\n",
      "After 13000 training steps, loss is 1.480233, validation accuracy using average model is 0.9856\n",
      "After 14000 training steps, loss is 1.480095, validation accuracy using average model is 0.9856\n",
      "After 15000 training steps, loss is 1.478104, validation accuracy using average model is 0.987\n",
      "After 16000 training steps, loss is 1.477630, validation accuracy using average model is 0.9868\n",
      "After 17000 training steps, loss is 1.476573, validation accuracy using average model is 0.9884\n",
      "After 18000 training steps, loss is 1.476788, validation accuracy using average model is 0.9872\n",
      "After 19000 training steps, loss is 1.474518, validation accuracy using average model is 0.9886\n",
      "After 20000 training steps, loss is 1.475992, validation accuracy using average model is 0.988\n",
      "After 21000 training steps, loss is 1.475150, validation accuracy using average model is 0.9886\n",
      "After 22000 training steps, loss is 1.473708, validation accuracy using average model is 0.9896\n",
      "After 23000 training steps, loss is 1.474180, validation accuracy using average model is 0.9888\n",
      "After 24000 training steps, loss is 1.474078, validation accuracy using average model is 0.989\n",
      "After 25000 training steps, loss is 1.471982, validation accuracy using average model is 0.991\n",
      "After 26000 training steps, loss is 1.475046, validation accuracy using average model is 0.9872\n",
      "After 27000 training steps, loss is 1.473923, validation accuracy using average model is 0.9882\n",
      "After 28000 training steps, loss is 1.473400, validation accuracy using average model is 0.9888\n",
      "After 29000 training steps, loss is 1.473924, validation accuracy using average model is 0.9882\n",
      "After 30000 training steps, loss is 1.472497, validation accuracy using average model is 0.9902\n",
      "After 31000 training steps, loss is 1.473397, validation accuracy using average model is 0.9888\n",
      "After 32000 training steps, loss is 1.473580, validation accuracy using average model is 0.9886\n",
      "After 33000 training steps, loss is 1.471575, validation accuracy using average model is 0.9906\n",
      "After 34000 training steps, loss is 1.476221, validation accuracy using average model is 0.9858\n",
      "After 35000 training steps, loss is 1.472372, validation accuracy using average model is 0.99\n",
      "After 36000 training steps, loss is 1.471637, validation accuracy using average model is 0.9902\n",
      "After 37000 training steps, loss is 1.471392, validation accuracy using average model is 0.991\n",
      "After 38000 training steps, loss is 1.470403, validation accuracy using average model is 0.9912\n",
      "After 39000 training steps, loss is 1.472565, validation accuracy using average model is 0.9892\n",
      "After 40000 training steps, loss is 1.472325, validation accuracy using average model is 0.9894\n",
      "After 41000 training steps, loss is 1.472778, validation accuracy using average model is 0.9896\n",
      "After 42000 training steps, loss is 1.471067, validation accuracy using average model is 0.991\n",
      "After 43000 training steps, loss is 1.471087, validation accuracy using average model is 0.9912\n",
      "After 44000 training steps, loss is 1.471922, validation accuracy using average model is 0.99\n",
      "After 45000 training steps, loss is 1.472172, validation accuracy using average model is 0.9894\n",
      "After 46000 training steps, loss is 1.472607, validation accuracy using average model is 0.9884\n",
      "After 47000 training steps, loss is 1.472452, validation accuracy using average model is 0.9892\n",
      "After 48000 training steps, loss is 1.471864, validation accuracy using average model is 0.99\n",
      "After 49000 training steps, loss is 1.470492, validation accuracy using average model is 0.9912\n",
      "After 50000 training steps, loss is 1.470492, test accuracy using average model is 0.9912\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[5,5,1,16],[5,5,16,32],[5,5,32,32],[5,5,32,64],[5,5,64,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0001\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#卷积核size为5x5，数量为5\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 增加卷积层通道数效果不好"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.362780, validation accuracy using average model is 0.1082\n",
      "After 1000 training steps, loss is 1.712179, validation accuracy using average model is 0.7618\n",
      "After 2000 training steps, loss is 1.627311, validation accuracy using average model is 0.8468\n",
      "After 3000 training steps, loss is 1.593652, validation accuracy using average model is 0.8784\n",
      "After 4000 training steps, loss is 1.592629, validation accuracy using average model is 0.8778\n",
      "After 5000 training steps, loss is 1.589520, validation accuracy using average model is 0.8808\n",
      "After 6000 training steps, loss is 1.587410, validation accuracy using average model is 0.8812\n",
      "After 7000 training steps, loss is 1.492417, validation accuracy using average model is 0.9768\n",
      "After 8000 training steps, loss is 1.489098, validation accuracy using average model is 0.9796\n",
      "After 9000 training steps, loss is 1.486606, validation accuracy using average model is 0.9816\n",
      "After 10000 training steps, loss is 1.488795, validation accuracy using average model is 0.9778\n",
      "After 11000 training steps, loss is 1.486232, validation accuracy using average model is 0.981\n",
      "After 12000 training steps, loss is 1.483209, validation accuracy using average model is 0.983\n",
      "After 13000 training steps, loss is 1.484354, validation accuracy using average model is 0.9818\n",
      "After 14000 training steps, loss is 1.483442, validation accuracy using average model is 0.982\n",
      "After 15000 training steps, loss is 1.483734, validation accuracy using average model is 0.981\n",
      "After 16000 training steps, loss is 1.480859, validation accuracy using average model is 0.9844\n",
      "After 17000 training steps, loss is 1.479938, validation accuracy using average model is 0.9848\n",
      "After 18000 training steps, loss is 1.480382, validation accuracy using average model is 0.9848\n",
      "After 19000 training steps, loss is 1.480560, validation accuracy using average model is 0.9836\n",
      "After 20000 training steps, loss is 1.480341, validation accuracy using average model is 0.984\n",
      "After 21000 training steps, loss is 1.481573, validation accuracy using average model is 0.9824\n",
      "After 22000 training steps, loss is 1.478443, validation accuracy using average model is 0.9852\n",
      "After 23000 training steps, loss is 1.478987, validation accuracy using average model is 0.9844\n",
      "After 24000 training steps, loss is 1.478891, validation accuracy using average model is 0.9844\n",
      "After 25000 training steps, loss is 1.478255, validation accuracy using average model is 0.9846\n",
      "After 26000 training steps, loss is 1.477483, validation accuracy using average model is 0.9852\n",
      "After 27000 training steps, loss is 1.479280, validation accuracy using average model is 0.9834\n",
      "After 28000 training steps, loss is 1.476856, validation accuracy using average model is 0.9862\n",
      "After 29000 training steps, loss is 1.475975, validation accuracy using average model is 0.9868\n",
      "After 30000 training steps, loss is 1.475975, test accuracy using average model is 0.9868\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[8,8,1,8],[8,8,8,16],[8,8,16,32],[8,8,32,64],[8,8,64,128]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0001\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 30000\n",
    "\n",
    "#增加卷积层通道数效果不好\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 学习率调整为0.0005"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.348448, validation accuracy using average model is 0.088\n",
      "After 1000 training steps, loss is 1.496597, validation accuracy using average model is 0.9756\n",
      "After 2000 training steps, loss is 1.486384, validation accuracy using average model is 0.983\n",
      "After 3000 training steps, loss is 1.478964, validation accuracy using average model is 0.988\n",
      "After 4000 training steps, loss is 1.478716, validation accuracy using average model is 0.9862\n",
      "After 5000 training steps, loss is 1.476087, validation accuracy using average model is 0.9884\n",
      "After 6000 training steps, loss is 1.474379, validation accuracy using average model is 0.9898\n",
      "After 7000 training steps, loss is 1.472183, validation accuracy using average model is 0.9914\n",
      "After 8000 training steps, loss is 1.473595, validation accuracy using average model is 0.9894\n",
      "After 9000 training steps, loss is 1.471868, validation accuracy using average model is 0.9908\n",
      "After 10000 training steps, loss is 1.472494, validation accuracy using average model is 0.99\n",
      "After 11000 training steps, loss is 1.471288, validation accuracy using average model is 0.9912\n",
      "After 12000 training steps, loss is 1.472072, validation accuracy using average model is 0.9902\n",
      "After 13000 training steps, loss is 1.471548, validation accuracy using average model is 0.9906\n",
      "After 14000 training steps, loss is 1.470561, validation accuracy using average model is 0.9916\n",
      "After 15000 training steps, loss is 1.470779, validation accuracy using average model is 0.9914\n",
      "After 16000 training steps, loss is 1.470739, validation accuracy using average model is 0.9914\n",
      "After 17000 training steps, loss is 1.473037, validation accuracy using average model is 0.9888\n",
      "After 18000 training steps, loss is 1.470642, validation accuracy using average model is 0.9914\n",
      "After 19000 training steps, loss is 1.471026, validation accuracy using average model is 0.9906\n",
      "After 20000 training steps, loss is 1.470712, validation accuracy using average model is 0.9908\n",
      "After 21000 training steps, loss is 1.469045, validation accuracy using average model is 0.993\n",
      "After 22000 training steps, loss is 1.471112, validation accuracy using average model is 0.9906\n",
      "After 23000 training steps, loss is 1.469392, validation accuracy using average model is 0.9924\n",
      "After 24000 training steps, loss is 1.469707, validation accuracy using average model is 0.9918\n",
      "After 25000 training steps, loss is 1.475002, validation accuracy using average model is 0.987\n",
      "After 26000 training steps, loss is 1.468746, validation accuracy using average model is 0.9932\n",
      "After 27000 training steps, loss is 1.467649, validation accuracy using average model is 0.9938\n",
      "After 28000 training steps, loss is 1.472238, validation accuracy using average model is 0.9894\n",
      "After 29000 training steps, loss is 1.470043, validation accuracy using average model is 0.9916\n",
      "After 30000 training steps, loss is 1.469709, validation accuracy using average model is 0.992\n",
      "After 31000 training steps, loss is 1.471010, validation accuracy using average model is 0.9904\n",
      "After 32000 training steps, loss is 1.468763, validation accuracy using average model is 0.993\n",
      "After 33000 training steps, loss is 1.471325, validation accuracy using average model is 0.9906\n",
      "After 34000 training steps, loss is 1.469855, validation accuracy using average model is 0.9918\n",
      "After 35000 training steps, loss is 1.469613, validation accuracy using average model is 0.992\n",
      "After 36000 training steps, loss is 1.468939, validation accuracy using average model is 0.9928\n",
      "After 37000 training steps, loss is 1.469504, validation accuracy using average model is 0.9922\n",
      "After 38000 training steps, loss is 1.468047, validation accuracy using average model is 0.9934\n",
      "After 39000 training steps, loss is 1.472676, validation accuracy using average model is 0.9888\n",
      "After 40000 training steps, loss is 1.473546, validation accuracy using average model is 0.9882\n",
      "After 41000 training steps, loss is 1.469261, validation accuracy using average model is 0.992\n",
      "After 42000 training steps, loss is 1.468441, validation accuracy using average model is 0.993\n",
      "After 43000 training steps, loss is 1.472943, validation accuracy using average model is 0.9884\n",
      "After 44000 training steps, loss is 1.472676, validation accuracy using average model is 0.9888\n",
      "After 45000 training steps, loss is 1.483069, validation accuracy using average model is 0.9782\n",
      "After 46000 training steps, loss is 1.469845, validation accuracy using average model is 0.9918\n",
      "After 47000 training steps, loss is 1.470484, validation accuracy using average model is 0.9908\n",
      "After 48000 training steps, loss is 1.468397, validation accuracy using average model is 0.9932\n",
      "After 49000 training steps, loss is 1.471742, validation accuracy using average model is 0.9896\n",
      "After 50000 training steps, loss is 1.471742, test accuracy using average model is 0.9896\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[3,3,1,16],[3,3,16,32],[3,3,32,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0005\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#学习率调整为0.0005\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 学习率调整为0.0003"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 training steps, loss is 2.351952, validation accuracy using average model is 0.108\n",
      "After 1000 training steps, loss is 1.495725, validation accuracy using average model is 0.9804\n",
      "After 2000 training steps, loss is 1.488631, validation accuracy using average model is 0.9836\n",
      "After 3000 training steps, loss is 1.483681, validation accuracy using average model is 0.9872\n",
      "After 4000 training steps, loss is 1.479347, validation accuracy using average model is 0.9894\n",
      "After 5000 training steps, loss is 1.476793, validation accuracy using average model is 0.9906\n",
      "After 6000 training steps, loss is 1.477203, validation accuracy using average model is 0.9888\n",
      "After 7000 training steps, loss is 1.474854, validation accuracy using average model is 0.9904\n",
      "After 8000 training steps, loss is 1.475299, validation accuracy using average model is 0.9892\n",
      "After 9000 training steps, loss is 1.473118, validation accuracy using average model is 0.991\n",
      "After 10000 training steps, loss is 1.472823, validation accuracy using average model is 0.9904\n",
      "After 11000 training steps, loss is 1.472382, validation accuracy using average model is 0.991\n",
      "After 12000 training steps, loss is 1.472309, validation accuracy using average model is 0.991\n",
      "After 13000 training steps, loss is 1.470495, validation accuracy using average model is 0.9924\n",
      "After 14000 training steps, loss is 1.470987, validation accuracy using average model is 0.9916\n",
      "After 15000 training steps, loss is 1.471140, validation accuracy using average model is 0.9916\n",
      "After 16000 training steps, loss is 1.471147, validation accuracy using average model is 0.991\n",
      "After 17000 training steps, loss is 1.469430, validation accuracy using average model is 0.9932\n",
      "After 18000 training steps, loss is 1.471028, validation accuracy using average model is 0.9908\n",
      "After 19000 training steps, loss is 1.472555, validation accuracy using average model is 0.9888\n",
      "After 20000 training steps, loss is 1.473733, validation accuracy using average model is 0.9884\n",
      "After 21000 training steps, loss is 1.471065, validation accuracy using average model is 0.991\n",
      "After 22000 training steps, loss is 1.471134, validation accuracy using average model is 0.9906\n",
      "After 23000 training steps, loss is 1.469690, validation accuracy using average model is 0.992\n",
      "After 24000 training steps, loss is 1.470087, validation accuracy using average model is 0.9914\n",
      "After 25000 training steps, loss is 1.468908, validation accuracy using average model is 0.9932\n",
      "After 26000 training steps, loss is 1.469025, validation accuracy using average model is 0.9926\n",
      "After 27000 training steps, loss is 1.469762, validation accuracy using average model is 0.992\n",
      "After 28000 training steps, loss is 1.468578, validation accuracy using average model is 0.9934\n",
      "After 29000 training steps, loss is 1.471098, validation accuracy using average model is 0.9904\n",
      "After 30000 training steps, loss is 1.469947, validation accuracy using average model is 0.992\n",
      "After 31000 training steps, loss is 1.473685, validation accuracy using average model is 0.988\n",
      "After 32000 training steps, loss is 1.468796, validation accuracy using average model is 0.9926\n",
      "After 33000 training steps, loss is 1.469447, validation accuracy using average model is 0.9924\n",
      "After 34000 training steps, loss is 1.469915, validation accuracy using average model is 0.9914\n",
      "After 35000 training steps, loss is 1.469741, validation accuracy using average model is 0.9918\n",
      "After 36000 training steps, loss is 1.468419, validation accuracy using average model is 0.9936\n",
      "After 37000 training steps, loss is 1.470010, validation accuracy using average model is 0.9914\n",
      "After 38000 training steps, loss is 1.471329, validation accuracy using average model is 0.9898\n",
      "After 39000 training steps, loss is 1.469858, validation accuracy using average model is 0.9918\n",
      "After 40000 training steps, loss is 1.470525, validation accuracy using average model is 0.9908\n",
      "After 41000 training steps, loss is 1.470078, validation accuracy using average model is 0.9912\n",
      "After 42000 training steps, loss is 1.470722, validation accuracy using average model is 0.9904\n",
      "After 43000 training steps, loss is 1.469797, validation accuracy using average model is 0.9914\n",
      "After 44000 training steps, loss is 1.470041, validation accuracy using average model is 0.9912\n",
      "After 45000 training steps, loss is 1.469802, validation accuracy using average model is 0.9916\n",
      "After 46000 training steps, loss is 1.469825, validation accuracy using average model is 0.9918\n",
      "After 47000 training steps, loss is 1.471715, validation accuracy using average model is 0.9894\n",
      "After 48000 training steps, loss is 1.469005, validation accuracy using average model is 0.9926\n",
      "After 49000 training steps, loss is 1.469607, validation accuracy using average model is 0.992\n",
      "After 50000 training steps, loss is 1.469607, test accuracy using average model is 0.992\n"
     ]
    }
   ],
   "source": [
    "CONV_KSIZE_LIST = [[3,3,1,16],[3,3,16,32],[3,3,32,64]]\n",
    "POOL_KSIZE = [1,2,2,1]\n",
    "HIDE_LAYER_LIST = [1000,1000]\n",
    "DROP_OUT = 0.9\n",
    "REGULARIZATION_RATE = 0.0005\n",
    "LEARNING_RATE = 0.0003\n",
    "BATCH_SIZE = 100\n",
    "TRAINING_STEPS = 50000\n",
    "\n",
    "#学习率调整为0.0003\n",
    "train(CONV_KSIZE_LIST, POOL_KSIZE, HIDE_LAYER_LIST, DROP_OUT, LEARNING_RATE, REGULARIZATION_RATE, TRAINING_STEPS, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
