{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Import Keras"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'channels_last'"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from keras.layers.core import Dense, Flatten\n",
    "from keras.layers.convolutional import Conv2D\n",
    "from keras.layers.pooling import MaxPooling2D\n",
    "from keras.initializers import TruncatedNormal\n",
    "from keras import regularizers\n",
    "from keras import backend as K\n",
    "\n",
    "K.image_data_format() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Create the model\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "W = tf.Variable(tf.zeros([784, 10]))\n",
    "b = tf.Variable(tf.zeros([10]))\n",
    "y = tf.matmul(x, W) + b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "learning_rate = tf.placeholder(tf.float32)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Define the placeholder for our ground-truth labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Define loss and optimizer\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Next we compute the cross-entropy. Note: do not use the manual computation shown in the comment below; use the built-in library function instead.\n",
    "Also note that the `logits` argument of `softmax_cross_entropy_with_logits` must be the **raw, pre-activation wx+b**"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Weight initialization and regularization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "initializer = TruncatedNormal(mean=0.0, stddev=0.1, seed=3)\n",
    "regularizer_l2 = regularizers.l2(0.01)\n",
    "regularizer_l1 = regularizers.l1(0.1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Build the network"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "\n",
    "with tf.name_scope('reshape'):\n",
    "    #条数,高,宽,深度(channel)\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "net = Conv2D(32, kernel_size=[5,5], strides=[1,1],activation='relu',\n",
    "            padding='same',\n",
    "            input_shape=[28,28,1],\n",
    "            kernel_initializer = initializer, # kernal 初始化\n",
    "            bias_initializer = initializer, # bias 初始化\n",
    "            kernel_regularizer = regularizer_l2,\n",
    "            bias_regularizer = regularizer_l1,\n",
    "            )(x_image)\n",
    "net = MaxPooling2D(pool_size=[2,2])(net)\n",
    "net = Conv2D(64, kernel_size=[5,5], strides=[1,1],activation='relu',\n",
    "             padding='same')(net)\n",
    "net = MaxPooling2D(pool_size=[2,2])(net)\n",
    "net = Flatten()(net)\n",
    "net = Dense(1000, activation='relu')(net)\n",
    "net = Dense(10, activation='softmax')(net)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "# outputs of 'y', and then average across the batch.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits= net))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Add L2 regularization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "K.set_session(sess)\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Train the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 2.298270, l2_loss: 3167.828613, total loss: 2.520018\n",
      "0.23\n",
      "step 200, entropy loss: 2.262700, l2_loss: 3167.119629, total loss: 2.484398\n",
      "0.4\n",
      "step 300, entropy loss: 2.095112, l2_loss: 3166.963135, total loss: 2.316800\n",
      "0.4\n",
      "step 400, entropy loss: 1.885473, l2_loss: 3167.487305, total loss: 2.107198\n",
      "0.64\n",
      "step 500, entropy loss: 1.814662, l2_loss: 3167.573975, total loss: 2.036392\n",
      "0.71\n",
      "step 600, entropy loss: 1.787114, l2_loss: 3167.323975, total loss: 2.008826\n",
      "0.72\n",
      "step 700, entropy loss: 1.728430, l2_loss: 3166.935547, total loss: 1.950116\n",
      "0.78\n",
      "step 800, entropy loss: 1.692464, l2_loss: 3166.454834, total loss: 1.914116\n",
      "0.8\n",
      "step 900, entropy loss: 1.668267, l2_loss: 3165.890625, total loss: 1.889880\n",
      "0.84\n",
      "step 1000, entropy loss: 1.726544, l2_loss: 3165.285400, total loss: 1.948114\n",
      "0.75\n",
      "0.7629\n",
      "step 1100, entropy loss: 1.624444, l2_loss: 3164.716309, total loss: 1.845974\n",
      "0.87\n",
      "step 1200, entropy loss: 1.628938, l2_loss: 3164.223877, total loss: 1.850434\n",
      "0.87\n",
      "step 1300, entropy loss: 1.645679, l2_loss: 3163.680664, total loss: 1.867137\n",
      "0.83\n",
      "step 1400, entropy loss: 1.653116, l2_loss: 3163.096680, total loss: 1.874533\n",
      "0.86\n",
      "step 1500, entropy loss: 1.586170, l2_loss: 3162.462646, total loss: 1.807542\n",
      "0.9\n",
      "step 1600, entropy loss: 1.587377, l2_loss: 3161.819336, total loss: 1.808704\n",
      "0.89\n",
      "step 1700, entropy loss: 1.577377, l2_loss: 3161.163086, total loss: 1.798659\n",
      "0.9\n",
      "step 1800, entropy loss: 1.608685, l2_loss: 3160.487549, total loss: 1.829919\n",
      "0.88\n",
      "step 1900, entropy loss: 1.613796, l2_loss: 3159.799805, total loss: 1.834982\n",
      "0.87\n",
      "step 2000, entropy loss: 1.585051, l2_loss: 3159.091797, total loss: 1.806188\n",
      "0.88\n",
      "0.8572\n",
      "step 2100, entropy loss: 1.587777, l2_loss: 3158.375244, total loss: 1.808863\n",
      "0.89\n",
      "step 2200, entropy loss: 1.618808, l2_loss: 3157.657227, total loss: 1.839844\n",
      "0.86\n",
      "step 2300, entropy loss: 1.621427, l2_loss: 3156.926514, total loss: 1.842412\n",
      "0.85\n",
      "step 2400, entropy loss: 1.509983, l2_loss: 3156.333252, total loss: 1.730926\n",
      "0.96\n",
      "step 2500, entropy loss: 1.529688, l2_loss: 3155.742188, total loss: 1.750590\n",
      "0.98\n",
      "step 2600, entropy loss: 1.516564, l2_loss: 3155.119141, total loss: 1.737423\n",
      "0.96\n",
      "step 2700, entropy loss: 1.491890, l2_loss: 3154.483398, total loss: 1.712704\n",
      "0.99\n",
      "step 2800, entropy loss: 1.522581, l2_loss: 3153.838867, total loss: 1.743350\n",
      "0.98\n",
      "step 2900, entropy loss: 1.491031, l2_loss: 3153.173340, total loss: 1.711753\n",
      "0.98\n",
      "step 3000, entropy loss: 1.519165, l2_loss: 3152.491699, total loss: 1.739839\n",
      "0.96\n",
      "0.9459\n",
      "step 3100, entropy loss: 1.556264, l2_loss: 3151.800781, total loss: 1.776890\n",
      "0.95\n",
      "step 3200, entropy loss: 1.581939, l2_loss: 3151.106689, total loss: 1.802516\n",
      "0.93\n",
      "step 3300, entropy loss: 1.500847, l2_loss: 3150.406006, total loss: 1.721376\n",
      "0.99\n",
      "step 3400, entropy loss: 1.493312, l2_loss: 3149.703125, total loss: 1.713791\n",
      "0.99\n",
      "step 3500, entropy loss: 1.509770, l2_loss: 3148.990479, total loss: 1.730199\n",
      "0.98\n",
      "step 3600, entropy loss: 1.558884, l2_loss: 3148.271484, total loss: 1.779263\n",
      "0.92\n",
      "step 3700, entropy loss: 1.512803, l2_loss: 3147.552490, total loss: 1.733131\n",
      "0.98\n",
      "step 3800, entropy loss: 1.506213, l2_loss: 3146.833984, total loss: 1.726492\n",
      "0.97\n",
      "step 3900, entropy loss: 1.497549, l2_loss: 3146.101318, total loss: 1.717776\n",
      "0.98\n",
      "step 4000, entropy loss: 1.512258, l2_loss: 3145.362305, total loss: 1.732433\n",
      "0.97\n",
      "0.958\n",
      "step 4100, entropy loss: 1.479936, l2_loss: 3144.626465, total loss: 1.700060\n",
      "1.0\n",
      "step 4200, entropy loss: 1.523380, l2_loss: 3143.886230, total loss: 1.743452\n",
      "0.96\n",
      "step 4300, entropy loss: 1.474851, l2_loss: 3143.146240, total loss: 1.694872\n",
      "1.0\n",
      "step 4400, entropy loss: 1.474180, l2_loss: 3142.408936, total loss: 1.694149\n",
      "1.0\n",
      "step 4500, entropy loss: 1.539476, l2_loss: 3141.669922, total loss: 1.759392\n",
      "0.95\n",
      "step 4600, entropy loss: 1.522521, l2_loss: 3140.919434, total loss: 1.742386\n",
      "0.97\n",
      "step 4700, entropy loss: 1.489528, l2_loss: 3140.174561, total loss: 1.709340\n",
      "0.99\n",
      "step 4800, entropy loss: 1.518373, l2_loss: 3139.425293, total loss: 1.738133\n",
      "0.97\n",
      "step 4900, entropy loss: 1.493511, l2_loss: 3138.674805, total loss: 1.713219\n",
      "0.99\n",
      "step 5000, entropy loss: 1.519120, l2_loss: 3137.919434, total loss: 1.738775\n",
      "0.95\n",
      "0.969\n",
      "step 5100, entropy loss: 1.526900, l2_loss: 3137.169434, total loss: 1.746502\n",
      "0.95\n",
      "step 5200, entropy loss: 1.497905, l2_loss: 3136.413574, total loss: 1.717454\n",
      "0.97\n",
      "step 5300, entropy loss: 1.513456, l2_loss: 3135.650146, total loss: 1.732952\n",
      "0.96\n",
      "step 5400, entropy loss: 1.490800, l2_loss: 3134.885498, total loss: 1.710242\n",
      "0.98\n",
      "step 5500, entropy loss: 1.497662, l2_loss: 3134.131592, total loss: 1.717051\n",
      "0.97\n",
      "step 5600, entropy loss: 1.482888, l2_loss: 3133.363770, total loss: 1.702224\n",
      "0.99\n",
      "step 5700, entropy loss: 1.477205, l2_loss: 3132.607666, total loss: 1.696487\n",
      "1.0\n",
      "step 5800, entropy loss: 1.503964, l2_loss: 3131.846436, total loss: 1.723193\n",
      "0.98\n",
      "step 5900, entropy loss: 1.527676, l2_loss: 3131.079102, total loss: 1.746851\n",
      "0.95\n",
      "step 6000, entropy loss: 1.483287, l2_loss: 3130.308350, total loss: 1.702408\n",
      "0.99\n",
      "0.9711\n",
      "step 6100, entropy loss: 1.490141, l2_loss: 3129.532227, total loss: 1.709208\n",
      "0.98\n",
      "step 6200, entropy loss: 1.502578, l2_loss: 3128.769043, total loss: 1.721592\n",
      "0.96\n",
      "step 6300, entropy loss: 1.493825, l2_loss: 3127.999756, total loss: 1.712785\n",
      "0.97\n",
      "step 6400, entropy loss: 1.476105, l2_loss: 3127.228027, total loss: 1.695011\n",
      "1.0\n",
      "step 6500, entropy loss: 1.523134, l2_loss: 3126.450195, total loss: 1.741985\n",
      "0.97\n",
      "step 6600, entropy loss: 1.474276, l2_loss: 3125.672607, total loss: 1.693073\n",
      "1.0\n",
      "step 6700, entropy loss: 1.504574, l2_loss: 3124.891113, total loss: 1.723317\n",
      "0.97\n",
      "step 6800, entropy loss: 1.491390, l2_loss: 3124.123535, total loss: 1.710078\n",
      "1.0\n",
      "step 6900, entropy loss: 1.498669, l2_loss: 3123.343750, total loss: 1.717303\n",
      "0.97\n",
      "step 7000, entropy loss: 1.480986, l2_loss: 3122.567627, total loss: 1.699566\n",
      "0.99\n",
      "0.9748\n",
      "step 7100, entropy loss: 1.502677, l2_loss: 3121.792969, total loss: 1.721202\n",
      "0.98\n",
      "step 7200, entropy loss: 1.487108, l2_loss: 3121.015381, total loss: 1.705579\n",
      "0.98\n",
      "step 7300, entropy loss: 1.495392, l2_loss: 3120.238281, total loss: 1.713809\n",
      "0.97\n",
      "step 7400, entropy loss: 1.471404, l2_loss: 3119.459961, total loss: 1.689767\n",
      "1.0\n",
      "step 7500, entropy loss: 1.482160, l2_loss: 3118.678223, total loss: 1.700467\n",
      "0.98\n",
      "step 7600, entropy loss: 1.493573, l2_loss: 3117.894775, total loss: 1.711826\n",
      "0.99\n",
      "step 7700, entropy loss: 1.477341, l2_loss: 3117.116943, total loss: 1.695539\n",
      "0.99\n",
      "step 7800, entropy loss: 1.473348, l2_loss: 3116.333740, total loss: 1.691492\n",
      "0.99\n",
      "step 7900, entropy loss: 1.466976, l2_loss: 3115.548096, total loss: 1.685064\n",
      "1.0\n",
      "step 8000, entropy loss: 1.485345, l2_loss: 3114.764160, total loss: 1.703378\n",
      "0.99\n",
      "0.9768\n",
      "step 8100, entropy loss: 1.509149, l2_loss: 3113.981201, total loss: 1.727128\n",
      "0.97\n",
      "step 8200, entropy loss: 1.512001, l2_loss: 3113.199219, total loss: 1.729925\n",
      "0.97\n",
      "step 8300, entropy loss: 1.463602, l2_loss: 3112.419922, total loss: 1.681472\n",
      "1.0\n",
      "step 8400, entropy loss: 1.477999, l2_loss: 3111.636475, total loss: 1.695814\n",
      "0.99\n",
      "step 8500, entropy loss: 1.524084, l2_loss: 3110.850586, total loss: 1.741844\n",
      "0.95\n",
      "step 8600, entropy loss: 1.503465, l2_loss: 3110.065918, total loss: 1.721170\n",
      "0.97\n",
      "step 8700, entropy loss: 1.469805, l2_loss: 3109.277344, total loss: 1.687454\n",
      "1.0\n",
      "step 8800, entropy loss: 1.487996, l2_loss: 3108.496582, total loss: 1.705591\n",
      "0.98\n",
      "step 8900, entropy loss: 1.474325, l2_loss: 3107.710938, total loss: 1.691865\n",
      "0.99\n",
      "step 9000, entropy loss: 1.482057, l2_loss: 3106.922363, total loss: 1.699541\n",
      "0.99\n",
      "0.9791\n",
      "step 9100, entropy loss: 1.497815, l2_loss: 3106.136963, total loss: 1.715244\n",
      "0.98\n",
      "step 9200, entropy loss: 1.495045, l2_loss: 3105.343506, total loss: 1.712419\n",
      "0.97\n",
      "step 9300, entropy loss: 1.494344, l2_loss: 3104.550293, total loss: 1.711662\n",
      "0.98\n",
      "step 9400, entropy loss: 1.482179, l2_loss: 3103.760498, total loss: 1.699442\n",
      "0.99\n",
      "step 9500, entropy loss: 1.482915, l2_loss: 3102.968506, total loss: 1.700122\n",
      "0.99\n",
      "step 9600, entropy loss: 1.480996, l2_loss: 3102.184082, total loss: 1.698149\n",
      "1.0\n",
      "step 9700, entropy loss: 1.476445, l2_loss: 3101.401855, total loss: 1.693543\n",
      "0.99\n",
      "step 9800, entropy loss: 1.507253, l2_loss: 3100.612549, total loss: 1.724296\n",
      "0.97\n",
      "step 9900, entropy loss: 1.485006, l2_loss: 3099.824463, total loss: 1.701994\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.99\n",
      "step 10000, entropy loss: 1.487083, l2_loss: 3099.037598, total loss: 1.704016\n",
      "0.98\n",
      "0.9755\n",
      "step 10100, entropy loss: 1.500265, l2_loss: 3098.252197, total loss: 1.717143\n",
      "0.97\n",
      "step 10200, entropy loss: 1.515023, l2_loss: 3097.460205, total loss: 1.731845\n",
      "0.96\n",
      "step 10300, entropy loss: 1.471040, l2_loss: 3096.674805, total loss: 1.687808\n",
      "1.0\n",
      "step 10400, entropy loss: 1.475399, l2_loss: 3095.879883, total loss: 1.692111\n",
      "0.99\n",
      "step 10500, entropy loss: 1.475553, l2_loss: 3095.087158, total loss: 1.692209\n",
      "1.0\n",
      "step 10600, entropy loss: 1.478181, l2_loss: 3094.291504, total loss: 1.694782\n",
      "0.99\n",
      "step 10700, entropy loss: 1.492045, l2_loss: 3093.495117, total loss: 1.708589\n",
      "0.98\n",
      "step 10800, entropy loss: 1.475025, l2_loss: 3092.701416, total loss: 1.691514\n",
      "0.99\n",
      "step 10900, entropy loss: 1.476996, l2_loss: 3091.913818, total loss: 1.693430\n",
      "0.99\n",
      "step 11000, entropy loss: 1.480809, l2_loss: 3091.122803, total loss: 1.697188\n",
      "1.0\n",
      "0.981\n",
      "step 11100, entropy loss: 1.467626, l2_loss: 3090.322754, total loss: 1.683949\n",
      "1.0\n",
      "step 11200, entropy loss: 1.468644, l2_loss: 3089.534668, total loss: 1.684912\n",
      "1.0\n",
      "step 11300, entropy loss: 1.470038, l2_loss: 3088.743652, total loss: 1.686250\n",
      "1.0\n",
      "step 11400, entropy loss: 1.496692, l2_loss: 3087.956543, total loss: 1.712849\n",
      "0.98\n",
      "step 11500, entropy loss: 1.492541, l2_loss: 3087.158203, total loss: 1.708642\n",
      "0.99\n",
      "step 11600, entropy loss: 1.481879, l2_loss: 3086.364746, total loss: 1.697924\n",
      "0.98\n",
      "step 11700, entropy loss: 1.486504, l2_loss: 3085.569824, total loss: 1.702494\n",
      "0.98\n",
      "step 11800, entropy loss: 1.468878, l2_loss: 3084.779297, total loss: 1.684813\n",
      "1.0\n",
      "step 11900, entropy loss: 1.507727, l2_loss: 3083.984375, total loss: 1.723606\n",
      "0.98\n",
      "step 12000, entropy loss: 1.471884, l2_loss: 3083.191162, total loss: 1.687708\n",
      "0.99\n",
      "0.9829\n",
      "step 12100, entropy loss: 1.486628, l2_loss: 3082.392578, total loss: 1.702396\n",
      "0.98\n",
      "step 12200, entropy loss: 1.461535, l2_loss: 3081.597900, total loss: 1.677247\n",
      "1.0\n",
      "step 12300, entropy loss: 1.490358, l2_loss: 3080.804443, total loss: 1.706014\n",
      "0.98\n",
      "step 12400, entropy loss: 1.463294, l2_loss: 3080.005859, total loss: 1.678894\n",
      "1.0\n",
      "step 12500, entropy loss: 1.466567, l2_loss: 3079.209473, total loss: 1.682112\n",
      "1.0\n",
      "step 12600, entropy loss: 1.468452, l2_loss: 3078.416748, total loss: 1.683941\n",
      "1.0\n",
      "step 12700, entropy loss: 1.480058, l2_loss: 3077.622803, total loss: 1.695492\n",
      "0.99\n",
      "step 12800, entropy loss: 1.465436, l2_loss: 3076.821289, total loss: 1.680814\n",
      "1.0\n",
      "step 12900, entropy loss: 1.475134, l2_loss: 3076.026611, total loss: 1.690456\n",
      "1.0\n",
      "step 13000, entropy loss: 1.476635, l2_loss: 3075.226562, total loss: 1.691901\n",
      "0.99\n",
      "0.982\n",
      "step 13100, entropy loss: 1.488470, l2_loss: 3074.428711, total loss: 1.703680\n",
      "0.98\n",
      "step 13200, entropy loss: 1.489778, l2_loss: 3073.633545, total loss: 1.704932\n",
      "0.99\n",
      "step 13300, entropy loss: 1.463536, l2_loss: 3072.838379, total loss: 1.678635\n",
      "1.0\n",
      "step 13400, entropy loss: 1.473804, l2_loss: 3072.042480, total loss: 1.688847\n",
      "0.99\n",
      "step 13500, entropy loss: 1.487151, l2_loss: 3071.236572, total loss: 1.702138\n",
      "0.99\n",
      "step 13600, entropy loss: 1.475740, l2_loss: 3070.441650, total loss: 1.690671\n",
      "0.99\n",
      "step 13700, entropy loss: 1.486054, l2_loss: 3069.645508, total loss: 1.700929\n",
      "0.99\n",
      "step 13800, entropy loss: 1.472062, l2_loss: 3068.848145, total loss: 1.686881\n",
      "0.99\n",
      "step 13900, entropy loss: 1.468341, l2_loss: 3068.053711, total loss: 1.683105\n",
      "1.0\n",
      "step 14000, entropy loss: 1.469184, l2_loss: 3067.252930, total loss: 1.683892\n",
      "1.0\n",
      "0.9838\n",
      "step 14100, entropy loss: 1.506365, l2_loss: 3066.458008, total loss: 1.721017\n",
      "0.97\n",
      "step 14200, entropy loss: 1.470384, l2_loss: 3065.659424, total loss: 1.684980\n",
      "1.0\n",
      "step 14300, entropy loss: 1.472795, l2_loss: 3064.861328, total loss: 1.687335\n",
      "1.0\n",
      "step 14400, entropy loss: 1.472299, l2_loss: 3064.066406, total loss: 1.686784\n",
      "0.99\n",
      "step 14500, entropy loss: 1.482445, l2_loss: 3063.272949, total loss: 1.696874\n",
      "0.99\n",
      "step 14600, entropy loss: 1.471301, l2_loss: 3062.472656, total loss: 1.685674\n",
      "1.0\n",
      "step 14700, entropy loss: 1.482044, l2_loss: 3061.670898, total loss: 1.696361\n",
      "0.98\n",
      "step 14800, entropy loss: 1.475803, l2_loss: 3060.868408, total loss: 1.690063\n",
      "0.99\n",
      "step 14900, entropy loss: 1.468979, l2_loss: 3060.072021, total loss: 1.683184\n",
      "1.0\n",
      "step 15000, entropy loss: 1.475392, l2_loss: 3059.272705, total loss: 1.689541\n",
      "0.99\n",
      "0.9834\n",
      "step 15100, entropy loss: 1.492687, l2_loss: 3058.477783, total loss: 1.706781\n",
      "0.98\n",
      "step 15200, entropy loss: 1.481709, l2_loss: 3057.676270, total loss: 1.695747\n",
      "0.98\n",
      "step 15300, entropy loss: 1.463657, l2_loss: 3056.872314, total loss: 1.677638\n",
      "1.0\n",
      "step 15400, entropy loss: 1.469219, l2_loss: 3056.071777, total loss: 1.683144\n",
      "1.0\n",
      "step 15500, entropy loss: 1.479842, l2_loss: 3055.282471, total loss: 1.693711\n",
      "0.99\n",
      "step 15600, entropy loss: 1.504165, l2_loss: 3054.483398, total loss: 1.717978\n",
      "0.97\n",
      "step 15700, entropy loss: 1.477692, l2_loss: 3053.685547, total loss: 1.691450\n",
      "0.99\n",
      "step 15800, entropy loss: 1.467590, l2_loss: 3052.890381, total loss: 1.681293\n",
      "1.0\n",
      "step 15900, entropy loss: 1.472591, l2_loss: 3052.087402, total loss: 1.686238\n",
      "0.99\n",
      "step 16000, entropy loss: 1.470707, l2_loss: 3051.292480, total loss: 1.684298\n",
      "1.0\n",
      "0.9849\n",
      "step 16100, entropy loss: 1.462669, l2_loss: 3050.495605, total loss: 1.676204\n",
      "1.0\n",
      "step 16200, entropy loss: 1.470202, l2_loss: 3049.700195, total loss: 1.683681\n",
      "1.0\n",
      "step 16300, entropy loss: 1.490822, l2_loss: 3048.906006, total loss: 1.704246\n",
      "0.98\n",
      "step 16400, entropy loss: 1.471673, l2_loss: 3048.110107, total loss: 1.685040\n",
      "1.0\n",
      "step 16500, entropy loss: 1.468759, l2_loss: 3047.316406, total loss: 1.682071\n",
      "1.0\n",
      "step 16600, entropy loss: 1.466300, l2_loss: 3046.521729, total loss: 1.679556\n",
      "1.0\n",
      "step 16700, entropy loss: 1.472871, l2_loss: 3045.729004, total loss: 1.686072\n",
      "0.99\n",
      "step 16800, entropy loss: 1.482540, l2_loss: 3044.934570, total loss: 1.695685\n",
      "0.99\n",
      "step 16900, entropy loss: 1.471863, l2_loss: 3044.138184, total loss: 1.684952\n",
      "0.99\n",
      "step 17000, entropy loss: 1.461557, l2_loss: 3043.342285, total loss: 1.674591\n",
      "1.0\n",
      "0.984\n",
      "step 17100, entropy loss: 1.461677, l2_loss: 3042.544189, total loss: 1.674656\n",
      "1.0\n",
      "step 17200, entropy loss: 1.466717, l2_loss: 3041.748047, total loss: 1.679639\n",
      "1.0\n",
      "step 17300, entropy loss: 1.468376, l2_loss: 3040.948242, total loss: 1.681243\n",
      "1.0\n",
      "step 17400, entropy loss: 1.491497, l2_loss: 3040.151123, total loss: 1.704308\n",
      "0.98\n",
      "step 17500, entropy loss: 1.491194, l2_loss: 3039.357422, total loss: 1.703949\n",
      "0.98\n",
      "step 17600, entropy loss: 1.467028, l2_loss: 3038.562012, total loss: 1.679727\n",
      "1.0\n",
      "step 17700, entropy loss: 1.472702, l2_loss: 3037.766846, total loss: 1.685346\n",
      "0.99\n",
      "step 17800, entropy loss: 1.494966, l2_loss: 3036.973145, total loss: 1.707554\n",
      "0.97\n",
      "step 17900, entropy loss: 1.487251, l2_loss: 3036.175293, total loss: 1.699783\n",
      "0.98\n",
      "step 18000, entropy loss: 1.478960, l2_loss: 3035.375488, total loss: 1.691436\n",
      "1.0\n",
      "0.9846\n",
      "step 18100, entropy loss: 1.472521, l2_loss: 3034.583008, total loss: 1.684942\n",
      "0.99\n",
      "step 18200, entropy loss: 1.485544, l2_loss: 3033.784424, total loss: 1.697909\n",
      "0.98\n",
      "step 18300, entropy loss: 1.475507, l2_loss: 3032.989014, total loss: 1.687817\n",
      "0.99\n",
      "step 18400, entropy loss: 1.465855, l2_loss: 3032.192871, total loss: 1.678108\n",
      "1.0\n",
      "step 18500, entropy loss: 1.466650, l2_loss: 3031.400146, total loss: 1.678848\n",
      "1.0\n",
      "step 18600, entropy loss: 1.483427, l2_loss: 3030.601318, total loss: 1.695569\n",
      "0.98\n",
      "step 18700, entropy loss: 1.493130, l2_loss: 3029.807129, total loss: 1.705216\n",
      "0.97\n",
      "step 18800, entropy loss: 1.462123, l2_loss: 3029.014160, total loss: 1.674154\n",
      "1.0\n",
      "step 18900, entropy loss: 1.487637, l2_loss: 3028.214844, total loss: 1.699612\n",
      "0.99\n",
      "step 19000, entropy loss: 1.490868, l2_loss: 3027.420410, total loss: 1.702787\n",
      "0.97\n",
      "0.9855\n",
      "step 19100, entropy loss: 1.471271, l2_loss: 3026.622559, total loss: 1.683135\n",
      "1.0\n",
      "step 19200, entropy loss: 1.477638, l2_loss: 3025.824707, total loss: 1.689446\n",
      "0.99\n",
      "step 19300, entropy loss: 1.473559, l2_loss: 3025.034912, total loss: 1.685312\n",
      "1.0\n",
      "step 19400, entropy loss: 1.473287, l2_loss: 3024.239746, total loss: 1.684984\n",
      "1.0\n",
      "step 19500, entropy loss: 1.476484, l2_loss: 3023.440918, total loss: 1.688125\n",
      "0.99\n",
      "step 19600, entropy loss: 1.497496, l2_loss: 3022.646973, total loss: 1.709081\n",
      "0.97\n",
      "step 19700, entropy loss: 1.463704, l2_loss: 3021.854736, total loss: 1.675234\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.0\n",
      "step 19800, entropy loss: 1.475629, l2_loss: 3021.063965, total loss: 1.687104\n",
      "0.99\n",
      "step 19900, entropy loss: 1.481416, l2_loss: 3020.269531, total loss: 1.692835\n",
      "0.99\n",
      "step 20000, entropy loss: 1.468711, l2_loss: 3019.476074, total loss: 1.680075\n",
      "1.0\n",
      "0.9826\n",
      "step 20100, entropy loss: 1.484305, l2_loss: 3018.683350, total loss: 1.695613\n",
      "0.98\n",
      "step 20200, entropy loss: 1.479967, l2_loss: 3017.888184, total loss: 1.691219\n",
      "0.99\n",
      "step 20300, entropy loss: 1.463354, l2_loss: 3017.093994, total loss: 1.674550\n",
      "1.0\n",
      "step 20400, entropy loss: 1.463096, l2_loss: 3016.301025, total loss: 1.674237\n",
      "1.0\n",
      "step 20500, entropy loss: 1.474301, l2_loss: 3015.505371, total loss: 1.685386\n",
      "0.99\n",
      "step 20600, entropy loss: 1.480439, l2_loss: 3014.707031, total loss: 1.691468\n",
      "0.99\n",
      "step 20700, entropy loss: 1.489545, l2_loss: 3013.912109, total loss: 1.700519\n",
      "0.98\n",
      "step 20800, entropy loss: 1.496573, l2_loss: 3013.118896, total loss: 1.707492\n",
      "0.97\n",
      "step 20900, entropy loss: 1.483985, l2_loss: 3012.327637, total loss: 1.694848\n",
      "0.99\n",
      "step 21000, entropy loss: 1.464474, l2_loss: 3011.535400, total loss: 1.675281\n",
      "1.0\n",
      "0.9852\n",
      "step 21100, entropy loss: 1.494707, l2_loss: 3010.746338, total loss: 1.705459\n",
      "0.98\n",
      "step 21200, entropy loss: 1.486394, l2_loss: 3009.952637, total loss: 1.697091\n",
      "0.98\n",
      "step 21300, entropy loss: 1.475128, l2_loss: 3009.161621, total loss: 1.685770\n",
      "0.99\n",
      "step 21400, entropy loss: 1.464370, l2_loss: 3008.366699, total loss: 1.674955\n",
      "1.0\n",
      "step 21500, entropy loss: 1.477121, l2_loss: 3007.571533, total loss: 1.687651\n",
      "0.99\n",
      "step 21600, entropy loss: 1.463222, l2_loss: 3006.779053, total loss: 1.673697\n",
      "1.0\n",
      "step 21700, entropy loss: 1.471766, l2_loss: 3005.986084, total loss: 1.682185\n",
      "0.99\n",
      "step 21800, entropy loss: 1.486735, l2_loss: 3005.194092, total loss: 1.697098\n",
      "0.98\n",
      "step 21900, entropy loss: 1.465861, l2_loss: 3004.404053, total loss: 1.676169\n",
      "1.0\n",
      "step 22000, entropy loss: 1.479593, l2_loss: 3003.610596, total loss: 1.689846\n",
      "0.99\n",
      "0.9856\n",
      "step 22100, entropy loss: 1.489082, l2_loss: 3002.817627, total loss: 1.699280\n",
      "0.98\n",
      "step 22200, entropy loss: 1.478127, l2_loss: 3002.028809, total loss: 1.688269\n",
      "0.99\n",
      "step 22300, entropy loss: 1.462692, l2_loss: 3001.237549, total loss: 1.672778\n",
      "1.0\n",
      "step 22400, entropy loss: 1.472542, l2_loss: 3000.447266, total loss: 1.682574\n",
      "0.99\n",
      "step 22500, entropy loss: 1.470157, l2_loss: 2999.653320, total loss: 1.680133\n",
      "1.0\n",
      "step 22600, entropy loss: 1.483155, l2_loss: 2998.864502, total loss: 1.693076\n",
      "0.98\n",
      "step 22700, entropy loss: 1.468777, l2_loss: 2998.074219, total loss: 1.678642\n",
      "1.0\n",
      "step 22800, entropy loss: 1.473285, l2_loss: 2997.283447, total loss: 1.683095\n",
      "0.99\n",
      "step 22900, entropy loss: 1.483515, l2_loss: 2996.492188, total loss: 1.693270\n",
      "0.98\n",
      "step 23000, entropy loss: 1.463326, l2_loss: 2995.699707, total loss: 1.673025\n",
      "1.0\n",
      "0.9865\n",
      "step 23100, entropy loss: 1.463818, l2_loss: 2994.905762, total loss: 1.673462\n",
      "1.0\n",
      "step 23200, entropy loss: 1.483068, l2_loss: 2994.117188, total loss: 1.692656\n",
      "0.99\n",
      "step 23300, entropy loss: 1.461699, l2_loss: 2993.322266, total loss: 1.671231\n",
      "1.0\n",
      "step 23400, entropy loss: 1.481896, l2_loss: 2992.531738, total loss: 1.691373\n",
      "0.98\n",
      "step 23500, entropy loss: 1.465080, l2_loss: 2991.740723, total loss: 1.674501\n",
      "1.0\n",
      "step 23600, entropy loss: 1.492307, l2_loss: 2990.959473, total loss: 1.701674\n",
      "0.97\n",
      "step 23700, entropy loss: 1.472165, l2_loss: 2990.167480, total loss: 1.681477\n",
      "0.99\n",
      "step 23800, entropy loss: 1.463592, l2_loss: 2989.377197, total loss: 1.672848\n",
      "1.0\n",
      "step 23900, entropy loss: 1.463911, l2_loss: 2988.587158, total loss: 1.673112\n",
      "1.0\n",
      "step 24000, entropy loss: 1.476851, l2_loss: 2987.795898, total loss: 1.685996\n",
      "0.99\n",
      "0.9849\n",
      "step 24100, entropy loss: 1.480523, l2_loss: 2987.007812, total loss: 1.689614\n",
      "0.99\n",
      "step 24200, entropy loss: 1.476923, l2_loss: 2986.219971, total loss: 1.685959\n",
      "0.99\n",
      "step 24300, entropy loss: 1.495417, l2_loss: 2985.433594, total loss: 1.704397\n",
      "0.98\n",
      "step 24400, entropy loss: 1.485257, l2_loss: 2984.644775, total loss: 1.694182\n",
      "0.98\n",
      "step 24500, entropy loss: 1.474816, l2_loss: 2983.854980, total loss: 1.683686\n",
      "0.99\n",
      "step 24600, entropy loss: 1.472645, l2_loss: 2983.069336, total loss: 1.681460\n",
      "0.99\n",
      "step 24700, entropy loss: 1.473221, l2_loss: 2982.277588, total loss: 1.681980\n",
      "1.0\n",
      "step 24800, entropy loss: 1.481752, l2_loss: 2981.490234, total loss: 1.690456\n",
      "0.98\n",
      "step 24900, entropy loss: 1.463236, l2_loss: 2980.701172, total loss: 1.671885\n",
      "1.0\n",
      "step 25000, entropy loss: 1.470420, l2_loss: 2979.909180, total loss: 1.679014\n",
      "1.0\n",
      "0.9872\n",
      "step 25100, entropy loss: 1.481812, l2_loss: 2979.120850, total loss: 1.690350\n",
      "0.99\n",
      "step 25200, entropy loss: 1.465457, l2_loss: 2978.334961, total loss: 1.673940\n",
      "1.0\n",
      "step 25300, entropy loss: 1.473180, l2_loss: 2977.540527, total loss: 1.681608\n",
      "0.99\n",
      "step 25400, entropy loss: 1.472598, l2_loss: 2976.754639, total loss: 1.680971\n",
      "0.99\n",
      "step 25500, entropy loss: 1.463655, l2_loss: 2975.966064, total loss: 1.671972\n",
      "1.0\n",
      "step 25600, entropy loss: 1.461771, l2_loss: 2975.176758, total loss: 1.670033\n",
      "1.0\n",
      "step 25700, entropy loss: 1.468617, l2_loss: 2974.385010, total loss: 1.676824\n",
      "1.0\n",
      "step 25800, entropy loss: 1.471391, l2_loss: 2973.594971, total loss: 1.679543\n",
      "1.0\n",
      "step 25900, entropy loss: 1.481970, l2_loss: 2972.806641, total loss: 1.690067\n",
      "0.98\n",
      "step 26000, entropy loss: 1.465169, l2_loss: 2972.016602, total loss: 1.673211\n",
      "1.0\n",
      "0.9868\n",
      "step 26100, entropy loss: 1.471414, l2_loss: 2971.229980, total loss: 1.679400\n",
      "0.99\n",
      "step 26200, entropy loss: 1.480308, l2_loss: 2970.442383, total loss: 1.688239\n",
      "0.99\n",
      "step 26300, entropy loss: 1.473531, l2_loss: 2969.654785, total loss: 1.681406\n",
      "0.99\n",
      "step 26400, entropy loss: 1.492976, l2_loss: 2968.868408, total loss: 1.700797\n",
      "0.98\n",
      "step 26500, entropy loss: 1.462629, l2_loss: 2968.080811, total loss: 1.670395\n",
      "1.0\n",
      "step 26600, entropy loss: 1.471550, l2_loss: 2967.299561, total loss: 1.679261\n",
      "0.99\n",
      "step 26700, entropy loss: 1.471916, l2_loss: 2966.507568, total loss: 1.679571\n",
      "0.99\n",
      "step 26800, entropy loss: 1.463440, l2_loss: 2965.721680, total loss: 1.671040\n",
      "1.0\n",
      "step 26900, entropy loss: 1.492079, l2_loss: 2964.931641, total loss: 1.699625\n",
      "0.97\n",
      "step 27000, entropy loss: 1.471082, l2_loss: 2964.145996, total loss: 1.678572\n",
      "0.99\n",
      "0.986\n",
      "step 27100, entropy loss: 1.461275, l2_loss: 2963.356445, total loss: 1.668710\n",
      "1.0\n",
      "step 27200, entropy loss: 1.463966, l2_loss: 2962.571777, total loss: 1.671346\n",
      "1.0\n",
      "step 27300, entropy loss: 1.480629, l2_loss: 2961.784180, total loss: 1.687954\n",
      "0.99\n",
      "step 27400, entropy loss: 1.470494, l2_loss: 2960.993652, total loss: 1.677763\n",
      "1.0\n",
      "step 27500, entropy loss: 1.462950, l2_loss: 2960.209473, total loss: 1.670164\n",
      "1.0\n",
      "step 27600, entropy loss: 1.473712, l2_loss: 2959.425781, total loss: 1.680872\n",
      "0.99\n",
      "step 27700, entropy loss: 1.462573, l2_loss: 2958.638916, total loss: 1.669678\n",
      "1.0\n",
      "step 27800, entropy loss: 1.470480, l2_loss: 2957.852295, total loss: 1.677530\n",
      "1.0\n",
      "step 27900, entropy loss: 1.469131, l2_loss: 2957.067383, total loss: 1.676126\n",
      "1.0\n",
      "step 28000, entropy loss: 1.474005, l2_loss: 2956.280762, total loss: 1.680945\n",
      "0.99\n",
      "0.9873\n",
      "step 28100, entropy loss: 1.472764, l2_loss: 2955.493896, total loss: 1.679648\n",
      "0.99\n",
      "step 28200, entropy loss: 1.464276, l2_loss: 2954.709717, total loss: 1.671106\n",
      "1.0\n",
      "step 28300, entropy loss: 1.463367, l2_loss: 2953.928223, total loss: 1.670142\n",
      "1.0\n",
      "step 28400, entropy loss: 1.479334, l2_loss: 2953.143066, total loss: 1.686054\n",
      "0.99\n",
      "step 28500, entropy loss: 1.464064, l2_loss: 2952.358398, total loss: 1.670729\n",
      "1.0\n",
      "step 28600, entropy loss: 1.462642, l2_loss: 2951.575684, total loss: 1.669252\n",
      "1.0\n",
      "step 28700, entropy loss: 1.464881, l2_loss: 2950.789307, total loss: 1.671437\n",
      "1.0\n",
      "step 28800, entropy loss: 1.469356, l2_loss: 2950.008057, total loss: 1.675856\n",
      "1.0\n",
      "step 28900, entropy loss: 1.464649, l2_loss: 2949.224121, total loss: 1.671095\n",
      "1.0\n",
      "step 29000, entropy loss: 1.463917, l2_loss: 2948.440918, total loss: 1.670308\n",
      "1.0\n",
      "0.9869\n",
      "step 29100, entropy loss: 1.467186, l2_loss: 2947.656250, total loss: 1.673522\n",
      "1.0\n",
      "step 29200, entropy loss: 1.462827, l2_loss: 2946.870605, total loss: 1.669108\n",
      "1.0\n",
      "step 29300, entropy loss: 1.461392, l2_loss: 2946.095947, total loss: 1.667619\n",
      "1.0\n",
      "step 29400, entropy loss: 1.463428, l2_loss: 2945.312744, total loss: 1.669600\n",
      "1.0\n",
      "step 29500, entropy loss: 1.472174, l2_loss: 2944.531738, total loss: 1.678291\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.99\n",
      "step 29600, entropy loss: 1.465887, l2_loss: 2943.746338, total loss: 1.671949\n",
      "1.0\n",
      "step 29700, entropy loss: 1.473191, l2_loss: 2942.961182, total loss: 1.679198\n",
      "0.99\n",
      "step 29800, entropy loss: 1.462096, l2_loss: 2942.175049, total loss: 1.668048\n",
      "1.0\n",
      "step 29900, entropy loss: 1.467493, l2_loss: 2941.393555, total loss: 1.673390\n",
      "1.0\n",
      "step 30000, entropy loss: 1.470148, l2_loss: 2940.612061, total loss: 1.675991\n",
      "1.0\n",
      "0.9886\n"
     ]
    }
   ],
   "source": [
    "# Training hyperparameters (hoisted out of the loop; `lr` was previously\n",
    "# reassigned on every iteration for no reason).\n",
    "lr = 0.02\n",
    "\n",
    "# Build the evaluation ops ONCE, before the loop. The original code created\n",
    "# tf.argmax / tf.equal / tf.cast / tf.reduce_mean inside the loop body, so\n",
    "# every report step added fresh nodes to the default graph -- memory grows\n",
    "# and sess.run gets slower over 30000 steps. It also meant `accuracy` only\n",
    "# existed after the first '% 100' branch ran.\n",
    "correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "for step in range(30000):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "  _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "      [train_step, cross_entropy, l2_loss, total_loss],\n",
    "      feed_dict={x: batch_xs, y_: batch_ys, learning_rate: lr})\n",
    "\n",
    "  if (step + 1) % 100 == 0:\n",
    "    print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %\n",
    "          (step + 1, loss, l2_loss_value, total_loss_value))\n",
    "    # Accuracy on the current training batch (cheap progress signal).\n",
    "    print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys}))\n",
    "  if (step + 1) % 1000 == 0:\n",
    "    # Accuracy on the full held-out test set.\n",
    "    print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                        y_: mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在这里我们仍然调用系统提供的读取数据，为我们取得一个batch。\n",
    "然后我们运行30000个step（batch size为100，约55个epoch），对权重进行优化。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Learning rate: 0.01 最终accuracy = 0.9848\n",
    "Learning rate: 0.005 最终accuracy = 0.9719, 应该需要增大epoch 数量\n",
    "Learning rate: 0.005 6w step , 最终 accuracy = 0.982\n",
    "\n",
    "调整了kernel 的size，变成 3×3，效果不理想, accuracy = 0.9723 \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "验证我们模型在测试数据上的准确率"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "毫无疑问，这个模型是一个非常简陋，性能也不理想的模型。目前只能达到92%左右的准确率。\n",
    "接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。\n",
    "Hint：\n",
    "- 卷积\n",
    "- 池化\n",
    "- 激活函数\n",
    "- 正则化\n",
    "- 初始化\n",
    "- 摸索一下各个超参数\n",
    "  - 卷积kernel size\n",
    "  - 卷积kernel 数量\n",
    "  - 学习率\n",
    "  - 正则化惩罚因子\n",
    "  - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
