{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 本次作业使用keras方法来做，像我这种对API不熟的人来说，代码越少越好\n",
    "    - 本次作业做了以下尝试：\n",
    "        1 采用三层神经网络\n",
    "        2 尝试了selu,swish两个激活函数\n",
    "        3 添加l2正则，正则系数4e-5\n",
    "        4 MSRA初始化权重分布\n",
    "        5 使用Dropout方法\n",
    "        6 尝试修改卷积kernel size：3X3,5X5,7X7\n",
    "        7 尝试修改卷积kernel 数量：16,32,64,50,100\n",
    "        8 使用了学习率衰减方法\n",
    "        9 使用adam优化器\n",
    "    - 由于本地计算太慢，epoch最多计算了20次。还有一些参数没时间调，使用了老师视频里讲的经验值，如：\n",
    "        1.正则化因子统一都用了4e-5\n",
    "        2.权重初始化都用了MSRA分布\n",
     "        3.学习率衰减系数都使用了0.575\n",
    "        4.层数都用了3层\n",
    "        5.Dropout系数都用了0.5 \n",
    "    - 结果没达到0.995,可能是由于BatchNormalization和Augmentation没用到。这两个找了很久没找到keras下的使用方法，希望老师能讲解下\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ai/tool/bin/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'channels_last'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "\"\"\"A very simple MNIST classifier.\n",
     "See extensive documentation at\n",
     "https://www.tensorflow.org/get_started/mnist/beginners\n",
     "\"\"\"\n",
     "from __future__ import absolute_import\n",
     "from __future__ import division\n",
     "from __future__ import print_function\n",
     "\n",
     "import argparse\n",
     "import sys\n",
     "import tensorflow as tf\n",
     "\n",
     "# TF tutorial helper for downloading/reading the MNIST dataset\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "\n",
     "\n",
     "\n",
     "# Keras layers are used below as thin wrappers over raw TF tensors\n",
     "from keras.layers.core import Dense, Flatten\n",
     "from keras.layers.convolutional import Conv2D\n",
     "from keras.layers.pooling import MaxPooling2D\n",
     "\n",
     "from keras import backend as K\n",
     "from keras import initializers  # NOTE(review): imported but apparently unused in this notebook\n",
     "\n",
     "# Last expression is the cell output: confirms the backend image layout\n",
     "# (expected 'channels_last', i.e. NHWC, matching the [28,28,1] reshape below)\n",
     "K.image_data_format() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-818c857eed92>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /home/ai/tool/bin/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /home/ai/tool/bin/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ../data/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /home/ai/tool/bin/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ../data/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/ai/tool/bin/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ../data/t10k-images-idx3-ubyte.gz\n",
      "Extracting ../data/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/ai/tool/bin/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
     "# Import data\n",
     "data_dir = '../data/'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
     "# Define loss and optimizer\n",
     "# x: flattened 28x28 grayscale images; y_: one-hot digit labels\n",
     "x = tf.placeholder(tf.float32, [None, 784])\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])\n",
     "# Initial learning rate is fed at run time so it can be tuned per experiment\n",
     "init_learning_rate = tf.placeholder(tf.float32)\n",
     "\n",
     "def swish(x, name='swish'):\n",
     "    \"\"\"Swish activation: x * sigmoid(x). The `name` argument is currently unused.\"\"\"\n",
     "    return tf.nn.sigmoid(x) * x\n",
     "\n",
     "def selu(x, name='selu'):\n",
     "    \"\"\"SELU activation (scaled ELU). The `name` argument is currently unused.\n",
     "\n",
     "    NOTE(review): the name_scope is labelled 'elu', not 'selu'.\n",
     "    \"\"\"\n",
     "    with tf.name_scope('elu') as scope:\n",
     "        alpha = 1.6732632423543772848170429916717\n",
     "        scale = 1.0507009873554804934193349852946\n",
     "        return scale*tf.where(x>=0.0,x,alpha*tf.nn.elu(x))\n",
     "\n",
     "with tf.name_scope('reshape'):\n",
     "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
     "# Learning-rate decay: multiply the fed rate by 0.575 once per epoch,\n",
     "# where epoch_steps = 60000 training images / batch size\n",
     "epoch_steps = tf.to_int64(tf.div(60000,tf.shape(x)[0]))\n",
     "global_step = tf.train.get_or_create_global_step()\n",
     "current_epoch = global_step//epoch_steps\n",
     "decay_times = current_epoch\n",
     "current_learning_rate = tf.multiply(init_learning_rate,tf.pow(0.575, tf.to_float(decay_times)))\n",
     "\n",
     "# Conv(16,5x5) -> pool -> Conv(32,5x5) -> pool -> FC(1000) -> softmax(10),\n",
     "# all with He-normal ('MSRA') initialization and swish activations\n",
     "net = Conv2D(16, kernel_initializer='he_normal',kernel_size=[5,5], strides=[1,1],activation=swish,\n",
     "                 padding='same',\n",
     "                input_shape=[28,28,1])(x_image)\n",
     "net = MaxPooling2D(pool_size=[2,2])(net)\n",
     "net = Conv2D(32, kernel_initializer='he_normal',kernel_size=[5,5],strides=[1,1],activation=swish,\n",
     "                padding='same')(net)\n",
     "net = MaxPooling2D(pool_size=[2,2])(net)\n",
     "net = Flatten()(net)\n",
     "net = Dense(1000, kernel_initializer='he_normal',activation=swish)(net)\n",
     "# Dropout (disabled; left over from earlier experiments)\n",
     "#with tf.name_scope('dropout'):\n",
     "#    keep_prob = tf.placeholder(tf.float32)\n",
     "#    net = tf.nn.dropout(net, keep_prob)\n",
     "net = Dense(10,kernel_initializer='he_normal',activation='softmax')(net)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Keras' categorical cross-entropy applied to raw TF tensors;\n",
     "# `net` already ends in a softmax, so this expects probabilities, not logits\n",
     "from keras.objectives import categorical_crossentropy\n",
     "cross_entropy = tf.reduce_mean(categorical_crossentropy(y_, net))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "total_loss = cross_entropy + 4e-5*l2_loss\n",
    "#使用adam优化器\n",
    "op = tf.train.AdamOptimizer(current_learning_rate)\n",
    "gr = op.compute_gradients(total_loss)\n",
    "train_step = op.apply_gradients(gr)\n",
    "train_step = tf.train.AdamOptimizer(current_learning_rate).minimize(total_loss,global_step=global_step)\n",
    "#使用SGD\n",
    "#train_step = tf.train.GradientDescentOptimizer(current_learning_rate).minimize(total_loss,global_step=global_step)\n",
    "sess = tf.Session()\n",
    "K.set_session(sess)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 0.042696, l2_loss: 2830.461670, total loss: 0.155914,current_lr_value 0.010000\n",
      "1.0\n",
      "step 200, entropy loss: 0.063866, l2_loss: 2430.305420, total loss: 0.161078,current_lr_value 0.010000\n",
      "0.98\n",
      "step 300, entropy loss: 0.072874, l2_loss: 2246.704102, total loss: 0.162742,current_lr_value 0.010000\n",
      "0.98\n",
      "step 400, entropy loss: 0.088934, l2_loss: 2377.739746, total loss: 0.184043,current_lr_value 0.010000\n",
      "0.99\n",
      "step 500, entropy loss: 0.207644, l2_loss: 2358.125244, total loss: 0.301969,current_lr_value 0.010000\n",
      "0.98\n",
      "step 600, entropy loss: 0.174642, l2_loss: 2597.269531, total loss: 0.278533,current_lr_value 0.010000\n",
      "0.98\n",
      "step 600,p_value 0.973000\n",
      "step 700, entropy loss: 0.033471, l2_loss: 2306.239014, total loss: 0.125720,current_lr_value 0.005750\n",
      "1.0\n",
      "step 800, entropy loss: 0.045928, l2_loss: 1996.147583, total loss: 0.125774,current_lr_value 0.005750\n",
      "0.99\n",
      "step 900, entropy loss: 0.002119, l2_loss: 1785.955200, total loss: 0.073558,current_lr_value 0.005750\n",
      "1.0\n",
      "step 1000, entropy loss: 0.044570, l2_loss: 1681.352417, total loss: 0.111824,current_lr_value 0.005750\n",
      "1.0\n",
      "step 1100, entropy loss: 0.094834, l2_loss: 1617.465332, total loss: 0.159532,current_lr_value 0.005750\n",
      "0.99\n",
      "step 1200, entropy loss: 0.005961, l2_loss: 1519.771362, total loss: 0.066752,current_lr_value 0.005750\n",
      "1.0\n",
      "step 1200,p_value 0.989000\n",
      "step 1300, entropy loss: 0.011816, l2_loss: 1385.277466, total loss: 0.067227,current_lr_value 0.003306\n",
      "1.0\n",
      "step 1400, entropy loss: 0.015519, l2_loss: 1252.671997, total loss: 0.065626,current_lr_value 0.003306\n",
      "1.0\n",
      "step 1500, entropy loss: 0.027219, l2_loss: 1196.699219, total loss: 0.075087,current_lr_value 0.003306\n",
      "1.0\n",
      "step 1600, entropy loss: 0.003383, l2_loss: 1151.473267, total loss: 0.049442,current_lr_value 0.003306\n",
      "1.0\n",
      "step 1700, entropy loss: 0.001410, l2_loss: 1075.914062, total loss: 0.044447,current_lr_value 0.003306\n",
      "1.0\n",
      "step 1800, entropy loss: 0.031524, l2_loss: 1002.858459, total loss: 0.071638,current_lr_value 0.003306\n",
      "0.99\n",
      "step 1800,p_value 0.989800\n",
      "step 1900, entropy loss: 0.001325, l2_loss: 948.916138, total loss: 0.039281,current_lr_value 0.001901\n",
      "1.0\n",
      "step 2000, entropy loss: 0.020786, l2_loss: 894.171814, total loss: 0.056553,current_lr_value 0.001901\n",
      "0.99\n",
      "step 2100, entropy loss: 0.020855, l2_loss: 851.606445, total loss: 0.054920,current_lr_value 0.001901\n",
      "0.99\n",
      "step 2200, entropy loss: 0.006791, l2_loss: 814.827820, total loss: 0.039384,current_lr_value 0.001901\n",
      "1.0\n",
      "step 2300, entropy loss: 0.038186, l2_loss: 765.380371, total loss: 0.068801,current_lr_value 0.001901\n",
      "0.99\n",
      "step 2400, entropy loss: 0.005883, l2_loss: 726.191650, total loss: 0.034931,current_lr_value 0.001901\n",
      "1.0\n",
      "step 2400,p_value 0.992400\n",
      "step 2500, entropy loss: 0.001058, l2_loss: 697.944092, total loss: 0.028976,current_lr_value 0.001093\n",
      "1.0\n",
      "step 2600, entropy loss: 0.010154, l2_loss: 673.485962, total loss: 0.037093,current_lr_value 0.001093\n",
      "1.0\n",
      "step 2700, entropy loss: 0.002202, l2_loss: 654.872986, total loss: 0.028397,current_lr_value 0.001093\n",
      "1.0\n",
      "step 2800, entropy loss: 0.001171, l2_loss: 632.287292, total loss: 0.026463,current_lr_value 0.001093\n",
      "1.0\n",
      "step 2900, entropy loss: 0.005670, l2_loss: 606.461365, total loss: 0.029928,current_lr_value 0.001093\n",
      "1.0\n",
      "step 3000, entropy loss: 0.001147, l2_loss: 582.921875, total loss: 0.024464,current_lr_value 0.001093\n",
      "1.0\n",
      "step 3000,p_value 0.992800\n",
      "step 3100, entropy loss: 0.001136, l2_loss: 568.578979, total loss: 0.023879,current_lr_value 0.000629\n",
      "1.0\n",
      "step 3200, entropy loss: 0.000399, l2_loss: 554.462524, total loss: 0.022578,current_lr_value 0.000629\n",
      "1.0\n",
      "step 3300, entropy loss: 0.001417, l2_loss: 541.656555, total loss: 0.023084,current_lr_value 0.000629\n",
      "1.0\n",
      "step 3400, entropy loss: 0.000234, l2_loss: 526.612061, total loss: 0.021298,current_lr_value 0.000629\n",
      "1.0\n",
      "step 3500, entropy loss: 0.000496, l2_loss: 512.912476, total loss: 0.021013,current_lr_value 0.000629\n",
      "1.0\n",
      "step 3600, entropy loss: 0.000669, l2_loss: 499.026642, total loss: 0.020630,current_lr_value 0.000629\n",
      "1.0\n",
      "step 3600,p_value 0.994100\n",
      "step 3700, entropy loss: 0.000401, l2_loss: 490.196411, total loss: 0.020009,current_lr_value 0.000361\n",
      "1.0\n",
      "step 3800, entropy loss: 0.000850, l2_loss: 482.054657, total loss: 0.020132,current_lr_value 0.000361\n",
      "1.0\n",
      "step 3900, entropy loss: 0.000606, l2_loss: 473.301575, total loss: 0.019538,current_lr_value 0.000361\n",
      "1.0\n",
      "step 4000, entropy loss: 0.000725, l2_loss: 464.747498, total loss: 0.019314,current_lr_value 0.000361\n",
      "1.0\n",
      "step 4100, entropy loss: 0.000038, l2_loss: 455.773926, total loss: 0.018269,current_lr_value 0.000361\n",
      "1.0\n",
      "step 4200, entropy loss: 0.002641, l2_loss: 446.839752, total loss: 0.020514,current_lr_value 0.000361\n",
      "1.0\n",
      "step 4200,p_value 0.993900\n",
      "step 4300, entropy loss: 0.003852, l2_loss: 442.052429, total loss: 0.021534,current_lr_value 0.000208\n",
      "1.0\n",
      "step 4400, entropy loss: 0.001961, l2_loss: 436.655518, total loss: 0.019427,current_lr_value 0.000208\n",
      "1.0\n",
      "step 4500, entropy loss: 0.002223, l2_loss: 431.320770, total loss: 0.019475,current_lr_value 0.000208\n",
      "1.0\n",
      "step 4600, entropy loss: 0.000530, l2_loss: 425.538177, total loss: 0.017551,current_lr_value 0.000208\n",
      "1.0\n",
      "step 4700, entropy loss: 0.001157, l2_loss: 420.045807, total loss: 0.017959,current_lr_value 0.000208\n",
      "1.0\n",
      "step 4800, entropy loss: 0.001433, l2_loss: 414.321686, total loss: 0.018006,current_lr_value 0.000208\n",
      "1.0\n",
      "step 4800,p_value 0.994400\n",
      "step 4900, entropy loss: 0.000422, l2_loss: 411.069580, total loss: 0.016864,current_lr_value 0.000119\n",
      "1.0\n",
      "step 5000, entropy loss: 0.000163, l2_loss: 407.558624, total loss: 0.016466,current_lr_value 0.000119\n",
      "1.0\n",
      "step 5100, entropy loss: 0.000276, l2_loss: 404.005219, total loss: 0.016437,current_lr_value 0.000119\n",
      "1.0\n",
      "step 5200, entropy loss: 0.000344, l2_loss: 400.265625, total loss: 0.016355,current_lr_value 0.000119\n",
      "1.0\n",
      "step 5300, entropy loss: 0.000252, l2_loss: 396.563538, total loss: 0.016114,current_lr_value 0.000119\n",
      "1.0\n",
      "step 5400, entropy loss: 0.001030, l2_loss: 392.857910, total loss: 0.016744,current_lr_value 0.000119\n",
      "1.0\n",
      "step 5400,p_value 0.994200\n",
      "step 5500, entropy loss: 0.004516, l2_loss: 390.636322, total loss: 0.020142,current_lr_value 0.000069\n",
      "1.0\n",
      "step 5600, entropy loss: 0.000662, l2_loss: 388.292938, total loss: 0.016194,current_lr_value 0.000069\n",
      "1.0\n",
      "step 5700, entropy loss: 0.000091, l2_loss: 385.893433, total loss: 0.015526,current_lr_value 0.000069\n",
      "1.0\n",
      "step 5800, entropy loss: 0.000214, l2_loss: 383.347961, total loss: 0.015548,current_lr_value 0.000069\n",
      "1.0\n",
      "step 5900, entropy loss: 0.000429, l2_loss: 380.857269, total loss: 0.015664,current_lr_value 0.000069\n",
      "1.0\n",
      "step 6000, entropy loss: 0.001178, l2_loss: 378.377136, total loss: 0.016313,current_lr_value 0.000069\n",
      "1.0\n",
      "step 6000,p_value 0.994100\n",
      "step 6100, entropy loss: 0.000251, l2_loss: 376.860168, total loss: 0.015325,current_lr_value 0.000040\n",
      "1.0\n",
      "step 6200, entropy loss: 0.000140, l2_loss: 375.276764, total loss: 0.015151,current_lr_value 0.000040\n",
      "1.0\n",
      "step 6300, entropy loss: 0.000372, l2_loss: 373.610962, total loss: 0.015316,current_lr_value 0.000040\n",
      "1.0\n",
      "step 6400, entropy loss: 0.000337, l2_loss: 371.939148, total loss: 0.015214,current_lr_value 0.000040\n",
      "1.0\n",
      "step 6500, entropy loss: 0.001095, l2_loss: 370.241760, total loss: 0.015905,current_lr_value 0.000040\n",
      "1.0\n",
      "step 6600, entropy loss: 0.000651, l2_loss: 368.542480, total loss: 0.015393,current_lr_value 0.000040\n",
      "1.0\n",
      "step 6600,p_value 0.994100\n",
      "step 6700, entropy loss: 0.001270, l2_loss: 367.457153, total loss: 0.015968,current_lr_value 0.000023\n",
      "1.0\n",
      "step 6800, entropy loss: 0.001300, l2_loss: 366.370636, total loss: 0.015954,current_lr_value 0.000023\n",
      "1.0\n",
      "step 6900, entropy loss: 0.000101, l2_loss: 365.282562, total loss: 0.014712,current_lr_value 0.000023\n",
      "1.0\n",
      "step 7000, entropy loss: 0.001024, l2_loss: 364.159149, total loss: 0.015591,current_lr_value 0.000023\n",
      "1.0\n",
      "step 7100, entropy loss: 0.003585, l2_loss: 363.046143, total loss: 0.018107,current_lr_value 0.000023\n",
      "1.0\n",
      "step 7200, entropy loss: 0.000334, l2_loss: 361.909210, total loss: 0.014810,current_lr_value 0.000023\n",
      "1.0\n",
      "step 7200,p_value 0.994300\n",
      "step 7300, entropy loss: 0.000267, l2_loss: 361.195862, total loss: 0.014715,current_lr_value 0.000013\n",
      "1.0\n",
      "step 7400, entropy loss: 0.000660, l2_loss: 360.491852, total loss: 0.015080,current_lr_value 0.000013\n",
      "1.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 7500, entropy loss: 0.002951, l2_loss: 359.760498, total loss: 0.017341,current_lr_value 0.000013\n",
      "1.0\n",
      "step 7600, entropy loss: 0.000587, l2_loss: 359.021149, total loss: 0.014948,current_lr_value 0.000013\n",
      "1.0\n",
      "step 7700, entropy loss: 0.000954, l2_loss: 358.255432, total loss: 0.015284,current_lr_value 0.000013\n",
      "1.0\n",
      "step 7800, entropy loss: 0.000731, l2_loss: 357.448334, total loss: 0.015029,current_lr_value 0.000013\n",
      "1.0\n",
      "step 7800,p_value 0.994000\n",
      "step 7900, entropy loss: 0.001177, l2_loss: 356.993347, total loss: 0.015457,current_lr_value 0.000008\n",
      "1.0\n",
      "step 8000, entropy loss: 0.000020, l2_loss: 356.527893, total loss: 0.014281,current_lr_value 0.000008\n",
      "1.0\n",
      "step 8100, entropy loss: 0.000432, l2_loss: 356.046265, total loss: 0.014674,current_lr_value 0.000008\n",
      "1.0\n",
      "step 8200, entropy loss: 0.000571, l2_loss: 355.566101, total loss: 0.014794,current_lr_value 0.000008\n",
      "1.0\n",
      "step 8300, entropy loss: 0.000348, l2_loss: 355.083466, total loss: 0.014552,current_lr_value 0.000008\n",
      "1.0\n",
      "step 8400, entropy loss: 0.000491, l2_loss: 354.566498, total loss: 0.014674,current_lr_value 0.000008\n",
      "1.0\n",
      "step 8400,p_value 0.993800\n",
      "step 8500, entropy loss: 0.000103, l2_loss: 354.264954, total loss: 0.014273,current_lr_value 0.000004\n",
      "1.0\n",
      "step 8600, entropy loss: 0.000790, l2_loss: 353.964294, total loss: 0.014949,current_lr_value 0.000004\n",
      "1.0\n",
      "step 8700, entropy loss: 0.000008, l2_loss: 353.660156, total loss: 0.014154,current_lr_value 0.000004\n",
      "1.0\n",
      "step 8800, entropy loss: 0.004139, l2_loss: 353.354462, total loss: 0.018273,current_lr_value 0.000004\n",
      "1.0\n",
      "step 8900, entropy loss: 0.000284, l2_loss: 353.028931, total loss: 0.014405,current_lr_value 0.000004\n",
      "1.0\n",
      "step 9000, entropy loss: 0.000693, l2_loss: 352.727905, total loss: 0.014802,current_lr_value 0.000004\n",
      "1.0\n",
      "step 9000,p_value 0.993900\n",
      "step 9100, entropy loss: 0.000337, l2_loss: 352.542816, total loss: 0.014439,current_lr_value 0.000002\n",
      "1.0\n",
      "step 9200, entropy loss: 0.000086, l2_loss: 352.351318, total loss: 0.014181,current_lr_value 0.000002\n",
      "1.0\n",
      "step 9300, entropy loss: 0.000795, l2_loss: 352.151917, total loss: 0.014881,current_lr_value 0.000002\n",
      "1.0\n",
      "step 9400, entropy loss: 0.000124, l2_loss: 351.961243, total loss: 0.014203,current_lr_value 0.000002\n",
      "1.0\n",
      "step 9500, entropy loss: 0.000785, l2_loss: 351.760620, total loss: 0.014855,current_lr_value 0.000002\n",
      "1.0\n",
      "step 9600, entropy loss: 0.000195, l2_loss: 351.555542, total loss: 0.014257,current_lr_value 0.000002\n",
      "1.0\n",
      "step 9600,p_value 0.993800\n",
      "step 9700, entropy loss: 0.000357, l2_loss: 351.442291, total loss: 0.014415,current_lr_value 0.000001\n",
      "1.0\n",
      "step 9800, entropy loss: 0.000969, l2_loss: 351.323151, total loss: 0.015022,current_lr_value 0.000001\n",
      "1.0\n",
      "step 9900, entropy loss: 0.001224, l2_loss: 351.203033, total loss: 0.015272,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10000, entropy loss: 0.000300, l2_loss: 351.082367, total loss: 0.014343,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10100, entropy loss: 0.000666, l2_loss: 350.959595, total loss: 0.014704,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10200, entropy loss: 0.000792, l2_loss: 350.835999, total loss: 0.014826,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10200,p_value 0.994000\n",
      "step 10300, entropy loss: 0.000283, l2_loss: 350.762909, total loss: 0.014314,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10400, entropy loss: 0.000128, l2_loss: 350.689697, total loss: 0.014155,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10500, entropy loss: 0.000627, l2_loss: 350.618896, total loss: 0.014652,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10600, entropy loss: 0.000431, l2_loss: 350.546173, total loss: 0.014453,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10700, entropy loss: 0.000168, l2_loss: 350.471954, total loss: 0.014187,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10800, entropy loss: 0.000710, l2_loss: 350.397217, total loss: 0.014726,current_lr_value 0.000001\n",
      "1.0\n",
      "step 10800,p_value 0.994000\n",
      "step 10900, entropy loss: 0.000221, l2_loss: 350.355011, total loss: 0.014235,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11000, entropy loss: 0.000311, l2_loss: 350.309601, total loss: 0.014324,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11100, entropy loss: 0.000092, l2_loss: 350.264496, total loss: 0.014102,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11200, entropy loss: 0.000081, l2_loss: 350.218842, total loss: 0.014090,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11300, entropy loss: 0.001855, l2_loss: 350.173431, total loss: 0.015862,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11400, entropy loss: 0.000475, l2_loss: 350.128052, total loss: 0.014480,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11400,p_value 0.994000\n",
      "step 11500, entropy loss: 0.001328, l2_loss: 350.101562, total loss: 0.015332,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11600, entropy loss: 0.000612, l2_loss: 350.075867, total loss: 0.014615,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11700, entropy loss: 0.000744, l2_loss: 350.049744, total loss: 0.014746,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11800, entropy loss: 0.000056, l2_loss: 350.022522, total loss: 0.014057,current_lr_value 0.000000\n",
      "1.0\n",
      "step 11900, entropy loss: 0.000734, l2_loss: 349.995178, total loss: 0.014733,current_lr_value 0.000000\n",
      "1.0\n",
      "step 12000, entropy loss: 0.000381, l2_loss: 349.968994, total loss: 0.014380,current_lr_value 0.000000\n",
      "1.0\n",
      "step 12000,p_value 0.994000\n",
      "(0.9944, 8.0)\n"
     ]
    }
   ],
   "source": [
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "# Train\n",
    "dic={}\n",
    "for step in range(12000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    #lr = 0.001 #selu激活用0.001\n",
    "    lr = 0.01 #swish激活用0.01\n",
    "    _, loss, l2_loss_value, total_loss_value, current_lr_value= sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss,current_learning_rate], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, init_learning_rate:lr})\n",
    "  \n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f,current_lr_value %f' % \n",
    "            (step+1, loss, l2_loss_value, total_loss_value ,current_lr_value))\n",
    "        # Test trained model\n",
    "        correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys}))\n",
    "    if (step+1) % 600 == 0:\n",
    "        p_value = sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels})\n",
    "        dic[(step+1)/600]=p_value\n",
    "        print('step %d,p_value %f' % (step+1,p_value))\n",
    "print(max(zip(dic.values(),dic.keys())))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 由于tensorflow的计算方式不太适应，参数都直接在一份代码上调优，没有保存，所以仅将测得的结果总结于下：\n",
    "1.  激活函数：selu\n",
    "    Dropout：0.5\n",
    "    kernel size：7X7\n",
    "    初始学习率：0.001\n",
    "    kernel 数量：l1 50,l2 100\n",
    "    最佳结果：(0.9912, 17.0)\n",
    "2.  激活函数：selu\n",
    "    Dropout：0.5\n",
    "    kernel size：3X3\n",
    "    初始学习率：0.001\n",
    "    kernel 数量：l1 16,l2 32\n",
    "    最佳结果：(0.9861, 11.0)\n",
    "3.  激活函数：swish\n",
    "    Dropout：0.5\n",
    "    kernel size：5X5\n",
    "    初始学习率：0.01\n",
    "    kernel 数量：l1 16,l2 32\n",
    "    最佳结果：(0.9921, 13.0)\n",
    "4.  激活函数：swish\n",
    "    Dropout：未使用\n",
    "    kernel size：5X5 3X3\n",
    "    strides:2,2  1,1\n",
    "    初始学习率：0.01\n",
    "    kernel 数量：l1 32,l2 64\n",
    "    最佳结果：(0.9921, 6.0) \n",
    "5.  激活函数：swish\n",
    "    Dropout：未使用\n",
    "    kernel size：5X5\n",
    "    初始学习率：0.01\n",
    "    kernel 数量：l1 16,l2 32\n",
    "    最佳结果：(0.9944, 8.0)\n",
    "\n",
    "# 遇到两个问题，请助教老师帮忙解答一下：\n",
     "1.  selu激活函数学习率设为0.01刚开始很难收敛，而swish可以，为什么？\n",
    "2.  使用Dropout比没使用的结果还差，是不是使用方法有误？"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
