{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From C:\\Anaconda2\\envs\\python3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\base.py:198: retry (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use the retry module or similar alternatives.\n"
     ]
    }
   ],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "\n",
    "#FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-698ada706af1>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From C:\\Anaconda2\\envs\\python3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From C:\\Anaconda2\\envs\\python3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Anaconda2\\envs\\python3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Anaconda2\\envs\\python3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Anaconda2\\envs\\python3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
     "# Import data: read_data_sets downloads the MNIST archives into data_dir\n",
     "# on first use and returns train/validation/test splits; one_hot=True gives\n",
     "# 10-way one-hot label vectors (matching the y_ placeholder below).\n",
     "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "首先定义输入数据和ground truth占位符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Create the model\n",
     "# x: flattened 28x28 grayscale images (784 values per example);\n",
     "# y_: one-hot ground-truth labels over the 10 digit classes.\n",
     "x = tf.placeholder(tf.float32, [None, 784])\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])\n",
     "\n",
     "# Reshape to NHWC [batch, 28, 28, 1] for the convolutional layers.\n",
     "x_image=tf.reshape(x,[-1,28,28,1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#方法一：采用固定的学习率\n",
    "#learning_rate=tf.placeholder(tf.float32)\n",
    "\n",
    "#方法二：指数衰减法,初始学习率0.005，每个epoch（600步）衰减0.575\n",
    "global_steps=tf.Variable(0.0)\n",
    "learning_rate=tf.train.exponential_decay(0.005,global_steps,600,0.575,staircase=True)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 定义不同的激活函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def swish(x):\n",
    "  return tf.nn.swish(x)\n",
    "\n",
    "\n",
    "def selu(x):\n",
    "  with tf.name_scope('elu') as scope:\n",
    "    alpha = 1.6732632423543772848170429916717\n",
    "    scale = 1.0507009873554804934193349852946\n",
    "    return scale*tf.where(x>=0.0, x, alpha*tf.nn.elu(x))\n",
    "\n",
    "def relu(x):\n",
    "    return tf.nn.relu(x)\n",
    "\n",
    "def activation(x):\n",
    "    return selu(x)\n",
    "    #return relu(x)\n",
    "    #return swish(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 方法一：使用原生态写法(在以下卷积神经网络配置下，selu-Adam-0.005初始指数衰减-3000步准确率为0.9915)\n",
    "\n",
    "计算结果为：\n",
    "step 3000, entropy loss: 0.017696, l2_loss: 1124.428589, total loss: 0.096406\n",
    "1.0\n",
    "0.9915"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n#第1层：卷积1(28*28*1->28*28*32)\\n#卷积核的初始化分别采用标准差为0.1的正态分布、MSRA何方差进行初始化\\n#W_conv1=tf.Variable(tf.truncated_normal([5,5,1,32],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nW_conv1=tf.Variable(tf.truncated_normal([5,5,1,32],stddev=np.sqrt(2/(28*28*1))),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nb_conv1=tf.Variable(tf.constant(0.1,shape=[32]))\\nl_conv1=tf.nn.conv2d(x_image,W_conv1,strides=[1,1,1,1],padding='SAME')+b_conv1\\nh_conv1=activation(l_conv1)\\n\\n#第2层：池化1(28*28*32->14*14*32)\\nh_pool1=tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\\n\\n#第3层：卷积2(14*14*32->14*14*64)\\n#卷积核的初始化分别采用标准差为0.1的正态分布、何方差进行初始化\\n#W_conv2=tf.Variable(tf.truncated_normal([5,5,32,64],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nW_conv2=tf.Variable(tf.truncated_normal([5,5,32,64],stddev=np.sqrt(2/(5*5*32))),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nb_conv2=tf.Variable(tf.constant(0.1,shape=[64]))\\nl_conv2=tf.nn.conv2d(h_pool1,W_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2\\nh_conv2=activation(l_conv2)\\n\\n#第4层：池化2(14*14*64->7*7*64)\\nh_pool2=tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\\n\\n#第5层：全连接层1(7*7*64->1024)\\n#分别采用标准差为0.1的正态分布、何方差进行初始化W\\n#W_fc1=tf.Variable(tf.truncated_normal([7*7*64,1024],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nW_fc1=tf.Variable(tf.truncated_normal([7*7*64,1024],stddev=np.sqrt(2/(7*7*64))),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nb_fc1=tf.Variable(tf.constant(0.1,shape=[1024]))\\nh_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])\\nh_fc1=activation(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)\\n\\n#定义dropout\\nkeep_prob=tf.placeholder(tf.float32)\\nh_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)\\n\\n#第6层：全连接层2(1024->10)\\n#分别采用标准差为0.1的正态分布、何方差进行初始化W\\n#W_fc2=tf.Variable(tf.truncated_normal([1024,10],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nW_fc2=tf.Variable(tf.trunc
ated_normal([1024,10],stddev=np.sqrt(2/1024)),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\nb_fc2=tf.Variable(tf.constant(0.1,shape=[10]))\\ny=tf.matmul(h_fc1_drop,W_fc2)+b_fc2\\n\""
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Disabled reference implementation, kept as a bare string so the cell is a\n",
     "# no-op: low-level graph construction with explicit tf.Variable weights.\n",
     "'''\n",
    "#第1层：卷积1(28*28*1->28*28*32)\n",
    "#卷积核的初始化分别采用标准差为0.1的正态分布、MSRA何方差进行初始化\n",
    "#W_conv1=tf.Variable(tf.truncated_normal([5,5,1,32],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "W_conv1=tf.Variable(tf.truncated_normal([5,5,1,32],stddev=np.sqrt(2/(28*28*1))),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "b_conv1=tf.Variable(tf.constant(0.1,shape=[32]))\n",
    "l_conv1=tf.nn.conv2d(x_image,W_conv1,strides=[1,1,1,1],padding='SAME')+b_conv1\n",
    "h_conv1=activation(l_conv1)\n",
    "\n",
    "#第2层：池化1(28*28*32->14*14*32)\n",
    "h_pool1=tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\n",
    "\n",
    "#第3层：卷积2(14*14*32->14*14*64)\n",
    "#卷积核的初始化分别采用标准差为0.1的正态分布、何方差进行初始化\n",
    "#W_conv2=tf.Variable(tf.truncated_normal([5,5,32,64],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "W_conv2=tf.Variable(tf.truncated_normal([5,5,32,64],stddev=np.sqrt(2/(5*5*32))),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "b_conv2=tf.Variable(tf.constant(0.1,shape=[64]))\n",
    "l_conv2=tf.nn.conv2d(h_pool1,W_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2\n",
    "h_conv2=activation(l_conv2)\n",
    "\n",
    "#第4层：池化2(14*14*64->7*7*64)\n",
    "h_pool2=tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='VALID')\n",
    "\n",
    "#第5层：全连接层1(7*7*64->1024)\n",
    "#分别采用标准差为0.1的正态分布、何方差进行初始化W\n",
    "#W_fc1=tf.Variable(tf.truncated_normal([7*7*64,1024],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "W_fc1=tf.Variable(tf.truncated_normal([7*7*64,1024],stddev=np.sqrt(2/(7*7*64))),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "b_fc1=tf.Variable(tf.constant(0.1,shape=[1024]))\n",
    "h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])\n",
    "h_fc1=activation(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)\n",
    "\n",
    "#定义dropout\n",
    "keep_prob=tf.placeholder(tf.float32)\n",
    "h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)\n",
    "\n",
    "#第6层：全连接层2(1024->10)\n",
    "#分别采用标准差为0.1的正态分布、何方差进行初始化W\n",
    "#W_fc2=tf.Variable(tf.truncated_normal([1024,10],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "W_fc2=tf.Variable(tf.truncated_normal([1024,10],stddev=np.sqrt(2/1024)),collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "b_fc2=tf.Variable(tf.constant(0.1,shape=[10]))\n",
    "y=tf.matmul(h_fc1_drop,W_fc2)+b_fc2\n",
    "'''"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 方法二：使用layers法(selu-Adam-0.005初始指数衰减，调整卷积核大小和数量，对比计算准确率)\n",
    "\n",
    "1.将卷积核大小由[5,5]调整为[3,3],数量分别为32,64不变，两个epoch基本稳定（1200步），2000步时测试集准确率0.9923\n",
    "(step 2000, entropy loss: 0.015654, l2_loss: 2066877.375000, total loss: 144.697083 1.0 0.9923)\n",
    "\n",
    "2.保持卷积核大小[3,3]，数量分别调整为48,96,四个epoch基本稳定（2400步），3000步时测试集准确率0.9927"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\n#1.将卷积核减小为3*3\\n\\n#卷积、池化层\\nh_conv1=tf.layers.conv2d(x_image,32,[3,3],padding='SAME',activation=tf.nn.relu)\\nh_pool1=tf.layers.max_pooling2d(h_conv1,pool_size=[2,2],strides=[2,2],padding='VALID')\\n\\nh_conv2=tf.layers.conv2d(h_pool1,64,[3,3],padding='SAME',activation=tf.nn.relu)\\nh_pool2=tf.layers.max_pooling2d(h_conv2,pool_size=[2,2],strides=[2,2],padding='VALID')\\n\\n#全连接层\\nh_pool2_flat=tf.layers.flatten(h_pool2)\\nh_fc1=tf.layers.dense(h_pool2_flat,1024,activation=tf.nn.relu)\\n\\n#定义dropout\\nkeep_prob=tf.placeholder(tf.float32)\\nh_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)\\n\\ny=tf.layers.dense(h_fc1_drop,10,activation=None)\\n\""
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Disabled variant, kept as a bare string so the cell is a no-op:\n",
     "# 3x3 kernels with 32/64 filters (experiment 1 of the section heading).\n",
     "'''\n",
    "#1.将卷积核减小为3*3\n",
    "\n",
    "#卷积、池化层\n",
    "h_conv1=tf.layers.conv2d(x_image,32,[3,3],padding='SAME',activation=tf.nn.relu)\n",
    "h_pool1=tf.layers.max_pooling2d(h_conv1,pool_size=[2,2],strides=[2,2],padding='VALID')\n",
    "\n",
    "h_conv2=tf.layers.conv2d(h_pool1,64,[3,3],padding='SAME',activation=tf.nn.relu)\n",
    "h_pool2=tf.layers.max_pooling2d(h_conv2,pool_size=[2,2],strides=[2,2],padding='VALID')\n",
    "\n",
    "#全连接层\n",
    "h_pool2_flat=tf.layers.flatten(h_pool2)\n",
    "h_fc1=tf.layers.dense(h_pool2_flat,1024,activation=tf.nn.relu)\n",
    "\n",
    "#定义dropout\n",
    "keep_prob=tf.placeholder(tf.float32)\n",
    "h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)\n",
    "\n",
    "y=tf.layers.dense(h_fc1_drop,10,activation=None)\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "#2. 3x3 kernels, filter counts raised to 48 and 96.\n",
     "# NOTE(review): this cell hard-codes tf.nn.relu even though the section\n",
     "# heading reports selu results and an activation() helper is defined above --\n",
     "# confirm which activation was actually intended for these numbers.\n",
     "\n",
     "# Convolution / pooling stack: 28x28x1 -> 28x28x48 -> 14x14x48 -> 14x14x96 -> 7x7x96.\n",
     "h_conv1=tf.layers.conv2d(x_image,48,[3,3],padding='SAME',activation=tf.nn.relu)\n",
     "h_pool1=tf.layers.max_pooling2d(h_conv1,pool_size=[2,2],strides=[2,2],padding='VALID')\n",
     "\n",
     "h_conv2=tf.layers.conv2d(h_pool1,96,[3,3],padding='SAME',activation=tf.nn.relu)\n",
     "h_pool2=tf.layers.max_pooling2d(h_conv2,pool_size=[2,2],strides=[2,2],padding='VALID')\n",
     "\n",
     "# Fully connected layer: flattened pooled features -> 1024 units.\n",
     "h_pool2_flat=tf.layers.flatten(h_pool2)\n",
     "h_fc1=tf.layers.dense(h_pool2_flat,1024,activation=tf.nn.relu)\n",
     "\n",
     "# Dropout; keep_prob is fed via feed_dict at run time.\n",
     "keep_prob=tf.placeholder(tf.float32)\n",
     "h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)\n",
     "\n",
     "# Output layer: raw logits (no activation) -- consumed later by\n",
     "# softmax_cross_entropy_with_logits, which expects un-activated wx+b.\n",
     "y=tf.layers.dense(h_fc1_drop,10,activation=None)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们计算交叉熵，注意这里不要使用注释中的手动计算方式，而是使用系统函数。\n",
    "另一个注意点就是，softmax_cross_entropy_with_logits的logits参数是**未经激活的wx+b**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-9-9c356944e8f6>:10: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See tf.nn.softmax_cross_entropy_with_logits_v2.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "# outputs of 'y', and then average across the batch.\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "#使用L2正则约束过拟合\n",
    "\n",
    "#L2损失之原生态写法版本\n",
    "#l2_loss=tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')]) \n",
    "\n",
    "#L2损失之layers版本\n",
    "l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "\n",
    "#总损失，L2之前的系数7e-5是为了将l2损失与交叉熵损失拉平到一个数量级\n",
    "total_loss=cross_entropy + 7e-5*l2_loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成一个训练step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "#方法一：采用固定学习率的SGD优化器求解\n",
    "#train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "#方法二：采用固定学习率的Adam优化器求解\n",
    "#train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "#方法三：采用指数衰减学习率的Adam优化器求解\n",
    "train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss,global_step=global_steps)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在这里我们仍然调用系统提供的读取数据，为我们取得一个batch。\n",
    "然后我们运行3k个step(5 epochs)，对权重进行优化。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 0.121120, l2_loss: 5744.730957, total loss: 0.523251\n",
      "0.98\n",
      "step 200, entropy loss: 0.149038, l2_loss: 20247.492188, total loss: 1.566362\n",
      "0.98\n",
      "step 300, entropy loss: 0.144650, l2_loss: 44738.890625, total loss: 3.276373\n",
      "0.94\n",
      "step 400, entropy loss: 0.027409, l2_loss: 79111.492188, total loss: 5.565214\n",
      "1.0\n",
      "step 500, entropy loss: 0.106018, l2_loss: 123315.273438, total loss: 8.738088\n",
      "0.98\n",
      "step 600, entropy loss: 0.031783, l2_loss: 177389.015625, total loss: 12.449015\n",
      "1.0\n",
      "step 700, entropy loss: 0.058025, l2_loss: 241456.671875, total loss: 16.959993\n",
      "0.96\n",
      "step 800, entropy loss: 0.045571, l2_loss: 315449.937500, total loss: 22.127066\n",
      "0.99\n",
      "step 900, entropy loss: 0.007262, l2_loss: 399375.343750, total loss: 27.963537\n",
      "1.0\n",
      "step 1000, entropy loss: 0.179195, l2_loss: 493218.312500, total loss: 34.704479\n",
      "0.96\n",
      "0.9855\n",
      "step 1100, entropy loss: 0.060540, l2_loss: 596986.625000, total loss: 41.849606\n",
      "0.99\n",
      "step 1200, entropy loss: 0.016652, l2_loss: 710668.187500, total loss: 49.763424\n",
      "0.99\n",
      "step 1300, entropy loss: 0.019861, l2_loss: 834474.750000, total loss: 58.433098\n",
      "0.98\n",
      "step 1400, entropy loss: 0.064963, l2_loss: 968253.312500, total loss: 67.842697\n",
      "0.99\n",
      "step 1500, entropy loss: 0.032744, l2_loss: 1111983.750000, total loss: 77.871613\n",
      "0.99\n",
      "step 1600, entropy loss: 0.040521, l2_loss: 1265667.500000, total loss: 88.637245\n",
      "0.99\n",
      "step 1700, entropy loss: 0.003856, l2_loss: 1429308.375000, total loss: 100.055443\n",
      "1.0\n",
      "step 1800, entropy loss: 0.036255, l2_loss: 1602914.750000, total loss: 112.240288\n",
      "0.98\n",
      "step 1900, entropy loss: 0.113625, l2_loss: 1786648.500000, total loss: 125.179024\n",
      "0.97\n",
      "step 2000, entropy loss: 0.015287, l2_loss: 1980373.000000, total loss: 138.641403\n",
      "0.98\n",
      "0.9919\n",
      "step 2100, entropy loss: 0.008897, l2_loss: 2184079.750000, total loss: 152.894485\n",
      "0.99\n",
      "step 2200, entropy loss: 0.022462, l2_loss: 2397779.000000, total loss: 167.866989\n",
      "1.0\n",
      "step 2300, entropy loss: 0.067566, l2_loss: 2621454.000000, total loss: 183.569351\n",
      "0.98\n",
      "step 2400, entropy loss: 0.010119, l2_loss: 2855104.000000, total loss: 199.867401\n",
      "1.0\n",
      "step 2500, entropy loss: 0.042019, l2_loss: 3098840.750000, total loss: 216.960876\n",
      "0.99\n",
      "step 2600, entropy loss: 0.005610, l2_loss: 3352575.000000, total loss: 234.685867\n",
      "1.0\n",
      "step 2700, entropy loss: 0.006925, l2_loss: 3616295.500000, total loss: 253.147614\n",
      "0.99\n",
      "step 2800, entropy loss: 0.007335, l2_loss: 3890000.750000, total loss: 272.307373\n",
      "1.0\n",
      "step 2900, entropy loss: 0.011295, l2_loss: 4173692.500000, total loss: 292.169769\n",
      "1.0\n",
      "step 3000, entropy loss: 0.013171, l2_loss: 4467369.500000, total loss: 312.729065\n",
      "0.99\n",
      "0.9927\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "for step in range(3000):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "  #学习率对计算结果影响剧烈，以下标注了我的试验过程，分别是激活函数-优化器类型-学习率-计算结果，1：swish-SGD-0.01-3000:0.9762\n",
    "  #2：selu-SGD-0.01-3000:0.978、3：selu-Adam-0.01-3000:0.9778、4:selu-SGD-0.001-3000:0.9846,所以探索使用阶跃指数衰减法动态调整学习率\n",
    "\n",
    "  #方法一：固定学习率\n",
    "  #lr = 0.001\n",
    "  #_, loss, l2_loss_value, total_loss_value = sess.run([train_step, cross_entropy, l2_loss, total_loss], \n",
    "  #            feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:0.5})\n",
    "    \n",
    "  #方法二：指数衰减学习率\n",
    "  _, loss, l2_loss_value, total_loss_value = sess.run([train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5})\n",
    "\n",
    "  #每100步输出模型在训练集上的准确率\n",
    "  if (step+1) % 100 == 0:\n",
    "    print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %(step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Test trained model\n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
    "    \n",
    "  #每1000步输出模型在测试集上的准确率\n",
    "  if (step+1) % 1000 == 0:\n",
    "    print(sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels, keep_prob:1}))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
