{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "本小节对学习率进行调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./MNIST/train-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST/train-labels-idx1-ubyte.gz\n",
      "Extracting ./MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# Load the MNIST dataset (extracted under ./MNIST); labels are one-hot encoded\n",
     "data_dir = './MNIST'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Define the input placeholders\n",
     "x = tf.placeholder(tf.float32, [None, 784])   # flattened input images, 28x28 = 784 pixels\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels for the 10 digit classes\n",
     "learning_rate = tf.placeholder(tf.float32)    # learning rate fed at each training step (NOTE: the original comment wrongly described this as the dropout value)\n",
     "\n",
     "with tf.name_scope('reshape'):\n",
     "# -1 lets TensorFlow infer the batch dimension; the trailing 1 is the channel count (grayscale input; RGB would use 3)\n",
     "  x_image = tf.reshape(x, [-1, 28, 28, 1])    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "#from keras.layers.initializers import he_normal\n",
     "# Convolution layer helper.\n",
     "# filter_size is the side length of the square convolution kernel.\n",
     "# tf.contrib.layers.variance_scaling_initializer performs He/MSRA initialization of the\n",
     "# kernel weights; changing its parameters would give Xavier initialization instead.\n",
     "def conv_op(input_op, filter_size, channel_out, name):\n",
     "    \"\"\"SAME-padded 2-D convolution with ReLU activation and He-initialized kernels.\"\"\"\n",
     "    h_conv1 = tf.layers.conv2d(input_op, channel_out, [filter_size,filter_size],\n",
     "                             padding='SAME',\n",
     "                             activation=tf.nn.relu,name=name,kernel_initializer=tf.contrib.layers.variance_scaling_initializer())    \n",
     "    return h_conv1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 最大池化层\n",
    "def maxPool_op(input_op, filter_size, step, name):\n",
    "    h_pool1 = tf.layers.max_pooling2d(input_op, pool_size=[filter_size,filter_size],\n",
    "                        strides=[step, step], padding='VALID',name=name)\n",
    "    return h_pool1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\"\\ndef full_connection(input_op, channel_out, name):\\n    channel_in = input_op.get_shape()[-1].value\\n    with tf.name_scope(name) as scope:\\n        weight = tf.Variable(tf.truncated_normal([channel_in, channel_out],mean=0,\\n                                                  dtype=tf.float32, stddev=0.1),\\n                                                  collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\\n        #weight = tf.get_variable(shape=[channel_in, channel_out], dtype=tf.float32,\\n        #                         initializer=xavier_initializer_conv2d(), name=scope + 'weight')\\n        bias = tf.Variable(tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32), name='bias')\\n        input_op_reshape = tf.reshape(input_op, [-1, 7 * 7 * 64])\\n        fc = tf.nn.relu(tf.matmul(input_op_reshape, weight) + bias)\\n        return fc\\n\""
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Fully connected layer — DEAD CODE: the triple-quoted string below is an unused manual\n",
     "# implementation kept only for reference; the live model uses tf.layers.dense instead.\n",
     "'''\n",
     "def full_connection(input_op, channel_out, name):\n",
     "    channel_in = input_op.get_shape()[-1].value\n",
     "    with tf.name_scope(name) as scope:\n",
     "        weight = tf.Variable(tf.truncated_normal([channel_in, channel_out],mean=0,\n",
     "                                                  dtype=tf.float32, stddev=0.1),\n",
     "                                                  collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
     "        #weight = tf.get_variable(shape=[channel_in, channel_out], dtype=tf.float32,\n",
     "        #                         initializer=xavier_initializer_conv2d(), name=scope + 'weight')\n",
     "        bias = tf.Variable(tf.constant(value=0.0, shape=[channel_out], dtype=tf.float32), name='bias')\n",
     "        input_op_reshape = tf.reshape(input_op, [-1, 7 * 7 * 64])\n",
     "        fc = tf.nn.relu(tf.matmul(input_op_reshape, weight) + bias)\n",
     "        return fc\n",
     "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "# First conv layer: 5x5 kernels, 32 filters, stride 1 -> output 28*28*32\n",
     "conv1=conv_op(x_image,filter_size=5,channel_out=32,name='conv1')\n",
     "# First pooling layer -> output 14*14*32 (original comment said 14*14*28, a typo)\n",
     "pool1=maxPool_op(conv1,filter_size=2,step=2,name='pool1')\n",
     "# Second conv layer: 5x5 kernels, 64 filters, stride 1 -> output 14*14*64 (not 28*28*64: input is already pooled to 14*14)\n",
     "conv2=conv_op(pool1,filter_size=5,channel_out=64,name='conv2')\n",
     "# Second pooling layer -> output 7*7*64\n",
     "pool2=maxPool_op(conv2,filter_size=2,step=2,name='pool2')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "from tensorflow.contrib.layers import flatten\n",
     "# Fully connected layer: flatten the 7*7*64 feature maps and project to 1024 features\n",
     "with tf.name_scope('fc1'):\n",
     "  h_pool2_flat = flatten(pool2)\n",
     "  h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)\n",
     "\n",
     "# Dropout - controls the complexity of the model, prevents co-adaptation of\n",
     "# features.\n",
     "with tf.name_scope('dropout'):\n",
     "  keep_prob = tf.placeholder(tf.float32)\n",
     "  h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
     "\n",
     "# Map the 1024 features to 10 classes, one for each digit\n",
     "# No activation here: the raw logits feed softmax_cross_entropy_with_logits later\n",
     "with tf.name_scope('fc2'):\n",
     "  y = tf.layers.dense(h_fc1_drop, 10, activation=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Regularization setup\n",
     "REGULARIZATION_RATE = 0.0001 # empirically reasonable weight-decay coefficient\n",
     "#REGULARIZATION_RATE = 0.001 # alternative value tried\n",
     "regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # L2 regularization loss function\n",
     "#regularization = regularizer(weights1) + regularizer(weights2)  # (manual per-weight variant, unused)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 50, entropy loss: 0.302917, l2_loss: 0.091034, total loss: 0.393952\n",
      "0.9\n",
      "step 100, entropy loss: 0.073493, l2_loss: 0.091307, total loss: 0.164800\n",
      "1.0\n",
      "step 150, entropy loss: 0.215386, l2_loss: 0.091405, total loss: 0.306791\n",
      "0.99\n",
      "step 200, entropy loss: 0.147701, l2_loss: 0.091456, total loss: 0.239157\n",
      "0.99\n",
      "step 250, entropy loss: 0.150818, l2_loss: 0.091509, total loss: 0.242327\n",
      "0.97\n",
      "step 300, entropy loss: 0.053123, l2_loss: 0.091486, total loss: 0.144609\n",
      "1.0\n",
      "step 350, entropy loss: 0.135455, l2_loss: 0.091492, total loss: 0.226947\n",
      "0.99\n",
      "step 400, entropy loss: 0.093690, l2_loss: 0.091447, total loss: 0.185137\n",
      "0.99\n",
      "step 450, entropy loss: 0.089758, l2_loss: 0.091381, total loss: 0.181139\n",
      "0.98\n",
      "step 500, entropy loss: 0.061515, l2_loss: 0.091348, total loss: 0.152864\n",
      "1.0\n",
      "0.9767\n",
      "step 550, entropy loss: 0.025921, l2_loss: 0.091273, total loss: 0.117194\n",
      "1.0\n",
      "step 600, entropy loss: 0.027527, l2_loss: 0.091230, total loss: 0.118757\n",
      "0.98\n",
      "step 650, entropy loss: 0.035471, l2_loss: 0.091152, total loss: 0.126622\n",
      "1.0\n",
      "step 700, entropy loss: 0.030242, l2_loss: 0.091089, total loss: 0.121331\n",
      "1.0\n",
      "step 750, entropy loss: 0.062686, l2_loss: 0.091016, total loss: 0.153702\n",
      "0.99\n",
      "step 800, entropy loss: 0.038941, l2_loss: 0.090935, total loss: 0.129877\n",
      "1.0\n",
      "step 850, entropy loss: 0.089466, l2_loss: 0.090844, total loss: 0.180310\n",
      "0.99\n",
      "step 900, entropy loss: 0.047694, l2_loss: 0.090767, total loss: 0.138461\n",
      "0.99\n",
      "step 950, entropy loss: 0.010813, l2_loss: 0.090670, total loss: 0.101483\n",
      "1.0\n",
      "step 1000, entropy loss: 0.007345, l2_loss: 0.090577, total loss: 0.097922\n",
      "0.98\n",
      "0.9838\n",
      "step 1050, entropy loss: 0.015656, l2_loss: 0.090469, total loss: 0.106125\n",
      "1.0\n",
      "step 1100, entropy loss: 0.007458, l2_loss: 0.090384, total loss: 0.097843\n",
      "1.0\n",
      "step 1150, entropy loss: 0.052309, l2_loss: 0.090298, total loss: 0.142607\n",
      "1.0\n",
      "step 1200, entropy loss: 0.010104, l2_loss: 0.090197, total loss: 0.100301\n",
      "0.99\n",
      "step 1250, entropy loss: 0.034446, l2_loss: 0.090116, total loss: 0.124562\n",
      "1.0\n",
      "step 1300, entropy loss: 0.019082, l2_loss: 0.089996, total loss: 0.109078\n",
      "0.99\n",
      "step 1350, entropy loss: 0.009777, l2_loss: 0.089891, total loss: 0.099667\n",
      "1.0\n",
      "step 1400, entropy loss: 0.048196, l2_loss: 0.089801, total loss: 0.137997\n",
      "1.0\n",
      "step 1450, entropy loss: 0.026732, l2_loss: 0.089688, total loss: 0.116420\n",
      "1.0\n",
      "step 1500, entropy loss: 0.016806, l2_loss: 0.089582, total loss: 0.106389\n",
      "1.0\n",
      "0.9879\n",
      "step 1550, entropy loss: 0.020941, l2_loss: 0.089461, total loss: 0.110403\n",
      "1.0\n",
      "step 1600, entropy loss: 0.005037, l2_loss: 0.089367, total loss: 0.094404\n",
      "1.0\n",
      "step 1650, entropy loss: 0.059193, l2_loss: 0.089256, total loss: 0.148450\n",
      "1.0\n",
      "step 1700, entropy loss: 0.004936, l2_loss: 0.089164, total loss: 0.094100\n",
      "1.0\n",
      "step 1750, entropy loss: 0.052070, l2_loss: 0.089063, total loss: 0.141133\n",
      "1.0\n",
      "step 1800, entropy loss: 0.013813, l2_loss: 0.088962, total loss: 0.102775\n",
      "1.0\n",
      "step 1850, entropy loss: 0.028918, l2_loss: 0.088854, total loss: 0.117772\n",
      "1.0\n",
      "step 1900, entropy loss: 0.040931, l2_loss: 0.088745, total loss: 0.129676\n",
      "1.0\n",
      "step 1950, entropy loss: 0.035021, l2_loss: 0.088630, total loss: 0.123651\n",
      "1.0\n",
      "step 2000, entropy loss: 0.008930, l2_loss: 0.088523, total loss: 0.097453\n",
      "1.0\n",
      "0.9884\n",
      "step 2050, entropy loss: 0.053579, l2_loss: 0.088409, total loss: 0.141988\n",
      "1.0\n",
      "step 2100, entropy loss: 0.009720, l2_loss: 0.088303, total loss: 0.098023\n",
      "1.0\n",
      "step 2150, entropy loss: 0.027386, l2_loss: 0.088180, total loss: 0.115566\n",
      "1.0\n",
      "step 2200, entropy loss: 0.029911, l2_loss: 0.088079, total loss: 0.117989\n",
      "1.0\n",
      "step 2250, entropy loss: 0.003746, l2_loss: 0.087975, total loss: 0.091721\n",
      "1.0\n",
      "step 2300, entropy loss: 0.118781, l2_loss: 0.087857, total loss: 0.206638\n",
      "1.0\n",
      "step 2350, entropy loss: 0.005347, l2_loss: 0.087753, total loss: 0.093100\n",
      "1.0\n",
      "step 2400, entropy loss: 0.045705, l2_loss: 0.087630, total loss: 0.133335\n",
      "1.0\n",
      "step 2450, entropy loss: 0.061616, l2_loss: 0.087514, total loss: 0.149130\n",
      "1.0\n",
      "step 2500, entropy loss: 0.031108, l2_loss: 0.087396, total loss: 0.118503\n",
      "1.0\n",
      "0.986\n",
      "step 2550, entropy loss: 0.028557, l2_loss: 0.087297, total loss: 0.115855\n",
      "1.0\n",
      "step 2600, entropy loss: 0.006035, l2_loss: 0.087185, total loss: 0.093220\n",
      "1.0\n",
      "step 2650, entropy loss: 0.069067, l2_loss: 0.087065, total loss: 0.156132\n",
      "1.0\n",
      "step 2700, entropy loss: 0.019679, l2_loss: 0.086952, total loss: 0.106631\n",
      "1.0\n",
      "step 2750, entropy loss: 0.009077, l2_loss: 0.086838, total loss: 0.095915\n",
      "1.0\n",
      "step 2800, entropy loss: 0.020565, l2_loss: 0.086733, total loss: 0.107299\n",
      "1.0\n",
      "step 2850, entropy loss: 0.046646, l2_loss: 0.086623, total loss: 0.133268\n",
      "1.0\n",
      "step 2900, entropy loss: 0.113754, l2_loss: 0.086513, total loss: 0.200266\n",
      "0.98\n",
      "step 2950, entropy loss: 0.066653, l2_loss: 0.086399, total loss: 0.153052\n",
      "1.0\n",
      "step 3000, entropy loss: 0.079300, l2_loss: 0.086292, total loss: 0.165592\n",
      "1.0\n",
      "0.9907\n",
      "step 3050, entropy loss: 0.006167, l2_loss: 0.086161, total loss: 0.092328\n",
      "0.99\n",
      "step 3100, entropy loss: 0.026058, l2_loss: 0.086055, total loss: 0.112114\n",
      "1.0\n",
      "step 3150, entropy loss: 0.022371, l2_loss: 0.085946, total loss: 0.108316\n",
      "1.0\n",
      "step 3200, entropy loss: 0.020007, l2_loss: 0.085838, total loss: 0.105845\n",
      "1.0\n",
      "step 3250, entropy loss: 0.007680, l2_loss: 0.085711, total loss: 0.093391\n",
      "0.99\n",
      "step 3300, entropy loss: 0.049125, l2_loss: 0.085603, total loss: 0.134728\n",
      "1.0\n",
      "step 3350, entropy loss: 0.002810, l2_loss: 0.085487, total loss: 0.088297\n",
      "1.0\n",
      "step 3400, entropy loss: 0.030094, l2_loss: 0.085373, total loss: 0.115467\n",
      "1.0\n",
      "step 3450, entropy loss: 0.015889, l2_loss: 0.085245, total loss: 0.101134\n",
      "1.0\n",
      "step 3500, entropy loss: 0.028848, l2_loss: 0.085128, total loss: 0.113976\n",
      "1.0\n",
      "0.9902\n",
      "step 3550, entropy loss: 0.013185, l2_loss: 0.085010, total loss: 0.098195\n",
      "1.0\n",
      "step 3600, entropy loss: 0.014916, l2_loss: 0.084898, total loss: 0.099814\n",
      "1.0\n",
      "step 3650, entropy loss: 0.024261, l2_loss: 0.084777, total loss: 0.109038\n",
      "1.0\n",
      "step 3700, entropy loss: 0.039891, l2_loss: 0.084665, total loss: 0.124556\n",
      "1.0\n",
      "step 3750, entropy loss: 0.012276, l2_loss: 0.084543, total loss: 0.096819\n",
      "1.0\n",
      "step 3800, entropy loss: 0.009678, l2_loss: 0.084434, total loss: 0.094113\n",
      "1.0\n",
      "step 3850, entropy loss: 0.043221, l2_loss: 0.084316, total loss: 0.127537\n",
      "0.99\n",
      "step 3900, entropy loss: 0.027356, l2_loss: 0.084200, total loss: 0.111556\n",
      "1.0\n",
      "step 3950, entropy loss: 0.005404, l2_loss: 0.084090, total loss: 0.089494\n",
      "1.0\n",
      "step 4000, entropy loss: 0.002721, l2_loss: 0.083974, total loss: 0.086694\n",
      "1.0\n",
      "0.9912\n"
     ]
    }
   ],
   "source": [
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "regularization=0.0\n",
    "for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):\n",
    "    regularization=regularization+regularizer(w)\n",
    "l2_loss=regularization\n",
    "#l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')] )\n",
    "#total_loss = cross_entropy + 7e-5*l2_loss\n",
    "total_loss = cross_entropy + l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)\n",
    "# Train\n",
    "for step in range(4000):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "  #lr = 0.01\n",
    "  lr = 0.2\n",
    "    \n",
    "    \n",
    "\n",
    "  _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate:lr, keep_prob:0.5})\n",
    "  \n",
    "  if (step+1) % 50 == 0:\n",
    "    print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' % \n",
    "            (step+1, loss, l2_loss_value, total_loss_value))\n",
    "    # Test trained model\n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.5}))\n",
    "  if (step+1) % 500 == 0:\n",
    "    print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels, keep_prob:0.5}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "心得与小结:\n",
     "可以看到训练批次上的准确率很快超过了98%，最终测试集准确率达到了约99%（0.9912），因此这里的学习率等参数设置得比较合适。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
