{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./MNIST/train-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST/train-labels-idx1-ubyte.gz\n",
      "Extracting ./MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./MNIST/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data: read the MNIST dataset, downloading it into data_dir on first run.\n",
    "# one_hot=True encodes each digit label as a 10-dimensional one-hot vector,\n",
    "# matching the softmax cross-entropy loss used later.\n",
    "data_dir = './MNIST'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Network architecture parameters\n",
    "INPUT_NODE = 784  # Input layer size: pixels per MNIST image (28*28)\n",
    "LAYER_NODE = 300  # Hidden layer size: a single hidden layer with 300 nodes\n",
    "OUTPUT_NODE = 10  # Output layer size: one node per digit class 0-9"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "调整正则惩罚系数，发现REGULARIZATION_RATE = 0.0001比较合适"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimizer hyper-parameters (commented lines record the tuning experiments)\n",
    "#LEARNING_RATE = 1 # learning rate; hit 98% accuracy after 4000 steps, but visibly unstable\n",
    "LEARNING_RATE = 0.8 # learning rate; hit 98% accuracy after 4000 steps\n",
    "#LEARNING_RATE = 0.1 # learning rate; hit 98% accuracy after 16500 steps\n",
    "#LEARNING_RATE = 0.2 # learning rate; hit 98% accuracy after 10500 steps\n",
    "#LEARNING_RATE = 0.5 # learning rate; hit 98% accuracy after 4500 steps\n",
    "\n",
    "#REGULARIZATION_RATE = 0.00001 # with learning rate fixed at 0.8: 98% after 4500 steps\n",
    "REGULARIZATION_RATE = 0.0001 # with learning rate fixed at 0.8: 98% after 3500 steps\n",
    "#REGULARIZATION_RATE = 0.001 # with learning rate fixed at 0.8: still below 98% after 10000 steps\n",
    "#REGULARIZATION_RATE = 0.01 # coefficient of the L2 regularization term in the loss\n",
    "\n",
    "# Training parameters\n",
    "BATCH_SIZE = 100  # number of images per training batch\n",
    "TRAINING_STEPS = 30000  # total number of training steps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model definition: 784 -> 300 -> 10 fully connected network\n",
    "x = tf.placeholder(tf.float32, [None, INPUT_NODE])  # input images, one flattened image per row\n",
    "y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE])  # ground-truth labels (one-hot)\n",
    "weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER_NODE], stddev=0.1))  # hidden-layer weights, truncated-normal init with stddev 0.1\n",
    "biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER_NODE]))  # hidden-layer biases, initialized to 0.1\n",
    "weights2 = tf.Variable(tf.truncated_normal([LAYER_NODE, OUTPUT_NODE], stddev=0.1))  # output-layer weights, truncated-normal init with stddev 0.1\n",
    "biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))  # output-layer biases, initialized to 0.1\n",
    "# Forward pass through the hidden layer\n",
    "first_layer = tf.nn.relu(tf.matmul(x, weights1) + biases1)    # ReLU activation (the original comment said sigmoid, but the code calls tf.nn.relu)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "y=tf.matmul(first_layer, weights2) + biases2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Regularization setup\n",
    "regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # L2 penalty with coefficient REGULARIZATION_RATE\n",
    "regularization = regularizer(weights1) + regularizer(weights2)  # total regularization loss over both weight matrices (biases are not regularized)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Softmax cross-entropy between logits y and one-hot labels y_, averaged over the batch.\n",
    "# NOTE(review): softmax_cross_entropy_with_logits is deprecated in TF 1.x in favor of\n",
    "# softmax_cross_entropy_with_logits_v2; behavior is the same here since labels are constants.\n",
    "cross_entropy_mean = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "loss = cross_entropy_mean + regularization  # total loss = cross-entropy + regularization penalty"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)  # 优化损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a session and initialize all variables before training starts\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 0,train_accuracy= 0.1226\n",
      "step 500,train_accuracy= 0.9592\n",
      "step 1000,train_accuracy= 0.9708\n",
      "step 1500,train_accuracy= 0.9676\n",
      "step 2000,train_accuracy= 0.9786\n",
      "step 2500,train_accuracy= 0.9786\n",
      "step 3000,train_accuracy= 0.979\n",
      "step 3500,train_accuracy= 0.9795\n",
      "step 4000,train_accuracy= 0.9806\n",
      "step 4500,train_accuracy= 0.9799\n",
      "step 5000,train_accuracy= 0.9782\n",
      "step 5500,train_accuracy= 0.9797\n",
      "step 6000,train_accuracy= 0.9772\n",
      "step 6500,train_accuracy= 0.9791\n",
      "step 7000,train_accuracy= 0.9807\n",
      "step 7500,train_accuracy= 0.9804\n",
      "step 8000,train_accuracy= 0.981\n",
      "step 8500,train_accuracy= 0.982\n",
      "step 9000,train_accuracy= 0.9815\n",
      "step 9500,train_accuracy= 0.9831\n",
      "step 10000,train_accuracy= 0.9825\n",
      "step 10500,train_accuracy= 0.9813\n",
      "step 11000,train_accuracy= 0.9806\n",
      "step 11500,train_accuracy= 0.982\n",
      "step 12000,train_accuracy= 0.9809\n",
      "step 12500,train_accuracy= 0.9819\n",
      "step 13000,train_accuracy= 0.9817\n",
      "step 13500,train_accuracy= 0.9816\n",
      "step 14000,train_accuracy= 0.9829\n",
      "step 14500,train_accuracy= 0.9811\n",
      "step 15000,train_accuracy= 0.9826\n",
      "step 15500,train_accuracy= 0.9818\n",
      "step 16000,train_accuracy= 0.9832\n",
      "step 16500,train_accuracy= 0.9803\n",
      "step 17000,train_accuracy= 0.9826\n",
      "step 17500,train_accuracy= 0.9826\n",
      "step 18000,train_accuracy= 0.9815\n",
      "step 18500,train_accuracy= 0.9805\n",
      "step 19000,train_accuracy= 0.9806\n",
      "step 19500,train_accuracy= 0.9834\n",
      "step 20000,train_accuracy= 0.9817\n",
      "step 20500,train_accuracy= 0.9824\n",
      "step 21000,train_accuracy= 0.9832\n",
      "step 21500,train_accuracy= 0.9829\n",
      "step 22000,train_accuracy= 0.9807\n",
      "step 22500,train_accuracy= 0.9843\n",
      "step 23000,train_accuracy= 0.9823\n",
      "step 23500,train_accuracy= 0.9831\n",
      "step 24000,train_accuracy= 0.9822\n",
      "step 24500,train_accuracy= 0.9831\n",
      "step 25000,train_accuracy= 0.9831\n",
      "step 25500,train_accuracy= 0.9813\n",
      "step 26000,train_accuracy= 0.9825\n",
      "step 26500,train_accuracy= 0.9814\n",
      "step 27000,train_accuracy= 0.9825\n",
      "step 27500,train_accuracy= 0.9827\n",
      "step 28000,train_accuracy= 0.9827\n",
      "step 28500,train_accuracy= 0.9832\n",
      "step 29000,train_accuracy= 0.9832\n",
      "step 29500,train_accuracy= 0.9843\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "# Build the evaluation ops ONCE, outside the loop: the original code called\n",
    "# tf.equal / tf.reduce_mean inside the loop every 500 steps, which adds new\n",
    "# nodes to the TF1 graph on every evaluation -- the graph grows without bound\n",
    "# and the session slows down as training proceeds.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "for i in range(TRAINING_STEPS):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)\n",
    "  if i%500==0: \n",
    "        # Report accuracy on the full test set every 500 steps.\n",
    "        print (\"step %d,train_accuracy= %g\"%(i,sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                              y_: mnist.test.labels})))\n",
    "  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "寻找合适的正则化惩罚系数，发现REGULARIZATION_RATE = 0.0001比较合适。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
