{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\YuGo\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [],
   "source": [
     "INPUT_NODE = 784  # Number of input-layer nodes: one per pixel of a 28*28 MNIST image\n",
     "OUTPUT_NODE = 10   # Number of output-layer nodes: one per digit class (0-9)\n",
     "\n",
     "# Network hyperparameters\n",
     "LAYER1_NODE = 1000 # Nodes in the first hidden layer\n",
     "LAYER2_NODE = 1000 # Nodes in the second hidden layer\n",
     "LAYER3_NODE = 1000 # Nodes in the third hidden layer\n",
     "BATCH_SIZE = 100 # Examples per training batch; smaller is closer to stochastic gradient descent, larger to full-batch gradient descent\n",
     "REGULARIZATION_RATE = 0.0001 # Coefficient of the L2 regularization term in the loss\n",
     "LEARNING_RATE = 0.5 # Base learning rate\n",
     "TRAINING_STEPS = 50000 # Number of training iterations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 100,
   "metadata": {},
   "outputs": [],
   "source": [
    "#给定神经网络的输入和所有参数，计算神经网络的前向传播结果，定义了一个使用ReLU的三层全连接神经网络，通过加入隐藏层实现了多层网络结构\n",
    "def inference(input_tensor, weights1, biases1, weights2, biases2, weights3, biases3, weights4, biases4):\n",
    "    #若没有提供滑动平均类，则直接使用参数当前的取值\n",
    "    #计算隐藏层的前向传播结果，使用ReLU激活函数\n",
    "    layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)\n",
    "    \n",
    "    layer2 = tf.nn.relu(tf.matmul(layer1, weights2) + biases2)\n",
    "    \n",
    "    layer3 = tf.nn.relu(tf.matmul(layer2, weights3) + biases3)\n",
    "    \n",
    "    return tf.nn.softmax(tf.matmul(layer3, weights4) + biases4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [],
   "source": [
    "#训练网络的过程\n",
    "def train(mnist):\n",
    "    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n",
    "    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n",
    "\n",
    "    #使用截断正态分布生成隐藏层的参数\n",
    "    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))\n",
    "    biases1 = tf.Variable(tf.zeros([LAYER1_NODE]))\n",
    "    #使用截断正态分布生成隐藏层的参数\n",
    "    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, LAYER2_NODE], stddev=0.1))\n",
    "    biases2 = tf.Variable(tf.zeros([LAYER2_NODE]))\n",
    "    #生成输出层的参数\n",
    "    weights3 = tf.Variable(tf.truncated_normal([LAYER2_NODE, LAYER3_NODE], stddev=0.1))\n",
    "    biases3 = tf.Variable(tf.zeros([LAYER3_NODE]))\n",
    "    #生成输出层的参数\n",
    "    weights4 = tf.Variable(tf.truncated_normal([LAYER3_NODE, OUTPUT_NODE], stddev=0.1))\n",
    "    biases4 = tf.Variable(tf.zeros([OUTPUT_NODE]))\n",
    "\n",
    "    #计算在当前参数下神经网络前向传播的结果，这里的用于计算滑动平均的类为None，所以没有使用滑动平均值\n",
    "    y = inference(x, weights1, biases1, weights2, biases2, weights3, biases3, weights4, biases4)\n",
    "\n",
    "    #计算交叉熵，使用了sparse_softmax_cross_entropy_with_logits，当问题只有一个正确答案时，可以使用这个函数来加速交叉熵的计算。\n",
    "    #这个函数的第一个参数是神经网络不包括softmax层的前向传播结果，第二个是训练数据的正确答案,argmax返回最大值的位置\n",
    "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n",
    "    #计算在当前batch中所有样例的交叉熵平均值\n",
    "    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
    "    \n",
    "    #计算L2正则化损失\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
    "    #计算网络的正则化损失\n",
    "    regularization = regularizer(weights1) + regularizer(weights2) + regularizer(weights3) + regularizer(weights4)\n",
    "    #总损失为交叉熵损失和正则化损失之和\n",
    "    loss_op = cross_entropy_mean + regularization\n",
    "\n",
    "    #使用GradientDescentOptimizer优化算法优化损失函数\n",
    "    train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss_op)\n",
    "\n",
    "    #f.argmax(y, 1)计算了每一个样例的预测答案，得到的结果是一个长度为batch的一维数组\n",
    "    #一维数组中的值就表示了每一个样例对应的数字识别结果\n",
    "    #tf.equal判断两个张量的每一维是否相等。如果相等返回True，反之返回False\n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "    #首先将一个布尔型的数组转换为实数，然后计算平均值\n",
    "    #平均值就是网络在这一组数据上的正确率\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "    #初始会话并开始训练过程\n",
    "    with tf.Session() as sess:\n",
    "        tf.global_variables_initializer().run() #参数初始化\n",
    "        #准备验证数据，在神经网络的训练过程中，会通过验证数据来大致判断停止的条件和评判训练的效果\n",
    "        validate_data = {x: mnist.validation.images, y_:mnist.validation.labels}\n",
    "        #准备测试数据\n",
    "        test_data = {x:mnist.test.images, y_:mnist.test.labels}\n",
    "        #迭代的训练神经网络\n",
    "        for i in range(TRAINING_STEPS):\n",
    "\n",
    "            #每1000轮输出一次在验证数据集上的测试结果\n",
    "            if i%1000==0:\n",
    "                #计算滑动平均模型在验证数据上的结果，因为MNIST数据集较小，所以可以一次处理所有的验证数据\n",
    "                #loss = sess.run(loss_op)\n",
    "                loss, validate_acc = sess.run([loss_op, accuracy], feed_dict = validate_data)\n",
    "                print(\"After %d training steps, loss is %f, validation accuracy using average model is %g\" %(i, loss, validate_acc))\n",
    "\n",
    "            # 产生训练数据batch,开始训练\n",
    "            xs, ys = mnist.train.next_batch(BATCH_SIZE)  # xs为数据，ys为标签\n",
    "            sess.run(train_step, feed_dict={x:xs, y_:ys})\n",
    "\n",
    "        test_acc = sess.run(accuracy, feed_dict=test_data)\n",
    "        print(\"After %d training steps, loss is %f, test accuracy using average model is %g\" %(TRAINING_STEPS, loss, validate_acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "After 0 training steps, loss is 3.432753, validation accuracy using average model is 0.0872\n",
      "After 1000 training steps, loss is 2.574718, validation accuracy using average model is 0.8682\n",
      "After 2000 training steps, loss is 2.384270, validation accuracy using average model is 0.969\n",
      "After 3000 training steps, loss is 2.295437, validation accuracy using average model is 0.9756\n",
      "After 4000 training steps, loss is 2.216950, validation accuracy using average model is 0.9784\n",
      "After 5000 training steps, loss is 2.147448, validation accuracy using average model is 0.9804\n",
      "After 6000 training steps, loss is 2.083497, validation accuracy using average model is 0.9822\n",
      "After 7000 training steps, loss is 2.027007, validation accuracy using average model is 0.9828\n",
      "After 8000 training steps, loss is 1.975127, validation accuracy using average model is 0.9836\n",
      "After 9000 training steps, loss is 1.928848, validation accuracy using average model is 0.9844\n",
      "After 10000 training steps, loss is 1.887615, validation accuracy using average model is 0.9844\n",
      "After 11000 training steps, loss is 1.851010, validation accuracy using average model is 0.9832\n",
      "After 12000 training steps, loss is 1.815642, validation accuracy using average model is 0.9842\n",
      "After 13000 training steps, loss is 1.784704, validation accuracy using average model is 0.984\n",
      "After 14000 training steps, loss is 1.758051, validation accuracy using average model is 0.9846\n",
      "After 15000 training steps, loss is 1.732494, validation accuracy using average model is 0.985\n",
      "After 16000 training steps, loss is 1.711599, validation accuracy using average model is 0.983\n",
      "After 17000 training steps, loss is 1.690157, validation accuracy using average model is 0.9838\n",
      "After 18000 training steps, loss is 1.670487, validation accuracy using average model is 0.9846\n",
      "After 19000 training steps, loss is 1.654032, validation accuracy using average model is 0.9842\n",
      "After 20000 training steps, loss is 1.640339, validation accuracy using average model is 0.9828\n",
      "After 21000 training steps, loss is 1.624316, validation accuracy using average model is 0.985\n",
      "After 22000 training steps, loss is 1.612122, validation accuracy using average model is 0.9854\n",
      "After 23000 training steps, loss is 1.602582, validation accuracy using average model is 0.9838\n",
      "After 24000 training steps, loss is 1.590053, validation accuracy using average model is 0.9854\n",
      "After 25000 training steps, loss is 1.580307, validation accuracy using average model is 0.9856\n",
      "After 26000 training steps, loss is 1.572674, validation accuracy using average model is 0.9842\n",
      "After 27000 training steps, loss is 1.565462, validation accuracy using average model is 0.9846\n",
      "After 28000 training steps, loss is 1.559534, validation accuracy using average model is 0.9842\n",
      "After 29000 training steps, loss is 1.551555, validation accuracy using average model is 0.9856\n",
      "After 30000 training steps, loss is 1.545627, validation accuracy using average model is 0.9852\n",
      "After 31000 training steps, loss is 1.540023, validation accuracy using average model is 0.9852\n",
      "After 32000 training steps, loss is 1.535854, validation accuracy using average model is 0.986\n",
      "After 33000 training steps, loss is 1.534530, validation accuracy using average model is 0.9834\n",
      "After 34000 training steps, loss is 1.530193, validation accuracy using average model is 0.9844\n",
      "After 35000 training steps, loss is 1.523534, validation accuracy using average model is 0.9866\n",
      "After 36000 training steps, loss is 1.524568, validation accuracy using average model is 0.9822\n",
      "After 37000 training steps, loss is 1.517906, validation accuracy using average model is 0.9858\n",
      "After 38000 training steps, loss is 1.517919, validation accuracy using average model is 0.9838\n",
      "After 39000 training steps, loss is 1.521353, validation accuracy using average model is 0.98\n",
      "After 40000 training steps, loss is 1.513054, validation accuracy using average model is 0.9846\n",
      "After 41000 training steps, loss is 1.508958, validation accuracy using average model is 0.9862\n",
      "After 42000 training steps, loss is 1.511616, validation accuracy using average model is 0.9816\n",
      "After 43000 training steps, loss is 1.504748, validation accuracy using average model is 0.9862\n",
      "After 44000 training steps, loss is 1.503776, validation accuracy using average model is 0.986\n",
      "After 45000 training steps, loss is 1.505160, validation accuracy using average model is 0.984\n",
      "After 46000 training steps, loss is 1.501152, validation accuracy using average model is 0.988\n",
      "After 47000 training steps, loss is 1.499915, validation accuracy using average model is 0.9868\n",
      "After 48000 training steps, loss is 1.498453, validation accuracy using average model is 0.9872\n",
      "After 49000 training steps, loss is 1.497465, validation accuracy using average model is 0.9878\n",
      "After 50000 training steps, loss is 1.497465, test accuracy using average model is 0.9878\n"
     ]
    }
   ],
   "source": [
     "# Load the MNIST dataset; one_hot=True encodes each label as a one-hot vector\n",
     "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
     "train(mnist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
