{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n",
      "Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n",
      "After 0 training step(s), validation accuracy using average model is 0.0592 \n",
      "After 1000 training step(s), validation accuracy using average model is 0.9758 \n",
      "After 2000 training step(s), validation accuracy using average model is 0.981 \n",
      "After 3000 training step(s), validation accuracy using average model is 0.9808 \n",
      "After 4000 training step(s), validation accuracy using average model is 0.9808 \n",
      "After 5000 training step(s), validation accuracy using average model is 0.9826 \n",
      "After 6000 training step(s), validation accuracy using average model is 0.9816 \n",
      "After 7000 training step(s), validation accuracy using average model is 0.9818 \n",
      "After 8000 training step(s), validation accuracy using average model is 0.9826 \n",
      "After 9000 training step(s), validation accuracy using average model is 0.9822 \n",
      "After 10000 training step(s), test accuracy using average model is 0.9829\n"
     ]
    }
   ],
   "source": [
     "import tensorflow as tf\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "mnist = input_data.read_data_sets(\"./data/MNIST/\", one_hot=True)\n",
     "\n",
     "\n",
     "INPUT_NODE = 784     # 28x28 input pixels, flattened\n",
     "OUTPUT_NODE = 10     # one logit per digit class\n",
     "LAYER1_NODE = 500    # hidden-layer width\n",
     "                              \n",
     "BATCH_SIZE = 100        \n",
     "\n",
     "# Model hyper-parameters\n",
     "LEARNING_RATE_BASE = 0.8      \n",
     "LEARNING_RATE_DECAY = 0.99    \n",
     "REGULARAZTION_RATE = 0.0001   \n",
     "TRAINING_STEPS = 10000        \n",
     "MOVING_AVERAGE_DECAY = 0.99 \n",
     "\n",
    "def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):\n",
    "    # 不使用滑动平均类\n",
    "    if avg_class == None:\n",
    "        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)\n",
    "        return tf.matmul(layer1, weights2) + biases2\n",
    "\n",
    "    else:\n",
    "        \n",
    "        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(weights1)) + avg_class.average(biases1))\n",
    "        return tf.matmul(layer1, avg_class.average(weights2)) + avg_class.average(biases2)  \n",
    "    \n",
     "def train(mnist):\n",
     "    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n",
     "    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n",
     "    # Hidden-layer parameters.\n",
     "    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))\n",
     "    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))\n",
     "    # Output-layer parameters.\n",
     "    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))\n",
     "    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))\n",
     "\n",
     "    # Forward pass using the raw (non-averaged) parameters\n",
     "    y = inference(x, None, weights1, biases1, weights2, biases2)\n",
     "    \n",
     "    # Global step counter and the moving-average class applied to all trainable variables\n",
     "    global_step = tf.Variable(0, trainable=False)\n",
     "    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n",
     "    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n",
     "    average_y = inference(x, variable_averages, weights1, biases1, weights2, biases2)\n",
     "    \n",
     "    # Cross-entropy loss and its mean over the batch\n",
     "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n",
     "    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
     "    \n",
     "    # Total loss: cross-entropy plus L2 regularization on both weight matrices\n",
     "    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)\n",
     "    regularaztion = regularizer(weights1) + regularizer(weights2)\n",
     "    loss = cross_entropy_mean + regularaztion\n",
     "    \n",
     "    # Exponentially decaying learning rate.\n",
     "    learning_rate = tf.train.exponential_decay(\n",
     "        LEARNING_RATE_BASE,\n",
     "        global_step,\n",
     "        mnist.train.num_examples / BATCH_SIZE,\n",
     "        LEARNING_RATE_DECAY,\n",
     "        staircase=True)\n",
     "    \n",
     "    # Optimize the loss\n",
     "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
     "    \n",
     "    # Combine the gradient step and the moving-average update into one op\n",
     "    with tf.control_dependencies([train_step, variables_averages_op]):\n",
     "        train_op = tf.no_op(name='train')\n",
     "\n",
     "    # Accuracy of the averaged model\n",
     "    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))\n",
     "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
     "    \n",
     "    # Create a session and run the training process.\n",
     "    with tf.Session() as sess:\n",
     "        tf.global_variables_initializer().run()\n",
     "        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n",
     "        test_feed = {x: mnist.test.images, y_: mnist.test.labels} \n",
     "        \n",
     "        # Training loop.\n",
     "        for i in range(TRAINING_STEPS):\n",
     "            if i % 1000 == 0:\n",
     "                validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n",
     "                print(\"After %d training step(s), validation accuracy using average model is %g \" % (i, validate_acc))\n",
     "            \n",
     "            xs,ys=mnist.train.next_batch(BATCH_SIZE)\n",
     "            sess.run(train_op,feed_dict={x:xs,y_:ys})\n",
     "\n",
     "        test_acc=sess.run(accuracy,feed_dict=test_feed)\n",
     "        print((\"After %d training step(s), test accuracy using average model is %g\" %(TRAINING_STEPS, test_acc)))\n",
     "\n",
     "train(mnist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n",
      "Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n",
      "After 0 training step(s), validation accuracy using average model is 0.0932 \n",
      "After 1000 training step(s), validation accuracy using average model is 0.9466 \n",
      "After 2000 training step(s), validation accuracy using average model is 0.962 \n",
      "After 3000 training step(s), validation accuracy using average model is 0.9662 \n",
      "After 4000 training step(s), validation accuracy using average model is 0.967 \n",
      "After 5000 training step(s), validation accuracy using average model is 0.9696 \n",
      "After 6000 training step(s), validation accuracy using average model is 0.9686 \n",
      "After 7000 training step(s), validation accuracy using average model is 0.9698 \n",
      "After 8000 training step(s), validation accuracy using average model is 0.97 \n",
      "After 9000 training step(s), validation accuracy using average model is 0.97 \n",
      "After 10000 training step(s), test accuracy using average model is 0.9666\n"
     ]
    }
   ],
   "source": [
     "import tensorflow as tf\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "mnist = input_data.read_data_sets(\"./data/MNIST/\", one_hot=True)\n",
     "\n",
     "\n",
     "INPUT_NODE = 784     # 28x28 input pixels, flattened\n",
     "OUTPUT_NODE = 10     # one logit per digit class\n",
     "LAYER1_NODE = 500 \n",
     "LAYER2_NODE = 500 \n",
     "LAYER3_NODE = 500 \n",
     "LAYER4_NODE = 500 \n",
     "LAYER5_NODE = 500 \n",
     "LAYER6_NODE = 500 \n",
     "LAYER7_NODE = 500 \n",
     "LAYER8_NODE = 300 \n",
     "LAYER9_NODE = 200\n",
     "LAYER10_NODE = 100 \n",
     "                              \n",
     "BATCH_SIZE = 100        \n",
     "\n",
     "# Model hyper-parameters\n",
     "LEARNING_RATE_BASE = 0.008      \n",
     "LEARNING_RATE_DECAY = 0.99    \n",
     "REGULARAZTION_RATE = 0.0001   \n",
     "TRAINING_STEPS = 10000        \n",
     "MOVING_AVERAGE_DECAY = 0.99 \n",
     "\n",
    "def inference(input_tensor, avg_class, W, B):\n",
    "    # 不使用滑动平均类\n",
    "    if avg_class == None:\n",
    "        layer1 = tf.nn.relu(tf.matmul(input_tensor, W[0]) + B[0])\n",
    "        layer2 = tf.nn.relu(tf.matmul(layer1, W[1]) + B[1])\n",
    "        layer3 = tf.nn.relu(tf.matmul(layer2, W[2]) + B[2])\n",
    "        layer4 = tf.nn.relu(tf.matmul(layer3, W[3]) + B[3])\n",
    "        layer5 = tf.nn.relu(tf.matmul(layer4, W[4]) + B[4])\n",
    "        layer6 = tf.nn.relu(tf.matmul(layer5, W[5]) + B[5])\n",
    "        layer7 = tf.nn.relu(tf.matmul(layer6, W[6]) + B[6])\n",
    "        layer8 = tf.nn.relu(tf.matmul(layer7, W[7]) + B[7])\n",
    "        layer9 = tf.nn.relu(tf.matmul(layer8, W[8]) + B[8])\n",
    "        layer10 = tf.nn.relu(tf.matmul(layer9, W[9]) + B[9])\n",
    "        return tf.matmul(layer10, W[10]) + B[10]\n",
    "    \n",
    "    else:\n",
    "        \n",
    "        layer1 = tf.nn.relu(tf.matmul(input_tensor, avg_class.average(W[0])) + avg_class.average(B[0]))\n",
    "        layer2 = tf.nn.relu(tf.matmul(layer1, avg_class.average(W[1])) + avg_class.average(B[1]))\n",
    "        layer3 = tf.nn.relu(tf.matmul(layer2, avg_class.average(W[2])) + avg_class.average(B[2]))\n",
    "        layer4 = tf.nn.relu(tf.matmul(layer3, avg_class.average(W[3])) + avg_class.average(B[3]))\n",
    "        layer5 = tf.nn.relu(tf.matmul(layer4, avg_class.average(W[4])) + avg_class.average(B[4]))\n",
    "        layer6 = tf.nn.relu(tf.matmul(layer5, avg_class.average(W[5])) + avg_class.average(B[5]))\n",
    "        layer7 = tf.nn.relu(tf.matmul(layer6, avg_class.average(W[6])) + avg_class.average(B[6]))\n",
    "        layer8 = tf.nn.relu(tf.matmul(layer7, avg_class.average(W[7])) + avg_class.average(B[7]))\n",
    "        layer9 = tf.nn.relu(tf.matmul(layer8, avg_class.average(W[8])) + avg_class.average(B[8]))\n",
    "        layer10 = tf.nn.relu(tf.matmul(layer9, avg_class.average(W[9])) + avg_class.average(B[9]))\n",
    "        return tf.matmul(layer10, avg_class.average(W[10])) + avg_class.average(B[10])  \n",
    "    \n",
    "def train(mnist):\n",
    "    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n",
    "    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n",
    "    \n",
    "    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))\n",
    "    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))\n",
    "    \n",
    "    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, LAYER2_NODE], stddev=0.1))\n",
    "    biases2 = tf.Variable(tf.constant(0.1, shape=[ LAYER2_NODE]))\n",
    "    \n",
    "    weights3 = tf.Variable(tf.truncated_normal([ LAYER2_NODE,  LAYER3_NODE], stddev=0.1))\n",
    "    biases3 = tf.Variable(tf.constant(0.1, shape=[LAYER3_NODE]))\n",
    "    \n",
    "    weights4 = tf.Variable(tf.truncated_normal([LAYER3_NODE, LAYER4_NODE], stddev=0.1))\n",
    "    biases4 = tf.Variable(tf.constant(0.1, shape=[LAYER4_NODE]))\n",
    "    \n",
    "    weights5 = tf.Variable(tf.truncated_normal([LAYER4_NODE, LAYER5_NODE], stddev=0.1))\n",
    "    biases5 = tf.Variable(tf.constant(0.1, shape=[LAYER5_NODE]))\n",
    "    \n",
    "    weights6 = tf.Variable(tf.truncated_normal([LAYER5_NODE, LAYER6_NODE], stddev=0.1))\n",
    "    biases6 = tf.Variable(tf.constant(0.1, shape=[LAYER6_NODE]))\n",
    "    \n",
    "    weights7 = tf.Variable(tf.truncated_normal([LAYER6_NODE, LAYER7_NODE], stddev=0.1))\n",
    "    biases7 = tf.Variable(tf.constant(0.1, shape=[LAYER7_NODE]))\n",
    "    \n",
    "    weights8 = tf.Variable(tf.truncated_normal([LAYER7_NODE, LAYER8_NODE], stddev=0.1))\n",
    "    biases8 = tf.Variable(tf.constant(0.1, shape=[LAYER8_NODE]))\n",
    "    \n",
    "    weights9 = tf.Variable(tf.truncated_normal([LAYER8_NODE, LAYER9_NODE], stddev=0.1))\n",
    "    biases9 = tf.Variable(tf.constant(0.1, shape=[LAYER9_NODE]))\n",
    "    \n",
    "    weights10 = tf.Variable(tf.truncated_normal([LAYER9_NODE, LAYER10_NODE], stddev=0.1))\n",
    "    biases10 = tf.Variable(tf.constant(0.1, shape=[LAYER10_NODE]))\n",
    "    \n",
    "    weights11 = tf.Variable(tf.truncated_normal([LAYER10_NODE, OUTPUT_NODE], stddev=0.1))\n",
    "    biases11 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))\n",
    "    \n",
    "    W=[weights1, weights2, weights3, weights4, weights5, weights6, weights7, weights8, weights9, weights10, weights11]\n",
    "    B=[biases1, biases2, biases3, biases4, biases5, biases6, biases7, biases8, biases9, biases10, biases11]\n",
    "    \n",
    "    # 计算不含滑动平均类的前向传播结果\n",
    "    y = inference(x, None, W, B)\n",
    "    \n",
    "    # 定义训练轮数及相关的滑动平均类 \n",
    "    global_step = tf.Variable(0, trainable=False)\n",
    "    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n",
    "    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n",
    "    average_y = inference(x, variable_averages, W, B)\n",
    "    \n",
    "    # 计算交叉熵及其平均值\n",
    "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n",
    "    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
    "    \n",
    "    # 损失函数的计算\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)\n",
    "    regularaztion = regularizer(W[0]) \n",
    "    for i in range(1,11):\n",
    "        regularazation=regularaztion + regularizer(W[i]) \n",
    "    loss = cross_entropy_mean + regularaztion\n",
    "    \n",
    "    # 设置指数衰减的学习率。\n",
    "    learning_rate = tf.train.exponential_decay(\n",
    "        LEARNING_RATE_BASE,\n",
    "        global_step,\n",
    "        mnist.train.num_examples / BATCH_SIZE,\n",
    "        LEARNING_RATE_DECAY,\n",
    "        staircase=True)\n",
    "    \n",
    "    # 优化损失函数\n",
    "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "    \n",
    "    # 反向传播更新参数和更新每一个参数的滑动平均值\n",
    "    with tf.control_dependencies([train_step, variables_averages_op]):\n",
    "        train_op = tf.no_op(name='train')\n",
    "\n",
    "    # 计算正确率\n",
    "    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    \n",
    "    # 初始化会话并开始训练过程。\n",
    "    with tf.Session() as sess:\n",
    "        tf.global_variables_initializer().run()\n",
    "        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n",
    "        test_feed = {x: mnist.test.images, y_: mnist.test.labels} \n",
    "        \n",
    "        # 循环的训练神经网络。\n",
    "        for i in range(TRAINING_STEPS):\n",
    "            if i % 1000 == 0:\n",
    "                validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n",
    "                print(\"After %d training step(s), validation accuracy using average model is %g \" % (i, validate_acc))\n",
    "            \n",
    "            xs,ys=mnist.train.next_batch(BATCH_SIZE)\n",
    "            sess.run(train_op,feed_dict={x:xs,y_:ys})\n",
    "\n",
    "        test_acc=sess.run(accuracy,feed_dict=test_feed)\n",
    "        print((\"After %d training step(s), test accuracy using average model is %g\" %(TRAINING_STEPS, test_acc)))\n",
    "\n",
    "train(mnist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n",
      "Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n",
      "After 0 training step(s), validation accuracy using average model is 0.0902 \n",
      "After 1000 training step(s), validation accuracy using average model is 0.9516 \n",
      "After 2000 training step(s), validation accuracy using average model is 0.9614 \n",
      "After 3000 training step(s), validation accuracy using average model is 0.9668 \n",
      "After 4000 training step(s), validation accuracy using average model is 0.9688 \n",
      "After 5000 training step(s), validation accuracy using average model is 0.971 \n",
      "After 6000 training step(s), validation accuracy using average model is 0.9742 \n",
      "After 7000 training step(s), validation accuracy using average model is 0.9752 \n",
      "After 8000 training step(s), validation accuracy using average model is 0.9738 \n",
      "After 9000 training step(s), validation accuracy using average model is 0.975 \n",
      "After 10000 training step(s), test accuracy using average model is 0.9722\n"
     ]
    }
   ],
   "source": [
     "import tensorflow as tf\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "mnist = input_data.read_data_sets(\"./data/MNIST/\", one_hot=True)\n",
     "\n",
     "\n",
     "INPUT_NODE = 784     # 28x28 input pixels, flattened\n",
     "OUTPUT_NODE = 10     # one logit per digit class\n",
     "LAYER1_NODE = 500 \n",
     "LAYER2_NODE = 500 \n",
     "LAYER3_NODE = 500 \n",
     "LAYER4_NODE = 500 \n",
     "LAYER5_NODE = 500 \n",
     "LAYER6_NODE = 500 \n",
     "LAYER7_NODE = 500 \n",
     "LAYER8_NODE = 300 \n",
     "LAYER9_NODE = 200\n",
     "LAYER10_NODE = 100 \n",
     "                              \n",
     "BATCH_SIZE = 100        \n",
     "\n",
     "# Model hyper-parameters\n",
     "LEARNING_RATE_BASE = 0.008      \n",
     "LEARNING_RATE_DECAY = 0.99    \n",
     "REGULARAZTION_RATE = 0.0001   \n",
     "TRAINING_STEPS = 10000        \n",
     "MOVING_AVERAGE_DECAY = 0.99 \n",
     "\n",
    "def inference(input_tensor, avg_class, W, B):\n",
    "    # 不使用滑动平均类\n",
    "    if avg_class == None:\n",
    "        ac_1=tf.matmul(input_tensor, W[0]) + B[0]\n",
    "        layer1 = ac_1*tf.nn.sigmoid(ac_1)\n",
    "        ac_2 = tf.matmul(layer1, W[1]) + B[1]\n",
    "        layer2 = ac_2*tf.nn.sigmoid(ac_2)\n",
    "        ac_3 = tf.matmul(layer2, W[2]) + B[2]\n",
    "        layer3 = ac_3*tf.nn.sigmoid(ac_3)\n",
    "        ac_4 = tf.matmul(layer3, W[3]) + B[3]\n",
    "        layer4 = ac_4*tf.nn.sigmoid(ac_4)\n",
    "        ac_5 = tf.matmul(layer4, W[4]) + B[4]\n",
    "        layer5 = ac_5*tf.nn.sigmoid(ac_5)\n",
    "        ac_6 = tf.matmul(layer5, W[5]) + B[5]\n",
    "        layer6 = ac_6*tf.nn.sigmoid(ac_6)\n",
    "        ac_7 = tf.matmul(layer6, W[6]) + B[6]\n",
    "        layer7 = ac_7*tf.nn.sigmoid(ac_7)\n",
    "        ac_8 = tf.matmul(layer7, W[7]) + B[7]\n",
    "        layer8 = ac_8*tf.nn.sigmoid(ac_8)\n",
    "        ac_9 = tf.matmul(layer8, W[8]) + B[8]\n",
    "        layer9 = ac_9*tf.nn.sigmoid(ac_9)\n",
    "        ac_10 = tf.matmul(layer9, W[9]) + B[9]\n",
    "        layer10 = ac_10*tf.nn.sigmoid(ac_10)\n",
    "        return tf.matmul(layer10, W[10]) + B[10]\n",
    "    \n",
    "    else:\n",
    "        ac_1=tf.matmul(input_tensor, avg_class.average(W[0])) + avg_class.average(B[0])\n",
    "        layer1 = ac_1*tf.nn.sigmoid(ac_1)\n",
    "        ac_2=tf.matmul(layer1, avg_class.average(W[1])) + avg_class.average(B[1])\n",
    "        layer2 = ac_2*tf.nn.sigmoid(ac_2)\n",
    "        ac_3=tf.matmul(layer2, avg_class.average(W[2])) + avg_class.average(B[2])\n",
    "        layer3 = ac_3*tf.nn.sigmoid(ac_3)\n",
    "        ac_4=tf.matmul(layer3, avg_class.average(W[3])) + avg_class.average(B[3])\n",
    "        layer4 = ac_4*tf.nn.sigmoid(ac_4)\n",
    "        ac_5=tf.matmul(layer4, avg_class.average(W[4])) + avg_class.average(B[4])\n",
    "        layer5 = ac_5*tf.nn.sigmoid(ac_5)\n",
    "        ac_6=tf.matmul(layer5, avg_class.average(W[5])) + avg_class.average(B[5])\n",
    "        layer6 = ac_6*tf.nn.sigmoid(ac_6)\n",
    "        ac_7=tf.matmul(layer6, avg_class.average(W[6])) + avg_class.average(B[6])\n",
    "        layer7 = ac_7*tf.nn.sigmoid(ac_7)\n",
    "        ac_8=tf.matmul(layer7, avg_class.average(W[7])) + avg_class.average(B[7])\n",
    "        layer8 = ac_8*tf.nn.sigmoid(ac_8)\n",
    "        ac_9=tf.matmul(layer8, avg_class.average(W[8])) + avg_class.average(B[8])\n",
    "        layer9 = ac_9*tf.nn.sigmoid(ac_9)\n",
    "        ac_10=tf.matmul(layer9, avg_class.average(W[9])) + avg_class.average(B[9])\n",
    "        layer10 = ac_10*tf.nn.sigmoid(ac_10)\n",
    "        return tf.matmul(layer10, avg_class.average(W[10])) + avg_class.average(B[10])  \n",
    "    \n",
    "def train(mnist):\n",
    "    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n",
    "    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n",
    "    \n",
    "    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))\n",
    "    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))\n",
    "    \n",
    "    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, LAYER2_NODE], stddev=0.1))\n",
    "    biases2 = tf.Variable(tf.constant(0.1, shape=[ LAYER2_NODE]))\n",
    "    \n",
    "    weights3 = tf.Variable(tf.truncated_normal([ LAYER2_NODE,  LAYER3_NODE], stddev=0.1))\n",
    "    biases3 = tf.Variable(tf.constant(0.1, shape=[LAYER3_NODE]))\n",
    "    \n",
    "    weights4 = tf.Variable(tf.truncated_normal([LAYER3_NODE, LAYER4_NODE], stddev=0.1))\n",
    "    biases4 = tf.Variable(tf.constant(0.1, shape=[LAYER4_NODE]))\n",
    "    \n",
    "    weights5 = tf.Variable(tf.truncated_normal([LAYER4_NODE, LAYER5_NODE], stddev=0.1))\n",
    "    biases5 = tf.Variable(tf.constant(0.1, shape=[LAYER5_NODE]))\n",
    "    \n",
    "    weights6 = tf.Variable(tf.truncated_normal([LAYER5_NODE, LAYER6_NODE], stddev=0.1))\n",
    "    biases6 = tf.Variable(tf.constant(0.1, shape=[LAYER6_NODE]))\n",
    "    \n",
    "    weights7 = tf.Variable(tf.truncated_normal([LAYER6_NODE, LAYER7_NODE], stddev=0.1))\n",
    "    biases7 = tf.Variable(tf.constant(0.1, shape=[LAYER7_NODE]))\n",
    "    \n",
    "    weights8 = tf.Variable(tf.truncated_normal([LAYER7_NODE, LAYER8_NODE], stddev=0.1))\n",
    "    biases8 = tf.Variable(tf.constant(0.1, shape=[LAYER8_NODE]))\n",
    "    \n",
    "    weights9 = tf.Variable(tf.truncated_normal([LAYER8_NODE, LAYER9_NODE], stddev=0.1))\n",
    "    biases9 = tf.Variable(tf.constant(0.1, shape=[LAYER9_NODE]))\n",
    "    \n",
    "    weights10 = tf.Variable(tf.truncated_normal([LAYER9_NODE, LAYER10_NODE], stddev=0.1))\n",
    "    biases10 = tf.Variable(tf.constant(0.1, shape=[LAYER10_NODE]))\n",
    "    \n",
    "    weights11 = tf.Variable(tf.truncated_normal([LAYER10_NODE, OUTPUT_NODE], stddev=0.1))\n",
    "    biases11 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))\n",
    "    \n",
    "    W=[weights1, weights2, weights3, weights4, weights5, weights6, weights7, weights8, weights9, weights10, weights11]\n",
    "    B=[biases1, biases2, biases3, biases4, biases5, biases6, biases7, biases8, biases9, biases10, biases11]\n",
    "    \n",
    "    # 计算不含滑动平均类的前向传播结果\n",
    "    y = inference(x, None, W, B)\n",
    "    \n",
    "    # 定义训练轮数及相关的滑动平均类 \n",
    "    global_step = tf.Variable(0, trainable=False)\n",
    "    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n",
    "    variables_averages_op = variable_averages.apply(tf.trainable_variables())\n",
    "    average_y = inference(x, variable_averages, W, B)\n",
    "    \n",
    "    # 计算交叉熵及其平均值\n",
    "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))\n",
    "    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
    "    \n",
    "    # 损失函数的计算\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)\n",
    "    regularaztion = regularizer(W[0]) \n",
    "    for i in range(1,11):\n",
    "        regularazation=regularaztion + regularizer(W[i]) \n",
    "    loss = cross_entropy_mean + regularaztion\n",
    "    \n",
    "    # 设置指数衰减的学习率。\n",
    "    learning_rate = tf.train.exponential_decay(\n",
    "        LEARNING_RATE_BASE,\n",
    "        global_step,\n",
    "        mnist.train.num_examples / BATCH_SIZE,\n",
    "        LEARNING_RATE_DECAY,\n",
    "        staircase=True)\n",
    "    \n",
    "    # 优化损失函数\n",
    "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "    \n",
    "    # 反向传播更新参数和更新每一个参数的滑动平均值\n",
    "    with tf.control_dependencies([train_step, variables_averages_op]):\n",
    "        train_op = tf.no_op(name='train')\n",
    "\n",
    "    # 计算正确率\n",
    "    correct_prediction = tf.equal(tf.argmax(average_y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    \n",
    "    # 初始化会话并开始训练过程。\n",
    "    with tf.Session() as sess:\n",
    "        tf.global_variables_initializer().run()\n",
    "        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n",
    "        test_feed = {x: mnist.test.images, y_: mnist.test.labels} \n",
    "        \n",
    "        # 循环的训练神经网络。\n",
    "        for i in range(TRAINING_STEPS):\n",
    "            if i % 1000 == 0:\n",
    "                validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n",
    "                print(\"After %d training step(s), validation accuracy using average model is %g \" % (i, validate_acc))\n",
    "            \n",
    "            xs,ys=mnist.train.next_batch(BATCH_SIZE)\n",
    "            sess.run(train_op,feed_dict={x:xs,y_:ys})\n",
    "\n",
    "        test_acc=sess.run(accuracy,feed_dict=test_feed)\n",
    "        print((\"After %d training step(s), test accuracy using average model is %g\" %(TRAINING_STEPS, test_acc)))\n",
    "\n",
    "train(mnist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
