{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 导入工具包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "from matplotlib import pyplot as plt\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 读取数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST_data\\train-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting ./data/MNIST_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting ./data/MNIST_data\\t10k-labels-idx1-ubyte.gz\n",
      "(55000, 784)\n",
      "(55000, 10)\n",
      "(5000, 784)\n",
      "(5000, 10)\n",
      "(10000, 784)\n",
      "(10000, 10)\n"
     ]
    }
   ],
   "source": [
    "mnist = input_data.read_data_sets(\"./data/MNIST_data\", one_hot=True)# If the data was already downloaded, just point this at the existing directory.\n",
    "\n",
    "print(mnist.train.images.shape)\n",
    "print(mnist.train.labels.shape)\n",
    "\n",
    "print(mnist.validation.images.shape)\n",
    "print(mnist.validation.labels.shape)\n",
    "\n",
    "print(mnist.test.images.shape)\n",
    "print(mnist.test.labels.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 如何修改隐层数量，修改后会起到什么样的效果10分。 "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 先看下单层隐层网络的结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameter settings\n",
    "numClasses = 10 \n",
    "inputSize = 784 \n",
    "numHiddenUnits = 50 \n",
    "numHiddenUnitsLayer2 = 100\n",
    "trainingIterations = 10000 \n",
    "batchSize = 100 \n",
    "learning_rate = 0.3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Placeholders: flattened 28x28 images (784 floats) and one-hot labels.\n",
    "X = tf.placeholder(tf.float32, shape = [None, inputSize])\n",
    "y = tf.placeholder(tf.float32, shape = [None,numClasses])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single-hidden-layer network model.\n",
    "w1=tf.Variable(tf.random_normal([inputSize, numHiddenUnits], stddev=0.1))\n",
    "# FIX: tf.constant(0.1, shape=[...]) builds a per-unit bias vector; the old\n",
    "# tf.Variable(tf.constant(0.1), [n]) passed the shape list as the `trainable`\n",
    "# positional argument and produced a single scalar bias shared by all units.\n",
    "b1=tf.Variable(tf.constant(0.1, shape=[numHiddenUnits]))\n",
    "w2=tf.Variable(tf.random_normal([numHiddenUnits, numClasses], stddev=0.1))\n",
    "b2=tf.Variable(tf.constant(0.1, shape=[numClasses]))\n",
    "\n",
    "hiddenLayerOutput1 = tf.matmul(X, w1) + b1\n",
    "hiddenLayerOutput1 = tf.nn.relu(hiddenLayerOutput1)\n",
    "# FIX: no ReLU on the output -- softmax_cross_entropy_with_logits expects raw\n",
    "# (unscaled) logits; a ReLU here would clamp negative logits to zero and\n",
    "# distort both the loss and the learned decision boundaries.\n",
    "finalOutput1 = tf.matmul(hiddenLayerOutput1, w2) + b2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single hidden layer: loss, optimizer and accuracy ops.\n",
    "cross_entropy_loss_1 =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=finalOutput1))\n",
    "optimizer_1 =tf.train.GradientDescentOptimizer(learning_rate=learning_rate)# plain gradient-descent optimizer\n",
    "train_op_1=optimizer_1.minimize(cross_entropy_loss_1)\n",
    "correct_pred_1 = tf.equal(tf.argmax(finalOutput1, 1), tf.argmax(y, 1))\n",
    "accuracy_1 = tf.reduce_mean(tf.cast(correct_pred_1, tf.float32))# model accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "after 0 training steps, the loss is 2.32119, the validation accuracy is 0.1898\n",
      "after 1000 training steps, the loss is 0.40943, the validation accuracy is 0.954\n",
      "after 2000 training steps, the loss is 0.104501, the validation accuracy is 0.965\n",
      "after 3000 training steps, the loss is 0.0342909, the validation accuracy is 0.969\n",
      "after 4000 training steps, the loss is 0.0449765, the validation accuracy is 0.9724\n",
      "after 5000 training steps, the loss is 0.0361572, the validation accuracy is 0.974\n",
      "after 6000 training steps, the loss is 0.0208637, the validation accuracy is 0.9768\n",
      "after 7000 training steps, the loss is 0.0430203, the validation accuracy is 0.9774\n",
      "after 8000 training steps, the loss is 0.0431367, the validation accuracy is 0.9742\n",
      "after 9000 training steps, the loss is 0.0636933, the validation accuracy is 0.9754\n",
      "the training is finish!\n",
      "the test accuarcy is: 0.9728\n"
     ]
    }
   ],
   "source": [
    "saver = tf.train.Saver()\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    # Fixed feed dicts for the validation and test sets.\n",
    "    validate_data = {\n",
    "        X: mnist.validation.images,\n",
    "        y: mnist.validation.labels,\n",
    "    }\n",
    "    test_data = {X: mnist.test.images, y: mnist.test.labels}\n",
    "    for i in range(trainingIterations):\n",
    "        xs, ys = mnist.train.next_batch(batchSize)\n",
    "        _, loss = sess.run(\n",
    "            [train_op_1, cross_entropy_loss_1],\n",
    "            feed_dict={\n",
    "                X: xs,\n",
    "                y: ys,\n",
    "            })\n",
    "\n",
    "        # Report loss and validation accuracy every 1000 iterations\n",
    "        # (the old comment said 100, which contradicted the code).\n",
    "        if i % 1000 == 0:\n",
    "            validate_accuracy = accuracy_1.eval(session=sess, feed_dict=validate_data)\n",
    "            print(\n",
    "                \"after %d training steps, the loss is %g, the validation accuracy is %g\"\n",
    "                % (i, loss, validate_accuracy))\n",
    "            #saver.save(sess, './data/model.ckpt', global_step=i)\n",
    "\n",
    "    print(\"the training is finished!\")\n",
    "    # Final accuracy on the held-out test set.\n",
    "    acc = sess.run(accuracy_1, feed_dict=test_data)\n",
    "    print(\"the test accuracy is:\", acc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 再来看下双层隐层的网络模型结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same hyperparameters, restated for the two-hidden-layer model.\n",
    "numClasses = 10 \n",
    "inputSize = 784 \n",
    "numHiddenUnits = 50 \n",
    "numHiddenUnitsLayer2 = 100\n",
    "trainingIterations = 10000 \n",
    "batchSize = 100 \n",
    "learning_rate = 0.3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Two-hidden-layer network model.\n",
    "W1 = tf.Variable(tf.random_normal([inputSize, numHiddenUnits], stddev=0.1))\n",
    "# FIX: tf.constant(0.1, shape=[...]) gives one bias per unit; the old form\n",
    "# tf.Variable(tf.constant(0.1), [n]) passed the list as `trainable` and\n",
    "# created a scalar bias.\n",
    "B1 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnits]))\n",
    "W2 = tf.Variable(tf.random_normal([numHiddenUnits, numHiddenUnitsLayer2], stddev=0.1))\n",
    "B2 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnitsLayer2]))\n",
    "W3 = tf.Variable(tf.random_normal([numHiddenUnitsLayer2, numClasses], stddev=0.1))\n",
    "B3 = tf.Variable(tf.constant(0.1, shape=[numClasses]))\n",
    "\n",
    "hiddenLayerOutput2 = tf.matmul(X, W1) + B1\n",
    "hiddenLayerOutput2 = tf.nn.relu(hiddenLayerOutput2)\n",
    "hiddenLayer2Output2 = tf.matmul(hiddenLayerOutput2, W2) + B2\n",
    "hiddenLayer2Output2 = tf.nn.relu(hiddenLayer2Output2)\n",
    "# Raw logits -- softmax is applied inside the loss op.\n",
    "finalOutput2 = tf.matmul(hiddenLayer2Output2, W3) + B3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Two hidden layers: loss, optimizer and accuracy ops.\n",
    "cross_entropy_loss_2 =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=finalOutput2))\n",
    "optimizer_2 =tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
    "train_op_2=optimizer_2.minimize(cross_entropy_loss_2)\n",
    "correct_pred_2 = tf.equal(tf.argmax(finalOutput2, 1), tf.argmax(y,1))\n",
    "accuracy_2 = tf.reduce_mean(tf.cast(correct_pred_2, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "after 0 training steps, the loss is 2.43653, the validation accuracy is 0.1422\n",
      "after 1000 training steps, the loss is 0.251387, the validation accuracy is 0.9632\n",
      "after 2000 training steps, the loss is 0.135899, the validation accuracy is 0.9712\n",
      "after 3000 training steps, the loss is 0.0594073, the validation accuracy is 0.974\n",
      "after 4000 training steps, the loss is 0.0249641, the validation accuracy is 0.9768\n",
      "after 5000 training steps, the loss is 0.0226018, the validation accuracy is 0.979\n",
      "after 6000 training steps, the loss is 0.0177473, the validation accuracy is 0.9796\n",
      "after 7000 training steps, the loss is 0.00161294, the validation accuracy is 0.9798\n",
      "after 8000 training steps, the loss is 0.00169621, the validation accuracy is 0.9776\n",
      "after 9000 training steps, the loss is 0.00799731, the validation accuracy is 0.9784\n",
      "the training is finish!\n",
      "the test accuarcy is: 0.9782\n"
     ]
    }
   ],
   "source": [
    "saver = tf.train.Saver()\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    # Fixed feed dicts for the validation and test sets.\n",
    "    validate_data = {\n",
    "        X: mnist.validation.images,\n",
    "        y: mnist.validation.labels,\n",
    "    }\n",
    "    test_data = {X: mnist.test.images, y: mnist.test.labels}\n",
    "    for i in range(trainingIterations):\n",
    "        xs, ys = mnist.train.next_batch(batchSize)\n",
    "        _, loss = sess.run(\n",
    "            [train_op_2, cross_entropy_loss_2],\n",
    "            feed_dict={\n",
    "                X: xs,\n",
    "                y: ys,\n",
    "            })\n",
    "\n",
    "        # Report loss and validation accuracy every 1000 iterations.\n",
    "        if i % 1000 == 0:\n",
    "            validate_accuracy = accuracy_2.eval(session=sess, feed_dict=validate_data)\n",
    "            print(\n",
    "                \"after %d training steps, the loss is %g, the validation accuracy is %g\"\n",
    "                % (i, loss, validate_accuracy))\n",
    "            #saver.save(sess, './data/model.ckpt', global_step=i)\n",
    "\n",
    "    print(\"the training is finished!\")\n",
    "    # Final accuracy on the held-out test set.\n",
    "    acc = sess.run(accuracy_2, feed_dict=test_data)\n",
    "    print(\"the test accuracy is:\", acc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "从上面的结果可以看出，增加一层隐层之后，模型的准确率提高了一点。但还不够，再增加神经元的数量看看。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 如何修改神经元个数，起到了什么样的效果10分。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 修改神经元个数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameter settings\n",
    "numClasses = 10 \n",
    "inputSize = 784 \n",
    "numHiddenUnits = 256 # first hidden layer widened\n",
    "numHiddenUnitsLayer2 = 512# second hidden layer widened\n",
    "trainingIterations = 10000 \n",
    "batchSize = 100 \n",
    "learning_rate = 0.2 # learning rate lowered here"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "after 0 training steps, the loss is 3.96999, the validation accuracy is 0.208\n",
      "after 1000 training steps, the loss is 0.184377, the validation accuracy is 0.9686\n",
      "after 2000 training steps, the loss is 0.113127, the validation accuracy is 0.9762\n",
      "after 3000 training steps, the loss is 0.0899009, the validation accuracy is 0.9756\n",
      "after 4000 training steps, the loss is 0.0053014, the validation accuracy is 0.9802\n",
      "after 5000 training steps, the loss is 0.00842555, the validation accuracy is 0.9816\n",
      "after 6000 training steps, the loss is 0.00159374, the validation accuracy is 0.982\n",
      "after 7000 training steps, the loss is 0.0012978, the validation accuracy is 0.9814\n",
      "after 8000 training steps, the loss is 0.00381769, the validation accuracy is 0.9806\n",
      "after 9000 training steps, the loss is 0.00161435, the validation accuracy is 0.9824\n",
      "the training is finish!\n",
      "the test accuarcy is: 0.9809\n"
     ]
    }
   ],
   "source": [
    "# Rebuild the two-hidden-layer model with the wider layers, then train it.\n",
    "W1 = tf.Variable(tf.random_normal([inputSize, numHiddenUnits], stddev=0.1))\n",
    "# FIX: shape= gives a per-unit bias vector; the old tf.Variable(tf.constant(0.1), [n])\n",
    "# passed the list as `trainable` and created a scalar bias.\n",
    "B1 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnits]))\n",
    "W2 = tf.Variable(tf.random_normal([numHiddenUnits, numHiddenUnitsLayer2], stddev=0.1))\n",
    "B2 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnitsLayer2]))\n",
    "W3 = tf.Variable(tf.random_normal([numHiddenUnitsLayer2, numClasses], stddev=0.1))\n",
    "B3 = tf.Variable(tf.constant(0.1, shape=[numClasses]))\n",
    "\n",
    "hiddenLayerOutput2 = tf.matmul(X, W1) + B1\n",
    "hiddenLayerOutput2 = tf.nn.relu(hiddenLayerOutput2)\n",
    "hiddenLayer2Output2 = tf.matmul(hiddenLayerOutput2, W2) + B2\n",
    "hiddenLayer2Output2 = tf.nn.relu(hiddenLayer2Output2)\n",
    "finalOutput2 = tf.matmul(hiddenLayer2Output2, W3) + B3  # raw logits\n",
    "\n",
    "cross_entropy_loss_2 =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=finalOutput2))\n",
    "optimizer_2 =tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
    "train_op_2=optimizer_2.minimize(cross_entropy_loss_2)\n",
    "correct_pred_2 = tf.equal(tf.argmax(finalOutput2, 1), tf.argmax(y,1))\n",
    "accuracy_2 = tf.reduce_mean(tf.cast(correct_pred_2, tf.float32))\n",
    "\n",
    "saver = tf.train.Saver()\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    # Fixed feed dicts for the validation and test sets.\n",
    "    validate_data = {\n",
    "        X: mnist.validation.images,\n",
    "        y: mnist.validation.labels,\n",
    "    }\n",
    "    test_data = {X: mnist.test.images, y: mnist.test.labels}\n",
    "    for i in range(trainingIterations):\n",
    "        xs, ys = mnist.train.next_batch(batchSize)\n",
    "        _, loss = sess.run(\n",
    "            [train_op_2, cross_entropy_loss_2],\n",
    "            feed_dict={\n",
    "                X: xs,\n",
    "                y: ys,\n",
    "            })\n",
    "\n",
    "        # Report loss and validation accuracy every 1000 iterations.\n",
    "        if i % 1000 == 0:\n",
    "            validate_accuracy = accuracy_2.eval(session=sess, feed_dict=validate_data)\n",
    "            print(\n",
    "                \"after %d training steps, the loss is %g, the validation accuracy is %g\"\n",
    "                % (i, loss, validate_accuracy))\n",
    "            #saver.save(sess, './data/model.ckpt', global_step=i)\n",
    "\n",
    "    print(\"the training is finished!\")\n",
    "    # Final accuracy on the held-out test set.\n",
    "    acc = sess.run(accuracy_2, feed_dict=test_data)\n",
    "    print(\"the test accuracy is:\", acc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 没有明显报错的正常的log输出 ，log中的模型准确率达到98%  60分。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "可以看到，最后的准确率已经在98%以上了。说明增加神经元的个数对模型的结果起到了积极的作用（但神经元的个数不是越多越好，计算量会变大，也容易发生过拟合。）。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 在模型中添加L1/L2正则化 10分。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "从上面的结果可以看出，模型在后期已经有过拟合的现象了（精度在迭代7000次后开始下降），所以需要添加正则项来防止模型的过拟合。   \n",
    "正则化是跟着损失函数在一起的，主要的作用是为了防止模型过拟合，控制参数变化幅度。 "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 使⽤不同的初始化⽅式对模型有什么影响 10分。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "参数初始化的作用是用来控制模型的收敛速度，参数初始化越合理，模型迭代的次数越少，越容易收敛到最佳状态。  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters for the regularized model (same as the previous run).\n",
    "numClasses = 10 \n",
    "inputSize = 784 \n",
    "numHiddenUnits = 256 \n",
    "numHiddenUnitsLayer2 = 512\n",
    "trainingIterations = 10000 \n",
    "batchSize = 100 \n",
    "learning_rate = 0.2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): re-running this adds fresh placeholders to the same default\n",
    "# graph; ops built in earlier cells still reference the old X/y. Only ops\n",
    "# built after this cell use these placeholders.\n",
    "X = tf.placeholder(tf.float32, shape = [None, inputSize])\n",
    "y = tf.placeholder(tf.float32, shape = [None,numClasses])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "# FIX: tf.random_normal samples a *normal* distribution (the old comment\n",
    "# wrongly said uniform); use tf.random_uniform for uniform init instead.\n",
    "W1 = tf.Variable(tf.random_normal([inputSize, numHiddenUnits], stddev=0.1))# normal-distribution init\n",
    "# W1 = tf.Variable(tf.truncated_normal([inputSize, numHiddenUnits], stddev=0.1))# truncated-normal init (missing paren fixed)\n",
    "# FIX: shape= gives per-unit bias vectors instead of scalar biases.\n",
    "B1 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnits]))\n",
    "W2 = tf.Variable(tf.random_normal([numHiddenUnits, numHiddenUnitsLayer2], stddev=0.1))\n",
    "B2 = tf.Variable(tf.constant(0.1, shape=[numHiddenUnitsLayer2]))\n",
    "W3 = tf.Variable(tf.random_normal([numHiddenUnitsLayer2, numClasses], stddev=0.1))\n",
    "B3 = tf.Variable(tf.constant(0.1, shape=[numClasses]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Forward pass: two ReLU hidden layers, raw logits at the output.\n",
    "hiddenLayerOutput2 = tf.matmul(X, W1) + B1\n",
    "hiddenLayerOutput2 = tf.nn.relu(hiddenLayerOutput2)\n",
    "hiddenLayer2Output2 = tf.matmul(hiddenLayerOutput2, W2) + B2\n",
    "hiddenLayer2Output2 = tf.nn.relu(hiddenLayer2Output2)\n",
    "finalOutput2 = tf.matmul(hiddenLayer2Output2, W3) + B3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "# L2 regularization\n",
    "REGULARIZATION_RATE=0.001 # weight of the regularization term\n",
    "# regularizer = tf.contrib.layers.l1_regularizer(REGULARIZATION_RATE)  # L1 regularization alternative\n",
    "regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
    "regularization = regularizer(W1) + regularizer(W2)+regularizer(W3) # penalize weights only, not biases"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Regularized objective: cross-entropy plus the L2 penalty built above.\n",
    "cross_entropy_loss_2 =tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=finalOutput2))\n",
    "total_loss = cross_entropy_loss_2 + regularization # loss function + regularization term\n",
    "optimizer_l2 =tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n",
    "train_op_l2=optimizer_l2.minimize(total_loss)\n",
    "correct_pred_l2 = tf.equal(tf.argmax(finalOutput2, 1), tf.argmax(y,1))\n",
    "accuracy_l2 = tf.reduce_mean(tf.cast(correct_pred_l2, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "after 0 training steps, the loss is 5.3321, the validation accuracy is 0.2432\n",
      "after 1000 training steps, the loss is 1.21575, the validation accuracy is 0.969\n",
      "after 2000 training steps, the loss is 0.893099, the validation accuracy is 0.9744\n",
      "after 3000 training steps, the loss is 0.635589, the validation accuracy is 0.9776\n",
      "after 4000 training steps, the loss is 0.495809, the validation accuracy is 0.9782\n",
      "after 5000 training steps, the loss is 0.385474, the validation accuracy is 0.9794\n",
      "after 6000 training steps, the loss is 0.301344, the validation accuracy is 0.9784\n",
      "after 7000 training steps, the loss is 0.230302, the validation accuracy is 0.9794\n",
      "after 8000 training steps, the loss is 0.210708, the validation accuracy is 0.9804\n",
      "after 9000 training steps, the loss is 0.17619, the validation accuracy is 0.9816\n",
      "the training is finish!\n",
      "the test accuarcy is: 0.9792\n"
     ]
    }
   ],
   "source": [
    "saver = tf.train.Saver()\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    # Fixed feed dicts for the validation and test sets.\n",
    "    validate_data = {\n",
    "        X: mnist.validation.images,\n",
    "        y: mnist.validation.labels,\n",
    "    }\n",
    "    test_data = {X: mnist.test.images, y: mnist.test.labels}\n",
    "    for i in range(trainingIterations):\n",
    "        xs, ys = mnist.train.next_batch(batchSize)\n",
    "        _, loss = sess.run(\n",
    "            [train_op_l2, total_loss],\n",
    "            feed_dict={\n",
    "                X: xs,\n",
    "                y: ys,\n",
    "            })\n",
    "\n",
    "        # Report loss and validation accuracy every 1000 iterations.\n",
    "        if i % 1000 == 0:\n",
    "            validate_accuracy = accuracy_l2.eval(session=sess, feed_dict=validate_data)\n",
    "            print(\n",
    "                \"after %d training steps, the loss is %g, the validation accuracy is %g\"\n",
    "                % (i, loss, validate_accuracy))\n",
    "            #saver.save(sess, './data/model.ckpt', global_step=i)\n",
    "\n",
    "    print(\"the training is finished!\")\n",
    "    # Final accuracy on the held-out test set.\n",
    "    acc = sess.run(accuracy_l2, feed_dict=test_data)\n",
    "    print(\"the test accuracy is:\", acc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "从上面的结果可以看出，验证集的精度是一直在上升，并没有存在下降的现象，因此，参数的正则化控制了模型的过拟合现象。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
