{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/jack/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-698ada706af1>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
     "# Import data: download MNIST to data_dir (if absent) and load it with\n",
     "# one-hot labels. read_data_sets is deprecated per the warnings above, but\n",
     "# still yields mnist.train / mnist.validation / mnist.test DataSets here.\n",
     "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Create the model: x is fed flattened 28x28 = 784-pixel MNIST images;\n",
     "# the leading None dimension lets the batch size vary.\n",
     "x = tf.placeholder(tf.float32, [None, 784])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Define loss and optimizer inputs: y_ is fed the one-hot ground-truth\n",
     "# labels (10 classes), matching the one_hot=True load above.\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Two-hidden-layer MLP: 784 -> 512 -> 128 -> 10.\n",
     "# Hidden layers use ReLU; weights are drawn from a normal distribution\n",
     "# with stddev=0.1, biases start at zero.\n",
     "W1 = tf.Variable(tf.random_normal([784,512],stddev=0.1))\n",
     "b1 = tf.Variable(tf.zeros([512]))\n",
     "y1 = tf.nn.relu(tf.matmul(x,W1)+b1)\n",
     "                 \n",
     "W2 = tf.Variable(tf.random_normal([512,128],stddev=0.1))\n",
     "b2 = tf.Variable(tf.zeros([128]))\n",
     "y2 = tf.nn.relu(tf.matmul(y1,W2)+b2)\n",
     "                 \n",
     "# Output layer is linear: y holds raw logits; softmax is applied inside\n",
     "# the cross-entropy loss op defined in a later cell.\n",
     "W3 = tf.Variable(tf.random_normal([128,10],stddev=0.1))\n",
     "b3 = tf.Variable(tf.zeros([10]))\n",
     "y = tf.matmul(y2,W3)+b3"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "   为提高到98%的准确率，在原代码中加隐层，此代码使用的是双隐层，经过多次试验，将第一层的神经元数量设为512，第二层的神经元数量设为128的效果较好，其中神经元数量过多似乎并没有什么意义，对最终的结果影响不大，计算的时间反而会增多。\n",
    "   \n",
     "   隐层采用的是ReLU激活函数，它的右侧是线性函数，左侧硬饱和，右侧无饱和，优点是改善了梯度弥散，缺点是左侧会出现梯度一直为0的情况，导致神经元不再更新。在此模型中，ReLU更适合作为隐层的激活函数。\n",
    "   \n",
     "   变量采用的是random_normal(正态分布随机数)，均值为mean，标准差为stddev，在该模型中，stddev设为0.1效果最好。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "# L2 regularization term with scale 0.001, added to the loss later.\n",
     "# NOTE(review): only the output-layer weights W3 are penalized here;\n",
     "# W1 and W2 are left unregularized -- confirm this is intentional.\n",
     "reg = tf.contrib.layers.l2_regularizer(0.001)(W3)  # L2 regularization loss term"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "   这里采用的是L2正则项，实际上没加正则项准确率已经能达到98%的正确率。在我加L1正则项时，正确率反而有所降低，我觉得可能是因为L1范数使权数稀疏造成的，加上L2正则项，将参数调到0.01，对结果仅有微小的提高，调的过高或过低都会使正确率下降。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-7-e99206625028>:11: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "# outputs of 'y', and then average across the batch.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)\n",
    ")\n",
    "cross_entropy_loss = cross_entropy + reg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Adagrad adapts each parameter's effective learning rate from its\n",
     "# accumulated gradients; 0.3 is the global initial rate.\n",
     "train_step = tf.train.AdagradOptimizer(0.3).minimize(cross_entropy_loss)\n",
     "# Create the session\n",
     "sess = tf.Session()\n",
     "init_op = tf.global_variables_initializer()\n",
     "# Run variable initialization and memory allocation\n",
     "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
     "   这里老师的代码中用的是实现梯度下降算法的优化器，我用这个优化器跑代码只能达到97.5%左右的正确率，后来尝试了多种优化器算法，发现 AdagradOptimizer 的效果最好，对结果有显著的提升。学习率设为0.3。\n",
    "   \n",
    "AdagradOptimizer 与 GradientDescentOptimizer 比较:\n",
    "\n",
    "   AdaGrad 可以自动变更学习率，只需要设定一个全局的学习率。如果某次梯度大，那么学习速率衰减的就快一点，如果某次的梯度小，那么学习率衰减的就慢一些。对于每个参数，随着其更新的总距离增多，其学习率也随之变慢。\n",
    "   \n",
    "   SGD 中，学习率需要逐渐减小，否则模型就无法收敛，由于误差，会导致每一次迭代的梯度受抽样的影响比较大。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step: 100,    trian accuracy:0.901145,    test accuracy:0.8974,    loss:0.341597\n",
      "step: 200,    trian accuracy:0.934345,    test accuracy:0.9302,    loss:0.242442\n",
      "step: 300,    trian accuracy:0.935727,    test accuracy:0.9352,    loss:0.233482\n",
      "step: 400,    trian accuracy:0.955218,    test accuracy:0.9518,    loss:0.165962\n",
      "step: 500,    trian accuracy:0.967018,    test accuracy:0.9612,    loss:0.127372\n",
      "step: 600,    trian accuracy:0.965800,    test accuracy:0.9611,    loss:0.126396\n",
      "step: 700,    trian accuracy:0.974309,    test accuracy:0.9687,    loss:0.106882\n",
      "step: 800,    trian accuracy:0.976782,    test accuracy:0.9692,    loss:0.0994792\n",
      "step: 900,    trian accuracy:0.974218,    test accuracy:0.9651,    loss:0.102035\n",
      "step:1000,    trian accuracy:0.979782,    test accuracy:0.9706,    loss:0.0870287\n",
      "step:1100,    trian accuracy:0.980036,    test accuracy:0.9697,    loss:0.0839828\n",
      "step:1200,    trian accuracy:0.979327,    test accuracy:0.9672,    loss:0.0807314\n",
      "step:1300,    trian accuracy:0.982782,    test accuracy:0.9709,    loss:0.0739156\n",
      "step:1400,    trian accuracy:0.986164,    test accuracy:0.9761,    loss:0.0620319\n",
      "step:1500,    trian accuracy:0.980455,    test accuracy:0.9699,    loss:0.0776022\n",
      "step:1600,    trian accuracy:0.988436,    test accuracy:0.9765,    loss:0.0546685\n",
      "step:1700,    trian accuracy:0.987582,    test accuracy:0.9765,    loss:0.0570903\n",
      "step:1800,    trian accuracy:0.987891,    test accuracy:0.9755,    loss:0.0563887\n",
      "step:1900,    trian accuracy:0.990145,    test accuracy:0.9766,    loss:0.0481018\n",
      "step:2000,    trian accuracy:0.988491,    test accuracy:0.9774,    loss:0.0515047\n",
      "step:2100,    trian accuracy:0.984582,    test accuracy:0.9702,    loss:0.0633537\n",
      "step:2200,    trian accuracy:0.989564,    test accuracy:0.9768,    loss:0.0478639\n",
      "step:2300,    trian accuracy:0.991309,    test accuracy:0.9778,    loss:0.0414214\n",
      "step:2400,    trian accuracy:0.992473,    test accuracy:0.9768,    loss:0.0391189\n",
      "step:2500,    trian accuracy:0.991782,    test accuracy:0.9746,    loss:0.0400138\n",
      "step:2600,    trian accuracy:0.992109,    test accuracy:0.9771,    loss:0.0387105\n",
      "step:2700,    trian accuracy:0.991182,    test accuracy:0.9732,    loss:0.0414414\n",
      "step:2800,    trian accuracy:0.991655,    test accuracy:0.9772,    loss:0.0386084\n",
      "step:2900,    trian accuracy:0.994200,    test accuracy:0.9792,    loss:0.032519\n",
      "step:3000,    trian accuracy:0.993655,    test accuracy:0.9780,    loss:0.0335037\n",
      "step:3100,    trian accuracy:0.994691,    test accuracy:0.9765,    loss:0.0315932\n",
      "step:3200,    trian accuracy:0.994727,    test accuracy:0.9770,    loss:0.0305864\n",
      "step:3300,    trian accuracy:0.994382,    test accuracy:0.9768,    loss:0.0307327\n",
      "step:3400,    trian accuracy:0.997236,    test accuracy:0.9809,    loss:0.0232365\n",
      "step:3500,    trian accuracy:0.993436,    test accuracy:0.9766,    loss:0.0329657\n",
      "step:3600,    trian accuracy:0.994800,    test accuracy:0.9778,    loss:0.0292029\n",
      "step:3700,    trian accuracy:0.994782,    test accuracy:0.9791,    loss:0.0301284\n",
      "step:3800,    trian accuracy:0.996491,    test accuracy:0.9787,    loss:0.025179\n",
      "step:3900,    trian accuracy:0.997164,    test accuracy:0.9805,    loss:0.0227756\n",
      "step:4000,    trian accuracy:0.998291,    test accuracy:0.9811,    loss:0.0194869\n",
      "step:4100,    trian accuracy:0.997782,    test accuracy:0.9798,    loss:0.0205746\n",
      "step:4200,    trian accuracy:0.997727,    test accuracy:0.9804,    loss:0.0201639\n",
      "step:4300,    trian accuracy:0.997982,    test accuracy:0.9803,    loss:0.0192524\n",
      "step:4400,    trian accuracy:0.998345,    test accuracy:0.9806,    loss:0.0186025\n",
      "step:4500,    trian accuracy:0.998982,    test accuracy:0.9819,    loss:0.0166567\n",
      "step:4600,    trian accuracy:0.999127,    test accuracy:0.9802,    loss:0.0161828\n",
      "step:4700,    trian accuracy:0.998309,    test accuracy:0.9812,    loss:0.0175753\n",
      "step:4800,    trian accuracy:0.998727,    test accuracy:0.9804,    loss:0.0171217\n",
      "step:4900,    trian accuracy:0.998600,    test accuracy:0.9801,    loss:0.0165983\n",
      "step:5000,    trian accuracy:0.999236,    test accuracy:0.9800,    loss:0.0146607\n",
      "step:5100,    trian accuracy:0.999255,    test accuracy:0.9824,    loss:0.0141459\n",
      "step:5200,    trian accuracy:0.999200,    test accuracy:0.9805,    loss:0.0145706\n",
      "step:5300,    trian accuracy:0.999327,    test accuracy:0.9796,    loss:0.0142283\n",
      "step:5400,    trian accuracy:0.999691,    test accuracy:0.9822,    loss:0.0127664\n",
      "step:5500,    trian accuracy:0.999636,    test accuracy:0.9823,    loss:0.0131545\n",
      "step:5600,    trian accuracy:0.999709,    test accuracy:0.9812,    loss:0.0122106\n",
      "step:5700,    trian accuracy:0.999727,    test accuracy:0.9820,    loss:0.0117818\n",
      "step:5800,    trian accuracy:0.999745,    test accuracy:0.9819,    loss:0.0116796\n",
      "step:5900,    trian accuracy:0.999727,    test accuracy:0.9814,    loss:0.0122359\n",
      "step:6000,    trian accuracy:0.998836,    test accuracy:0.9793,    loss:0.0138903\n",
      "step:6100,    trian accuracy:0.999691,    test accuracy:0.9811,    loss:0.011267\n",
      "step:6200,    trian accuracy:0.999618,    test accuracy:0.9814,    loss:0.011134\n",
      "step:6300,    trian accuracy:0.999818,    test accuracy:0.9816,    loss:0.0108854\n",
      "step:6400,    trian accuracy:0.999800,    test accuracy:0.9812,    loss:0.0107872\n",
      "step:6500,    trian accuracy:0.999855,    test accuracy:0.9818,    loss:0.010754\n",
      "step:6600,    trian accuracy:0.999855,    test accuracy:0.9811,    loss:0.0105482\n",
      "step:6700,    trian accuracy:0.999855,    test accuracy:0.9815,    loss:0.0100398\n",
      "step:6800,    trian accuracy:0.999855,    test accuracy:0.9824,    loss:0.00984594\n",
      "step:6900,    trian accuracy:0.999891,    test accuracy:0.9819,    loss:0.00978439\n",
      "step:7000,    trian accuracy:0.999891,    test accuracy:0.9818,    loss:0.00982427\n",
      "step:7100,    trian accuracy:0.999600,    test accuracy:0.9809,    loss:0.0106728\n",
      "step:7200,    trian accuracy:0.999909,    test accuracy:0.9826,    loss:0.00932199\n",
      "step:7300,    trian accuracy:0.999891,    test accuracy:0.9819,    loss:0.00926466\n",
      "step:7400,    trian accuracy:0.999909,    test accuracy:0.9822,    loss:0.00910337\n",
      "step:7500,    trian accuracy:0.999945,    test accuracy:0.9822,    loss:0.00900503\n",
      "step:7600,    trian accuracy:0.999964,    test accuracy:0.9820,    loss:0.00892875\n",
      "step:7700,    trian accuracy:0.999873,    test accuracy:0.9820,    loss:0.00914064\n",
      "step:7800,    trian accuracy:0.999982,    test accuracy:0.9830,    loss:0.0086522\n",
      "step:7900,    trian accuracy:0.999945,    test accuracy:0.9823,    loss:0.00871449\n",
      "step:8000,    trian accuracy:0.999982,    test accuracy:0.9819,    loss:0.00847588\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "for i in range(8000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "# Test trained model\n",
    "    if (i+1)%100 == 0:\n",
    "        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        train_accuracy = sess.run(accuracy, feed_dict={x: mnist.train.images, y_: mnist.train.labels})\n",
    "        test_accuracy = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n",
    "        loss = sess.run(tf.reduce_mean(cross_entropy_loss), feed_dict={x: mnist.train.images, y_: mnist.train.labels})\n",
    "        print('step:%4d,%-3s trian accuracy:%8.6f, %3stest accuracy:%6.4f, %3sloss:%g' % (i+1,' ',train_accuracy,' ',test_accuracy,' ',loss))"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "   当迭代34个周期时，正确率首次达到98%，进一步的迭代，经过41轮迭代，测试集正确率基本上稳定在98%以上，最高可达到98.3%，其损失也是一直在下降，训练集正确率越来越趋近于100%，可见效果不错"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "总结：  \n",
    "\n",
    "   该模型最初是在tinymind上跑的，但是由于tinymind经常会断开连接，因此依然转到之前用的jupyter notebook上进行，在过程中也遇到很多问题，变量一开始使用的是zeros，跑完代码发现正确率仅为10%，进一步了解，tf.zeros()生成的是tensor，因此这里并不能用tf.zeros(),需要采用产生随机数的方法，因此这里利用tf.random_normal()产生随机数。\n",
    "\n",
    "   隐层和神经元的数量也是一个疑点，通过试验，发现隐层和神经元数量并不是越多越好，这里的参数是通过试验出来的，当我加入三个隐层和更多的神经元时，计算的时间会变慢很多，正确率甚至有时不升反降，因此选取合适的参数是比较重要的。\n",
    "    \n",
    "   正则化问题：\n",
    "        \n",
    "       L1 正则方法：tf.contrib.layers.l1_regularizer(scale, scope=None) \n",
    "\n",
    "       L2 正则方法: tf.contrib.layers.l2_regularizer(scale, scope=None)\n",
    "        \n",
    "   其参数也是需要合适的值，如果过大或者过小也会严重影响结果，但是我在这个模型中训练时似乎与是否加入正则项的关系不大，即使不加入正则项结果也能够使正确率达到98%，不知道是不是因为噪声影响的还是什么原因。\n",
    "   \n",
    "   优化算法：\n",
    "   \n",
     "   最初使用 GradientDescentOptimizer 正确率始终达不到98%，后来了解到多种优化算法，经试验，在这里使用 AdagradOptimizer 效果最好，它是根据梯度的大小决定学习率衰减的速率，较为灵活一点。但是每种算法都各有优缺点，不可能一种算法能在每个模型中都能达到最好的效果，如何根据一个模型判断采用哪种算法呢？"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
