{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\ProgramData\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "from tensorflow.contrib.layers import xavier_initializer\n",
    "from tensorflow import contrib\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1、数据读入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/haha\\train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/haha\\train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/haha\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/haha\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# Download (on first run) and load MNIST into train/validation/test splits,\n",
     "# with labels one-hot encoded to match the 10-way softmax output.\n",
     "# NOTE(review): hardcoded absolute path — consider making this configurable.\n",
     "data_dir = '/tmp/tensorflow/mnist/haha'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2、网络层参数设置（W、b、dropout概率、激活函数）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "\n",
    "\n",
    "#relu激活函数效果不错\n",
    "W1 = tf.get_variable('W1', [784, 200], tf.float32, xavier_initializer())  \n",
    "b1 = tf.Variable(tf.zeros([1,200]))\n",
    "y1 = tf.nn.relu(tf.matmul(x, W1) + b1)\n",
    "\n",
    "\n",
    "W2 = tf.get_variable('W2', [200, 200], tf.float32, xavier_initializer())  \n",
    "b2 = tf.Variable(tf.zeros([200]))\n",
    "y2 = tf.nn.relu(tf.matmul(y1, W2) + b2)\n",
    "\n",
    "\n",
    "W3 = tf.get_variable('W3', [200, 10], tf.float32, xavier_initializer())  \n",
    "b3 = tf.Variable(tf.zeros([10]))\n",
    "y = tf.matmul(y2, W3) + b3"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3、损失函数和优化方法设置"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们计算交叉熵，注意这里不要使用注释中的手动计算方式，而是使用系统函数。\n",
    "另一个注意点就是，softmax_cross_entropy_with_logits的logits参数是**未经激活的wx+b**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Ground-truth labels placeholder (one-hot, 10 classes).\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])\n",
     "# L2 regularization; scale 0.001 chosen by manual tuning (larger values hurt\n",
     "# the validation score — see summary cells).\n",
     "# NOTE(review): this penalizes ALL trainable variables, biases included;\n",
     "# usually only the weight matrices are regularized — confirm this is intended.\n",
     "reg=tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(0.001),tf.trainable_variables())\n",
     "# Cross-entropy loss: use the library op on the RAW logits (not a manual\n",
     "# log-softmax, which is numerically unstable), then add the L2 penalty.\n",
     "cross_entropy = tf.add(tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)),reg)\n",
     "# Plain gradient descent; converges slowly, learning rate 0.1 worked well.\n",
     "train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)\n",
     "# Session that will execute the computation graph.\n",
     "sess = tf.Session()\n",
     "# Op that initializes all global variables (must be run before training).\n",
     "init_op = tf.global_variables_initializer()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4、开始训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Initialize all variables in the session before any training step.\n",
     "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "    epoch       train       valid        test\n",
      "   0epoch      0.1299      0.1240      0.1317\n",
      "   1epoch      0.9338      0.9354      0.9357\n",
      "   2epoch      0.9537      0.9560      0.9536\n",
      "   3epoch      0.9622      0.9616      0.9599\n",
      "   4epoch      0.9717      0.9690      0.9666\n",
      "   5epoch      0.9723      0.9684      0.9681\n",
      "   6epoch      0.9780      0.9738      0.9706\n",
      "   7epoch      0.9818      0.9744      0.9736\n",
      "   8epoch      0.9825      0.9744      0.9732\n",
      "   9epoch      0.9846      0.9772      0.9752\n",
      "  10epoch      0.9859      0.9786      0.9765\n",
      "  11epoch      0.9851      0.9788      0.9760\n",
      "  12epoch      0.9882      0.9784      0.9774\n",
      "  13epoch      0.9890      0.9794      0.9784\n",
      "  14epoch      0.9905      0.9816      0.9795\n",
      "  15epoch      0.9904      0.9808      0.9792\n",
      "  16epoch      0.9918      0.9818      0.9797\n",
      "  17epoch      0.9912      0.9802      0.9799\n",
      "  18epoch      0.9911      0.9808      0.9782\n",
      "  19epoch      0.9927      0.9790      0.9808\n",
      "  20epoch      0.9921      0.9788      0.9799\n",
      "  21epoch      0.9939      0.9834      0.9809\n",
      "  22epoch      0.9934      0.9836      0.9802\n",
      "  23epoch      0.9928      0.9808      0.9786\n",
      "  24epoch      0.9941      0.9816      0.9805\n",
      "  25epoch      0.9937      0.9840      0.9794\n",
      "  26epoch      0.9943      0.9828      0.9806\n",
      "  27epoch      0.9936      0.9822      0.9797\n",
      "  28epoch      0.9938      0.9812      0.9791\n",
      "  29epoch      0.9947      0.9834      0.9806\n",
      "  30epoch      0.9950      0.9814      0.9811\n",
      "  31epoch      0.9948      0.9836      0.9814\n",
      "  32epoch      0.9952      0.9830      0.9814\n",
      "  33epoch      0.9952      0.9824      0.9803\n",
      "  34epoch      0.9953      0.9826      0.9807\n",
      "  35epoch      0.9944      0.9822      0.9806\n",
      "  36epoch      0.9954      0.9834      0.9802\n",
      "  37epoch      0.9951      0.9834      0.9800\n",
      "  38epoch      0.9954      0.9828      0.9800\n",
      "  39epoch      0.9962      0.9820      0.9814\n",
      "  40epoch      0.9960      0.9836      0.9808\n",
      "  41epoch      0.9962      0.9842      0.9814\n",
      "  42epoch      0.9955      0.9832      0.9813\n",
      "  43epoch      0.9945      0.9810      0.9801\n",
      "  44epoch      0.9955      0.9836      0.9814\n",
      "  45epoch      0.9954      0.9826      0.9802\n",
      "  46epoch      0.9961      0.9842      0.9824\n",
      "  47epoch      0.9960      0.9840      0.9820\n",
      "  48epoch      0.9963      0.9844      0.9820\n",
      "  49epoch      0.9959      0.9850      0.9812\n",
      "  50epoch      0.9957      0.9840      0.9811\n",
      "  51epoch      0.9958      0.9830      0.9813\n",
      "  52epoch      0.9964      0.9830      0.9809\n",
      "  53epoch      0.9951      0.9824      0.9813\n",
      "  54epoch      0.9963      0.9834      0.9824\n",
      "  55epoch      0.9957      0.9842      0.9813\n",
      "  56epoch      0.9968      0.9842      0.9816\n",
      "  57epoch      0.9957      0.9844      0.9810\n",
      "  58epoch      0.9964      0.9826      0.9823\n",
      "  59epoch      0.9954      0.9840      0.9797\n",
      "  60epoch      0.9970      0.9836      0.9808\n",
      "  61epoch      0.9960      0.9842      0.9805\n",
      "  62epoch      0.9968      0.9836      0.9812\n",
      "  63epoch      0.9965      0.9832      0.9818\n",
      "  64epoch      0.9963      0.9844      0.9814\n",
      "  65epoch      0.9965      0.9832      0.9822\n",
      "  66epoch      0.9967      0.9840      0.9813\n",
      "  67epoch      0.9965      0.9842      0.9823\n",
      "  68epoch      0.9966      0.9848      0.9824\n",
      "  69epoch      0.9959      0.9832      0.9809\n",
      "  70epoch      0.9967      0.9844      0.9815\n",
      "  71epoch      0.9965      0.9852      0.9814\n",
      "  72epoch      0.9950      0.9818      0.9794\n",
      "  73epoch      0.9969      0.9844      0.9817\n",
      "  74epoch      0.9967      0.9836      0.9814\n",
      "  75epoch      0.9968      0.9834      0.9818\n",
      "  76epoch      0.9967      0.9830      0.9812\n",
      "  77epoch      0.9965      0.9834      0.9819\n",
      "  78epoch      0.9964      0.9826      0.9826\n",
      "  79epoch      0.9969      0.9834      0.9820\n",
      "  80epoch      0.9955      0.9824      0.9818\n",
      "  81epoch      0.9972      0.9836      0.9819\n",
      "  82epoch      0.9963      0.9826      0.9808\n",
      "  83epoch      0.9971      0.9832      0.9825\n",
      "  84epoch      0.9965      0.9832      0.9815\n",
      "  85epoch      0.9969      0.9840      0.9814\n",
      "  86epoch      0.9956      0.9824      0.9810\n",
      "  87epoch      0.9959      0.9836      0.9804\n",
      "  88epoch      0.9967      0.9840      0.9820\n",
      "  89epoch      0.9963      0.9832      0.9824\n",
      "  90epoch      0.9971      0.9824      0.9824\n",
      "  91epoch      0.9960      0.9830      0.9825\n",
      "  92epoch      0.9970      0.9838      0.9817\n",
      "  93epoch      0.9959      0.9830      0.9813\n",
      "  94epoch      0.9968      0.9832      0.9812\n",
      "  95epoch      0.9958      0.9834      0.9812\n",
      "  96epoch      0.9968      0.9838      0.9823\n",
      "  97epoch      0.9965      0.9824      0.9824\n",
      "  98epoch      0.9965      0.9832      0.9821\n",
      "  99epoch      0.9966      0.9810      0.9819\n"
     ]
    }
   ],
   "source": [
    "# 计算正确率\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "print('    epoch       train       valid        test')\n",
    "for _ in range(55000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})    \n",
    "    if _%550==0:\n",
    "        print('%4sepoch  %10.4f  %10.4f  %10.4f'%(_//550,\\\n",
    "        sess.run(accuracy, feed_dict={x: mnist.train.images,y_: mnist.train.labels}),\\\n",
    "        sess.run(accuracy, feed_dict={x: mnist.validation.images,y_: mnist.validation.labels}),\\\n",
    "        sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels})))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 总结："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1、加了隐层后，效果比较差，发现是初始化的原因，初始化全0会导致delta函数传递为0，初始化为1效果也很差，因为每个w的梯度一样，无法收敛到极致，最后选用了xavier的初始化函数。在神经元100个的情况下，双隐层比单隐层略微提升，大约提升0.003左右，由于分类任务较为简单，所以采用双隐层网络即可，毋须再增加层数，神经元先选200个，后期再dropout。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "2、激活函数选为relu的情况下，效果比较好，此时测试集正确率在0.981左右。但20个epoch时发生过拟合，过拟合后损失函数处于极值点，梯度不更新，要提升测试集正确率需要考虑正则。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "3、L2正则参数选择0.001，参数再增大会减小校验集分数。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "4、估计初始化和方差调整还可以提升分数，不过xavier够用就行，哈哈！"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
