{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "# Helper that downloads and reads the MNIST dataset.\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "# NOTE(review): argparse, sys and FLAGS are leftovers from the original\n",
    "# command-line script version of this tutorial; they are unused in this\n",
    "# notebook and could be removed.\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.4.0\n"
     ]
    }
   ],
   "source": [
    "# Record the TensorFlow version this notebook was executed with.\n",
    "print(tf.__version__)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /home/yungao.gy/ai_hw/week7/mnist/train-images-idx3-ubyte.gz\n",
      "Extracting /home/yungao.gy/ai_hw/week7/mnist/train-labels-idx1-ubyte.gz\n",
      "Extracting /home/yungao.gy/ai_hw/week7/mnist/t10k-images-idx3-ubyte.gz\n",
      "Extracting /home/yungao.gy/ai_hw/week7/mnist/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "# data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "# NOTE(review): hardcoded absolute path — change data_dir to suit your\n",
    "# own environment before running.\n",
    "data_dir = '/home/yungao.gy/ai_hw/week7/mnist'\n",
    "# one_hot=True: labels are 10-dimensional one-hot vectors, matching the\n",
    "# [None, 10] label placeholder defined below.\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一个非常非常简陋的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the model: a 784 -> 1000 (sigmoid) -> 10 fully-connected network\n",
    "# with L2 regularization on its variables.\n",
    "regularizer = tf.contrib.layers.l2_regularizer(scale=7e-5)\n",
    "\n",
    "# Input: flattened 28x28 grayscale images.\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "# Passing regularizer= makes get_variable add the variable's L2 penalty\n",
    "# to the GraphKeys.REGULARIZATION_LOSSES collection, consumed below.\n",
    "W_1 = tf.get_variable(name=\"W_1\", shape=[784, 1000], regularizer=regularizer)\n",
    "# NOTE(review): biases are usually left unregularized; consider dropping\n",
    "# regularizer= for b_1 and b_2.\n",
    "b_1 = tf.get_variable(name=\"b_1\", shape=[1000], regularizer=regularizer)\n",
    "\n",
    "#W_1 = tf.Variable(tf.random_normal([784, 100]))\n",
    "#b_1 = tf.Variable(tf.random_normal([100]))\n",
    "# Hidden layer with sigmoid activation.\n",
    "logit_1 = tf.matmul(x, W_1) + b_1\n",
    "y_1 = tf.nn.sigmoid(logit_1)\n",
    "\n",
    "\n",
    "W_2 = tf.get_variable(name=\"W_2\", shape=[1000, 10], regularizer=regularizer)\n",
    "b_2 = tf.get_variable(name=\"b_2\", shape=[10], regularizer=regularizer)\n",
    "\n",
    "#W_2 = tf.Variable(tf.random_normal([100, 10]))\n",
    "#b_2 = tf.Variable(tf.random_normal([10]))\n",
    "\n",
    "# Output logits; softmax is applied inside the loss function, not here.\n",
    "y = tf.matmul(y_1, W_2) + b_2"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "定义我们的ground truth 占位符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Define loss and optimizer\n",
    "# Ground-truth labels: one-hot vectors over the 10 digit classes.\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们计算交叉熵，注意这里不要使用注释中的手动计算方式，而是使用系统函数。\n",
    "另一个注意点就是，softmax_cross_entropy_with_logits的logits参数是**未经激活的wx+b**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-6-5441f180d723>:11: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See tf.nn.softmax_cross_entropy_with_logits_v2.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "# outputs of 'y', and then average across the batch.\n",
    "# NOTE(review): this op is deprecated (see the warning in this cell's\n",
    "# output); tf.nn.softmax_cross_entropy_with_logits_v2 is the suggested\n",
    "# replacement and is equivalent here because the labels come from a\n",
    "# placeholder, so no gradient can flow into them anyway.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Variables created with `regularizer=` already have their scalar L2\n",
    "# penalty terms collected into GraphKeys.REGULARIZATION_LOSSES, so the\n",
    "# total penalty is simply the sum of that collection.\n",
    "#\n",
    "# BUG FIX: the previous code passed those penalty *scalars* back through\n",
    "# tf.contrib.layers.apply_regularization, which applies the regularizer\n",
    "# to the penalties themselves (scale * l2_loss(penalty), a vanishingly\n",
    "# small value) instead of summing them — effectively disabling\n",
    "# regularization.\n",
    "reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n",
    "reg_term = tf.add_n(reg_variables)\n",
    "\n",
    "# Total training objective: data loss plus L2 penalty.\n",
    "loss = cross_entropy + reg_term"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成一个训练step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Plain SGD with learning rate 0.25, minimizing the regularized loss.\n",
    "train_step = tf.train.GradientDescentOptimizer(0.25).minimize(loss)\n",
    "\n",
    "# Accuracy: fraction of samples whose argmax prediction matches the\n",
    "# argmax of the one-hot label.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "# Create the session and initialize all graph variables.\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在这里我们仍然调用系统提供的读取数据，为我们取得一个batch。\n",
    "然后我们运行5万个step（batch大小为100），对权重进行优化。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train 0.1\n",
      "test 0.101\n",
      "train 0.88\n",
      "test 0.9119\n",
      "train 0.9\n",
      "test 0.9127\n",
      "train 0.92\n",
      "test 0.9285\n",
      "train 0.9\n",
      "test 0.9357\n",
      "train 0.94\n",
      "test 0.9406\n",
      "train 0.95\n",
      "test 0.9495\n",
      "train 0.95\n",
      "test 0.9509\n",
      "train 0.97\n",
      "test 0.9574\n",
      "train 0.97\n",
      "test 0.9604\n",
      "train 1.0\n",
      "test 0.964\n",
      "train 0.94\n",
      "test 0.9641\n",
      "train 0.97\n",
      "test 0.9656\n",
      "train 0.98\n",
      "test 0.968\n",
      "train 0.97\n",
      "test 0.9693\n",
      "train 1.0\n",
      "test 0.9702\n",
      "train 0.99\n",
      "test 0.9713\n",
      "train 0.99\n",
      "test 0.9719\n",
      "train 0.99\n",
      "test 0.973\n",
      "train 1.0\n",
      "test 0.973\n",
      "train 0.96\n",
      "test 0.9729\n",
      "train 1.0\n",
      "test 0.9739\n",
      "train 0.98\n",
      "test 0.9741\n",
      "train 0.99\n",
      "test 0.976\n",
      "train 0.98\n",
      "test 0.9755\n",
      "train 0.98\n",
      "test 0.9766\n",
      "train 0.97\n",
      "test 0.9771\n",
      "train 0.99\n",
      "test 0.9774\n",
      "train 0.99\n",
      "test 0.9765\n",
      "train 0.98\n",
      "test 0.9774\n",
      "train 1.0\n",
      "test 0.9785\n",
      "train 1.0\n",
      "test 0.9776\n",
      "train 0.99\n",
      "test 0.979\n",
      "train 0.99\n",
      "test 0.9783\n",
      "train 0.99\n",
      "test 0.9786\n",
      "train 1.0\n",
      "test 0.9786\n",
      "train 1.0\n",
      "test 0.9794\n",
      "train 0.99\n",
      "test 0.9799\n",
      "train 1.0\n",
      "test 0.9783\n",
      "train 1.0\n",
      "test 0.9801\n",
      "train 0.99\n",
      "test 0.9788\n",
      "train 1.0\n",
      "test 0.9799\n",
      "train 1.0\n",
      "test 0.9791\n",
      "train 0.98\n",
      "test 0.9798\n",
      "train 1.0\n",
      "test 0.9799\n",
      "train 1.0\n",
      "test 0.9805\n",
      "train 1.0\n",
      "test 0.9802\n",
      "train 0.99\n",
      "test 0.9799\n",
      "train 0.99\n",
      "test 0.9797\n",
      "train 0.99\n",
      "test 0.9797\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "# 50000 mini-batch steps of size 100; every 1000 steps report accuracy\n",
    "# on a fresh training batch and on the full test set.\n",
    "for i in range(50000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "    \n",
    "    if i % 1000 == 0:\n",
    "        # NOTE(review): the \"train\" accuracy is measured on a new batch\n",
    "        # drawn from the same iterator, so this also consumes training data.\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "        print(\"train\", sess.run(accuracy, feed_dict={x: batch_xs,\n",
    "                                      y_: batch_ys}))\n",
    "        print(\"test\", sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                      y_: mnist.test.labels}))\n",
    "        \n",
    "  "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "验证我们模型在测试数据上的准确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9805\n"
     ]
    }
   ],
   "source": [
    "# Test trained model.\n",
    "# Reuse the `accuracy` op defined in the training-setup cell instead of\n",
    "# re-creating identical `correct_prediction`/`accuracy` nodes: the\n",
    "# duplicates bloat the graph and silently shadow the earlier names.\n",
    "print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                    y_: mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "毫无疑问，这个模型是一个非常简陋，性能也不理想的模型。目前只能达到92%左右的准确率。\n",
    "接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。\n",
    "Hint：\n",
    "- 多隐层\n",
    "- 激活函数\n",
    "- 正则化\n",
    "- 初始化\n",
    "- 摸索一下各个超参数\n",
    "  - 隐层神经元数量\n",
    "  - 学习率\n",
    "  - 正则化惩罚因子\n",
    "  - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "选择进行的优化包括\n",
    "1. 增加了一个1000个节点的隐层，提高网络的表达能力\n",
    "2. 增加了L2正则项，因为发现增加隐层后，对训练数据可以达到1.0，但是测试数据集上性能无法再提升\n",
    "3. 增加了训练的轮数\n",
    "\n",
    "经过上述调整，性能接近0.98左右"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
