{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "将这个模型优化至98%以上的准确率。 Hint：\n",
    "多隐层\n",
    "激活函数\n",
    "正则化\n",
    "初始化\n",
    "摸索一下各个超参数\n",
    "隐层神经元数量\n",
    "学习率\n",
    "正则化惩罚因子\n",
    "最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners    #需要翻墙去看\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "import numpy as np\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting /input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting /input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /input_data\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "data_dir = '/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the model\n",
    "def init_weight(shape):\n",
    "    return tf.Variable(tf.random_normal(shape,stddev=1))\n",
    "\n",
    "'''def model(X,w_h1,w_o,b_h1):\n",
    "    h1=tf.nn.swish(tf.add(tf.matmul(X,w_h1),b_h1))\n",
    "    #h2=tf.nn.tanh(tf.add(tf.matmul(h1,w_h2),b_h2))\n",
    "    return tf.matmul(h1,w_o)'''\n",
    "def model(X,w_h1,w_h2,w_o,b_h1,b_h2):\n",
    "    h1=tf.nn.sigmoid(tf.add(tf.matmul(X,w_h1),b_h1))\n",
    "    h2=tf.nn.sigmoid(tf.add(tf.matmul(h1,w_h2),b_h2))\n",
    "    #h=tf.add(tf.matmul(X,w_h),b_h)\n",
    "    return tf.matmul(h2,w_o)\n",
    "\n",
    "#为方便调参，把超参数都放在这里。\n",
    "inputdimension=784\n",
    "outputdimension=10\n",
    "layer_1_cellnumber=256\n",
    "layer_2_cellnumber=128\n",
    "\n",
    "learning_rate=0.5\n",
    "\n",
    "batch_size=128\n",
    "steps=2000\n",
    "\n",
    "REGULARIZATION_RATE = 0.00004  # 正则化项在损失函数中的系数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "X = tf.placeholder(tf.float32, [None, inputdimension])  #hold a place，the 1st dimension is indeterminate,upto the input data\n",
    "y_ = tf.placeholder(tf.float32, [None, outputdimension])#hold a place for ground truth of y，the 1st dimension is determined by the input data\n",
    "w_h1= init_weight([inputdimension,layer_1_cellnumber])#weights of hidden layer1 \n",
    "w_h2= init_weight([layer_1_cellnumber,layer_2_cellnumber])#weights of hidden layer2 \n",
    "w_o= init_weight([layer_2_cellnumber,outputdimension])#weight of output layer\n",
    "#w_o= init_weight([layer_1_cellnumber,outputdimension])#weight of output layer\n",
    "\n",
    "b_h1= tf.Variable(tf.random_uniform([layer_1_cellnumber], minval=-10,maxval=10,dtype=tf.float32))\n",
    "b_h2= tf.Variable(tf.random_uniform([layer_2_cellnumber], minval=-10,maxval=10,dtype=tf.float32))\n",
    "b_o= tf.Variable(tf.random_uniform([outputdimension], minval=-10,maxval=10,dtype=tf.float32))\n",
    "py_x = tf.add(model(X,w_h1,w_h2,w_o,b_h1,b_h2),b_o)\n",
    "#py_x = tf.add(model(X,w_h1,w_o,b_h1),b_o)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置正则化方法\n",
    "regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)  # 定义L2正则化损失函数\n",
    "regularization = regularizer(w_h1) + regularizer(w_o)  # 计算模型的正则化损失"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们计算交叉熵，注意这里不要使用注释中的手动计算方式，而是使用系统函数。\n",
    "另一个注意点就是，softmax_cross_entropy_with_logits的logits参数是**未经激活的wx+b**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "# outputs of 'y', and then average across the batch.\n",
    "\n",
    "#cross_entropy = tf.reduce_mean(\n",
    "#    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=py_x))\n",
    "loss = cross_entropy + regularization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 0.796875 0.835\n",
      "10 0.96875 0.9234\n",
      "20 0.984375 0.9385\n",
      "30 0.9921875 0.9438\n",
      "40 0.9921875 0.9485\n",
      "50 1.0 0.9516\n",
      "60 1.0 0.9535\n",
      "70 1.0 0.9544\n",
      "80 1.0 0.9562\n",
      "90 1.0 0.9583\n",
      "100 1.0 0.9589\n",
      "110 1.0 0.9613\n",
      "120 1.0 0.9617\n",
      "130 1.0 0.9635\n",
      "140 1.0 0.9647\n",
      "150 1.0 0.9655\n",
      "160 1.0 0.968\n",
      "170 1.0 0.9689\n",
      "180 1.0 0.969\n",
      "190 1.0 0.9704\n",
      "200 1.0 0.972\n",
      "210 1.0 0.9716\n",
      "220 1.0 0.9726\n",
      "230 1.0 0.9733\n",
      "240 1.0 0.975\n",
      "250 1.0 0.9751\n",
      "260 1.0 0.9759\n",
      "270 1.0 0.9763\n",
      "280 1.0 0.9773\n",
      "290 1.0 0.9771\n",
      "300 1.0 0.9772\n",
      "310 1.0 0.9776\n",
      "320 1.0 0.9782\n",
      "330 1.0 0.9777\n",
      "340 1.0 0.979\n",
      "350 1.0 0.9781\n",
      "360 1.0 0.9778\n",
      "370 1.0 0.979\n",
      "380 0.9921875 0.9551\n",
      "390 1.0 0.9782\n",
      "400 1.0 0.9783\n",
      "410 1.0 0.9784\n",
      "420 1.0 0.9784\n",
      "430 1.0 0.9791\n",
      "440 1.0 0.9794\n",
      "450 1.0 0.9748\n",
      "460 1.0 0.9794\n",
      "461 1.0 0.9803\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "# train my model\n",
    "#生成一个训练step\n",
    "#train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n",
    "total_batch = mnist.train.num_examples//batch_size  # 计算batch数量取整\n",
    "\n",
    "sess=tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op) \n",
    "for e in range(steps):\n",
    "    for u in range(total_batch):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
    "        sess.run(train_step, feed_dict={X: batch_xs, y_: batch_ys})  # 不断的进行优化\n",
    "#验证模型在测试数据上的准确率         \n",
    "    correct_prediction = tf.equal(tf.argmax(py_x, 1), tf.argmax(y_, 1))#comapare the index of y and y_，reture True if equal, False for else\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    test_accuracy=sess.run(accuracy, feed_dict={X: mnist.test.images,y_: mnist.test.labels})\n",
    "    if test_accuracy>=0.98:\n",
    "        print(e,sess.run(accuracy, feed_dict={X: batch_xs, y_: batch_ys}),test_accuracy)\n",
    "        break\n",
    "    if e % 10== 0:\n",
    "        print(e,sess.run(accuracy, feed_dict={X: batch_xs, y_: batch_ys}),test_accuracy)\n",
    "           "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "调参记录：\n",
    "\n",
    "- 一个隐含层（128 个神经元，sigmoid 激活函数），学习率 0.5，准确率到 95.6%。\n",
    "- 两个隐含层（128、128 个神经元，sigmoid 激活函数），学习率 0.5，准确率没有提高（95.32%）。用 relu 激活函数，准确率更低。\n",
    "- 用单隐含层，增加神经元个数（300），准确率不会提高；迭代数次后，训练集准确率保持 100%，而测试准确率较低，疑是过拟合。\n",
    "- 仍保持单隐含层（128 个神经元，sigmoid 激活函数），增加 L2 正则后准确率提高：正则化项系数取 0.0001 仍然过拟合，改为 0.01 后欠拟合，再改为 0.001 后转好；加大神经元数量到 256，800 步时准确率达到 98.05%，之后收敛于 97.8%。\n",
    "- 多次尝试后，换回两个隐含层，调参数成功。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
