{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 1. 导入工具包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\ProgramData\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "# Mnist dataset\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2. 准备数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/train-images-idx3-ubyte.gz\n",
      "Extracting ./data/train-labels-idx1-ubyte.gz\n",
      "Extracting ./data/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "# Download (if missing) and load MNIST with one-hot encoded labels.\n",
    "# NOTE(review): read_data_sets also carves out a validation split (see the\n",
    "# Datasets repr below), so mnist.train is smaller than the raw 60000 -- confirm.\n",
    "data_dir = './data/'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Datasets(train=<tensorflow.contrib.learn.python.learn.datasets.mnist.DataSet object at 0x000000A206CCB5C0>, validation=<tensorflow.contrib.learn.python.learn.datasets.mnist.DataSet object at 0x000000A209F25BE0>, test=<tensorflow.contrib.learn.python.learn.datasets.mnist.DataSet object at 0x000000A209F25C18>)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the loaded Datasets tuple (train / validation / test splits)\n",
    "mnist"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 3. 准备模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.1. 建立一个单隐层模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# number of nodes in hidden layer 1\n",
    "##n_hidden_1_unit = 1000\n",
    "\n",
    "# add a hidden layer\n",
    "##x = tf.placeholder(tf.float32, [None, 784])\n",
    "##W_h = tf.Variable(tf.random_normal([784, n_hidden_1_unit]))\n",
    "##b_h = tf.Variable(tf.zeros([n_hidden_1_unit]))\n",
    "##logit_h = tf.matmul(x, W_h) + b_h"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# add the output layer\n",
    "##W = tf.Variable(tf.zeros([n_hidden_1_unit, 10]))\n",
    "##b = tf.Variable(tf.zeros([10]))\n",
    "##y = tf.matmul(tf.nn.tanh(logit_h), W) + b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ground truth\n",
    "##y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cross entropy loss\n",
    "##cross_entropy = tf.reduce_mean(\n",
    "##    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "##help(tf.contrib.layers.l2_regularizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train step\n",
    "##train_step = tf.train.GradientDescentOptimizer(0.3).minimize(cross_entropy)\n",
    "\n",
    "# training steps\n",
    "##steps = 5000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# initialize variables\n",
    "##sess = tf.Session()\n",
    "##init_op = tf.global_variables_initializer()\n",
    "##sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.2. 建立一个多隐层模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "调整超参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# number of nodes in layers\n",
    "n_input = 784\n",
    "n_hidden_1_unit = 100\n",
    "n_hidden_2_unit = 50\n",
    "n_output = 10\n",
    "\n",
    "# training batch size\n",
    "batch_size = 100\n",
    "\n",
    "# training steps\n",
    "# NOTE(review): data_size is hardcoded to 60000, but read_data_sets reserves\n",
    "# a validation split, so mnist.train may hold fewer examples -- consider\n",
    "# mnist.train.num_examples instead (confirm).\n",
    "data_size = 60000\n",
    "epoch = 50\n",
    "steps_per_epoch = data_size // batch_size  # integer division: keep step counts int\n",
    "steps = epoch * steps_per_epoch\n",
    "\n",
    "# candidate activation functions, selected by integer id (0 = no activation)\n",
    "activations = {0: None, 1: tf.nn.sigmoid, 2: tf.nn.relu, 3: tf.nn.tanh,\n",
    "               4: tf.nn.elu, 5: tf.nn.selu, 6: tf.nn.swish}\n",
    "\n",
    "# activation id used for the hidden layers (2 -> relu)\n",
    "h_active = 2\n",
    "\n",
    "# weight Variables of all layers, collected for L2 regularization\n",
    "Ws = []\n",
    "\n",
    "# learning rate\n",
    "learning_rate = 0.2\n",
    "\n",
    "# L2 regularization strength\n",
    "scale = 0.001\n",
    "\n",
    "# per-epoch learning rate decay factor\n",
    "learning_rate_decay = 0.95\n",
    "\n",
    "# weight initializer\n",
    "#xavier = tf.contrib.layers.xavier_initializer()\n",
    "def msra_initializer(input_size, output_size):\n",
    "    \"\"\"Create a weight Variable with MSRA (He) initialization.\n",
    "\n",
    "    Draws from N(0, sqrt(2 / fan_in)), which keeps activation variance\n",
    "    stable for ReLU-style nonlinearities.\n",
    "    \"\"\"\n",
    "    std = np.sqrt(2 / input_size)\n",
    "    return tf.Variable(tf.random_normal([input_size, output_size], stddev=std))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "建立模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# adding a single layer\n",
    "def addLayer(x, input_size, output_size, active = 0):\n",
    "    \"\"\"Append one fully connected layer to the graph and return its output.\n",
    "\n",
    "    Args:\n",
    "        x: input tensor of shape [batch, input_size].\n",
    "        input_size: fan-in (number of input units).\n",
    "        output_size: number of units in this layer.\n",
    "        active: integer key into the global `activations` dict;\n",
    "            0 means linear output (no activation).\n",
    "\n",
    "    Side effect: appends the layer's weight Variable to the global list\n",
    "    `Ws`, which is later fed to the L2 regularizer.\n",
    "    \"\"\"\n",
    "    #W = tf.Variable(tf.random_normal([input_size, output_size]))\n",
    "    #W = xavier([input_size, output_size])\n",
    "    W = msra_initializer(input_size, output_size)  # MSRA/He-initialized weights\n",
    "    b = tf.Variable(tf.zeros(output_size))\n",
    "    y = tf.matmul(x, W) + b\n",
    "    Ws.append(W) # add to the global weight list\n",
    "    if active == 0:\n",
    "        return y\n",
    "    else:\n",
    "        activation = activations[active] # look up the selected activation function\n",
    "        return activation(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# input placeholder\n",
    "x = tf.placeholder(tf.float32, [None, n_input])\n",
    "\n",
    "# add hidden layers\n",
    "hidden_1 = addLayer(x, n_input, n_hidden_1_unit, h_active)\n",
    "hidden_2 = addLayer(hidden_1, n_hidden_1_unit, n_hidden_2_unit, h_active)\n",
    "\n",
    "# add output layer\n",
    "y = addLayer(hidden_2, n_hidden_2_unit, n_output)\n",
    "\n",
    "# ground truth\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# regularization\n",
    "# L2 weight penalty: applies l2_regularizer (strength `scale`) to every\n",
    "# weight Variable collected in Ws and sums the results into the scalar `reg`\n",
    "regularizer = tf.contrib.layers.l2_regularizer(scale)\n",
    "reg = tf.contrib.layers.apply_regularization(regularizer, Ws)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cross entropy loss\n",
    "# mean softmax cross-entropy over the batch, plus the L2 weight penalty `reg`\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y)) + reg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# train step\n",
    "# NOTE(review): the original passed the Python float `learning_rate` straight\n",
    "# to the optimizer, baking it into the graph as a constant -- the Python-side\n",
    "# `learning_rate *= learning_rate_decay` done later in the training loop never\n",
    "# changed the rate actually used. Drive the decay inside the graph instead: a\n",
    "# non-trainable global_step is incremented by minimize(), and\n",
    "# exponential_decay(staircase=True) reproduces the intended per-epoch schedule.\n",
    "global_step = tf.Variable(0, trainable=False, name='global_step')\n",
    "decayed_learning_rate = tf.train.exponential_decay(\n",
    "    learning_rate, global_step, steps_per_epoch, learning_rate_decay, staircase=True)\n",
    "train_step = tf.train.GradientDescentOptimizer(decayed_learning_rate).minimize(\n",
    "    cross_entropy, global_step=global_step)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# initialize variables\n",
    "# create the session shared by all later cells and initialize every Variable\n",
    "# defined above (must run before any training or evaluation)\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 4. 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# accuracy: fraction of samples whose argmax prediction matches the label\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "# helpers that evaluate and print metrics on the full train / test splits\n",
    "def printTestAccuracy():\n",
    "    \"\"\"Print classification accuracy on the test set.\"\"\"\n",
    "    print(\"Test Accuracy: \", sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                          y_: mnist.test.labels}))\n",
    "\n",
    "def printTrainAccuracy():\n",
    "    \"\"\"Print classification accuracy on the training set.\"\"\"\n",
    "    print(\"Train Accuracy: \", sess.run(accuracy, feed_dict={x: mnist.train.images,\n",
    "                                          y_: mnist.train.labels}))\n",
    "\n",
    "def printLoss():\n",
    "    \"\"\"Print the regularized cross-entropy loss on the training set.\"\"\"\n",
    "    print(\"Cross entropy: \", sess.run(cross_entropy, feed_dict={x: mnist.train.images,\n",
    "                                          y_: mnist.train.labels}))\n",
    "\n",
    "def printInfo(step):\n",
    "    \"\"\"Print test accuracy, train accuracy and train loss for one step.\n",
    "\n",
    "    Train accuracy and loss share a feed dict, so they are fetched in a\n",
    "    single sess.run: one forward pass over the training set instead of two.\n",
    "    \"\"\"\n",
    "    test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n",
    "    train_acc, train_loss = sess.run([accuracy, cross_entropy],\n",
    "                                     feed_dict={x: mnist.train.images, y_: mnist.train.labels})\n",
    "    print(\"Step %d:  Test Accuracy:%s  Train Accuracy:%s  Cross entropy with reg:%s\"\n",
    "          % (step, test_acc, train_acc, train_loss))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 600:  Test Accuracy:0.9509  Train Accuracy:0.9520182  Cross entropy with reg:0.3220994\n",
      "learning rate shrinked to: 0.19\n",
      "Step 1200:  Test Accuracy:0.9636  Train Accuracy:0.96876365  Cross entropy with reg:0.25793627\n",
      "learning rate shrinked to: 0.1805\n",
      "Step 1800:  Test Accuracy:0.9676  Train Accuracy:0.9718364  Cross entropy with reg:0.23197673\n",
      "learning rate shrinked to: 0.171475\n",
      "Step 2400:  Test Accuracy:0.9699  Train Accuracy:0.9780909  Cross entropy with reg:0.20490646\n",
      "learning rate shrinked to: 0.16290124999999997\n",
      "Step 3000:  Test Accuracy:0.9736  Train Accuracy:0.9827091  Cross entropy with reg:0.18202627\n",
      "learning rate shrinked to: 0.15475618749999998\n",
      "Step 3600:  Test Accuracy:0.9732  Train Accuracy:0.9832  Cross entropy with reg:0.17351824\n",
      "learning rate shrinked to: 0.14701837812499996\n",
      "Step 4200:  Test Accuracy:0.9724  Train Accuracy:0.98385453  Cross entropy with reg:0.16592759\n",
      "learning rate shrinked to: 0.13966745921874996\n",
      "Step 4800:  Test Accuracy:0.977  Train Accuracy:0.9852909  Cross entropy with reg:0.15672511\n",
      "learning rate shrinked to: 0.13268408625781244\n",
      "Step 5400:  Test Accuracy:0.9748  Train Accuracy:0.98605454  Cross entropy with reg:0.15095156\n",
      "learning rate shrinked to: 0.1260498819449218\n",
      "Step 6000:  Test Accuracy:0.9694  Train Accuracy:0.98218185  Cross entropy with reg:0.16111705\n",
      "learning rate shrinked to: 0.11974738784767572\n",
      "Step 6600:  Test Accuracy:0.977  Train Accuracy:0.98925453  Cross entropy with reg:0.13963044\n",
      "learning rate shrinked to: 0.11376001845529193\n",
      "Step 7200:  Test Accuracy:0.9766  Train Accuracy:0.9892909  Cross entropy with reg:0.1359013\n",
      "learning rate shrinked to: 0.10807201753252733\n",
      "Step 7800:  Test Accuracy:0.9757  Train Accuracy:0.9908  Cross entropy with reg:0.13147089\n",
      "learning rate shrinked to: 0.10266841665590096\n",
      "Step 8400:  Test Accuracy:0.9791  Train Accuracy:0.9906909  Cross entropy with reg:0.12974992\n",
      "learning rate shrinked to: 0.0975349958231059\n",
      "Step 9000:  Test Accuracy:0.9751  Train Accuracy:0.9894364  Cross entropy with reg:0.13158467\n",
      "learning rate shrinked to: 0.09265824603195061\n",
      "Step 9600:  Test Accuracy:0.9754  Train Accuracy:0.9903273  Cross entropy with reg:0.12935388\n",
      "learning rate shrinked to: 0.08802533373035308\n",
      "Step 10200:  Test Accuracy:0.9775  Train Accuracy:0.99216366  Cross entropy with reg:0.124071546\n",
      "learning rate shrinked to: 0.08362406704383542\n",
      "Step 10800:  Test Accuracy:0.9766  Train Accuracy:0.99134547  Cross entropy with reg:0.12544845\n",
      "learning rate shrinked to: 0.07944286369164365\n",
      "Step 11400:  Test Accuracy:0.9779  Train Accuracy:0.9921455  Cross entropy with reg:0.12248726\n",
      "learning rate shrinked to: 0.07547072050706147\n",
      "Step 12000:  Test Accuracy:0.9781  Train Accuracy:0.9921273  Cross entropy with reg:0.12286201\n",
      "learning rate shrinked to: 0.07169718448170839\n",
      "Step 12600:  Test Accuracy:0.9792  Train Accuracy:0.993  Cross entropy with reg:0.12177831\n",
      "learning rate shrinked to: 0.06811232525762297\n",
      "Step 13200:  Test Accuracy:0.9802  Train Accuracy:0.9923273  Cross entropy with reg:0.12291245\n",
      "learning rate shrinked to: 0.06470670899474182\n",
      "Step 13800:  Test Accuracy:0.9765  Train Accuracy:0.9906909  Cross entropy with reg:0.12594372\n",
      "learning rate shrinked to: 0.061471373545004725\n",
      "Step 14400:  Test Accuracy:0.9794  Train Accuracy:0.9929636  Cross entropy with reg:0.12020548\n",
      "learning rate shrinked to: 0.058397804867754484\n",
      "Step 15000:  Test Accuracy:0.9775  Train Accuracy:0.99072725  Cross entropy with reg:0.124923915\n",
      "learning rate shrinked to: 0.055477914624366756\n",
      "Step 15600:  Test Accuracy:0.9805  Train Accuracy:0.99432725  Cross entropy with reg:0.11656902\n",
      "learning rate shrinked to: 0.052704018893148415\n",
      "Step 16200:  Test Accuracy:0.9803  Train Accuracy:0.9936182  Cross entropy with reg:0.117759876\n",
      "learning rate shrinked to: 0.05006881794849099\n",
      "Step 16800:  Test Accuracy:0.9785  Train Accuracy:0.9918  Cross entropy with reg:0.122303784\n",
      "learning rate shrinked to: 0.047565377051066435\n",
      "Step 17400:  Test Accuracy:0.9785  Train Accuracy:0.99283636  Cross entropy with reg:0.11949338\n",
      "learning rate shrinked to: 0.04518710819851311\n",
      "Step 18000:  Test Accuracy:0.9782  Train Accuracy:0.99307275  Cross entropy with reg:0.1186699\n",
      "learning rate shrinked to: 0.04292775278858745\n",
      "Step 18600:  Test Accuracy:0.9804  Train Accuracy:0.9940364  Cross entropy with reg:0.11745349\n",
      "learning rate shrinked to: 0.040781365149158075\n",
      "Step 19200:  Test Accuracy:0.9773  Train Accuracy:0.9920545  Cross entropy with reg:0.12063432\n",
      "learning rate shrinked to: 0.03874229689170017\n",
      "Step 19800:  Test Accuracy:0.9801  Train Accuracy:0.9934  Cross entropy with reg:0.11690931\n",
      "learning rate shrinked to: 0.03680518204711516\n",
      "Step 20400:  Test Accuracy:0.9765  Train Accuracy:0.9896727  Cross entropy with reg:0.12552583\n",
      "learning rate shrinked to: 0.0349649229447594\n",
      "Step 21000:  Test Accuracy:0.9804  Train Accuracy:0.9944909  Cross entropy with reg:0.11577207\n",
      "learning rate shrinked to: 0.033216676797521424\n",
      "Step 21600:  Test Accuracy:0.9779  Train Accuracy:0.9930909  Cross entropy with reg:0.118719354\n",
      "learning rate shrinked to: 0.03155584295764535\n",
      "Step 22200:  Test Accuracy:0.9797  Train Accuracy:0.9937818  Cross entropy with reg:0.11619608\n",
      "learning rate shrinked to: 0.029978050809763082\n",
      "Step 22800:  Test Accuracy:0.9769  Train Accuracy:0.99087274  Cross entropy with reg:0.12229833\n",
      "learning rate shrinked to: 0.028479148269274928\n",
      "Step 23400:  Test Accuracy:0.981  Train Accuracy:0.99454546  Cross entropy with reg:0.11471029\n",
      "learning rate shrinked to: 0.02705519085581118\n",
      "Step 24000:  Test Accuracy:0.9814  Train Accuracy:0.9950182  Cross entropy with reg:0.113452196\n",
      "learning rate shrinked to: 0.025702431313020618\n",
      "Step 24600:  Test Accuracy:0.9784  Train Accuracy:0.9936182  Cross entropy with reg:0.11828922\n",
      "learning rate shrinked to: 0.024417309747369585\n",
      "Step 25200:  Test Accuracy:0.9778  Train Accuracy:0.9925454  Cross entropy with reg:0.12120494\n",
      "learning rate shrinked to: 0.023196444260001104\n",
      "Step 25800:  Test Accuracy:0.9791  Train Accuracy:0.99407274  Cross entropy with reg:0.11667159\n",
      "learning rate shrinked to: 0.022036622047001048\n",
      "Step 26400:  Test Accuracy:0.9783  Train Accuracy:0.99349093  Cross entropy with reg:0.1177624\n",
      "learning rate shrinked to: 0.020934790944650995\n",
      "Step 27000:  Test Accuracy:0.9802  Train Accuracy:0.9946909  Cross entropy with reg:0.11436492\n",
      "learning rate shrinked to: 0.019888051397418442\n",
      "Step 27600:  Test Accuracy:0.981  Train Accuracy:0.9946182  Cross entropy with reg:0.11291984\n",
      "learning rate shrinked to: 0.01889364882754752\n",
      "Step 28200:  Test Accuracy:0.9798  Train Accuracy:0.99283636  Cross entropy with reg:0.11779757\n",
      "learning rate shrinked to: 0.01794896638617014\n",
      "Step 28800:  Test Accuracy:0.9799  Train Accuracy:0.9945091  Cross entropy with reg:0.11434165\n",
      "learning rate shrinked to: 0.017051518066861632\n",
      "Step 29400:  Test Accuracy:0.9785  Train Accuracy:0.99347275  Cross entropy with reg:0.116952434\n",
      "learning rate shrinked to: 0.01619894216351855\n",
      "Finally: \n",
      "Cross entropy:  0.11465212\n",
      "Train Accuracy:  0.99463636\n",
      "Test Accuracy:  0.9803\n"
     ]
    }
   ],
   "source": [
    "# training loop: plain mini-batch SGD for `steps` iterations\n",
    "for step in range(steps):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
    "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "    #if step % (steps / 10) == 0: # print 10 times in total\n",
    "    #    printInfo(step)\n",
    "    #    print(\"================\")\n",
    "        \n",
    "    # once per epoch: decay the learning rate and report metrics\n",
    "    # NOTE(review): this reassignment updates only the Python variable; unless\n",
    "    # the optimizer reads the rate from the graph, the decay printed below does\n",
    "    # not change the updates applied -- verify against the train-step cell.\n",
    "    if step > 0 and step % steps_per_epoch == 0:\n",
    "        learning_rate = learning_rate * learning_rate_decay\n",
    "        printInfo(step)\n",
    "        print(\"learning rate shrinked to: %s\" % learning_rate)\n",
    "print(\"Finally: \")   \n",
    "printLoss()\n",
    "printTrainAccuracy()\n",
    "printTestAccuracy()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "基础模型 92%\n",
    "\n",
    "\n",
    "添加单个隐层（tanh，800units）11%\n",
    "\n",
    "隐层权重初始化由zeros改为使用random_normal 94%\n",
    "\n",
    "隐层改为使用400units 92%\n",
    "\n",
    "隐层改为使用600units 92%\n",
    "\n",
    "隐层改为使用700units 93%\n",
    "\n",
    "隐层改为使用1000units 93% -- 800unit时结果比较好\n",
    "\n",
    "激活函数改为sigmoid（800units）93%\n",
    "\n",
    "\n",
    "添加第二个隐层（sigmoid, 800units）92%\n",
    "\n",
    "调整训练steps为12000 93% -- 训练集准确率到100%了，过拟合\n",
    "\n",
    "加入L2正则(0.01) 79%\n",
    "\n",
    "正则参数改为0.001 93% -- 测试集准确率高于训练集92%\n",
    "\n",
    "尝试减少第二个隐层的神经元为400units 94%\n",
    "\n",
    "第二隐层改为使用200units 92%\n",
    "\n",
    "第一隐层改为使用400units(二层400units) 94%\n",
    "\n",
    "第一隐层改为使用200units 94%\n",
    "\n",
    "第二隐层改为使用200units 94%\n",
    "\n",
    "\n",
    "激活函数改为tanh 97%\n",
    "\n",
    "第二隐层改为使用100units 97%\n",
    "\n",
    "第二隐层改为使用50units 97%\n",
    "\n",
    "激活函数改为relu 11% -- 增加epoch也没有下降。没有用0初始化权重。。增加隐层神经元后交叉熵变成nan了。。。\n",
    "\n",
    "学习率由 0.3 改为 0.01  -- 正常了，准确率变为92%，可能是因为学习率高，交叉熵结果才出现问题\n",
    "\n",
    "激活函数改为elu 92% -- tanh比较接近98%\n",
    "\n",
    "\n",
    "学习率由 0.3 改为 0.1 97%\n",
    "\n",
    "加入学习率衰减(每个epoch衰减为0.3) 97%\n",
    "\n",
    "增加为40个epoch  97%\n",
    "\n",
    "学习率衰减改为0.5  97%\n",
    "\n",
    "减小正则参数至0.0001 93% -- 训练集98%，过拟合，恢复0.001\n",
    "\n",
    "\n",
    "隐层权重初始化由random_normal改为使用msra  11%\n",
    "\n",
    "学习率改为0.03 94%\n",
    "\n",
    "学习率改为0.1 97%\n",
    "\n",
    "\n",
    "= = = = = = = = = = = = = = = = = = = = = = = =\n",
    "\n",
    "尝试了各种调整的组合，最后终于超过了98%。。。。最高是98.3%\n",
    "\n",
    "参数如下：\n",
    "#### number of nodes in layers\n",
    "n_input = 784\n",
    "n_hidden_1_unit = 100\n",
    "n_hidden_2_unit = 50\n",
    "n_output = 10\n",
    "\n",
    "#### training batch size\n",
    "batch_size = 100\n",
    "\n",
    "#### training steps\n",
    "epoch = 50\n",
    "\n",
    "#### activation for hidden layers - tf.nn.relu\n",
    "h_active = 2\n",
    "\n",
    "#### learning rate\n",
    "learning_rate = 0.2\n",
    "\n",
    "#### regularization param - L2\n",
    "scale = 0.001\n",
    "\n",
    "#### learning rate decay\n",
    "learning_rate_decay = 0.95\n",
    "\n",
    "#### weight initializer - MSRA\n",
    "#xavier = tf.contrib.layers.xavier_initializer()\n",
    "def msra_initializer(input_size, output_size):\n",
    "    std = np.sqrt(2/input_size)\n",
    "    W = tf.Variable(tf.random_normal([input_size, output_size], stddev = std))\n",
    "    return W"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
