{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# - 多隐层\n",
    "\t\n",
    "\t增加了2个隐层\n",
    "\t```\n",
    "\t    for index in range(len(HIDDENS)):\n",
    "        hide_size = HIDDENS[index]\n",
    "\n",
    "        hidden_w1 = tf.Variable(tf.random_normal([_input_size, hide_size], stddev=0.1))\n",
    "        hidden_b1 = tf.Variable(tf.zeros([hide_size]))\n",
    "\n",
    "        # 激活，最后一个到输出层的不要激活，走自动激活\n",
    "        hidden_1 = tf.matmul(_input_data, hidden_w1) + hidden_b1\n",
    "\t```\n",
    "\t\n",
    "# - 激活函数\n",
    "\t\n",
    "\t隐层之间的输出做了激活，使用 \n",
    "\t```         \n",
    "\tif len(HIDDENS) != (index + 1):\n",
    "            # layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['w1']), _biases['b1']))  # 隐层\n",
    "            hidden_1 = tf.nn.relu(hidden_1)\n",
    "    ``` \n",
    "    发现 relu 的效果 比 sigmoid 要好\n",
    "\n",
    "# - 正则化\n",
    "\n",
    "\t```\n",
    "\t # TODO reg , 正则参数，防止过拟合  scale\n",
    "    for w in hidden_ws:\n",
    "        tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)\n",
    "    tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(scale=scale)\n",
    "    reg_term = tf.contrib.layers.apply_regularization(regularizer)\n",
    "\t```\n",
    "\t参考网上的做法，采用 l2 正则，防止过拟合\n",
    "\n",
    "# - 初始化\n",
    "\n",
    "\t```\n",
    "\ttf.Variable(tf.random_normal([_input_size, hide_size], stddev=0.1))\n",
    "\t```\n",
    "\n",
    "# - 摸索一下各个超参数\n",
    "\n",
    "\t\n",
    "\t具体见代码\n",
    "\t```\n",
    "\tdef getBestAccuracy(HIDDENS=[], learning_rate=0.3, scale=0.1, INPUT_SIZE=784, OUTPUT_SIZE=10, mnist=None):\n",
    "\t```\n",
    "\n",
    "- 隐层神经元数量\n",
    "- 学习率\n",
    "- 正则化惩罚因子\n",
    "- 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/wuzhong/anaconda3/envs/py27/lib/python2.7/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n",
      "[150, 30] 0.1 0.0002 0.9801\n",
      "best lr accuracy 0.1 0.9801\n",
      "[150, 30] 0.30000000000000004 0.0002 0.9801\n",
      "[150, 30] 0.5 0.0002 0.9693\n",
      "[150, 30] 0.7000000000000001 0.0002 0.098\n",
      "[150, 30] 0.9 0.0002 0.1135\n",
      "[150, 30] 0.1 0.001 0.9803\n",
      "best scale accuracy 0.001 0.9803\n",
      "[150, 30] 0.1 0.000755 0.9806\n",
      "best scale accuracy 0.000755 0.9806\n",
      "[150, 30] 0.1 0.00051 0.9807\n",
      "best scale accuracy 0.00051 0.9807\n",
      "[150, 30] 0.1 0.00026500000000000004 0.98\n",
      "[150, 30] 0.1 2e-05 0.9775\n",
      "[150, 50] 0.1 0.00051 0.98\n",
      "[150, 30] 0.1 0.00051 0.98\n",
      "[150, 15] 0.1 0.00051 0.9804\n",
      "[300, 50] 0.1 0.00051 0.98\n",
      "[150, 50] 0.1 0.00051 0.9802\n",
      "[100, 50] 0.1 0.00051 0.9808\n",
      "best hidden accuracy [100, 50] 0.9808\n",
      "best parmas are :  (0.1, 0.9801) (0.00051, 0.9807) ([100, 50], 0.9808)\n"
     ]
    }
   ],
   "source": [
    "# coding: utf-8\n",
    "\n",
    "# 毫无疑问，这个模型是一个非常简陋，性能也不理想的模型。目前只能达到92%左右的准确率。\n",
    "# 接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。\n",
    "# Hint：\n",
    "# - 多隐层\n",
    "# - 激活函数\n",
    "# - 正则化\n",
    "# - 初始化\n",
    "# - 摸索一下各个超参数\n",
    "#   - 隐层神经元数量\n",
    "#   - 学习率\n",
    "#   - 正则化惩罚因子\n",
    "#   - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整\n",
    "\n",
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import os\n",
    "\n",
    "os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n",
    "\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "FLAGS = None\n",
    "\n",
    "\n",
    "def getBestAccuracy(HIDDENS=[], learning_rate=0.3, scale=0.1, INPUT_SIZE=784, OUTPUT_SIZE=10, mnist=None):\n",
    "    \"\"\"Build, train and evaluate an MLP on MNIST; return the best test accuracy.\n",
    "\n",
    "    Args:\n",
    "        HIDDENS: list of hidden-layer sizes, e.g. [150, 30].\n",
    "        learning_rate: SGD learning rate.\n",
    "        scale: L2 regularization strength.\n",
    "        INPUT_SIZE: dimensionality of the input vectors.\n",
    "        OUTPUT_SIZE: number of output classes.\n",
    "        mnist: dataset object returned by input_data.read_data_sets.\n",
    "\n",
    "    Returns:\n",
    "        Best test-set accuracy observed during training (float).\n",
    "    \"\"\"\n",
    "    # Build everything in a fresh graph: the previous version used the default\n",
    "    # graph, so tf.GraphKeys.WEIGHTS accumulated weights across calls and the\n",
    "    # regularization term penalized stale variables from earlier trials.\n",
    "    with tf.Graph().as_default():\n",
    "        x = tf.placeholder(tf.float32, [None, INPUT_SIZE])\n",
    "        y = tf.placeholder(tf.float32, [None, OUTPUT_SIZE])\n",
    "\n",
    "        hidden_ws = []\n",
    "        _input_size = INPUT_SIZE\n",
    "        _input_data = x\n",
    "\n",
    "        for index in range(len(HIDDENS)):\n",
    "            hide_size = HIDDENS[index]\n",
    "\n",
    "            hidden_w = tf.Variable(tf.random_normal([_input_size, hide_size], stddev=0.1))\n",
    "            hidden_b = tf.Variable(tf.zeros([hide_size]))\n",
    "\n",
    "            hidden = tf.matmul(_input_data, hidden_w) + hidden_b\n",
    "            # ReLU between hidden layers only; the last layer stays linear\n",
    "            # because the softmax is applied inside the loss.\n",
    "            if len(HIDDENS) != (index + 1):\n",
    "                hidden = tf.nn.relu(hidden)\n",
    "\n",
    "            hidden_ws.append(hidden_w)\n",
    "            _input_data = hidden\n",
    "            _input_size = hide_size\n",
    "\n",
    "        W = tf.Variable(tf.zeros([_input_size, OUTPUT_SIZE]))\n",
    "        b = tf.Variable(tf.zeros([OUTPUT_SIZE]))\n",
    "\n",
    "        # logits (unnormalized class scores)\n",
    "        logits = tf.matmul(_input_data, W) + b\n",
    "\n",
    "        # L2 regularization over every weight matrix to curb overfitting;\n",
    "        # apply_regularization reads the WEIGHTS collection of THIS graph.\n",
    "        for w in hidden_ws:\n",
    "            tf.add_to_collection(tf.GraphKeys.WEIGHTS, w)\n",
    "        tf.add_to_collection(tf.GraphKeys.WEIGHTS, W)\n",
    "        regularizer = tf.contrib.layers.l2_regularizer(scale=scale)\n",
    "        reg_term = tf.contrib.layers.apply_regularization(regularizer)\n",
    "        cross_entropy = tf.reduce_mean(\n",
    "            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)) + reg_term\n",
    "\n",
    "        # plain SGD\n",
    "        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)\n",
    "\n",
    "        # Evaluation ops are built once, outside the training loop; building\n",
    "        # them inside the loop (as before) kept growing the graph each epoch.\n",
    "        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "        best = 0\n",
    "        # 'with' closes the session; the old code leaked one session per call.\n",
    "        with tf.Session() as sess:\n",
    "            sess.run(tf.global_variables_initializer())\n",
    "\n",
    "            for index in range(80):\n",
    "                for _ in range(500):\n",
    "                    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "                    sess.run([cross_entropy, train_step], feed_dict={x: batch_xs, y: batch_ys})\n",
    "\n",
    "                # Evaluate on the test set after each epoch of 500 batches.\n",
    "                accuracy_value = sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                                               y: mnist.test.labels})\n",
    "\n",
    "                if accuracy_value > best:\n",
    "                    best = accuracy_value\n",
    "\n",
    "                # Early stop once the 98% target is reached.\n",
    "                if best > 0.98:\n",
    "                    break\n",
    "\n",
    "    print(HIDDENS, learning_rate, scale, best)\n",
    "    return best\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Import data; input_data downloads MNIST into data_dir if it is missing.\n",
    "    data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "    mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "\n",
    "    # 1) Sweep the learning rate at a fixed scale and architecture.\n",
    "    learning_rates = np.linspace(0.1, 0.9, 5)\n",
    "    best_lr_pair = (0, 0)\n",
    "    for lr in learning_rates:\n",
    "        accuracy = getBestAccuracy(HIDDENS=[150, 30], learning_rate=lr, scale=1.0 / 5000, mnist=mnist)\n",
    "        if accuracy > best_lr_pair[1]:\n",
    "            print(\"best lr accuracy\", lr, accuracy)\n",
    "            best_lr_pair = (lr, accuracy)\n",
    "\n",
    "    # 2) Sweep the L2 penalty scale with the best learning rate.\n",
    "    scales = np.linspace(1.0 / 1000, 1.0 / 50000, 5)\n",
    "    best_scale_pair = (1.0 / 5000, best_lr_pair[1])\n",
    "    for sc in scales:\n",
    "        accuracy = getBestAccuracy(HIDDENS=[150, 30], learning_rate=best_lr_pair[0], scale=sc, mnist=mnist)\n",
    "        if accuracy > best_scale_pair[1]:\n",
    "            print(\"best scale accuracy\", sc, accuracy)\n",
    "            best_scale_pair = (sc, accuracy)\n",
    "\n",
    "    # 3) Sweep hidden-layer sizes with the best lr and scale. The baseline\n",
    "    # accuracy in best_scale_pair was obtained with [150, 30], so seed the\n",
    "    # pair with that architecture (previously it wrongly used hiddens[0]).\n",
    "    hiddens = [[150, 50], [150, 30], [150, 15]]\n",
    "    best_hidden_pair = ([150, 30], best_scale_pair[1])\n",
    "    for hd in hiddens:\n",
    "        accuracy = getBestAccuracy(HIDDENS=hd, learning_rate=best_lr_pair[0], scale=best_scale_pair[0], mnist=mnist)\n",
    "        if accuracy > best_hidden_pair[1]:\n",
    "            print(\"best hidden accuracy\", hd, accuracy)\n",
    "            best_hidden_pair = (hd, accuracy)\n",
    "\n",
    "    # 4) Refine the first-layer size, keeping the best second-layer size.\n",
    "    hiddens2 = [[300, best_hidden_pair[0][1]], [150, best_hidden_pair[0][1]], [100, best_hidden_pair[0][1]]]\n",
    "    for hd in hiddens2:\n",
    "        accuracy = getBestAccuracy(HIDDENS=hd, learning_rate=best_lr_pair[0], scale=best_scale_pair[0], mnist=mnist)\n",
    "        if accuracy > best_hidden_pair[1]:\n",
    "            print(\"best hidden accuracy\", hd, accuracy)\n",
    "            best_hidden_pair = (hd, accuracy)\n",
    "\n",
    "    print(\"best params are : \", best_lr_pair, best_scale_pair, best_hidden_pair)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 最终参数\n",
    "\n",
    "- 学习率 0.1\n",
    "- 惩罚参数 0.00051\n",
    "- 2 个隐层，[100, 50]"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
