{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 1. 探究隐层对准确率的影响"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import the required packages.\n",
    "import tensorflow as tf\n",
    "# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x;\n",
    "# this notebook assumes TensorFlow 1.x — confirm the installed version.\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting /input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting /input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /input_data\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Load the MNIST dataset (downloaded into data_dir on first run);\n",
    "# one_hot=True encodes each label as a length-10 one-hot vector.\n",
    "data_dir = \"/input_data\"\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fully connected layer helper; output_size = number of units in the layer.\n",
    "# Returns the raw affine output when `active` is None, otherwise active(Wx+b).\n",
    "def addlayer(inputdata, input_size, output_size, active = None):\n",
    "    # Small-stddev truncated-normal init keeps sigmoid units out of saturation.\n",
    "    w = tf.Variable(tf.truncated_normal([input_size, output_size],stddev=0.1))\n",
    "    b = tf.Variable(tf.zeros([output_size]))\n",
    "    logit = tf.matmul(inputdata, w) + b\n",
    "    # PEP 8: compare against None with `is`, not `==`.\n",
    "    if active is None:\n",
    "        return logit\n",
    "    return active(logit)\n",
    "# Cross-entropy loss. y is clipped away from 0 so tf.log never produces\n",
    "# -inf/NaN when a softmax probability underflows; the unclipped version is\n",
    "# what made some high-learning-rate runs collapse to ~0.098 accuracy\n",
    "# (random guessing) in the results below.\n",
    "def CE(y_, y):\n",
    "    ce = tf.reduce_mean(-tf.reduce_sum(y_*tf.log(tf.clip_by_value(y, 1e-10, 1.0)),reduction_indices=[1]))\n",
    "    return ce\n",
    "# Plain gradient-descent training op minimizing `cost`.\n",
    "def GD(cost, learning_rate = 0.5):\n",
    "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)\n",
    "    return train_step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "学习率 隐层神经元 精确度\n",
      "0.1 50 0.9213\n",
      "0.1 150 0.9208\n",
      "0.1 250 0.9184\n",
      "0.1 350 0.9198\n",
      "0.1 450 0.9218\n",
      "0.30000000000000004 50 0.9446\n",
      "0.30000000000000004 150 0.9443\n",
      "0.30000000000000004 250 0.9423\n",
      "0.30000000000000004 350 0.9423\n",
      "0.30000000000000004 450 0.9424\n",
      "0.5000000000000001 50 0.9552\n",
      "0.5000000000000001 150 0.9558\n",
      "0.5000000000000001 250 0.9551\n",
      "0.5000000000000001 350 0.9578\n",
      "0.5000000000000001 450 0.9566\n",
      "0.7000000000000001 50 0.9606\n",
      "0.7000000000000001 150 0.9642\n",
      "0.7000000000000001 250 0.9641\n",
      "0.7000000000000001 350 0.9643\n",
      "0.7000000000000001 450 0.9667\n",
      "0.9000000000000001 50 0.961\n",
      "0.9000000000000001 150 0.9674\n",
      "0.9000000000000001 250 0.9681\n",
      "0.9000000000000001 350 0.9695\n",
      "0.9000000000000001 450 0.098\n"
     ]
    }
   ],
   "source": [
    "# Single hidden layer: grid search over learning rate and layer width.\n",
    "learning_rate = np.arange(0.1,1,0.2)\n",
    "layer1_count = np.arange(50,500,100)\n",
    "print(\"学习率 隐层神经元 精确度\")\n",
    "for lr in learning_rate:\n",
    "    for lc in layer1_count:\n",
    "        # Start each configuration from a fresh graph; without this every\n",
    "        # iteration keeps stacking new placeholders/variables/optimizer ops\n",
    "        # onto the same default graph, leaking memory and slowing each run.\n",
    "        tf.reset_default_graph()\n",
    "        # Forward pass: 784 -> lc (sigmoid) -> 10 (softmax).\n",
    "        x = tf.placeholder(tf.float32,[None, 784])\n",
    "        y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "        h1 = addlayer(x, 784, lc, tf.nn.sigmoid)\n",
    "        y = addlayer(h1, lc,10,tf.nn.softmax)\n",
    "        cost = CE(y_,y)\n",
    "        # Backpropagation / training op.\n",
    "        train_step = GD(cost,lr)\n",
    "        with tf.Session() as sess:\n",
    "            # Initialize all variables of this configuration's graph.\n",
    "            sess.run(tf.global_variables_initializer())\n",
    "            for _ in range(3000):\n",
    "                # One SGD step on a mini-batch of 100 images.\n",
    "                batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "                sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "            # Test-set accuracy for this (lr, lc) configuration.\n",
    "            correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))\n",
    "            accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
    "            print(lr,lc,accuracy.eval({x:mnist.test.images,y_:mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "学习率 隐层神经元1 隐层神经元2 精确度\n",
      "0.5 50 50 0.9591\n",
      "0.5 50 250 0.9544\n",
      "0.5 50 450 0.9495\n",
      "0.5 250 50 0.9535\n",
      "0.5 250 250 0.9526\n",
      "0.5 250 450 0.9536\n",
      "0.5 450 50 0.9519\n",
      "0.5 450 250 0.9515\n",
      "0.5 450 450 0.9505\n",
      "0.7 50 50 0.9593\n",
      "0.7 50 250 0.9604\n",
      "0.7 50 450 0.9569\n",
      "0.7 250 50 0.959\n",
      "0.7 250 250 0.96\n",
      "0.7 250 450 0.9575\n",
      "0.7 450 50 0.959\n",
      "0.7 450 250 0.956\n",
      "0.7 450 450 0.9609\n",
      "0.8999999999999999 50 50 0.9626\n",
      "0.8999999999999999 50 250 0.9624\n",
      "0.8999999999999999 50 450 0.9622\n",
      "0.8999999999999999 250 50 0.9628\n",
      "0.8999999999999999 250 250 0.9604\n",
      "0.8999999999999999 250 450 0.098\n",
      "0.8999999999999999 450 50 0.9653\n",
      "0.8999999999999999 450 250 0.9646\n",
      "0.8999999999999999 450 450 0.9644\n"
     ]
    }
   ],
   "source": [
    "# Two hidden layers: grid search over learning rate and both layer widths.\n",
    "learning_rate = np.arange(0.5,1,0.2)\n",
    "layer1_count = np.arange(50,500,200)\n",
    "layer2_count = np.arange(50,500,200)\n",
    "print(\"学习率 隐层神经元1 隐层神经元2 精确度\")\n",
    "for lr in learning_rate:\n",
    "    for lc1 in layer1_count:\n",
    "        for lc2 in layer2_count:\n",
    "            # Fresh graph per configuration; otherwise ops from all 27 runs\n",
    "            # accumulate on the default graph (memory bloat, slowdown).\n",
    "            tf.reset_default_graph()\n",
    "            # Forward pass: 784 -> lc1 (sigmoid) -> lc2 (sigmoid) -> 10 (softmax).\n",
    "            x = tf.placeholder(tf.float32,[None, 784])\n",
    "            y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "            h1 = addlayer(x, 784, lc1, tf.nn.sigmoid)\n",
    "            h2 = addlayer(h1, lc1, lc2, tf.nn.sigmoid)\n",
    "            y = addlayer(h2, lc2,10,tf.nn.softmax)\n",
    "            cost = CE(y_,y)\n",
    "            # Backpropagation / training op.\n",
    "            train_step = GD(cost,lr)\n",
    "            with tf.Session() as sess:\n",
    "                # Initialize all variables of this configuration's graph.\n",
    "                sess.run(tf.global_variables_initializer())\n",
    "                for _ in range(3000):\n",
    "                    # One SGD step on a mini-batch of 100 images.\n",
    "                    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "                    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "                # Test-set accuracy for this (lr, lc1, lc2) configuration.\n",
    "                correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(y_,1))\n",
    "                accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
    "                print(lr,lc1,lc2,accuracy.eval({x:mnist.test.images,y_:mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "从上述计算结果看出，隐层数量为1和2时，测试集的准确率没有显著的提升，因此减小模型的复杂性，使用单隐层神经网络，\n",
    "此时学习率为0.9，神经元数量为300左右时准确率最高"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
