{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
     "\"\"\"A very simple MNIST classifier.\n",
     "See extensive documentation at\n",
     "https://www.tensorflow.org/get_started/mnist/beginners\n",
     "\"\"\"\n",
     "from __future__ import absolute_import\n",
     "from __future__ import division\n",
     "from __future__ import print_function\n",
     "\n",
     "import argparse\n",
     "import sys\n",
     "\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "\n",
     "import tensorflow as tf\n",
     "\n",
     "FLAGS = None  # NOTE(review): argparse, sys and FLAGS are unused leftovers from the original TF tutorial script; nothing in this notebook reads them\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1041,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n",
      "Extracting ../第六周\\train-images-idx3-ubyte.gz\n",
      "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n",
      "Extracting ../第六周\\train-labels-idx1-ubyte.gz\n",
      "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n",
      "Extracting ../第六周\\t10k-images-idx3-ubyte.gz\n",
      "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n",
      "Extracting ../第六周\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# Import data: downloads the MNIST archives into data_dir if absent, then loads them.\n",
     "# data_dir = '/tmp/tensorflow/mnist/input_data'\n",
     "data_dir = '../第六周'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)  # one_hot=True: labels are length-10 one-hot vectors"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 三层神经网络模型。输入输出+单隐层,输入层784个神经元，隐层392个神经元，输出层10个神经元。激活函数为sigmoid\n",
    "#### 权重w用高斯分布初始化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1201,
   "metadata": {},
   "outputs": [],
   "source": [
     "### Input layer\n",
     "x_1 = tf.placeholder(tf.float32, [None, 784])  # each 28*28 image is flattened to 784 pixels; None = batch dimension\n",
     "w_1 = tf.Variable(tf.random_normal([784,392],mean=-0.007, stddev=0.052))  # layer-1 weights, Gaussian init (mean -0.007, stddev 0.052)\n",
     "b_1 = tf.Variable(tf.zeros([392]))              # layer-1 biases, initialized to zero\n",
     "logits_1 = tf.matmul(x_1,w_1) + b_1           # layer-1 pre-activation (logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1202,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Hidden layer (392 units)\n",
     "x_2=tf.nn.sigmoid(logits_1)                 # sigmoid of layer-1 logits is the hidden-layer input\n",
     "w_2 = tf.Variable(tf.random_normal([392,10],mean=0.004, stddev=0.042))  # layer-2 weights, Gaussian init (mean 0.004, stddev 0.042); original comment claimed mean 0.015, which did not match the code\n",
     "b_2 = tf.Variable(tf.zeros([10]))              # layer-2 biases, initialized to zero\n",
     "logits_2 = tf.matmul(x_2,w_2) + b_2          # layer-2 pre-activation (logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1203,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Output layer\n",
     "y=tf.nn.sigmoid(logits_2)        # sigmoid of layer-2 logits; only used via argmax at evaluation time, and sigmoid is monotonic, so argmax(y) == argmax(logits_2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "定义我们的ground truth 占位符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1204,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Define loss and optimizer\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])      # placeholder for the ground-truth one-hot labels"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 交叉熵损失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1205,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Softmax cross-entropy on the raw logits (logits_2), averaged over the batch.\n",
     "# Note: the loss is NOT computed from y (the sigmoid output); softmax_cross_entropy_with_logits applies softmax internally.\n",
     "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits_2))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 损失函数+正则项,构成最终的损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1206,
   "metadata": {},
   "outputs": [],
   "source": [
     "lamda=0.0001  # L2 regularization strength\n",
     "regularizer = tf.contrib.layers.l2_regularizer(lamda)  # L2 regularizer\n",
     "regularization = regularizer(w_1) + regularizer(w_2)  # penalize both weight matrices (biases are not regularized)\n",
     "loss=cross_entropy+regularization  # total objective = cross-entropy + L2 penalty"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 生成一个训练step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1207,
   "metadata": {},
   "outputs": [],
   "source": [
     "global_step = tf.Variable(0)        # training-step counter, incremented by the optimizer\n",
     "learning_rate = tf.train.exponential_decay(1.1, global_step, 300, 0.99, staircase=True)     # exponentially decaying learning rate: starts at 1.1, multiplied by 0.99 every 300 steps (original comment claimed 0.96, which did not match the code)\n",
     "\n",
     "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)   # plain gradient-descent optimizer minimizing the regularized loss\n",
     "\n",
     "sess = tf.Session()\n",
     "init_op = tf.global_variables_initializer()\n",
     "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在这里我们仍然调用系统提供的读取数据，为我们取得一个batch。\n",
     "然后我们运行30000个step（每个step用一个200条的mini-batch，约合109个epoch），对权重进行优化。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1208,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
     "# Train: mini-batch gradient descent, 30000 steps of 200 examples each\n",
     "for i in range(30000):\n",
     "    batch_xs, batch_ys = mnist.train.next_batch(200)    # draw a batch of 200 examples from the training set\n",
     "    sess.run(train_step, feed_dict={x_1: batch_xs, y_: batch_ys})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 这里把最终的w_1,w_2值打印出来，看优化方向向哪里靠近，以便重复调整w的初始值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1209,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "优化后w_1均值-0.005709309130907059,标准差0.047780510038137436\n",
      "优化后w_2均值0.00042038262472487986,标准差0.389710009098053\n"
     ]
    }
   ],
   "source": [
     "# Print the post-training mean/std of w_1 and w_2, to guide re-tuning of the Gaussian init parameters.\n",
     "w_1_value,w_2_value = sess.run([w_1,w_2])\n",
     "print(\"优化后w_1均值{},标准差{}\".format(w_1_value.mean(),w_1_value.std()))\n",
     "print(\"优化后w_2均值{},标准差{}\".format(w_2_value.mean(),w_2_value.std()))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "验证我们模型在测试数据上的准确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1210,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9808\n"
     ]
    }
   ],
   "source": [
     "# Test trained model\n",
     "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))    # argmax over axis 1 gives predicted / true class index; equal compares them element-wise\n",
     "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # mean of the 0/1 correctness vector = accuracy\n",
     "print(sess.run(accuracy, feed_dict={x_1: mnist.test.images,\n",
     "                                      y_: mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 最终准确率为98.08%。单隐层,392个神经元,权重w1,w2均初始化为高斯分布，L2正则。最后构造一个按指数衰减的学习率器，学习3W次，每次批量200条数据做梯度下降"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
