{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 导入必要模块"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import os\n",
    "import sys\n",
    "\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "FLAGS = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /Users/cuiyue/Desktop/AI/第六周/mnist/train-images-idx3-ubyte.gz\n",
      "Extracting /Users/cuiyue/Desktop/AI/第六周/mnist/train-labels-idx1-ubyte.gz\n",
      "Extracting /Users/cuiyue/Desktop/AI/第六周/mnist/t10k-images-idx3-ubyte.gz\n",
      "Extracting /Users/cuiyue/Desktop/AI/第六周/mnist/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Location of the MNIST .gz archives.  A hardcoded absolute local path is\n",
    "# fragile; allow an MNIST_DATA_DIR environment override and keep the original\n",
    "# path as the fallback so existing runs are unaffected.\n",
    "data_dir = os.environ.get('MNIST_DATA_DIR', '/Users/cuiyue/Desktop/AI/第六周/mnist')\n",
    "# Downloads the four MNIST archives into data_dir if absent, then parses them.\n",
    "# one_hot=True yields 10-dim one-hot labels, as required by\n",
    "# softmax_cross_entropy_with_logits below.\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add one hidden layer, building a two-layer neural network.\n",
    "\n",
    "# Hidden-layer activation functions.\n",
    "# Sigmoid.\n",
    "def sigmoid(x):\n",
    "    \"\"\"Element-wise logistic sigmoid activation.\"\"\"\n",
    "    return tf.nn.sigmoid(x)\n",
    "\n",
    "# ReLU.\n",
    "def relu(x):\n",
    "    \"\"\"Element-wise rectified linear activation: max(x, 0).\"\"\"\n",
    "    return tf.nn.relu(x)\n",
    "\n",
    "# SELU (scaled exponential linear unit).\n",
    "def selu(x):\n",
    "    \"\"\"SELU activation: scale * (x if x >= 0 else alpha * (exp(x) - 1)).\n",
    "\n",
    "    Constants are the fixed-point values from Klambauer et al.,\n",
    "    \"Self-Normalizing Neural Networks\".\n",
    "    \"\"\"\n",
    "    alpha = 1.6732632423543772848170429916717\n",
    "    scale = 1.0507009873554804934193349852946\n",
    "    # Fix: the scope was previously named 'elu' and wrapped only the Python\n",
    "    # constant assignments, so no ops were actually created inside it; build\n",
    "    # the ops inside a correctly named scope instead.\n",
    "    # tf.nn.elu(x) == exp(x) - 1 for x < 0, giving the SELU negative branch.\n",
    "    with tf.name_scope('selu'):\n",
    "        return scale*tf.where(x>=0.0, x, alpha*tf.nn.elu(x))\n",
    "\n",
    "# Swish.\n",
    "def swish(x):\n",
    "    \"\"\"Swish activation: x * sigmoid(x).\"\"\"\n",
    "    return x * tf.nn.sigmoid(x)\n",
    "\n",
    "# Activation selector.\n",
    "def activation(x):\n",
    "    \"\"\"Hidden-layer activation; currently ReLU.\n",
    "\n",
    "    Swap the active return line with one of the commented alternatives\n",
    "    below to experiment with other activations while tuning.\n",
    "    \"\"\"\n",
    "    # return sigmoid(x)\n",
    "    return relu(x)\n",
    "    # return selu(x)\n",
    "    # return swish(x)\n",
    "\n",
    "# Hidden-layer width.\n",
    "l1_units_num = 100 \n",
    "# Layer-1 (hidden layer) parameters.\n",
    "# Input placeholder: batches of flattened 28x28 MNIST images (784 pixels).\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "# MSRA/He initialization: stddev = sqrt(2 / fan_in), suited to ReLU-family activations.\n",
    "w_l1 = tf.Variable(tf.truncated_normal([784, l1_units_num], stddev=np.sqrt(2/784)))\n",
    "# Small positive bias (0.001) keeps ReLU units initially active.\n",
    "b_l1 = tf.Variable(tf.constant(0.001, shape=[l1_units_num]))\n",
    "logits_l1 = tf.matmul(x, w_l1) + b_l1\n",
    "y_l1 = activation(logits_l1)\n",
    "\n",
    "# Layer-2 (output layer) parameters: 10 classes, one per MNIST digit.\n",
    "l2_units_num = 10\n",
    "# MSRA/He initialization again, with fan_in = hidden-layer width.\n",
    "w_l2 = tf.Variable(tf.truncated_normal([l1_units_num, l2_units_num], stddev = np.sqrt(2/l1_units_num)))\n",
    "# Small positive bias (0.001) for the output layer as well.\n",
    "b_l2 = tf.Variable(tf.constant(0.001, shape = [l2_units_num]))\n",
    "# Raw class scores; softmax is applied later inside the loss op.\n",
    "logits_l2 = tf.matmul(y_l1,  w_l2) + b_l2\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Loss function and optimizer.\n",
    "# Target labels (one-hot, 10 classes).\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "# Cross-entropy loss computed from the raw logits; the op applies softmax\n",
    "# internally, which is more numerically stable than softmax + log separately.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits_l2))\n",
    "# L2 regularization over both weight matrices (biases deliberately excluded).\n",
    "l2_norm = tf.nn.l2_loss(w_l1) + tf.nn.l2_loss(w_l2)\n",
    "\n",
    "# Total loss = cross-entropy + weighted L2 penalty (weight 4e-5).\n",
    "cost = cross_entropy + 4e-5*l2_norm\n",
    "# Learning-rate schedule: decay by a factor of 0.575 once per epoch.\n",
    "init_learning_rate = tf.placeholder(tf.float32)\n",
    "# 60000 = MNIST training-set size, so steps-per-epoch adapts to the batch size.\n",
    "epoch_steps = tf.to_int64(tf.div(60000, tf.shape(x)[0]))\n",
    "global_step = tf.train.get_or_create_global_step()\n",
    "current_epoch = global_step//epoch_steps\n",
    "decay_times = current_epoch\n",
    "current_learning_rate = tf.multiply(init_learning_rate,\n",
    "                                    tf.pow(0.575, tf.to_float(decay_times)))\n",
    "# Optimizer.  Fix: the original constructed a second AdamOptimizer for\n",
    "# train_step, leaving this one and its computed gradients unused and\n",
    "# duplicating Adam's slot variables; reuse the single optimizer via\n",
    "# apply_gradients instead (mathematically identical training op).\n",
    "optimizer = tf.train.AdamOptimizer(current_learning_rate)\n",
    "# Gradients of the total loss w.r.t. all trainable variables.\n",
    "gradients = optimizer.compute_gradients(cost)\n",
    "# Training op: applies the gradients and increments global_step.\n",
    "train_step = optimizer.apply_gradients(gradients, global_step=global_step)\n",
    "\n",
    "# Prediction and accuracy ops.\n",
    "correct_prediction = tf.equal(tf.argmax(logits_l2, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "# Create a session and initialize all graph variables.\n",
    "sess = tf.InteractiveSession()\n",
    "tf.global_variables_initializer().run()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, cross_entropy_loss: 0.344835, l2_loss: 266.196472, total loss: 0.355483\n",
      "0.9282\n",
      "step 200, cross_entropy_loss: 0.083058, l2_loss: 384.213348, total loss: 0.098427\n",
      "0.9411\n",
      "step 300, cross_entropy_loss: 0.301574, l2_loss: 478.455231, total loss: 0.320712\n",
      "0.951\n",
      "step 400, cross_entropy_loss: 0.066636, l2_loss: 537.191406, total loss: 0.088124\n",
      "0.9533\n",
      "step 500, cross_entropy_loss: 0.151035, l2_loss: 606.023926, total loss: 0.175276\n",
      "0.9597\n",
      "step 600, cross_entropy_loss: 0.154607, l2_loss: 667.974304, total loss: 0.181326\n",
      "0.9595\n",
      "step 700, cross_entropy_loss: 0.057026, l2_loss: 665.052246, total loss: 0.083628\n",
      "0.9692\n",
      "step 800, cross_entropy_loss: 0.147302, l2_loss: 662.027893, total loss: 0.173783\n",
      "0.9673\n",
      "step 900, cross_entropy_loss: 0.079350, l2_loss: 668.588013, total loss: 0.106094\n",
      "0.9731\n",
      "step 1000, cross_entropy_loss: 0.124740, l2_loss: 676.957214, total loss: 0.151818\n",
      "0.9628\n",
      "step 1100, cross_entropy_loss: 0.060293, l2_loss: 693.851074, total loss: 0.088047\n",
      "0.9722\n",
      "step 1200, cross_entropy_loss: 0.043523, l2_loss: 690.838135, total loss: 0.071157\n",
      "0.975\n",
      "step 1300, cross_entropy_loss: 0.036030, l2_loss: 684.727478, total loss: 0.063419\n",
      "0.9767\n",
      "step 1400, cross_entropy_loss: 0.039186, l2_loss: 677.805298, total loss: 0.066299\n",
      "0.9789\n",
      "step 1500, cross_entropy_loss: 0.013754, l2_loss: 673.133179, total loss: 0.040680\n",
      "0.9779\n",
      "step 1600, cross_entropy_loss: 0.031083, l2_loss: 669.401306, total loss: 0.057859\n",
      "0.9745\n",
      "step 1700, cross_entropy_loss: 0.034658, l2_loss: 666.275574, total loss: 0.061309\n",
      "0.9773\n",
      "step 1800, cross_entropy_loss: 0.016423, l2_loss: 664.958740, total loss: 0.043021\n",
      "0.9767\n",
      "step 1900, cross_entropy_loss: 0.017479, l2_loss: 659.979248, total loss: 0.043878\n",
      "0.9791\n",
      "step 2000, cross_entropy_loss: 0.017028, l2_loss: 653.834534, total loss: 0.043181\n",
      "0.9803\n",
      "step 2100, cross_entropy_loss: 0.040121, l2_loss: 647.951599, total loss: 0.066039\n",
      "0.9798\n",
      "step 2200, cross_entropy_loss: 0.029328, l2_loss: 644.815552, total loss: 0.055121\n",
      "0.9794\n",
      "step 2300, cross_entropy_loss: 0.030797, l2_loss: 640.700806, total loss: 0.056425\n",
      "0.9803\n",
      "step 2400, cross_entropy_loss: 0.024996, l2_loss: 635.259338, total loss: 0.050407\n",
      "0.9793\n",
      "step 2500, cross_entropy_loss: 0.020720, l2_loss: 630.900085, total loss: 0.045956\n",
      "0.9804\n",
      "step 2600, cross_entropy_loss: 0.038485, l2_loss: 626.779175, total loss: 0.063556\n",
      "0.9809\n",
      "step 2700, cross_entropy_loss: 0.046182, l2_loss: 623.267395, total loss: 0.071113\n",
      "0.9806\n",
      "step 2800, cross_entropy_loss: 0.018390, l2_loss: 619.189697, total loss: 0.043158\n",
      "0.98\n",
      "step 2900, cross_entropy_loss: 0.017929, l2_loss: 615.166382, total loss: 0.042535\n",
      "0.9807\n",
      "step 3000, cross_entropy_loss: 0.011884, l2_loss: 611.818665, total loss: 0.036356\n",
      "0.9805\n"
     ]
    }
   ],
   "source": [
    "# Train the model.\n",
    "# Base learning rate is loop-invariant (per-epoch decay happens in the graph),\n",
    "# so set it once instead of reassigning every iteration.\n",
    "lr = 1e-2\n",
    "for step in range(3000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    # Run one training step; also fetch the loss components for reporting.\n",
    "    # (The unused current_learning_rate fetch from the original was removed.)\n",
    "    _, cross_entropy_loss, l2_loss_value, total_loss_value = sess.run(\n",
    "        [train_step, cross_entropy, l2_norm, cost],\n",
    "        feed_dict={x: batch_xs, y_: batch_ys, init_learning_rate: lr})\n",
    "    # Report losses and test-set accuracy every 100 steps.\n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, cross_entropy_loss: %f, l2_loss: %f, total loss: %f' % (step+1, cross_entropy_loss, l2_loss_value, total_loss_value))\n",
    "        print(sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
