{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.8.0\n"
     ]
    }
   ],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n",
    "\n",
    "print(tf.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./inputdata/train-images-idx3-ubyte.gz\n",
      "Extracting ./inputdata/train-labels-idx1-ubyte.gz\n",
      "Extracting ./inputdata/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./inputdata/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# Import data\n",
     "# NOTE(review): read_data_sets extracts the four MNIST archives under data_dir\n",
     "# (see this cell's output); presumably it downloads them first when absent -- confirm.\n",
     "data_dir = './inputdata'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 变量赋值\n",
    "def set_global_var(val=[2, 300, 200, 0.6, 0.0001, 'relu', 'Y', 'gd']):\n",
    "    global LAYER1_NODE,BATCH_SIZE,LEARNING_RATE_BASE,LEARNING_RATE_DECAY\n",
    "    global REGULARIZATION_RATE,hiddenLayer,Afun,LosswithReg,Opti,INPUT_NODE,OUTPUT_NODE,TRAINING_STEPS\n",
    "    \n",
    "    hiddenLayer = val[0] #隐藏层数目\n",
    "    LAYER1_NODE = val[1] #应藏层节点数\n",
    "    BATCH_SIZE = val[2] # 每个Batch的大小\n",
    "    LEARNING_RATE_BASE = val[3] # 最开始的学习率\n",
    "    LEARNING_RATE_DECAY = 0.995 # 学习率衰减率\n",
    "    REGULARIZATION_RATE = val[4] # 描述模型复杂度的正则化项在损失函数中的系数\n",
    "    Afun=val[5] #激活函数\n",
    "    LosswithReg=val[6] #是否考虑正则损失\n",
    "    Opti=val[7] #优化函数\n",
    "    \n",
    "    # 定义模型训练的相关常数\n",
    "    INPUT_NODE = 28*28 # 每一张图片都是28*28的\n",
    "    OUTPUT_NODE = 10 # 输出是一 个10分类,0,1,2,...,8,9\n",
    "    TRAINING_STEPS = 5000 # 训练轮数\n",
    "\n",
    "#激活函数选择\n",
    "def get_activation_function(layer_n, af):\n",
    "    if af=='relu':\n",
    "        layer = tf.nn.relu(layer_n)     \n",
    "    if af=='leaky_relu':\n",
    "        layer = tf.nn.leaky_relu(layer_n)\n",
    "    if af=='dropout':\n",
    "        layer = tf.nn.dropout(layer_n,keep_prob)\n",
    "    if af=='sigmoid':\n",
    "        layer = tf.nn.sigmoid(layer_n) \n",
    "    return layer\n",
    "# 定义隐藏层\n",
    "def multilayer_perceptron(x, weight, bias, af='relu',nlayer=0):\n",
    "    # 如果nlayer取0 则不考虑隐藏层\n",
    "    W = tf.Variable(tf.zeros([INPUT_NODE, OUTPUT_NODE]))\n",
    "    b = tf.Variable(tf.zeros([OUTPUT_NODE]))\n",
    "    out_layer= tf.matmul(x, W) + b\n",
    "    if nlayer>=1:\n",
    "        layer1 = tf.add(tf.matmul(x, weight['h1']), bias['h1'])\n",
    "        layer1 = get_activation_function(layer1, af)\n",
    "        if nlayer==1:\n",
    "            out_layer = tf.add(tf.matmul(layer1, weight['out']), bias['out'])\n",
    "    if nlayer>=2:\n",
    "        layer2 = tf.add(tf.matmul(layer1, weight['h2']), bias['h2'])\n",
    "        layer2 = get_activation_function(layer2, af)\n",
    "        if nlayer==2:\n",
    "            out_layer = tf.add(tf.matmul(layer2, weight['out']), bias['out'])\n",
    "    if nlayer>=3:\n",
    "        layer3 = tf.add(tf.matmul(layer2, weight['h3']), bias['h3'])\n",
    "        layer3 = get_activation_function(layer3, af)\n",
    "        out_layer = tf.add(tf.matmul(layer3, weight['out']), bias['out'])\n",
    "        \n",
    "    return out_layer\n",
    "\n",
    "#定义损失函数\n",
    "def get_loss_func(pred_y, y_,weight, nlayer = 0):\n",
    "    cross_entropy =tf.nn.softmax_cross_entropy_with_logits(logits=pred_y, labels=y_)\n",
    "    # 计算在当前batch中所有样例的交叉熵平均值\n",
    "    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
    "    # 计算L2正则化损失函数\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
    "    \n",
    "     # 如果nlayer等于0 则不考虑正则化损失和\n",
    "    out_loss = cross_entropy_mean\n",
    "    \n",
    "    if LosswithReg=='N':\n",
    "        return out_loss\n",
    "     # +1层正则损失\n",
    "    if nlayer>=1:\n",
    "        out_loss += regularizer(weight['h1']) \n",
    "    \n",
    "     # +2层正则损失        \n",
    "    if nlayer>=2:\n",
    "        out_loss += regularizer(weight['h2'])\n",
    "\n",
    "    # +3层正则损失\n",
    "    if nlayer>=3:\n",
    "        out_loss += regularizer(weight['h3']) \n",
    "        \n",
    "    # 总损失=交叉熵损失+正则化损失\n",
    "    return out_loss\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 训练模型\n",
    "# 1. 搭建模型：数据输入、数据label、权值初始化、前向传播、反向传播、更新参数\n",
    "# 2. 运行模型：上面虽然把模型已经搭建好了，但是模型没有真正运行起来\n",
    "\n",
    "def train_DNN(mnist):\n",
    "    start = time.time()\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    global_step = tf.Variable(0, trainable=False)\n",
    "    \n",
    "    # 定义权重变量字典\n",
    "    weight = {\n",
    "        'h1': tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE],stddev = 0.1)),\n",
    "        'h2': tf.Variable(tf.truncated_normal([LAYER1_NODE, LAYER1_NODE],stddev = 0.1)), \n",
    "        'h3': tf.Variable(tf.truncated_normal([LAYER1_NODE, LAYER1_NODE],stddev = 0.1)), \n",
    "        'out': tf.Variable(tf.zeros([LAYER1_NODE, OUTPUT_NODE]))\n",
    "    }\n",
    "    \n",
    "    # 定义偏量变量字典\n",
    "    bias = {\n",
    "        'h1': tf.Variable(tf.zeros([LAYER1_NODE])),\n",
    "        'h2': tf.Variable(tf.zeros([LAYER1_NODE])), \n",
    "        'h3': tf.Variable(tf.zeros([LAYER1_NODE])), \n",
    "        'out': tf.Variable(tf.zeros([OUTPUT_NODE]))\n",
    "    }\n",
    "    \n",
    "    # 模型的输入\n",
    "    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n",
    "    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n",
    "    \n",
    "     # 建立模型\n",
    "    pred_y = multilayer_perceptron(x, weight, bias,Afun,hiddenLayer)\n",
    "    \n",
    "    #获得损失函数\n",
    "    loss = get_loss_func(pred_y, y_, weight, hiddenLayer)\n",
    "    \n",
    "    # 设置指数衰减的学习率\n",
    "        # 设置指数衰减的学习率\n",
    "    learning_rate = tf.train.exponential_decay(\n",
    "        LEARNING_RATE_BASE, # 基础的学习率，随着迭代的进行，更新变量时使用的\n",
    "                            # 学习率在这个基础上递减\n",
    "        global_step,        # 当前迭代的轮数\n",
    "        mnist.train.num_examples / BATCH_SIZE, # 过完所有的训练数据需要的迭代次数\n",
    "        LEARNING_RATE_DECAY # 学习率的衰减速度\n",
    "        )\n",
    "    # 优化算法\n",
    "    if Opti=='gd':\n",
    "        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n",
    "\n",
    "    if Opti=='mom':\n",
    "        train_step = tf.train.MomentumOptimizer(learning_rate,0.001).minimize(loss)\n",
    "\n",
    "    if Opti=='rmsp':\n",
    "        train_step = tf.train.RMSPropOptimizer(learning_rate,0.9,0.1).minimize(loss)\n",
    "    \n",
    "    sess = tf.Session()\n",
    "    init_op = tf.global_variables_initializer()\n",
    "    sess.run(init_op)\n",
    "    \n",
    "        # Train\n",
    "    for _ in range(TRAINING_STEPS):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)\n",
    "        if Afun=='dropout': \n",
    "            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys, keep_prob:0.8})\n",
    "        else:\n",
    "            sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "    \n",
    "    # Test trained model\n",
    "    correct_prediction = tf.equal(tf.argmax(pred_y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                        y_: mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9829\n",
      "\n",
      "Time used is 6 sec.\n"
     ]
    }
   ],
   "source": [
    "import time\n",
    "start = time.time()\n",
    "set_global_var()\n",
    "train_DNN(mnist)\n",
    "\n",
    "end = time.time()\n",
    "print(\"\\nTime used is %d sec.\" % (end-start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
