{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "import numpy as np\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Activation-function choices and weight-initialization schemes.\n",
    "def swish(x):\n",
    "    \"\"\"Swish activation: x * sigmoid(x).\"\"\"\n",
    "    return x * tf.nn.sigmoid(x)\n",
    "def jihuo(x):\n",
    "    \"\"\"Activation used by the hidden layer; alternatives left commented out for experiments.\"\"\"\n",
    "    #return tf.nn.sigmoid(x)\n",
    "    #return tf.nn.relu(x)\n",
    "    #return tf.nn.selu(x)\n",
    "    return swish(x)\n",
    "def chushihua(shape, std_dev = 0.1):\n",
    "    \"\"\"Weight initializer: truncated normal with the given standard deviation.\n",
    "\n",
    "    Callers pass std_dev = sqrt(2/fan_in) for MSRA/He-style initialization.\n",
    "    \"\"\"\n",
    "    return tf.truncated_normal(shape, stddev = std_dev)\n",
    "    #return tf.zeros(shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./tensorflowdata\\train-images-idx3-ubyte.gz\n",
      "Extracting ./tensorflowdata\\train-labels-idx1-ubyte.gz\n",
      "Extracting ./tensorflowdata\\t10k-images-idx3-ubyte.gz\n",
      "Extracting ./tensorflowdata\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "data_dir = './tensorflowdata'\n",
    "# Downloads (if absent) and extracts the four MNIST archives into data_dir;\n",
    "# labels come back one-hot encoded (10-dim vectors).\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "#mnist = tf.keras.datasets.mnist.load_data(path='./tensorflowdata')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the model: a one-hidden-layer MLP for MNIST (784 -> L1_units_count -> 10).\n",
    "init_LR = tf.placeholder(tf.float32)\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "\n",
    "# Steps per epoch given the runtime batch size; 60000 is the MNIST training-set size.\n",
    "epoch_steps = tf.to_int64(tf.div(60000, tf.shape(x)[0]))\n",
    "global_step = tf.train.get_or_create_global_step()\n",
    "current_epoch = global_step//epoch_steps\n",
    "decay_times = current_epoch\n",
    "# Exponential schedule: multiply the base LR by 0.575 once per completed epoch.\n",
    "# NOTE(review): this only decays if global_step is actually incremented by the\n",
    "# training op defined in the optimizer cell — confirm apply_gradients receives it.\n",
    "current_LR = tf.multiply(init_LR,tf.pow(0.575,tf.to_float(decay_times))) #0.575\n",
    "\n",
    "L1_units_count = 500  #100\n",
    "W1 = tf.Variable(chushihua([784,L1_units_count],std_dev = np.sqrt(2/784))) #np.sqrt(2/nin): MSRA (He) initialization\n",
    "b1 = tf.Variable(tf.constant(0.001,shape = [L1_units_count]))\n",
    "logits1 = tf.matmul(x, W1) + b1\n",
    "output1 = jihuo(logits1)\n",
    "\n",
    "# Output layer: raw logits (softmax is applied inside the loss for stability).\n",
    "L2_units_count = 10\n",
    "W2 = tf.Variable(chushihua([L1_units_count,L2_units_count],std_dev = np.sqrt(2/L1_units_count)))\n",
    "b2 = tf.Variable(tf.constant(0.001,shape = [L2_units_count]))\n",
    "logits2 = tf.matmul(output1, W2) + b2\n",
    "\n",
    "y = logits2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define loss and optimizer\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "# Loss-function options:\n",
    "# The raw formulation of cross-entropy, tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),reduction_indices=[1])),\n",
    "# can be numerically unstable. So here we use tf.nn.softmax_cross_entropy_with_logits on the raw outputs of 'y', \n",
    "# and then average across the batch.\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "# L2 weight decay on the two weight matrices (biases excluded, as is conventional).\n",
    "l2_loss = tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2)\n",
    "total_loss = cross_entropy + 4e-5*l2_loss\n",
    "optimizer = tf.train.AdamOptimizer(current_LR)\n",
    "#optimizer = tf.train.RMSPropOptimizer(current_LR)\n",
    "gradients = optimizer.compute_gradients(total_loss)\n",
    "# BUG FIX: pass global_step so apply_gradients increments it after each update.\n",
    "# Without it, global_step stays 0 forever, current_epoch never advances, and the\n",
    "# per-epoch 0.575 learning-rate decay built into current_LR never takes effect.\n",
    "train_step = optimizer.apply_gradients(gradients, global_step=global_step)\n",
    "#train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda3\\envs\\tensorflow\\lib\\site-packages\\tensorflow\\python\\client\\session.py:1711: UserWarning: An interactive session is already active. This can cause out-of-memory errors in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s).\n",
      "  warnings.warn('An interactive session is already active. This can '\n"
     ]
    }
   ],
   "source": [
    "# Create a session and initialize all graph variables.\n",
    "# NOTE(review): re-running this cell leaks the previous InteractiveSession\n",
    "# (see the UserWarning captured in this cell's output); call sess.close()\n",
    "# before re-creating, or restart the kernel.\n",
    "sess = tf.InteractiveSession()\n",
    "tf.global_variables_initializer().run()\n",
    "#init_op = tf.global_variables_initializer()\n",
    "#sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 0.159452, L2 loss: 732.489990, total loss: 0.188752\n",
      "0.9691\n",
      "step 200, entropy loss: 0.083822, L2 loss: 786.026062, total loss: 0.115263\n",
      "0.97\n",
      "step 300, entropy loss: 0.062238, L2 loss: 878.849976, total loss: 0.097392\n",
      "0.9701\n",
      "step 400, entropy loss: 0.050352, L2 loss: 929.948303, total loss: 0.087550\n",
      "0.9772\n",
      "step 500, entropy loss: 0.031077, L2 loss: 998.670227, total loss: 0.071024\n",
      "0.9781\n",
      "step 600, entropy loss: 0.031096, L2 loss: 1060.957886, total loss: 0.073534\n",
      "0.976\n",
      "step 700, entropy loss: 0.027612, L2 loss: 1169.096313, total loss: 0.074376\n",
      "0.9785\n",
      "step 800, entropy loss: 0.054408, L2 loss: 1193.770264, total loss: 0.102159\n",
      "0.9731\n",
      "step 900, entropy loss: 0.040905, L2 loss: 1269.549194, total loss: 0.091687\n",
      "0.9775\n",
      "step 1000, entropy loss: 0.020473, L2 loss: 1350.478882, total loss: 0.074493\n",
      "0.9809\n",
      "step 1100, entropy loss: 0.036162, L2 loss: 1395.404785, total loss: 0.091978\n",
      "0.977\n",
      "step 1200, entropy loss: 0.051917, L2 loss: 1466.949951, total loss: 0.110595\n",
      "0.9754\n",
      "step 1300, entropy loss: 0.047490, L2 loss: 1501.483276, total loss: 0.107550\n",
      "0.9752\n",
      "step 1400, entropy loss: 0.032549, L2 loss: 1568.205566, total loss: 0.095278\n",
      "0.9757\n",
      "step 1500, entropy loss: 0.038984, L2 loss: 1579.990234, total loss: 0.102183\n",
      "0.9717\n",
      "step 1600, entropy loss: 0.025480, L2 loss: 1494.025879, total loss: 0.085241\n",
      "0.9757\n",
      "step 1700, entropy loss: 0.034564, L2 loss: 1429.153320, total loss: 0.091731\n",
      "0.9785\n",
      "step 1800, entropy loss: 0.016964, L2 loss: 1380.752319, total loss: 0.072194\n",
      "0.9787\n",
      "step 1900, entropy loss: 0.009371, L2 loss: 1353.015625, total loss: 0.063492\n",
      "0.977\n",
      "step 2000, entropy loss: 0.067825, L2 loss: 1401.146973, total loss: 0.123871\n",
      "0.9752\n",
      "step 2100, entropy loss: 0.010564, L2 loss: 1420.779663, total loss: 0.067396\n",
      "0.9767\n",
      "step 2200, entropy loss: 0.024746, L2 loss: 1430.663086, total loss: 0.081972\n",
      "0.9767\n",
      "step 2300, entropy loss: 0.043521, L2 loss: 1406.111938, total loss: 0.099765\n",
      "0.975\n",
      "step 2400, entropy loss: 0.014397, L2 loss: 1376.915649, total loss: 0.069473\n",
      "0.9757\n",
      "step 2500, entropy loss: 0.050970, L2 loss: 1409.570312, total loss: 0.107353\n",
      "0.9769\n",
      "step 2600, entropy loss: 0.036306, L2 loss: 1404.922119, total loss: 0.092503\n",
      "0.9738\n",
      "step 2700, entropy loss: 0.028139, L2 loss: 1363.404175, total loss: 0.082675\n",
      "0.9774\n",
      "step 2800, entropy loss: 0.025936, L2 loss: 1417.648926, total loss: 0.082642\n",
      "0.9752\n",
      "step 2900, entropy loss: 0.015430, L2 loss: 1345.050903, total loss: 0.069233\n",
      "0.9788\n",
      "step 3000, entropy loss: 0.040429, L2 loss: 1358.054321, total loss: 0.094751\n",
      "0.9771\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "for step in range(3000):\n",
    "    # Mini-batch of 500 images (flattened 784-dim) with one-hot labels.\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(500)\n",
    "    lr = 1e-2  # base learning rate fed into the decay schedule via init_LR\n",
    "    _, loss, l2_loss_value, total_loss_value, current_lr_value = \\\n",
    "    sess.run([train_step, cross_entropy, l2_loss, total_loss, current_LR], feed_dict={x: batch_xs, y_: batch_ys,init_LR:lr})\n",
    "    # Every 100 steps: report losses on the batch and accuracy on the full test set.\n",
    "    if (step+1) % 100 == 0:\n",
    "        print('step %d, entropy loss: %f, L2 loss: %f, total loss: %f' % (step+1,loss,l2_loss_value,total_loss_value))\n",
    "        #print(sess.run(accuracy,feed_dict={x: batch_xs, y: batch_ys}))\n",
    "        print(sess.run(accuracy,feed_dict={x: mnist.test.images,y_: mnist.test.labels}))\n",
    "        #print(current_lr_value)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "swish-100L1C-Adam  -400batch-3000steps:acc=0.9775<br>\n",
    "selu -100L1C-Adam  -400batch-3000steps:acc=0.979<br>\n",
    "swish-100L1C-SGD0.5-400batch-3000steps:acc=0.9774<br>\n",
    "swish-100L1C-Adam  -500batch-3000steps:acc=0.9775<br>\n",
    "swish-100L1C-Adam  -500batch-3000steps:acc=0.9775<br>\n",
    "swish-500L1C-Adam  -500batch-1000steps:acc=0.9809<br>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
