{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/anaconda3/envs/tensorflow/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5\n",
      "  return f(*args, **kwds)\n"
     ]
    }
   ],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Download (if not cached) and load MNIST with one-hot encoded labels.\n",
    "# NOTE(review): hardcoded /tmp path — fine for a scratch run, but a\n",
    "# configurable data directory would be more portable.\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# (Retained for reference; currently unused.)\n",
    "# Helper that would add a fully-connected layer. Parameters:\n",
    "#   1. InputData:   tensor holding the layer input\n",
    "#   2. inputSize:   number of input features\n",
    "#   3. outSize:     number of output units\n",
    "#   4. initialBias: initial value added to the zero bias vector\n",
    "#   5. active:      optional activation function; None returns the raw output\n",
    "# Returns the layer's output tensor.\n",
    "\n",
    "# def newlayer(InputData, inputSize, outSize, initialBias, active=None):\n",
    "#     W = tf.Variable(tf.zeros([inputSize,outSize]))\n",
    "#     Bias = tf.Variable(tf.zeros([1,outSize])+initialBias)\n",
    "#     OutputData = tf.matmul(InputData,W)+Bias\n",
    "#     if active == None:\n",
    "#         return OutputData\n",
    "#     else:\n",
    "#         return active(OutputData)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input placeholder: flattened 28x28 MNIST images (784 features per example).\n",
    "x = tf.placeholder(tf.float32, [None,784])\n",
    "inputSize1 = 784\n",
    "\n",
    "# First hidden layer: 784 -> 240 with sigmoid activation.\n",
    "# Number of hidden units (the author found 150-250 to work well).\n",
    "outSize1 = 240  #150-250  \n",
    "# Weights start at zero (the author reports this outperformed random_normal).\n",
    "# NOTE(review): zero init normally makes hidden units symmetric; presumably\n",
    "# the random init of w2 in the next cell breaks that symmetry — confirm.\n",
    "w1 = tf.Variable(tf.zeros([inputSize1,outSize1]))\n",
    "# Small positive bias, chosen by experiment.\n",
    "init_b1 = 0.01\n",
    "b1 = tf.Variable(tf.zeros([1,outSize1])+init_b1)\n",
    "# Sigmoid activation for this layer's output.\n",
    "y1 = tf.nn.sigmoid(tf.matmul(x,w1) + b1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Second layer (used as the output layer): maps the 240 hidden units to the\n",
    "# 10 digit classes. Its input is the first hidden layer's output.\n",
    "inputSize2 = outSize1\n",
    "\n",
    "# One output unit per class.\n",
    "outSize2 = 10\n",
    "# Random-normal init; with w1 starting at zero this is the only source of\n",
    "# asymmetry between units at step 0.\n",
    "w2 = tf.Variable(tf.random_normal([inputSize2, outSize2]))\n",
    "init_b2 = 0\n",
    "b2 = tf.Variable(tf.zeros([1,outSize2])+init_b2)\n",
    "# Raw logits; softmax is applied inside the loss op, not here.\n",
    "logits = tf.matmul(y1,w2)+b2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# (Retained but disabled.) Experiment: a third layer taking y2 as input.\n",
    "# NOTE(review): the active code never defines y2, so this cannot be\n",
    "# re-enabled as-is without also restoring a second hidden layer.\n",
    "# #define the output layer \n",
    "# #the input was the output data from the first hidden layer\n",
    "# inputSize3 = outSize2\n",
    "\n",
    "# #As this layer was used as the output layer, the cell number should be the same as the output\n",
    "# outSize3 = 10\n",
    "# w3 = tf.Variable(tf.random_normal([inputSize3, outSize3]))\n",
    "# init_b3 = 0.01\n",
    "# b3 = tf.Variable(tf.zeros([1,outSize3])+init_b3)\n",
    "# #here is the logits\n",
    "# logits = tf.matmul(y2,w3)+b3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# (Retained but disabled.) Alternative construction of the input layer via\n",
    "# the commented-out newlayer() helper defined earlier.\n",
    "# define the Input Layer \n",
    "# 1, define the input placeholder\n",
    "# x = tf.placeholder(tf.float32, [None, 784])\n",
    "# #Use the newlayer function to create a Input Layer\n",
    "# Inputlayer_outSize = 100\n",
    "# #Out_Inputlayer\n",
    "# y = newlayer(x,784,Inputlayer_outSize,0,active=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# (Retained but disabled.) First hidden layer built with newlayer(); the\n",
    "# input layer's output would feed this layer.\n",
    "# NOTE(review): the call references Out_Inputlayer, but the previous disabled\n",
    "# cell assigned its result to y — the dead code is internally inconsistent.\n",
    "# Hiddenlayer1_outSize = 50\n",
    "# #Out_Hiddenlayer1\n",
    "# y = newlayer(Out_Inputlayer,Inputlayer_outSize,Hiddenlayer1_outSize,0,active=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# (Retained but disabled.) Second hidden layer built with newlayer();\n",
    "# references Out_Hiddenlayer1, which the dead code above never defines.\n",
    "# #Add Hidden Layers\n",
    "# #The Output data of the Input Layer was used as the Input Data for the hidden Layers\n",
    "# Hiddenlayer2_outSize = 10\n",
    "# y = newlayer(Out_Hiddenlayer1,Hiddenlayer1_outSize,Hiddenlayer2_outSize,0,active=None)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-10-d8f30af4c911>:13: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See tf.nn.softmax_cross_entropy_with_logits_v2.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "#define the loss function\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "#Use the softmax as the activaiton funtion at the ouptut\n",
    "# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))\n",
    "#Use the sogmiod as the activation function \n",
    "# loss = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(multi_class_labels=y_, logits=logits))\n",
    "\n",
    "#Try to add the regulerized items\n",
    "Lambda_Param1 = 1e-5\n",
    "Lambda_Param2 = 1e-5\n",
    "regularizers = Lambda_Param1*tf.nn.l2_loss(w1) + Lambda_Param2*tf.nn.l2_loss(w2)\n",
    "\n",
    "loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits)+regularizers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimizer / training op.\n",
    "# Learning-rate notes from experimentation: after adding a hidden layer,\n",
    "# a large rate keeps the loss from converging, while a small rate needs\n",
    "# more training cycles. Adam at 1e-3 was the chosen compromise.\n",
    "# train_step = tf.train.GradientDescentOptimizer(0.3).minimize(loss)\n",
    "train_step = tf.train.AdamOptimizer(1e-3).minimize(loss) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the session and initialize all variables defined in the graph.\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "# Run the initializer op before any training step.\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accuracy: fraction of examples whose top logit matches the label.\n",
    "# Taking argmax directly on the logits is fine — softmax is monotonic, so\n",
    "# argmax(logits) == argmax(softmax(logits)).\n",
    "correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step: 0, Cross_entropy: 11.168238, Train_Accuracy: 0.100000, Test_Accuracy: 0.084800\n",
      "Step: 1000, Cross_entropy: 0.434526, Train_Accuracy: 0.920000, Test_Accuracy: 0.921300\n",
      "Step: 2000, Cross_entropy: 0.191273, Train_Accuracy: 0.950000, Test_Accuracy: 0.936600\n",
      "Step: 3000, Cross_entropy: 0.178386, Train_Accuracy: 0.940000, Test_Accuracy: 0.952000\n",
      "Step: 4000, Cross_entropy: 0.107146, Train_Accuracy: 0.980000, Test_Accuracy: 0.962900\n",
      "Step: 5000, Cross_entropy: 0.057817, Train_Accuracy: 0.990000, Test_Accuracy: 0.967700\n",
      "Step: 6000, Cross_entropy: 0.094005, Train_Accuracy: 0.980000, Test_Accuracy: 0.971200\n",
      "Step: 7000, Cross_entropy: 0.102525, Train_Accuracy: 0.980000, Test_Accuracy: 0.972100\n",
      "Step: 8000, Cross_entropy: 0.043954, Train_Accuracy: 1.000000, Test_Accuracy: 0.975200\n",
      "Step: 9000, Cross_entropy: 0.031588, Train_Accuracy: 1.000000, Test_Accuracy: 0.976500\n",
      "Step: 10000, Cross_entropy: 0.032417, Train_Accuracy: 1.000000, Test_Accuracy: 0.976500\n",
      "Step: 11000, Cross_entropy: 0.026682, Train_Accuracy: 1.000000, Test_Accuracy: 0.978800\n",
      "Step: 12000, Cross_entropy: 0.032678, Train_Accuracy: 1.000000, Test_Accuracy: 0.977600\n",
      "Step: 13000, Cross_entropy: 0.030661, Train_Accuracy: 1.000000, Test_Accuracy: 0.979200\n",
      "Step: 14000, Cross_entropy: 0.024815, Train_Accuracy: 1.000000, Test_Accuracy: 0.980600\n",
      "Step: 15000, Cross_entropy: 0.021634, Train_Accuracy: 1.000000, Test_Accuracy: 0.979000\n",
      "Step: 16000, Cross_entropy: 0.026459, Train_Accuracy: 1.000000, Test_Accuracy: 0.979600\n",
      "Step: 17000, Cross_entropy: 0.027393, Train_Accuracy: 1.000000, Test_Accuracy: 0.979000\n",
      "Step: 18000, Cross_entropy: 0.026652, Train_Accuracy: 1.000000, Test_Accuracy: 0.980400\n",
      "Step: 19000, Cross_entropy: 0.025589, Train_Accuracy: 1.000000, Test_Accuracy: 0.980500\n",
      "Step: 20000, Cross_entropy: 0.024610, Train_Accuracy: 1.000000, Test_Accuracy: 0.980100\n",
      "Step: 21000, Cross_entropy: 0.025431, Train_Accuracy: 1.000000, Test_Accuracy: 0.980100\n",
      "Step: 22000, Cross_entropy: 0.032816, Train_Accuracy: 0.990000, Test_Accuracy: 0.978100\n",
      "Step: 23000, Cross_entropy: 0.028921, Train_Accuracy: 1.000000, Test_Accuracy: 0.980500\n",
      "Step: 24000, Cross_entropy: 0.023815, Train_Accuracy: 1.000000, Test_Accuracy: 0.980100\n",
      "Step: 25000, Cross_entropy: 0.022605, Train_Accuracy: 1.000000, Test_Accuracy: 0.980500\n",
      "Step: 26000, Cross_entropy: 0.027690, Train_Accuracy: 1.000000, Test_Accuracy: 0.981000\n",
      "Step: 27000, Cross_entropy: 0.022109, Train_Accuracy: 1.000000, Test_Accuracy: 0.981800\n",
      "Step: 28000, Cross_entropy: 0.027776, Train_Accuracy: 1.000000, Test_Accuracy: 0.978300\n",
      "Step: 29000, Cross_entropy: 0.021161, Train_Accuracy: 1.000000, Test_Accuracy: 0.981200\n",
      "Step: 30000, Cross_entropy: 0.021759, Train_Accuracy: 1.000000, Test_Accuracy: 0.979200\n",
      "Step: 31000, Cross_entropy: 0.023796, Train_Accuracy: 1.000000, Test_Accuracy: 0.981800\n",
      "Step: 32000, Cross_entropy: 0.023429, Train_Accuracy: 1.000000, Test_Accuracy: 0.981500\n",
      "Step: 33000, Cross_entropy: 0.022678, Train_Accuracy: 1.000000, Test_Accuracy: 0.980800\n",
      "Step: 34000, Cross_entropy: 0.019779, Train_Accuracy: 1.000000, Test_Accuracy: 0.980500\n",
      "Step: 35000, Cross_entropy: 0.023179, Train_Accuracy: 1.000000, Test_Accuracy: 0.980700\n",
      "Step: 36000, Cross_entropy: 0.023549, Train_Accuracy: 1.000000, Test_Accuracy: 0.981200\n",
      "Step: 37000, Cross_entropy: 0.024976, Train_Accuracy: 1.000000, Test_Accuracy: 0.981700\n",
      "Step: 38000, Cross_entropy: 0.021364, Train_Accuracy: 1.000000, Test_Accuracy: 0.981400\n",
      "Step: 39000, Cross_entropy: 0.027135, Train_Accuracy: 1.000000, Test_Accuracy: 0.981000\n",
      "Step: 40000, Cross_entropy: 0.022210, Train_Accuracy: 1.000000, Test_Accuracy: 0.981300\n",
      "Step: 41000, Cross_entropy: 0.020763, Train_Accuracy: 1.000000, Test_Accuracy: 0.981800\n",
      "Step: 42000, Cross_entropy: 0.019118, Train_Accuracy: 1.000000, Test_Accuracy: 0.982200\n",
      "Step: 43000, Cross_entropy: 0.021295, Train_Accuracy: 1.000000, Test_Accuracy: 0.982600\n",
      "Step: 44000, Cross_entropy: 0.043853, Train_Accuracy: 1.000000, Test_Accuracy: 0.981400\n",
      "Step: 45000, Cross_entropy: 0.021831, Train_Accuracy: 1.000000, Test_Accuracy: 0.982500\n",
      "Step: 46000, Cross_entropy: 0.026853, Train_Accuracy: 1.000000, Test_Accuracy: 0.979400\n",
      "Step: 47000, Cross_entropy: 0.021601, Train_Accuracy: 1.000000, Test_Accuracy: 0.980900\n",
      "Step: 48000, Cross_entropy: 0.023451, Train_Accuracy: 1.000000, Test_Accuracy: 0.982200\n",
      "Step: 49000, Cross_entropy: 0.021707, Train_Accuracy: 1.000000, Test_Accuracy: 0.981900\n",
      "Step: 50000, Cross_entropy: 0.020224, Train_Accuracy: 1.000000, Test_Accuracy: 0.983100\n",
      "Step: 51000, Cross_entropy: 0.020036, Train_Accuracy: 1.000000, Test_Accuracy: 0.982400\n",
      "Step: 52000, Cross_entropy: 0.039741, Train_Accuracy: 1.000000, Test_Accuracy: 0.981700\n",
      "Step: 53000, Cross_entropy: 0.020625, Train_Accuracy: 1.000000, Test_Accuracy: 0.982900\n",
      "Step: 54000, Cross_entropy: 0.019458, Train_Accuracy: 1.000000, Test_Accuracy: 0.983000\n",
      "Step: 55000, Cross_entropy: 0.041538, Train_Accuracy: 0.990000, Test_Accuracy: 0.981900\n",
      "Step: 56000, Cross_entropy: 0.018390, Train_Accuracy: 1.000000, Test_Accuracy: 0.982900\n",
      "Step: 57000, Cross_entropy: 0.018755, Train_Accuracy: 1.000000, Test_Accuracy: 0.982500\n",
      "Step: 58000, Cross_entropy: 0.021588, Train_Accuracy: 1.000000, Test_Accuracy: 0.983900\n",
      "Step: 59000, Cross_entropy: 0.019775, Train_Accuracy: 1.000000, Test_Accuracy: 0.982600\n",
      "Step: 60000, Cross_entropy: 0.019674, Train_Accuracy: 1.000000, Test_Accuracy: 0.982600\n"
     ]
    }
   ],
   "source": [
    "# Begin to train\n",
    "#the number of epochs, (cycleNum * batchSize)/size of the training set\n",
    "#Adjust the epochs numbers, and balance the loss and running time\n",
    "#Lower learning rate leads to larger epochs number \n",
    "\n",
    "cycleNum = 60000\n",
    "batchSize = 100\n",
    "for step in range(cycleNum+1):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(batchSize)\n",
    "#     real_y = sess.run([logits],feed_dict={x: batch_xs, y_: batch_ys})\n",
    "    each_loss, _ = sess.run([loss,train_step], feed_dict={x: batch_xs, y_: batch_ys})\n",
    "#     print(each_loss)\n",
    "    if (step % 1000 == 0):\n",
    "        print(\"Step: %d, Cross_entropy: %f, Train_Accuracy: %f, Test_Accuracy: %f\" % \n",
    "              (step,\n",
    "               each_loss,sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys}),\n",
    "               sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels})))\n",
    "    elif (step == cycleNum):\n",
    "        print(\"Step: %d, Cross_entropy: %f, Train_Accuracy: %f, Test_Accuracy: %f\" % \n",
    "              (step,\n",
    "               each_loss,sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys}),\n",
    "               sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels})))\n",
    "    \n",
    "        \n",
    "#     print(real_y)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9826\n"
     ]
    }
   ],
   "source": [
    "# Test trained model\n",
    "correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                      y_: mnist.test.labels}))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
