{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting ./input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting ./input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting ./input_data\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# Download (if not cached) and load MNIST from data_dir.\n",
     "# one_hot=True encodes each label as a length-10 one-hot vector,\n",
     "# matching the 10-logit output layer and softmax cross-entropy used later.\n",
     "data_dir = './input_data'\n",
     "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Input -> first hidden layer (784 -> 500).\n",
     "# x holds a batch of flattened 28x28 MNIST images; batch size is left\n",
     "# open (None) so the same graph serves training batches and the test set.\n",
     "x = tf.placeholder(tf.float32, [None, 784])\n",
     "# Truncated-normal init (stddev 0.1) breaks symmetry between units;\n",
     "# biases start at a small positive 0.1 rather than zero.\n",
     "W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))\n",
     "b1 = tf.Variable(tf.zeros([500]) + 0.1)\n",
     "logits1 = tf.matmul(x, W1) + b1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# First hidden activation and second hidden layer (500 -> 100).\n",
     "# tanh squashes layer-1 pre-activations into (-1, 1).\n",
     "h1 = tf.nn.tanh(logits1)\n",
     "W2 = tf.Variable(tf.truncated_normal([500, 100], stddev=0.1))\n",
     "b2 = tf.Variable(tf.zeros([100]) + 0.1)\n",
     "logits2 = tf.matmul(h1, W2) + b2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Second hidden activation and the OUTPUT layer (100 -> 10 digit classes).\n",
     "# y holds unnormalized logits; softmax is applied inside the loss op below,\n",
     "# so no activation is applied here.\n",
     "h2 = tf.nn.tanh(logits2)\n",
     "W3 = tf.Variable(tf.truncated_normal([100, 10], stddev=0.1))\n",
     "b3 = tf.Variable(tf.zeros([10]) + 0.1)\n",
     "y = tf.matmul(h2, W3) + b3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Placeholder for ground-truth labels: one-hot vectors of length 10,\n",
     "# one per image in the batch (same None batch dimension as x).\n",
     "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# The raw formulation of cross-entropy,\n",
     "#\n",
     "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
     "#                                 reduction_indices=[1]))\n",
     "#\n",
     "# can be numerically unstable.\n",
     "#\n",
     "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
     "# outputs of 'y', and then average across the batch.\n",
     "# NOTE(review): newer TF 1.x releases deprecate this op in favor of\n",
     "# tf.nn.softmax_cross_entropy_with_logits_v2 -- confirm against the\n",
     "# installed TF version before upgrading.\n",
     "loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)\n",
     "cross_entropy_mean = tf.reduce_mean(loss)\n",
     "\n",
     "# L2 weight decay (scale 0.0001) on all three weight matrices, added to\n",
     "# the data loss; biases are intentionally not regularized.\n",
     "regularizer = tf.contrib.layers.l2_regularizer(0.0001)\n",
     "regularization = regularizer(W1) + regularizer(W2) + regularizer(W3)\n",
     "cross_entropy = cross_entropy_mean + regularization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# One op that performs a single SGD step (learning rate 0.5) on the\n",
     "# regularized loss; running it repeatedly trains the network.\n",
     "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n",
     "\n",
     "# Start a session and initialize all variables before any training runs.\n",
     "sess = tf.Session()\n",
     "init_op = tf.global_variables_initializer()\n",
     "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Accuracy: fraction of samples whose largest logit (predicted class)\n",
     "# matches the position of the 1 in the one-hot label.\n",
     "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
     "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Trained 0 times\n",
      "0.0341634\n",
      "-0.153855\n",
      "-0.355284\n",
      "Trained 100 times\n",
      "0.033993\n",
      "-0.152866\n",
      "-0.362305\n",
      "Trained 200 times\n",
      "0.0338234\n",
      "-0.151706\n",
      "-0.36632\n",
      "Trained 300 times\n",
      "0.0336547\n",
      "-0.149882\n",
      "-0.364323\n",
      "Trained 400 times\n",
      "0.0334869\n",
      "-0.148254\n",
      "-0.371966\n",
      "Trained 500 times\n",
      "0.0333199\n",
      "-0.144891\n",
      "-0.375763\n",
      "Trained 600 times\n",
      "0.0331537\n",
      "-0.14533\n",
      "-0.381797\n",
      "Trained 700 times\n",
      "0.0329883\n",
      "-0.151108\n",
      "-0.393004\n",
      "Trained 800 times\n",
      "0.0328238\n",
      "-0.149466\n",
      "-0.389861\n",
      "Trained 900 times\n",
      "0.0326601\n",
      "-0.152916\n",
      "-0.388867\n",
      "Trained 1000 times\n",
      "0.0324972\n",
      "-0.154439\n",
      "-0.392167\n",
      "Trained 1100 times\n",
      "0.0323351\n",
      "-0.153173\n",
      "-0.394445\n",
      "Trained 1200 times\n",
      "0.0321738\n",
      "-0.152142\n",
      "-0.397194\n",
      "Trained 1300 times\n",
      "0.0320133\n",
      "-0.15206\n",
      "-0.397521\n",
      "Trained 1400 times\n",
      "0.0318537\n",
      "-0.147162\n",
      "-0.398864\n",
      "Trained 1500 times\n",
      "0.0316948\n",
      "-0.14262\n",
      "-0.399539\n",
      "Trained 1600 times\n",
      "0.0315367\n",
      "-0.142255\n",
      "-0.399627\n",
      "Trained 1700 times\n",
      "0.0313794\n",
      "-0.142046\n",
      "-0.400617\n",
      "Trained 1800 times\n",
      "0.0312229\n",
      "-0.142335\n",
      "-0.402371\n",
      "Trained 1900 times\n",
      "0.0310672\n",
      "-0.141077\n",
      "-0.411964\n",
      "Trained 2000 times\n",
      "0.0309122\n",
      "-0.14081\n",
      "-0.411855\n",
      "Trained 2100 times\n",
      "0.030758\n",
      "-0.145137\n",
      "-0.413821\n",
      "Trained 2200 times\n",
      "0.0306046\n",
      "-0.142405\n",
      "-0.415864\n",
      "Trained 2300 times\n",
      "0.030452\n",
      "-0.1428\n",
      "-0.420412\n",
      "Trained 2400 times\n",
      "0.0303001\n",
      "-0.142095\n",
      "-0.422282\n",
      "Trained 2500 times\n",
      "0.030149\n",
      "-0.136299\n",
      "-0.432232\n",
      "Trained 2600 times\n",
      "0.0299986\n",
      "-0.135944\n",
      "-0.437107\n",
      "Trained 2700 times\n",
      "0.029849\n",
      "-0.136515\n",
      "-0.441699\n",
      "Trained 2800 times\n",
      "0.0297001\n",
      "-0.134619\n",
      "-0.434887\n",
      "Trained 2900 times\n",
      "0.029552\n",
      "-0.135449\n",
      "-0.43687\n",
      "Trained 3000 times\n",
      "0.0294046\n",
      "-0.137852\n",
      "-0.431005\n",
      "Trained 3100 times\n",
      "0.0292579\n",
      "-0.137626\n",
      "-0.428904\n",
      "Trained 3200 times\n",
      "0.029112\n",
      "-0.136968\n",
      "-0.430568\n",
      "Trained 3300 times\n",
      "0.0289668\n",
      "-0.13259\n",
      "-0.436365\n",
      "Trained 3400 times\n",
      "0.0288223\n",
      "-0.13235\n",
      "-0.435745\n",
      "Trained 3500 times\n",
      "0.0286786\n",
      "-0.131561\n",
      "-0.435712\n",
      "Trained 3600 times\n",
      "0.0285355\n",
      "-0.133349\n",
      "-0.442647\n",
      "Trained 3700 times\n",
      "0.0283932\n",
      "-0.132302\n",
      "-0.441507\n",
      "Trained 3800 times\n",
      "0.0282516\n",
      "-0.13308\n",
      "-0.442284\n",
      "Trained 3900 times\n",
      "0.0281107\n",
      "-0.131955\n",
      "-0.446139\n",
      "Trained 4000 times\n",
      "0.0279705\n",
      "-0.131962\n",
      "-0.442755\n",
      "Trained 4100 times\n",
      "0.027831\n",
      "-0.132301\n",
      "-0.447526\n"
     ]
    }
   ],
   "source": [
    "# Train model\n",
    "for _ in range(4200):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "  W1_values, W2_values, W3_values, __ = sess.run([W1, W2, W3, train_step], feed_dict={x: batch_xs, y_: batch_ys})\n",
    "  if(_%100 == 0):\n",
    "        print(\"Trained \" + str(_) + \" times\")\n",
    "        print(W1_values[0][0])\n",
    "        print(W2_values[0][0])\n",
    "        print(W3_values[0][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "accuracy value is 0.9801\n"
     ]
    }
   ],
   "source": [
     "# Evaluate accuracy on the held-out MNIST test set in one run call\n",
     "# (the whole test set is fed as a single batch).\n",
     "accuracy_value = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n",
     "print(\"accuracy value is \" + str(accuracy_value))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
