{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "from tensorflow.examples.tutorials.mnist import mnist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Hyperparameters for SGD training\n",
    "batch_size = 100\n",
    "learning_rate = 0.8\n",
    "training_step = 30000\n",
    "\n",
    "# Network dimensions: 28x28 MNIST images (784 pixels) -> one hidden layer -> 10 digit classes\n",
    "n_input = 784\n",
    "n_hidden =  500\n",
    "n_labels = 10"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "def inference(x_input, reuse = tf.AUTO_REUSE):\n",
    "    \"\"\"Forward pass of a two-layer fully connected network.\n",
    "\n",
    "    Args:\n",
    "        x_input: float tensor of shape [batch, n_input].\n",
    "        reuse: variable-scope reuse flag; tf.AUTO_REUSE lets repeated\n",
    "            calls share the same weight variables.\n",
    "\n",
    "    Returns:\n",
    "        Logits tensor of shape [batch, n_labels] (no softmax applied;\n",
    "        pair with softmax_cross_entropy_with_logits for training).\n",
    "    \"\"\"\n",
    "    with tf.variable_scope(\"hidden\", reuse=reuse):\n",
    "        weights_hidden = tf.get_variable(\"weights\", [n_input, n_hidden], initializer=tf.random_normal_initializer(stddev=0.1))\n",
    "        # Fix: instantiate the initializer. The original passed the bare\n",
    "        # class tf.constant_initializer and relied on get_variable's\n",
    "        # lenient handling; tf.constant_initializer(0.0) is explicit.\n",
    "        biases_hidden = tf.get_variable(\"biases\", [n_hidden], initializer=tf.constant_initializer(0.0))\n",
    "        out_hidden = tf.nn.relu(tf.matmul(x_input, weights_hidden) + biases_hidden)\n",
    "\n",
    "    with tf.variable_scope(\"out\", reuse=reuse):\n",
    "        weights = tf.get_variable(\"weights\", [n_hidden, n_labels], initializer=tf.random_normal_initializer(stddev=0.1))\n",
    "        biases = tf.get_variable(\"biases\", [n_labels], initializer=tf.constant_initializer(0.0))\n",
    "        out = tf.matmul(out_hidden, weights) + biases\n",
    "\n",
    "    return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(mnist):\n",
    "    \"\"\"Build the graph and train the MNIST classifier with plain SGD.\n",
    "\n",
    "    Args:\n",
    "        mnist: DataSets object from input_data.read_data_sets with\n",
    "            one-hot labels (train / validation / test splits).\n",
    "    \"\"\"\n",
    "    x = tf.placeholder(\"float\", [None, n_input])\n",
    "    y = tf.placeholder(\"float\", [None, n_labels])\n",
    "    pred = inference(x)\n",
    "    \n",
    "    # Loss: softmax cross entropy averaged over the batch\n",
    "    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels= y))\n",
    "    \n",
    "    # Optimizer: vanilla gradient descent on the cross-entropy loss\n",
    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)\n",
    "    \n",
    "    # Accuracy: fraction of samples whose arg-max prediction matches the label\n",
    "    correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n",
    "    \n",
    "    init = tf.global_variables_initializer()\n",
    "    \n",
    "    with tf.Session() as sess:\n",
    "        sess.run(init)\n",
    "        \n",
    "        # Fixed feed dicts for the validation and test splits\n",
    "        validate_data = {x: mnist.validation.images, y: mnist.validation.labels}\n",
    "        test_data = {x: mnist.test.images, y: mnist.test.labels}\n",
    "        \n",
    "        for i in range(training_step):\n",
    "            \n",
    "            xs, ys = mnist.train.next_batch(batch_size)\n",
    "            _, loss = sess.run([optimizer, cross_entropy], feed_dict={x:xs, y:ys})\n",
    "            \n",
    "            if i % 1000 == 0:\n",
    "                validate_accuracy = sess.run(accuracy, feed_dict=validate_data)\n",
    "                print(\"after %d training steps, the loss is %g, the validation accuracy is %g\" % (i, loss, validate_accuracy))  \n",
    "        \n",
    "        # Fix: test_data was built but never used -- report the final\n",
    "        # test-set accuracy once training is complete.\n",
    "        test_accuracy = sess.run(accuracy, feed_dict=test_data)\n",
    "        print(\"the training is finished! the test accuracy is %g\" % test_accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./mnist_data_sets/train-images-idx3-ubyte.gz\n",
      "Extracting ./mnist_data_sets/train-labels-idx1-ubyte.gz\n",
      "Extracting ./mnist_data_sets/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./mnist_data_sets/t10k-labels-idx1-ubyte.gz\n",
      "after 0 training steps, the loss is 3.33047, the validation accuracy is 0.183\n",
      "after 1000 training steps, the loss is 0.0604399, the validation accuracy is 0.973\n",
      "after 2000 training steps, the loss is 0.0328597, the validation accuracy is 0.9768\n",
      "after 3000 training steps, the loss is 0.0359508, the validation accuracy is 0.9844\n",
      "after 4000 training steps, the loss is 0.0113616, the validation accuracy is 0.9828\n",
      "after 5000 training steps, the loss is 0.00765337, the validation accuracy is 0.9832\n",
      "after 6000 training steps, the loss is 0.00195865, the validation accuracy is 0.983\n",
      "after 7000 training steps, the loss is 0.00112187, the validation accuracy is 0.9836\n",
      "after 8000 training steps, the loss is 0.00201272, the validation accuracy is 0.9834\n",
      "after 9000 training steps, the loss is 0.00143327, the validation accuracy is 0.9834\n",
      "after 10000 training steps, the loss is 0.000433491, the validation accuracy is 0.9844\n",
      "after 11000 training steps, the loss is 0.00173629, the validation accuracy is 0.9838\n",
      "after 12000 training steps, the loss is 0.000768704, the validation accuracy is 0.9842\n",
      "after 13000 training steps, the loss is 0.00116519, the validation accuracy is 0.9836\n",
      "after 14000 training steps, the loss is 0.000619716, the validation accuracy is 0.9842\n",
      "after 15000 training steps, the loss is 0.000556853, the validation accuracy is 0.9836\n"
     ]
    }
   ],
   "source": [
    "# Download/extract MNIST into ./mnist_data_sets/ (cached after first run),\n",
    "# then build the graph and train.\n",
    "mnist = input_data.read_data_sets(\"./mnist_data_sets/\", one_hot=True)  \n",
    "train(mnist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
