{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-53f41b763965>:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From C:\\Users\\win\\Anaconda2\\envs\\tensorflow-gpu\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From C:\\Users\\win\\Anaconda2\\envs\\tensorflow-gpu\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ../data/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Users\\win\\Anaconda2\\envs\\tensorflow-gpu\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ../data/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Users\\win\\Anaconda2\\envs\\tensorflow-gpu\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ../data/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting ../data/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From C:\\Users\\win\\Anaconda2\\envs\\tensorflow-gpu\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Import data: read the MNIST dataset, downloading it into data_dir on first run.\n",
    "# NOTE: point data_dir at a directory suitable for your environment (see the\n",
    "# markdown note above). read_data_sets is deprecated in newer TF versions but\n",
    "# still functional here; it emits the deprecation warnings shown below.\n",
    "#data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "data_dir = '../data/mnist/input_data'\n",
    "# one_hot=True encodes each label as a length-10 one-hot vector.\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一个非常非常简陋的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100, entropy loss: 2.282035, l2_loss: 50681.539062, total loss: 5.829743\n",
      "0.14\n",
      "step 200, entropy loss: 2.179348, l2_loss: 50674.449219, total loss: 5.726560\n",
      "0.28\n",
      "step 300, entropy loss: 2.063561, l2_loss: 50667.421875, total loss: 5.610280\n",
      "0.44\n",
      "step 400, entropy loss: 1.993236, l2_loss: 50660.402344, total loss: 5.539464\n",
      "0.5\n",
      "step 500, entropy loss: 1.964499, l2_loss: 50653.402344, total loss: 5.510237\n",
      "0.48\n",
      "step 600, entropy loss: 1.857697, l2_loss: 50646.398438, total loss: 5.402945\n",
      "0.59\n",
      "step 700, entropy loss: 1.868544, l2_loss: 50639.414062, total loss: 5.413303\n",
      "0.58\n",
      "step 800, entropy loss: 1.867570, l2_loss: 50632.433594, total loss: 5.411840\n",
      "0.63\n",
      "step 900, entropy loss: 1.757532, l2_loss: 50625.437500, total loss: 5.301312\n",
      "0.64\n",
      "step 1000, entropy loss: 1.739249, l2_loss: 50618.484375, total loss: 5.282543\n",
      "0.7\n",
      "0.68\n",
      "0.58\n",
      "0.74\n",
      "0.7\n",
      "0.78\n",
      "0.76\n",
      "0.66\n",
      "0.78\n",
      "0.76\n",
      "0.74\n",
      "step 1100, entropy loss: 1.750080, l2_loss: 50611.519531, total loss: 5.292887\n",
      "0.65\n",
      "step 1200, entropy loss: 1.695027, l2_loss: 50604.542969, total loss: 5.237345\n",
      "0.78\n",
      "step 1300, entropy loss: 1.649235, l2_loss: 50597.582031, total loss: 5.191066\n",
      "0.77\n",
      "step 1400, entropy loss: 1.686414, l2_loss: 50590.605469, total loss: 5.227757\n",
      "0.76\n",
      "step 1500, entropy loss: 1.646321, l2_loss: 50583.617188, total loss: 5.187174\n",
      "0.81\n",
      "step 1600, entropy loss: 1.663109, l2_loss: 50576.617188, total loss: 5.203473\n",
      "0.81\n",
      "step 1700, entropy loss: 1.663690, l2_loss: 50569.625000, total loss: 5.203564\n",
      "0.79\n",
      "step 1800, entropy loss: 1.624972, l2_loss: 50562.640625, total loss: 5.164357\n",
      "0.83\n",
      "step 1900, entropy loss: 1.606341, l2_loss: 50555.664062, total loss: 5.145237\n",
      "0.87\n",
      "step 2000, entropy loss: 1.608284, l2_loss: 50548.695312, total loss: 5.146692\n",
      "0.83\n",
      "0.82\n",
      "0.84\n",
      "0.9\n",
      "0.92\n",
      "0.78\n",
      "0.96\n",
      "0.88\n",
      "0.88\n",
      "0.86\n",
      "0.86\n",
      "step 2100, entropy loss: 1.647483, l2_loss: 50541.710938, total loss: 5.185403\n",
      "0.84\n",
      "step 2200, entropy loss: 1.546144, l2_loss: 50534.738281, total loss: 5.083576\n",
      "0.89\n",
      "step 2300, entropy loss: 1.574449, l2_loss: 50527.742188, total loss: 5.111391\n",
      "0.88\n",
      "step 2400, entropy loss: 1.581188, l2_loss: 50520.761719, total loss: 5.117641\n",
      "0.85\n",
      "step 2500, entropy loss: 1.621649, l2_loss: 50513.773438, total loss: 5.157614\n",
      "0.85\n",
      "step 2600, entropy loss: 1.589769, l2_loss: 50506.773438, total loss: 5.125244\n",
      "0.9\n",
      "step 2700, entropy loss: 1.537238, l2_loss: 50499.785156, total loss: 5.072223\n",
      "0.94\n",
      "step 2800, entropy loss: 1.587605, l2_loss: 50492.785156, total loss: 5.122100\n",
      "0.91\n",
      "step 2900, entropy loss: 1.559001, l2_loss: 50485.796875, total loss: 5.093007\n",
      "0.87\n",
      "step 3000, entropy loss: 1.577885, l2_loss: 50478.804688, total loss: 5.111402\n",
      "0.9\n",
      "0.9\n",
      "0.94\n",
      "0.98\n",
      "0.9\n",
      "0.92\n",
      "0.94\n",
      "0.88\n",
      "0.96\n",
      "0.86\n",
      "0.98\n"
     ]
    }
   ],
   "source": [
    "# Create the model: a small convnet (conv-pool x2, FC+dropout, FC) for MNIST.\n",
    "\n",
    "x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot labels\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "def weight_variable(shape):\n",
    "    \"\"\"Weight tensor initialized from a truncated normal (stddev 0.1).\"\"\"\n",
    "    initial = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(initial)\n",
    "\n",
    "def bias_variable(shape):\n",
    "    \"\"\"Bias tensor initialized to the constant 0.1.\"\"\"\n",
    "    initial = tf.constant(0.1, shape=shape)\n",
    "    return tf.Variable(initial)\n",
    "\n",
    "def conv2d(x, W):\n",
    "    \"\"\"2-D convolution, stride 1, zero-padded to keep the spatial size.\"\"\"\n",
    "    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n",
    "\n",
    "def max_pool_2x2(x):\n",
    "    \"\"\"2x2 max pooling with stride 2 (halves height and width).\"\"\"\n",
    "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
    "\n",
    "# First conv block: 28x28x1 -> 28x28x32 -> pool -> 14x14x32.\n",
    "W_conv1 = weight_variable([5, 5, 1, 32])\n",
    "b_conv1 = bias_variable([32])\n",
    "h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n",
    "h_pool1 = max_pool_2x2(h_conv1)\n",
    "\n",
    "# Second conv block: 14x14x32 -> 14x14x64 -> pool -> 7x7x64.\n",
    "W_conv2 = weight_variable([5, 5, 32, 64])\n",
    "b_conv2 = bias_variable([64])\n",
    "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n",
    "h_pool2 = max_pool_2x2(h_conv2)\n",
    "\n",
    "# Fully connected layer on the flattened 7*7*64 feature map.\n",
    "W_fc1 = weight_variable([7 * 7 * 64, 1024])\n",
    "b_fc1 = bias_variable([1024])\n",
    "h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n",
    "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "# Dropout regularization (keep_prob is fed: 0.5 for training, 1.0 for eval).\n",
    "keep_prob = tf.placeholder(\"float\")\n",
    "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "# Output layer: raw (unnormalized) logits.\n",
    "W_fc2 = weight_variable([1024, 10])\n",
    "b_fc2 = bias_variable([10])\n",
    "# BUG FIX: keep y_conv as logits. The original applied tf.nn.softmax here and\n",
    "# then fed the result to softmax_cross_entropy_with_logits, which applies\n",
    "# softmax a second time and flattens the gradients (hence the very slow\n",
    "# convergence visible in the recorded outputs). argmax is monotonic under\n",
    "# softmax, so the accuracy computation below is unaffected by this change.\n",
    "y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n",
    "\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n",
    "# L2 weight decay over all trainable variables.\n",
    "l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])\n",
    "total_loss = cross_entropy + 7e-5 * l2_loss\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n",
    "\n",
    "sess = tf.Session()\n",
    "# initialize_all_variables() is deprecated; use global_variables_initializer().\n",
    "sess.run(tf.global_variables_initializer())\n",
    "\n",
    "for step in range(3000):\n",
    "  batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "  lr = 0.01\n",
    "  _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss],\n",
    "               feed_dict={x: batch_xs, y_: batch_ys, learning_rate: lr, keep_prob: 0.5})\n",
    "\n",
    "  if (step + 1) % 100 == 0:\n",
    "    print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %\n",
    "            (step + 1, loss, l2_loss_value, total_loss_value))\n",
    "    # Training-batch accuracy. BUG FIX: evaluate with keep_prob=1.0 --\n",
    "    # dropout must be disabled at evaluation time or accuracy is understated.\n",
    "    print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 1.0}))\n",
    "  if (step + 1) % 1000 == 0:\n",
    "    # Accuracy on ten held-out test batches (dropout disabled, as above).\n",
    "    for i in range(10):\n",
    "        testSet = mnist.test.next_batch(50)\n",
    "        print(sess.run(accuracy, feed_dict={x: testSet[0],\n",
    "                                    y_: testSet[1], keep_prob: 1.0}))\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "毫无疑问，这个模型是一个非常简陋，性能也不理想的模型。目前只能达到92%左右的准确率。\n",
    "接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。\n",
    "Hint：\n",
    "- 卷积\n",
    "- 池化\n",
    "- 激活函数\n",
    "- 正则化\n",
    "- 初始化\n",
    "- 摸索一下各个超参数\n",
    "  - 卷积kernel size\n",
    "  - 卷积kernel 数量\n",
    "  - 学习率\n",
    "  - 正则化惩罚因子\n",
    "  - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
