{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 评价标准\n",
    "\n",
    "- 准确度达到98%或者以上60分，作为及格标准，未达到者本作业不及格，不予打分。\n",
    "- 使用了正则化因子或文档中给出描述：10分。\n",
    "- 手动初始化参数或文档中给出描述：10分，不设置初始化参数的，只使用默认初始化认为学员没考虑到初始化问题，不给分。\n",
    "- 学习率调整：10分，需要文档中给出描述。\n",
    "- 卷积kernel size和数量调整：10分，需要文档中给出描述。\n",
    "\n",
    "### 说明\n",
    "\n",
     "1. 最终模型的正确率为 98.24%，符合预期要求\n",
    "2. 正则化因子采用了 0.0005\n",
    "3. 卷积核采用了正态初始化\n",
    "4. 学习率采用 0.1\n",
    "5. kernel_size 采用 [5,5] "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## code\n",
    "\n",
    "https://www.tinymind.com/weixin-42679665/mnist-tensorflow/code"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# coding: utf-8\n",
    "\n",
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "print(tf.__version__)\n",
    "\n",
    "\n",
    "# print(dir(tf))\n",
    "\n",
    "def calBestAccuracy(mnist, kernel_size=[5, 5], lr=0.01,\n",
    "                    reg_param=7e-5):\n",
    "    # Define loss and optimizer\n",
    "    x = tf.placeholder(tf.float32, [None, 784])\n",
    "    y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "    learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "    with tf.name_scope('reshape'):\n",
    "        x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "    # First convolutional layer - maps one grayscale image to 32 feature maps.\n",
    "    with tf.name_scope('conv1'):\n",
    "        '''\n",
    "        kernel_initializer: An initializer for the convolution kernel.\n",
    "        bias_initializer: An initializer for the bias vector. If None, no bias will be applied.\n",
    "        '''\n",
    "        # TODO 1. 使用正太分布初始化卷积核\n",
    "        h_conv1 = tf.layers.conv2d(x_image, 32, kernel_size,\n",
    "                                   padding='SAME',\n",
    "                                   activation=tf.nn.relu,\n",
    "                                   kernel_initializer=tf.truncated_normal_initializer(),\n",
    "                                   # bias_initializer=tf.Constant(0)\n",
    "                                   )\n",
    "\n",
    "    # Pooling layer - downsamples by 2X.\n",
    "    with tf.name_scope('pool1'):\n",
    "        h_pool1 = tf.layers.max_pooling2d(h_conv1, pool_size=[2, 2],\n",
    "                                          strides=[2, 2], padding='VALID')\n",
    "\n",
    "    # Second convolutional layer -- maps 32 feature maps to 64.\n",
    "    with tf.name_scope('conv2'):\n",
    "        h_conv2 = tf.layers.conv2d(h_pool1, 64, kernel_size,\n",
    "                                   padding='SAME',\n",
    "                                   activation=tf.nn.relu,\n",
    "                                   # kernel_initializer=tf.TruncatedNormal(stddev=0.01),\n",
    "                                   # bias_initializer=tf.Constant(0)\n",
    "                                   )\n",
    "\n",
    "    # Second pooling layer.\n",
    "    with tf.name_scope('pool2'):\n",
    "        h_pool2 = tf.layers.max_pooling2d(h_conv2, pool_size=[2, 2],\n",
    "                                          strides=[2, 2], padding='VALID')\n",
    "\n",
    "    # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n",
    "    # is down to 7x7x64 feature maps -- maps this to 1024 features.\n",
    "    with tf.name_scope('fc1'):\n",
    "        h_pool2_flat = tf.layers.flatten(h_pool2)\n",
    "        h_fc1 = tf.layers.dense(h_pool2_flat, 1024, activation=tf.nn.relu)\n",
    "\n",
    "    # Dropout - controls the complexity of the model, prevents co-adaptation of\n",
    "    # features.\n",
    "    with tf.name_scope('dropout'):\n",
    "        keep_prob = tf.placeholder(tf.float32)\n",
    "        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "    # Map the 1024 features to 10 classes, one for each digit\n",
    "    with tf.name_scope('fc2'):\n",
    "        y = tf.layers.dense(h_fc1_drop, 10, activation=None)\n",
    "\n",
    "    # The raw formulation of cross-entropy,\n",
    "    #\n",
    "    #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "    #                                 reduction_indices=[1]))\n",
    "    #\n",
    "    # can be numerically unstable.\n",
    "    #\n",
    "    # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "    # outputs of 'y', and then average across the batch.\n",
    "    cross_entropy = tf.reduce_mean(\n",
    "        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "    # TODO L2损失，正则化因子\n",
    "    l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)])\n",
    "    total_loss = cross_entropy + reg_param * l2_loss\n",
    "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "    sess = tf.Session()\n",
    "    init_op = tf.global_variables_initializer()\n",
    "    sess.run(init_op)\n",
    "    # Train\n",
    "    for step in range(3000):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "        lr = lr\n",
    "        _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "            [train_step, cross_entropy, l2_loss, total_loss],\n",
    "            feed_dict={x: batch_xs, y_: batch_ys, learning_rate: lr, keep_prob: 0.5})\n",
    "\n",
    "        if (step + 1) % 100 == 0:\n",
    "            print('step %d, entropy loss: %f, l2_loss: %f, total loss: %f' %\n",
    "                  (step + 1, loss, l2_loss_value, total_loss_value))\n",
    "            # Test trained model\n",
    "            correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "            print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys, keep_prob: 0.5}))\n",
    "        if (step + 1) % 1000 == 0:\n",
    "            print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                                y_: mnist.test.labels, keep_prob: 0.5}))\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    # Import data\n",
    "    data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "    from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "    mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "\n",
    "    from tinyenv.flags import flags\n",
    "\n",
    "    # Call this first to load the parameters.\n",
    "    FLAGS = flags()\n",
    "    # Then you can use the parameters like such:\n",
    "\n",
    "    lr = FLAGS.learning_rate\n",
    "    kernel_size = FLAGS.kernel_size\n",
    "    reg_param = FLAGS.reg_param\n",
    "\n",
    "    print(lr, kernel_size, reg_param)\n",
    "\n",
    "    calBestAccuracy(mnist, kernel_size=[kernel_size, kernel_size], reg_param=reg_param, lr=lr)\n",
    "\n",
    "    "
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 执行结果\n",
    "\n",
    "https://www.tinymind.com/executions/2r6zybo4\n",
    "\n",
    "### 执行参数\n",
    "\n",
     "![执行参数](./param.png)\n",
    "\n",
    "### 输出结果\n",
     "![输出结果](./result.png)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
