{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\Users\\SEELE\\Anaconda3\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n",
    "\n",
    "from matplotlib import pyplot"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的Mnist数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "\n",
    "<font color=#ff0000>**这里将data_dir改为适合你的运行环境的目录**</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-f603c702140e>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From E:\\Users\\SEELE\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From E:\\Users\\SEELE\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From E:\\Users\\SEELE\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From E:\\Users\\SEELE\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From E:\\Users\\SEELE\\Anaconda3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Import data: downloads MNIST into data_dir on first run, then reads the\n",
    "# cached copy.  Adjust data_dir to suit your environment.\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)\n",
    "\n",
    "# Best test accuracy seen so far across ALL train() runs; train() declares\n",
    "# this global and checkpoints whenever it beats it.  (The former module-level\n",
    "# `global max_acc` statement was a no-op and has been removed.)\n",
    "max_acc = 0\n",
    "\n",
    "# Shared settings for the hyper-parameter sweep cells below.\n",
    "batch_global = 1000\n",
    "step_global = 5000"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一个非常非常简陋的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def weight_variable(shape):\n",
    "    \"\"\"Return a weight Variable initialised from a truncated normal (stddev 0.1).\"\"\"\n",
    "    initial = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(initial)\n",
    "\n",
    "def bias_variable(shape):\n",
    "    \"\"\"Return a bias Variable initialised to 0.1 (small positive value for ReLU units).\"\"\"\n",
    "    initial = tf.constant(0.1, shape=shape)\n",
    "    return tf.Variable(initial)\n",
    "\n",
    "def conv2d(x, W):\n",
    "    \"\"\"2-D convolution with stride 1 and SAME padding (output keeps the input size).\"\"\"\n",
    "    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n",
    "\n",
    "def max_pool(x, kernel_size):\n",
    "    \"\"\"Max-pool with a kernel_size x kernel_size window and stride 2.\"\"\"\n",
    "    return tf.nn.max_pool(x, ksize=[1, kernel_size, kernel_size, 1],\n",
    "                          strides=[1, 2, 2, 1], padding='SAME')\n",
    "\n",
    "\n",
    "def train(conv1_kernel_size = 5,\n",
    "          conv1_out_channel = 32,\n",
    "          conv1_sig = tf.nn.relu,\n",
    "          fc1_kernel_size = 2,\n",
    "          conv2_kernel_size = 5,\n",
    "          conv2_out_channel = 64,\n",
    "          conv2_sig = tf.nn.relu,\n",
    "          fc2_kernel_size = 2,\n",
    "          fc_size = 1024,\n",
    "          l2_rate = 1e-4,\n",
    "          batchSize = 100,\n",
    "          trainingStep = 30000,\n",
    "          drop_keep_prob = 0.5,\n",
    "          Learning_rate_base = 0.5):\n",
    "    \"\"\"Build and train a small LeNet-style CNN on MNIST.\n",
    "\n",
    "    Reports test accuracy every `batchSize` steps and returns the best test\n",
    "    accuracy observed during THIS run.  Also updates the global `max_acc` and\n",
    "    saves a checkpoint whenever a new overall best (across all runs) is seen.\n",
    "    \"\"\"\n",
    "    # Fresh graph on every call so the hyper-parameter sweep cells don't keep\n",
    "    # piling nodes onto the default graph.\n",
    "    tf.reset_default_graph()\n",
    "\n",
    "    x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 images\n",
    "    y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot ground-truth labels\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "    # First convolution + pooling stage.\n",
    "    W_conv1 = weight_variable([conv1_kernel_size, conv1_kernel_size, 1, conv1_out_channel])\n",
    "    b_conv1 = bias_variable([conv1_out_channel])\n",
    "    h_conv1 = conv1_sig(conv2d(x_image, W_conv1) + b_conv1)\n",
    "    h_pool1 = max_pool(h_conv1, fc1_kernel_size)\n",
    "\n",
    "    # Second convolution + pooling stage.\n",
    "    W_conv2 = weight_variable([conv2_kernel_size, conv2_kernel_size, conv1_out_channel, conv2_out_channel])\n",
    "    b_conv2 = bias_variable([conv2_out_channel])\n",
    "    h_conv2 = conv2_sig(conv2d(h_pool1, W_conv2) + b_conv2)\n",
    "    h_pool2 = max_pool(h_conv2, fc2_kernel_size)\n",
    "\n",
    "    # Flatten; the size is computed from the actual pool output so it stays\n",
    "    # correct for any kernel / pooling configuration.\n",
    "    pool_shape = h_pool2.get_shape().as_list()\n",
    "    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]\n",
    "    reshaped = tf.reshape(h_pool2, [-1, nodes])\n",
    "\n",
    "    # Fully connected stage 1, with dropout.\n",
    "    W_fc1 = weight_variable([nodes, fc_size])\n",
    "    b_fc1 = bias_variable([fc_size])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(reshaped, W_fc1) + b_fc1)\n",
    "\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "    # Fully connected stages 2 and 3 (LeNet-style 84-unit layer, then logits).\n",
    "    W_fc2 = weight_variable([fc_size, 84])\n",
    "    b_fc2 = bias_variable([84])\n",
    "    h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n",
    "\n",
    "    W_fc3 = weight_variable([84, 10])\n",
    "    b_fc3 = bias_variable([10])\n",
    "    y = tf.matmul(h_fc2, W_fc3) + b_fc3  # UN-activated logits\n",
    "\n",
    "    # softmax_cross_entropy_with_logits expects un-activated logits, so do not\n",
    "    # apply softmax manually before this call.\n",
    "    cross_entropy = tf.reduce_mean(\n",
    "        tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "\n",
    "    # L2 regularisation on the fully-connected weights only (biases excluded).\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(l2_rate)\n",
    "    regularization = regularizer(W_fc1) + regularizer(W_fc2) + regularizer(W_fc3)\n",
    "    loss = cross_entropy + regularization\n",
    "\n",
    "    # Exponentially decaying learning rate.  It is driven by the\n",
    "    # Learning_rate_base argument (previously shadowed by a hard-coded 0.5)\n",
    "    # and is actually fed to the optimizer (previously a hard-coded 0.01 was\n",
    "    # used instead, which made the learning-rate tuning cell a no-op).\n",
    "    Learning_rate_decay = 0.99\n",
    "    global_step = tf.Variable(0, trainable = False)\n",
    "    Learning_rate = tf.train.exponential_decay(\n",
    "        Learning_rate_base,\n",
    "        global_step,\n",
    "        mnist.train.num_examples / batchSize,\n",
    "        Learning_rate_decay)\n",
    "\n",
    "    train_step = tf.train.GradientDescentOptimizer(Learning_rate).minimize(\n",
    "        loss, global_step = global_step)\n",
    "\n",
    "    # Build the evaluation ops ONCE; building them inside the training loop\n",
    "    # added new graph nodes on every report step.\n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "    saver = tf.train.Saver(max_to_keep=1)\n",
    "    global max_acc\n",
    "    run_best = 0  # best accuracy of THIS run (the return value)\n",
    "\n",
    "    # Context-managed session so it is always closed (the previous version\n",
    "    # leaked one session per call and spammed device-placement logs).\n",
    "    with tf.Session() as sess:\n",
    "        sess.run(tf.global_variables_initializer())\n",
    "\n",
    "        for step in range(trainingStep):\n",
    "            batch_xs, batch_ys = mnist.train.next_batch(batchSize)\n",
    "            sess.run(train_step,\n",
    "                     feed_dict={x: batch_xs, y_: batch_ys, keep_prob: drop_keep_prob})\n",
    "\n",
    "            if step % batchSize == 0:\n",
    "                # Evaluate with dropout DISABLED (keep_prob=1.0); evaluating\n",
    "                # with training-time dropout under-reports accuracy.\n",
    "                acc = sess.run(accuracy,\n",
    "                               feed_dict={x: mnist.test.images,\n",
    "                                          y_: mnist.test.labels,\n",
    "                                          keep_prob: 1.0})\n",
    "                print(\"batch \" + str(step) + \" acc is \" + str(acc))\n",
    "                run_best = max(run_best, acc)\n",
    "\n",
    "                # Checkpoint only on a new best across ALL runs (global max_acc).\n",
    "                if acc > max_acc:\n",
    "                    max_acc = acc\n",
    "                    saver.save(sess, 'ckpt/mnist.ckpt', global_step=step + 1)\n",
    "                    print(\"替换准确率最高模型：\" + str(max_acc))\n",
    "\n",
    "    # Return this run's own best so the tuning cells compare configurations\n",
    "    # fairly (returning the cumulative global max made every comparison stale).\n",
    "    return run_best\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成一个训练step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-3-051543a25751>:81: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See tf.nn.softmax_cross_entropy_with_logits_v2.\n",
      "\n",
      "batch 0 acc is 0.1045\n",
      "替换准确率最高模型：0.1045\n",
      "batch 1000 acc is 0.9323\n",
      "替换准确率最高模型：0.9323\n"
     ]
    }
   ],
   "source": [
    "# Hyper-parameter sweep: activation function.\n",
    "acc_array = {}\n",
    "best_j = 0\n",
    "best_acc = 0\n",
    "vals = [tf.nn.relu, tf.nn.sigmoid, tf.nn.tanh]\n",
    "for j, val in enumerate(vals):\n",
    "    acc = train(conv1_sig = val, conv2_sig = val, batchSize = batch_global, trainingStep = step_global)\n",
    "    acc_array[j] = acc\n",
    "    if acc > best_acc:\n",
    "        best_acc = acc\n",
    "        best_j = j\n",
    "\n",
    "print(\"最好的是：\"+ str(vals[best_j]) + \", 准确率为：\" + str(best_acc))\n",
    "print(\"超参数调优--激活函数\")\n",
    "# dict views are not valid plot inputs in Python 3 -- materialise as lists.\n",
    "pyplot.plot(list(acc_array.keys()), list(acc_array.values()), 'b-')\n",
    "pyplot.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameter sweep: dropout keep probability.\n",
    "acc_array = {}\n",
    "best_j = 0\n",
    "best_acc = 0\n",
    "vals = [0.1, 0.5, 1]\n",
    "for j, val in enumerate(vals):\n",
    "    acc = train(drop_keep_prob = val, batchSize = batch_global, trainingStep = step_global)\n",
    "    acc_array[j] = acc\n",
    "    if acc > best_acc:\n",
    "        best_acc = acc\n",
    "        best_j = j\n",
    "\n",
    "print(\"最好的是：\"+ str(vals[best_j]) + \", 准确率为：\" + str(best_acc))\n",
    "print(\"超参数调优--keep_drop\")\n",
    "# dict views are not valid plot inputs in Python 3 -- materialise as lists.\n",
    "pyplot.plot(list(acc_array.keys()), list(acc_array.values()), 'b-')\n",
    "pyplot.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameter sweep: L2 regularisation rate.\n",
    "acc_array = {}\n",
    "best_j = 0\n",
    "best_acc = 0\n",
    "vals = [1e-2, 1e-4, 1e-6]\n",
    "for j, val in enumerate(vals):\n",
    "    acc = train(l2_rate = val, batchSize = batch_global, trainingStep = step_global)\n",
    "    acc_array[j] = acc\n",
    "    if acc > best_acc:\n",
    "        best_acc = acc\n",
    "        best_j = j\n",
    "\n",
    "print(\"最好的是：\"+ str(vals[best_j]) + \", 准确率为：\" + str(best_acc))\n",
    "print(\"超参数调优--l2正则\")\n",
    "# dict views are not valid plot inputs in Python 3 -- materialise as lists.\n",
    "pyplot.plot(list(acc_array.keys()), list(acc_array.values()), 'b-')\n",
    "pyplot.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameter sweep: convolution kernel size.\n",
    "acc_array = {}\n",
    "best_j = 0\n",
    "best_acc = 0\n",
    "vals = [3, 5, 7]\n",
    "for j, val in enumerate(vals):\n",
    "    acc = train(conv1_kernel_size = val, batchSize = batch_global, trainingStep = step_global)\n",
    "    acc_array[j] = acc\n",
    "    if acc > best_acc:\n",
    "        best_acc = acc\n",
    "        best_j = j\n",
    "\n",
    "print(\"最好的是：\"+ str(vals[best_j]) + \", 准确率为：\" + str(best_acc))\n",
    "print(\"超参数调优--卷积核大小\")\n",
    "# dict views are not valid plot inputs in Python 3 -- materialise as lists.\n",
    "pyplot.plot(list(acc_array.keys()), list(acc_array.values()), 'b-')\n",
    "pyplot.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameter sweep: pooling kernel size.\n",
    "acc_array = {}\n",
    "best_j = 0\n",
    "best_acc = 0\n",
    "vals = [2, 3, 4]\n",
    "for j, val in enumerate(vals):\n",
    "    acc = train(fc1_kernel_size = val, batchSize = batch_global, trainingStep = step_global)\n",
    "    acc_array[j] = acc\n",
    "    if acc > best_acc:\n",
    "        best_acc = acc\n",
    "        best_j = j\n",
    "\n",
    "print(\"最好的是：\"+ str(vals[best_j]) + \", 准确率为：\" + str(best_acc))\n",
    "print(\"超参数调优--池化核大小\")\n",
    "# dict views are not valid plot inputs in Python 3 -- materialise as lists.\n",
    "pyplot.plot(list(acc_array.keys()), list(acc_array.values()), 'b-')\n",
    "pyplot.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameter sweep: base learning rate.\n",
    "acc_array = {}\n",
    "best_j = 0\n",
    "best_acc = 0\n",
    "vals = [0.1, 0.3, 0.5]\n",
    "for j, val in enumerate(vals):\n",
    "    acc = train(Learning_rate_base = val, batchSize = batch_global, trainingStep = step_global)\n",
    "    acc_array[j] = acc\n",
    "    if acc > best_acc:\n",
    "        best_acc = acc\n",
    "        best_j = j\n",
    "\n",
    "print(\"最好的是：\"+ str(vals[best_j]) + \", 准确率为：\" + str(best_acc))\n",
    "print(\"超参数调优--步长基础\")\n",
    "# dict views are not valid plot inputs in Python 3 -- materialise as lists.\n",
    "pyplot.plot(list(acc_array.keys()), list(acc_array.values()), 'b-')\n",
    "pyplot.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "毫无疑问，这个模型是一个非常简陋，性能也不理想的模型。目前只能达到92%左右的准确率。\n",
    "接下来，希望大家利用现有的知识，将这个模型优化至98%以上的准确率。\n",
    "Hint：\n",
    "- 卷积\n",
    "- 池化\n",
    "- 激活函数\n",
    "- 正则化\n",
    "- 初始化\n",
    "- 摸索一下各个超参数\n",
    "  - 卷积kernel size\n",
    "  - 卷积kernel 数量\n",
    "  - 学习率\n",
    "  - 正则化惩罚因子\n",
    "  - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
