{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "\n",
    "\n",
    "# Keras layers used to build the CNN on top of raw TF tensors.\n",
    "from keras.layers.core import Dense, Flatten,Dropout\n",
    "from keras.layers.convolutional import Conv2D\n",
    "from keras.layers.pooling import MaxPooling2D\n",
    "\n",
    "from keras import backend as K\n",
    "\n",
    "# NOTE(review): the return value is discarded, so this call has no effect;\n",
    "# presumably K.set_image_data_format(...) was intended — confirm.\n",
    "K.image_data_format() \n",
    "\n",
    "K.set_learning_phase(1) # set learning phase to 1 (training) so Dropout is active\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n",
      "Extracting ../第七周\\train-images-idx3-ubyte.gz\n",
      "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n",
      "Extracting ../第七周\\train-labels-idx1-ubyte.gz\n",
      "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n",
      "Extracting ../第七周\\t10k-images-idx3-ubyte.gz\n",
      "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n",
      "Extracting ../第七周\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "# Downloads the MNIST archives into data_dir (if absent), extracts them and\n",
    "# loads train/test splits with one-hot encoded labels.\n",
    "data_dir = '../第七周'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 输入输出占位符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the model\n",
    "x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot target labels\n",
    "x_image = tf.reshape(x, [-1, 28, 28, 1])  # reshape flat input to [batch, 28, 28, 1] so it can feed the conv net"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 定义函数，根据参数构造卷积神经网络。用于摸索不同的参数获得最佳的性能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a small CNN (two conv+pool stages, dropout, two dense layers) on top of\n",
    "# an existing tensor, so different hyper-parameters can be tried easily.\n",
    "def construct_net(input_net,\n",
    "                  kernel_num1,kernel_num2,\n",
    "                  kernel_size1,kernel_size2,\n",
    "                  strides,padding,input_shape,\n",
    "                  pool_size1,pool_size2,\n",
    "                  kernel_initializer=None,activity_regularizer=None,\n",
    "                  dropout_rate=0.5,\n",
    "                  activation=\"relu\"):\n",
    "    \"\"\"Return the 10-way softmax output of a two-conv-layer CNN over input_net.\n",
    "\n",
    "    Args:\n",
    "        input_net: input tensor (here a [batch, 28, 28, 1] image tensor).\n",
    "        kernel_num1, kernel_num2: number of filters in conv layer 1 / 2.\n",
    "        kernel_size1, kernel_size2: filter sizes for conv layer 1 / 2.\n",
    "        strides, padding: stride and padding mode shared by both conv layers.\n",
    "        input_shape: passed to the first Conv2D layer.\n",
    "        pool_size1, pool_size2: max-pooling sizes for stage 1 / 2.\n",
    "        kernel_initializer, activity_regularizer: applied to both conv layers.\n",
    "        dropout_rate: fraction of units dropped after the conv stages (default 0.5).\n",
    "        activation: hidden-layer activation (default \"relu\").\n",
    "\n",
    "    Returns:\n",
    "        A [batch, 10] softmax probability tensor.\n",
    "    \"\"\"\n",
    "    \n",
    "    net = Conv2D(kernel_num1,kernel_size1,strides=strides,activation=activation,padding=padding,\n",
    "                 kernel_initializer=kernel_initializer ,activity_regularizer=activity_regularizer,\n",
    "                 input_shape=input_shape)(input_net)  # conv layer 1 (input_shape is presumably redundant when called on a tensor — confirm)\n",
    "    net = MaxPooling2D(pool_size1)(net)               # pooling layer 1\n",
    "    \n",
    "    net = Conv2D(kernel_num2,kernel_size2,strides=strides,activation=activation,padding=padding,\n",
    "             kernel_initializer=kernel_initializer ,activity_regularizer=activity_regularizer)(net)  # conv layer 2\n",
    "    net = MaxPooling2D(pool_size2)(net)              # pooling layer 2\n",
    "    \n",
    "    net=Dropout(dropout_rate)(net)                   # dropout layer, default rate 0.5 (fraction dropped)\n",
    "    net = Flatten()(net)                            # flatten conv output: transition to the dense layers\n",
    "    net = Dense(1024, activation=activation)(net)  # fully-connected hidden layer\n",
    "    net = Dense(10,activation='softmax')(net)  # output layer; the final activation must be softmax\n",
    "    \n",
    "    return net\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.初步构建网络，训练数据，摸索各项参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.objectives import categorical_crossentropy\n",
    "from keras import initializers\n",
    "from keras import regularizers\n",
    "\n",
    "\n",
    "\n",
    "# First attempt: 48 filters per layer, 3x3 kernels, heavy regularisation.\n",
    "net = construct_net(x_image,\n",
    "                  kernel_num1 = 48,kernel_num2 = 48,                                     # number of filters: layer 1 = 48, layer 2 = 48\n",
    "                  kernel_size1 = [3,3],kernel_size2 = [3,3],                             # kernel size: layer 1 [3,3], layer 2 [3,3]\n",
    "                  strides = [1,1],padding = \"SAME\",input_shape=[28,28,1],                # stride 1, padding \"SAME\", input shape [28,28,1]\n",
    "                  pool_size1 = [2,2],pool_size2 = [2,2],                                 # pooling size: layer 1 [2,2], layer 2 [2,2]\n",
    "                  kernel_initializer = initializers.random_normal(stddev=0.01),          # weights initialised from a normal distribution, stddev 0.01\n",
    "                  activity_regularizer = regularizers.l1_l2(0.01),                       # L1+L2 regularisation on each conv layer's output, lambda=0.01\n",
    "                  dropout_rate=0.5,activation = \"relu\")                                  # dropout 0.5, ReLU activation\n",
    "\n",
    "# Cross-entropy between the one-hot labels y_ and the softmax output, averaged over the batch.\n",
    "loss = tf.reduce_mean(categorical_crossentropy(y_, net))\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 设置衰减的学习器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "sess = tf.Session()\n",
    "K.set_session(sess)   # make the Keras layers use this TF session\n",
    "\n",
    "global_step = tf.Variable(0)        # training step counter, incremented by the optimizer\n",
    "learning_rate = tf.train.exponential_decay(0.01, global_step, 100, 0.99, staircase=True)     # exponentially decaying LR: starts at 0.01, multiplied by 0.99 every 100 steps\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)   # plain gradient-descent optimizer minimising the loss\n",
    "\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100,learing_rate [0.0099],loss 2.2998626232147217\n",
      "test_score:  0.1139\n",
      "step 200,learing_rate [0.009801],loss 2.294757604598999\n",
      "test_score:  0.1135\n",
      "step 300,learing_rate [0.00970299],loss 2.297482490539551\n",
      "test_score:  0.1138\n",
      "step 400,learing_rate [0.00960596],loss 2.2882080078125\n",
      "test_score:  0.1229\n",
      "step 500,learing_rate [0.009509901],loss 2.286931037902832\n",
      "test_score:  0.1881\n",
      "step 600,learing_rate [0.009414802],loss 2.2644472122192383\n",
      "test_score:  0.3634\n",
      "step 700,learing_rate [0.009320654],loss 2.207782030105591\n",
      "test_score:  0.4206\n",
      "step 800,learing_rate [0.009227447],loss 1.9282559156417847\n",
      "test_score:  0.5497\n",
      "step 900,learing_rate [0.009135174],loss 0.9739226698875427\n",
      "test_score:  0.6774\n",
      "step 1000,learing_rate [0.009043821],loss 0.7131621837615967\n",
      "test_score:  0.7595\n"
     ]
    }
   ],
   "source": [
    "# Train for 1000 mini-batch SGD steps; every 100 steps report the decayed\n",
    "# learning rate, the last batch loss and the test-set accuracy.\n",
    "\n",
    "# Build the evaluation ops ONCE, outside the loop: creating tf.argmax/tf.equal/\n",
    "# tf.reduce_mean inside the loop adds new nodes to the default graph on every\n",
    "# report, steadily bloating the graph and slowing the session down.\n",
    "correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "for step in range(1000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    _, loss_value = sess.run([train_step, loss], feed_dict={x: batch_xs, y_: batch_ys})\n",
    "\n",
    "    if (step + 1) % 100 == 0:\n",
    "        learning_rate_value = sess.run([learning_rate])\n",
    "        print(\"step {},learning_rate {},loss {}\".format(step + 1, learning_rate_value, loss_value))\n",
    "        test_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n",
    "        print(\"test_score: \", test_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1000次梯度下降,最终测试集正确率76%。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.重新调整一下参数试试"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 调整卷积核数目与size，dropout比例"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Second attempt: more/larger filters, weaker regularisation, lighter dropout.\n",
    "net = construct_net(x_image,\n",
    "                  kernel_num1 = 32,kernel_num2 = 64,                                     # number of filters: layer 1 = 32, layer 2 = 64\n",
    "                  kernel_size1 = [5,5],kernel_size2 = [5,5],                             # kernel size: layer 1 [5,5], layer 2 [5,5]\n",
    "                  strides = [1,1],padding = \"SAME\",input_shape=[28,28,1],                # unchanged\n",
    "                  pool_size1 = [2,2],pool_size2 = [2,2],                                 # unchanged\n",
    "                  kernel_initializer = initializers.random_normal(stddev=0.01),          # unchanged\n",
    "                  activity_regularizer = regularizers.l1_l2(0.001),                      # regularisation weakened: L1+L2 with lambda=0.001\n",
    "                  dropout_rate=0.25,activation = \"relu\")                                  # dropout lowered to 0.25\n",
    "loss = tf.reduce_mean(categorical_crossentropy(y_, net))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 重置学习率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the decaying-LR optimizer for the new model and re-initialise all variables.\n",
    "global_step = tf.Variable(0)        \n",
    "learning_rate = tf.train.exponential_decay(0.0095, global_step, 100, 0.99, staircase=True)         # initial LR 0.0095, multiplied by 0.99 every 100 steps\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)   \n",
    "\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100,learing_rate [0.009405],loss 2.297111749649048\n",
      "test_score:  0.224\n",
      "step 200,learing_rate [0.00931095],loss 2.2832939624786377\n",
      "test_score:  0.3027\n",
      "step 300,learing_rate [0.009217841],loss 2.2167980670928955\n",
      "test_score:  0.3728\n",
      "step 400,learing_rate [0.009125662],loss 1.6953243017196655\n",
      "test_score:  0.5665\n",
      "step 500,learing_rate [0.009034405],loss 1.0394383668899536\n",
      "test_score:  0.7647\n",
      "step 600,learing_rate [0.008944062],loss 0.5433394908905029\n",
      "test_score:  0.828\n",
      "step 700,learing_rate [0.008854621],loss 0.4131075143814087\n",
      "test_score:  0.8544\n",
      "step 800,learing_rate [0.008766075],loss 0.5726116299629211\n",
      "test_score:  0.8728\n",
      "step 900,learing_rate [0.008678415],loss 0.4520035684108734\n",
      "test_score:  0.8794\n",
      "step 1000,learing_rate [0.0085916305],loss 0.2685607969760895\n",
      "test_score:  0.895\n"
     ]
    }
   ],
   "source": [
    "# Train the second configuration for 1000 steps, reporting every 100 steps.\n",
    "\n",
    "# Build the evaluation ops ONCE, outside the loop: creating tf.argmax/tf.equal/\n",
    "# tf.reduce_mean inside the loop adds new nodes to the default graph on every\n",
    "# report, steadily bloating the graph and slowing the session down.\n",
    "correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "for step in range(1000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    _, loss_value = sess.run([train_step, loss], feed_dict={x: batch_xs, y_: batch_ys})\n",
    "\n",
    "    if (step + 1) % 100 == 0:\n",
    "        learning_rate_value = sess.run([learning_rate])\n",
    "        print(\"step {},learning_rate {},loss {}\".format(step + 1, learning_rate_value, loss_value))\n",
    "        test_score = sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n",
    "        print(\"test_score: \", test_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 正确率提高不少,测试集最高89%正确率"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.进一步优化，减小学习率，加大搜索深度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Third attempt: same architecture as attempt 2; only the learning-rate schedule\n",
    "# and the number of training steps change (see the next cells).\n",
    "net = construct_net(x_image,\n",
    "                  kernel_num1 = 32,kernel_num2 = 64,                                     # number of filters: layer 1 = 32, layer 2 = 64\n",
    "                  kernel_size1 = [5,5],kernel_size2 = [5,5],                             # kernel size: layer 1 [5,5], layer 2 [5,5]\n",
    "                  strides = [1,1],padding = \"SAME\",input_shape=[28,28,1],                # unchanged\n",
    "                  pool_size1 = [2,2],pool_size2 = [2,2],                                 # unchanged\n",
    "                  kernel_initializer = initializers.random_normal(stddev=0.01),          # unchanged\n",
    "                  activity_regularizer = regularizers.l1_l2(0.001),                       # unchanged\n",
    "                  dropout_rate=0.25,activation = \"relu\")                                  # unchanged (0.25)\n",
    "loss = tf.reduce_mean(categorical_crossentropy(y_, net))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "global_step = tf.Variable(0)        # training step counter\n",
    "learning_rate = tf.train.exponential_decay(0.092, global_step, 100, 0.98, staircase=True)         # initial LR 0.092, multiplied by 0.98 every 100 steps (the original comment said 0.0092, but the code uses 0.092)\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)   # gradient-descent optimizer minimising the loss\n",
    "\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100,learing_rate [0.090160005],loss 0.3140428960323334\n",
      "test_score:  0.8903\n",
      "step 200,learing_rate [0.08835681],loss 0.2273411899805069\n",
      "test_score:  0.9418\n",
      "step 300,learing_rate [0.086589664],loss 0.15667539834976196\n",
      "test_score:  0.9534\n",
      "step 400,learing_rate [0.08485787],loss 0.14337068796157837\n",
      "test_score:  0.9592\n",
      "step 500,learing_rate [0.08316072],loss 0.05901096761226654\n",
      "test_score:  0.9666\n",
      "step 600,learing_rate [0.08149751],loss 0.06775205582380295\n",
      "test_score:  0.9719\n",
      "step 700,learing_rate [0.079867564],loss 0.07285448163747787\n",
      "test_score:  0.9746\n",
      "step 800,learing_rate [0.07827021],loss 0.083175428211689\n",
      "test_score:  0.9763\n",
      "step 900,learing_rate [0.07670481],loss 0.0533035546541214\n",
      "test_score:  0.9788\n",
      "step 1000,learing_rate [0.07517072],loss 0.0910586565732956\n",
      "test_score:  0.9727\n",
      "step 1100,learing_rate [0.0736673],loss 0.018551798537373543\n",
      "test_score:  0.9822\n",
      "step 1200,learing_rate [0.07219396],loss 0.10410640388727188\n",
      "test_score:  0.977\n",
      "step 1300,learing_rate [0.07075008],loss 0.09902777522802353\n",
      "test_score:  0.9804\n",
      "step 1400,learing_rate [0.06933508],loss 0.08400369435548782\n",
      "test_score:  0.9801\n",
      "step 1500,learing_rate [0.06794838],loss 0.08805850893259048\n",
      "test_score:  0.9813\n",
      "step 1600,learing_rate [0.06658941],loss 0.02851034887135029\n",
      "test_score:  0.9832\n",
      "step 1700,learing_rate [0.065257624],loss 0.02258117124438286\n",
      "test_score:  0.9833\n",
      "step 1800,learing_rate [0.063952476],loss 0.025362933054566383\n",
      "test_score:  0.9841\n",
      "step 1900,learing_rate [0.06267343],loss 0.04350505769252777\n",
      "test_score:  0.9846\n",
      "step 2000,learing_rate [0.06141996],loss 0.02214573137462139\n",
      "test_score:  0.9846\n",
      "step 2100,learing_rate [0.06019156],loss 0.01712772808969021\n",
      "test_score:  0.9845\n",
      "step 2200,learing_rate [0.05898773],loss 0.02260717935860157\n",
      "test_score:  0.9844\n",
      "step 2300,learing_rate [0.05780798],loss 0.0811452567577362\n",
      "test_score:  0.9851\n",
      "step 2400,learing_rate [0.05665182],loss 0.053012434393167496\n",
      "test_score:  0.9855\n",
      "step 2500,learing_rate [0.055518784],loss 0.011694085784256458\n",
      "test_score:  0.9847\n",
      "step 2600,learing_rate [0.05440841],loss 0.03203009441494942\n",
      "test_score:  0.986\n",
      "step 2700,learing_rate [0.053320244],loss 0.022672057151794434\n",
      "test_score:  0.9867\n",
      "step 2800,learing_rate [0.052253835],loss 0.020514504984021187\n",
      "test_score:  0.9862\n",
      "step 2900,learing_rate [0.05120876],loss 0.013482170179486275\n",
      "test_score:  0.986\n",
      "step 3000,learing_rate [0.05018459],loss 0.009610479697585106\n",
      "test_score:  0.9869\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "for step in range(3000):\n",
    "    \n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    _, loss_value= sess.run( [train_step, loss], feed_dict={x: batch_xs, y_: batch_ys})\n",
    "  \n",
    "    if (step+1) % 100 == 0:\n",
    "        learning_rate_value=sess.run([learning_rate])\n",
    "        print(\"step {},learing_rate {},loss {}\".format(step+1,learning_rate_value,loss_value))\n",
    "        correct_prediction = tf.equal(tf.argmax(net, 1), tf.argmax(y_, 1))\n",
    "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "        test_score=sess.run(accuracy, feed_dict={x: mnist.test.images,y_: mnist.test.labels})\n",
    "        print(\"test_score: \",test_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 优化在第1100步时就已达到98%成功率，3000后成功率为98.69%,且还未收敛。如若继续加大学习次数，应该可以达到99%。最终结果："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 卷积：双层卷积，卷积核:第一层32,size[5,5],第二层64，size[5,5]\n",
    "#### 池化:  maxpooling最大区域池化，size均为[2,2]\n",
    "#### 激活函数: relu,输出层为softmax\n",
    "#### dropout: 丢弃比例(rate)为0.25\n",
    "#### 正则化: 施加在每层卷积的输出上的正则化，L1+L2正则，参数lambda=0.001\n",
    "#### 权重初始化: 采用均值0，标准差0.01的正态分布初始化\n",
    "#### 学习率：逐步缩减的学习器。初始化学习率为0.092，每100步以0.98速率缩减，最终3000次后学习率为0.05,训练集的loss为0.0096"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
