{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# HW7_2 MNIST by CNN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'channels_last'"
      ]
     },
     "execution_count": 60,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Imports: TensorFlow 1.x graph API plus standalone Keras layers\n",
    "# (hybrid TF/Keras model built with the Keras functional API).\n",
    "import argparse\n",
    "import sys\n",
    "import tensorflow as tf\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "from keras.layers.core import Dense, Flatten\n",
    "from keras.layers.convolutional import Conv2D\n",
    "from keras.layers.pooling import MaxPooling2D\n",
    "from keras.initializers import TruncatedNormal,Constant # weight-initialization parameters\n",
    "from keras.regularizers import l2 # L2 regularization\n",
    "from keras.layers.normalization import BatchNormalization\n",
    "\n",
    "from keras import backend as K\n",
    "\n",
    "# Display the backend's image data format (output shows 'channels_last', i.e. NHWC).\n",
    "K.image_data_format()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting input_data\\train-images-idx3-ubyte.gz\n",
      "Extracting input_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting input_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting input_data\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Directory where the MNIST archives are (or will be) stored.\n",
    "data_dir = 'input_data'\n",
    "\n",
    "# Download/extract MNIST via the TF 1.x tutorial helper; labels come back one-hot.\n",
    "mnist = input_data.read_data_sets(train_dir=data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Graph inputs: flattened 28x28 grayscale images and one-hot digit labels.\n",
    "X = tf.placeholder(tf.float32, [None, 784],name='X')\n",
    "y_ = tf.placeholder(tf.float32, [None, 10],name='y_')\n",
    "\n",
    "# Reshape flat pixel vectors into NHWC image tensors for the conv layers.\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(X, [-1, 28, 28, 1])\n",
    "\n",
    "# Conv block 1: 48 3x3 filters, truncated-normal weight init (fixed seed),\n",
    "# ReLU activation and L2 weight decay on the kernel.\n",
    "conv_1 = Conv2D(filters=48, kernel_size=[3,3],kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=123), \n",
    "                strides=[1,1],activation='relu',kernel_regularizer=l2(0.0001),\n",
    "                 padding='same',input_shape=[28,28,1])(x_image)\n",
    "\n",
    "# Batch normalization after the convolution.\n",
    "bat_1=BatchNormalization()(conv_1)\n",
    "\n",
    "# 2x2 max pooling halves the spatial resolution.\n",
    "pool_1 = MaxPooling2D(pool_size=[2,2])(bat_1)\n",
    "\n",
    "# Conv block 2: 100 5x5 filters with the same init/regularization scheme.\n",
    "conv_2 = Conv2D(filters=100, kernel_size=[5,5], kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.05, seed=456),\n",
    "                strides=[1,1],activation='relu',kernel_regularizer=l2(0.0001),\n",
    "                padding='same')(pool_1)\n",
    "\n",
    "bat_2=BatchNormalization()(conv_2)\n",
    "pool_2 = MaxPooling2D(pool_size=[2,2])(bat_2)\n",
    "\n",
    "# Flatten the feature maps for the fully connected layers.\n",
    "flat = Flatten()(pool_2)\n",
    "\n",
    "# Fully connected layer: weight init + L2 regularization, bias initialized to 0.1.\n",
    "fc = Dense(800, activation='relu',kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1, seed=111),\n",
    "          kernel_regularizer=l2(0.0001),bias_initializer=Constant(value=0.1))(flat)\n",
    "\n",
    "bat_3=BatchNormalization()(fc)\n",
    "\n",
    "# Softmax output over the 10 digit classes.\n",
    "# BUG FIX: feed the batch-normalized activations (bat_3) into the output layer;\n",
    "# the original applied it to fc, leaving bat_3 computed but unused.\n",
    "y = Dense(10,activation='softmax',kernel_initializer=TruncatedNormal(mean=0.0, stddev=0.1, seed=222),\n",
    "         kernel_regularizer=l2(0.0001),bias_initializer=Constant(value=0.1))(bat_3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.objectives import categorical_crossentropy\n",
    "\n",
    "# Per-example categorical cross-entropy between the true labels y_ and the\n",
    "# softmax predictions y, averaged over the batch.\n",
    "per_example_loss = categorical_crossentropy(y_, y)\n",
    "cross_entropy = tf.reduce_mean(per_example_loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): an explicit L2 term over all trainable variables was considered\n",
    "# but left commented out. The kernel_regularizer losses created by the Keras\n",
    "# layers are NOT added to total_loss below either — confirm this is intended.\n",
    "#l2_loss = tf.add_n( [tf.nn.l2_loss(w) for w in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)] )\n",
    "\n",
    "# Overall training objective.\n",
    "total_loss = cross_entropy\n",
    "\n",
    "# The learning rate lives in a TF variable so the training loop can decay it\n",
    "# once per outer epoch.\n",
    "learn_rate = tf.Variable(0.001, dtype=tf.float32)\n",
    "train_step = tf.train.AdamOptimizer(learn_rate).minimize(total_loss)\n",
    "\n",
    "# Share a single session between raw TF and the Keras backend, then initialize\n",
    "# every variable (including those created inside the Keras layers).\n",
    "sess = tf.Session()\n",
    "K.set_session(sess)\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第0次迭代的accuracy--0.9686999917030334 leanr_rate-- 0.001\n",
      "第100次迭代的accuracy--0.9807999730110168 leanr_rate-- 0.00095\n",
      "第200次迭代的accuracy--0.9836000204086304 leanr_rate-- 0.0009025\n",
      "第300次迭代的accuracy--0.9876000285148621 leanr_rate-- 0.000857375\n",
      "第400次迭代的accuracy--0.9883999824523926 leanr_rate-- 0.00081450626\n",
      "第500次迭代的accuracy--0.9896000027656555 leanr_rate-- 0.0007737809\n",
      "第600次迭代的accuracy--0.9876000285148621 leanr_rate-- 0.0007350919\n",
      "第700次迭代的accuracy--0.9868000149726868 leanr_rate-- 0.0006983373\n",
      "第800次迭代的accuracy--0.9857000112533569 leanr_rate-- 0.0006634204\n",
      "第900次迭代的accuracy--0.9886000156402588 leanr_rate-- 0.0006302494\n",
      "第1000次迭代的accuracy--0.9907000064849854 leanr_rate-- 0.0005987369\n",
      "第1100次迭代的accuracy--0.991100013256073 leanr_rate-- 0.0005688001\n",
      "第1200次迭代的accuracy--0.9908999800682068 leanr_rate-- 0.0005403601\n",
      "第1300次迭代的accuracy--0.991100013256073 leanr_rate-- 0.0005133421\n",
      "第1400次迭代的accuracy--0.9907000064849854 leanr_rate-- 0.000487675\n",
      "第1500次迭代的accuracy--0.9908999800682068 leanr_rate-- 0.00046329122\n",
      "第1600次迭代的accuracy--0.9914000034332275 leanr_rate-- 0.00044012666\n",
      "第1700次迭代的accuracy--0.9919000267982483 leanr_rate-- 0.00041812033\n",
      "第1800次迭代的accuracy--0.9908000230789185 leanr_rate-- 0.00039721432\n",
      "第1900次迭代的accuracy--0.9921000003814697 leanr_rate-- 0.0003773536\n"
     ]
    }
   ],
   "source": [
    "# Build the evaluation ops ONCE, outside the loop. The original created fresh\n",
    "# equal/argmax/reduce_mean (and tf.assign) nodes on every epoch, silently\n",
    "# growing the TF 1.x graph with each iteration.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "for n in range(20):\n",
    "    # Exponentially decay the learning rate once per outer epoch.\n",
    "    # Variable.load updates the value in-session without adding assign ops.\n",
    "    learn_rate.load(0.001 * (0.95 ** n), sess)\n",
    "    # 100 SGD steps of batch size 100 per epoch.\n",
    "    for i in range(100):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "        sess.run(train_step, feed_dict={X: batch_xs, y_: batch_ys})\n",
    "\n",
    "    # Evaluate on the full test set after each block of 100 training steps.\n",
    "    test_acc = sess.run(accuracy, feed_dict={X: mnist.test.images, y_: mnist.test.labels})\n",
    "    # Typo fix in the printed label: 'leanr_rate' -> 'learn_rate'.\n",
    "    print(\"第{}次迭代的accuracy--{}\".format(n*100, test_acc),\n",
    "          \"learn_rate--\", sess.run(learn_rate))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "不是说添加了BatchNormalization层后能加速计算么.... 加了之后好像算的更慢了...不知道是不是我使用的有问题，如果能多迭代两千步，可能准确率还能更高，因为现在学习率变化才降到0.0003，感觉想要达到0.995的正确率，还要再考虑drop等"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
