{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 导入需要的包和数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/guanyu/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n",
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
     "import tensorflow as tf  \n",
     "# from tensorflow.examples.tutorials.mnist import input_data  \n",
     "import argparse\n",
     "import sys  \n",
     "import numpy\n",
     "from keras.datasets import mnist\n",
     "from keras.models import Sequential\n",
     "from keras.layers import Dense\n",
     "from keras.layers import Dropout\n",
     "from keras.layers import Flatten\n",
     "from keras.layers.convolutional import Conv2D\n",
     "from keras.layers.convolutional import MaxPooling2D\n",
     "from keras.utils import np_utils\n",
     "from keras import backend as K\n",
     "# Use the channels-first ('th') image layout: (samples, channels, rows, cols).\n",
     "# Every input_shape=(1, 28, 28) below depends on this setting.\n",
     "K.set_image_dim_ordering('th')\n",
     "import matplotlib.pyplot as plt\n",
     "from keras import regularizers\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 载入数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# load data\n",
     "(X_train, y_train), (X_test, y_test) = mnist.load_data()\n",
     "# reshape to be [samples][pixels][width][height]\n",
     "# (channels-first layout, matching K.set_image_dim_ordering('th'))\n",
     "X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\n",
     "X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n",
     "\n",
     "# scale pixel intensities from [0, 255] down to [0, 1]\n",
     "X_train = X_train / 255\n",
     "X_test = X_test / 255\n",
     "# one hot encode outputs\n",
     "y_train = np_utils.to_categorical(y_train)\n",
     "y_test = np_utils.to_categorical(y_test)\n",
     "num_classes = y_test.shape[1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 设定随机种子"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Fix the NumPy RNG seed so runs are repeatable.\n",
     "# NOTE(review): ideally set this before any stochastic step; it still\n",
     "# precedes all model construction and training here, so it is effective.\n",
     "seed = 7\n",
     "numpy.random.seed(seed)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构造CNN\n",
     "## 这里先构造一个模型，使用两层卷积，随意设置kernel的规模、数量；初始化权重使用random_uniform，正则使用L2，学习率使用adam的默认参数，之后会进行调优。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Two stacked conv/pool stages\n",
     "def baseline_model():\n",
     "    \"\"\"Baseline CNN: two conv/pool stages, L2(0.01) on the first conv,\n",
     "    random_uniform init, two hidden dense layers, default adam optimizer.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(32, (5, 5),kernel_initializer='random_uniform',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2)))\n",
     "    model.add(Conv2D(15,(3,3),kernel_initializer='random_uniform',activation='relu'))  \n",
     "    model.add(MaxPooling2D(pool_size=(2,2)))  \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,activation='relu'))\n",
     "    model.add(Dense(64,activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/10\n",
      " - 245s - loss: 0.4364 - acc: 0.8663 - val_loss: 0.1162 - val_acc: 0.9709\n",
      "Epoch 2/10\n",
      " - 253s - loss: 0.1277 - acc: 0.9666 - val_loss: 0.0765 - val_acc: 0.9806\n",
      "Epoch 3/10\n",
      " - 249s - loss: 0.0962 - acc: 0.9748 - val_loss: 0.0554 - val_acc: 0.9872\n",
      "Epoch 4/10\n",
      " - 249s - loss: 0.0799 - acc: 0.9787 - val_loss: 0.0515 - val_acc: 0.9863\n",
      "Epoch 5/10\n",
      " - 249s - loss: 0.0698 - acc: 0.9819 - val_loss: 0.0439 - val_acc: 0.9888\n",
      "Epoch 6/10\n",
      " - 249s - loss: 0.0602 - acc: 0.9837 - val_loss: 0.0382 - val_acc: 0.9897\n",
      "Epoch 7/10\n",
      " - 250s - loss: 0.0560 - acc: 0.9849 - val_loss: 0.0411 - val_acc: 0.9887\n",
      "Epoch 8/10\n",
      " - 248s - loss: 0.0502 - acc: 0.9861 - val_loss: 0.0357 - val_acc: 0.9916\n",
      "Epoch 9/10\n",
      " - 249s - loss: 0.0492 - acc: 0.9865 - val_loss: 0.0340 - val_acc: 0.9906\n",
      "Epoch 10/10\n",
      " - 250s - loss: 0.0446 - acc: 0.9880 - val_loss: 0.0314 - val_acc: 0.9924\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model()\n",
     "# Fit the model\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)\n",
     "# print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 以上是初步尝试使用Keras，可看出正确率达到了98.80%，之前使用单层卷积，正确率达到了99.55%，所以下面去掉一层卷积"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_1():\n",
     "    \"\"\"Single conv/pool stage (5x5 kernels): drops the second conv stage\n",
     "    and the 64-unit dense layer of baseline_model.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(32, (5, 5),kernel_initializer='random_uniform',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/10\n",
      " - 237s - loss: 0.2762 - acc: 0.9271 - val_loss: 0.1025 - val_acc: 0.9765\n",
      "Epoch 2/10\n",
      " - 240s - loss: 0.1005 - acc: 0.9765 - val_loss: 0.0800 - val_acc: 0.9810\n",
      "Epoch 3/10\n",
      " - 240s - loss: 0.0798 - acc: 0.9820 - val_loss: 0.0630 - val_acc: 0.9853\n",
      "Epoch 4/10\n",
      " - 278s - loss: 0.0669 - acc: 0.9850 - val_loss: 0.0574 - val_acc: 0.9867\n",
      "Epoch 5/10\n",
      " - 320s - loss: 0.0584 - acc: 0.9872 - val_loss: 0.0529 - val_acc: 0.9871\n",
      "Epoch 6/10\n",
      " - 301s - loss: 0.0534 - acc: 0.9880 - val_loss: 0.0517 - val_acc: 0.9875\n",
      "Epoch 7/10\n",
      " - 309s - loss: 0.0478 - acc: 0.9896 - val_loss: 0.0482 - val_acc: 0.9879\n",
      "Epoch 8/10\n",
      " - 279s - loss: 0.0439 - acc: 0.9908 - val_loss: 0.0450 - val_acc: 0.9888\n",
      "Epoch 9/10\n",
      " - 305s - loss: 0.0415 - acc: 0.9904 - val_loss: 0.0454 - val_acc: 0.9883\n",
      "Epoch 10/10\n",
      " - 280s - loss: 0.0369 - acc: 0.9922 - val_loss: 0.0434 - val_acc: 0.9892\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_1()\n",
     "# Fit the model\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)\n",
     "# print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 似乎都使用默认的参数，效果更好，\n",
     "## 参数调优：尝试扩大kernel的规模为20x20"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_2():\n",
     "    \"\"\"Like baseline_model_1 but with the conv kernel enlarged to 20x20\n",
     "    (valid padding leaves only a 9x9 feature map, so it trains faster).\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(32, (20, 20),kernel_initializer='random_uniform',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/10\n",
      " - 68s - loss: 0.4561 - acc: 0.8925 - val_loss: 0.2156 - val_acc: 0.9577\n",
      "Epoch 2/10\n",
      " - 69s - loss: 0.2049 - acc: 0.9564 - val_loss: 0.1447 - val_acc: 0.9731\n",
      "Epoch 3/10\n",
      " - 67s - loss: 0.1563 - acc: 0.9670 - val_loss: 0.1196 - val_acc: 0.9780\n",
      "Epoch 4/10\n",
      " - 78s - loss: 0.1365 - acc: 0.9711 - val_loss: 0.1153 - val_acc: 0.9772\n",
      "Epoch 5/10\n",
      " - 81s - loss: 0.1210 - acc: 0.9756 - val_loss: 0.1021 - val_acc: 0.9807\n",
      "Epoch 6/10\n",
      " - 81s - loss: 0.1128 - acc: 0.9771 - val_loss: 0.0877 - val_acc: 0.9840\n",
      "Epoch 7/10\n",
      " - 82s - loss: 0.1067 - acc: 0.9778 - val_loss: 0.0927 - val_acc: 0.9817\n",
      "Epoch 8/10\n",
      " - 74s - loss: 0.1033 - acc: 0.9796 - val_loss: 0.1003 - val_acc: 0.9798\n",
      "Epoch 9/10\n",
      " - 74s - loss: 0.0968 - acc: 0.9807 - val_loss: 0.0862 - val_acc: 0.9836\n",
      "Epoch 10/10\n",
      " - 73s - loss: 0.0954 - acc: 0.9807 - val_loss: 0.0774 - val_acc: 0.9853\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_2()\n",
     "# Fit the model\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)\n",
     "# print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 时间大致上减少到了原来的1/4，正确率下降了一些，如果增加epoch的话，应该可以增加到99%以上，之前试验过，如果使用更小的kernel，比如2x2，那么时间会增加，但是正确率并不会显著增加，因为手写识别的图片细节处并没有那么细微"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 增加kernel的数量为64，为节省时间，epoch减少为3，大致看看结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_3():\n",
     "    \"\"\"Like baseline_model_2 but with 64 conv filters instead of 32.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='random_uniform',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/3\n",
      " - 114s - loss: 0.4405 - acc: 0.9100 - val_loss: 0.2061 - val_acc: 0.9612\n",
      "Epoch 2/3\n",
      " - 103s - loss: 0.1913 - acc: 0.9624 - val_loss: 0.1400 - val_acc: 0.9743\n",
      "Epoch 3/3\n",
      " - 124s - loss: 0.1513 - acc: 0.9699 - val_loss: 0.1208 - val_acc: 0.9798\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_3()\n",
     "# Fit the model (only 3 epochs: quick comparison run, not a final result)\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)\n",
     "# print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 时间基本上增加了一倍，准确率有所上升，可以推测，适当减小kernel规模，适当增加kernel数量，应该可以增加准确率"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3 更改初始化权重参数为lecun"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_4():\n",
     "    \"\"\"Like baseline_model_3 but the conv layer uses lecun_normal\n",
     "    weight initialization instead of random_uniform.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/3\n",
      " - 109s - loss: 0.5484 - acc: 0.9045 - val_loss: 0.2308 - val_acc: 0.9594\n",
      "Epoch 2/3\n",
      " - 106s - loss: 0.2036 - acc: 0.9614 - val_loss: 0.1511 - val_acc: 0.9741\n",
      "Epoch 3/3\n",
      " - 109s - loss: 0.1546 - acc: 0.9696 - val_loss: 0.1263 - val_acc: 0.9763\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_4()\n",
     "# Fit the model (3 epochs: quick comparison run)\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)\n",
     "# print(\"Baseline Error: %.2f%%\" % (100-scores[1]*100))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 感觉变化不大，试着在后面的dense层也增加初始化权重参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_5():\n",
     "    \"\"\"Like baseline_model_4, additionally using lecun_normal init on\n",
     "    the 128-unit dense layer.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,kernel_initializer='lecun_normal',activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/3\n",
      " - 99s - loss: 0.5281 - acc: 0.9073 - val_loss: 0.2205 - val_acc: 0.9625\n",
      "Epoch 2/3\n",
      " - 117s - loss: 0.1981 - acc: 0.9620 - val_loss: 0.1475 - val_acc: 0.9747\n",
      "Epoch 3/3\n",
      " - 117s - loss: 0.1519 - acc: 0.9707 - val_loss: 0.1225 - val_acc: 0.9789\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_5()\n",
     "# Fit the model (3 epochs: quick comparison run)\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 感觉正确率有所上升"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4 更改正则化因子"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_6():\n",
     "    \"\"\"Like baseline_model_5, additionally applying L2(0.01)\n",
     "    regularization to the 128-unit dense layer.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,kernel_initializer='lecun_normal',kernel_regularizer=regularizers.l2(0.01),activation='relu'))\n",
     "    model.add(Dense(num_classes, activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/3\n",
      " - 115s - loss: 0.8785 - acc: 0.9014 - val_loss: 0.4016 - val_acc: 0.9528\n",
      "Epoch 2/3\n",
      " - 128s - loss: 0.3783 - acc: 0.9497 - val_loss: 0.3228 - val_acc: 0.9627\n",
      "Epoch 3/3\n",
      " - 133s - loss: 0.3283 - acc: 0.9572 - val_loss: 0.2890 - val_acc: 0.9699\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_6()\n",
     "# Fit the model (3 epochs: quick comparison run)\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 正确率下降了，说明正则化因子不应该用在那一层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_7():\n",
     "    \"\"\"Like baseline_model_5, but the L2(0.01) regularizer is applied to\n",
     "    the softmax output layer instead of the hidden dense layer.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,kernel_initializer='lecun_normal',activation='relu'))\n",
     "    model.add(Dense(num_classes,kernel_regularizer=regularizers.l2(0.01),activation='softmax'))\n",
     "    # Compile model\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/3\n",
      " - 106s - loss: 0.6870 - acc: 0.9064 - val_loss: 0.3597 - val_acc: 0.9574\n",
      "Epoch 2/3\n",
      " - 95s - loss: 0.3177 - acc: 0.9571 - val_loss: 0.2501 - val_acc: 0.9698\n",
      "Epoch 3/3\n",
      " - 95s - loss: 0.2534 - acc: 0.9649 - val_loss: 0.2110 - val_acc: 0.9736\n"
     ]
    }
   ],
   "source": [
     "# build the model\n",
     "model = baseline_model_7()\n",
     "# Fit the model (3 epochs: quick comparison run)\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 正确率有所上升，但是不确定，因为本身结果不是一定的,需要更多的epoch来确定"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5 调整学习率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.optimizers import Adam"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_8():\n",
     "    \"\"\"Same architecture as baseline_model_7, but compiled with an\n",
     "    explicitly constructed Adam optimizer instead of the 'adam' string.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,kernel_initializer='lecun_normal',activation='relu'))\n",
     "    model.add(Dense(num_classes,kernel_regularizer=regularizers.l2(0.01),activation='softmax'))\n",
     "    # Compile model\n",
     "    # NOTE(review): these values match Adam's documented defaults, so this\n",
     "    # should be equivalent to optimizer='adam' -- a starting point for tuning.\n",
     "    adam = Adam(lr=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-8)\n",
     "    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/3\n",
      " - 91s - loss: 0.6710 - acc: 0.9079 - val_loss: 0.3431 - val_acc: 0.9584\n",
      "Epoch 2/3\n",
      " - 94s - loss: 0.3103 - acc: 0.9577 - val_loss: 0.2494 - val_acc: 0.9685\n",
      "Epoch 3/3\n",
      " - 93s - loss: 0.2517 - acc: 0.9645 - val_loss: 0.2146 - val_acc: 0.9735\n"
     ]
    }
   ],
   "source": [
     "model = baseline_model_8()\n",
     "# Fit the model (3 epochs: quick comparison run)\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 不能立刻看出来，epoch太少，需要更多的epoch，接着试验一下sgd，梯度下降算法的优化器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_9():\n",
     "    \"\"\"Same architecture as baseline_model_7 but compiled with the\n",
     "    default 'sgd' optimizer (plain SGD, no momentum by default).\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (20, 20),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,kernel_initializer='lecun_normal',activation='relu'))\n",
     "    model.add(Dense(num_classes,kernel_regularizer=regularizers.l2(0.01),activation='softmax'))\n",
     "    # Compile model\n",
     "    # NOTE(review): the string 'sgd' builds an SGD optimizer with its default\n",
     "    # settings; a LearningRateScheduler callback at fit time can still\n",
     "    # override its learning rate each epoch.\n",
     "    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/10\n",
      " - 104s - loss: 1.7772 - acc: 0.7150 - val_loss: 1.1540 - val_acc: 0.8783\n",
      "Epoch 2/10\n",
      " - 137s - loss: 1.1102 - acc: 0.8681 - val_loss: 0.9850 - val_acc: 0.8989\n",
      "Epoch 3/10\n",
      " - 136s - loss: 0.9835 - acc: 0.8880 - val_loss: 0.8964 - val_acc: 0.9075\n",
      "Epoch 4/10\n",
      " - 151s - loss: 0.8986 - acc: 0.8974 - val_loss: 0.8220 - val_acc: 0.9164\n",
      "Epoch 5/10\n",
      " - 140s - loss: 0.8301 - acc: 0.9056 - val_loss: 0.7629 - val_acc: 0.9217\n",
      "Epoch 6/10\n",
      " - 136s - loss: 0.7716 - acc: 0.9113 - val_loss: 0.7136 - val_acc: 0.9243\n",
      "Epoch 7/10\n",
      " - 147s - loss: 0.7217 - acc: 0.9166 - val_loss: 0.6675 - val_acc: 0.9293\n",
      "Epoch 8/10\n",
      " - 119s - loss: 0.6784 - acc: 0.9202 - val_loss: 0.6285 - val_acc: 0.9319\n",
      "Epoch 9/10\n",
      " - 140s - loss: 0.6390 - acc: 0.9239 - val_loss: 0.5918 - val_acc: 0.9341\n",
      "Epoch 10/10\n",
      " - 115s - loss: 0.6127 - acc: 0.9259 - val_loss: 0.5760 - val_acc: 0.9371\n"
     ]
    }
   ],
   "source": [
     "from keras.callbacks import LearningRateScheduler\n",
     "from keras.optimizers import SGD\n",
     "import math\n",
     "  \n",
     "def step_decay(epoch):\n",
     "    \"\"\"Step-decay schedule: start at 0.01 and halve every 10 epochs.\"\"\"\n",
     "    initial_lrate = 0.01\n",
     "    drop = 0.5\n",
     "    epochs_drop = 10.0\n",
     "    lrate = initial_lrate * math.pow(drop,math.floor((1+epoch)/epochs_drop))\n",
     "    return lrate\n",
     "lrate = LearningRateScheduler(step_decay)\n",
     "# NOTE(review): this SGD instance is never passed to model.compile --\n",
     "# baseline_model_9 compiles with the plain 'sgd' string, so momentum=0.9 is\n",
     "# NOT applied here; only the LearningRateScheduler callback takes effect.\n",
     "# To honor the intent, compile with optimizer=sgd inside baseline_model_9.\n",
     "sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)\n",
     "# model.fit(train_set_x, train_set_y, validation_split=0.1, nb_epoch=200, batch_size=256, callbacks=[lrate])\n",
     "model = baseline_model_9()\n",
     "# Fit the model\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2,callbacks=[lrate])\n",
     "# Final evaluation of the model\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 感觉收敛速度比较慢，效果不是很好，需要的epoch应该很多，还是使用adam，最后做一次最终的预测看看"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
     "def baseline_model_10():\n",
     "    \"\"\"Final tuned model: 64 conv filters back at 5x5, lecun_normal init,\n",
     "    L2(0.01) on the output layer, explicitly configured Adam optimizer.\"\"\"\n",
     "    # create model\n",
     "    model = Sequential()\n",
     "    model.add(Conv2D(64, (5, 5),kernel_initializer='lecun_normal',padding='valid',kernel_regularizer=regularizers.l2(0.01),input_shape=(1, 28, 28), activation='relu'))\n",
     "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
     "    model.add(Dropout(0.2))\n",
     "    model.add(Flatten())\n",
     "    model.add(Dense(128,kernel_initializer='lecun_normal',activation='relu'))\n",
     "    model.add(Dense(num_classes,kernel_regularizer=regularizers.l2(0.01),activation='softmax'))\n",
     "    # Compile model\n",
     "    adam = Adam(lr=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-8)\n",
     "    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/10\n",
      " - 589s - loss: 0.5308 - acc: 0.9433 - val_loss: 0.2620 - val_acc: 0.9760\n",
      "Epoch 2/10\n",
      " - 502s - loss: 0.2118 - acc: 0.9782 - val_loss: 0.1699 - val_acc: 0.9796\n",
      "Epoch 3/10\n",
      " - 497s - loss: 0.1585 - acc: 0.9814 - val_loss: 0.1392 - val_acc: 0.9815\n",
      "Epoch 4/10\n",
      " - 497s - loss: 0.1365 - acc: 0.9825 - val_loss: 0.1269 - val_acc: 0.9826\n",
      "Epoch 5/10\n",
      " - 498s - loss: 0.1225 - acc: 0.9842 - val_loss: 0.1124 - val_acc: 0.9863\n",
      "Epoch 6/10\n",
      " - 497s - loss: 0.1133 - acc: 0.9855 - val_loss: 0.1074 - val_acc: 0.9844\n",
      "Epoch 7/10\n",
      " - 477s - loss: 0.1065 - acc: 0.9860 - val_loss: 0.1041 - val_acc: 0.9848\n",
      "Epoch 8/10\n",
      " - 476s - loss: 0.0999 - acc: 0.9874 - val_loss: 0.0990 - val_acc: 0.9850\n",
      "Epoch 9/10\n",
      " - 477s - loss: 0.0954 - acc: 0.9880 - val_loss: 0.0945 - val_acc: 0.9863\n",
      "Epoch 10/10\n",
      " - 486s - loss: 0.0926 - acc: 0.9877 - val_loss: 0.0923 - val_acc: 0.9867\n"
     ]
    }
   ],
   "source": [
     "model = baseline_model_10()\n",
     "# Fit the model\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]\n",
     "scores = model.evaluate(X_test, y_test, verbose=0)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 感觉并没有默认的效果好，增大epoch应该也可以达到99%以上\n",
    "## 尝试加入batchnormalization层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.layers import BatchNormalization\n",
    "def baseline_model_11():\n",
    "    # create model\n",
    "    model = Sequential()\n",
    "    \n",
    "    model.add(Conv2D(32, (15, 15),input_shape=(1, 28, 28), activation='relu'))\n",
    "    model.add(MaxPooling2D(pool_size=(2, 2))) \n",
    "    model.add(Dropout(0.2))\n",
    "    model.add(Flatten())\n",
    "    model.add(Dense(128,activation='relu'))\n",
    "    model.add(BatchNormalization(epsilon=1e-06, mode=0, momentum=0.9, weights=None, beta_init='zero', gamma_init='one'))\n",
    "    model.add(Dense(num_classes,activation='softmax'))\n",
    "  \n",
    "    \n",
    "    # Compile model\n",
    "#     adam = Adam(lr=0.001,beta_1=0.9,beta_2=0.999,epsilon=1e-8)\n",
    "    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/guanyu/.local/lib/python3.6/site-packages/ipykernel_launcher.py:11: UserWarning: Update your `BatchNormalization` call to the Keras 2 API: `BatchNormalization(epsilon=1e-06, momentum=0.9, weights=None, beta_initializer=\"zero\", gamma_initializer=\"one\")`\n",
      "  # This is added back by InteractiveShellApp.init_path()\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 60000 samples, validate on 10000 samples\n",
      "Epoch 1/10\n",
      " - 126s - loss: 0.1875 - acc: 0.9433 - val_loss: 0.0830 - val_acc: 0.9738\n",
      "Epoch 2/10\n",
      " - 127s - loss: 0.0627 - acc: 0.9809 - val_loss: 0.0546 - val_acc: 0.9826\n",
      "Epoch 3/10\n",
      " - 125s - loss: 0.0448 - acc: 0.9861 - val_loss: 0.0560 - val_acc: 0.9825\n",
      "Epoch 4/10\n",
      " - 135s - loss: 0.0362 - acc: 0.9891 - val_loss: 0.0379 - val_acc: 0.9874\n",
      "Epoch 5/10\n",
      " - 126s - loss: 0.0279 - acc: 0.9912 - val_loss: 0.0327 - val_acc: 0.9886\n",
      "Epoch 6/10\n",
      " - 123s - loss: 0.0235 - acc: 0.9926 - val_loss: 0.0373 - val_acc: 0.9890\n",
      "Epoch 7/10\n",
      " - 121s - loss: 0.0213 - acc: 0.9934 - val_loss: 0.0387 - val_acc: 0.9869\n",
      "Epoch 8/10\n",
      " - 122s - loss: 0.0186 - acc: 0.9939 - val_loss: 0.0389 - val_acc: 0.9885\n",
      "Epoch 9/10\n",
      " - 121s - loss: 0.0158 - acc: 0.9950 - val_loss: 0.0390 - val_acc: 0.9896\n",
      "Epoch 10/10\n",
      " - 121s - loss: 0.0146 - acc: 0.9950 - val_loss: 0.0344 - val_acc: 0.9898\n",
      "10000/10000 [==============================] - 11s 1ms/step\n"
     ]
    }
   ],
   "source": [
     "model = baseline_model_11()\n",
     "# Fit the model\n",
     "model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200, verbose=2)\n",
     "# Final evaluation of the model\n",
     "# scores = [test loss, test accuracy]; verbose=1 shows a progress bar\n",
     "scores = model.evaluate(X_test, y_test, verbose=1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 效果显著，最后达到了99.5%准确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
