{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/jack/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-698ada706af1>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/jack/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "# Downloads MNIST (if not already cached) and loads it; one_hot=True yields\n",
    "# 10-dimensional one-hot label vectors to match the 10-class placeholder y_.\n",
    "# NOTE(review): read_data_sets is deprecated (see cell output) -- consider\n",
    "# tf.data-based loading when upgrading TF.\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the model\n",
    "# x:  flattened 28x28 grayscale images, batch dimension left open (None).\n",
    "# y_: one-hot ground-truth labels for the 10 digit classes.\n",
    "# learning_rate: fed per training step so it could be scheduled externally.\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "learning_rate = tf.placeholder(tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reshape the flat 784-vectors back into NHWC image tensors\n",
    "# (batch, 28, 28, 1) so they can be fed to tf.nn.conv2d below.\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(x,[-1,28,28,1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# First convolutional layer: 32 filters of 5x5 over the single input\n",
    "# channel. The weight tensor is also registered in the custom 'WEIGHTS'\n",
    "# collection so the L2-regularization term can collect it later.\n",
    "with tf.name_scope('conv1'):\n",
    "    W_conv1 = tf.Variable(\n",
    "        tf.random_normal([5, 5, 1, 32], stddev=0.1),\n",
    "        collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])\n",
    "    b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))\n",
    "    pre_activation = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1\n",
    "    h_conv1 = tf.nn.relu(pre_activation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2x2 max-pooling with stride 2 halves the spatial size: 28x28 -> 14x14.\n",
    "with tf.name_scope('pool1'):\n",
    "    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Second convolutional layer: 64 filters of 3x3 over the 32 incoming\n",
    "# channels; weights also go into the 'WEIGHTS' collection for L2 decay.\n",
    "with tf.name_scope('conv2'):\n",
    "    W_conv2 = tf.Variable(\n",
    "        tf.random_normal([3, 3, 32, 64], stddev=0.1),\n",
    "        collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])\n",
    "    b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))\n",
    "    pre_activation = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2\n",
    "    h_conv2 = tf.nn.relu(pre_activation)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Second 2x2 max-pool: 14x14 -> 7x7, producing the 7*7*64 features that\n",
    "# the fully-connected layer below flattens.\n",
    "with tf.name_scope('pool2'):\n",
    "    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fully-connected layer: flatten the 7x7x64 feature maps into a vector\n",
    "# and project to 1024 hidden units with a ReLU nonlinearity.\n",
    "with tf.name_scope('fc1'):\n",
    "    W_fc1 = tf.Variable(tf.random_normal([7*7*64,1024],stddev=0.1),collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])\n",
    "    b_fc1 = tf.Variable(tf.constant(0.1, shape=[1024]))\n",
    "    \n",
    "    h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dropout regularization on the fc1 activations; keep_prob is fed at run\n",
    "# time so it can be 0.5 during training and 1.0 during evaluation.\n",
    "with tf.name_scope('dropout'):\n",
    "    keep_prob = tf.placeholder(tf.float32)\n",
    "    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Output layer: project the 1024-unit hidden representation to 10 class\n",
    "# logits. No softmax here -- the loss op applies it internally.\n",
    "with tf.name_scope('fc2'):\n",
    "    W_fc2 = tf.Variable(tf.random_normal([1024,10],stddev=0.1), collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "    b_fc2 = tf.Variable(tf.constant(0.1,shape=[10]))\n",
    "    y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-12-bf86c3447efc>:11: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See @{tf.nn.softmax_cross_entropy_with_logits_v2}.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use the numerically stable fused op on the raw logits 'y' and\n",
    "# then average across the batch. softmax_cross_entropy_with_logits is\n",
    "# deprecated (this cell used to emit the warning); the _v2 variant with\n",
    "# tf.stop_gradient on the labels reproduces the old behavior of not\n",
    "# backpropagating into the labels input.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits_v2(\n",
    "        labels=tf.stop_gradient(y_), logits=y))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total loss = cross-entropy + L2 weight decay over everything registered\n",
    "# in the custom 'WEIGHTS' collection (the conv/fc weight tensors above).\n",
    "l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')])\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n",
    "# Adagrad adapts the per-parameter step size; the base learning_rate\n",
    "# placeholder is fed on every training step.\n",
    "train_step = tf.train.AdagradOptimizer(learning_rate).minimize(total_loss)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在这里我们仍然调用系统提供的读取数据，为我们取得一个batch。\n",
    "然后我们运行10000个step(batch大小为100，约18个epochs)，对权重进行优化。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step: 100,    train accuracy:0.918900,    test accuracy:0.9218,    loss:1.44221\n",
      "step: 200,    train accuracy:0.951300,    test accuracy:0.9534,    loss:1.36302\n",
      "step: 300,    train accuracy:0.949000,    test accuracy:0.9483,    loss:1.34249\n",
      "step: 400,    train accuracy:0.963400,    test accuracy:0.9622,    loss:1.30321\n",
      "step: 500,    train accuracy:0.969900,    test accuracy:0.9708,    loss:1.21168\n",
      "step: 600,    train accuracy:0.971000,    test accuracy:0.9733,    loss:1.20773\n",
      "step: 700,    train accuracy:0.973400,    test accuracy:0.9732,    loss:1.17518\n",
      "step: 800,    train accuracy:0.970600,    test accuracy:0.9733,    loss:1.19202\n",
      "step: 900,    train accuracy:0.976400,    test accuracy:0.9767,    loss:1.17957\n",
      "step:1000,    train accuracy:0.977900,    test accuracy:0.9787,    loss:1.24861\n",
      "step:1100,    train accuracy:0.978400,    test accuracy:0.9786,    loss:1.13074\n",
      "step:1200,    train accuracy:0.979500,    test accuracy:0.9807,    loss:1.11698\n",
      "step:1300,    train accuracy:0.978400,    test accuracy:0.9772,    loss:1.26733\n",
      "step:1400,    train accuracy:0.981400,    test accuracy:0.9818,    loss:1.12849\n",
      "step:1500,    train accuracy:0.982600,    test accuracy:0.9834,    loss:1.10985\n",
      "step:1600,    train accuracy:0.984000,    test accuracy:0.9818,    loss:1.07038\n",
      "step:1700,    train accuracy:0.981100,    test accuracy:0.9841,    loss:1.1267\n",
      "step:1800,    train accuracy:0.983600,    test accuracy:0.9837,    loss:1.09698\n",
      "step:1900,    train accuracy:0.983600,    test accuracy:0.9837,    loss:1.2467\n",
      "step:2000,    train accuracy:0.984700,    test accuracy:0.9850,    loss:1.15237\n",
      "step:2100,    train accuracy:0.984500,    test accuracy:0.9840,    loss:1.12819\n",
      "step:2200,    train accuracy:0.982500,    test accuracy:0.9832,    loss:1.16818\n",
      "step:2300,    train accuracy:0.985800,    test accuracy:0.9873,    loss:1.05005\n",
      "step:2400,    train accuracy:0.984400,    test accuracy:0.9861,    loss:1.04943\n",
      "step:2500,    train accuracy:0.985200,    test accuracy:0.9841,    loss:1.07203\n",
      "step:2600,    train accuracy:0.984800,    test accuracy:0.9861,    loss:1.05921\n",
      "step:2700,    train accuracy:0.985300,    test accuracy:0.9860,    loss:1.05721\n",
      "step:2800,    train accuracy:0.986700,    test accuracy:0.9850,    loss:1.07036\n",
      "step:2900,    train accuracy:0.987100,    test accuracy:0.9879,    loss:1.04881\n",
      "step:3000,    train accuracy:0.988000,    test accuracy:0.9869,    loss:1.01151\n",
      "step:3100,    train accuracy:0.986900,    test accuracy:0.9865,    loss:1.06694\n",
      "step:3200,    train accuracy:0.985900,    test accuracy:0.9871,    loss:1.01829\n",
      "step:3300,    train accuracy:0.986400,    test accuracy:0.9877,    loss:1.07527\n",
      "step:3400,    train accuracy:0.986800,    test accuracy:0.9858,    loss:0.994817\n",
      "step:3500,    train accuracy:0.988100,    test accuracy:0.9872,    loss:0.990565\n",
      "step:3600,    train accuracy:0.985900,    test accuracy:0.9884,    loss:1.02732\n",
      "step:3700,    train accuracy:0.986700,    test accuracy:0.9855,    loss:0.982813\n",
      "step:3800,    train accuracy:0.985800,    test accuracy:0.9837,    loss:1.03574\n",
      "step:3900,    train accuracy:0.987700,    test accuracy:0.9889,    loss:0.9754\n",
      "step:4000,    train accuracy:0.985500,    test accuracy:0.9838,    loss:0.972734\n",
      "step:4100,    train accuracy:0.986800,    test accuracy:0.9881,    loss:0.966434\n",
      "step:4200,    train accuracy:0.988500,    test accuracy:0.9885,    loss:1.02295\n",
      "step:4300,    train accuracy:0.988600,    test accuracy:0.9887,    loss:0.987026\n",
      "step:4400,    train accuracy:0.988100,    test accuracy:0.9877,    loss:0.961481\n",
      "step:4500,    train accuracy:0.988000,    test accuracy:0.9876,    loss:0.945565\n",
      "step:4600,    train accuracy:0.987500,    test accuracy:0.9875,    loss:0.947219\n",
      "step:4700,    train accuracy:0.988500,    test accuracy:0.9874,    loss:0.966389\n",
      "step:4800,    train accuracy:0.989100,    test accuracy:0.9901,    loss:0.944037\n",
      "step:4900,    train accuracy:0.989500,    test accuracy:0.9887,    loss:0.936978\n",
      "step:5000,    train accuracy:0.987000,    test accuracy:0.9865,    loss:0.923299\n",
      "step:5100,    train accuracy:0.988000,    test accuracy:0.9878,    loss:0.95887\n",
      "step:5200,    train accuracy:0.988500,    test accuracy:0.9887,    loss:0.92707\n",
      "step:5300,    train accuracy:0.988400,    test accuracy:0.9878,    loss:0.946092\n",
      "step:5400,    train accuracy:0.988000,    test accuracy:0.9879,    loss:0.91864\n",
      "step:5500,    train accuracy:0.988600,    test accuracy:0.9887,    loss:0.903418\n",
      "step:5600,    train accuracy:0.987600,    test accuracy:0.9872,    loss:0.911678\n",
      "step:5700,    train accuracy:0.988300,    test accuracy:0.9895,    loss:0.90102\n",
      "step:5800,    train accuracy:0.989700,    test accuracy:0.9907,    loss:0.928305\n",
      "step:5900,    train accuracy:0.989300,    test accuracy:0.9890,    loss:0.931759\n",
      "step:6000,    train accuracy:0.989400,    test accuracy:0.9884,    loss:0.894517\n",
      "step:6100,    train accuracy:0.987900,    test accuracy:0.9871,    loss:0.884685\n",
      "step:6200,    train accuracy:0.989900,    test accuracy:0.9896,    loss:0.878553\n",
      "step:6300,    train accuracy:0.988500,    test accuracy:0.9877,    loss:0.887917\n",
      "step:6400,    train accuracy:0.989800,    test accuracy:0.9892,    loss:0.89144\n",
      "step:6500,    train accuracy:0.988700,    test accuracy:0.9887,    loss:0.893713\n",
      "step:6600,    train accuracy:0.988300,    test accuracy:0.9899,    loss:0.867477\n",
      "step:6700,    train accuracy:0.988400,    test accuracy:0.9895,    loss:0.887483\n",
      "step:6800,    train accuracy:0.988900,    test accuracy:0.9881,    loss:0.875115\n",
      "step:6900,    train accuracy:0.989300,    test accuracy:0.9890,    loss:0.858394\n",
      "step:7000,    train accuracy:0.989700,    test accuracy:0.9890,    loss:0.848847\n",
      "step:7100,    train accuracy:0.990000,    test accuracy:0.9893,    loss:0.84441\n",
      "step:7200,    train accuracy:0.988400,    test accuracy:0.9896,    loss:0.859911\n",
      "step:7300,    train accuracy:0.990400,    test accuracy:0.9893,    loss:0.83981\n",
      "step:7400,    train accuracy:0.990100,    test accuracy:0.9893,    loss:0.844772\n",
      "step:7500,    train accuracy:0.988500,    test accuracy:0.9893,    loss:0.83409\n",
      "step:7600,    train accuracy:0.988400,    test accuracy:0.9879,    loss:0.826451\n",
      "step:7700,    train accuracy:0.989600,    test accuracy:0.9884,    loss:0.824241\n",
      "step:7800,    train accuracy:0.989900,    test accuracy:0.9886,    loss:0.821188\n",
      "step:7900,    train accuracy:0.988900,    test accuracy:0.9895,    loss:0.833833\n",
      "step:8000,    train accuracy:0.988800,    test accuracy:0.9889,    loss:0.814007\n",
      "step:8100,    train accuracy:0.989700,    test accuracy:0.9904,    loss:0.82636\n",
      "step:8200,    train accuracy:0.988900,    test accuracy:0.9889,    loss:0.818347\n",
      "step:8300,    train accuracy:0.990600,    test accuracy:0.9909,    loss:0.815276\n",
      "step:8400,    train accuracy:0.989300,    test accuracy:0.9895,    loss:0.808714\n",
      "step:8500,    train accuracy:0.990400,    test accuracy:0.9904,    loss:0.795857\n",
      "step:8600,    train accuracy:0.989400,    test accuracy:0.9895,    loss:0.792848\n",
      "step:8700,    train accuracy:0.990300,    test accuracy:0.9884,    loss:0.789777\n",
      "step:8800,    train accuracy:0.988900,    test accuracy:0.9894,    loss:0.809194\n",
      "step:8900,    train accuracy:0.989600,    test accuracy:0.9895,    loss:0.780363\n",
      "step:9000,    train accuracy:0.989300,    test accuracy:0.9904,    loss:0.782024\n",
      "step:9100,    train accuracy:0.989900,    test accuracy:0.9903,    loss:0.785985\n",
      "step:9200,    train accuracy:0.989700,    test accuracy:0.9890,    loss:0.772654\n",
      "step:9300,    train accuracy:0.989600,    test accuracy:0.9891,    loss:0.791036\n",
      "step:9400,    train accuracy:0.989500,    test accuracy:0.9894,    loss:0.776301\n",
      "step:9500,    train accuracy:0.988600,    test accuracy:0.9891,    loss:0.76539\n",
      "step:9600,    train accuracy:0.990000,    test accuracy:0.9904,    loss:0.756941\n",
      "step:9700,    train accuracy:0.989700,    test accuracy:0.9893,    loss:0.757954\n",
      "step:9800,    train accuracy:0.988900,    test accuracy:0.9894,    loss:0.778234\n",
      "step:9900,    train accuracy:0.989700,    test accuracy:0.9899,    loss:0.75091\n",
      "step:10000,    train accuracy:0.990300,    test accuracy:0.9900,    loss:0.750007\n"
     ]
    }
   ],
   "source": [
    "# Train\n",
    "# Build the evaluation ops ONCE, outside the loop. The original version\n",
    "# created fresh tf.equal/tf.reduce_mean nodes on every 100th step, which\n",
    "# grows the graph without bound and slows the session down over time.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "for step in range(10000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "    lr = 0.1\n",
    "\n",
    "    # Run one optimization step (also fetching loss values for reporting).\n",
    "    _, ce_value, l2_loss_value, total_loss_value = sess.run(\n",
    "        [train_step, cross_entropy, l2_loss, total_loss],\n",
    "        feed_dict={x: batch_xs, y_: batch_ys, learning_rate: lr, keep_prob: 0.5})\n",
    "\n",
    "    if (step+1) % 100 == 0:\n",
    "        # Evaluate with dropout disabled (keep_prob=1.0). Train accuracy is\n",
    "        # measured on the current training batch; the original measured it\n",
    "        # on the test set (so both printed columns were the same quantity)\n",
    "        # and kept dropout active, biasing the numbers.\n",
    "        train_accuracy = sess.run(accuracy, feed_dict={\n",
    "            x: batch_xs, y_: batch_ys, keep_prob: 1.0})\n",
    "        test_accuracy = sess.run(accuracy, feed_dict={\n",
    "            x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})\n",
    "        # total_loss_value is already a scalar from this step's run; the\n",
    "        # original wrapped it in a fresh tf.reduce_mean op plus an extra\n",
    "        # sess.run each time, leaking graph nodes for the same number.\n",
    "        print('step:%4d,%-3s train accuracy:%8.6f, %3stest accuracy:%6.4f, %3sloss:%g' % (step+1,' ',train_accuracy,' ',test_accuracy,' ',total_loss_value))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "验证我们模型在测试数据上的准确率：由结果可以看出，当把step设为10000时，准确率最大可以达到0.99以上，效果较之前提升了近1%，可以说，效果较为理想。通过比较训练集与测试集的准确率，该模型有效的防止了过拟合。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "总结：\n",
    "\n",
    "   该模型是使用tensorflow训练的一个卷积神经网络，采用的是tensorflow的原生写法，较其他的写法代码相对繁琐一些，但是能够更好的理解卷积神经网络的原理，其写法看起来也更为直观一些。模型中加入了两个卷积层，两个池化层以及两个全连接层。\n",
    "    \n",
    "   首先变量采用的是random_normal(正态分布随机数)，均值为mean，标准差为stddev，在该模型中，stddev设为0.1效果最好。\n",
    "    \n",
    "   该模型采用的是ReLU激活函数，加入了L2正则，正则化惩罚因子设为0.00007，在这里，正则化因子不能较大，否则会使结果降低不少。\n",
    "   \n",
    "   学习率调整为0.1，采用的是AdagradOptimizer算法，该优化器可以根据梯度的大小调节学习率衰减的快慢，通过试验，效果好于一般的梯度下降方法，而且在我采用这种算法时，对于学习率只要调整的不是过小，结果都可以达到理想值。\n",
    "    \n",
    "   在第一层卷积中，设生成32个5*5*1的卷积核,第二层卷积中，生成64个3*3*32的卷积核，对于卷积核的调整，卷积核过大或者过小效果都不太好，在该模型中，我调整的卷积核越大时，准确率会随之降低。\n",
    "   池化层采用的是max_pool。\n",
    "   该模型采用了dropout方法，改善了过拟合的情况。\n",
    "   \n",
    "   在这次的训练过程中，发现卷积核的数量及size以及采用的梯度下降方法对结果影响挺大，在采用卷积神经网络时，准确率可以很容易达到0.98以上，在达到0.99的准确率时，可能需要调节更优的参数才能使正确率更高。\n",
    "   \n",
    "   小作业在写的过程中一直报错，就不贴上来了。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
