{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\n",
      "Extracting data/train-images-idx3-ubyte.gz\n",
      "Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\n",
      "Extracting data/train-labels-idx1-ubyte.gz\n",
      "Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\n",
      "Extracting data/t10k-images-idx3-ubyte.gz\n",
      "Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\n",
      "Extracting data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
     "# Imports and dataset download.\n",
     "# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x -- this\n",
     "# notebook requires TensorFlow 1.x.\n",
     "import numpy as np\n",
     "import tensorflow as tf\n",
     "import matplotlib.pyplot as plt\n",
     "from tensorflow.examples.tutorials.mnist import input_data\n",
     "\n",
     "# Downloads (if needed) and extracts MNIST into ./data; labels are one-hot encoded.\n",
     "mnist = input_data.read_data_sets('data/', one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Filter size and depth (number of filters) of the first convolutional layer  \n",
     "CONV_1_SIZE = 3      \n",
     "CONV_1_DEEP = 32    \n",
     "INPUT_CHANNELS = 1 # number of input channels (MNIST images are grayscale)  \n",
     "  \n",
     "# Filter size and depth of the second convolutional layer  \n",
     "CONV_2_SIZE = 3  \n",
     "CONV_2_DEEP = 64  \n",
     "  \n",
     "# Mini-batch size  \n",
     "BATCH_SIZE = 100  \n",
     "  \n",
     "# Learning rate  \n",
     "LEARNING_RATE_INIT = 1e-3    # initial learning rate  \n",
     "# Placeholders: x holds flattened 28x28 images, y_ holds one-hot class labels  \n",
     "x = tf.placeholder(tf.float32, [None, 784])  \n",
     "y_ = tf.placeholder(tf.float32, [None, 10])  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    " #对输入向量x转换成图像矩阵形式  \n",
    "with tf.variable_scope('reshape'):  \n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1]) #因为数据的条数未知,所以为-1  \n",
    "  \n",
    "#卷积层1  \n",
    "with tf.variable_scope('conv1'):  \n",
    "    initial_value = tf.truncated_normal([CONV_1_SIZE,CONV_1_SIZE,INPUT_CHANNELS,CONV_1_DEEP], stddev=0.1)  \n",
    "    conv_1_w = tf.Variable(initial_value=initial_value, collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])  \n",
    "    conv_1_b = tf.Variable(initial_value=tf.constant(0.1, shape=[CONV_1_DEEP]))  \n",
    "    conv_1_l = tf.nn.conv2d(x_image, conv_1_w, strides=[1,1,1,1], padding='SAME') + conv_1_b  \n",
    "    conv_1_h = tf.nn.relu(conv_1_l)  \n",
    "  \n",
    "#池化层1  \n",
    "with tf.variable_scope('pool1'):  \n",
    "    pool_1_h = tf.nn.max_pool(conv_1_h, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')  \n",
    "  \n",
    "#卷积层2  \n",
    "with tf.variable_scope('conv2'):  \n",
    "    conv_2_w = tf.Variable(tf.truncated_normal([CONV_2_SIZE,CONV_2_SIZE,CONV_1_DEEP,CONV_2_DEEP], stddev=0.1),  \n",
    "                           collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])  \n",
    "    conv_2_b = tf.Variable(tf.constant(0.1, shape=[CONV_2_DEEP]))  \n",
    "    conv_2_l = tf.nn.conv2d(pool_1_h, conv_2_w, strides=[1,1,1,1], padding='SAME') + conv_2_b  \n",
    "    conv_2_h = tf.nn.relu(conv_2_l)  \n",
    "  \n",
    "#池化层2  \n",
    "with tf.name_scope('pool2'):  \n",
    "    pool_2_h = tf.nn.max_pool(conv_2_h, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')  \n",
    "  \n",
    "#全连接层1  \n",
    "with tf.name_scope('fc1'):  \n",
    "    #  \n",
    "    fc_1_w = tf.Variable(tf.truncated_normal([7*7*64, 1024], stddev=0.1))  \n",
    "    fc_1_b = tf.Variable(tf.constant(0.1, shape=[1024]))  \n",
    "    #全连接层的输入为向量,而池化层2的输出为7x7x64的矩阵,所以这里要将矩阵转化成一个向量  \n",
    "    pool_2_h_flat = tf.reshape(pool_2_h, [-1,7*7*64])  \n",
    "    fc_1_h = tf.nn.relu(tf.matmul(pool_2_h_flat, fc_1_w) + fc_1_b)  \n",
    "      \n",
    "#dropout在训练时会随机将部分节点的输出改为0,以避免过拟合问题,从而使得模型在测试数据上的效果更好  \n",
    "#dropout一般只在全连接层而不是卷积层或者池化层使用  \n",
    "with tf.name_scope('dropout'):  \n",
    "    keep_prob = tf.placeholder(tf.float32)  \n",
    "    fc_1_h_drop = tf.nn.dropout(fc_1_h, keep_prob)  \n",
    "      \n",
    "#全连接层2 And 输出层  \n",
    "with tf.name_scope('fc2'):  \n",
    "    fc_2_w = tf.Variable(tf.truncated_normal([1024,10], stddev=0.1), collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'WEIGHTS'])  \n",
    "    fc_2_b = tf.Variable(tf.constant(0.1, shape=[10]))  \n",
    "    y = tf.matmul(fc_1_h_drop, fc_2_w) + fc_2_b  \n",
    "      "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Cross-entropy loss (softmax is applied to the logits y inside this op)  \n",
     "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))  \n",
     "  \n",
     "# L2 regularization term, summed over every tensor registered in the 'WEIGHTS' collection  \n",
     "l2_loss = tf.add_n([tf.nn.l2_loss(w) for w in tf.get_collection('WEIGHTS')])  \n",
     "  \n",
     "# Total cost = cross-entropy plus the weighted penalty term (7e-5 is the L2 coefficient)  \n",
     "total_loss = cross_entropy + 7e-5*l2_loss  \n",
     "  \n",
     "# Adam optimizer minimizing the regularized loss  \n",
     "train_step = tf.train.AdamOptimizer(LEARNING_RATE_INIT).minimize(total_loss)  \n",
     "  \n",
     "# InteractiveSession installs itself as default so Tensor.eval() works below  \n",
     "sess = tf.InteractiveSession()  \n",
     "init_op = tf.global_variables_initializer()  \n",
     "sess.run(init_op)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step:99, loss:0.197458, train_acc:0.980000, test_acc:0.970100\n",
      "step:199, loss:0.142206, train_acc:0.980000, test_acc:0.977000\n",
      "step:299, loss:0.094599, train_acc:0.990000, test_acc:0.979400\n",
      "step:399, loss:0.096292, train_acc:0.970000, test_acc:0.981600\n",
      "step:499, loss:0.063990, train_acc:0.990000, test_acc:0.983300\n",
      "step:599, loss:0.032097, train_acc:1.000000, test_acc:0.980500\n",
      "step:699, loss:0.030644, train_acc:1.000000, test_acc:0.985400\n",
      "step:799, loss:0.033100, train_acc:0.990000, test_acc:0.988400\n",
      "step:899, loss:0.082696, train_acc:1.000000, test_acc:0.988500\n",
      "step:999, loss:0.172963, train_acc:0.980000, test_acc:0.985800\n",
      "step:1099, loss:0.126484, train_acc:0.980000, test_acc:0.987100\n",
      "step:1199, loss:0.024750, train_acc:1.000000, test_acc:0.989000\n",
      "step:1299, loss:0.111793, train_acc:0.990000, test_acc:0.989500\n",
      "step:1399, loss:0.017189, train_acc:1.000000, test_acc:0.989800\n",
      "step:1499, loss:0.016086, train_acc:1.000000, test_acc:0.989900\n",
      "step:1599, loss:0.028070, train_acc:1.000000, test_acc:0.988400\n",
      "step:1699, loss:0.022382, train_acc:0.990000, test_acc:0.990900\n",
      "step:1799, loss:0.030221, train_acc:0.990000, test_acc:0.988300\n",
      "step:1899, loss:0.034324, train_acc:0.990000, test_acc:0.990600\n",
      "step:1999, loss:0.063877, train_acc:1.000000, test_acc:0.989800\n",
      "step:2099, loss:0.094486, train_acc:0.990000, test_acc:0.990900\n",
      "step:2199, loss:0.033447, train_acc:1.000000, test_acc:0.990100\n",
      "step:2299, loss:0.065936, train_acc:1.000000, test_acc:0.990800\n",
      "step:2399, loss:0.024407, train_acc:1.000000, test_acc:0.991300\n",
      "step:2499, loss:0.033195, train_acc:1.000000, test_acc:0.990700\n",
      "step:2599, loss:0.014929, train_acc:1.000000, test_acc:0.990900\n",
      "step:2699, loss:0.061112, train_acc:0.990000, test_acc:0.991400\n",
      "step:2799, loss:0.103849, train_acc:0.990000, test_acc:0.992100\n",
      "step:2899, loss:0.013895, train_acc:1.000000, test_acc:0.990500\n",
      "step:2999, loss:0.023447, train_acc:1.000000, test_acc:0.988200\n",
      "step:3099, loss:0.038033, train_acc:0.990000, test_acc:0.991000\n",
      "step:3199, loss:0.052918, train_acc:1.000000, test_acc:0.990000\n",
      "step:3299, loss:0.023739, train_acc:1.000000, test_acc:0.990900\n",
      "step:3399, loss:0.009423, train_acc:1.000000, test_acc:0.989900\n",
      "step:3499, loss:0.038781, train_acc:0.990000, test_acc:0.991700\n",
      "step:3599, loss:0.058988, train_acc:0.990000, test_acc:0.991700\n",
      "step:3699, loss:0.009901, train_acc:1.000000, test_acc:0.992700\n",
      "step:3799, loss:0.034867, train_acc:0.990000, test_acc:0.990500\n",
      "step:3899, loss:0.063399, train_acc:1.000000, test_acc:0.991400\n",
      "step:3999, loss:0.012256, train_acc:1.000000, test_acc:0.992600\n",
      "step:4099, loss:0.018841, train_acc:0.990000, test_acc:0.990300\n",
      "step:4199, loss:0.017140, train_acc:1.000000, test_acc:0.992000\n",
      "step:4299, loss:0.010259, train_acc:1.000000, test_acc:0.992600\n",
      "step:4399, loss:0.040710, train_acc:0.990000, test_acc:0.991700\n",
      "step:4499, loss:0.022140, train_acc:1.000000, test_acc:0.992600\n",
      "step:4599, loss:0.011785, train_acc:1.000000, test_acc:0.992400\n",
      "step:4699, loss:0.100383, train_acc:0.990000, test_acc:0.993000\n",
      "step:4799, loss:0.042087, train_acc:1.000000, test_acc:0.993000\n",
      "step:4899, loss:0.013502, train_acc:0.990000, test_acc:0.992400\n",
      "step:4999, loss:0.023216, train_acc:1.000000, test_acc:0.992700\n"
     ]
    }
   ],
   "source": [
    "#Train  \n",
    "for step in range(5000):  \n",
    "    batch_xs, batch_ys = mnist.train.next_batch(BATCH_SIZE)  \n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(  \n",
    "        [train_step, cross_entropy, l2_loss, total_loss],  \n",
    "        feed_dict={x: batch_xs, y_:batch_ys, keep_prob:0.5})  \n",
    "      \n",
    "    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  \n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #  \n",
    "    if (step+1)%100 == 0:  \n",
    "        #每隔200步评估一下训练集和测试集  \n",
    "        train_accuracy = accuracy.eval(feed_dict={x:batch_xs, y_:batch_ys, keep_prob:1.0})  \n",
    "        test_accuracy = accuracy.eval(feed_dict={x:mnist.test.images, y_:mnist.test.labels, keep_prob:1.0})  \n",
    "        #for i in xrange(10):  \n",
    "          #  testSet = mnist.test.next_batch(50)  \n",
    " \n",
    "        print(\"step:%d, loss:%f, train_acc:%f, test_acc:%f\" % (step, total_loss_value, train_accuracy, test_accuracy))  "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
