{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## cifar10分类的卷积神经网络(模块化设计版)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.导入必需模块,数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.5/dist-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "os.chdir('/home/david/tensorflow/卷积神经网络/cifar10')   # 更改当前目录为cifar10模块下\n",
    "import cifar10, cifar10_input\n",
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Filling queue with 20000 CIFAR images before starting to train. This will take a few minutes.\n"
     ]
    }
   ],
   "source": [
    "# 下载数据集并解压\n",
    "# cifar10.maybe_download_and_extract()  # 默认下载目录为`/tmp/cifar10_data/cifar-10-batches-bin`\n",
    "batch_size = 128\n",
    "data_dir = './cifar-10-batches-bin'\n",
    "# 数据集预处理\n",
    "images_train, labels_train = cifar10_input.distorted_inputs(data_dir=data_dir, batch_size=batch_size)\n",
    "images_test , labels_test  = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.定义网络权重初始化函数,权重带损失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 为权重添加l2损失\n",
    "def variable_with_weight_loss(shape, stddev, w1, name):\n",
    "    \"\"\"\n",
    "    功能: 为权重进行初始化,并给权重添加一定的损失\n",
    "    参数: shape:权重向量的形状;stddev:标准差的大小;w1:控制权重的损失大小\n",
    "    返回: 初始化的权重向量\n",
    "    \"\"\"\n",
    "    if name is None:\n",
    "        var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))\n",
    "    else:\n",
    "        var = tf.Variable(tf.truncated_normal(shape, stddev=stddev, name=name))\n",
    "    if w1 is not None:   # 为权重添加损失\n",
    "        weight_loss = tf.multiply(tf.nn.l2_loss(var), w1, name='weight_loss')  # 计算weight的loss\n",
    "        tf.add_to_collection('losses', weight_loss)\n",
    "    return var"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.定义网络的函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 显示网络结构的函数\n",
    "def print_activations(t):\n",
    "    \"\"\"\n",
    "    功能: 输出网络的结构\n",
    "    参数: t:输入的向量\n",
    "    返回: 无\n",
    "    \"\"\"\n",
    "    print('{:<15}{:<15}'.format(str(t.op.name), str(t.get_shape().as_list())))  # 以列表形式输出\n",
    "\n",
    "# 网络定义的函数,2层卷积,3层全连接\n",
    "def inference(images):\n",
    "    \"\"\"\n",
    "    功能: 网络的推理函数,即网络的结构定义函数\n",
    "    参数: images:输入的图像\n",
    "    返回: 网络输出的特征向量,维度与分类的个数有关\n",
    "    \"\"\"\n",
    "    # 第一层卷积层[5,5,3,64]\n",
    "    with tf.name_scope('conv1') as scope:\n",
    "        kernel = variable_with_weight_loss([5, 5, 3, 64], stddev=5e-2, w1=0.0, name='weight')\n",
    "        conv_kernel = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')\n",
    "        bias = tf.Variable(tf.constant(0.0, shape=[64]), name='bias')\n",
    "        conv1 = tf.nn.relu(tf.nn.bias_add(conv_kernel, bias), name=scope)\n",
    "        print_activations(conv1)  # 输出卷积层的结构\n",
    "        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')\n",
    "        print_activations(pool1)  # 输出池化层的结构\n",
    "        norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm1')\n",
    "        print_activations(norm1)  # 输出lrn层的结构\n",
    "    # 第二层卷积层[5, 5, 64, 64]\n",
    "    with tf.name_scope('conv2') as scope:\n",
    "        kernel = variable_with_weight_loss([5, 5, 64, 64], stddev=5e-2, w1=0.0, name='weight')\n",
    "        conv_kernel = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\n",
    "        bias = tf.Variable(tf.constant(0.0, shape=[64]), name='bias')\n",
    "        conv2 = tf.nn.relu(tf.nn.bias_add(conv_kernel, bias), name=scope)\n",
    "        print_activations(conv2)\n",
    "        norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75, name='norm2')\n",
    "        print_activations(norm2)\n",
    "        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')\n",
    "        print_activations(pool2)\n",
    "    # 第三层全连接层[384]\n",
    "    with tf.name_scope('fc3') as scope:\n",
    "        pool2_flat = tf.reshape(pool2, [batch_size, -1])   # 将feature map重塑为一维度向量\n",
    "        dim = pool2_flat.get_shape()[1].value               # 获取一维向量的长度\n",
    "        weight = variable_with_weight_loss([dim, 384], stddev=0.04, w1=0.004, name='weight')\n",
    "        bias = tf.Variable(tf.constant(0.0, shape=[384]), name='bias')\n",
    "        fc3 = tf.nn.relu(tf.matmul(pool2_flat, weight) + bias, name=scope)\n",
    "        print_activations(fc3)\n",
    "    # 第四层全连接层[192]\n",
    "    with tf.name_scope('fc4') as scope:\n",
    "        weight = variable_with_weight_loss([384, 192], stddev=0.04, w1=0.004, name='weight')\n",
    "        bias = tf.Variable(tf.constant(0.0, shape=[192]), name='bias')\n",
    "        fc4 = tf.nn.relu(tf.matmul(fc3, weight) + bias, name=scope)\n",
    "        print_activations(fc4)\n",
    "    # 第五层全连接层,输出分类层\n",
    "    with tf.name_scope('logits') as scope:\n",
    "        weight = variable_with_weight_loss([192, 10], stddev=1.0/192, w1=0.0, name='weight')\n",
    "        bias = tf.Variable(tf.constant(0.0, shape=[10]), name='bias')\n",
    "        logits = tf.add(tf.matmul(fc4, weight), bias, name=scope)         #bias_add与+,add的区别????????????\n",
    "        print_activations(logits)\n",
    "    return logits  # 输出网络提取的特征"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 4.定义损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 总的损失函数=网络输出损失+权重损失\n",
    "def loss(logits, labels):\n",
    "    \"\"\"\n",
    "    功能: 计算预测值与标签的损失+全连接层的权重损失\n",
    "    参数: logits:预测输出,这里为特征不是概率; labels:训练集标签\n",
    "    返回: 总的损失\n",
    "    \"\"\"\n",
    "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, \\\n",
    "                                                      labels=labels, name='cross_entropy_per_examples')\n",
    "    print('cross_entropy:', cross_entropy)\n",
    "    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')  # 对batch_size个数样本的损失求平均\n",
    "    tf.add_to_collection('losses', cross_entropy_mean)    # 将模型输出损失添加到losses中\n",
    "    losses = tf.get_collection('losses')\n",
    "    print('\\nlosses:', losses)\n",
    "    total_loss = tf.add_n(losses, name='total_loss') # 求总的loss\n",
    "    return total_loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 5.网络前向传播并计算损失"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "conv1          [128, 24, 24, 64]\n",
      "conv1/pool1    [128, 12, 12, 64]\n",
      "conv1/norm1    [128, 12, 12, 64]\n",
      "conv2          [128, 12, 12, 64]\n",
      "conv2/norm2    [128, 12, 12, 64]\n",
      "conv2/pool2    [128, 6, 6, 64]\n",
      "fc3            [128, 384]     \n",
      "fc4            [128, 192]     \n",
      "logits         [128, 10]      \n"
     ]
    }
   ],
   "source": [
    "# 定义占位符变量用于传入数据\n",
    "image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])   \n",
    "label_holder = tf.placeholder(tf.int32, [batch_size])\n",
    "# 网络正向传播计算输出\n",
    "logits = inference(image_holder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cross_entropy: Tensor(\"cross_entropy_per_examples/cross_entropy_per_examples:0\", shape=(128,), dtype=float32)\n",
      "\n",
      "losses: [<tf.Tensor 'conv1/weight_loss:0' shape=() dtype=float32>, <tf.Tensor 'conv2/weight_loss:0' shape=() dtype=float32>, <tf.Tensor 'fc3/weight_loss:0' shape=() dtype=float32>, <tf.Tensor 'fc4/weight_loss:0' shape=() dtype=float32>, <tf.Tensor 'logits/weight_loss:0' shape=() dtype=float32>, <tf.Tensor 'cross_entropy:0' shape=() dtype=float32>]\n"
     ]
    }
   ],
   "source": [
    "# 计算网络总的损失\n",
    "loss = loss(logits, label_holder)\n",
    "# 优化方法\n",
    "train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)\n",
    "# 测试网络的准确率\n",
    "top_k_op = tf.nn.in_top_k(logits, label_holder, 1)  # top_k准确率,默认为1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 6.创建会话,初始变量并训练网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[<Thread(QueueRunnerThread-input_producer-input_producer/input_producer_EnqueueMany, started daemon 140623629756160)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140623621363456)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140623369729792)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140623361337088)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140623352944384)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621993998080)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621985605376)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621977212672)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621968819968)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621960427264)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621952034560)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621943641856)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621457127168)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621448734464)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621440341760)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621431949056)>,\n",
       " <Thread(QueueRunnerThread-shuffle_batch/random_shuffle_queue-shuffle_batch/random_shuffle_queue_enqueue, started daemon 140621423556352)>,\n",
       " <Thread(QueueRunnerThread-input/input_producer-input/input_producer/input_producer_EnqueueMany, started daemon 140621415163648)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140621406770944)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620920256256)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620911863552)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620903470848)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620895078144)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620886685440)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620878292736)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620869900032)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620383385344)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620374992640)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620366599936)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620358207232)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620349814528)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620341421824)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140620333029120)>,\n",
       " <Thread(QueueRunnerThread-batch/fifo_queue-batch/fifo_queue_enqueue, started daemon 140619846514432)>]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sess = tf.InteractiveSession()           # 创建会话\n",
    "tf.global_variables_initializer().run()  # 全局变量初始化\n",
    "tf.train.start_queue_runners()           # 开启16个线程加速图像预处理过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 0,loss=4.68 (14.8 examples/sec; 8.661 sec/batch)\n",
      "step 10,loss=3.63 (1824.2 examples/sec; 0.070 sec/batch)\n",
      "step 20,loss=3.17 (1988.0 examples/sec; 0.064 sec/batch)\n",
      "step 30,loss=2.74 (1876.7 examples/sec; 0.068 sec/batch)\n",
      "step 40,loss=2.40 (1994.7 examples/sec; 0.064 sec/batch)\n",
      "step 50,loss=2.42 (1982.8 examples/sec; 0.065 sec/batch)\n",
      "step 60,loss=2.35 (1950.3 examples/sec; 0.066 sec/batch)\n",
      "step 70,loss=2.00 (1888.1 examples/sec; 0.068 sec/batch)\n",
      "step 80,loss=2.01 (1816.2 examples/sec; 0.070 sec/batch)\n",
      "step 90,loss=1.82 (1825.9 examples/sec; 0.070 sec/batch)\n",
      "step 100,loss=1.96 (1906.1 examples/sec; 0.067 sec/batch)\n",
      "step 110,loss=2.19 (1879.0 examples/sec; 0.068 sec/batch)\n",
      "step 120,loss=1.98 (1843.9 examples/sec; 0.069 sec/batch)\n",
      "step 130,loss=1.83 (1927.3 examples/sec; 0.066 sec/batch)\n",
      "step 140,loss=1.94 (1885.0 examples/sec; 0.068 sec/batch)\n",
      "step 150,loss=1.75 (1891.7 examples/sec; 0.068 sec/batch)\n",
      "step 160,loss=1.82 (2046.7 examples/sec; 0.063 sec/batch)\n",
      "step 170,loss=1.78 (1966.2 examples/sec; 0.065 sec/batch)\n",
      "step 180,loss=1.77 (1753.1 examples/sec; 0.073 sec/batch)\n",
      "step 190,loss=1.80 (2014.9 examples/sec; 0.064 sec/batch)\n",
      "step 200,loss=1.68 (1788.5 examples/sec; 0.072 sec/batch)\n",
      "step 210,loss=1.82 (1821.5 examples/sec; 0.070 sec/batch)\n",
      "step 220,loss=1.60 (1879.2 examples/sec; 0.068 sec/batch)\n",
      "step 230,loss=1.66 (1909.6 examples/sec; 0.067 sec/batch)\n",
      "step 240,loss=1.80 (1819.6 examples/sec; 0.070 sec/batch)\n",
      "step 250,loss=1.79 (1834.9 examples/sec; 0.070 sec/batch)\n",
      "step 260,loss=1.71 (1780.8 examples/sec; 0.072 sec/batch)\n",
      "step 270,loss=1.50 (1838.1 examples/sec; 0.070 sec/batch)\n",
      "step 280,loss=1.62 (1904.9 examples/sec; 0.067 sec/batch)\n",
      "step 290,loss=1.66 (2038.2 examples/sec; 0.063 sec/batch)\n",
      "step 300,loss=1.56 (1962.3 examples/sec; 0.065 sec/batch)\n",
      "step 310,loss=1.66 (1787.8 examples/sec; 0.072 sec/batch)\n",
      "step 320,loss=1.72 (1790.0 examples/sec; 0.072 sec/batch)\n",
      "step 330,loss=1.69 (1914.6 examples/sec; 0.067 sec/batch)\n",
      "step 340,loss=1.54 (1904.6 examples/sec; 0.067 sec/batch)\n",
      "step 350,loss=1.44 (1834.4 examples/sec; 0.070 sec/batch)\n",
      "step 360,loss=1.51 (1934.1 examples/sec; 0.066 sec/batch)\n",
      "step 370,loss=1.46 (1786.0 examples/sec; 0.072 sec/batch)\n",
      "step 380,loss=1.38 (1846.5 examples/sec; 0.069 sec/batch)\n",
      "step 390,loss=1.55 (1807.1 examples/sec; 0.071 sec/batch)\n",
      "step 400,loss=1.49 (1958.1 examples/sec; 0.065 sec/batch)\n",
      "step 410,loss=1.51 (1843.8 examples/sec; 0.069 sec/batch)\n",
      "step 420,loss=1.52 (2017.4 examples/sec; 0.063 sec/batch)\n",
      "step 430,loss=1.40 (1855.4 examples/sec; 0.069 sec/batch)\n",
      "step 440,loss=1.65 (1798.2 examples/sec; 0.071 sec/batch)\n",
      "step 450,loss=1.45 (2014.9 examples/sec; 0.064 sec/batch)\n",
      "step 460,loss=1.45 (1975.2 examples/sec; 0.065 sec/batch)\n",
      "step 470,loss=1.53 (1852.8 examples/sec; 0.069 sec/batch)\n",
      "step 480,loss=1.49 (1943.2 examples/sec; 0.066 sec/batch)\n",
      "step 490,loss=1.43 (1899.2 examples/sec; 0.067 sec/batch)\n",
      "step 500,loss=1.55 (1869.2 examples/sec; 0.068 sec/batch)\n",
      "step 510,loss=1.48 (1829.1 examples/sec; 0.070 sec/batch)\n",
      "step 520,loss=1.26 (1839.2 examples/sec; 0.070 sec/batch)\n",
      "step 530,loss=1.21 (1773.8 examples/sec; 0.072 sec/batch)\n",
      "step 540,loss=1.56 (1906.8 examples/sec; 0.067 sec/batch)\n",
      "step 550,loss=1.43 (1793.6 examples/sec; 0.071 sec/batch)\n",
      "step 560,loss=1.57 (1899.9 examples/sec; 0.067 sec/batch)\n",
      "step 570,loss=1.40 (1753.9 examples/sec; 0.073 sec/batch)\n",
      "step 580,loss=1.50 (1894.0 examples/sec; 0.068 sec/batch)\n",
      "step 590,loss=1.48 (1816.2 examples/sec; 0.070 sec/batch)\n",
      "step 600,loss=1.38 (1867.7 examples/sec; 0.069 sec/batch)\n",
      "step 610,loss=1.27 (1796.5 examples/sec; 0.071 sec/batch)\n",
      "step 620,loss=1.40 (1828.9 examples/sec; 0.070 sec/batch)\n",
      "step 630,loss=1.57 (1993.7 examples/sec; 0.064 sec/batch)\n",
      "step 640,loss=1.48 (1926.8 examples/sec; 0.066 sec/batch)\n",
      "step 650,loss=1.32 (1854.8 examples/sec; 0.069 sec/batch)\n",
      "step 660,loss=1.40 (1769.2 examples/sec; 0.072 sec/batch)\n",
      "step 670,loss=1.47 (1928.7 examples/sec; 0.066 sec/batch)\n",
      "step 680,loss=1.44 (1799.5 examples/sec; 0.071 sec/batch)\n",
      "step 690,loss=1.43 (1861.7 examples/sec; 0.069 sec/batch)\n",
      "step 700,loss=1.17 (1891.0 examples/sec; 0.068 sec/batch)\n",
      "step 710,loss=1.28 (1917.4 examples/sec; 0.067 sec/batch)\n",
      "step 720,loss=1.55 (1850.8 examples/sec; 0.069 sec/batch)\n",
      "step 730,loss=1.46 (1966.8 examples/sec; 0.065 sec/batch)\n",
      "step 740,loss=1.48 (2075.4 examples/sec; 0.062 sec/batch)\n",
      "step 750,loss=1.27 (1774.8 examples/sec; 0.072 sec/batch)\n",
      "step 760,loss=1.19 (1758.1 examples/sec; 0.073 sec/batch)\n",
      "step 770,loss=1.45 (1861.7 examples/sec; 0.069 sec/batch)\n",
      "step 780,loss=1.19 (1838.1 examples/sec; 0.070 sec/batch)\n",
      "step 790,loss=1.31 (1888.3 examples/sec; 0.068 sec/batch)\n",
      "step 800,loss=1.45 (1903.4 examples/sec; 0.067 sec/batch)\n",
      "step 810,loss=1.31 (1908.5 examples/sec; 0.067 sec/batch)\n",
      "step 820,loss=1.45 (1929.8 examples/sec; 0.066 sec/batch)\n",
      "step 830,loss=1.14 (1898.0 examples/sec; 0.067 sec/batch)\n",
      "step 840,loss=1.42 (2004.1 examples/sec; 0.064 sec/batch)\n",
      "step 850,loss=1.25 (1837.2 examples/sec; 0.070 sec/batch)\n",
      "step 860,loss=1.18 (1916.8 examples/sec; 0.067 sec/batch)\n",
      "step 870,loss=1.40 (1996.4 examples/sec; 0.064 sec/batch)\n",
      "step 880,loss=1.21 (1992.7 examples/sec; 0.064 sec/batch)\n",
      "step 890,loss=1.13 (1905.3 examples/sec; 0.067 sec/batch)\n",
      "step 900,loss=1.23 (1851.5 examples/sec; 0.069 sec/batch)\n",
      "step 910,loss=1.34 (1763.1 examples/sec; 0.073 sec/batch)\n",
      "step 920,loss=1.42 (1868.6 examples/sec; 0.069 sec/batch)\n",
      "step 930,loss=1.28 (1818.5 examples/sec; 0.070 sec/batch)\n",
      "step 940,loss=1.41 (1852.9 examples/sec; 0.069 sec/batch)\n",
      "step 950,loss=1.20 (1840.2 examples/sec; 0.070 sec/batch)\n",
      "step 960,loss=1.45 (1837.8 examples/sec; 0.070 sec/batch)\n",
      "step 970,loss=1.40 (1766.5 examples/sec; 0.072 sec/batch)\n",
      "step 980,loss=1.22 (1811.9 examples/sec; 0.071 sec/batch)\n",
      "step 990,loss=1.22 (1931.7 examples/sec; 0.066 sec/batch)\n",
      "step 1000,loss=1.35 (1820.3 examples/sec; 0.070 sec/batch)\n",
      "step 1010,loss=1.35 (1770.2 examples/sec; 0.072 sec/batch)\n",
      "step 1020,loss=1.29 (1946.3 examples/sec; 0.066 sec/batch)\n",
      "step 1030,loss=1.40 (1995.8 examples/sec; 0.064 sec/batch)\n",
      "step 1040,loss=1.35 (1816.3 examples/sec; 0.070 sec/batch)\n",
      "step 1050,loss=1.30 (1925.0 examples/sec; 0.066 sec/batch)\n",
      "step 1060,loss=1.23 (2062.8 examples/sec; 0.062 sec/batch)\n",
      "step 1070,loss=1.21 (1848.1 examples/sec; 0.069 sec/batch)\n",
      "step 1080,loss=1.00 (1806.5 examples/sec; 0.071 sec/batch)\n",
      "step 1090,loss=1.13 (1829.1 examples/sec; 0.070 sec/batch)\n",
      "step 1100,loss=1.23 (1910.5 examples/sec; 0.067 sec/batch)\n",
      "step 1110,loss=1.33 (1902.8 examples/sec; 0.067 sec/batch)\n",
      "step 1120,loss=1.22 (1795.8 examples/sec; 0.071 sec/batch)\n",
      "step 1130,loss=1.22 (1920.7 examples/sec; 0.067 sec/batch)\n",
      "step 1140,loss=1.21 (1747.9 examples/sec; 0.073 sec/batch)\n",
      "step 1150,loss=1.13 (1744.8 examples/sec; 0.073 sec/batch)\n",
      "step 1160,loss=1.27 (1863.6 examples/sec; 0.069 sec/batch)\n",
      "step 1170,loss=1.11 (1938.6 examples/sec; 0.066 sec/batch)\n",
      "step 1180,loss=1.12 (1936.1 examples/sec; 0.066 sec/batch)\n",
      "step 1190,loss=1.15 (1754.5 examples/sec; 0.073 sec/batch)\n",
      "step 1200,loss=1.17 (1950.0 examples/sec; 0.066 sec/batch)\n",
      "step 1210,loss=1.20 (1829.7 examples/sec; 0.070 sec/batch)\n",
      "step 1220,loss=1.02 (1795.8 examples/sec; 0.071 sec/batch)\n",
      "step 1230,loss=1.33 (1891.6 examples/sec; 0.068 sec/batch)\n",
      "step 1240,loss=1.21 (1904.6 examples/sec; 0.067 sec/batch)\n",
      "step 1250,loss=1.34 (1854.7 examples/sec; 0.069 sec/batch)\n",
      "step 1260,loss=1.41 (1938.1 examples/sec; 0.066 sec/batch)\n",
      "step 1270,loss=1.27 (1987.5 examples/sec; 0.064 sec/batch)\n",
      "step 1280,loss=1.11 (1915.0 examples/sec; 0.067 sec/batch)\n",
      "step 1290,loss=1.10 (1853.8 examples/sec; 0.069 sec/batch)\n",
      "step 1300,loss=1.30 (1922.0 examples/sec; 0.067 sec/batch)\n",
      "step 1310,loss=1.10 (1943.0 examples/sec; 0.066 sec/batch)\n",
      "step 1320,loss=1.26 (1843.0 examples/sec; 0.069 sec/batch)\n",
      "step 1330,loss=1.12 (1793.2 examples/sec; 0.071 sec/batch)\n",
      "step 1340,loss=1.14 (1934.7 examples/sec; 0.066 sec/batch)\n",
      "step 1350,loss=1.07 (1922.1 examples/sec; 0.067 sec/batch)\n",
      "step 1360,loss=1.25 (2072.6 examples/sec; 0.062 sec/batch)\n",
      "step 1370,loss=1.27 (1794.0 examples/sec; 0.071 sec/batch)\n",
      "step 1380,loss=1.26 (1818.6 examples/sec; 0.070 sec/batch)\n",
      "step 1390,loss=1.30 (1808.5 examples/sec; 0.071 sec/batch)\n",
      "step 1400,loss=1.14 (1780.5 examples/sec; 0.072 sec/batch)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 1410,loss=1.09 (1805.8 examples/sec; 0.071 sec/batch)\n",
      "step 1420,loss=1.15 (1850.4 examples/sec; 0.069 sec/batch)\n",
      "step 1430,loss=1.28 (1877.9 examples/sec; 0.068 sec/batch)\n",
      "step 1440,loss=1.23 (1880.3 examples/sec; 0.068 sec/batch)\n",
      "step 1450,loss=1.32 (1852.8 examples/sec; 0.069 sec/batch)\n",
      "step 1460,loss=1.15 (1885.7 examples/sec; 0.068 sec/batch)\n",
      "step 1470,loss=1.14 (1904.5 examples/sec; 0.067 sec/batch)\n",
      "step 1480,loss=1.11 (1894.3 examples/sec; 0.068 sec/batch)\n",
      "step 1490,loss=1.29 (1879.9 examples/sec; 0.068 sec/batch)\n",
      "step 1500,loss=1.23 (1790.6 examples/sec; 0.071 sec/batch)\n",
      "step 1510,loss=1.30 (1844.3 examples/sec; 0.069 sec/batch)\n",
      "step 1520,loss=1.24 (1924.9 examples/sec; 0.066 sec/batch)\n",
      "step 1530,loss=1.04 (1959.7 examples/sec; 0.065 sec/batch)\n",
      "step 1540,loss=1.31 (1973.7 examples/sec; 0.065 sec/batch)\n",
      "step 1550,loss=1.22 (1889.7 examples/sec; 0.068 sec/batch)\n",
      "step 1560,loss=1.04 (1922.2 examples/sec; 0.067 sec/batch)\n",
      "step 1570,loss=1.24 (1826.2 examples/sec; 0.070 sec/batch)\n",
      "step 1580,loss=1.08 (1810.2 examples/sec; 0.071 sec/batch)\n",
      "step 1590,loss=1.25 (1894.0 examples/sec; 0.068 sec/batch)\n",
      "step 1600,loss=1.19 (1820.7 examples/sec; 0.070 sec/batch)\n",
      "step 1610,loss=1.26 (1882.1 examples/sec; 0.068 sec/batch)\n",
      "step 1620,loss=1.14 (1954.1 examples/sec; 0.066 sec/batch)\n",
      "step 1630,loss=1.18 (1862.0 examples/sec; 0.069 sec/batch)\n",
      "step 1640,loss=1.16 (1846.3 examples/sec; 0.069 sec/batch)\n",
      "step 1650,loss=1.20 (1769.6 examples/sec; 0.072 sec/batch)\n",
      "step 1660,loss=1.22 (1853.6 examples/sec; 0.069 sec/batch)\n",
      "step 1670,loss=1.08 (1893.0 examples/sec; 0.068 sec/batch)\n",
      "step 1680,loss=1.26 (1859.7 examples/sec; 0.069 sec/batch)\n",
      "step 1690,loss=1.20 (1831.2 examples/sec; 0.070 sec/batch)\n",
      "step 1700,loss=1.05 (1785.0 examples/sec; 0.072 sec/batch)\n",
      "step 1710,loss=1.16 (1770.3 examples/sec; 0.072 sec/batch)\n",
      "step 1720,loss=1.09 (1712.4 examples/sec; 0.075 sec/batch)\n",
      "step 1730,loss=1.25 (1856.0 examples/sec; 0.069 sec/batch)\n",
      "step 1740,loss=1.23 (1869.6 examples/sec; 0.068 sec/batch)\n",
      "step 1750,loss=1.21 (1903.8 examples/sec; 0.067 sec/batch)\n",
      "step 1760,loss=1.09 (2185.4 examples/sec; 0.059 sec/batch)\n",
      "step 1770,loss=1.07 (1848.5 examples/sec; 0.069 sec/batch)\n",
      "step 1780,loss=1.00 (2108.6 examples/sec; 0.061 sec/batch)\n",
      "step 1790,loss=1.11 (1791.7 examples/sec; 0.071 sec/batch)\n",
      "step 1800,loss=1.26 (1968.3 examples/sec; 0.065 sec/batch)\n",
      "step 1810,loss=1.28 (1826.0 examples/sec; 0.070 sec/batch)\n",
      "step 1820,loss=1.18 (1809.5 examples/sec; 0.071 sec/batch)\n",
      "step 1830,loss=1.05 (1857.0 examples/sec; 0.069 sec/batch)\n",
      "step 1840,loss=1.18 (1853.5 examples/sec; 0.069 sec/batch)\n",
      "step 1850,loss=1.14 (1872.3 examples/sec; 0.068 sec/batch)\n",
      "step 1860,loss=1.03 (1899.3 examples/sec; 0.067 sec/batch)\n",
      "step 1870,loss=0.98 (1940.6 examples/sec; 0.066 sec/batch)\n",
      "step 1880,loss=1.32 (1984.8 examples/sec; 0.064 sec/batch)\n",
      "step 1890,loss=1.20 (1759.9 examples/sec; 0.073 sec/batch)\n",
      "step 1900,loss=1.25 (1919.5 examples/sec; 0.067 sec/batch)\n",
      "step 1910,loss=1.06 (1912.9 examples/sec; 0.067 sec/batch)\n",
      "step 1920,loss=1.15 (1875.4 examples/sec; 0.068 sec/batch)\n",
      "step 1930,loss=1.06 (1696.2 examples/sec; 0.075 sec/batch)\n",
      "step 1940,loss=1.06 (1801.9 examples/sec; 0.071 sec/batch)\n",
      "step 1950,loss=1.10 (1883.2 examples/sec; 0.068 sec/batch)\n",
      "step 1960,loss=1.11 (1880.0 examples/sec; 0.068 sec/batch)\n",
      "step 1970,loss=1.23 (1959.5 examples/sec; 0.065 sec/batch)\n",
      "step 1980,loss=1.22 (1827.6 examples/sec; 0.070 sec/batch)\n",
      "step 1990,loss=0.85 (1856.4 examples/sec; 0.069 sec/batch)\n",
      "step 2000,loss=1.05 (1827.6 examples/sec; 0.070 sec/batch)\n",
      "step 2010,loss=1.11 (1921.6 examples/sec; 0.067 sec/batch)\n",
      "step 2020,loss=1.08 (1859.2 examples/sec; 0.069 sec/batch)\n",
      "step 2030,loss=1.20 (1824.9 examples/sec; 0.070 sec/batch)\n",
      "step 2040,loss=1.24 (2018.4 examples/sec; 0.063 sec/batch)\n",
      "step 2050,loss=1.13 (1775.0 examples/sec; 0.072 sec/batch)\n",
      "step 2060,loss=1.10 (1998.6 examples/sec; 0.064 sec/batch)\n",
      "step 2070,loss=1.27 (1882.1 examples/sec; 0.068 sec/batch)\n",
      "step 2080,loss=1.19 (1949.2 examples/sec; 0.066 sec/batch)\n",
      "step 2090,loss=1.21 (1903.1 examples/sec; 0.067 sec/batch)\n",
      "step 2100,loss=1.17 (1911.9 examples/sec; 0.067 sec/batch)\n",
      "step 2110,loss=1.04 (1867.6 examples/sec; 0.069 sec/batch)\n",
      "step 2120,loss=1.15 (1807.2 examples/sec; 0.071 sec/batch)\n",
      "step 2130,loss=1.04 (1831.5 examples/sec; 0.070 sec/batch)\n",
      "step 2140,loss=1.33 (1860.6 examples/sec; 0.069 sec/batch)\n",
      "step 2150,loss=1.13 (1945.2 examples/sec; 0.066 sec/batch)\n",
      "step 2160,loss=1.04 (1904.0 examples/sec; 0.067 sec/batch)\n",
      "step 2170,loss=1.12 (1802.6 examples/sec; 0.071 sec/batch)\n",
      "step 2180,loss=1.03 (1898.6 examples/sec; 0.067 sec/batch)\n",
      "step 2190,loss=1.14 (1854.3 examples/sec; 0.069 sec/batch)\n",
      "step 2200,loss=1.14 (1889.7 examples/sec; 0.068 sec/batch)\n",
      "step 2210,loss=1.08 (1770.8 examples/sec; 0.072 sec/batch)\n",
      "step 2220,loss=1.17 (1753.1 examples/sec; 0.073 sec/batch)\n",
      "step 2230,loss=1.24 (1865.8 examples/sec; 0.069 sec/batch)\n",
      "step 2240,loss=1.23 (1861.7 examples/sec; 0.069 sec/batch)\n",
      "step 2250,loss=1.04 (1879.2 examples/sec; 0.068 sec/batch)\n",
      "step 2260,loss=1.25 (1909.0 examples/sec; 0.067 sec/batch)\n",
      "step 2270,loss=0.96 (1835.1 examples/sec; 0.070 sec/batch)\n",
      "step 2280,loss=1.06 (1996.7 examples/sec; 0.064 sec/batch)\n",
      "step 2290,loss=1.19 (1920.3 examples/sec; 0.067 sec/batch)\n",
      "step 2300,loss=1.07 (1892.6 examples/sec; 0.068 sec/batch)\n",
      "step 2310,loss=1.15 (1884.4 examples/sec; 0.068 sec/batch)\n",
      "step 2320,loss=1.06 (1903.4 examples/sec; 0.067 sec/batch)\n",
      "step 2330,loss=1.05 (1942.6 examples/sec; 0.066 sec/batch)\n",
      "step 2340,loss=1.18 (1889.9 examples/sec; 0.068 sec/batch)\n",
      "step 2350,loss=1.05 (1935.9 examples/sec; 0.066 sec/batch)\n",
      "step 2360,loss=1.09 (1950.6 examples/sec; 0.066 sec/batch)\n",
      "step 2370,loss=1.08 (1951.0 examples/sec; 0.066 sec/batch)\n",
      "step 2380,loss=1.06 (1881.7 examples/sec; 0.068 sec/batch)\n",
      "step 2390,loss=1.09 (1865.3 examples/sec; 0.069 sec/batch)\n",
      "step 2400,loss=1.12 (1930.9 examples/sec; 0.066 sec/batch)\n",
      "step 2410,loss=1.15 (1845.5 examples/sec; 0.069 sec/batch)\n",
      "step 2420,loss=1.20 (1909.3 examples/sec; 0.067 sec/batch)\n",
      "step 2430,loss=1.11 (1790.4 examples/sec; 0.071 sec/batch)\n",
      "step 2440,loss=1.03 (1748.5 examples/sec; 0.073 sec/batch)\n",
      "step 2450,loss=0.96 (1680.2 examples/sec; 0.076 sec/batch)\n",
      "step 2460,loss=1.12 (1923.6 examples/sec; 0.067 sec/batch)\n",
      "step 2470,loss=1.00 (1813.9 examples/sec; 0.071 sec/batch)\n",
      "step 2480,loss=1.00 (1762.6 examples/sec; 0.073 sec/batch)\n",
      "step 2490,loss=1.15 (1826.9 examples/sec; 0.070 sec/batch)\n",
      "step 2500,loss=1.01 (1928.4 examples/sec; 0.066 sec/batch)\n",
      "step 2510,loss=1.20 (1945.2 examples/sec; 0.066 sec/batch)\n",
      "step 2520,loss=1.03 (1833.2 examples/sec; 0.070 sec/batch)\n",
      "step 2530,loss=1.09 (1833.7 examples/sec; 0.070 sec/batch)\n",
      "step 2540,loss=1.08 (1850.1 examples/sec; 0.069 sec/batch)\n",
      "step 2550,loss=1.04 (1976.4 examples/sec; 0.065 sec/batch)\n",
      "step 2560,loss=1.25 (1751.7 examples/sec; 0.073 sec/batch)\n",
      "step 2570,loss=1.16 (1901.6 examples/sec; 0.067 sec/batch)\n",
      "step 2580,loss=1.15 (1836.8 examples/sec; 0.070 sec/batch)\n",
      "step 2590,loss=1.00 (1864.7 examples/sec; 0.069 sec/batch)\n",
      "step 2600,loss=1.03 (1925.0 examples/sec; 0.066 sec/batch)\n",
      "step 2610,loss=1.33 (1839.7 examples/sec; 0.070 sec/batch)\n",
      "step 2620,loss=1.00 (1829.3 examples/sec; 0.070 sec/batch)\n",
      "step 2630,loss=1.19 (1801.3 examples/sec; 0.071 sec/batch)\n",
      "step 2640,loss=1.17 (1806.0 examples/sec; 0.071 sec/batch)\n",
      "step 2650,loss=0.88 (1918.6 examples/sec; 0.067 sec/batch)\n",
      "step 2660,loss=1.19 (1922.6 examples/sec; 0.067 sec/batch)\n",
      "step 2670,loss=1.16 (1787.0 examples/sec; 0.072 sec/batch)\n",
      "step 2680,loss=0.98 (1686.3 examples/sec; 0.076 sec/batch)\n",
      "step 2690,loss=0.93 (1885.7 examples/sec; 0.068 sec/batch)\n",
      "step 2700,loss=1.09 (1836.8 examples/sec; 0.070 sec/batch)\n",
      "step 2710,loss=0.95 (1897.7 examples/sec; 0.067 sec/batch)\n",
      "step 2720,loss=1.01 (1914.9 examples/sec; 0.067 sec/batch)\n",
      "step 2730,loss=1.33 (1783.1 examples/sec; 0.072 sec/batch)\n",
      "step 2740,loss=0.91 (1736.9 examples/sec; 0.074 sec/batch)\n",
      "step 2750,loss=1.08 (1842.5 examples/sec; 0.069 sec/batch)\n",
      "step 2760,loss=0.85 (1811.9 examples/sec; 0.071 sec/batch)\n",
      "step 2770,loss=1.15 (1905.2 examples/sec; 0.067 sec/batch)\n",
      "step 2780,loss=1.08 (1937.6 examples/sec; 0.066 sec/batch)\n",
      "step 2790,loss=0.99 (1823.4 examples/sec; 0.070 sec/batch)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 2800,loss=0.99 (1742.6 examples/sec; 0.073 sec/batch)\n",
      "step 2810,loss=1.21 (1878.7 examples/sec; 0.068 sec/batch)\n",
      "step 2820,loss=1.16 (1873.8 examples/sec; 0.068 sec/batch)\n",
      "step 2830,loss=0.95 (1924.8 examples/sec; 0.066 sec/batch)\n",
      "step 2840,loss=0.90 (1861.2 examples/sec; 0.069 sec/batch)\n",
      "step 2850,loss=0.97 (1896.9 examples/sec; 0.067 sec/batch)\n",
      "step 2860,loss=0.99 (1816.2 examples/sec; 0.070 sec/batch)\n",
      "step 2870,loss=1.08 (1845.3 examples/sec; 0.069 sec/batch)\n",
      "step 2880,loss=1.03 (1758.8 examples/sec; 0.073 sec/batch)\n",
      "step 2890,loss=0.99 (1847.8 examples/sec; 0.069 sec/batch)\n",
      "step 2900,loss=0.86 (1814.6 examples/sec; 0.071 sec/batch)\n",
      "step 2910,loss=1.01 (1847.6 examples/sec; 0.069 sec/batch)\n",
      "step 2920,loss=1.30 (1954.7 examples/sec; 0.065 sec/batch)\n",
      "step 2930,loss=1.09 (1745.9 examples/sec; 0.073 sec/batch)\n",
      "step 2940,loss=0.92 (1906.3 examples/sec; 0.067 sec/batch)\n",
      "step 2950,loss=1.03 (1711.7 examples/sec; 0.075 sec/batch)\n",
      "step 2960,loss=1.06 (1866.9 examples/sec; 0.069 sec/batch)\n",
      "step 2970,loss=1.07 (1850.1 examples/sec; 0.069 sec/batch)\n",
      "step 2980,loss=1.09 (1823.4 examples/sec; 0.070 sec/batch)\n",
      "step 2990,loss=1.04 (1814.9 examples/sec; 0.071 sec/batch)\n"
     ]
    }
   ],
   "source": [
    "# Training loop: run the optimizer for a fixed number of steps and\n",
    "# report loss plus throughput every 10 steps.\n",
    "for step in range(3000):\n",
    "    batch_start = time.time()  # start timing this batch\n",
    "    images_batch, labels_batch = sess.run([images_train, labels_train])  # fetch one training batch\n",
    "    _, loss_value = sess.run([train_op, loss],\n",
    "                             feed_dict={image_holder: images_batch, label_holder: labels_batch})\n",
    "    duration = time.time() - batch_start  # wall-clock seconds for one batch\n",
    "    if step % 10 == 0:  # report once every 10 steps\n",
    "        examples_per_sec = batch_size / duration  # training samples processed per second\n",
    "        sec_per_batch = float(duration)\n",
    "        train_info = 'step %d,loss=%.2f (%.1f examples/sec; %.3f sec/batch)'\n",
    "        print(train_info % (step, loss_value, examples_per_sec, sec_per_batch))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 7.测试模型准确率"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 83\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 106\n",
      "---------------------------------------\n",
      "collect number: 82\n",
      "---------------------------------------\n",
      "collect number: 87\n",
      "---------------------------------------\n",
      "collect number: 102\n",
      "---------------------------------------\n",
      "collect number: 97\n",
      "---------------------------------------\n",
      "collect number: 96\n",
      "---------------------------------------\n",
      "collect number: 98\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 94\n",
      "---------------------------------------\n",
      "collect number: 84\n",
      "---------------------------------------\n",
      "collect number: 91\n",
      "---------------------------------------\n",
      "collect number: 93\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 91\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 83\n",
      "---------------------------------------\n",
      "collect number: 86\n",
      "---------------------------------------\n",
      "collect number: 89\n",
      "---------------------------------------\n",
      "collect number: 83\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 87\n",
      "---------------------------------------\n",
      "collect number: 89\n",
      "---------------------------------------\n",
      "collect number: 86\n",
      "---------------------------------------\n",
      "collect number: 95\n",
      "---------------------------------------\n",
      "collect number: 102\n",
      "---------------------------------------\n",
      "collect number: 95\n",
      "---------------------------------------\n",
      "collect number: 94\n",
      "---------------------------------------\n",
      "collect number: 96\n",
      "---------------------------------------\n",
      "collect number: 97\n",
      "---------------------------------------\n",
      "collect number: 87\n",
      "---------------------------------------\n",
      "collect number: 100\n",
      "---------------------------------------\n",
      "collect number: 95\n",
      "---------------------------------------\n",
      "collect number: 93\n",
      "---------------------------------------\n",
      "collect number: 90\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 98\n",
      "---------------------------------------\n",
      "collect number: 97\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 89\n",
      "---------------------------------------\n",
      "collect number: 87\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 94\n",
      "---------------------------------------\n",
      "collect number: 97\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 98\n",
      "---------------------------------------\n",
      "collect number: 86\n",
      "---------------------------------------\n",
      "collect number: 84\n",
      "---------------------------------------\n",
      "collect number: 85\n",
      "---------------------------------------\n",
      "collect number: 98\n",
      "---------------------------------------\n",
      "collect number: 86\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 98\n",
      "---------------------------------------\n",
      "collect number: 91\n",
      "---------------------------------------\n",
      "collect number: 94\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 84\n",
      "---------------------------------------\n",
      "collect number: 91\n",
      "---------------------------------------\n",
      "collect number: 85\n",
      "---------------------------------------\n",
      "collect number: 94\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 92\n",
      "---------------------------------------\n",
      "collect number: 88\n",
      "---------------------------------------\n",
      "collect number: 95\n",
      "---------------------------------------\n",
      "collect number: 89\n",
      "---------------------------------------\n",
      "collect number: 106\n",
      "---------------------------------------\n",
      "collect number: 80\n",
      "---------------------------------------\n",
      "collect number: 93\n",
      "---------------------------------------\n",
      "collect number: 95\n",
      "---------------------------------------\n",
      "collect number: 93\n",
      "---------------------------------------\n",
      "collect number: 86\n",
      "---------------------------------------\n",
      "collect number: 95\n",
      "---------------------------------------\n",
      "collect number: 90\n",
      "---------------------------------------\n"
     ]
    }
   ],
   "source": [
    "# Evaluate model accuracy on the test set, one batch at a time.\n",
    "import math\n",
    "num_examples = 10000\n",
    "# ceil so a partial final batch is still evaluated (e.g. ceil(30.1) == 31)\n",
    "num_iter = int(math.ceil(num_examples / batch_size))\n",
    "true_count = 0\n",
    "total_example_count = num_iter * batch_size\n",
    "step = 0\n",
    "while step < num_iter:\n",
    "    # fetch one batch_size chunk of test data\n",
    "    image_batch, label_batch = sess.run([images_test, labels_test])\n",
    "    # top_k_op yields one boolean per example: True when the prediction is correct\n",
    "    predictions = sess.run([top_k_op],\n",
    "                           feed_dict={image_holder: image_batch, label_holder: label_batch})\n",
    "    batch_correct = np.sum(predictions)  # count of correctly classified examples in this batch\n",
    "    print('correct number:', batch_correct)\n",
    "    true_count += batch_correct\n",
    "    step += 1\n",
    "    print('---------------------------------------')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "precision @ 1 = 0.714\n"
     ]
    }
   ],
   "source": [
    "# Top-1 precision: fraction of evaluated test examples classified correctly\n",
    "precision = true_count / total_example_count\n",
    "print('precision @ 1 = %.3f' % precision)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
