{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 使用tensorflow，构造并训练一个神经网络，在测试集上达到超过98%的准确率。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting data/train-images-idx3-ubyte.gz\n",
      "Extracting data/train-labels-idx1-ubyte.gz\n",
      "Extracting data/t10k-images-idx3-ubyte.gz\n",
      "Extracting data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "mnist = input_data.read_data_sets('data/', one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 成功下载了 MNIST 数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parameter initializers.\n",
    "def weight_variable(shape):\n",
    "    \"\"\"Return a tf.Variable of `shape`, drawn from a truncated normal (stddev=0.1).\n",
    "\n",
    "    Small random weights break the symmetry between units at the start of training.\n",
    "    \"\"\"\n",
    "    initial = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(initial)\n",
    "\n",
    "def bias_variable(shape):\n",
    "    \"\"\"Return a tf.Variable of `shape`, initialized to the constant 0.1.\n",
    "\n",
    "    A small positive bias helps keep ReLU units active early in training.\n",
    "    \"\"\"\n",
    "    initial = tf.constant(0.1, shape=shape)\n",
    "    return tf.Variable(initial)\n",
    "\n",
    "# Convolution / pooling helpers.\n",
    "# Spatial size after a convolution: W2 = (W1 - Fw + 2P) / S + 1\n",
    "# (Fw = filter width, P = zero-padding, S = stride). With 'SAME' padding and\n",
    "# stride 1 the spatial size is preserved.\n",
    "def conv2d(x, w):\n",
    "    \"\"\"2-D convolution with stride 1 and 'SAME' (size-preserving) padding.\"\"\"\n",
    "    return tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME')\n",
    "\n",
    "def max_pool_2x2(x):\n",
    "    \"\"\"Max-pool with a 2x2 window and stride 2 (halves height and width).\"\"\"\n",
    "    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Placeholders for the flattened 28x28 input images and one-hot labels.\n",
    "# (The original cell defined x, y_ and x_image twice; the second tf.placeholder\n",
    "# silently orphaned the first graph nodes, so the duplicates are removed.)\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "\n",
    "# Reshape the flat input into NHWC image tensors: [batch, 28, 28, 1 channel].\n",
    "x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "# Conv layer 1: 5x5 filters, 1 input channel (grayscale), 32 feature maps.\n",
    "w_conv1 = weight_variable([5, 5, 1, 32])\n",
    "b_conv1 = bias_variable([32])\n",
    "h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)\n",
    "h_pool1 = max_pool_2x2(h_conv1)  # -> [batch, 14, 14, 32]\n",
    "\n",
    "# Conv layer 2: 5x5 filters, 32 input channels, 64 feature maps.\n",
    "w_conv2 = weight_variable([5, 5, 32, 64])\n",
    "b_conv2 = bias_variable([64])\n",
    "h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)  # -> [batch, 14, 14, 64]\n",
    "h_pool2 = max_pool_2x2(h_conv2)  # -> [batch, 7, 7, 64]\n",
    "\n",
    "# Fully connected layer: flatten the 7x7x64 feature maps, project to 1024 units.\n",
    "w_fc1 = weight_variable([7 * 7 * 64, 1024])\n",
    "b_fc1 = bias_variable([1024])\n",
    "h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n",
    "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Output layer: project the 1024-d FC features to 10 class scores, then softmax.\n",
    "# NOTE(review): feeding softmax output into tf.log in the loss is numerically\n",
    "# unstable; tf.nn.softmax_cross_entropy_with_logits on the raw logits is the\n",
    "# robust formulation.\n",
    "w_fc2 = weight_variable([1024, 10])\n",
    "b_fc2 = bias_variable([10])\n",
    "y_conv = tf.nn.softmax(tf.matmul(h_fc1, w_fc2) + b_fc2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\\n#train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\\nsess = tf.Session()\\ninit_op = tf.global_variables_initializer()\\nsess.run(init_op)\\n'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Loss: cross-entropy between the one-hot labels and the softmax predictions.\n",
    "# y_conv is clipped away from 0 so tf.log can never produce -inf/NaN when the\n",
    "# network becomes confident. reduction_indices=[1] sums over the 10 classes.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_conv, 1e-10, 1.0)), reduction_indices=[1]))\n",
    "# Adam converges to a better result here than plain gradient descent.\n",
    "train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 0,train accuracy 0.06\n",
      "step 100,train accuracy 0.89\n",
      "step 200,train accuracy 0.92\n",
      "step 300,train accuracy 0.98\n",
      "step 400,train accuracy 0.95\n",
      "step 500,train accuracy 0.95\n",
      "step 600,train accuracy 0.95\n",
      "step 700,train accuracy 0.96\n",
      "step 800,train accuracy 0.98\n",
      "step 900,train accuracy 0.96\n",
      "step 1000,train accuracy 0.98\n",
      "step 1100,train accuracy 0.95\n",
      "step 1200,train accuracy 0.99\n",
      "step 1300,train accuracy 0.99\n",
      "step 1400,train accuracy 0.99\n",
      "step 1500,train accuracy 0.99\n",
      "step 1600,train accuracy 0.98\n",
      "step 1700,train accuracy 1\n",
      "step 1800,train accuracy 0.99\n",
      "step 1900,train accuracy 0.97\n",
      "step 2000,train accuracy 0.97\n",
      "step 2100,train accuracy 1\n",
      "step 2200,train accuracy 0.98\n",
      "step 2300,train accuracy 0.99\n",
      "step 2400,train accuracy 0.98\n",
      "step 2500,train accuracy 0.99\n",
      "step 2600,train accuracy 1\n",
      "step 2700,train accuracy 0.99\n",
      "step 2800,train accuracy 0.98\n",
      "step 2900,train accuracy 1\n",
      "step 3000,train accuracy 0.99\n",
      "step 3100,train accuracy 0.99\n",
      "step 3200,train accuracy 1\n",
      "step 3300,train accuracy 1\n",
      "step 3400,train accuracy 1\n",
      "step 3500,train accuracy 0.98\n",
      "step 3600,train accuracy 1\n",
      "step 3700,train accuracy 0.98\n",
      "step 3800,train accuracy 0.98\n",
      "step 3900,train accuracy 0.99\n",
      "step 4000,train accuracy 1\n",
      "step 4100,train accuracy 1\n",
      "step 4200,train accuracy 1\n",
      "step 4300,train accuracy 1\n",
      "step 4400,train accuracy 1\n",
      "step 4500,train accuracy 0.99\n",
      "step 4600,train accuracy 0.97\n",
      "step 4700,train accuracy 1\n",
      "step 4800,train accuracy 0.99\n",
      "step 4900,train accuracy 1\n",
      "step 5000,train accuracy 0.99\n",
      "step 5100,train accuracy 1\n",
      "step 5200,train accuracy 0.99\n",
      "step 5300,train accuracy 0.99\n",
      "step 5400,train accuracy 1\n",
      "step 5500,train accuracy 0.99\n",
      "step 5600,train accuracy 1\n",
      "step 5700,train accuracy 1\n",
      "step 5800,train accuracy 1\n",
      "step 5900,train accuracy 1\n",
      "step 6000,train accuracy 1\n",
      "step 6100,train accuracy 1\n",
      "step 6200,train accuracy 1\n",
      "step 6300,train accuracy 1\n",
      "step 6400,train accuracy 1\n",
      "step 6500,train accuracy 1\n",
      "step 6600,train accuracy 1\n",
      "step 6700,train accuracy 1\n",
      "step 6800,train accuracy 1\n",
      "step 6900,train accuracy 1\n",
      "step 7000,train accuracy 0.99\n",
      "step 7100,train accuracy 0.99\n",
      "step 7200,train accuracy 1\n",
      "step 7300,train accuracy 1\n",
      "step 7400,train accuracy 1\n",
      "step 7500,train accuracy 1\n",
      "step 7600,train accuracy 1\n",
      "step 7700,train accuracy 1\n",
      "step 7800,train accuracy 1\n",
      "step 7900,train accuracy 1\n",
      "step 8000,train accuracy 1\n",
      "step 8100,train accuracy 1\n",
      "step 8200,train accuracy 1\n",
      "step 8300,train accuracy 1\n",
      "step 8400,train accuracy 1\n",
      "step 8500,train accuracy 1\n",
      "step 8600,train accuracy 1\n",
      "step 8700,train accuracy 1\n",
      "step 8800,train accuracy 1\n",
      "step 8900,train accuracy 1\n",
      "step 9000,train accuracy 1\n",
      "step 9100,train accuracy 1\n",
      "step 9200,train accuracy 1\n",
      "step 9300,train accuracy 1\n",
      "step 9400,train accuracy 1\n",
      "step 9500,train accuracy 1\n",
      "step 9600,train accuracy 1\n",
      "step 9700,train accuracy 1\n",
      "step 9800,train accuracy 1\n",
      "step 9900,train accuracy 1\n"
     ]
    }
   ],
   "source": [
    "# Train for 10000 mini-batch steps (more iterations gave a better result).\n",
    "correct_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(y_,1))\n",
    "# Accuracy = mean of the per-example correct/incorrect indicator.\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
    "for i in range(10000):\n",
    "    batch_x ,batch_y = mnist.train.next_batch(100)\n",
    "    sess.run(train_step,feed_dict = { x : batch_x, y_ : batch_y})\n",
    "    if i%100 == 0:\n",
    "        # Every 100 steps, report accuracy on the current training batch.\n",
    "        train_accuracy = accuracy.eval(session=sess,feed_dict = {x:batch_x,y_:batch_y})\n",
    "        print(\"step %d,train accuracy %g\"%(i,train_accuracy))\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "test accuracy 0.9916\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the trained model on the held-out MNIST test set.\n",
    "# (The original cell also defined an unused, misspelled `accuraty` tensor;\n",
    "# it has been removed — `accuracy` from the training cell is what is used.)\n",
    "print(\"test accuracy %g\"%accuracy.eval(session = sess,feed_dict = {x:mnist.test.images,y_:mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "#####  结果达到了0.9916，还不错，达到了老师的要求"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "## 作业总结\n",
    "1. 熟悉理解了tensorflow的框架\n",
    "2. 可搭建出一个简单可使用的模型，但是对于其他的API还是需要多多学习。\n",
    "3. 从效果来看，迭代次数多，效果会好一些；电脑性能差，不能做更多尝试。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
