{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "name:曾露莎"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting /tmp/tensorflow/mnist/input_data/train-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting /tmp/tensorflow/mnist/input_data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data: download (if needed) and load MNIST with one-hot labels.\n",
    "data_dir = '/tmp/tensorflow/mnist/input_data'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input placeholders: x holds flattened 28x28 images, y_ holds one-hot labels.\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters for the network and the training loop.\n",
    "\n",
    "# Number of output classes (digits 0-9).\n",
    "output_num=10\n",
    "\n",
    "\n",
    "# Conv layer 1: kernel size, input channels, output feature maps.\n",
    "cov1_size=5\n",
    "cov1_channel=1\n",
    "cov1_outputnum=48\n",
    "\n",
    "# Pooling layer 1 (2x2 max-pool, no parameters).\n",
    "\n",
    "# Conv layer 2: kernel size, input channels, output feature maps.\n",
    "cov2_size=5\n",
    "cov2_channel= cov1_outputnum\n",
    "cov2_outputnum=64\n",
    "\n",
    "# Pooling layer 2 (2x2 max-pool, no parameters).\n",
    "\n",
    "# Fully connected layer 1: number of hidden units.\n",
    "layer1_node=1024\n",
    "\n",
    "# Fully connected layer 2 (disabled in this configuration).\n",
    "#layer2_node=500\n",
    "\n",
    "# L2 regularization strength.\n",
    "lamda=0.00001\n",
    "\n",
    "# Learning-rate schedule: base rate, global step counter, decay factor.\n",
    "learning_rate_base=0.05\n",
    "global_step= tf.Variable(0,trainable=False)\n",
    "learning_rate_decay=0.8\n",
    "\n",
    "# Number of examples per training batch.\n",
    "batch_size=200\n",
    "\n",
    "# Dropout keep probability (a plain constant; used in the graph below).\n",
    "keep_prob=0.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Decaying learning rate: multiplied by learning_rate_decay every 600 steps.\n",
    "learning_rate=tf.train.exponential_decay(learning_rate_base,global_step,600,learning_rate_decay)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the CNN graph: two conv+pool stages, one FC layer, dropout, logits.\n",
    "\n",
    "with tf.name_scope('reshape'):\n",
    "  # Flattened 784-vector back to a 28x28 single-channel image.\n",
    "  x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "# Conv layer 1: cov1_outputnum feature maps from 5x5 kernels.\n",
    "with tf.name_scope('conv1'):\n",
    "  shape = [cov1_size, cov1_size, cov1_channel, cov1_outputnum]\n",
    "  # Truncated-normal init; variable also tagged into the 'WEIGHTS' collection.\n",
    "  W_conv1 = tf.Variable(tf.truncated_normal(shape, stddev=0.1),\n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "  b_conv1 = tf.Variable(tf.constant(0.1, shape=[cov1_outputnum]))  # constant bias init\n",
    "  # Stride-1 convolution with SAME padding keeps the 28x28 spatial size.\n",
    "  l_conv1 = tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1],\n",
    "                         padding='SAME') + b_conv1\n",
    "  h_conv1 = tf.nn.relu(l_conv1)\n",
    "\n",
    "# Pooling layer 1: 2x2 max-pool halves the spatial size to 14x14.\n",
    "with tf.name_scope('pool1'):\n",
    "  h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "# Conv layer 2: cov2_outputnum feature maps.\n",
    "with tf.name_scope('conv2'):\n",
    "  W_conv2 = tf.Variable(tf.truncated_normal([cov2_size, cov2_size, cov2_channel, cov2_outputnum], stddev=0.1),\n",
    "                        collections=[tf.GraphKeys.GLOBAL_VARIABLES,'WEIGHTS'])\n",
    "  b_conv2 = tf.Variable(tf.constant(0.1, shape=[cov2_outputnum]))\n",
    "  l_conv2 = tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1],\n",
    "                         padding='SAME') + b_conv2\n",
    "  h_conv2 = tf.nn.relu(l_conv2)\n",
    "\n",
    "# Pooling layer 2: halves the spatial size again, to 7x7.\n",
    "with tf.name_scope('pool2'):\n",
    "  h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1],\n",
    "                        strides=[1, 2, 2, 1], padding='VALID')\n",
    "\n",
    "# FC layer 1. The flattened input size is derived from cov2_outputnum instead\n",
    "# of the original hard-coded 7*7*64, so changing cov2_outputnum no longer\n",
    "# silently breaks this layer.\n",
    "fc1_input_size = 7 * 7 * cov2_outputnum\n",
    "with tf.name_scope('fc1'):\n",
    "  W_fc1 = tf.Variable(tf.truncated_normal([fc1_input_size, layer1_node], stddev=0.1))\n",
    "  b_fc1 = tf.Variable(tf.constant(0.1, shape=[layer1_node]))\n",
    "\n",
    "  h_pool2_flat = tf.reshape(h_pool2, [-1, fc1_input_size])\n",
    "  h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n",
    "\n",
    "# Dropout on the FC activations.\n",
    "# NOTE(review): keep_prob is a plain Python constant, so dropout also stays\n",
    "# active when later cells evaluate test accuracy. The usual fix is a\n",
    "# tf.placeholder fed 1.0 at eval time — confirm before changing, since the\n",
    "# feed_dicts in the training/eval cells would need updating too.\n",
    "with tf.name_scope('dropout'):\n",
    "  h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n",
    "\n",
    "# Output layer: unscaled logits for the output_num classes.\n",
    "with tf.name_scope('fc2'):\n",
    "  W_fc2 = tf.Variable(tf.truncated_normal([layer1_node, output_num], stddev=0.1))\n",
    "  b_fc2 = tf.Variable(tf.constant(0.1, shape=[output_num]))\n",
    "\n",
    "  y = tf.matmul(h_fc1_drop, W_fc2) + b_fc2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-10-6dbcb38d571c>:3: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See `tf.nn.softmax_cross_entropy_with_logits_v2`.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Loss: cross-entropy plus L2 regularization on the fully connected weights.\n",
    "# softmax_cross_entropy_with_logits is deprecated (see the warning in this\n",
    "# cell's output); the _v2 form is equivalent here because the labels come from\n",
    "# a fed placeholder, so no gradient ever flows into them.\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
    "# L2 regularizer; update this sum if layers are added or removed.\n",
    "regularizer = tf.contrib.layers.l2_regularizer(lamda)\n",
    "regularization = regularizer(W_fc1)+regularizer(W_fc2)\n",
    "# Total loss minimized by the optimizer.\n",
    "loss = cross_entropy+regularization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training op: plain gradient-descent SGD with the decaying learning rate.\n",
    "# (The original comment said Adam, but GradientDescentOptimizer is what is\n",
    "# actually used.) global_step increments once per training step.\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.855\n",
      "0.5678272\n",
      "0.92\n",
      "0.3120632\n",
      "0.945\n",
      "0.38289702\n",
      "0.95\n",
      "0.22923037\n",
      "0.96\n",
      "0.32702038\n",
      "0.96\n",
      "0.26777396\n",
      "0.975\n",
      "0.19379756\n",
      "0.975\n",
      "0.2977755\n",
      "0.97\n",
      "0.19573614\n",
      "0.935\n",
      "0.2864031\n",
      "0.97\n",
      "0.19202301\n",
      "0.985\n",
      "0.19424164\n",
      "0.985\n",
      "0.18826237\n",
      "0.97\n",
      "0.22794303\n",
      "0.965\n",
      "0.20651399\n",
      "0.98\n",
      "0.19453678\n",
      "0.97\n",
      "0.20063972\n",
      "0.98\n",
      "0.22047183\n",
      "0.975\n",
      "0.20332392\n",
      "0.975\n",
      "0.19656691\n",
      "0.995\n",
      "0.14663187\n",
      "0.965\n",
      "0.185206\n",
      "0.99\n",
      "0.16334683\n",
      "0.99\n",
      "0.18479909\n",
      "0.99\n",
      "0.15840858\n",
      "0.99\n",
      "0.14211279\n",
      "0.995\n",
      "0.17488825\n",
      "0.99\n",
      "0.18592231\n",
      "0.965\n",
      "0.1885271\n",
      "1.0\n",
      "0.15737328\n",
      "0.975\n",
      "0.16316874\n",
      "0.98\n",
      "0.1690585\n",
      "0.98\n",
      "0.17167333\n",
      "0.985\n",
      "0.21186474\n",
      "0.995\n",
      "0.15639678\n",
      "0.995\n",
      "0.1472495\n",
      "0.995\n",
      "0.18031554\n",
      "0.97\n",
      "0.225908\n",
      "0.975\n",
      "0.20281981\n",
      "0.995\n",
      "0.16949594\n",
      "0.97\n",
      "0.21573022\n",
      "0.98\n",
      "0.16212277\n",
      "0.985\n",
      "0.24295688\n",
      "0.985\n",
      "0.18106863\n",
      "0.985\n",
      "0.1599573\n",
      "0.975\n",
      "0.1444621\n",
      "0.99\n",
      "0.17656146\n",
      "0.96\n",
      "0.24255526\n",
      "0.98\n",
      "0.19065732\n",
      "0.985\n",
      "0.16934302\n"
     ]
    }
   ],
   "source": [
    "# Train for 5000 mini-batches, logging batch accuracy and loss every 100 steps.\n",
    "# The accuracy ops are built ONCE, before the loop: the original rebuilt\n",
    "# tf.equal/tf.reduce_mean on every report, growing the graph with duplicate\n",
    "# nodes. They remain module-level names for the evaluation cell below.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "for i in range(5000):\n",
    "    batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
    "    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n",
    "    if (i+1) % 100 == 0:\n",
    "        print(sess.run(accuracy, feed_dict={x: batch_xs, y_: batch_ys}))\n",
    "        print(sess.run(loss, feed_dict={x: batch_xs, y_: batch_ys}))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.98\n",
      "[ True  True  True ...  True  True  True]\n",
      "0.9831455\n",
      "4999\n"
     ]
    }
   ],
   "source": [
    "# Evaluate the trained model on the test and train sets.\n",
    "# NOTE(review): correct_prediction and accuracy are defined in the training\n",
    "# cell above — on a fresh kernel this cell fails if run before training.\n",
    "#correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "#accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "print(sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                      y_: mnist.test.labels}))\n",
    "print(sess.run(correct_prediction, feed_dict={x: mnist.test.images,\n",
    "                                      y_: mnist.test.labels}))\n",
    "\n",
    "print(sess.run(accuracy, feed_dict={x: mnist.train.images,\n",
    "                                      y_: mnist.train.labels}))\n",
    "print(i)"
   ]
  },
  {
   "cell_type": "raw",
   "metadata": {},
   "source": [
    "第一次结果：测试集正确率0.1032\n",
    "第一次参数：\n",
    "cov1_size=5\n",
    "cov1_channel=1\n",
    "cov1_outputnum=32\n",
    "cov2_size=5\n",
    "cov2_channel= cov1_outputnum\n",
    "cov2_outputnum=64\n",
    "#正则参数\n",
    "lamda=0.0001\n",
    "#学习率\n",
    "learning_rate_base=0.8\n",
    "global_step= tf.Variable(0,trainable=False)\n",
    "learning_rate_decay=0.99\n",
    "#一个batch的训练数目\n",
    "batch_size=200\n",
    "#\n",
    "keep_prob=0.5\n",
    "#全连接层1\n",
    "layer1_node=1024\n",
    "#全连接层2\n",
    "layer2_node=60\n",
    "\n",
    "第2次结果：测试集正确率0.9759,在第一次基础上1）把全连接层2去掉，2）同时学习率调整到0.01 3)迭代次数2000还是3000，忘记了\n",
    "第3次结果：测试集正确率0.9776,在第2次基础上1）增加全连接层2,500点 2)正则参数改为0.00001 3)迭代次数4612\n",
    "第4次结果以及之后的很多次，都在调整不同初始化参数，学习率参数，以及使用不同优化器。很慢，快吐血了。\n",
    "\n",
    "之所以采用native的方法写代码，是想具体再了解一下计算过程。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7rc1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
