{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "AFIchtHPxrM5"
   },
   "outputs": [],
   "source": [
    "# __future__ imports must be the very first statements of the compilation\n",
    "# unit; in the original they came after `import tensorflow`, which is a\n",
    "# SyntaxError under a plain compile. Reordered: future -> stdlib -> third-party.\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "import time\n",
    "import urllib\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "FLAGS = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "base_uri": "https://localhost:8080/",
     "height": 89
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 1342,
     "status": "ok",
     "timestamp": 1529336128810,
     "user": {
      "displayName": "Lip Gallagher",
      "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128",
      "userId": "113091702821929511633"
     },
     "user_tz": -480
    },
    "id": "1aYBNpD-zglv",
    "outputId": "4e9198d2-3451-4919-d215-16d2a31fbd7d"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./train-images-idx3-ubyte.gz\n",
      "Extracting ./train-labels-idx1-ubyte.gz\n",
      "Extracting ./t10k-images-idx3-ubyte.gz\n",
      "Extracting ./t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Load MNIST into the current working directory (downloaded on first run).\n",
    "# NOTE: the original also assigned an unused, hardcoded `data_dir` path that\n",
    "# was never referenced anywhere in the notebook; it has been removed.\n",
    "mnist = input_data.read_data_sets('./', source_url='http://yann.lecun.com/exdb/mnist/', one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "FjT1WThmzka2"
   },
   "outputs": [],
   "source": [
    "def weight_variable(shape):\n",
    "    \"\"\"Return a weight Variable drawn from a truncated normal (stddev=0.1).\n",
    "\n",
    "    Besides GLOBAL_VARIABLES, the variable is added to a custom 'Weights'\n",
    "    collection so all weights can be gathered via tf.get_collection('Weights')\n",
    "    (e.g. for an L2 penalty).\n",
    "    \"\"\"\n",
    "    init = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(init, collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'Weights'])\n",
    "\n",
    "\n",
    "def bias_variable(shape):\n",
    "    \"\"\"Return a bias Variable drawn from a truncated normal (stddev=0.1).\"\"\"\n",
    "    init = tf.truncated_normal(shape, stddev=0.1)\n",
    "    return tf.Variable(init)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "FwguWk_zznsZ"
   },
   "outputs": [],
   "source": [
    "# Input placeholders, fed per-batch via feed_dict in the training loop.\n",
    "x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 grayscale images\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot digit labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "bM-jS-lCzp-k"
   },
   "outputs": [],
   "source": [
    "# Reshape the flat 784-vector back into an NHWC image tensor (batch, 28, 28, 1).\n",
    "with tf.name_scope('reshape'):\n",
    "    x_image = tf.reshape(x, [-1, 28, 28, 1])\n",
    "\n",
    "# Conv layer 1: 5x5 kernels, 1 input channel -> 32 feature maps; 'SAME'\n",
    "# padding with stride 1 keeps the spatial size at 28x28.\n",
    "with tf.name_scope('conv1'):\n",
    "    w_conv1 = weight_variable([5,5,1,32])\n",
    "    b_conv1 = bias_variable([32])\n",
    "    l_conv1 = tf.nn.conv2d(x_image, w_conv1, strides=[1,1,1,1],\n",
    "                          padding = 'SAME') + b_conv1\n",
    "    # activation function\n",
    "    h_conv1 = tf.nn.relu(l_conv1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "jBJMAAk8zr-A"
   },
   "outputs": [],
   "source": [
    "# 2x2 max-pool, stride 2: 28x28x32 -> 14x14x32.\n",
    "# ('VALID' and 'SAME' are equivalent here since 28 divides evenly by 2.)\n",
    "with tf.name_scope('pool1'):\n",
    "    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1,2,2,1],\n",
    "                            strides =[1,2,2,1], padding='VALID')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "FSGuftHAzuA6"
   },
   "outputs": [],
   "source": [
    "# Conv layer 2: 5x5 kernels, 32 -> 64 feature maps; 'SAME' padding keeps 14x14.\n",
    "with tf.name_scope('conv2'):\n",
    "    w_conv2 = weight_variable([5,5,32,64])\n",
    "    b_conv2 = bias_variable([64])\n",
    "    l_conv2 = tf.nn.conv2d(h_pool1, w_conv2, strides=[1,1,1,1],\n",
    "                          padding='SAME') + b_conv2\n",
    "    h_conv2 = tf.nn.relu(l_conv2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "ebphhYCszvtS"
   },
   "outputs": [],
   "source": [
    "# Dropout keep probability, fed at run time (train: <1.0, eval: 1.0).\n",
    "# NOTE(review): dropout is applied directly to conv2 activations here,\n",
    "# which is less common than dropping only fully connected layers.\n",
    "keep_prob = tf.placeholder(tf.float32) #\n",
    "h_conv2_drop = tf.nn.dropout(h_conv2, keep_prob)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "rEJLfOPbzxq-"
   },
   "outputs": [],
   "source": [
    "# 2x2 max-pool, stride 2: 14x14x64 -> 7x7x64.\n",
    "with tf.name_scope('pool2'):\n",
    "    h_pool2 = tf.nn.max_pool(h_conv2_drop, ksize=[1,2,2,1],\n",
    "                            strides=[1,2,2,1], padding='SAME')\n",
    "# 14x14x64 -->  7x7x64"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "Plh29ot5zzwu"
   },
   "outputs": [],
   "source": [
    "# Fully connected layer 1: flatten 7x7x64 = 3136 features and project to 1024 units.\n",
    "with tf.name_scope('fc1'):\n",
    "    w_fc1 = weight_variable([7*7*64, 1024])\n",
    "    b_fc1 = bias_variable([1024])\n",
    "    h_pool2_flat = tf.reshape(h_pool2, [-1,7*7*64])\n",
    "    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "wB9OGzioz5dt"
   },
   "outputs": [],
   "source": [
    "# Dropout on the fully connected layer; shares keep_prob with the conv dropout above.\n",
    "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "v7YFjNdsz7WB"
   },
   "outputs": [],
   "source": [
    "# Output layer: 1024 -> 10 raw logits. No softmax here: it is applied\n",
    "# inside tf.nn.softmax_cross_entropy_with_logits_v2 in the loss cell.\n",
    "with tf.name_scope('fc2'):\n",
    "    w_fc2 = weight_variable([1024,10])\n",
    "    b_fc2 = bias_variable([10])\n",
    "    y = tf.matmul(h_fc1_drop, w_fc2) + b_fc2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "d0YaC8aAz82C"
   },
   "outputs": [],
   "source": [
    "# Learning-rate schedule constants. The original declared decay_rate = 0.96\n",
    "# and decay_steps = 1000 but never used them -- the exponential_decay call\n",
    "# hardcoded (1000, 0.99). The constants below are wired into the call with\n",
    "# the values that were actually in effect, so behavior is unchanged.\n",
    "decay_steps = 1000\n",
    "decay_rate = 0.99\n",
    "\n",
    "# Step counter; it is fed manually via feed_dict in the training loop\n",
    "# rather than being incremented by the optimizer.\n",
    "global_ = tf.Variable(tf.constant(0))\n",
    "\n",
    "# Mean softmax cross-entropy over the batch (softmax applied to logits here).\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y))\n",
    "\n",
    "# Fraction of predictions whose argmax matches the one-hot label.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "# lr = 0.01 * decay_rate ** (global_ / decay_steps)\n",
    "lr = tf.train.exponential_decay(0.01, global_, decay_steps, decay_rate)\n",
    "train_step = tf.train.GradientDescentOptimizer(lr).minimize(cross_entropy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "base_uri": "https://localhost:8080/",
     "height": 1817
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 3065839,
     "status": "ok",
     "timestamp": 1529340915368,
     "user": {
      "displayName": "Lip Gallagher",
      "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s128",
      "userId": "113091702821929511633"
     },
     "user_tz": -480
    },
    "id": "KTSpS4qXz-rm",
    "outputId": "4e75e2d4-ebde-4a31-cfb0-7cbe6463f690"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 100 : entropy loss: 0.547093, learning_rate: 0.00999006\n",
      "step 200 : entropy loss: 0.658505, learning_rate: 0.00998002\n",
      "step 300 : entropy loss: 0.389883, learning_rate: 0.00996999\n",
      "step 400 : entropy loss: 0.182838, learning_rate: 0.00995998\n",
      "step 500 : entropy loss: 0.239473, learning_rate: 0.00994997\n",
      "step 600 : entropy loss: 0.134234, learning_rate: 0.00993998\n",
      "step 700 : entropy loss: 0.16237, learning_rate: 0.00992999\n",
      "step 800 : entropy loss: 0.294226, learning_rate: 0.00992002\n",
      "step 900 : entropy loss: 0.225279, learning_rate: 0.00991005\n",
      "step 1000 : entropy loss: 0.301203, learning_rate: 0.0099001\n",
      "step 1100 : entropy loss: 0.107087, learning_rate: 0.00989015\n",
      "step 1200 : entropy loss: 0.0748727, learning_rate: 0.00988022\n",
      "step 1300 : entropy loss: 0.124798, learning_rate: 0.00987029\n",
      "step 1400 : entropy loss: 0.213426, learning_rate: 0.00986038\n",
      "step 1500 : entropy loss: 0.117386, learning_rate: 0.00985047\n",
      "step 1600 : entropy loss: 0.0411882, learning_rate: 0.00984058\n",
      "step 1700 : entropy loss: 0.156216, learning_rate: 0.00983069\n",
      "step 1800 : entropy loss: 0.168336, learning_rate: 0.00982082\n",
      "step 1900 : entropy loss: 0.225149, learning_rate: 0.00981095\n",
      "step 2000 : entropy loss: 0.103616, learning_rate: 0.0098011\n",
      "step 2100 : entropy loss: 0.123882, learning_rate: 0.00979125\n",
      "step 2200 : entropy loss: 0.115971, learning_rate: 0.00978142\n",
      "step 2300 : entropy loss: 0.0996101, learning_rate: 0.00977159\n",
      "step 2400 : entropy loss: 0.0943595, learning_rate: 0.00976178\n",
      "step 2500 : entropy loss: 0.164778, learning_rate: 0.00975197\n",
      "step 2600 : entropy loss: 0.0601167, learning_rate: 0.00974217\n",
      "step 2700 : entropy loss: 0.0797438, learning_rate: 0.00973239\n",
      "step 2800 : entropy loss: 0.128506, learning_rate: 0.00972261\n",
      "step 2900 : entropy loss: 0.203437, learning_rate: 0.00971284\n",
      "step 3000 : entropy loss: 0.149703, learning_rate: 0.00970309\n",
      "step 3100 : entropy loss: 0.0406435, learning_rate: 0.00969334\n",
      "step 3200 : entropy loss: 0.124693, learning_rate: 0.0096836\n",
      "step 3300 : entropy loss: 0.0689893, learning_rate: 0.00967388\n",
      "step 3400 : entropy loss: 0.105455, learning_rate: 0.00966416\n",
      "step 3500 : entropy loss: 0.126024, learning_rate: 0.00965445\n",
      "step 3700 : entropy loss: 0.0988861, learning_rate: 0.00963506\n",
      "step 3800 : entropy loss: 0.0733275, learning_rate: 0.00962539\n",
      "step 3900 : entropy loss: 0.0319875, learning_rate: 0.00961572\n",
      "step 4000 : entropy loss: 0.096612, learning_rate: 0.00960606\n",
      "step 4100 : entropy loss: 0.0715689, learning_rate: 0.00959641\n",
      "step 4200 : entropy loss: 0.0871794, learning_rate: 0.00958677\n",
      "step 4300 : entropy loss: 0.113212, learning_rate: 0.00957714\n",
      "step 4400 : entropy loss: 0.060648, learning_rate: 0.00956752\n",
      "step 4500 : entropy loss: 0.116486, learning_rate: 0.00955791\n",
      "step 4600 : entropy loss: 0.103686, learning_rate: 0.0095483\n",
      "step 4700 : entropy loss: 0.0300869, learning_rate: 0.00953871\n",
      "step 4800 : entropy loss: 0.0595895, learning_rate: 0.00952913\n",
      "step 4900 : entropy loss: 0.0607501, learning_rate: 0.00951956\n",
      "step 5000 : entropy loss: 0.109295, learning_rate: 0.00951\n",
      "step 5100 : entropy loss: 0.120676, learning_rate: 0.00950044\n",
      "step 5200 : entropy loss: 0.0439732, learning_rate: 0.0094909\n",
      "step 5300 : entropy loss: 0.042167, learning_rate: 0.00948137\n",
      "step 5400 : entropy loss: 0.0583238, learning_rate: 0.00947184\n",
      "step 5500 : entropy loss: 0.0975562, learning_rate: 0.00946233\n",
      "step 5600 : entropy loss: 0.0324762, learning_rate: 0.00945282\n",
      "step 5700 : entropy loss: 0.149319, learning_rate: 0.00944333\n",
      "step 5800 : entropy loss: 0.0863054, learning_rate: 0.00943384\n",
      "step 5900 : entropy loss: 0.0220461, learning_rate: 0.00942436\n",
      "step 6000 : entropy loss: 0.0631105, learning_rate: 0.0094149\n",
      "step 6100 : entropy loss: 0.0779958, learning_rate: 0.00940544\n",
      "step 6200 : entropy loss: 0.0661816, learning_rate: 0.00939599\n",
      "step 6300 : entropy loss: 0.114338, learning_rate: 0.00938655\n",
      "step 6400 : entropy loss: 0.113257, learning_rate: 0.00937712\n",
      "step 6500 : entropy loss: 0.04633, learning_rate: 0.0093677\n",
      "step 6600 : entropy loss: 0.0185065, learning_rate: 0.00935829\n",
      "step 6700 : entropy loss: 0.0369185, learning_rate: 0.00934889\n",
      "step 6800 : entropy loss: 0.0871257, learning_rate: 0.0093395\n",
      "step 6900 : entropy loss: 0.0725006, learning_rate: 0.00933012\n",
      "step 7000 : entropy loss: 0.0379271, learning_rate: 0.00932075\n",
      "step 7100 : entropy loss: 0.0944356, learning_rate: 0.00931138\n",
      "step 7200 : entropy loss: 0.037325, learning_rate: 0.00930203\n",
      "step 7300 : entropy loss: 0.0600299, learning_rate: 0.00929269\n",
      "step 7400 : entropy loss: 0.0730991, learning_rate: 0.00928335\n",
      "step 7500 : entropy loss: 0.0268274, learning_rate: 0.00927403\n",
      "step 7600 : entropy loss: 0.0454812, learning_rate: 0.00926471\n",
      "step 7700 : entropy loss: 0.0657343, learning_rate: 0.0092554\n",
      "step 7800 : entropy loss: 0.0341563, learning_rate: 0.00924611\n",
      "step 7900 : entropy loss: 0.0534192, learning_rate: 0.00923682\n",
      "step 8000 : entropy loss: 0.0694282, learning_rate: 0.00922754\n",
      "step 8100 : entropy loss: 0.150814, learning_rate: 0.00921827\n",
      "step 8200 : entropy loss: 0.0534539, learning_rate: 0.00920901\n",
      "step 8300 : entropy loss: 0.113366, learning_rate: 0.00919976\n",
      "step 8400 : entropy loss: 0.0530786, learning_rate: 0.00919052\n",
      "step 8500 : entropy loss: 0.0563318, learning_rate: 0.00918129\n",
      "step 8600 : entropy loss: 0.0392595, learning_rate: 0.00917206\n",
      "step 8700 : entropy loss: 0.109769, learning_rate: 0.00916285\n",
      "step 8800 : entropy loss: 0.0224851, learning_rate: 0.00915365\n",
      "step 8900 : entropy loss: 0.0109924, learning_rate: 0.00914445\n",
      "step 9000 : entropy loss: 0.0443523, learning_rate: 0.00913526\n",
      "step 9100 : entropy loss: 0.0283378, learning_rate: 0.00912609\n",
      "step 9200 : entropy loss: 0.0644315, learning_rate: 0.00911692\n",
      "step 9300 : entropy loss: 0.0345144, learning_rate: 0.00910776\n",
      "step 9400 : entropy loss: 0.0151957, learning_rate: 0.00909861\n",
      "step 9500 : entropy loss: 0.07032, learning_rate: 0.00908947\n",
      "step 9600 : entropy loss: 0.0126581, learning_rate: 0.00908034\n",
      "step 9700 : entropy loss: 0.0218794, learning_rate: 0.00907122\n",
      "step 9800 : entropy loss: 0.057186, learning_rate: 0.00906211\n",
      "step 9900 : entropy loss: 0.0540107, learning_rate: 0.00905301\n",
      "step 10000 : entropy loss: 0.0206365, learning_rate: 0.00904391\n",
      "test accuracy 0.9834\n"
     ]
    }
   ],
   "source": [
    "# Train for 10k mini-batches, then evaluate once on the test set.\n",
    "# Fixes vs. original: (1) test accuracy was recomputed over the FULL test set\n",
    "# on every training step but only the final value was used -- evaluation now\n",
    "# runs once after the loop; (2) evaluation fed keep_prob=0.75, leaving dropout\n",
    "# active at test time -- it now uses keep_prob=1.0.\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    for step in range(10000):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(100)\n",
    "        # Feed the step index into global_ so the learning rate decays.\n",
    "        _, loss, l = sess.run([train_step, cross_entropy, lr],\n",
    "                              feed_dict={x: batch_xs, y_: batch_ys,\n",
    "                                         keep_prob: 0.75, global_: step})\n",
    "        if (step + 1) % 100 == 0:\n",
    "            print('step %d : entropy loss: %g, learning_rate: %g' % (step + 1, loss, l))\n",
    "    # Evaluate with dropout disabled.\n",
    "    test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                             y_: mnist.test.labels,\n",
    "                                             keep_prob: 1.0})\n",
    "    print('test accuracy %g' % (test_acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "TseGjjjW0Uv_"
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "default_view": {},
   "name": "Untitled0.ipynb",
   "provenance": [],
   "version": "0.3.2",
   "views": {}
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
