{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# __future__ imports must precede every other statement (plain Python raises\n",
    "# SyntaxError otherwise; the original order only ran due to notebook leniency).\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "import time\n",
    "import urllib\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "FLAGS = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-f47bfa4be670>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From D:\\Anaconda3\\envs\\py3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From D:\\Anaconda3\\envs\\py3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From D:\\Anaconda3\\envs\\py3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From D:\\Anaconda3\\envs\\py3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ./t10k-images-idx3-ubyte.gz\n",
      "Extracting ./t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From D:\\Anaconda3\\envs\\py3\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Load MNIST with one-hot labels into the working directory\n",
    "# (read_data_sets downloads the four .gz archives on first run, then extracts them).\n",
    "# The previously-assigned data_dir was never used, so it has been removed.\n",
    "mnist = input_data.read_data_sets('./', source_url='http://yann.lecun.com/exdb/mnist/', one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input placeholder: batches of flattened 28x28 = 784-pixel MNIST images.\n",
    "# (The dead commented-out single-layer softmax model was removed; the\n",
    "# hidden-layer model below defines the weights actually used.)\n",
    "x = tf.placeholder(tf.float32, [None, 784])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Placeholders for the one-hot target labels and the dropout keep probability.\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])\n",
    "keep_prob = tf.placeholder(tf.float32)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 设置隐藏层节点数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of units in the single hidden layer.\n",
    "layer_node = 1000"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 设置learningrate参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Kept as a tf.Variable (not a constant) so the training loop can\n",
    "# reassign it each epoch to apply exponential learning-rate decay.\n",
    "learning_rate = tf.Variable(0.1, dtype= tf.float32)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 设置training_step, batch_size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SGD steps per epoch and examples per mini-batch.\n",
    "training_step = 20000\n",
    "batch_size = 100"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 设置隐藏层数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hidden-layer parameters: truncated-normal init (stddev 0.1), zero biases.\n",
    "weight1 = tf.Variable(tf.truncated_normal([784,layer_node], stddev=0.1)) # 784x1000 matrix (original comment said 74*1000 — typo)\n",
    "bias1 = tf.Variable(tf.constant(0.0, shape=[layer_node]))\n",
    "# A second 1000x300 hidden layer was tried and disabled:\n",
    "#weight2 = tf.Variable(tf.truncated_normal([layer_node,300], stddev=0.1))\n",
    "#bias2 = tf.Variable(tf.constant(0.0, shape=[300]))\n",
    "# Output-layer parameters mapping the hidden layer to 10 class logits.\n",
    "weight3 =tf.Variable(tf.truncated_normal([layer_node, 10], stddev = 0.1)) \n",
    "bias3 = tf.Variable(tf.constant(0.0, shape= [10]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Forward pass: tanh hidden layer with dropout, then a linear output layer (raw logits).\n",
    "layer1 = tf.nn.tanh(tf.matmul(x, weight1) + bias1)\n",
    "layer1_drop = tf.nn.dropout(layer1, keep_prob)  # dropout randomly zeroes units per step so they neither update nor contribute — regularizes and saves time\n",
    "#layer2 = tf.nn.tanh(tf.matmul(layer1_drop, weight2) + bias2)\n",
    "#layer2_drop = tf.nn.dropout(layer2, keep_prob)\n",
    "y = tf.matmul(layer1_drop, weight3) + bias3 "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 计算交叉熵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mean softmax cross-entropy between the logits y and the one-hot labels y_\n",
    "# (the _v2 variant applies softmax internally, so y stays as raw logits).\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(\n",
    "                             labels=y_,\n",
    "                              logits=y))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 增加正则化的损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# L2 regularization — disabled; the author found plain cross-entropy\n",
    "# performed better (see the result notes in the markdown cells below).\n",
    "#regularization_rate = 0.1\n",
    "#regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)  # l2 regularizer\n",
    "#regularization = regularizer(weight1) + regularizer(weight2) + regularizer(weight3)\n",
    "loss = cross_entropy# + regularization # total loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plain gradient-descent optimizer on the (unregularized) loss;\n",
    "# learning_rate is a Variable that the training loop decays per epoch.\n",
    "train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n",
    "#sess = tf.Session()\n",
    "#init_op = tf.global_variables_initializer()\n",
    "#sess.run(init_op)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "After 0 , test accuracy using average model is 0.979 , learing_rate is 0.1 \n",
      "After 1 , test accuracy using average model is 0.9804 , learing_rate is 0.093 \n",
      "After 2 , test accuracy using average model is 0.9812 , learing_rate is 0.08649 \n",
      "After 3 , test accuracy using average model is 0.9812 , learing_rate is 0.0804357 \n",
      "After 4 , test accuracy using average model is 0.9814 , learing_rate is 0.0748052 \n",
      "After 5 , test accuracy using average model is 0.9818 , learing_rate is 0.0695688 \n",
      "After 6 , test accuracy using average model is 0.9819 , learing_rate is 0.064699 \n",
      "After 7 , test accuracy using average model is 0.9814 , learing_rate is 0.0601701 \n",
      "After 8 , test accuracy using average model is 0.9816 , learing_rate is 0.0559582 \n",
      "After 9 , test accuracy using average model is 0.9816 , learing_rate is 0.0520411 \n",
      "After 10 , test accuracy using average model is 0.982 , learing_rate is 0.0483982 \n",
      "After 11 , test accuracy using average model is 0.982 , learing_rate is 0.0450104 \n",
      "After 12 , test accuracy using average model is 0.9818 , learing_rate is 0.0418596 \n",
      "After 13 , test accuracy using average model is 0.982 , learing_rate is 0.0389295 \n",
      "After 14 , test accuracy using average model is 0.9819 , learing_rate is 0.0362044 \n",
      "After 15 , test accuracy using average model is 0.982 , learing_rate is 0.0336701 \n",
      "After 16 , test accuracy using average model is 0.982 , learing_rate is 0.0313132 \n",
      "After 17 , test accuracy using average model is 0.9819 , learing_rate is 0.0291213 \n",
      "After 18 , test accuracy using average model is 0.9819 , learing_rate is 0.0270828 \n",
      "After 19 , test accuracy using average model is 0.9819 , learing_rate is 0.025187 \n",
      "After 20 , test accuracy using average model is 0.9819 , learing_rate is 0.0234239 \n",
      "After 21 , test accuracy using average model is 0.9819 , learing_rate is 0.0217842 \n",
      "After 22 , test accuracy using average model is 0.9819 , learing_rate is 0.0202593 \n",
      "After 23 , test accuracy using average model is 0.9819 , learing_rate is 0.0188412 \n",
      "After 24 , test accuracy using average model is 0.9819 , learing_rate is 0.0175223 \n",
      "After 25 , test accuracy using average model is 0.9819 , learing_rate is 0.0162957 \n",
      "After 26 , test accuracy using average model is 0.9819 , learing_rate is 0.015155 \n",
      "After 27 , test accuracy using average model is 0.9819 , learing_rate is 0.0140942 \n",
      "After 28 , test accuracy using average model is 0.9819 , learing_rate is 0.0131076 \n",
      "After 29 , test accuracy using average model is 0.9819 , learing_rate is 0.0121901 \n",
      "wasted time 7784.421243429184\n"
     ]
    }
   ],
   "source": [
    "# Train: 30 epochs of 20000 mini-batch SGD steps, with per-epoch\n",
    "# learning-rate decay and full-test-set accuracy reporting.\n",
    "correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))  # argmax gives the index of the max logit/label; equal yields a boolean vector\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # cast booleans to float32 and average\n",
    "init = tf.global_variables_initializer()\n",
    "with tf.Session() as sess:\n",
    "    sess.run(init)\n",
    "    \n",
    "    start_time = time.time()\n",
    "    for e in range(30):\n",
    "        sess.run(tf.assign(learning_rate, 0.1*(0.93**e))) # exponential learning-rate decay: 0.1 * 0.93^epoch\n",
    "        for i in range(training_step):\n",
    "            # Fetch one mini-batch and run a single training step.\n",
    "            # NOTE(review): keep_prob is fed as 1.0, so dropout is effectively disabled during training.\n",
    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
    "            sess.run(train_step, feed_dict={x: batch_xs,\n",
    "                                            y_: batch_ys, keep_prob:1.0})\n",
    "        lr = sess.run(learning_rate)\n",
    "        \n",
    "        \n",
    "        # Report accuracy once per epoch.\n",
    "        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,\n",
    "                                            y_: mnist.test.labels, keep_prob:1.0})# evaluate on the full test set\n",
    "        print('After %d , test accuracy using average model is %g , learing_rate is %g '%(e, test_acc, lr))\n",
    "   \n",
    "    #for j in range(10):\n",
    "    #    testSet = mnist.test.next_batch(2000)\n",
    "    #    print('After %d time, test accuarcy using average model is %g'%(j,accuracy.eval(feed_dict={x: testSet[0],\n",
    "    #                                                                                        y_: testSet[1]})))\n",
    "    #print(testSet[0])\n",
    "        \n",
    "    end_time = time.time()\n",
    "    print('wasted time', str(end_time - start_time))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 上面结果神经网络参数：learningrate--0.012-0.1, 两层隐藏层（1000，10），迭代次数20000，batchsize=100，\n",
    "这轮结果是没有使用tf.contrib.layer.l2_regularizer,y_pred函数也没带激活函数，但是结果却更好。\n",
    "总结：探索过程中，发现对准确率影响较大是隐藏层数量以及其节点数，正则等参数对其影响较小（达不到质的提升）迭代次数也只是为了更好找到最佳解的，对于batch-size的大小，只尝试了50,100，但是直觉认为batchsize越大，对于准确率是有提升的（但计算消耗很大）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 神经网络参数： learningrate--0.069-0.1, 三层隐藏层(1000,500,300,10)， l2正则（系数0.01）， 激活函数无，迭代次数20000,batchsize=100\n",
    "After 0 , test accuracy using average model is 0.9739 , learing_rate is 0.1 \n",
    "After 1 , test accuracy using average model is 0.9714 , learing_rate is 0.096 \n",
    "After 2 , test accuracy using average model is 0.977 , learing_rate is 0.09216 \n",
    "After 3 , test accuracy using average model is 0.9755 , learing_rate is 0.0884736 \n",
    "After 4 , test accuracy using average model is 0.9662 , learing_rate is 0.0849347 \n",
    "After 5 , test accuracy using average model is 0.9742 , learing_rate is 0.0815373 \n",
    "After 6 , test accuracy using average model is 0.9625 , learing_rate is 0.0782758 \n",
    "After 7 , test accuracy using average model is 0.9728 , learing_rate is 0.0751447 \n",
    "After 8 , test accuracy using average model is 0.9632 , learing_rate is 0.072139 \n",
    "After 9 , test accuracy using average model is 0.9687 , learing_rate is 0.0692534 \n",
    "wasted time 3800.9013991355896\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 神经网络参数： learningrate--0.0069-0.01, 隐藏层(1000,500,300,10)， l2正则（系数0.01）， 激活函数softmax，迭代次数10000 batchsize=100\n",
    "After 0 , test accuracy using average model is 0.839 , learing_rate is 0.01 \n",
    "After 1 , test accuracy using average model is 0.9277 , learing_rate is 0.0096 \n",
    "After 2 , test accuracy using average model is 0.9325 , learing_rate is 0.009216 \n",
    "After 3 , test accuracy using average model is 0.9364 , learing_rate is 0.00884736 \n",
    "After 4 , test accuracy using average model is 0.9374 , learing_rate is 0.00849347 \n",
    "After 5 , test accuracy using average model is 0.9411 , learing_rate is 0.00815373 \n",
    "After 6 , test accuracy using average model is 0.9421 , learing_rate is 0.00782758 \n",
    "After 7 , test accuracy using average model is 0.9435 , learing_rate is 0.00751447 \n",
    "After 8 , test accuracy using average model is 0.9443 , learing_rate is 0.0072139 \n",
    "After 9 , test accuracy using average model is 0.9457 , learing_rate is 0.00692534 \n",
    "wasted time 1878.380437374115\n",
    "**从结果来看learningrate还可以继续调"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 神经网络参数： learningrate--0.0046-0.01, 隐藏层(1000,500,300,10), l2正则（系数0.01）， 激活函数softmax，迭代次数20000,batchsize=50\n",
    "After 0 , test accuracy using average model is 0.8377 , learing_rate is 0.01 \n",
    "After 1 , test accuracy using average model is 0.8437 , learing_rate is 0.0096 \n",
    "After 2 , test accuracy using average model is 0.8474 , learing_rate is 0.009216 \n",
    "After 3 , test accuracy using average model is 0.8496 , learing_rate is 0.00884736 \n",
    "After 4 , test accuracy using average model is 0.8515 , learing_rate is 0.00849347 \n",
    "After 5 , test accuracy using average model is 0.8534 , learing_rate is 0.00815373 \n",
    "After 6 , test accuracy using average model is 0.855 , learing_rate is 0.00782758 \n",
    "After 7 , test accuracy using average model is 0.9464 , learing_rate is 0.00751447 \n",
    "After 8 , test accuracy using average model is 0.949 , learing_rate is 0.0072139 \n",
    "After 9 , test accuracy using average model is 0.9504 , learing_rate is 0.00692534 \n",
    "After 10 , test accuracy using average model is 0.9519 , learing_rate is 0.00664833 \n",
    "After 11 , test accuracy using average model is 0.9524 , learing_rate is 0.00638239 \n",
    "After 12 , test accuracy using average model is 0.9535 , learing_rate is 0.0061271 \n",
    "After 13 , test accuracy using average model is 0.9546 , learing_rate is 0.00588201 \n",
    "After 14 , test accuracy using average model is 0.9548 , learing_rate is 0.00564673 \n",
    "After 15 , test accuracy using average model is 0.9563 , learing_rate is 0.00542086 \n",
    "After 16 , test accuracy using average model is 0.9584 , learing_rate is 0.00520403 \n",
    "After 17 , test accuracy using average model is 0.9586 , learing_rate is 0.00499587 \n",
    "After 18 , test accuracy using average model is 0.9583 , learing_rate is 0.00479603 \n",
    "After 19 , test accuracy using average model is 0.9591 , learing_rate is 0.00460419 \n",
    "wasted time 5556.852833747864"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 神经网络参数： learningrate--0.0061-0.01, 隐藏层(1000,500,300,10),， 激活函数softmax，迭代次数20000,batchsize=100 \n",
    "After 0 , test accuracy using average model is 0.8624 , learing_rate is 0.01 \n",
    "After 1 , test accuracy using average model is 0.8711 , learing_rate is 0.0098 \n",
    "After 2 , test accuracy using average model is 0.8748 , learing_rate is 0.009604 \n",
    "After 3 , test accuracy using average model is 0.8773 , learing_rate is 0.00941192 \n",
    "After 4 , test accuracy using average model is 0.8781 , learing_rate is 0.00922368 \n",
    "After 5 , test accuracy using average model is 0.9685 , learing_rate is 0.00903921 \n",
    "After 6 , test accuracy using average model is 0.9711 , learing_rate is 0.00885842 \n",
    "After 7 , test accuracy using average model is 0.9727 , learing_rate is 0.00868126 \n",
    "After 8 , test accuracy using average model is 0.9739 , learing_rate is 0.00850763 \n",
    "After 9 , test accuracy using average model is 0.9748 , learing_rate is 0.00833748 \n",
    "After 10 , test accuracy using average model is 0.9749 , learing_rate is 0.00817073 \n",
    "After 11 , test accuracy using average model is 0.9756 , learing_rate is 0.00800731 \n",
    "After 12 , test accuracy using average model is 0.9756 , learing_rate is 0.00784717 \n",
    "After 13 , test accuracy using average model is 0.9762 , learing_rate is 0.00769022 \n",
    "After 14 , test accuracy using average model is 0.9756 , learing_rate is 0.00753642 \n",
    "After 15 , test accuracy using average model is 0.976 , learing_rate is 0.00738569 \n",
    "After 16 , test accuracy using average model is 0.9763 , learing_rate is 0.00723798 \n",
    "After 17 , test accuracy using average model is 0.9764 , learing_rate is 0.00709322 \n",
    "After 18 , test accuracy using average model is 0.9762 , learing_rate is 0.00695135 \n",
    "After 19 , test accuracy using average model is 0.9762 , learing_rate is 0.00681233 \n",
    "After 20 , test accuracy using average model is 0.9762 , learing_rate is 0.00667608 \n",
    "After 21 , test accuracy using average model is 0.9762 , learing_rate is 0.00654256 \n",
    "After 22 , test accuracy using average model is 0.9761 , learing_rate is 0.00641171 \n",
    "After 23 , test accuracy using average model is 0.9763 , learing_rate is 0.00628347 \n",
    "After 24 , test accuracy using average model is 0.9761 , learing_rate is 0.0061578 \n",
    "wasted time 9964.983964443207"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
