{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 229,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import argparse\n",
    "import sys\n",
    "import numpy as np\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "FLAGS = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 230,
   "metadata": {},
   "outputs": [],
   "source": [
    "INPUT_NODE = 784  # input layer size: MNIST images are 28*28 = 784 pixels\n",
    "OUTPUT_NODE = 10   # output layer size: one class per digit 0-9\n",
    "#LAYER1_NODE = 100  # hidden layer size (the network has a single hidden layer)\n",
    "#LAYER1_NODE = 300\n",
    "LAYER1_NODE = 500\n",
    "TRAINING_STEPS = 3000 # number of training iterations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 231,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Activation function: x * sigmoid(x), i.e. the Swish/SiLU activation.\n",
    "def activation(x):\n",
    "  return x * tf.nn.sigmoid(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 232,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Weight initialization: truncated normal with the given stddev.\n",
    "# NOTE(review): appears unused below -- weights1/weights2 call\n",
    "# tf.truncated_normal directly.\n",
    "def initialize(shape, stddev=0.1):\n",
    "  return tf.truncated_normal(shape, stddev=stddev)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 233,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting MNIST_data/train-images-idx3-ubyte.gz\n",
      "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n",
      "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Load the MNIST dataset with one-hot encoded labels\n",
    "data_dir = 'MNIST_data/'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 234,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Base learning rate, supplied via feed_dict at run time\n",
    "init_learning_rate = tf.placeholder(tf.float32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 235,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Input placeholders: flattened images and one-hot labels\n",
    "x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')\n",
    "y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 236,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-epoch exponential learning-rate decay.\n",
    "# 60000 is the MNIST training-set size; steps per epoch = 60000 / batch size\n",
    "# (the batch size is taken from the fed tensor x at run time).\n",
    "epoch_steps = tf.to_int64(tf.div(60000, tf.shape(x)[0]))\n",
    "global_step = tf.train.get_or_create_global_step()\n",
    "current_epoch = global_step//epoch_steps\n",
    "decay_times = current_epoch \n",
    "# learning rate = init_learning_rate * 0.575^(epochs completed)\n",
    "current_learning_rate = tf.multiply(init_learning_rate, tf.pow(0.575, tf.to_float(decay_times)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 237,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hidden-layer parameters\n",
    "weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))    \n",
    "biases1 = tf.Variable(tf.constant(0.01, shape=[LAYER1_NODE]))\n",
    "    \n",
    "# Output-layer parameters\n",
    "weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))\n",
    "biases2 = tf.Variable(tf.constant(0.01, shape=[OUTPUT_NODE]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 238,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Forward pass: one hidden layer with the custom activation.\n",
    "# The output layer has no activation here -- softmax is applied\n",
    "# inside the cross-entropy loss below.\n",
    "layer1 = activation(tf.matmul(x, weights1) + biases1)\n",
    "y = tf.matmul(layer1, weights2)+biases2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 239,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cross-entropy between logits and one-hot labels (softmax applied internally)\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\n",
    "# L2 regularization on the weights (biases excluded)\n",
    "l2_loss = tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2)\n",
    "# Total loss = cross-entropy + weighted L2 penalty\n",
    "total_loss = cross_entropy + 4e-5*l2_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 240,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optimize the total loss with Adam. global_step is incremented on each\n",
    "# update, which drives the per-epoch learning-rate decay defined above.\n",
    "# (The original built two AdamOptimizers and assigned train_step twice;\n",
    "# the first compute_gradients/apply_gradients pair was dead code that\n",
    "# created duplicate Adam slot variables in the graph.)\n",
    "optimizer = tf.train.AdamOptimizer(current_learning_rate)\n",
    "train_step = optimizer.minimize(total_loss, global_step=global_step)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 241,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accuracy = fraction of samples whose arg-max prediction matches the label\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 242,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the session and initialize all variables\n",
    "sess = tf.InteractiveSession()\n",
    "tf.global_variables_initializer().run()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 243,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 0\n",
      "0.238\n",
      "step 100\n",
      "0.9414\n",
      "step 200\n",
      "0.9511\n",
      "step 300\n",
      "0.9437\n",
      "step 400\n",
      "0.9539\n",
      "step 500\n",
      "0.9624\n",
      "step 600\n",
      "0.9567\n",
      "step 700\n",
      "0.9714\n",
      "step 800\n",
      "0.9751\n",
      "step 900\n",
      "0.976\n",
      "step 1000\n",
      "0.9735\n",
      "step 1100\n",
      "0.9733\n",
      "step 1200\n",
      "0.9695\n",
      "step 1300\n",
      "0.9799\n",
      "step 1400\n",
      "0.9788\n",
      "step 1500\n",
      "0.9795\n",
      "step 1600\n",
      "0.9796\n",
      "step 1700\n",
      "0.9805\n",
      "step 1800\n",
      "0.9815\n",
      "step 1900\n",
      "0.9835\n",
      "step 2000\n",
      "0.9825\n",
      "step 2100\n",
      "0.9828\n",
      "step 2200\n",
      "0.9831\n",
      "step 2300\n",
      "0.9828\n",
      "step 2400\n",
      "0.9829\n",
      "step 2500\n",
      "0.9834\n",
      "step 2600\n",
      "0.9838\n",
      "step 2700\n",
      "0.9837\n",
      "step 2800\n",
      "0.9832\n",
      "step 2900\n",
      "0.983\n"
     ]
    }
   ],
   "source": [
    "# Training loop: feed 100-example mini-batches; the learning rate decays\n",
    "# per epoch via global_step (see the schedule cell above).\n",
    "lr = 1e-2  # base learning rate fed into init_learning_rate (loop-invariant)\n",
    "# The test feed dict never changes -- build it once, outside the loop\n",
    "test_data = {x: mnist.test.images, y_: mnist.test.labels}\n",
    "for step in range(TRAINING_STEPS):\n",
    "  xs, ys = mnist.train.next_batch(100)\n",
    "  # Only the train op matters here; the original also fetched four loss\n",
    "  # values that were never used.\n",
    "  sess.run(train_step, feed_dict={x: xs, y_: ys, init_learning_rate: lr})\n",
    "\n",
    "  # Report test-set accuracy every 100 steps\n",
    "  if step % 100 == 0:\n",
    "    print('step %d' % step)\n",
    "    print(sess.run(accuracy, feed_dict=test_data))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
