{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "# Python 2/3 compatibility shims, kept from the original TF tutorial script;\n",
    "# they are no-ops on Python 3.\n",
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "# NOTE(review): argparse and sys are never used anywhere in this notebook; they\n",
    "# survive from the original command-line script and could be dropped.\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "import tensorflow as tf\n",
    "# TF 1.x-only helper for downloading/reading MNIST (deprecated upstream).\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "# Silence ALL warnings to hide tf.contrib deprecation noise.\n",
    "# NOTE(review): this also hides genuinely useful warnings; a narrower filter\n",
    "# (e.g. category=DeprecationWarning) would be safer.\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们在这里调用系统提供的 MNIST 数据函数为我们读入数据，如果没有下载的话则进行下载。\n",
    "<font style='color:red'>注意修改目录</font>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-6aa352baaeba>:4: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /root/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /root/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./mnist/input_data/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /root/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./mnist/input_data/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /root/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ./mnist/input_data/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./mnist/input_data/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /root/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Import data: read_data_sets downloads the four MNIST gzip archives into\n",
    "# data_dir on first use, then parses them; one_hot=True turns each label into\n",
    "# a length-10 one-hot vector (required by the softmax cross-entropy loss below).\n",
    "data_dir = './mnist/input_data'\n",
    "# Alternative absolute path from the original author's machine (see the markdown\n",
    "# note above about adjusting the directory):\n",
    "# data_dir ='/home/ice-melt/csdn/06/mnist/input_data'\n",
    "# NOTE(review): read_data_sets is deprecated in TF 1.x (see the warnings emitted\n",
    "# below); tf.keras.datasets.mnist / tf.data are the supported replacements.\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "为方便调试,将变量提取出来"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters, hoisted into one cell so they are easy to tweak.\n",
    "nodeNum1 = 512  # units in the first hidden layer\n",
    "nodeNum2 = 256  # units in the second hidden layer\n",
    "epoch = 51 # how many epochs to train for\n",
    "batch_size = 100  # examples per gradient-descent step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "神经元数目512,256,准确率0.9649,损失1.698668,学习率0.500000\n",
      "神经元数目512,256,准确率0.9678,损失1.072912,学习率0.475000\n",
      "神经元数目512,256,准确率0.9674,损失0.726461,学习率0.451250\n",
      "神经元数目512,256,准确率0.9714,损失0.505144,学习率0.428688\n",
      "神经元数目512,256,准确率0.9770,损失0.365389,学习率0.407253\n",
      "神经元数目512,256,准确率0.9770,损失0.287610,学习率0.386890\n",
      "神经元数目512,256,准确率0.9790,损失0.229997,学习率0.367546\n",
      "神经元数目512,256,准确率0.9794,损失0.194583,学习率0.349169\n",
      "神经元数目512,256,准确率0.9798,损失0.170715,学习率0.331710\n",
      "神经元数目512,256,准确率0.9792,损失0.161182,学习率0.315125\n",
      "神经元数目512,256,准确率0.9804,损失0.144558,学习率0.299368\n",
      "神经元数目512,256,准确率0.9791,损失0.137891,学习率0.284400\n",
      "神经元数目512,256,准确率0.9803,损失0.132394,学习率0.270180\n",
      "神经元数目512,256,准确率0.9803,损失0.123340,学习率0.256671\n",
      "神经元数目512,256,准确率0.9825,损失0.117105,学习率0.243837\n",
      "神经元数目512,256,准确率0.9836,损失0.111565,学习率0.231646\n",
      "神经元数目512,256,准确率0.9811,损失0.112407,学习率0.220063\n",
      "神经元数目512,256,准确率0.9838,损失0.104784,学习率0.209060\n",
      "神经元数目512,256,准确率0.9818,损失0.106292,学习率0.198607\n",
      "神经元数目512,256,准确率0.9822,损失0.107904,学习率0.188677\n",
      "神经元数目512,256,准确率0.9833,损失0.097717,学习率0.179243\n",
      "神经元数目512,256,准确率0.9827,损失0.098396,学习率0.170281\n",
      "神经元数目512,256,准确率0.9807,损失0.106838,学习率0.161767\n",
      "神经元数目512,256,准确率0.9840,损失0.095484,学习率0.153678\n",
      "神经元数目512,256,准确率0.9842,损失0.091032,学习率0.145995\n",
      "神经元数目512,256,准确率0.9827,损失0.093039,学习率0.138695\n",
      "神经元数目512,256,准确率0.9837,损失0.091660,学习率0.131760\n",
      "神经元数目512,256,准确率0.9842,损失0.086131,学习率0.125172\n",
      "神经元数目512,256,准确率0.9837,损失0.088038,学习率0.118913\n",
      "神经元数目512,256,准确率0.9846,损失0.086571,学习率0.112968\n",
      "神经元数目512,256,准确率0.9844,损失0.087266,学习率0.107319\n",
      "神经元数目512,256,准确率0.9843,损失0.087727,学习率0.101953\n",
      "神经元数目512,256,准确率0.9825,损失0.092793,学习率0.096856\n",
      "神经元数目512,256,准确率0.9850,损失0.084176,学习率0.092013\n",
      "神经元数目512,256,准确率0.9838,损失0.084549,学习率0.087412\n",
      "神经元数目512,256,准确率0.9840,损失0.084142,学习率0.083042\n",
      "神经元数目512,256,准确率0.9850,损失0.082054,学习率0.078890\n",
      "神经元数目512,256,准确率0.9847,损失0.081654,学习率0.074945\n",
      "神经元数目512,256,准确率0.9836,损失0.082729,学习率0.071198\n",
      "神经元数目512,256,准确率0.9845,损失0.081735,学习率0.067638\n",
      "神经元数目512,256,准确率0.9846,损失0.082068,学习率0.064256\n",
      "神经元数目512,256,准确率0.9841,损失0.082766,学习率0.061043\n",
      "神经元数目512,256,准确率0.9845,损失0.081817,学习率0.057991\n",
      "神经元数目512,256,准确率0.9849,损失0.080833,学习率0.055092\n",
      "神经元数目512,256,准确率0.9842,损失0.081835,学习率0.052337\n",
      "神经元数目512,256,准确率0.9846,损失0.080251,学习率0.049720\n",
      "神经元数目512,256,准确率0.9849,损失0.080451,学习率0.047234\n",
      "神经元数目512,256,准确率0.9849,损失0.080218,学习率0.044872\n",
      "神经元数目512,256,准确率0.9847,损失0.079008,学习率0.042629\n",
      "神经元数目512,256,准确率0.9842,损失0.079847,学习率0.040497\n",
      "神经元数目512,256,准确率0.9841,损失0.079439,学习率0.038472\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "A neural-network model with two hidden layers.\n",
    "Things to try changing:\n",
    "1. the number of units in each hidden layer\n",
    "2. the activation function of each layer (sigmoid, relu, tanh)\n",
    "3. the weight-initialisation scheme\n",
    "4. the learning rate\n",
    "'''\n",
    "x = tf.placeholder(tf.float32, [None, 784])   # flattened 28x28 input images\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])   # one-hot ground-truth labels\n",
    "lr = tf.Variable(0.01, dtype=tf.float32)      # initial value irrelevant: tf.assign overwrites it every epoch\n",
    "\n",
    "# Hidden layer 1: 784 -> nodeNum1, ReLU activation; small random weights (fixed\n",
    "# seed for repeatability) and biases started at 0.1.\n",
    "W1 = tf.Variable(tf.random_normal([784, nodeNum1], stddev=0.1,seed=1))\n",
    "b1 = tf.Variable(tf.zeros([nodeNum1])+0.1)\n",
    "L1 = tf.nn.relu(tf.matmul(x, W1) + b1)\n",
    "\n",
    "# Hidden layer 2: nodeNum1 -> nodeNum2, ReLU.\n",
    "W2 = tf.Variable(tf.random_normal([nodeNum1, nodeNum2], stddev=0.1,seed=1))\n",
    "b2 = tf.Variable(tf.zeros([nodeNum2])+0.1)\n",
    "L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)\n",
    "\n",
    "# Output layer: nodeNum2 -> 10 raw logits (softmax is applied inside the loss op).\n",
    "W3 = tf.Variable(tf.random_normal([nodeNum2, 10], stddev=0.1,seed=1))\n",
    "b3 = tf.Variable(tf.zeros([10])+0.1)\n",
    "\n",
    "y = tf.matmul(L2, W3) + b3\n",
    "\n",
    "\n",
    "_lambda = 0.001   # L2 regularisation strength\n",
    "# Total loss: softmax cross-entropy plus L2 penalties on the hidden-layer\n",
    "# weight matrices only (biases are excluded).\n",
    "# NOTE(review): W3 is left unregularised -- the original comment suggests only\n",
    "# 'network edge weights' are penalised, but worth confirming this is intended.\n",
    "cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y)\\\n",
    "                          + tf.contrib.layers.l2_regularizer(_lambda)(W1)\\\n",
    "                          + tf.contrib.layers.l2_regularizer(_lambda)(W2))\n",
    "\n",
    "# One gradient-descent training step minimising the loss.\n",
    "train_step = tf.train.GradientDescentOptimizer(lr).minimize(cross_entropy)\n",
    "\n",
    "# Per-example booleans: predicted class == true class.\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n",
    "\n",
    "# Accuracy = mean of those booleans.\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "\n",
    "\n",
    "init_op = tf.global_variables_initializer()\n",
    "\n",
    "\n",
    "# Number of mini-batches per training epoch.\n",
    "n_batch = mnist.train.num_examples // batch_size\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(init_op)\n",
    "    # Train for 'epoch' epochs.\n",
    "    for e in range(epoch):\n",
    "        # Decay the learning rate: 0.5 * 0.95**e, large at first then shrinking.\n",
    "        sess.run(tf.assign(lr, 0.5 * (0.95 ** e)))\n",
    "        for batch in range(n_batch):\n",
    "            # Fetch one mini-batch and take a single optimisation step.\n",
    "            batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n",
    "            sess.run(train_step, feed_dict={x:batch_xs, y_:batch_ys})\n",
    "        # After each epoch, report test-set accuracy, loss and the current LR.\n",
    "        learning_rate = sess.run(lr)\n",
    "        acc,loss = sess.run([accuracy,cross_entropy], feed_dict={x:mnist.test.images, y_:mnist.test.labels})\n",
    "        print('神经元数目%s,%s,准确率%.4f,损失%f,学习率%f'%(nodeNum1,nodeNum2,acc,loss,learning_rate)) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
