{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\python\\framework\\dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorboard\\compat\\tensorflow_stub\\dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    }
   ],
   "source": [
    "# Third-party package imports\n",
    "import tensorflow as tf\n",
    "# MNIST data loader (TF1-era helper; deprecated upstream but required by this notebook)\n",
    "from tensorflow.examples.tutorials.mnist import input_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-dae013eb731d>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting .\\train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting .\\train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting .\\t10k-images-idx3-ubyte.gz\n",
      "Extracting .\\t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From E:\\anaconda3\\envs\\tf1.14\\lib\\site-packages\\tensorflow\\contrib\\learn\\python\\learn\\datasets\\mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
     ]
    }
   ],
   "source": [
    "# Download/load the MNIST dataset into the current directory,\n",
    "# with labels encoded as one-hot vectors\n",
    "mnist = input_data.read_data_sets('.', one_hot=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-3-ac9f617128c4>:40: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See `tf.nn.softmax_cross_entropy_with_logits_v2`.\n",
      "\n",
      "WARNING:tensorflow:From <ipython-input-3-ac9f617128c4>:46: arg_max (from tensorflow.python.ops.gen_math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.math.argmax` instead\n"
     ]
    }
   ],
   "source": [
    "# **************** Define the network graph (allocates model variables) ****************\n",
    "# Learning rate, fed at run time so it can be decayed per epoch\n",
    "learning_rate = tf.placeholder(tf.float32)\n",
    "\n",
    "# ************ Input layer ************\n",
    "# Input batch (N x 784; each image flattened to 1 x 784)\n",
    "x = tf.placeholder(tf.float32, [None, 784], name='x')\n",
    "\n",
    "# ************ Input -> hidden weights and bias ************\n",
    "# Weight matrix (784 x 300), sampled from a truncated normal distribution\n",
    "w1 = tf.Variable(tf.truncated_normal([784, 300], stddev=0.1), name='weight1')\n",
    "# Bias vector (1 x 300); small positive init helps ReLU units start active\n",
    "b1 = tf.Variable(tf.zeros([300])+0.1, name='bias1')\n",
    "# L2 regularization term for w1 (currently not added to the loss below)\n",
    "reg1 = tf.contrib.layers.l2_regularizer(0.001)(w1)\n",
    "# Hidden layer 1: ReLU activation, output (N x 300)\n",
    "layer1 = tf.nn.relu(tf.matmul(x, w1) + b1)\n",
    "\n",
    "# ************ Hidden -> output weights and bias ************\n",
    "# Weight matrix (300 x 10), sampled from a truncated normal distribution\n",
    "w2 = tf.Variable(tf.truncated_normal([300, 10]), name='weight2')\n",
    "# Bias vector (1 x 10)\n",
    "b2 = tf.Variable(tf.zeros([10])+0.1, name='bias2')\n",
    "# L2 regularization term for w2 (currently not added to the loss below)\n",
    "reg2 = tf.contrib.layers.l2_regularizer(0.001)(w2)\n",
    "\n",
    "# Raw logits; softmax is applied inside the loss op below\n",
    "prediction = tf.matmul(layer1, w2) + b2\n",
    "\n",
    "# ************ Output layer ************\n",
    "# Expected labels (one-hot, N x 10)\n",
    "y = tf.placeholder(tf.float32, [None, 10], name='y')\n",
    "\n",
    "# Regularization term (disabled)\n",
    "#regulariztion = reg1 + reg2\n",
    "\n",
    "# Objective = cross-entropy loss (+ optional regularization term)\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=prediction)) #+ regulariztion\n",
    "\n",
    "# Optimizer: Adam, minimizing the loss at the fed learning rate\n",
    "train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\n",
    "\n",
    "# Per-sample correctness: tf.argmax returns the index of the max entry per row.\n",
    "# (tf.argmax replaces the deprecated tf.arg_max flagged in this cell's output.)\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))\n",
    "\n",
    "# Accuracy: cast the booleans to float and take the mean\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create a session to run the computation graph\n",
    "sess = tf.Session()\n",
    "# Initialize all model variables in the graph\n",
    "sess.run(tf.global_variables_initializer())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "####### epoch [1] beginning #######\n",
      "learning_rate: 0.001\n",
      "entropy loss: 0.0002081925340462476\n",
      "train result:  1.0\n",
      "test result: 0.9812999963760376\n",
      "\n",
      "####### epoch [2] beginning #######\n",
      "learning_rate: 0.00095\n",
      "entropy loss: 2.169831759601948e-06\n",
      "train result:  1.0\n",
      "test result: 0.9811000227928162\n",
      "\n",
      "####### epoch [3] beginning #######\n",
      "learning_rate: 0.0009025\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9843999743461609\n",
      "\n",
      "####### epoch [4] beginning #######\n",
      "learning_rate: 0.000857375\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.982200026512146\n",
      "\n",
      "####### epoch [5] beginning #######\n",
      "learning_rate: 0.0008145062499999999\n",
      "entropy loss: 1.117583110499254e-07\n",
      "train result:  1.0\n",
      "test result: 0.9825000166893005\n",
      "\n",
      "####### epoch [6] beginning #######\n",
      "learning_rate: 0.0007737809374999998\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9818999767303467\n",
      "\n",
      "####### epoch [7] beginning #######\n",
      "learning_rate: 0.0007350918906249999\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.982699990272522\n",
      "\n",
      "####### epoch [8] beginning #######\n",
      "learning_rate: 0.0006983372960937497\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9836000204086304\n",
      "\n",
      "####### epoch [9] beginning #######\n",
      "learning_rate: 0.0006634204312890623\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9824000000953674\n",
      "\n",
      "####### epoch [10] beginning #######\n",
      "learning_rate: 0.0006302494097246091\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9829000234603882\n",
      "\n",
      "####### epoch [11] beginning #######\n",
      "learning_rate: 0.0005987369392383787\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9804999828338623\n",
      "\n",
      "####### epoch [12] beginning #######\n",
      "learning_rate: 0.0005688000922764596\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9825999736785889\n",
      "\n",
      "####### epoch [13] beginning #######\n",
      "learning_rate: 0.0005403600876626366\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9828000068664551\n",
      "\n",
      "####### epoch [14] beginning #######\n",
      "learning_rate: 0.0005133420832795048\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9832000136375427\n",
      "\n",
      "####### epoch [15] beginning #######\n",
      "learning_rate: 0.00048767497911552955\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9829999804496765\n",
      "\n",
      "####### epoch [16] beginning #######\n",
      "learning_rate: 0.000463291230159753\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9833999872207642\n",
      "\n",
      "####### epoch [17] beginning #######\n",
      "learning_rate: 0.00044012666865176535\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9828000068664551\n",
      "\n",
      "####### epoch [18] beginning #######\n",
      "learning_rate: 0.0004181203352191771\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9825999736785889\n",
      "\n",
      "####### epoch [19] beginning #######\n",
      "learning_rate: 0.0003972143184582182\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9818999767303467\n",
      "\n",
      "####### epoch [20] beginning #######\n",
      "learning_rate: 0.00037735360253530727\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9818000197410583\n",
      "\n",
      "####### epoch [21] beginning #######\n",
      "learning_rate: 0.0003584859224085419\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9817000031471252\n",
      "\n",
      "####### epoch [22] beginning #######\n",
      "learning_rate: 0.0003405616262881148\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9817000031471252\n",
      "\n",
      "####### epoch [23] beginning #######\n",
      "learning_rate: 0.000323533544973709\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9819999933242798\n",
      "\n",
      "####### epoch [24] beginning #######\n",
      "learning_rate: 0.00030735686772502356\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9822999835014343\n",
      "\n",
      "####### epoch [25] beginning #######\n",
      "learning_rate: 0.0002919890243387724\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9821000099182129\n",
      "\n",
      "####### epoch [26] beginning #######\n",
      "learning_rate: 0.00027738957312183375\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9821000099182129\n",
      "\n",
      "####### epoch [27] beginning #######\n",
      "learning_rate: 0.00026352009446574203\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.982200026512146\n",
      "\n",
      "####### epoch [28] beginning #######\n",
      "learning_rate: 0.00025034408974245495\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9822999835014343\n",
      "\n",
      "####### epoch [29] beginning #######\n",
      "learning_rate: 0.00023782688525533216\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9822999835014343\n",
      "\n",
      "####### epoch [30] beginning #######\n",
      "learning_rate: 0.00022593554099256555\n",
      "entropy loss: 0.0\n",
      "train result:  1.0\n",
      "test result: 0.9822999835014343\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# ******************* Feed training data; model training starts here *******************\n",
    "# Mini-batch size for each gradient step\n",
    "batch_size = 64\n",
    "# Train the model for 30 epochs\n",
    "for epoch in range(30):\n",
    "    # Exponentially decay the learning rate each epoch\n",
    "    lr = 0.001 * (0.95**epoch) \n",
    "    print('####### ' + 'epoch [{}] beginning'.format(epoch + 1) + ' #######') \n",
    "    # One epoch = one full pass over the training set.\n",
    "    # NOTE(fix): the original looped num_examples times (one batch per step),\n",
    "    # which made every \"epoch\" ~batch_size full passes over the data.\n",
    "    for step in range(mnist.train.num_examples // batch_size):        \n",
    "        # Fetch the next mini-batch of images and one-hot labels\n",
    "        batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
    "        _, loss = sess.run([train_step, cross_entropy], \n",
    "                           feed_dict={x:batch_x, y:batch_y, learning_rate:lr})\n",
    "    # Report once per epoch\n",
    "    print('learning_rate: {}\\nentropy loss: {}'.format(lr, loss))\n",
    "    # Accuracy on the last training batch only (cheap sanity check)\n",
    "    print('train result: ', sess.run(accuracy, feed_dict={x:batch_x, y:batch_y}))\n",
    "    # Accuracy on the held-out test set\n",
    "    print('test result: {}\\n'.format(\n",
    "        sess.run(accuracy, feed_dict={x:mnist.test.images, y:mnist.test.labels})))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
