{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-1-9cc890f7026c>:3: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /home/liu/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /home/liu/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /home/liu/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/liu/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ./t10k-images-idx3-ubyte.gz\n",
      "Extracting ./t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /home/liu/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From <ipython-input-1-9cc890f7026c>:40: arg_max (from tensorflow.python.ops.gen_math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.math.argmax` instead\n",
      "##########\n",
      "step[500],entropy loss:[0.16802461445331573]，l2_loss_value:[976.8212890625], total_loss_value:[0.23640210926532745]\n",
      "0.97\n",
      "0.961\n",
      "##########\n",
      "step[1000],entropy loss:[0.08009503036737442]，l2_loss_value:[969.7951049804688], total_loss_value:[0.1479806900024414]\n",
      "1.0\n",
      "0.9706\n",
      "##########\n",
      "step[1500],entropy loss:[0.03803849592804909]，l2_loss_value:[954.5576782226562], total_loss_value:[0.10485753417015076]\n",
      "1.0\n",
      "0.9762\n",
      "##########\n",
      "step[2000],entropy loss:[0.03617660701274872]，l2_loss_value:[939.5559692382812], total_loss_value:[0.10194552689790726]\n",
      "1.0\n",
      "0.9785\n",
      "##########\n",
      "step[2500],entropy loss:[0.018155192956328392]，l2_loss_value:[930.1177978515625], total_loss_value:[0.08326344192028046]\n",
      "1.0\n",
      "0.9798\n",
      "##########\n",
      "step[3000],entropy loss:[0.017104798927903175]，l2_loss_value:[920.8174438476562], total_loss_value:[0.08156201988458633]\n",
      "1.0\n",
      "0.981\n",
      "##########\n",
      "step[3500],entropy loss:[0.007370267994701862]，l2_loss_value:[913.2306518554688], total_loss_value:[0.07129641622304916]\n",
      "1.0\n",
      "0.9804\n",
      "##########\n",
      "step[4000],entropy loss:[0.02084718644618988]，l2_loss_value:[909.9771728515625], total_loss_value:[0.08454558998346329]\n",
      "1.0\n",
      "0.9805\n"
     ]
    }
   ],
   "source": [
    "import tensorflow as tf \n",
    "from tensorflow.examples.tutorials.mnist import input_data#导入数据\n",
    "mnist=input_data.read_data_sets('.',one_hot=True )\n",
    "\n",
    "\n",
    "def initialize(shape, stddev=0.1):\n",
    "    return tf.truncated_normal(shape, stddev=0.1)#参数 stddev 用于设置正态分布被截断前的标准差\n",
    "\n",
    "\n",
    "learning_rate=tf.placeholder(tf.float32)#指定超参数\n",
    "\n",
    "\n",
    "L1_units_count = 300#第一层权重的维数\n",
    "\n",
    "x=tf.placeholder(tf.float32,[None,784],name='x')#模型输入，[bachsize,维度]\n",
    "W_1=tf.Variable(initialize([784,L1_units_count],stddev=0.05),name='weight_1')#第一层权重,从正态分布中截取一部分数据来生成指定形状的值\n",
    "b_1=tf.Variable(tf.zeros([L1_units_count]),name='b_1')#偏置\n",
    "\n",
    "logits_1=tf.matmul(x,W_1)+b_1#输出logits\n",
    "output_1 = logits_1*tf.nn.sigmoid(logits_1)#激活函数为\n",
    "\n",
    "L2_units_count = 10#第二层权重的维数\n",
    "\n",
    "W_2=tf.Variable(initialize([L1_units_count,L2_units_count],\n",
    "                           stddev=0.05),name='weight_2')#第二层权重,从正态分布中截取一部分数据来生成指定形状的值\n",
    "b_2=tf.Variable(tf.zeros([L2_units_count]),name='b_2')#偏置\n",
    "\n",
    "logits_2=tf.matmul(output_1,W_2)+b_2#模型输出\n",
    "output_2=logits_2\n",
    "\n",
    "\n",
    "\n",
    "logits=logits_2\n",
    "y=tf.placeholder(tf.float32,[None,10])#真实值\n",
    "cross_entropy= tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y,logits=logits))#交叉熵损失\n",
    "l2_loss = tf.nn.l2_loss(W_1) + tf.nn.l2_loss(W_2)#加入l2损失\n",
    "total_loss = cross_entropy + 7e-5*l2_loss\n",
    "train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)#每运行一次，前向传播，反向传播一次,以最小交叉熵进行优化\n",
    "correct_prediction=tf.equal(tf.arg_max(y,1),tf.arg_max(logits,1))#预测是否正确，布尔型\n",
    "accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))#正确率\n",
    "\n",
    "\n",
    "sess=tf.Session()#生成计算图\n",
    "sess.run(tf.global_variables_initializer())#初始化\n",
    "\n",
    "\n",
    "#分批次训练\n",
    "lr=1#学习率\n",
    "for step in range(4000):\n",
    "    if step<1800:\n",
    "        lr = 1\n",
    "    elif step<3400:\n",
    "        lr = 0.3\n",
    "    else:\n",
    "        lr = 0.1\n",
    "\n",
    "    batch_x,batch_y=mnist.train.next_batch(100)#增大数据集有利于正确率提高\n",
    "    _, loss, l2_loss_value, total_loss_value = sess.run(\n",
    "               [train_step, cross_entropy, l2_loss, total_loss], \n",
    "               feed_dict={x: batch_x, y: batch_y, learning_rate:lr})\n",
    "\n",
    "    if(step+1)%500==0:#输出过程各参数变化\n",
    "        print('#'*10)\n",
    "        print('step[{}],entropy loss:[{}]，l2_loss_value:[{}], total_loss_value:[{}]'.format(step+1,loss,l2_loss_value,total_loss_value))\n",
    "        print(sess.run(accuracy,feed_dict={x:batch_x,y:batch_y}))\n",
    "        print(sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 如何修改隐层数量，修改后会起到什么样的效果\n",
    "增加代码：\n",
    "第二层权重：W_2=tf.Variable(initialize([L1_units_count,L2_units_count],stddev=0.05),name='weight_2')  \n",
    "第二层偏置：b_2=tf.Variable(tf.zeros([L2_units_count]),name='b_2')  \n",
     "第二层输出：logits_2=tf.matmul(output_1,W_2)+b_2;\n",
    "output_2=logits_2  \n",
    "多个隐藏层其实是对输入特征多层次的抽象，最终的目的就是为了更好的线性划分不同类型的数据。\n",
    "\n",
    "\n",
    "### 如何修改神经元个数，起到了什么样的效果\n",
    "\n",
     "通过调节权重矩阵的维数，神经元数量越多，线性划分越多，而过多就会导致过拟合。\n",
    "\n",
    "### 如何在模型中添加L1/L2正则化，正则化起什么作⽤\n",
    "\n",
    "在计算损失时加l1/l2损失，再对其取一个权重\n",
    "l2_loss = tf.nn.l2_loss(W_1) + tf.nn.l2_loss(W_2)#加入l2损失\n",
    "total_loss = cross_entropy + 7e-5*l2_loss  \n",
    "起到消除过拟合的作用，防止数据在训练集非常准确，而测试集结果较差。\n",
    "\n",
    "### 使⽤不同的初始化⽅式对模型有什么影响\n",
    "\n",
    "初始化对训练深度神经网络的收敛性有重要影响，简单的初始化方案可以加速训练，结果精确。过大的初始化值会导致梯度爆炸，初始化值太小会导致梯度消失。  \n",
    "tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None),用于从服从指定正态分布的数值中取出随机数。 shape表示生成张量的维度，mean是均值，stddev是标准差。  \n",
    "在此例中标准差stddev=0.05，取得较小，可以提高模型正确率。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
