{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting MNIST_data\\train-images-idx3-ubyte.gz\n",
      "Extracting MNIST_data\\train-labels-idx1-ubyte.gz\n",
      "Extracting MNIST_data\\t10k-images-idx3-ubyte.gz\n",
      "Extracting MNIST_data\\t10k-labels-idx1-ubyte.gz\n",
      "in iteration 0the test accuracy is : 0.9036the train accuracy is : 0.9007273\n",
      "in iteration 1the test accuracy is : 0.922the train accuracy is : 0.9190364\n",
      "in iteration 2the test accuracy is : 0.931the train accuracy is : 0.9305818\n",
      "in iteration 3the test accuracy is : 0.9379the train accuracy is : 0.93734545\n",
      "in iteration 4the test accuracy is : 0.9412the train accuracy is : 0.9438546\n",
      "in iteration 5the test accuracy is : 0.941the train accuracy is : 0.9469636\n",
      "in iteration 6the test accuracy is : 0.9456the train accuracy is : 0.95034546\n",
      "in iteration 7the test accuracy is : 0.9456the train accuracy is : 0.95305455\n",
      "in iteration 8the test accuracy is : 0.9495the train accuracy is : 0.9562182\n",
      "in iteration 9the test accuracy is : 0.9531the train accuracy is : 0.95878184\n",
      "in iteration 10the test accuracy is : 0.9529the train accuracy is : 0.9601091\n",
      "in iteration 11the test accuracy is : 0.9573the train accuracy is : 0.96378183\n",
      "in iteration 12the test accuracy is : 0.9554the train accuracy is : 0.96385455\n",
      "in iteration 13the test accuracy is : 0.9547the train accuracy is : 0.9658727\n",
      "in iteration 14the test accuracy is : 0.958the train accuracy is : 0.9670182\n",
      "in iteration 15the test accuracy is : 0.959the train accuracy is : 0.9675091\n",
      "in iteration 16the test accuracy is : 0.9594the train accuracy is : 0.96976364\n",
      "in iteration 17the test accuracy is : 0.9604the train accuracy is : 0.9698182\n",
      "in iteration 18the test accuracy is : 0.9593the train accuracy is : 0.9712909\n",
      "in iteration 19the test accuracy is : 0.9606the train accuracy is : 0.9720182\n",
      "in iteration 20the test accuracy is : 0.9604the train accuracy is : 0.97334546\n",
      "in iteration 21the test accuracy is : 0.9626the train accuracy is : 0.9740546\n",
      "in iteration 22the test accuracy is : 0.9647the train accuracy is : 0.9753091\n",
      "in iteration 23the test accuracy is : 0.9626the train accuracy is : 0.97572726\n",
      "in iteration 24the test accuracy is : 0.9644the train accuracy is : 0.9756909\n",
      "in iteration 25the test accuracy is : 0.9667the train accuracy is : 0.97712725\n",
      "in iteration 26the test accuracy is : 0.9641the train accuracy is : 0.9768909\n",
      "in iteration 27the test accuracy is : 0.9644the train accuracy is : 0.9777273\n",
      "in iteration 28the test accuracy is : 0.9671the train accuracy is : 0.9780727\n",
      "in iteration 29the test accuracy is : 0.9661the train accuracy is : 0.9795273\n",
      "in iteration 30the test accuracy is : 0.967the train accuracy is : 0.97972727\n",
      "in iteration 31the test accuracy is : 0.9665the train accuracy is : 0.9799273\n",
      "in iteration 32the test accuracy is : 0.9671the train accuracy is : 0.98103637\n",
      "in iteration 33the test accuracy is : 0.966the train accuracy is : 0.97998184\n",
      "in iteration 34the test accuracy is : 0.9674the train accuracy is : 0.9816909\n",
      "in iteration 35the test accuracy is : 0.9688the train accuracy is : 0.98147273\n",
      "in iteration 36the test accuracy is : 0.968the train accuracy is : 0.9826364\n",
      "in iteration 37the test accuracy is : 0.9683the train accuracy is : 0.98196363\n",
      "in iteration 38the test accuracy is : 0.9708the train accuracy is : 0.98252726\n",
      "in iteration 39the test accuracy is : 0.9687the train accuracy is : 0.9832\n",
      "in iteration 40the test accuracy is : 0.9692the train accuracy is : 0.9834545\n",
      "in iteration 41the test accuracy is : 0.9675the train accuracy is : 0.98383635\n",
      "in iteration 42the test accuracy is : 0.9685the train accuracy is : 0.98385453\n",
      "in iteration 43the test accuracy is : 0.969the train accuracy is : 0.9846182\n",
      "in iteration 44the test accuracy is : 0.9701the train accuracy is : 0.9851091\n",
      "in iteration 45the test accuracy is : 0.9719the train accuracy is : 0.98465455\n",
      "in iteration 46the test accuracy is : 0.9713the train accuracy is : 0.98594546\n",
      "in iteration 47the test accuracy is : 0.9717the train accuracy is : 0.9864727\n",
      "in iteration 48the test accuracy is : 0.9705the train accuracy is : 0.9855091\n"
     ]
    }
   ],
   "source": [
    "# Regularize the network by randomly dropping hidden-layer neurons\n",
    "# (dropout) during training.\n",
    "\n",
    "# Dataset: MNIST -- 55k training images, 10k test images, read via\n",
    "# tensorflow.examples.tutorials.mnist (described on Yann LeCun's website).\n",
    "# Input: 28*28 = 784 pixel features; output: one-hot digits 0-9.\n",
    "\n",
    "# Tunable parameters:\n",
    "# --------\n",
    "# batch_size, initial weights, learning_rate, epoch_n, train_keep_prob\n",
    "# --------\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "mnist = input_data.read_data_sets(\"MNIST_data\", one_hot = True)\n",
    "\n",
    "# Mini-batch size and the number of batches per epoch.\n",
    "batch_size = 100\n",
    "batch_n = mnist.train.num_examples // batch_size\n",
    "\n",
    "# Placeholders fed at run time: x holds the flattened images (784 columns),\n",
    "# y holds the one-hot labels (10 columns).\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "y = tf.placeholder(tf.float32, [None, 10])\n",
    "# keep_prob is a placeholder so dropout can be active while training (0.8)\n",
    "# and disabled while measuring accuracy (1.0).  The original hard-coded 0.8,\n",
    "# which also applied dropout at evaluation time and biased the reported\n",
    "# accuracies.\n",
    "keep_prob = tf.placeholder(tf.float32)\n",
    "\n",
    "# Weights start as truncated normals (stddev 0.1), biases at 0.1.\n",
    "\n",
    "# Hidden layer 1: 784 -> 2000, tanh activation, then dropout.\n",
    "num_L1 = 2000\n",
    "weight_L1 = tf.Variable(tf.truncated_normal([784, num_L1], stddev = 0.1))\n",
    "bias_L1 = tf.Variable(tf.zeros([num_L1]) + 0.1)\n",
    "a_L1 = tf.matmul(x, weight_L1) + bias_L1\n",
    "z_L1 = tf.nn.tanh(a_L1)\n",
    "z_dropout_L1 = tf.nn.dropout(z_L1, keep_prob)\n",
    "\n",
    "# Hidden layer 2: 2000 -> 2000.\n",
    "num_L2 = 2000\n",
    "weight_L2 = tf.Variable(tf.truncated_normal([num_L1, num_L2], stddev = 0.1))\n",
    "bias_L2 = tf.Variable(tf.zeros([num_L2]) + 0.1)\n",
    "a_L2 = tf.matmul(z_dropout_L1, weight_L2) + bias_L2\n",
    "z_L2 = tf.nn.tanh(a_L2)\n",
    "z_dropout_L2 = tf.nn.dropout(z_L2, keep_prob)\n",
    "\n",
    "# Hidden layer 3: 2000 -> 1000.\n",
    "num_L3 = 1000\n",
    "weight_L3 = tf.Variable(tf.truncated_normal([num_L2, num_L3], stddev = 0.1))\n",
    "bias_L3 = tf.Variable(tf.zeros([num_L3]) + 0.1)\n",
    "a_L3 = tf.matmul(z_dropout_L2, weight_L3) + bias_L3\n",
    "z_L3 = tf.nn.tanh(a_L3)\n",
    "z_dropout_L3 = tf.nn.dropout(z_L3, keep_prob)\n",
    "\n",
    "# Output layer: 1000 -> 10.  `a` holds the raw logits; y_head is the softmax\n",
    "# class-probability vector used for prediction.\n",
    "num_y = 10\n",
    "weight_L4 = tf.Variable(tf.truncated_normal([num_L3, num_y], stddev = 0.1))\n",
    "bias_L4 = tf.Variable(tf.zeros([num_y]) + 0.1)\n",
    "a = tf.matmul(z_dropout_L3, weight_L4) + bias_L4\n",
    "y_head = tf.nn.softmax(a)\n",
    "\n",
    "\n",
    "# Cross-entropy loss minimized with plain gradient descent.\n",
    "# NOTE: softmax_cross_entropy_with_logits expects the raw logits `a`; the\n",
    "# original passed the already-softmaxed y_head, applying softmax twice and\n",
    "# flattening the gradients.\n",
    "learning_rate = 0.2\n",
    "loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = a))\n",
    "optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
    "train = optimizer.minimize(loss)\n",
    "\n",
    "init = tf.global_variables_initializer()\n",
    "correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_head, 1)) # tf.argmax(..., 1) gives the class id per row\n",
    "correction = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # cast bool -> float; the mean is the accuracy\n",
    "\n",
    "# Run epoch_n epochs of mini-batch gradient descent; after each epoch report\n",
    "# test and train accuracy with dropout disabled (keep_prob = 1.0).\n",
    "epoch_n = 50\n",
    "train_keep_prob = 0.8\n",
    "with tf.Session() as session:\n",
    "    session.run(init)\n",
    "    for step in range(epoch_n):\n",
    "        for batch in range(batch_n):\n",
    "            batch_x, batch_y = mnist.train.next_batch(batch_size)\n",
    "            session.run(train, feed_dict= {x: batch_x, y: batch_y, keep_prob: train_keep_prob}) # one SGD step\n",
    "        test_corr = session.run(correction, feed_dict= {x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0})\n",
    "        train_corr = session.run(correction, feed_dict= {x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0})\n",
    "        print(\"in iteration \" + str(step) + \" the test accuracy is : \" + str(test_corr) + \n",
    "              \" the train accuracy is : \" + str(train_corr))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "kernel\n"
     ]
    }
   ],
   "source": [
    "# Minimal sanity check that the notebook kernel is running.\n",
    "print(\"kernel\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
