{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "\"\"\"A very simple MNIST classifier.\n",
    "See extensive documentation at\n",
    "https://www.tensorflow.org/get_started/mnist/beginners\n",
    "\"\"\"\n",
    "import argparse\n",
    "import sys\n",
    "\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "from tensorflow.contrib import layers\n",
    "import tensorflow as tf\n",
     "import numpy as np\n",
     "# NOTE(review): argparse, sys, layers and FLAGS are unused in this notebook\n",
     "FLAGS = None\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "我们在这里调用系统提供的MNIST数据读取函数为我们读入数据，如果本地没有数据的话则自动进行下载。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./mnist\\train-images-idx3-ubyte.gz\n",
      "Extracting ./mnist\\train-labels-idx1-ubyte.gz\n",
      "Extracting ./mnist\\t10k-images-idx3-ubyte.gz\n",
      "Extracting ./mnist\\t10k-labels-idx1-ubyte.gz\n"
     ]
    }
   ],
   "source": [
    "# Import data\n",
    "data_dir = './mnist'\n",
    "mnist = input_data.read_data_sets(data_dir, one_hot=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "全连接3层神经网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "num_neuron_hidden_layer1=600\n",
    "num_neuron_hidden_layer2=100\n",
    "# Create the model\n",
    "x = tf.placeholder(tf.float32, [None, 784])\n",
    "\n",
     "# hidden layer 1: num_neuron_hidden_layer1 (600) neurons\n",
    "W1 = tf.Variable(tf.random_normal(\n",
    "    [784, num_neuron_hidden_layer1], \n",
    "    stddev=1/np.sqrt(num_neuron_hidden_layer1)))\n",
    "b1 = tf.Variable(tf.zeros([num_neuron_hidden_layer1]))\n",
     "# hidden layer 1 output, shape [None, 600]\n",
    "y1 = tf.nn.relu(tf.matmul(x, W1) + b1)\n",
    "\n",
    "\n",
     "# hidden layer2\n",
     "# NOTE(review): the stddev below scales by 1/sqrt(num_neuron_hidden_layer2) (fan-out);\n",
     "# the conventional scaling is 1/sqrt(fan-in) = 1/sqrt(num_neuron_hidden_layer1) -- confirm intent\n",
    "W2 = tf.Variable(tf.random_normal(\n",
    "    [num_neuron_hidden_layer1,num_neuron_hidden_layer2], \n",
    "    stddev=1/np.sqrt(num_neuron_hidden_layer2)))\n",
    "b2 = tf.Variable(tf.zeros([num_neuron_hidden_layer2]))\n",
    "y2 = tf.nn.sigmoid(tf.matmul(y1, W2) + b2)\n",
    "\n",
    "\n",
    "# output layer\n",
    "W_out = tf.Variable(tf.random_normal([num_neuron_hidden_layer2, 10]))\n",
    "b_out = tf.Variable(tf.zeros([10]))\n",
    "y_out = tf.matmul(y2, W_out) + b_out"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "定义我们的ground truth 占位符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Define loss and optimizer\n",
    "y_ = tf.placeholder(tf.float32, [None, 10])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们计算交叉熵，注意这里不要使用注释中的手动计算方式，而是使用系统函数。\n",
    "另一个注意点就是，softmax_cross_entropy_with_logits的logits参数是**未经激活的wx+b**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# The raw formulation of cross-entropy,\n",
    "#\n",
    "#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),\n",
    "#                                 reduction_indices=[1]))\n",
    "#\n",
    "# can be numerically unstable.\n",
    "#\n",
    "# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw\n",
    "# outputs of 'y', and then average across the batch.\n",
    "cross_entropy = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_out))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "生成一个训练step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)\n",
    "\n",
    "sess = tf.Session()\n",
    "init_op = tf.global_variables_initializer()\n",
    "sess.run(init_op)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "测试模型性能"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def test(X_,label_,sess_):\n",
    "    correct_prediction = tf.equal(tf.argmax(y_out, 1), tf.argmax(y_, 1))\n",
    "    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "    return sess_.run([accuracy,cross_entropy], feed_dict={x: X_,\n",
    "                                          y_: label_})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "在这里我们仍然调用系统提供的读取函数，为我们取得mini-batch。\n",
     "每个epoch内部循环60次、每次取1000个样本的mini-batch（约覆盖一遍全部55000张训练图片），共运行1000个epochs对权重进行优化。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch:10,loss:0.13439905643463135,train accuracy:0.963309109210968,test_accuracy:0.9593999981880188\n",
      "Epoch:20,loss:0.08560342341661453,train accuracy:0.9781818389892578,test_accuracy:0.9693999886512756\n",
      "Epoch:30,loss:0.06267009675502777,train accuracy:0.984745442867279,test_accuracy:0.9740999937057495\n",
      "Epoch:40,loss:0.04792417958378792,train accuracy:0.9894000291824341,test_accuracy:0.9761999845504761\n",
      "Epoch:50,loss:0.03793219104409218,train accuracy:0.9922000169754028,test_accuracy:0.9768000245094299\n",
      "Epoch:60,loss:0.030963635072112083,train accuracy:0.9941999912261963,test_accuracy:0.9781000018119812\n",
      "Epoch:70,loss:0.025687681511044502,train accuracy:0.9958727359771729,test_accuracy:0.9790999889373779\n",
      "Epoch:80,loss:0.02172309160232544,train accuracy:0.9970727562904358,test_accuracy:0.9794999957084656\n",
      "Epoch:90,loss:0.018467895686626434,train accuracy:0.9979636073112488,test_accuracy:0.9797999858856201\n",
      "Epoch:100,loss:0.01590953953564167,train accuracy:0.9987999796867371,test_accuracy:0.9797000288963318\n",
      "Epoch:110,loss:0.013873571529984474,train accuracy:0.9991636276245117,test_accuracy:0.9800000190734863\n",
      "Epoch:120,loss:0.012259949930012226,train accuracy:0.9993818402290344,test_accuracy:0.9797000288963318\n",
      "Epoch:130,loss:0.010990386828780174,train accuracy:0.9994909167289734,test_accuracy:0.9797999858856201\n",
      "Epoch:140,loss:0.009811554104089737,train accuracy:0.9996545314788818,test_accuracy:0.980400025844574\n",
      "Epoch:150,loss:0.008847549557685852,train accuracy:0.9997090697288513,test_accuracy:0.9800999760627747\n",
      "Epoch:160,loss:0.008037133142352104,train accuracy:0.9997454285621643,test_accuracy:0.980400025844574\n",
      "Epoch:170,loss:0.007344988640397787,train accuracy:0.9998000264167786,test_accuracy:0.980400025844574\n",
      "Epoch:180,loss:0.006762169301509857,train accuracy:0.9998727440834045,test_accuracy:0.9800999760627747\n",
      "Epoch:190,loss:0.006218524184077978,train accuracy:0.999927282333374,test_accuracy:0.980400025844574\n",
      "Epoch:200,loss:0.005768400616943836,train accuracy:0.9999454617500305,test_accuracy:0.9804999828338623\n",
      "Epoch:210,loss:0.005361239425837994,train accuracy:0.999963641166687,test_accuracy:0.9807999730110168\n",
      "Epoch:220,loss:0.004988153465092182,train accuracy:0.999963641166687,test_accuracy:0.9807000160217285\n",
      "Epoch:230,loss:0.004664012696594,train accuracy:0.999963641166687,test_accuracy:0.98089998960495\n",
      "Epoch:240,loss:0.004389328882098198,train accuracy:0.999963641166687,test_accuracy:0.9805999994277954\n",
      "Epoch:250,loss:0.0041342079639434814,train accuracy:0.999963641166687,test_accuracy:0.9807999730110168\n",
      "Epoch:260,loss:0.003910370171070099,train accuracy:0.9999818205833435,test_accuracy:0.98089998960495\n",
      "Epoch:270,loss:0.0037072584964334965,train accuracy:0.9999818205833435,test_accuracy:0.98089998960495\n",
      "Epoch:280,loss:0.003509826958179474,train accuracy:1.0,test_accuracy:0.9810000061988831\n",
      "Epoch:290,loss:0.003344405209645629,train accuracy:1.0,test_accuracy:0.98089998960495\n",
      "Epoch:300,loss:0.003187777241691947,train accuracy:1.0,test_accuracy:0.9810000061988831\n",
      "Epoch:310,loss:0.003042494412511587,train accuracy:1.0,test_accuracy:0.9807999730110168\n",
      "Epoch:320,loss:0.0029132370837032795,train accuracy:1.0,test_accuracy:0.9807999730110168\n",
      "Epoch:330,loss:0.0027895213570445776,train accuracy:1.0,test_accuracy:0.98089998960495\n",
      "Epoch:340,loss:0.0026761575136333704,train accuracy:1.0,test_accuracy:0.9810000061988831\n",
      "Epoch:350,loss:0.002568622352555394,train accuracy:1.0,test_accuracy:0.9811000227928162\n",
      "Epoch:360,loss:0.0024739091750234365,train accuracy:1.0,test_accuracy:0.98089998960495\n",
      "Epoch:370,loss:0.0023832814767956734,train accuracy:1.0,test_accuracy:0.9812999963760376\n",
      "Epoch:380,loss:0.0022953401785343885,train accuracy:1.0,test_accuracy:0.9810000061988831\n",
      "Epoch:390,loss:0.002217174507677555,train accuracy:1.0,test_accuracy:0.9814000129699707\n",
      "Epoch:400,loss:0.0021407753229141235,train accuracy:1.0,test_accuracy:0.9811000227928162\n",
      "Epoch:410,loss:0.0020706206560134888,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:420,loss:0.002004210837185383,train accuracy:1.0,test_accuracy:0.9811000227928162\n",
      "Epoch:430,loss:0.0019399069715291262,train accuracy:1.0,test_accuracy:0.9811999797821045\n",
      "Epoch:440,loss:0.0018817230593413115,train accuracy:1.0,test_accuracy:0.9811000227928162\n",
      "Epoch:450,loss:0.001825065235607326,train accuracy:1.0,test_accuracy:0.9811000227928162\n",
      "Epoch:460,loss:0.0017726989462971687,train accuracy:1.0,test_accuracy:0.9811999797821045\n",
      "Epoch:470,loss:0.001723485765978694,train accuracy:1.0,test_accuracy:0.9814000129699707\n",
      "Epoch:480,loss:0.0016756802797317505,train accuracy:1.0,test_accuracy:0.9811000227928162\n",
      "Epoch:490,loss:0.001629884704016149,train accuracy:1.0,test_accuracy:0.9811999797821045\n",
      "Epoch:500,loss:0.0015884399181231856,train accuracy:1.0,test_accuracy:0.9814000129699707\n",
      "Epoch:510,loss:0.0015467946650460362,train accuracy:1.0,test_accuracy:0.9812999963760376\n",
      "Epoch:520,loss:0.00150739261880517,train accuracy:1.0,test_accuracy:0.9814000129699707\n",
      "Epoch:530,loss:0.0014713458949699998,train accuracy:1.0,test_accuracy:0.9811999797821045\n",
      "Epoch:540,loss:0.0014354233862832189,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:550,loss:0.0014000809751451015,train accuracy:1.0,test_accuracy:0.9812999963760376\n",
      "Epoch:560,loss:0.0013671013293787837,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:570,loss:0.0013360179727897048,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:580,loss:0.0013063570950180292,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:590,loss:0.0012778874952346087,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:600,loss:0.001250292407348752,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:610,loss:0.0012240309733897448,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:620,loss:0.0011988314799964428,train accuracy:1.0,test_accuracy:0.9817000031471252\n",
      "Epoch:630,loss:0.0011738525936380029,train accuracy:1.0,test_accuracy:0.9817000031471252\n",
      "Epoch:640,loss:0.0011501635890454054,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:650,loss:0.0011273581767454743,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:660,loss:0.0011058168020099401,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:670,loss:0.001084152259863913,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:680,loss:0.0010638670064508915,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:690,loss:0.0010444059735164046,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:700,loss:0.0010254945373162627,train accuracy:1.0,test_accuracy:0.9814000129699707\n",
      "Epoch:710,loss:0.0010067714611068368,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:720,loss:0.0009895132388919592,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:730,loss:0.0009718498331494629,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:740,loss:0.0009556969162076712,train accuracy:1.0,test_accuracy:0.9817000031471252\n",
      "Epoch:750,loss:0.0009389573242515326,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:760,loss:0.000924298248719424,train accuracy:1.0,test_accuracy:0.9814000129699707\n",
      "Epoch:770,loss:0.0009083009208552539,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:780,loss:0.0008935520309023559,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:790,loss:0.0008791915606707335,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:800,loss:0.0008652952965348959,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:810,loss:0.0008518406539224088,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:820,loss:0.0008388268179260194,train accuracy:1.0,test_accuracy:0.9817000031471252\n",
      "Epoch:830,loss:0.0008263552444986999,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:840,loss:0.0008139703422784805,train accuracy:1.0,test_accuracy:0.9818000197410583\n",
      "Epoch:850,loss:0.0008019501692615449,train accuracy:1.0,test_accuracy:0.9815000295639038\n",
      "Epoch:860,loss:0.0007902666111476719,train accuracy:1.0,test_accuracy:0.9818000197410583\n",
      "Epoch:870,loss:0.0007790852105244994,train accuracy:1.0,test_accuracy:0.9817000031471252\n",
      "Epoch:880,loss:0.0007675282540731132,train accuracy:1.0,test_accuracy:0.9815999865531921\n",
      "Epoch:890,loss:0.0007567324792034924,train accuracy:1.0,test_accuracy:0.9818000197410583\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch:900,loss:0.0007462483481504023,train accuracy:1.0,test_accuracy:0.9818000197410583\n",
      "Epoch:910,loss:0.0007362552569247782,train accuracy:1.0,test_accuracy:0.9818000197410583\n",
      "Epoch:920,loss:0.0007262288709171116,train accuracy:1.0,test_accuracy:0.9818000197410583\n",
      "Epoch:930,loss:0.0007165326387621462,train accuracy:1.0,test_accuracy:0.9818999767303467\n",
      "Epoch:940,loss:0.0007069518906064332,train accuracy:1.0,test_accuracy:0.9818000197410583\n",
      "Epoch:950,loss:0.0006977022858336568,train accuracy:1.0,test_accuracy:0.9818999767303467\n",
      "Epoch:960,loss:0.0006886303890496492,train accuracy:1.0,test_accuracy:0.9818999767303467\n",
      "Epoch:970,loss:0.0006800427800044417,train accuracy:1.0,test_accuracy:0.9818999767303467\n",
      "Epoch:980,loss:0.0006710268789902329,train accuracy:1.0,test_accuracy:0.9818999767303467\n",
      "Epoch:990,loss:0.0006626223330385983,train accuracy:1.0,test_accuracy:0.9818999767303467\n",
      "Epoch:1000,loss:0.0006544062052853405,train accuracy:1.0,test_accuracy:0.9818999767303467\n"
     ]
    }
   ],
   "source": [
    "for e in range(1,1001):\n",
    "    # Train\n",
    "    for _ in range(60):\n",
    "        batch_xs, batch_ys = mnist.train.next_batch(1000)\n",
    "        sess.run(train_step,feed_dict={x: batch_xs, y_: batch_ys})\n",
    "    \n",
    "    acc,loss=test(mnist.train.images,mnist.train.labels,sess)\n",
    "    te_acc,te_loss=test(mnist.test.images,mnist.test.labels,sess)\n",
    "    if e%10==0:\n",
    "        print(\"Epoch:{},loss:{},train accuracy:{},test_accuracy:{}\".format(e,loss,acc,te_acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "模型优化Hint：\n",
    "- 多隐层\n",
    "- 激活函数\n",
    "- 正则化\n",
    "- 初始化\n",
    "- 摸索一下各个超参数\n",
    "  - 隐层神经元数量\n",
    "  - 学习率\n",
    "  - 正则化惩罚因子\n",
    "  - 最好每隔几个step就对loss、accuracy等等进行一次输出，这样才能有根据地进行调整"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
