{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 一、文本深度学习\n",
    "\n",
    "\n",
    "## 1、RNN循环神经网络\n",
    "\n",
    "\n",
    "### 1.1 RNN - 图像识别\n",
    "\n",
    "    数据集为MNIST，双层LSTM单元"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "from tensorflow.contrib import rnn\n",
    "from tensorflow.examples.tutorials.mnist import input_data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-2-e270719c02f0>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please write your own downloading logic.\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./data/V/train-images-idx3-ubyte.gz\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.data to implement this functionality.\n",
      "Extracting ./data/V/train-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use tf.one_hot on tensors.\n",
      "Extracting ./data/V/t10k-images-idx3-ubyte.gz\n",
      "Extracting ./data/V/t10k-labels-idx1-ubyte.gz\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n",
      "(55000, 784)\n"
     ]
    }
   ],
   "source": [
    "# 读入\n",
    "mnist = input_data.read_data_sets('./data/V/', one_hot=True)\n",
    "print (mnist.train.images.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "lr = 1e-3\n",
    "input_size = 28        # 每行输入28个特征点\n",
    "timestep_size = 28   # 持续输入28行\n",
    "hidden_size = 256    # 隐含层的数量\n",
    "layer_num = 2          # LSTM layer 的层数\n",
    "class_num = 10        # 10分类问题\n",
    "\n",
    "_X = tf.placeholder(tf.float32, [None, 784])\n",
    "y = tf.placeholder(tf.float32, [None, class_num])\n",
    "\n",
    "batch_size = tf.placeholder(tf.int32, [])  \n",
    "keep_prob = tf.placeholder(tf.float32, [])\n",
    "\n",
    "X = tf.reshape(_X, [-1, 28, 28])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-4-18c2c5bbc354>:3: LSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "This class is equivalent as tf.keras.layers.LSTMCell, and will be replaced by that in Tensorflow 2.0.\n",
      "WARNING:tensorflow:From <ipython-input-4-18c2c5bbc354>:6: MultiRNNCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "This class is equivalent as tf.keras.layers.StackedRNNCells, and will be replaced by that in Tensorflow 2.0.\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/rnn_cell_impl.py:1259: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
     ]
    }
   ],
   "source": [
    "# 神经元\n",
    "def lstm_cell():\n",
    "    cell = rnn.LSTMCell(hidden_size, reuse=tf.get_variable_scope().reuse)\n",
    "    return rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n",
    "\n",
    "mlstm_cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(layer_num)], state_is_tuple = True)\n",
    "\n",
    "#用全零来初始化状态\n",
    "init_state = mlstm_cell.zero_state(batch_size, dtype=tf.float32)\n",
    "\n",
    "#得到每一层的输出结果\n",
    "outputs = list()\n",
    "state = init_state\n",
    "with tf.variable_scope('RNN'):\n",
    "    for timestep in range(timestep_size):\n",
    "        if timestep > 0:\n",
    "            tf.get_variable_scope().reuse_variables()\n",
    "        (cell_output, state) = mlstm_cell(X[:, timestep, :],state)\n",
    "        outputs.append(cell_output)\n",
    "h_state = outputs[-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Softmax层参数\n",
    "W = tf.Variable(tf.truncated_normal([hidden_size, class_num], stddev=0.1), dtype=tf.float32)\n",
    "bias = tf.Variable(tf.constant(0.1,shape=[class_num]), dtype=tf.float32)\n",
    "y_pre = tf.nn.softmax(tf.matmul(h_state, W) + bias)\n",
    "\n",
    "\n",
    "# 损失和评估函数\n",
    "cross_entropy = -tf.reduce_mean(y * tf.log(y_pre))\n",
    "train_op = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\n",
    "\n",
    "correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(y,1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Iter0, step 200, training accuracy 0.929688\n",
      "test accuracy 0.9089\n"
     ]
    }
   ],
   "source": [
    "# 运行\n",
    "sess = tf.Session()\n",
    "sess.run(tf.global_variables_initializer())\n",
    "\n",
     "# Training loop — note: multi-layer LSTM training is CPU-intensive; reduce iterations if needed\n",
    "for i in range(200):\n",
    "    _batch_size = 128\n",
    "    batch = mnist.train.next_batch(_batch_size)\n",
    "    if (i+1)%200 == 0:\n",
    "        train_accuracy = sess.run(accuracy, feed_dict={_X:batch[0], y: batch[1], keep_prob: 1.0, batch_size: _batch_size})\n",
    "        # 已经迭代完成的 epoch 数: mnist.train.epochs_completed\n",
    "        print (\"Iter%d, step %d, training accuracy %g\" % ( mnist.train.epochs_completed, (i+1), train_accuracy))\n",
    "    sess.run(train_op, feed_dict={_X: batch[0], y: batch[1], keep_prob: 0.5, batch_size: _batch_size})\n",
    "\n",
    "# 计算测试数据的准确率\n",
    "print (\"test accuracy %g\"% sess.run(accuracy, \n",
    "                                    feed_dict={ _X: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0, batch_size:mnist.test.images.shape[0]}))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    单个图像RNN每层结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(5, 784) (5, 10)\n",
      "_outputs.shape = (28, 5, 256)\n"
     ]
    }
   ],
   "source": [
    "_batch_size = 5\n",
    "X_batch, y_batch = mnist.test.next_batch(_batch_size)\n",
    "print (X_batch.shape, y_batch.shape)\n",
    "\n",
    "_outputs, _state = sess.run([outputs, state], feed_dict={_X: X_batch, y: y_batch, keep_prob: 1.0, batch_size: _batch_size})\n",
    "print ('_outputs.shape =', np.asarray(_outputs).shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP0AAAD7CAYAAAChbJLhAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAADb1JREFUeJzt3W2slPWZx/HfD4GGhwAGKyjq+kKDEbGrHPGZwOpufUirCRsSY2MMrueFjVbNpmnftVnTN6uGpmooQWvSmDQpBkjTbBQfoKauVjSmclgf3qxipKKgklANB7z2BcNm5DD/mXPP4+H6fhLDmbnmP/fFcH7eM/O/7/vviBCAPCb1uwEAvUXogWQIPZAMoQeSIfRAMoQeSIbQA8kQeiAZQg8kM7kXG7HNYX9A930aEd9u9qDKe3rbv7D9J9vbbF9R9XkAdMz7rTyo0p7e9jWSpkfEMtuzJW20/d2IGK3yfAB6p+qe/hpJ6yUpIr6QtE3S4k41BaB7qn6mnyvp47rbf5N0av0DbA9LGq74/AC6pOqefq+k+i8M5kvaU/+AiFgXEUMRMVS1OQCdVzX0L0i6XZJqn+mXSXqrQz0B6KJKb+8j4jnb19reVrvrp3yJB0wM7sWVc5inB3ri9VY+TnNEHpAMoQeSIfRAMoQeSIbQA8kQeiAZQg8kQ+iBZAg9kAyhB5Ih9EAyhB5IhtADyRB6IBlCDyRD6IFkCD2QDKEHkiH0QDKEHkiG0APJEHogGUIPJEPogWQIPZAMoQeSIfRAMoQeSIbQA8kQeiCZyqG3faPtEdtbbf++k00B6J7JbYy9RNItEfHXTjUDoPvaCf2QpCtsH5b07xEx0qGeAHRRO6G/PSI+tX2GpN9I+ucO9QSgiyp/po+IT2t/fijpM9tT6uu2h21vt729zR4BdFCl0Nu+y/YPaj/PlzQnIkbrHxMR6yJiKCKGOtAngA5xRIx/kD1N0m8lnSLpsKR7I+KtwuPHvxEA4/V6KzvZSp/pI+JLSf9aZSyA/mrni7wUrrvuuoa1W2+9tTj26aefLtZXrFhRrL/wwgvF+uLFixvWFi5cWBz77LPPFusXXHBBsb5mzZpivWTu3LnF+o4dOyo/N5rjiDwgGUIPJEPogWQIPZAMoQeSIfRAMpUOzhn3RibwwTmff/55w9qsWbOKY7/++utifdKkifv/3NHR0eYPauDgwYPF+t13312sT55cnmlev379uHs6QbR0cM7E/a0DUAmhB5Ih9EAyhB5IhtADyRB6IBlCDyST/tTaJUuWFOvvvfde5bHtzsPv37+/WH/11Vcb1s4555zi2OnTpxfr8+bNK9anTJlSrLcz9oknnqj83JJ04YUXNqzdc889bT33iYA9PZAMoQeSIfRAMoQeSIbQA8kQeiAZQg8kw/n0TZTmlGfOnNnVbR8+fLhYL83jT5s2rTi22TEEU6dOLdabKW3/0ksvLY59/PHHi/U5c+YU66XX7YYbbiiO3bJlS7E+4DifHsBYhB5IhtADyRB6IBlCDyRD6IFkCD2QTEvn09u+SdIvJd0cEW/aPl/So5IsaaekH0VE9QuhD7DS9d0/++yzHnYyPl9++WVb4w8cONDW+NLxH9dff31x7IwZM9ratu2GtWbLZGfQ0p4+IjZLerLurgcl3RYRyyWNSFrd8c4AdMW4397bnirpQETsqt21TtK1He0KQNdU+Uw/V9Keozdqb+vTX3YLmCiqhHWvpFOO3rA9RdKYg51tD0sart4agG4Y954+Ig5KmmH7tNpdd0oac5ZCRKyLiKFWTgAA0DtV35b/RNJTtifpyBd593auJQDd1HLoI+JndT/vkPRP3WgIQHfxBVxSV111VbG+cuXKYn3hwoXF+sUXX9ywduqppxbHNnPo0KFi/YEHHmhY27RpU1vbPhFwRB6QDKEHkiH0QDKEHkiG0APJEHogGabsJrCTTz65Ya3ZpZxL
yzlL0uTJg/ur0ay30inPX331VafbmXDY0wPJEHogGUIPJEPogWQIPZAMoQeSIfRAMoM7GYumli5d2rB29tlnF8d2ex6+dPrrxx9/XBy7YMGCtra9Zs2ahrVmlwZfv359W9ueCNjTA8kQeiAZQg8kQ+iBZAg9kAyhB5Ih9EAyzNNPYDNnzmxYmzJlSnHsyMhIsf7MM88U6xs2bCjWP/nkk4a1ffv2Fcc2u0T2ww8/XKyXlsJeu3ZtcezLL79crO/cubNYnwjY0wPJEHogGUIPJEPogWQIPZAMoQeSIfRAMo6I5g+yb5L0S0k3R8Sbtfu21j3kPyPij4XxzTeCjjrzzDOL9V27dvWok86bP39+sf7GG29UHtvs+IXFixcX6332ekQMNXtQSwfnRMRm2xcdvW17qqTdEXFLGw0C6IOqb++/I2mJ7a22f97JhgB0V9XQvyNpUUQslzTJ9srOtQSgmyqFPiL2R8Ro7ebvJF107GNsD9vebnt7Ow0C6Kxxh972JNvP2f5W7a5Vkl4/9nERsS4ihlr5YgFA74z7LLuI+Nr2ryRttT0q6c8RsbHzrQHohpZDHxE/q/t5s6TN3WgIQHdxPv0JaiLPwzezd+/eYn337t0Na6eddlpx7Jw5cyr1NJFwRB6QDKEHkiH0QDKEHkiG0APJEHogmQk/ZXfllVcW680up7xxI8cVTTSzZs0q1kvTcs1OJX/kkUcq9TSRsKcHkiH0QDKEHkiG0APJEHogGUIPJEPogWRaugR22xvp4iWwX3rppWJ90aJFxfo777xTrN93330Na6+88kpxLI7vpJNOKtabHXtx1113FeurVq1qWNuzZ09xbLNLZA+4li6BzZ4eSIbQA8kQeiAZQg8kQ+iBZAg9kAyhB5KZ8PP0H3zwQbF+xhlntPX8pXn8xx57rDh206ZNxfpHH31UrB8+fLhY76dmc+3nnntuw9qTTz5ZHLt06dIqLf2/ffv2NawtX768OHbHjh1tbbvPmKcHMBahB5Ih9EAyhB5IhtADyRB6IBlCDyTT0jy97YckLZE0VdJ/SHpf0qOSLGmnpB9FxGhhfNfm6Xfu3Fmsn3feed3adNseffTRYr3Z3610jMJZZ51Vqaejmr1ul1xySbF+2WWXtbX9kgMHDhTrl19+ecPaBJ+Hb6alefqmi13YvlnSvohYbnu6pC2SvpB0W0Tssv1DSasl/brdjgF0Xytv79+TtFaSIuLvkvZIOhARu2r1dZKu7U57ADqt6Z4+IkaO/mz7+5L+IumMuvqo7Qm/PBaQRctf5NleIelqSQ9JOqXu/imSxhwkbnvY9nbb2zvRKIDOaCn0tpdJ+p6kH0fEQUkzbB9dJfBOHfmc/w0RsS4ihlr5YgFA77TyRd7Vkv4g6U1JL9qWpLslPWV7kqQRSfd2s0kAnTPhT62dN29esT48PFys33///cX67Nmzx90Typr9zr322mvF+urVq4v1ZlOdJzBOrQUwFqEHkiH0QDKEHkiG0APJEHogGUIPJDPh5+m77Y477mhYO/3004tjmx0DMH369GJ98uTysVO1A6UqOXToULG+bdu2Yr3Z6a0bNmxoWNu9e3dx7PPPP1+soyHm6QGMReiBZAg9kAyhB5Ih9EAyhB5IhtADyTBP30cLFiwo1s8///xifdq0aZW3/e677xbrb7/9duXnRt8wTw9gLEIPJEPogWQIPZAMoQeSIfRAMoQeSIZ5euDEwTw9gLEIPZAMoQeSIfRAMoQeSIbQA8kQeiCZlkJv+yHbW22/bPv62n1b6/67sbttAuiU8moKkmzfLGlfRCy3PV3SFtvPS9odEbd0vUMAHdXKnv49SWslKSL+LulDSRdJWlLby/+8i/0B6LCmoY+IkYjYK0m2vy/pvyX9j6RFEbFc0iTbK48dZ3vY9nbb2zvcM4A2tPxFnu0Vkq6OiDURsT8iRmul3+nInv8bImJdRAy1ciwwgN5p9Yu8ZZK+J+nHtifZfs72t2rlVZJe71aD
ADqrlS/yrpb0B0lvSnqxdvdGSVttj0r6c0Rs7F6LADqJU2uBEwen1gIYi9ADyRB6IBlCDyRD6IFkCD2QDKEHkiH0QDKEHkiG0APJEHogGUIPJEPogWQIPZAMoQeSaXoRjQ75VNL7dbdPqd03iOitGnobv0739Q+tPKgnF9EYs1F7+6BeO4/eqqG38etXX7y9B5Ih9EAy/Qr9uj5ttxX0Vg29jV9f+urLZ3oA/cPbeyCZnofe9i9s/8n2NttX9Hr7JbZvtD1SW6Pv9wPQz022/9f2P9Zun2/7xVp/j9meMii91e7r+0rGx66wPGCv2UCs/tyreXpJku1rJE2PiGW2Z0vaaPu7dUtk9dslkm6JiL/2uxFJiojNtuuXDHtQ0m0Rscv2DyWtlvTrQejN9lT1eSXj462wLOkLDcBrNkirP/d6T3+NpPWSFBFfSNomaXGPeygZkvSg7f+yvajfzdSrhepAROyq3bVO0rV9bOlY31H/VzI+doXlPRqc12xgVn/udejnSvq47vbfJJ3a4x5Kbo+If5F0p6Q1/W7mGHN15JdYklR7d9TTd2pNvKMmKxl323FWWP6LBuQ1q7r6czf0OvR7JX277vZ81f2j9FtEfFr780NJn/Xz899x7NWRwzYlSbXeDvevnW9qZSXjXjm6wrKkhzRgr9l4V3/uhl6H/gVJt0tS7TP9Mklv9biH47J9l+0f1H6eL2nOAH3XoIg4KGmG7dNqd92pI59Z+26QVjKuX2F50F6zQVn9uadvdSLiOdvX2t5Wu+unAxSs30j6re1/05G9wb197ud4fiLpKduTJI1oQHqMiK9t/0p9Xsn42BWWbUvS3RqA12yQVn/m4BwgGQ7OAZIh9EAyhB5IhtADyRB6IBlCDyRD6IFkCD2QzP8BL9j9d5s3W/MAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 画图\n",
    "import matplotlib.pyplot as plt\n",
    "print (mnist.train.labels[4])\n",
    "\n",
    "X3 = mnist.train.images[4]\n",
    "img3 = X3.reshape([28, 28])\n",
    "plt.imshow(img3, cmap='gray')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(28, 1, 256)\n",
      "(28, 256)\n"
     ]
    }
   ],
   "source": [
    "X3.shape = [-1, 784]\n",
    "y_batch = mnist.train.labels[0]\n",
    "y_batch.shape = [-1, class_num]\n",
    "\n",
    "X3_outputs = np.array(sess.run(outputs, feed_dict={_X: X3, y: y_batch, keep_prob: 1.0, batch_size: 1}))\n",
    "print (X3_outputs.shape)\n",
    "\n",
    "X3_outputs.shape = [28, hidden_size]\n",
    "print (X3_outputs.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAAD5CAYAAADcDXXiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAABhZJREFUeJzt3VF24jYAQNGhK8kys7Gsz/PRfLSnnYDBwtLzvb85A0aWnwUYz23btl8AtPx19gYAcDxxBwgSd4AgcQcIEneAIHEHCBJ3gCBxBwgSd4Cg5eL+8fm1fXx+/fFntT/97d7jPvOcz/7tle0ZacTz1sZoNsZhnJXHdsq4v3LA33vcox/zFbNtzz1nbO+7TzYjPbuAGPmcI7ZntXldNWXcGceJk6IzTtizv6MXdzhQ6Z1GyRXHT9yBXc78WOsK7zqPIu4AEznqZCPuAEHiDhAk7gBB4g4QJO4AQeIOECTuAEHiDhAk7gBB4g4QJO4AQeIOECTuAEHiDhAk7gBB4g4QJO4AQeIOECTuAEG3bUv+37AAl2blDhAk7gBB4g4QJO4AQeIOECTu3z4+v1w2NMjH59dmfNdin903+xiJO0CQuMObzLzKo2e5uM/+VuifXtnWs17jKmP769fz27rSHHrET6+l9DrZZ7m4n+GnGNRCAa9a7XhYbXsflYt7dUdV3Ts52p88YqVF1ru2dcq4j3rxq+z8WRm/c6wULuYxZdyv4owD1olzLOPALMQdJnDG6twX0m3iTpL49Dip7CPuABM56gQm7gBB4g4QJO4Ai9jzkY24A3mr3QbkCOIOECTuAAeb4bJNcQcIEneAIHEHCBJ3gCBxBwgSd4AgcQcIEneAIHEHCLpt27K3TgDgD6zcAYLEHSBI3AGCxB0gSNwBgsT9ATPcm7nK2I5lbMeZfe6KO0CQuAMEiTtAkLgDBIk7QJC4AwSJO0DQpeI+8zWpAEe6VNwBrkLcAYLEHSBI3DmE7zNgLuIOELRc3Ge/ExvADKaM+1UCfoXXyOuucjxwrCnjXuGghDlc8TgUd4Agcb8Y7ybg36rHQy7u1R3FPqvNAyddjpaLO68RmNcYP2Yh7pOaMRIzbhPvd9a7DO9u9hH3b89OGhPuPKuN+2rbyzmOmifiDnCwGRZ94s5wr0z0EQfIDAfeDH4ag9XGaLXtfQdxh8XNFrXZtucVsy0u9vy727Zl9gMA36zcAYLEHSBI3AGCxB0gSNwBgsT9Aa6hHcfYjmVsx5l97oo7QJC4AwSJO0CQuAMEiTtAkLgDBIk7QJC4AwSJO0CQuAMEiTtAkLgDBIk7QJC4AwTl4j7zLTgB3iUXd4CzzXCvd3EHCBJ3Hnb2SgSedcW5K+5wUVcM3ur27DNxhwnM8BktLZeKu4NnPfYZZ1p5/k0Z95VWMSttK3AdU8b9Ks44KYw6GTnB/W3U2K40vqO21dzdR9wZ7ow4/fR8q8XynjPGtjR+szlqbG/bZh8B1Fi5AwSJO0CQuAMEiTtAkLgDBIk7QJC4T8p1xOO4TnssYzuOG4cBXJy4AwSJO0CQuAMEiTtAkLgDBIk7QJC4P8B10cBqxB0gSNwBgsQdIEjcAYLEHSBI3L89ezWMK2mAGYk7QFAu7lbRAMG4AyDuAEniznC+dIb3E3eAIHEHCBJ3HnbGRys+zrnPGPF/poz7qM9oHQTz8Xk8M1t5bk4Z94p74Vp54sxutbE94yS32hixz23b7F+AGit3gCBxBwgSd4AgcQcIEneAIHEHCBL3SbkGeRw/nBprtbFdbXsfJe4AQeIOECTuAEHiDhAk7gBB4g4QJO4AQeIOECTuAEHiDhAk7gBB4g6Tcy8cniHuAEHiDuzincQaxB0gSNwBgsQdIEjcAYLEHWAiR31ZLe4AQeIOsIg9q3pxJ8l12FyduAP/4YdK6xP3byMm8r0DpHTw3Hudo8b36MesMUbjPDuv33XivG2bfQ9Q
Y+UOECTuAEHiDhAk7gBB4g4QJO4AQZeKu2t+xzG24/hB0VjVsb1U3AGuQtwBgsQdIEjcAYLEHSBI3AGCxB0gSNwBgsQdIEjcAYLEHSBI3AGCxB0gSNwBgsQdIEjcAYLEHSBI3AGCxB0gSNwBgsQdIEjcAYLEHSBI3E/08fm1PfM3gHtu26YhADVW7gBB4g4QJO4AQeIOECTuAEHiDhAk7gBB4g4QJO4AQeIOECTuAEHiDhAk7gBB4g4QJO4AQeIOECTuAEHiDhAk7gBB4g4QJO4AQeIOEPQbvcadEX3/36kAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 432x288 with 28 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 预测过程图\n",
    "h_W = sess.run(W, feed_dict={_X:X3, y: y_batch, keep_prob: 1.0, batch_size: 1})\n",
    "h_bias = sess.run(bias, feed_dict={_X:X3, y: y_batch, keep_prob: 1.0, batch_size: 1})\n",
    "h_bias.shape = [-1, 10]\n",
    "\n",
    "bar_index = range(class_num)\n",
    "for i in range(X3_outputs.shape[0]):\n",
    "    plt.subplot(7, 4, i+1)\n",
    "    X3_h_shate = X3_outputs[i, :].reshape([-1, hidden_size])\n",
    "    pro = sess.run(tf.nn.softmax(tf.matmul(X3_h_shate, h_W) + h_bias))\n",
    "    plt.bar(bar_index, pro[0], width=0.2 , align='center')\n",
    "    plt.axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### 1.2 LSTM - 斐波那契数列\n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-12-0870039cf1f7>:32: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "This class is equivalent as tf.keras.layers.LSTMCell, and will be replaced by that in Tensorflow 2.0.\n",
      "WARNING:tensorflow:From <ipython-input-12-0870039cf1f7>:33: dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `keras.layers.RNN(cell)`, which is equivalent to this API\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n",
      "0 115.53497\n",
      "100 58.06972\n",
      "200 23.862585\n",
      "300 9.637144\n",
      "400 4.939095\n",
      "500 3.4626875\n",
      "600 2.888036\n",
      "700 2.295357\n",
      "800 1.6927446\n",
      "900 1.1440173\n",
      "Model saved to ./model/VI/\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n",
      "INFO:tensorflow:Restoring parameters from ./model/VI/\n",
      "\n",
      "Lets run some tests!\n",
      "\n",
      "When the input is [[1], [2], [3], [4]]\n",
      "The ground truth output should be [[1], [3], [5], [7]]\n",
      "And the model thinks it is [0.69872254 2.8432064  5.450609   7.4157248 ]\n",
      "\n",
      "When the input is [[4], [5], [6], [7]]\n",
      "The ground truth output should be [[4], [9], [11], [13]]\n",
      "And the model thinks it is [ 3.885928  9.067664 11.851666 12.719682]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow.contrib import rnn\n",
    "\n",
    "class SeriesPredictor:\n",
    "\n",
    "    def __init__(self, input_dim, seq_size, hidden_dim=10):\n",
    "        # Hyperparameters\n",
    "        self.input_dim = input_dim\n",
    "        self.seq_size = seq_size\n",
    "        self.hidden_dim = hidden_dim\n",
    "\n",
    "        # Weight variables and input placeholders\n",
    "        self.W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out')\n",
    "        self.b_out = tf.Variable(tf.random_normal([1]), name='b_out')\n",
    "        self.x = tf.placeholder(tf.float32, [None, seq_size, input_dim])\n",
    "        self.y = tf.placeholder(tf.float32, [None, seq_size])\n",
    "\n",
    "        # Cost optimizer\n",
    "        self.cost = tf.reduce_mean(tf.square(self.model() - self.y))\n",
    "        self.train_op = tf.train.AdamOptimizer().minimize(self.cost)\n",
    "\n",
    "        # Auxiliary ops\n",
    "        self.saver = tf.train.Saver()\n",
    "\n",
    "    def model(self):\n",
    "        \"\"\"\n",
    "        :param x: inputs of size [T, batch_size, input_size]\n",
    "        :param W: matrix of fully-connected output layer weights\n",
    "        :param b: vector of fully-connected output layer biases\n",
    "        \"\"\"\n",
    "        cell = rnn.BasicLSTMCell(self.hidden_dim)\n",
    "        outputs, states = tf.nn.dynamic_rnn(cell, self.x, dtype=tf.float32)\n",
    "        num_examples = tf.shape(self.x)[0]\n",
    "        #W_repeated = tf.tile(tf.expand_dims(self.W_out, 0), [num_examples, 1, 1])\n",
    "        \n",
    "        tf_expand = tf.expand_dims(self.W_out, 0)\n",
    "        tf_tile = tf.tile(tf_expand, [num_examples, 1, 1])\n",
    "        out = tf.matmul(outputs, tf_tile) + self.b_out\n",
    "        out = tf.squeeze(out)\n",
    "        return out\n",
    "\n",
    "    def train(self, train_x, train_y):\n",
    "        with tf.Session() as sess:\n",
    "            tf.get_variable_scope().reuse_variables()\n",
    "            sess.run(tf.global_variables_initializer())\n",
    "            for i in range(1000):\n",
    "                _, mse = sess.run([self.train_op, self.cost], feed_dict={self.x: train_x, self.y: train_y})\n",
    "                if i % 100 == 0:\n",
    "                    print(i, mse)\n",
    "            save_path = self.saver.save(sess, './model/VI/')\n",
    "            print('Model saved to {}'.format(save_path))\n",
    "\n",
    "    def test(self, test_x):\n",
    "        with tf.Session() as sess:\n",
    "            tf.get_variable_scope().reuse_variables()\n",
    "            self.saver.restore(sess, './model/VI/')\n",
    "            output = sess.run(self.model(), feed_dict={self.x: test_x})\n",
    "            return output\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    predictor = SeriesPredictor(input_dim=1, seq_size=4, hidden_dim=10)\n",
    "    train_x = [[[1], [2], [5], [6]],\n",
    "               [[5], [7], [7], [8]],\n",
    "               [[3], [4], [5], [7]]]\n",
    "    train_y = [[1, 3, 7, 11],\n",
    "               [5, 12, 14, 15],\n",
    "               [3, 7, 9, 12]]\n",
    "    predictor.train(train_x, train_y)\n",
    "\n",
    "    test_x = [[[1], [2], [3], [4]],  # 1, 3, 5, 7\n",
    "              [[4], [5], [6], [7]]]  # 4, 9, 11, 13\n",
    "    actual_y = [[[1], [3], [5], [7]],\n",
    "                [[4], [9], [11], [13]]]\n",
    "    pred_y = predictor.test(test_x)\n",
    "    \n",
    "    print(\"\\nLets run some tests!\\n\")\n",
    "    \n",
    "    for i, x in enumerate(test_x):\n",
    "        print(\"When the input is {}\".format(x))\n",
    "        print(\"The ground truth output should be {}\".format(actual_y[i]))\n",
    "        print(\"And the model thinks it is {}\\n\".format(pred_y[i]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.3 时间序列模型\n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    请查看code/VI中的代码"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2、CNN卷积神经网络\n",
    "\n",
    "\n",
    "### 2.1 CNN - 文本分类\n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    请查看code/VI中的代码"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3、LSTM长短记忆网络\n",
    "\n",
    "### 3.1 LSTM - 情感分析\n",
    "\n",
    "    使用谷歌训练好的词向量模型来创建，该模型有40万个词向量，每个向量的维数为50。其包含两个文件，一是40w单词Python列表，二是40w*50维的单词向量矩阵。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from os import listdir\n",
    "from os.path import isfile, join"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loaded it!\n",
      "400000\n",
      "(400000, 50)\n"
     ]
    }
   ],
   "source": [
    "wordsList = np.load('./data/VI/wordsList.npy')\n",
    "wordVectors = np.load('./data/VI/wordVectors.npy')\n",
    "\n",
    "wordsList = wordsList.tolist() # Originally loaded as numpy array\n",
    "wordsList = [word.decode('UTF-8') for word in wordsList] # Encode words as UTF-8\n",
    "\n",
    "print('Loaded it!')\n",
    "print(len(wordsList))\n",
    "print(wordVectors.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    在词库中搜索单词，比如 “baseball”，然后可以通过访问嵌入矩阵来得到相应的向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([-1.9327  ,  1.0421  , -0.78515 ,  0.91033 ,  0.22711 , -0.62158 ,\n",
       "       -1.6493  ,  0.07686 , -0.5868  ,  0.058831,  0.35628 ,  0.68916 ,\n",
       "       -0.50598 ,  0.70473 ,  1.2664  , -0.40031 , -0.020687,  0.80863 ,\n",
       "       -0.90566 , -0.074054, -0.87675 , -0.6291  , -0.12685 ,  0.11524 ,\n",
       "       -0.55685 , -1.6826  , -0.26291 ,  0.22632 ,  0.713   , -1.0828  ,\n",
       "        2.1231  ,  0.49869 ,  0.066711, -0.48226 , -0.17897 ,  0.47699 ,\n",
       "        0.16384 ,  0.16537 , -0.11506 , -0.15962 , -0.94926 , -0.42833 ,\n",
       "       -0.59457 ,  1.3566  , -0.27506 ,  0.19918 , -0.36008 ,  0.55667 ,\n",
       "       -0.70315 ,  0.17157 ], dtype=float32)"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 例子一\n",
    "baseballIndex = wordsList.index('baseball')\n",
    "wordVectors[baseballIndex]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    构造句子的向量表示，可以使用TensorFlow嵌入函数，其参数为是嵌入矩阵（词向量矩阵）和词对应的索引表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(10,)\n",
      "[    41    804 201534   1005     15   7446      5  13767      0      0]\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/embedding_ops.py:132: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Colocations handled automatically by placer.\n",
      "(10, 50)\n"
     ]
    }
   ],
   "source": [
    "# 例子二\n",
    "maxSeqLength = 10 #Maximum length of sentence\n",
    "numDimensions = 300 #Dimensions for each word vector\n",
    "\n",
    "firstSentence = np.zeros((maxSeqLength), dtype='int32')\n",
    "firstSentence[0] = wordsList.index(\"i\")\n",
    "firstSentence[1] = wordsList.index(\"thought\")\n",
    "firstSentence[2] = wordsList.index(\"the\")\n",
    "firstSentence[3] = wordsList.index(\"movie\")\n",
    "firstSentence[4] = wordsList.index(\"was\")\n",
    "firstSentence[5] = wordsList.index(\"incredible\")\n",
    "firstSentence[6] = wordsList.index(\"and\")\n",
    "firstSentence[7] = wordsList.index(\"inspiring\")\n",
    "# firstSentence[8] and firstSentence[9] are going to be 0\n",
    "\n",
    "\n",
    "# 句子中词的索引值\n",
    "print(firstSentence.shape)\n",
    "print(firstSentence)\n",
    "\n",
    "\n",
    "# 输出数据是一个 10*50 的词矩阵，其中包括 10 个词，每个词的向量维度是 50。就是去找到这些词对应的向量\n",
    "with tf.Session() as sess:\n",
    "    print(tf.nn.embedding_lookup(wordVectors,firstSentence).eval().shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    IMDB数据集，包含25000条电影数据，其中12500条正向数据，12500条负向数据，数据被存储在npy文本文件中。运行时，我只使用Tiny文件，一共只保留了30条。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Positive files finished\n",
      "Negative files finished\n"
     ]
    }
   ],
   "source": [
    "# 加载文件\n",
    "positiveFiles = ['./data/VI/TinyPosReviews/' + f for f in listdir('./data/VI/TinyPosReviews/') if isfile(join('./data/VI/TinyPosReviews/', f))]\n",
    "negativeFiles = ['./data/VI/TinyNegReviews/' + f for f in listdir('./data/VI/TinyNegReviews/') if isfile(join('./data/VI/TinyNegReviews/', f))]\n",
    "numWords = []\n",
    "\n",
    "\n",
    "# 处理文件\n",
    "for pf in positiveFiles:\n",
    "    with open(pf, \"r\", encoding='utf-8') as f:\n",
    "        line=f.readline()\n",
    "        counter = len(line.split())\n",
    "        numWords.append(counter)       \n",
    "print('Positive files finished')\n",
    "\n",
    "\n",
    "for nf in negativeFiles:\n",
    "    with open(nf, \"r\", encoding='utf-8') as f:\n",
    "        line=f.readline()\n",
    "        counter = len(line.split())\n",
    "        numWords.append(counter)  \n",
    "print('Negative files finished')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The total number of files is 30\n",
      "The total number of words in the files is 4441\n",
      "The average number of words in the files is 148.03333333333333\n"
     ]
    }
   ],
   "source": [
    "# 查看数据基本信息\n",
    "numFiles = len(numWords)\n",
    "\n",
    "print('The total number of files is', numFiles)\n",
    "print('The total number of words in the files is', sum(numWords))\n",
    "print('The average number of words in the files is', sum(numWords)/len(numWords))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX8AAAEICAYAAAC3Y/QeAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAEV1JREFUeJzt3XuQZGV9xvHvwzUsKDeXoMEFU4qgRjFMNKIgBjRaqCwVjBeMooGlMJqgUnEtSUUlIsVNRBNgETGFUSISSCkgwYRVLsZk18QLlbKICKWWmgUUb4jA/vJHn5FhmdntmZ0zPc37/VRN7enT5/Lrd3uefuftPm+nqpAktWWLURcgSVp4hr8kNcjwl6QGGf6S1CDDX5IaZPhLUoN6C/8khyW5OcnqJJf2dR5J0uxt1eOxfw94dVV9rcdzSJLmIH1d5JXks8A2wAPAiVV1cy8nkiTNWp/h/5iquiPJHsBFVfXCDe5fAawA2H777fffZ599eqlDkh6p1q5de0dVLZ3Lvr2F/0NOknwKOKqq7pvu/omJiVqzZk3vdUjSI0mStVU1MZd9e3nDN8mbkry2W94d2Gmm4JckLby+3vC9CLg4yTEMxvxP6Ok8kqQ56CX8q+oe4Mg+ji1J2nxe5CVJDTL8JalBhr8kNcjwl6QGGf6S1CDDX5IaZPhLUoMMf0lqkOEvSQ0y/CWpQYa/JDXI8JekBhn+ktQgw1+SGmT4S1KDDH9JapDhL0kNMvwlqUGGvyQ1yPCXpAYZ/pLUIMNfkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8kNcjwl6QGGf6S1CDDX5IaZPhLUoMMf0lqkOEvSQ0y/CWpQYa/JDXI8JekBvUe/kmekuSNfZ9HkjS8XsM/yc7Ax4FH93keSdLs9Bb+SbYEPgic3dc5JElzs1WPx34vcA6wA7DLhncmWQGsAFi2bFmPZWg+7LXyyoetu+3Uw0ZQiaT50Ev4J3kJsBx4LrATsEOSNVV1w+Q2VbUKWAUwMTFRfdQhSZpeL+FfVVcDVwMkORjYb2rwS5JGy496SlKD+hzzB6CqVgOr+z6PJGl49vwlqUGGvyQ1yPCXpAYZ/pLUIMNfkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8kNcjwl6QGGf6S1CDDX5IaZPhLUoMMf0lqkOEvSQ0y/CWpQYa/JDXI8JekBhn+ktQgw1+SGmT4S1KDDH9JapDhL0kNMvwlqUGGvyQ1yPCXpAYZ/pLUIMNfkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8kNcjwl6QG9RL+SZYkuSTJTUluTPK8Ps4jSZqbvnr+uwPnVNUBwLHAUT2dR5I0B0OFf5LXJ9l62INW1a1VdVOSM4A1wEfnWqAkaf4N2/P/BfCpJO9MstOwB6+qE4FnAKcleci5kqxIsibJmnXr1g1fsSRpsw0V/lV1aVUdAVwPnJvknCRPmGn7JPsm2abb9xbgW8CuGxxzVVVNVNXE0qVL5/4IJEmzNvSYf5LtgN8FlgBfBU5McvEMmx8EvK7bbydgz6qyey9Ji8RWw2yU5D3AvsCFVXVOt/rCJDN12S8ELkhyFLAeeMdmVypJmjdDhT/wGeDkqro/yZZV9QDATL35qrofeMM81ShJmmfDDvscALysW16e5Nie6pEkLYBhw/+VVXU5QFVdBry2v5IkSX0bNvzv3eD2+vkuRJK0cIYN/58l2QUgyW48/MVAkjRGhn3D913AJ5LcABwCvK2/kiRJfRsq/Kvq60mOAPYBPlBVP++3LElSn2Yzsdt6YB2wa5JlPdUjSVoAw17kdQLwUuAbQHU/Dv1I0pgadsz/NVX1rF4rkSQtmGGHfX7caxWSpAU19PQOSc4DLgLuAaiqr/VWlSSpV8OG/y7A94EXd7cLMPwlaUwN+1HP9yTZsaru7rsgSVL/hv0ax1cAX0yyZZLLkuzZc12SpB4N+4bvq4A7
u6mcTwDO668kSVLfhg3/HSYXquo7wLb9lCNJWgjDhv/tkwtJHgekn3IkSQth2PA/H3h0klOAaxhM9CZJGlPDftpnbZLnA08GTqmqn/VbliSpT8PO7fPXDD7bP3mbqnpvb1VJkno17EVeV3T/bsfgi9m/3k85kqSFMOywz1cnl5N8Gbga+HBfRUmS+jWb+fwBqKoCdu6hFknSAhl2zP8DDMb8twT2BT7ZZ1GSpH4NO+Z/dvfvemBdVf2yp3okSQtg2PA/fOqN5MFrvKrqnPksSJLUv2HH/LcFnsXgaxyfzeAK3692P5KkMTNsz/9Q4LCquj/JjcAVVfXBHuuSJPVo2J7/1lV1P0BV3YsTu0nSWBs2/H86OYd/kr0Ap3eQpDE27LDPW4GPJLkdWAYc319JkqS+DXuF763AC5PsXFU/6rkmSVLPhv0ax72SnA/8JIm9fkkac8OO+Z8E7N19jeMvk5zeY02SpJ4NG/7L6KZ0rqqLgN/vrSJJUu+GDf+7JxcyuLzXr3GUpDE2bPh/Htguyd7ARxl8leNGJTkzyeokNyV5yeYUKUmaX8N+2uf8JD8G3gxcW1Wf2Nj2SZYDd1XVwUmWANcmuaaq1m9+yZKkzTXslM4nV9VfAf845HFvAa4HqKpfJPkuDhVJ0qIx7EVeOyd5JbAauAegqn4y08ZVdfPkcpKXA1/qPinElPUrgBUAy5Ytm13VC2SvlVdOu/62Uw9b4ErGy0ztNp2Z2nI+jrFY+bzSYrDRMf8kr+gWd2Aw1v9+4IM8OL//RiV5AXBgVT1s+6paVVUTVTWxdOnS2VUtSdosm+r5Hw9cWlVHJ9mzqt447IGTHAS8DHj75hQoSZp/s/kO3xp2wyQHAp8B9geu6z7184TZFidJ6semev5PSnIWgzdrpy5XVb1tpp2q6npgx/krU5I0nzYV/s+bsjzUOL8kafHbaPhX1e0LVYgkaeHMZsxfkvQIYfhLUoMMf0lqkOEvSQ0y/CWpQYa/JDXI8JekBhn+ktQgw1+SGmT4S1KDDH9JapDhL0kNMvwlqUGGvyQ1yPCXpAYZ/pLUIMNfkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8kNcjwl6QGGf6S1CDDX5IaZPhLUoMMf0lqkOEvSQ0y/CWpQYa/JDXI8JekBhn+ktQgw1+SGtRb+Cc5PMltSfbr6xySpLnZqq8DV9U/J3lmX8eXJM2dwz6S1KDeev6bkmQFsAJg2bJloypjUdlr5ZW9HPe2Uw9b0PPNRl81zHTc2bTFTNuOm9m08SPlMc+H2TyHxrGNR9bzr6pVVTVRVRNLly4dVRmS1CSHfSSpQYa/JDWo1zH/qnp3n8eXJM2NPX9JapDhL0kNMvwlqUGGvyQ1yPCXpAYZ/pLUIMNfkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8kNcjwl6QGGf6S1CDDX5IaZPhLUoMMf0lqkOEvSQ0y/CWpQYa/JDXI8JekBhn+ktQgw1+SGmT4S1KDDH9JapDhL0kNMvwlqUGGvyQ1yPCXpAYZ/pLUIMNfkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8kNai38E9ySpIvJvlCkgP6Oo8kafa26uOgSQ4BllTVQUl2BC5P8odVdV8f55MkzU5fPf9DgI8AVNXdwBeA3+npXJKkWUpVzf9Bk/OBk6pqXXf7OOD2qvrclG1WACu6m08DvjHvhSycxwB3jLqIORrn2sH6R836R+vJVfWouezYy7APcCewFFjX3d4d+M+pG1TVKmAVQJI1VTXRUy29G+f6x7l2sP5Rs/7RSrJmrvv2Nezzb8DRAN2Y/0HA13s6lyRplnrp+VfV55McmuQL3ap3+mavJC0efQ37UFUrZ7H5qr7qWCDjXP841w7WP2rWP1pzrr+XN3wlSYubV/hKUoMMf0lq0EjDf1yngEhyWJKbk6xOcmmSpyS5rrv9d0m2HnWNUyU5PMltSfbrbk9bb5K3JLkhyfVJlo+26gdtWH+3bvWU
n8O6dYvy+ZTkzK7Om5K8ZAzb/yH1d+vGov2TLElySVf7jUmeN07tP1393frNb/+qGskPg6uAz+6Wd2Tw8dCtR1XPLGt/N/D0KbevAh7fLf8ZcNyoa5yh5v1mqhd4InAZEGAb4Bpg11HXPUP92wCfHIfnE7AceFe3vAS4cZzaf4b6x6n9fxs4oFt+CnDumLX/dPXPS/uPsuc/zlNATABnJLk6yTOAn1fVd7r7VgGHjq60jUuyDdPX+wfARTXwK+ATwPNHVOamPAPYv+v1vKdbt1ifT7cA5wFU1S+A/2O82n/D+r8LPJMxaf+qurWqbkpyBrAG+Bhj1P7T1P9R5un5P8rw3xX44ZTbPwB2G1Ets3V0Vb0IOBY4g8EvNAA1uJ6ht4/QzoNdmb7ecfr/+Cbw1Ko6GNgiyR+xSOuvqpur6k6AJC8H/oMxav9p6v8S8D+MSftPqqoTGYTmqUyZzmGxt/+kKfWfxjw9/0cZ/pNTQEzanSm/FItZVd3R/ftd4KcMagegGz98YESlDeNOBvOZAA+pd2z+P6rqJ/XgRYOXMOiJLur6k7wAOBA4kzFs/8n6q+rscWr/JPt2f+1SVbcA3wb2nnL/om7/aer/FrDtfLT/KMN/LKeASPKmJK/tlncHdgC2TfLYbpNjgWtHVd+mdH/Sbj9NvdcBfwK/Hhp6NbB6FDVuTJItknw+ybbdqj8G1rKIn09JDgJeBvzlOLb/1PrHsP0PAl4HkGQn4PHAPWPU/hvWvyfwyflo/5ENT9T4TgFxEXBxkmMY9BhOAAr4hyRbADd36xazlWxQb1Xdl+Tfk1zfbXNmVd01uhKnV1Xrk3wIWJ3kPuDGqrocYDE+n5IcCHwG+G/guiQAb2FM2n/D+rvVlzMm7Q9cCFyQ5ChgPfAO4JeMSfszff2/xTy0v1f4SlKDvMhLkhpk+EtSgwx/SWqQ4S9JDTL8JalBhr8WXJIdk1yTZE2Sv+0+cjfWknys+xx2X8ffK8nZ3fLRi2XiMY2vsf+l01g6BLisBl+cfRWDCcMkLaDFPAeNHrm+yODClauq6srJlUmOBI5kMP3BD4CzquorSXYDzmFwyfp64NPA3lV1QpIrqmp5t/9ODGY2PDrJ3sCfA08CfgVcVVXnJjmawUUyz2UwH8r13bwpJNkTOL1bv4TBFZ7vAn4TeDuDWRUD3AC8v6rWb+xBJjkEeD3wWOAu4ILu4saPdcd4FbAzcHpVXdL9BfReBldnPgB8Fnhh1yaXAHskWQd8D3hqkuOAXaY+BmlY9vy14Lq5kVYA78uD88M/BziJQcBvBezBIAgBPgyc0U1kdSiDS9w35QLgacC2wKOA45I8rbvvAOCIqno2sHuSJ3brzwVWVtUhVfUc4Ppu37MZzOS6BNgOOAJ40cZO3r2QnAUsA7Zm8AJySrpLfIHdq+pQ4DnAm7p1bwV+WFUHVdULGMxDs1tV/YzBC8Wnq+p9Ux7D8u4xPHbKY5CGYs9fI1FV67pe+MVJbmUQZiur6nOT2+TBL8XZtqrWdPutT7KKwRQJG9qu229X4PtV9aqpx+ou4Z8APl5V93Z3fQV4XJK7gB9U1a1Tarxq8njdC89DjrWJhzgBnFtV522wX3X5f2F3jl9154bBXyNHTjn/PyV58wzHn/oY1gKPA/53EzVJv2bPXwsuycoke9RgbpHbGQzNfBl42+SEVUkOBU7udrk3yf7d+i0YfPnGpJ2mvEj8KUA3BfG+kz39JDsDV0zOjgjcM2X/9cAW3TwuT5jag87gG9t2BNZ39ZDkN4BPdUNRG/NfwDHd/iR5OoO54yfdM80+axhMKDZ5/pcyGBaazsMewybqkR7Cnr9G4Qrg77vQ/iZwdVU9kOTTDCY/ux+4FTi+2/4twDld4BaDL9qYHMI5D7gpyd0MxsUnw/s1wFlTZj98e9fL3lhdxwJndu8dbAH8C4M3pN8IfCjJSQx+Z06uqummy/1sV/u3q+oNSf4GuLJb9yPgmE20y+kMviToOAbv
U1wLfL+77w7g4CQv3sQxpKE4sZvGTpK9GMzEuNhnT52VJIcD35sc4ur+2nldVf3FaCvTI5E9f2nx+FfgtCTHMxjKKcBP8agX9vwlqUG+SSRJDTL8JalBhr8kNcjwl6QGGf6S1KD/Bzm63nMDv8uzAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# 画图\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "plt.hist(numWords, 50)\n",
    "plt.xlabel('Sequence Length')\n",
    "plt.ylabel('Frequency')\n",
    "plt.axis([0, 350, 0, 5])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 综上图示，最大句子长度设置为150\n",
    "maxSeqLength = 150"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Working-class romantic drama from director Martin Ritt is as unbelievable as they come, yet there are moments of pleasure due mostly to the charisma of stars Jane Fonda and Robert De Niro (both terrific). She's a widow who can't move on, he's illiterate and a closet-inventor--you can probably guess the rest. Adaptation of Pat Barker's novel \"Union Street\" (a better title!) is so laid-back it verges on bland, and the film's editing is a mess, but it's still pleasant; a rosy-hued blue-collar fantasy. There are no overtures to serious issues (even the illiteracy angle is just a plot-tool for the ensuing love story) and no real fireworks, though the characters are intentionally a bit colorless and the leads are toned down to an interesting degree. The finale is pure fluff--and cynics will find it difficult to swallow--though these two characters deserve a happy ending and the picture wouldn't really be satisfying any other way. *** from ****\n",
      "\n",
      "workingclass romantic drama from director martin ritt is as unbelievable as they come yet there are moments of pleasure due mostly to the charisma of stars jane fonda and robert de niro both terrific shes a widow who cant move on hes illiterate and a closetinventoryou can probably guess the rest adaptation of pat barkers novel union street a better title is so laidback it verges on bland and the films editing is a mess but its still pleasant a rosyhued bluecollar fantasy there are no overtures to serious issues even the illiteracy angle is just a plottool for the ensuing love story and no real fireworks though the characters are intentionally a bit colorless and the leads are toned down to an interesting degree the finale is pure fluffand cynics will find it difficult to swallowthough these two characters deserve a happy ending and the picture wouldnt really be satisfying any other way  from \n",
      "\n",
      "\n",
      "\n",
      "[399999   5845   2692     25    369   1398  98521     14     19  11609\n",
      "     19     39    326    553     63     32   4039      3   7794    445\n",
      "   1246      4 201534  19116      3   1569   4917  21336      5    893\n",
      "    342  24458    150  11025 173886      7   6649     38  52717    483\n",
      "     13  69407  22489      5      7 399999     86    965   5020 201534\n",
      "   1033   7175      3   4672 116167   1999    354    491      7    439\n",
      "    698     14    100 137903     20  52467     13  18014      5 201534\n",
      "   1588   9742     14      7   7562     34     47    149   8829      7\n",
      " 399999 399999   5847     63     32     84  20018      4   1034    615\n",
      "    151 201534  27422   7582     14    120      7 399999     10 201534\n",
      "  10379    835    523      5     84    567   9239    413 201534   2153\n",
      "     32  11220      7   1594  38133      5 201534   2390     32  22513\n",
      "    135      4     29   4001   1714 201534   8462     14   5744 399999\n",
      "  45502     43    596     20   1013      4 399999    158     55   2153\n",
      "   7763      7   1751   1945      5 201534   1835 255441    588     30]\n"
     ]
    }
   ],
   "source": [
    "# 例子三：将单个文件中的文本转换成索引矩阵\n",
    "# 1 读取\n",
    "fname = positiveFiles[3]\n",
    "with open(fname) as f:\n",
    "    for lines in f:\n",
    "        print(lines, end=\"\\n\\n\")\n",
    "        exit\n",
    "\n",
    "\n",
    "# 2 清洗\n",
    "import re\n",
    "strip_special_chars = re.compile(\"[^A-Za-z0-9 ]+\")\n",
    "\n",
    "def cleanSentences(string):\n",
    "    string = string.lower().replace(\"<br />\", \" \")\n",
    "    return re.sub(strip_special_chars, \"\", string.lower())\n",
    "\n",
    "\n",
    "\n",
    "# 3 转换成索引\n",
    "firstFile = np.zeros((maxSeqLength), dtype='int32')\n",
    "\n",
    "with open(fname) as f:\n",
    "    indexCounter = 0\n",
    "    line = f.readline()\n",
    "    cleanedLine = cleanSentences(line)\n",
    "    print(cleanedLine)\n",
    "    \n",
    "    split = cleanedLine.split()\n",
    "    for word in split:\n",
    "        try:\n",
    "            firstFile[indexCounter] = wordsList.index(word)\n",
    "        except ValueError:\n",
    "            firstFile[indexCounter] = 399999 #Vector for unknown words\n",
    "        indexCounter = indexCounter + 1\n",
    "        if indexCounter >= maxSeqLength:\n",
    "               break\n",
    "\n",
    "print(\"\\n\\n\")\n",
    "print(firstFile)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "done !\n"
     ]
    }
   ],
   "source": [
    "#  处理所有文件，得到30*150的向量\n",
    "ids = np.zeros((numFiles, maxSeqLength), dtype='int32')\n",
    "fileCounter = 0\n",
    "\n",
    "# 正向\n",
    "for pf in positiveFiles:\n",
    "    with open(pf, \"r\") as f:\n",
    "        indexCounter = 0\n",
    "        line=f.readline()\n",
    "        cleanedLine = cleanSentences(line)\n",
    "        split = cleanedLine.split()\n",
    "        for word in split:\n",
    "            try:\n",
    "                ids[fileCounter][indexCounter] = wordsList.index(word)\n",
    "            except ValueError:\n",
    "                ids[fileCounter][indexCounter] = 399999 #Vector for unkown words\n",
    "            indexCounter = indexCounter + 1\n",
    "            if indexCounter >= maxSeqLength:\n",
    "                break\n",
    "        fileCounter = fileCounter + 1 \n",
    "\n",
    "\n",
    "# 负向\n",
    "for nf in negativeFiles:\n",
    "    with open(nf, \"r\") as f:\n",
    "        indexCounter = 0\n",
    "        line=f.readline()\n",
    "        cleanedLine = cleanSentences(line)\n",
    "        split = cleanedLine.split()\n",
    "        for word in split:\n",
    "            try:\n",
    "                ids[fileCounter][indexCounter] = wordsList.index(word)\n",
    "            except ValueError:\n",
    "                ids[fileCounter][indexCounter] = 399999 #Vector for unkown words\n",
    "            indexCounter = indexCounter + 1\n",
    "            if indexCounter >= maxSeqLength:\n",
    "                break\n",
    "        fileCounter = fileCounter + 1 \n",
    "# Pass into embedding function and see if it evaluates. \n",
    "\n",
    "np.save('./data/VI/tinyIdsMatrix', ids)\n",
    "print(\"done !\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 辅助函数，数据分批\n",
    "from random import randint\n",
    "\n",
    "def getTrainBatch():\n",
    "    labels = []\n",
    "    arr = np.zeros([batchSize, maxSeqLength])\n",
    "    for i in range(batchSize):\n",
    "        if (i % 2 == 0): \n",
    "            num = randint(1,10)\n",
    "            labels.append([1,0])\n",
    "        else:\n",
    "            num = randint(21,30)\n",
    "            labels.append([0,1])\n",
    "        arr[i] = ids[num-1:num]\n",
    "    return arr, labels\n",
    "\n",
    "def getTestBatch():\n",
    "    labels = []\n",
    "    arr = np.zeros([batchSize, maxSeqLength])\n",
    "    for i in range(batchSize):\n",
    "        num = randint(11,20)\n",
    "        if (num <= 15):\n",
    "            labels.append([1,0])\n",
    "        else:\n",
    "            labels.append([0,1])\n",
    "        arr[i] = ids[num-1:num]\n",
    "    return arr, labels"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    两个占位符，数据输入和标签数据。其中，数据输入占位符，是一个整数化的索引数组。标签占位符则代表一组值，每一个值都为 [1,0] 或者 [0,1]，这个取决于数据是正向的还是负向的。之后调用 tf.nn.embedding_lookup() 函数来得到词向量，该函数最后将返回一个三维向量，第一个维度是批处理大小，第二个维度是句子长度，第三个维度是词向量长度。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 构建模型\n",
    "# 参数设置\n",
    "batchSize = 24\n",
    "lstmUnits = 64\n",
    "numClasses = 2\n",
    "iterations = 500\n",
    "\n",
    "tf.reset_default_graph()\n",
    "\n",
    "# 占位符\n",
    "labels = tf.placeholder(tf.float32, [batchSize, numClasses])\n",
    "input_data = tf.placeholder(tf.int32, [batchSize, maxSeqLength])\n",
    "\n",
    "data = tf.Variable(tf.zeros([batchSize, maxSeqLength, numDimensions]),dtype=tf.float32)\n",
    "data = tf.nn.embedding_lookup(wordVectors,input_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    构造好数据形式后，将数据输入到LSTM中，利用 tf.nn.rnn_cell.BasicLSTMCell 函数，构造LSTM单元。之后将LSTM cel 和三维的数据输入到 tf.nn.dynamic_rnn中展开整个网络，构建RNN模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n",
      "WARNING:tensorflow:From <ipython-input-13-db6a6fc2c55e>:1: BasicLSTMCell.__init__ (from tensorflow.python.ops.rnn_cell_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "This class is equivalent as tf.keras.layers.LSTMCell, and will be replaced by that in Tensorflow 2.0.\n",
      "WARNING:tensorflow:From <ipython-input-13-db6a6fc2c55e>:3: dynamic_rnn (from tensorflow.python.ops.rnn) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `keras.layers.RNN(cell)`, which is equivalent to this API\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/rnn_cell_impl.py:1259: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
     ]
    }
   ],
   "source": [
    "lstmCell = tf.contrib.rnn.BasicLSTMCell(lstmUnits)\n",
    "lstmCell = tf.contrib.rnn.DropoutWrapper(cell=lstmCell, output_keep_prob=0.75)\n",
    "value, _ = tf.nn.dynamic_rnn(lstmCell, data, dtype=tf.float32)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    由于堆栈的LSTM是一个比较好的网络架构，也就是前一个LSTM隐藏层的输出是下一个LSTM的输入，这样模型可以记住更多的上下文信息，但其训练参数会增多使得训练时间变长，过拟合的几率也会增加。具体而言，dynamicRNN函数的第一个输出，可以被认为是最后的隐藏状态向量，以及这个向量将被重新调整维度，然后乘以最后的权重矩阵和一个偏置项来获得最终的输出值。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From <ipython-input-14-8153406b5ed1>:15: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "\n",
      "Future major versions of TensorFlow will allow gradients to flow\n",
      "into the labels input on backprop by default.\n",
      "\n",
      "See `tf.nn.softmax_cross_entropy_with_logits_v2`.\n",
      "\n",
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/array_grad.py:425: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.cast instead.\n"
     ]
    }
   ],
   "source": [
    "# 参数初始化\n",
    "weight = tf.Variable(tf.truncated_normal([lstmUnits, numClasses]))\n",
    "bias = tf.Variable(tf.constant(0.1, shape=[numClasses]))\n",
    "value = tf.transpose(value, [1, 0, 2])\n",
    "\n",
    "# 取最终的结果值\n",
    "last = tf.gather(value, int(value.get_shape()[0]) - 1)\n",
    "prediction = (tf.matmul(last, weight) + bias)\n",
    "\n",
    "# 评测\n",
    "correctPred = tf.equal(tf.argmax(prediction,1), tf.argmax(labels,1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correctPred, tf.float32))\n",
    "\n",
    "# 损失函数和优化器\n",
    "loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=labels))\n",
    "optimizer = tf.train.AdamOptimizer().minimize(loss)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "iteration 101/500... loss 0.031875234097242355... accuracy 1.0...\n",
      "saved to ./model/VI/pretrained_lstm.ckpt-100\n",
      "iteration 201/500... loss 0.0007557172211818397... accuracy 1.0...\n",
      "saved to ./model/VI/pretrained_lstm.ckpt-200\n",
      "iteration 301/500... loss 0.0018739145016297698... accuracy 1.0...\n",
      "saved to ./model/VI/pretrained_lstm.ckpt-300\n",
      "iteration 401/500... loss 0.0003018965944647789... accuracy 1.0...\n",
      "saved to ./model/VI/pretrained_lstm.ckpt-400\n"
     ]
    }
   ],
   "source": [
    "# 训练\n",
    "sess = tf.InteractiveSession()\n",
    "saver = tf.train.Saver()\n",
    "sess.run(tf.global_variables_initializer())\n",
    "\n",
    "\n",
    "for i in range(iterations):\n",
    "    # Next Batch of reviews\n",
    "    nextBatch, nextBatchLabels = getTrainBatch();\n",
    "    sess.run(optimizer, {input_data: nextBatch, labels: nextBatchLabels}) \n",
    "    \n",
    "    if (i % 100 == 0 and i != 0):\n",
    "        loss_ = sess.run(loss, {input_data: nextBatch, labels: nextBatchLabels})\n",
    "        accuracy_ = sess.run(accuracy, {input_data: nextBatch, labels: nextBatchLabels})\n",
    "        \n",
    "        print(\"iteration {}/{}...\".format(i+1, iterations),\n",
    "              \"loss {}...\".format(loss_),\n",
    "              \"accuracy {}...\".format(accuracy_))    \n",
    "    \n",
    "    # Save the network every 100 training iterations\n",
    "    if (i % 100 == 0 and i != 0):\n",
    "        save_path = saver.save(sess, \"./model/VI/pretrained_lstm.ckpt\", global_step=i)\n",
    "        print(\"saved to %s\" % save_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    导入一个预训练的模型需要使用TensorFlow的会话函数Server，之后利用该会话函数来调用restore函数，该这个函数包括两个参数，一个表示当前的会话，另一个表示保存的模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n",
      "INFO:tensorflow:Restoring parameters from ./model/VI/pretrained_lstm.ckpt-400\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/apple/anaconda3/lib/python3.7/site-packages/tensorflow/python/client/session.py:1702: UserWarning: An interactive session is already active. This can cause out-of-memory errors in some cases. You must explicitly call `InteractiveSession.close()` to release resources held by the other session(s).\n",
      "  warnings.warn('An interactive session is already active. This can '\n"
     ]
    }
   ],
   "source": [
    "sess = tf.InteractiveSession()\n",
    "saver = tf.train.Saver()\n",
    "saver.restore(sess, tf.train.latest_checkpoint('./model/VI/'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy for this batch: 75.0\n",
      "Accuracy for this batch: 83.33333134651184\n",
      "Accuracy for this batch: 70.83333134651184\n",
      "Accuracy for this batch: 83.33333134651184\n",
      "Accuracy for this batch: 66.66666865348816\n",
      "Accuracy for this batch: 87.5\n",
      "Accuracy for this batch: 70.83333134651184\n",
      "Accuracy for this batch: 79.16666865348816\n",
      "Accuracy for this batch: 83.33333134651184\n",
      "Accuracy for this batch: 83.33333134651184\n"
     ]
    }
   ],
   "source": [
    "# 测试\n",
    "iterations = 10\n",
    "for i in range(iterations):\n",
    "    nextBatch, nextBatchLabels = getTestBatch();\n",
    "    print(\"Accuracy for this batch:\", (sess.run(accuracy, {input_data: nextBatch, labels: nextBatchLabels})) * 100)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    由于这里只拿了20条数据训练，若拿2.3w数据训练的话，正确率在92%左右"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.2 SiameseLSTM - 文本相似度\n",
    "\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "    请查看code/VI/中的代码"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
