{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.5 64-bit ('base': conda)",
   "metadata": {
    "interpreter": {
     "hash": "aa9e82663741a35949d10b71616b7da32b0b1a8a92bded1e278bf973221dadc2"
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from tensorflow.keras import layers, Sequential, datasets, preprocessing, Model, optimizers, losses"
   ]
  },
  {
   "source": [
     "## Embedding"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = tf.range(10) # [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
    "x = tf.random.shuffle(x) # [7, 8, 6, 2, 5, 3, 0, 9, 1, 4]\n",
    "\n",
    "net = layers.Embedding(10, 4)\n",
    "out = net(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[<tf.Variable 'kernel:0' shape=(4, 3) dtype=float32, numpy=\n",
       " array([[-0.15055227,  0.35227942, -0.88685304],\n",
       "        [ 0.6489284 , -0.5729007 , -0.4817061 ],\n",
       "        [ 0.0089348 , -0.10512948,  0.10662079],\n",
       "        [ 0.06818122,  0.089059  , -0.24246407]], dtype=float32)>,\n",
       " <tf.Variable 'recurrent_kernel:0' shape=(3, 3) dtype=float32, numpy=\n",
       " array([[-0.95574176,  0.15597728, -0.24945676],\n",
       "        [ 0.10088807,  0.97023976,  0.22012803],\n",
       "        [-0.27636787, -0.18521835,  0.943035  ]], dtype=float32)>,\n",
       " <tf.Variable 'bias:0' shape=(3,) dtype=float32, numpy=array([0., 0., 0.], dtype=float32)>]"
      ]
     },
     "metadata": {},
     "execution_count": 2
    }
   ],
   "source": [
    "cell = layers.SimpleRNNCell(3)\n",
    "cell.build(input_shape=(None, 4))\n",
    "cell.trainable_variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "(4, 100)\n(4, 64)\n"
     ]
    }
   ],
   "source": [
    "h0 = [tf.zeros([4, 64])]\n",
    "x = tf.random.normal([4, 80, 100])\n",
    "xt = x[:, 0, :]\n",
    "print(xt.shape)\n",
    "cell = layers.SimpleRNNCell(64)\n",
    "out, h1 = cell(xt, h0)\n",
    "print(out.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "h = h0\n",
    "for xt in tf.unstack(x, axis=1):\n",
    "    out, h = cell(xt, h)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 多层网络\n",
    "x = tf.random.normal([4, 80, 100])\n",
    "xt = x[:, 0, :]\n",
    "cell0 = layers.SimpleRNNCell(64)\n",
    "cell1 = layers.SimpleRNNCell(64)\n",
    "h0 = [tf.zeros([4, 64])]\n",
    "h1 = [tf.zeros([4, 64])]\n",
    "\n",
    "\n",
    "for xt in tf.unstack(x, axis=1):\n",
    "    out0, h0 = cell0(xt, h0)\n",
    "    out1, h1 = cell1(out0, h1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# SimpleRNN\n",
    "layer = layers.SimpleRNN(64, return_sequences=True)\n",
    "out = layer(x)"
   ]
  },
  {
   "source": [
    "### IMDB 影评数据 情感分类"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "batchsz = 128\n",
    "total_words = 10000\n",
    "max_review_len = 80\n",
    "embedding_len = 100\n",
    "\n",
    "(x_train, y_train), (x_test, y_test) = datasets.imdb.load_data(num_words=total_words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "word_index = datasets.imdb.get_word_index()\n",
    "word_index = {k: (v+3) for k, v in word_index.items()}\n",
    "word_index[\"<PAD>\"] = 0\n",
    "word_index[\"<START>\"] = 1\n",
    "word_index[\"<UNK>\"] = 2\n",
    "word_index[\"<UNUSED>\"] = 3\n",
    "reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "def decode_review(text):\n",
    "    return \" \".join([reverse_word_index.get(i, \"?\") for i in text])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n",
    "db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)\n",
    "db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n",
    "db_test = db_test.batch(batchsz, drop_remainder=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyRNN(Model):\n",
    "    # Cell 方式构建多层网络\n",
    "    def __init__(self, units):\n",
    "        super(MyRNN, self).__init__()\n",
    "        # [b, 64]，构建 Cell 初始化状态向量，重复使用\n",
    "        self.state0 = [tf.zeros([batchsz, units])]\n",
    "        self.state1 = [tf.zeros([batchsz, units])]\n",
    "        # 词向量编码 [b, 80] => [b, 80, 100]\n",
    "        self.embedding = layers.Embedding(total_words, embedding_len,\n",
    "        input_length=max_review_len)\n",
    "        # 构建 2 个 Cell，使用 dropout 技术防止过拟合\n",
    "        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)\n",
    "        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)\n",
    "        # 构建分类网络，用于将 CELL 的输出特征进行分类， 2 分类\n",
    "        # [b, 80, 100] => [b, 64] => [b, 1]\n",
    "        self.outlayer = layers.Dense(1)\n",
    "\n",
    "    def call(self, inputs, training=None):\n",
    "        x = inputs # [b, 80]\n",
    "        # 获取词向量: [b, 80] => [b, 80, 100]\n",
    "        x = self.embedding(x)\n",
    "        # 通过 2 个 RNN CELL,[b, 80, 100] => [b, 64]\n",
    "        state0 = self.state0\n",
    "        state1 = self.state1\n",
    "        for word in tf.unstack(x, axis=1): # word: [b, 100]\n",
    "            out0, state0 = self.rnn_cell0(word, state0, training)\n",
    "            out1, state1 = self.rnn_cell1(out0, state1, training)\n",
    "        # 末层最后一个输出作为分类网络的输入: [b, 64] => [b, 1]\n",
    "        x = self.outlayer(out1)\n",
    "        # 通过激活函数， p(y is pos|x)\n",
    "        prob = tf.sigmoid(x)\n",
    "        return prob  \n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "<BatchDataset shapes: ((128, 80), (128,)), types: (tf.int32, tf.int64)>"
      ]
     },
     "metadata": {},
     "execution_count": 55
    }
   ],
   "source": [
    "db_train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Epoch 1/20\n",
      "195/195 [==============================] - 27s 94ms/step - loss: 0.7060 - accuracy: 0.5075 - val_loss: 0.6764 - val_accuracy: 0.5794\n",
      "Epoch 2/20\n",
      "195/195 [==============================] - 17s 88ms/step - loss: 0.6505 - accuracy: 0.6158 - val_loss: 0.4393 - val_accuracy: 0.7995\n",
      "Epoch 3/20\n",
      "195/195 [==============================] - 16s 80ms/step - loss: 0.3844 - accuracy: 0.8337 - val_loss: 0.4399 - val_accuracy: 0.8241\n",
      "Epoch 4/20\n",
      "195/195 [==============================] - 16s 80ms/step - loss: 0.2666 - accuracy: 0.8930 - val_loss: 0.5058 - val_accuracy: 0.8143\n",
      "Epoch 5/20\n",
      "195/195 [==============================] - 17s 86ms/step - loss: 0.1825 - accuracy: 0.9303 - val_loss: 0.5693 - val_accuracy: 0.8084\n",
      "Epoch 6/20\n",
      "195/195 [==============================] - 15s 72ms/step - loss: 0.1295 - accuracy: 0.9524 - val_loss: 0.6392 - val_accuracy: 0.8143\n",
      "Epoch 7/20\n",
      "195/195 [==============================] - 12s 62ms/step - loss: 0.1028 - accuracy: 0.9642 - val_loss: 0.7337 - val_accuracy: 0.8051\n",
      "Epoch 8/20\n",
      "195/195 [==============================] - 12s 63ms/step - loss: 0.0837 - accuracy: 0.9693 - val_loss: 0.7367 - val_accuracy: 0.8029\n",
      "Epoch 9/20\n",
      "195/195 [==============================] - 12s 64ms/step - loss: 0.0628 - accuracy: 0.9780 - val_loss: 0.7445 - val_accuracy: 0.8094\n",
      "Epoch 10/20\n",
      "195/195 [==============================] - 12s 63ms/step - loss: 0.0563 - accuracy: 0.9813 - val_loss: 0.8108 - val_accuracy: 0.8006\n",
      "Epoch 11/20\n",
      "195/195 [==============================] - 13s 66ms/step - loss: 0.0475 - accuracy: 0.9833 - val_loss: 0.8481 - val_accuracy: 0.7808\n",
      "Epoch 12/20\n",
      "195/195 [==============================] - 12s 64ms/step - loss: 0.0471 - accuracy: 0.9826 - val_loss: 0.8664 - val_accuracy: 0.7766\n",
      "Epoch 13/20\n",
      "195/195 [==============================] - 13s 65ms/step - loss: 0.0761 - accuracy: 0.9722 - val_loss: 0.9647 - val_accuracy: 0.8088\n",
      "Epoch 14/20\n",
      "195/195 [==============================] - 13s 65ms/step - loss: 0.0561 - accuracy: 0.9788 - val_loss: 0.8470 - val_accuracy: 0.8000\n",
      "Epoch 15/20\n",
      "195/195 [==============================] - 18s 90ms/step - loss: 0.0378 - accuracy: 0.9874 - val_loss: 0.9212 - val_accuracy: 0.8126\n",
      "Epoch 16/20\n",
      "195/195 [==============================] - 13s 68ms/step - loss: 0.0360 - accuracy: 0.9870 - val_loss: 0.9595 - val_accuracy: 0.8111\n",
      "Epoch 17/20\n",
      "195/195 [==============================] - 15s 79ms/step - loss: 0.0806 - accuracy: 0.9720 - val_loss: 0.8537 - val_accuracy: 0.7856\n",
      "Epoch 18/20\n",
      "195/195 [==============================] - 17s 88ms/step - loss: 0.0449 - accuracy: 0.9839 - val_loss: 0.8907 - val_accuracy: 0.8029\n",
      "Epoch 19/20\n",
      "195/195 [==============================] - 14s 72ms/step - loss: 0.0415 - accuracy: 0.9862 - val_loss: 0.9156 - val_accuracy: 0.7967\n",
      "Epoch 20/20\n",
      "195/195 [==============================] - 16s 83ms/step - loss: 0.0301 - accuracy: 0.9891 - val_loss: 0.9275 - val_accuracy: 0.8081\n",
      "195/195 [==============================] - 3s 16ms/step - loss: 0.9275 - accuracy: 0.8081\n"
     ]
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[0.9274718165397644, 0.8080528974533081]"
      ]
     },
     "metadata": {},
     "execution_count": 68
    }
   ],
   "source": [
    "units = 64 # RNN 状态向量长度 n\n",
    "epochs = 20 # 训练 epochs\n",
    "model = MyRNN(units) # 创建模型\n",
    "# 装配\n",
    "model.compile(optimizer = optimizers.Adam(0.001),\n",
    "loss = losses.BinaryCrossentropy(),\n",
    "metrics=['accuracy'])\n",
    "# 训练和验证\n",
    "model.fit(db_train, epochs=epochs, validation_data=db_test)\n",
    "# 测试\n",
    "model.evaluate(db_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Help on method compile in module tensorflow.python.keras.engine.training:\n\ncompile(optimizer='rmsprop', loss=None, metrics=None, loss_weights=None, weighted_metrics=None, run_eagerly=None, steps_per_execution=None, **kwargs) method of __main__.MyRNN instance\n    Configures the model for training.\n    \n    Arguments:\n        optimizer: String (name of optimizer) or optimizer instance. See\n          `tf.keras.optimizers`.\n        loss: String (name of objective function), objective function or\n          `tf.keras.losses.Loss` instance. See `tf.keras.losses`. An objective\n          function is any callable with the signature `loss = fn(y_true,\n          y_pred)`, where y_true = ground truth values with shape =\n          `[batch_size, d0, .. dN]`, except sparse loss functions such as sparse\n          categorical crossentropy where shape = `[batch_size, d0, .. dN-1]`.\n          y_pred = predicted values with shape = `[batch_size, d0, .. dN]`. It\n          returns a weighted loss float tensor. If a custom `Loss` instance is\n          used and reduction is set to NONE, return value has the shape\n          [batch_size, d0, .. dN-1] ie. per-sample or per-timestep loss values;\n          otherwise, it is a scalar. If the model has multiple outputs, you can\n          use a different loss on each output by passing a dictionary or a list\n          of losses. The loss value that will be minimized by the model will\n          then be the sum of all individual losses.\n        metrics: List of metrics to be evaluated by the model during training\n          and testing. Each of this can be a string (name of a built-in\n          function), function or a `tf.keras.metrics.Metric` instance. See\n          `tf.keras.metrics`. Typically you will use `metrics=['accuracy']`. A\n          function is any callable with the signature `result = fn(y_true,\n          y_pred)`. 
To specify different metrics for different outputs of a\n          multi-output model, you could also pass a dictionary, such as\n            `metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.\n              You can also pass a list (len = len(outputs)) of lists of metrics\n              such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or\n              `metrics=['accuracy', ['accuracy', 'mse']]`. When you pass the\n              strings 'accuracy' or 'acc', we convert this to one of\n              `tf.keras.metrics.BinaryAccuracy`,\n              `tf.keras.metrics.CategoricalAccuracy`,\n              `tf.keras.metrics.SparseCategoricalAccuracy` based on the loss\n              function used and the model output shape. We do a similar\n              conversion for the strings 'crossentropy' and 'ce' as well.\n        loss_weights: Optional list or dictionary specifying scalar coefficients\n          (Python floats) to weight the loss contributions of different model\n          outputs. The loss value that will be minimized by the model will then\n          be the *weighted sum* of all individual losses, weighted by the\n          `loss_weights` coefficients.\n            If a list, it is expected to have a 1:1 mapping to the model's\n              outputs. If a dict, it is expected to map output names (strings)\n              to scalar coefficients.\n        weighted_metrics: List of metrics to be evaluated and weighted by\n          sample_weight or class_weight during training and testing.\n        run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s\n          logic will not be wrapped in a `tf.function`. Recommended to leave\n          this as `None` unless your `Model` cannot be run inside a\n          `tf.function`.\n        steps_per_execution: Int. Defaults to 1. The number of batches to\n          run during each `tf.function` call. 
Running multiple batches\n          inside a single `tf.function` call can greatly improve performance\n          on TPUs or small models with a large Python overhead.\n          At most, one full epoch will be run each\n          execution. If a number larger than the size of the epoch is passed,\n          the execution will be truncated to the size of the epoch.\n          Note that if `steps_per_execution` is set to `N`,\n          `Callback.on_batch_begin` and `Callback.on_batch_end` methods\n          will only be called every `N` batches\n          (i.e. before/after each `tf.function` execution).\n        **kwargs: Arguments supported for backwards compatibility only.\n    \n    Raises:\n        ValueError: In case of invalid arguments for\n            `optimizer`, `loss` or `metrics`.\n\n"
     ]
    }
   ],
   "source": [
    "help(model.compile)"
   ]
  }
 ]
}