{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T06:03:08.784259Z",
     "start_time": "2020-05-07T06:03:08.777541Z"
    },
    "lines_to_next_cell": 2
   },
   "outputs": [],
   "source": [
    "import copy\n",
    "import json\n",
    "import math\n",
    "import six\n",
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T05:58:45.251364Z",
     "start_time": "2020-05-07T05:58:45.203526Z"
    },
    "code_folding": [
     0
    ]
   },
   "outputs": [],
   "source": [
    "from official.modeling import tf_utils"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BERT模型\n",
    "`BERT`模型基于`Transformer`的编码器，由12层或更多的`EncoderLayer`组成，如下图所示，其中输入数据在模型中的维度变化如图右所示：\n",
    "<img src=\"../images/bert结构.png\" width=\"80%\">"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T08:31:38.165389Z",
     "start_time": "2020-05-07T08:31:37.390440Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Calling BertTokenizer.from_pretrained() with the path to a single file or url is deprecated\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "110106428\n"
     ]
    }
   ],
   "source": [
    "# huggingface 的 bert 模型\n",
    "from transformers import BertTokenizer, TFBertForPreTraining\n",
    "\n",
    "tokenizer = BertTokenizer.from_pretrained(\n",
    "    '../models/bert/vocabulary.txt',  # 从保存有词汇表的本地文件载入\n",
    "    do_lower_case=True)\n",
    "\n",
    "model = TFBertForPreTraining.from_pretrained(\n",
    "    \"../../H/models/huggingface/bert-base-uncased/\")\n",
    "\n",
    "# 模型所有的参数，以 list 的形式\n",
    "params = model.weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T08:31:41.499831Z",
     "start_time": "2020-05-07T08:31:41.487456Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"tf_bert_for_pre_training_1\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "bert (TFBertMainLayer)       multiple                  109482240 \n",
      "_________________________________________________________________\n",
      "nsp___cls (TFBertNSPHead)    multiple                  1538      \n",
      "_________________________________________________________________\n",
      "mlm___cls (TFBertMLMHead)    multiple                  24459834  \n",
      "=================================================================\n",
      "Total params: 110,106,428\n",
      "Trainable params: 110,106,428\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型配置\n",
    "创建模型时需要指定的参数：\n",
    "- `vocab_size`, 词汇表的大小，用于词嵌入矩阵\n",
    "- `hidden_size=768`, 编码层的尺寸\n",
    "- `num_hidden_layers=12`, 编码层的层数\n",
    "- `num_attention_heads=12`, 多头注意力的头数\n",
    "- `intermediate_size=3072`, 编码器中前向层的尺寸\n",
    "- `hidden_act=\"gelu\"`, 编码器中激活函数\n",
    "- `hidden_dropout_prob=0.1`,编码器中全连接层的 dropout\n",
    "- `attention_probs_dropout_prob=0.1`, 注意力中的 dropout\n",
    "- `max_position_embeddings=512`, 最大位置数\n",
    "- `type_vocab_size=16`，文本为两个句子时的单词属于哪个句子"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T05:59:03.849003Z",
     "start_time": "2020-05-07T05:59:03.840475Z"
    }
   },
   "outputs": [],
   "source": [
    "class BertConfig(object):\n",
    "    def __init__(self, vocab_size, hidden_size=768, num_hidden_layers=12,\n",
    "                 num_attention_heads=12, intermediate_size=3072,\n",
    "                 hidden_act=\"gelu\", hidden_dropout_prob=0.1,\n",
    "                 attention_probs_dropout_prob=0.1, max_position_embeddings=512,\n",
    "                 type_vocab_size=16, initializer_range=0.02,\n",
    "                 backward_compatible=True):\n",
    "        self.vocab_size = vocab_size\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_hidden_layers = num_hidden_layers\n",
    "        self.num_attention_heads = num_attention_heads\n",
    "        self.hidden_act = hidden_act\n",
    "        self.intermediate_size = intermediate_size\n",
    "        self.hidden_dropout_prob = hidden_dropout_prob\n",
    "        self.attention_probs_dropout_prob = attention_probs_dropout_prob\n",
    "        self.max_position_embeddings = max_position_embeddings\n",
    "        self.type_vocab_size = type_vocab_size\n",
    "        self.initializer_range = initializer_range\n",
    "        self.backward_compatible = backward_compatible\n",
    "    \n",
    "    @classmethod\n",
    "    def from_dict(cls, json_object):\n",
    "        \"\"\"Constructs a `BertConfig` from a Python dictionary of parameters.\"\"\"\n",
    "        config = BertConfig(vocab_size=None)\n",
    "        for (key, value) in six.iteritems(json_object):\n",
    "            config.__dict__[key] = value\n",
    "        return config\n",
    "    \n",
    "    @classmethod\n",
    "    def from_json_file(cls, json_file):\n",
    "        \"\"\"Constructs a `BertConfig` from a json file of parameters.\"\"\"\n",
    "        with tf.io.gfile.GFile(json_file, \"r\") as reader:\n",
    "            text = reader.read()\n",
    "        return cls.from_dict(json.loads(text))\n",
    "    \n",
    "    def to_dict(self):\n",
    "        \"\"\"Serializes this instance to a Python dictionary.\"\"\"\n",
    "        output = copy.deepcopy(self.__dict__)\n",
    "        return output\n",
    "    \n",
    "    def to_json_string(self):\n",
    "        \"\"\"Serializes this instance to a JSON string.\"\"\"\n",
    "        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 整体\n",
    "输入为：\n",
    "- 文本向量化后的 `input_ids`：每个`token`对应的id \n",
    "- 因为文本向量填充成同样的长度对应的 `mask`：填充的`token`标记为0，文本`token`标记为1\n",
     "- 文本为两个句子时对应的 `segment_ids`：上句对应的`token`标记为0，下句的`token`标记为1\n",
    "  \n",
    "输出为：\n",
    "- 第一个 `token:[CLS]` 对应的输出  `pooled_output`\n",
    "- 每个 `token` 或最后一个 `token` 对应的输出 `sequence_output`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T05:59:08.146510Z",
     "start_time": "2020-05-07T05:59:08.140850Z"
    }
   },
   "outputs": [],
   "source": [
    "def get_bert_model(input_word_ids, input_mask, input_type_ids, config=None,\n",
    "                   name=None, float_type=tf.float32):\n",
    "    \"\"\"Wraps the core BERT model as a keras.Model.\"\"\"\n",
    "    bert_model_layer = BertModel(config=config, float_type=float_type,\n",
    "                                 name=name)\n",
    "    pooled_output, sequence_output = bert_model_layer(input_word_ids,\n",
    "                                                      input_mask,\n",
    "                                                      input_type_ids)\n",
    "    bert_model = tf.keras.Model(\n",
    "        inputs=[input_word_ids, input_mask, input_type_ids],\n",
    "        outputs=[pooled_output, sequence_output])\n",
    "    return bert_model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T05:23:14.594303Z",
     "start_time": "2020-05-07T05:23:14.588266Z"
    }
   },
   "source": [
    "## Bert框架\n",
    "\n",
    "- `embedding_lookup`，将输入单词序列转换成词嵌入向量，`[batch,seq_len]-->[batch,seq_len,hidden_size]`\n",
    "- `embedding_postprocessor`，将上一步的词嵌入矩阵，加上位置编码及句子编码\n",
    "- `encoder`，编码器，`[batch,seq_len,hidden_size]-->[batch,seq_len,hidden_size]`\n",
     "- `pooler_transform`，最终的输出层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T05:59:12.868495Z",
     "start_time": "2020-05-07T05:59:12.851701Z"
    }
   },
   "outputs": [],
   "source": [
     "class BertModel(tf.keras.layers.Layer):\n",
     "    \"\"\"BERT model (\"Bidirectional Encoder Representations from Transformers\").\n",
     "  \n",
     "    Example usage:\n",
     "  \n",
     "    ```python\n",
     "    # Already been converted into WordPiece token ids\n",
     "    input_word_ids = tf.constant([[31, 51, 99], [15, 5, 0]])\n",
     "    input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])\n",
     "    input_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])\n",
     "  \n",
     "    config = modeling.BertConfig(vocab_size=32000, hidden_size=512,\n",
     "      num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)\n",
     "  \n",
     "    pooled_output, sequence_output = modeling.BertModel(config=config)(\n",
     "      input_word_ids=input_word_ids,\n",
     "      input_mask=input_mask,\n",
     "      input_type_ids=input_type_ids)\n",
     "    ...\n",
     "    ```\n",
     "  \n",
     "    `call()` supports two modes: \"bert\" (default) returns the\n",
     "    (pooled_output, sequence_output) pair; \"encoder\" returns the encoder\n",
     "    called with `return_all_layers=True`.\n",
     "    \"\"\"\n",
     "    \n",
     "    def __init__(self, config, float_type=tf.float32, **kwargs):\n",
     "        # `config` may be a plain dict or a BertConfig instance; a private\n",
     "        # copy is kept either way so later mutation by the caller cannot\n",
     "        # affect this layer.\n",
     "        super(BertModel, self).__init__(**kwargs)\n",
     "        self.config = (BertConfig.from_dict(config) if isinstance(config,\n",
     "                                                                  dict) else copy.deepcopy(\n",
     "            config))\n",
     "        self.float_type = float_type\n",
     "    \n",
     "    def build(self, unused_input_shapes):\n",
     "        \"\"\"Implements build() for the layer.\"\"\"\n",
     "        # Word-embedding lookup: [batch, seq_len] -> [batch, seq_len, hidden_size].\n",
     "        self.embedding_lookup = EmbeddingLookup(\n",
     "            vocab_size=self.config.vocab_size,\n",
     "            embedding_size=self.config.hidden_size,\n",
     "            initializer_range=self.config.initializer_range, dtype=tf.float32,\n",
     "            name=\"word_embeddings\")\n",
     "        # Adds token-type (segment) and position embeddings, then applies\n",
     "        # layer normalization and dropout.\n",
     "        self.embedding_postprocessor = EmbeddingPostprocessor(\n",
     "            use_type_embeddings=True,\n",
     "            token_type_vocab_size=self.config.type_vocab_size,\n",
     "            use_position_embeddings=True,\n",
     "            max_position_embeddings=self.config.max_position_embeddings,\n",
     "            dropout_prob=self.config.hidden_dropout_prob,\n",
     "            initializer_range=self.config.initializer_range, dtype=tf.float32,\n",
     "            name=\"embedding_postprocessor\")\n",
     "        # Stack of `num_hidden_layers` Transformer encoder layers.\n",
     "        self.encoder = Transformer(\n",
     "            num_hidden_layers=self.config.num_hidden_layers,\n",
     "            hidden_size=self.config.hidden_size,\n",
     "            num_attention_heads=self.config.num_attention_heads,\n",
     "            intermediate_size=self.config.intermediate_size,\n",
     "            intermediate_activation=self.config.hidden_act,\n",
     "            hidden_dropout_prob=self.config.hidden_dropout_prob,\n",
     "            attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n",
     "            initializer_range=self.config.initializer_range,\n",
     "            backward_compatible=self.config.backward_compatible,\n",
     "            float_type=self.float_type, name=\"encoder\")\n",
     "        # Dense + tanh applied to the first token's hidden state to form\n",
     "        # the pooled output.\n",
     "        self.pooler_transform = tf.keras.layers.Dense(\n",
     "            units=self.config.hidden_size, activation=\"tanh\",\n",
     "            kernel_initializer=get_initializer(self.config.initializer_range),\n",
     "            name=\"pooler_transform\")\n",
     "        super(BertModel, self).build(unused_input_shapes)\n",
     "    \n",
     "    def __call__(self, input_word_ids, input_mask=None, input_type_ids=None,\n",
     "                 **kwargs):\n",
     "        # Pack the three tensors into a single structure so Keras sees one\n",
     "        # `inputs` argument; call() unpacks them in the same order.\n",
     "        inputs = tf_utils.pack_inputs(\n",
     "            [input_word_ids, input_mask, input_type_ids])\n",
     "        return super(BertModel, self).__call__(inputs, **kwargs)\n",
     "    \n",
     "    def call(self, inputs, mode=\"bert\"):\n",
     "        \"\"\"Implements call() for the layer.\n",
     "    \n",
     "        Args:\n",
     "          inputs: packed input tensors.\n",
     "          mode: string, `bert` or `encoder`.\n",
     "        Returns:\n",
     "          Output tensor of the last layer for BERT training (mode=`bert`) which\n",
     "          is a float Tensor of shape [batch_size, seq_length, hidden_size] or\n",
     "          a list of output tensors for encoder usage (mode=`encoder`).\n",
     "        \"\"\"\n",
     "        # Unpack in the order packed by __call__: ids, mask, type ids.\n",
     "        unpacked_inputs = tf_utils.unpack_inputs(inputs)\n",
     "        input_word_ids = unpacked_inputs[0]\n",
     "        input_mask = unpacked_inputs[1]\n",
     "        input_type_ids = unpacked_inputs[2]\n",
     "        \n",
     "        # token ids --> token embeddings\n",
     "        word_embeddings = self.embedding_lookup(input_word_ids)\n",
     "        \n",
     "        # Add token-type (segment) and position embeddings, followed by\n",
     "        # layer normalization and dropout.\n",
     "        embedding_tensor = self.embedding_postprocessor(\n",
     "            word_embeddings=word_embeddings, token_type_ids=input_type_ids)\n",
     "        if self.float_type == tf.float16:\n",
     "            embedding_tensor = tf.cast(embedding_tensor, tf.float16)\n",
     "            \n",
     "        # Build the attention mask so padded positions (input_mask == 0)\n",
     "        # are not attended to by the encoder.\n",
     "        attention_mask = None\n",
     "        if input_mask is not None:\n",
     "            attention_mask = create_attention_mask_from_input_mask(\n",
     "                input_word_ids, input_mask)\n",
     "        \n",
     "        if mode == \"encoder\":\n",
     "            return self.encoder(embedding_tensor, attention_mask,\n",
     "                return_all_layers=True)\n",
     "        \n",
     "        # Encoder output: [batch_size, seq_length, hidden_size].\n",
     "        sequence_output = self.encoder(embedding_tensor, attention_mask)\n",
     "        \n",
     "        # Pooled output: hidden state of the first ([CLS]) token, passed\n",
     "        # through the dense+tanh pooler.\n",
     "        first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)\n",
     "        pooled_output = self.pooler_transform(first_token_tensor)\n",
     "        \n",
     "        return (pooled_output, sequence_output)\n",
     "    \n",
     "    def get_config(self):\n",
     "        # Merge the wrapped BertConfig (as a dict) into the base layer\n",
     "        # config so the layer can be serialized.\n",
     "        config = {\"config\": self.config.to_dict()}\n",
     "        base_config = super(BertModel, self).get_config()\n",
     "        return dict(list(base_config.items()) + list(config.items()))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 输入处理\n",
    "![](../images/bert-input.png)\n",
    "输入由三部分组成：词嵌入、位置编码、句子编码"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 词嵌入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EmbeddingLookup(tf.keras.layers.Layer):\n",
    "    \"\"\"Looks up words embeddings for id tensor.\"\"\"\n",
    "    \n",
    "    def __init__(self, vocab_size, embedding_size=768, initializer_range=0.02,\n",
    "                 **kwargs):\n",
    "        super(EmbeddingLookup, self).__init__(**kwargs)\n",
    "        self.vocab_size = vocab_size\n",
    "        self.embedding_size = embedding_size\n",
    "        self.initializer_range = initializer_range\n",
    "    \n",
    "    def build(self, unused_input_shapes):\n",
    "        \"\"\"Implements build() for the layer.\"\"\"\n",
    "        self.embeddings = self.add_weight(\"embeddings\",\n",
    "            shape=[self.vocab_size, self.embedding_size],\n",
    "            initializer=get_initializer(self.initializer_range),\n",
    "            dtype=self.dtype)\n",
    "        super(EmbeddingLookup, self).build(unused_input_shapes)\n",
    "    \n",
    "    def call(self, inputs):\n",
    "        \"\"\"Implements call() for the layer.\"\"\"\n",
    "        input_shape = tf_utils.get_shape_list(inputs)\n",
    "        \n",
    "        # 将 betch,seq_len 的数据展平，便于计算\n",
    "        flat_input = tf.reshape(inputs, [-1])\n",
    "        output = tf.gather(self.embeddings, flat_input)\n",
    "        \n",
    "        # 再还原成 batch 数据\n",
    "        output = tf.reshape(output, input_shape + [self.embedding_size])\n",
    "        return output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 词嵌入后处理\n",
    "- 每个单词对应的词向量 加上 该单词对应位置的位置编码\n",
    "- 如果 `use_type_embeddings=True`，就需要指定单词所属句子时的向量\n",
    "- 上述三者相加后，再对每个词向量正则化处理：\n",
    "```\n",
    "mean_i         = sum(x_i[j] for j in range(k)) / k\n",
    "var_i          = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k\n",
    "x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)\n",
    "output_i       = x_i_normalized * gamma + beta\n",
    "```\n",
    "    - gamma 和 beta 为待学习的参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class EmbeddingPostprocessor(tf.keras.layers.Layer):\n",
     "    \"\"\"Performs various post-processing on a word embedding tensor.\n",
     "\n",
     "    Optionally adds token-type (segment) embeddings and position\n",
     "    embeddings to the word embeddings, then applies layer normalization\n",
     "    followed by dropout.\n",
     "    \"\"\"\n",
     "    \n",
     "    def __init__(self, use_type_embeddings=False, token_type_vocab_size=None,\n",
     "                 use_position_embeddings=True, max_position_embeddings=512,\n",
     "                 dropout_prob=0.0, initializer_range=0.02, initializer=None,\n",
     "                 **kwargs):\n",
     "        \"\"\"Args:\n",
     "          use_type_embeddings: Whether to add token-type (segment) embeddings.\n",
     "          token_type_vocab_size: Number of segment types; required when\n",
     "            `use_type_embeddings` is True.\n",
     "          use_position_embeddings: Whether to add learned position embeddings.\n",
     "          max_position_embeddings: Maximum sequence length supported by the\n",
     "            position-embedding table.\n",
     "          dropout_prob: Dropout rate applied after layer normalization.\n",
     "          initializer_range: Range passed to `get_initializer` to build the\n",
     "            default weight initializer (used when `initializer` is None).\n",
     "          initializer: Optional explicit weight initializer.\n",
     "        \"\"\"\n",
     "        super(EmbeddingPostprocessor, self).__init__(**kwargs)\n",
     "        self.use_type_embeddings = use_type_embeddings\n",
     "        self.token_type_vocab_size = token_type_vocab_size\n",
     "        self.use_position_embeddings = use_position_embeddings\n",
     "        self.max_position_embeddings = max_position_embeddings\n",
     "        self.dropout_prob = dropout_prob\n",
     "        self.initializer_range = initializer_range\n",
     "        \n",
     "        if not initializer:\n",
     "            self.initializer = get_initializer(self.initializer_range)\n",
     "        else:\n",
     "            self.initializer = initializer\n",
     "        \n",
     "        if self.use_type_embeddings and not self.token_type_vocab_size:\n",
     "            raise ValueError(\"If `use_type_embeddings` is True, then \"\n",
     "                             \"`token_type_vocab_size` must be specified.\")\n",
     "    \n",
     "    def build(self, input_shapes):\n",
     "        \"\"\"Implements build() for the layer.\"\"\"\n",
     "        # input_shapes is the packed (word_embeddings, token_type_ids) pair;\n",
     "        # width is the embedding dimension of the word embeddings.\n",
     "        (word_embeddings_shape, _) = input_shapes\n",
     "        width = word_embeddings_shape.as_list()[-1]\n",
     "        self.type_embeddings = None\n",
     "        \n",
     "        # One learned embedding row per segment type;\n",
     "        # token_type_vocab_size is the number of segment types.\n",
     "        if self.use_type_embeddings:\n",
     "            self.type_embeddings = self.add_weight(\"type_embeddings\",\n",
     "                shape=[self.token_type_vocab_size, width],\n",
     "                initializer=get_initializer(self.initializer_range),\n",
     "                dtype=self.dtype)\n",
     "        \n",
     "        # One learned embedding row per position in the sequence.\n",
     "        self.position_embeddings = None\n",
     "        if self.use_position_embeddings:\n",
     "            self.position_embeddings = self.add_weight(\"position_embeddings\",\n",
     "                shape=[self.max_position_embeddings, width],\n",
     "                initializer=get_initializer(self.initializer_range),\n",
     "                dtype=self.dtype)\n",
     "        \n",
     "        # Layer normalization over the last axis, followed by dropout.\n",
     "        self.output_layer_norm = tf.keras.layers.LayerNormalization(\n",
     "            name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n",
     "        self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_prob,\n",
     "                                                      dtype=tf.float32)\n",
     "        super(EmbeddingPostprocessor, self).build(input_shapes)\n",
     "    \n",
     "    def __call__(self, word_embeddings, token_type_ids=None, **kwargs):\n",
     "        # Pack the two tensors so Keras sees a single `inputs` argument;\n",
     "        # call() unpacks them in the same order.\n",
     "        inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids])\n",
     "        return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs)\n",
     "    \n",
     "    def call(self, inputs):\n",
     "        \"\"\"Implements call() for the layer.\"\"\"\n",
     "        # Inputs were packed by __call__ as (word_embeddings, token_type_ids).\n",
     "        unpacked_inputs = tf_utils.unpack_inputs(inputs)\n",
     "        word_embeddings = unpacked_inputs[0]\n",
     "        token_type_ids = unpacked_inputs[1]\n",
     "        input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n",
     "        \n",
     "        # word_embeddings: [batch_size, seq_length, width]\n",
     "        batch_size = input_shape[0]\n",
     "        seq_length = input_shape[1]\n",
     "        width = input_shape[2]\n",
     "        \n",
     "        output = word_embeddings\n",
     "        \n",
     "        # Add segment (token-type) embeddings: gather one row per token's\n",
     "        # type id, then restore the batch layout.\n",
     "        if self.use_type_embeddings:\n",
     "            flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n",
     "            token_type_embeddings = tf.gather(self.type_embeddings,\n",
     "                                              flat_token_type_ids)\n",
     "            token_type_embeddings = tf.reshape(token_type_embeddings,\n",
     "                                               [batch_size, seq_length, width])\n",
     "            output += token_type_embeddings\n",
     "        \n",
     "        # Add position embeddings, sliced to the actual sequence length and\n",
     "        # broadcast over the batch dimension.\n",
     "        if self.use_position_embeddings:\n",
     "            position_embeddings = tf.expand_dims(\n",
     "                tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n",
     "                axis=0)\n",
     "            \n",
     "            output += position_embeddings\n",
     "        \n",
     "        # Layer-normalize over the embedding axis.\n",
     "        output = self.output_layer_norm(output)\n",
     "        \n",
     "        # Apply dropout.\n",
     "        output = self.output_dropout(output)\n",
     "        \n",
     "        return output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## attention mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建 attention_mask\n",
    "def create_attention_mask_from_input_mask(from_tensor, to_mask):\n",
    "    \"\"\"Create 3D attention mask from a 2D tensor mask.\n",
    "  \n",
    "    Args:\n",
    "      from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].\n",
    "      to_mask: int32 Tensor of shape [batch_size, to_seq_length].\n",
    "  \n",
    "    Returns:\n",
    "      float Tensor of shape [batch_size, from_seq_length, to_seq_length].\n",
    "    \"\"\"\n",
    "    from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3])\n",
    "    batch_size = from_shape[0]\n",
    "    from_seq_length = from_shape[1]\n",
    "\n",
    "    to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2)\n",
    "    to_seq_length = to_shape[1]\n",
    "\n",
    "    to_mask = tf.cast(tf.reshape(to_mask, [batch_size, 1, to_seq_length]),\n",
    "                      dtype=from_tensor.dtype)\n",
    "\n",
    "    # We don't assume that `from_tensor` is a mask (although it could be). We\n",
    "    # don't actually care if we attend *from* padding tokens (only *to* padding)\n",
    "    # tokens so we create a tensor of all ones.\n",
    "    #\n",
    "    # `broadcast_ones` = [batch_size, from_seq_length, 1]\n",
    "    broadcast_ones = tf.ones(shape=[batch_size, from_seq_length, 1],\n",
    "                             dtype=from_tensor.dtype)\n",
    "\n",
    "    # Here we broadcast along two dimensions to create the mask.\n",
    "    mask = broadcast_ones * to_mask\n",
    "\n",
    "    return mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 参数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T08:57:57.530419Z",
     "start_time": "2020-05-07T08:57:57.523902Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======输入参数及预处理=============\n",
      "word_embeddings/weight:0 (30522, 768) True\n",
      "position_embeddings/embeddings:0 (512, 768) True\n",
      "token_type_embeddings/embeddings:0 (2, 768) True\n",
      "LayerNorm/gamma:0 (768,) True\n",
      "LayerNorm/beta:0 (768,) True\n"
     ]
    }
   ],
   "source": [
    "print(\"======输入参数及预处理=============\")\n",
    "for param in params[:5]:\n",
    "    print('/'.join(param.name.split('/')[-2:]), param.shape, param.trainable)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T08:55:11.545249Z",
     "start_time": "2020-05-07T08:55:11.431498Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Variable 'tf_bert_for_pre_training_1/bert/embeddings/LayerNorm/gamma:0' shape=(768,) dtype=float32, numpy=\n",
       "array([0.9260566 , 0.8851115 , 0.85807985, 0.8616906 , 0.8937205 ,\n",
       "       0.8969075 , 0.9296931 , 0.9137383 , 0.93712723, 0.80840456,\n",
       "       0.7991786 , 0.8071278 , 0.9030929 , 0.8197847 , 0.9100005 ,\n",
       "       0.8493103 , 0.81518257, 0.8612902 , 0.9141819 , 0.86518496,\n",
       "       0.92338425, 0.86718833, 0.9007915 , 0.86840504, 0.84404385,\n",
       "       0.8990014 , 0.78913987, 0.92746514, 0.85011405, 0.8412803 ,\n",
       "       0.91792315, 0.8641226 , 0.9185111 , 0.9656955 , 0.8861148 ,\n",
       "       0.87096494, 0.9102782 , 0.873895  , 0.91330135, 0.8879689 ,\n",
       "       0.91300935, 0.93739486, 0.8823456 , 0.8621854 , 0.8811713 ,\n",
       "       0.8707897 , 0.8569555 , 0.94453305, 0.9162866 , 0.9356308 ,\n",
       "       0.9264842 , 0.85043895, 0.9299852 , 0.34471667, 0.86500484,\n",
       "       0.81973606, 0.8722222 , 0.85660684, 0.8939192 , 0.80511427,\n",
       "       0.9006626 , 0.8482888 , 0.38700742, 0.8888622 , 0.89233655,\n",
       "       0.87715036, 0.89628065, 0.9548264 , 0.8944359 , 0.8945792 ,\n",
       "       0.94709164, 0.9488946 , 0.93487215, 0.7813892 , 0.9254878 ,\n",
       "       0.7942986 , 0.8806103 , 0.38572693, 0.78999954, 0.84777164,\n",
       "       0.8886096 , 0.92151225, 0.92916363, 0.8989887 , 0.7790041 ,\n",
       "       0.82554054, 0.87168235, 0.8777793 , 0.9020569 , 0.9190425 ,\n",
       "       0.86048114, 0.8761628 , 0.7084113 , 0.8598885 , 0.89807343,\n",
       "       0.8091747 , 0.40208572, 0.7916741 , 0.8922608 , 0.9117843 ,\n",
       "       0.9458537 , 0.9489009 , 0.8743955 , 0.8402207 , 0.8030819 ,\n",
       "       0.29234427, 0.93144053, 0.9064982 , 0.8851589 , 0.81150734,\n",
       "       0.9090044 , 0.89483654, 0.9023836 , 0.91465044, 0.89942664,\n",
       "       0.87086827, 0.9005642 , 0.90315676, 0.89452034, 0.8992497 ,\n",
       "       0.867412  , 0.15099949, 0.8701771 , 0.88595575, 0.64526594,\n",
       "       0.85540724, 0.8652409 , 0.79231924, 0.89505863, 0.83703536,\n",
       "       0.9121292 , 0.886018  , 0.401027  , 0.88356113, 0.8962519 ,\n",
       "       0.9265354 , 0.8997337 , 0.83378536, 0.9042948 , 0.42124978,\n",
       "       0.9173842 , 0.9093346 , 0.92141825, 0.90985024, 0.9185157 ,\n",
       "       0.18774867, 0.9138334 , 0.8465666 , 0.8435096 , 0.8339577 ,\n",
       "       0.8081294 , 0.9064035 , 0.73182   , 0.8841658 , 0.9488698 ,\n",
       "       0.9095927 , 0.9133144 , 0.83684975, 0.91724795, 0.12419887,\n",
       "       0.9089825 , 0.91277605, 0.84425056, 0.98305887, 0.91454303,\n",
       "       0.91756535, 0.8640871 , 0.8422351 , 0.38466504, 0.8948746 ,\n",
       "       0.9380962 , 0.89403415, 0.82847625, 0.94385993, 0.9317705 ,\n",
       "       0.1185294 , 0.8464395 , 0.84179324, 0.87033844, 0.86234915,\n",
       "       0.8935847 , 0.93493855, 0.8706666 , 0.9317667 , 0.9361691 ,\n",
       "       0.91714686, 0.9230645 , 0.8718406 , 0.93313015, 0.84092784,\n",
       "       0.88140565, 0.8904832 , 0.8714436 , 0.8592164 , 0.877544  ,\n",
       "       0.88654906, 0.91471326, 0.9134799 , 0.79894114, 0.80810976,\n",
       "       0.8708406 , 0.9013753 , 0.27419195, 0.8637597 , 0.86090654,\n",
       "       0.8354063 , 0.9148476 , 0.846354  , 0.93758523, 0.86974245,\n",
       "       0.855309  , 0.95012665, 0.83799297, 0.90748125, 0.87598044,\n",
       "       0.26170397, 0.12636614, 0.9096406 , 0.5226113 , 0.85526776,\n",
       "       0.92894036, 0.8843869 , 0.94361556, 0.8767759 , 0.8212878 ,\n",
       "       0.35094735, 0.83547956, 0.7795069 , 0.92877126, 0.89614105,\n",
       "       0.874064  , 0.8784776 , 0.8788857 , 0.90821713, 0.9163048 ,\n",
       "       0.8848732 , 0.8641444 , 0.8015474 , 0.9238662 , 0.79528356,\n",
       "       0.83801305, 0.8893795 , 0.9044388 , 0.86459774, 0.8414454 ,\n",
       "       0.3916431 , 0.84050643, 0.8416263 , 0.85926163, 0.48643973,\n",
       "       0.89124477, 0.8795802 , 0.87387836, 0.8618943 , 0.91015565,\n",
       "       0.86088246, 0.84509903, 0.87018776, 0.85425574, 0.87294364,\n",
       "       0.93329674, 0.88294226, 0.9353181 , 0.8981494 , 0.7935064 ,\n",
       "       0.85893804, 0.90796405, 0.8969525 , 0.9056606 , 0.9113772 ,\n",
       "       0.871921  , 0.89276415, 0.86300933, 0.86208177, 0.9334718 ,\n",
       "       0.93724394, 0.83230907, 0.85410315, 0.8904335 , 0.9081434 ,\n",
       "       0.8410402 , 0.90203875, 0.92853105, 0.3199371 , 0.8969159 ,\n",
       "       0.8276323 , 0.8606241 , 0.9147349 , 0.85958403, 0.93085396,\n",
       "       0.8497338 , 0.4076764 , 0.85892564, 0.8980277 , 0.8604694 ,\n",
       "       0.8013012 , 0.34660855, 0.91647774, 0.8923925 , 0.8897576 ,\n",
       "       0.8680035 , 0.9267826 , 0.9153016 , 0.89703697, 0.7475861 ,\n",
       "       0.9126237 , 0.85415   , 0.8793676 , 0.8637017 , 0.88988066,\n",
       "       0.8577796 , 0.3876985 , 0.8875742 , 0.88868934, 0.9539487 ,\n",
       "       0.8464176 , 0.92198163, 0.9094117 , 0.8906177 , 0.44273287,\n",
       "       0.89336216, 0.86392045, 0.8825681 , 0.8951436 , 0.9021225 ,\n",
       "       0.9081227 , 0.8710621 , 0.9088694 , 0.90027475, 0.8644249 ,\n",
       "       0.8242602 , 0.41708273, 0.8461327 , 0.9071259 , 0.8809113 ,\n",
       "       0.8439526 , 0.8420253 , 0.930305  , 0.90434605, 0.9185034 ,\n",
       "       0.7761407 , 0.8800631 , 0.8776769 , 0.8956282 , 0.8651067 ,\n",
       "       0.37409583, 0.9124579 , 0.82786006, 0.88625747, 0.95682067,\n",
       "       0.91303474, 0.93084234, 0.9036759 , 0.28337437, 0.89667785,\n",
       "       0.90629894, 0.88146603, 0.8594908 , 0.88622   , 0.85950845,\n",
       "       0.90782285, 0.8375815 , 0.8270193 , 0.83298904, 0.9076985 ,\n",
       "       0.8695242 , 0.87462884, 0.8756038 , 0.89099956, 0.8325928 ,\n",
       "       0.8959902 , 0.82798654, 0.92926496, 0.84385616, 0.8842619 ,\n",
       "       0.97470844, 0.9028642 , 0.8918419 , 0.83396786, 0.92282045,\n",
       "       0.87626326, 0.5710762 , 0.8947103 , 0.91752934, 0.89111555,\n",
       "       0.8976591 , 0.8908455 , 0.9205152 , 0.90374535, 0.8681316 ,\n",
       "       0.81718713, 0.8670587 , 0.8722043 , 0.8607442 , 0.91174054,\n",
       "       0.88865304, 0.8645432 , 0.8426859 , 0.8681325 , 0.92559934,\n",
       "       0.8529574 , 0.6331255 , 0.85653865, 0.8803129 , 0.79488313,\n",
       "       0.88872886, 0.91123855, 0.88170385, 0.8718456 , 0.930356  ,\n",
       "       0.9083776 , 0.874565  , 0.93609285, 0.88446087, 0.88723963,\n",
       "       0.8586721 , 0.87977093, 0.872544  , 0.8966105 , 0.32442543,\n",
       "       0.8557221 , 0.78901726, 0.88968027, 0.84193355, 0.8508675 ,\n",
       "       0.85809606, 0.8827611 , 0.92155766, 0.901737  , 0.77066356,\n",
       "       0.9107565 , 0.80981594, 0.9066029 , 0.81776696, 0.9126179 ,\n",
       "       0.95096415, 0.88318485, 0.8150854 , 0.87245554, 0.8794705 ,\n",
       "       0.89965457, 0.8966909 , 0.86489487, 0.37507328, 0.8935021 ,\n",
       "       0.7731586 , 0.93679166, 0.8688368 , 0.8895556 , 0.88159966,\n",
       "       0.8823327 , 0.9228342 , 0.89567864, 0.90600795, 0.92713505,\n",
       "       0.94189936, 0.8765977 , 0.8950364 , 0.8504107 , 0.89285034,\n",
       "       0.8664158 , 0.36027777, 0.9115588 , 0.91172   , 0.86185426,\n",
       "       0.8387775 , 0.847822  , 0.80329585, 0.83494294, 0.85583216,\n",
       "       0.89093864, 0.9124519 , 0.8739958 , 0.89530367, 0.8341385 ,\n",
       "       0.8762595 , 0.84939957, 0.88968134, 0.8817865 , 0.8597422 ,\n",
       "       0.916143  , 0.87331843, 0.35750112, 0.91283137, 0.8563415 ,\n",
       "       0.8596366 , 0.8857348 , 0.8926817 , 0.86825037, 0.89289725,\n",
       "       0.9099532 , 0.85813475, 0.8915597 , 0.90470254, 0.93350255,\n",
       "       0.89361185, 0.3575774 , 0.8567318 , 0.9137295 , 0.8022014 ,\n",
       "       0.8484341 , 0.88654727, 0.85386884, 0.8017783 , 0.7963872 ,\n",
       "       0.87032807, 0.8703493 , 0.87147284, 0.9089967 , 0.9052607 ,\n",
       "       0.7068387 , 0.8063977 , 0.89014715, 0.8739348 , 0.8705156 ,\n",
       "       0.8709645 , 0.8931937 , 0.34227282, 0.891124  , 0.87587005,\n",
       "       0.91158056, 0.83055013, 0.92386436, 0.8674634 , 0.8873916 ,\n",
       "       0.9189294 , 0.88050306, 0.923116  , 0.89377266, 0.08930355,\n",
       "       0.80464107, 0.805246  , 0.9390387 , 0.91317844, 0.89413285,\n",
       "       0.8958143 , 0.9140189 , 0.86672854, 0.854776  , 0.83868825,\n",
       "       0.8630073 , 0.15312779, 0.9126465 , 0.8282655 , 0.9089906 ,\n",
       "       0.36096454, 0.895338  , 0.87417865, 0.8376608 , 0.91495806,\n",
       "       0.90085113, 0.83824533, 0.9281552 , 0.9210454 , 0.94156283,\n",
       "       0.92984235, 0.8366345 , 0.8119768 , 0.89949995, 0.91619384,\n",
       "       0.9217038 , 0.87148285, 0.856305  , 0.9000319 , 0.84717023,\n",
       "       0.89832586, 0.8892268 , 0.88231486, 0.8943245 , 0.9087766 ,\n",
       "       0.8511314 , 0.90410316, 0.87121344, 0.6926475 , 0.9087988 ,\n",
       "       0.8996104 , 0.89785665, 0.3114806 , 0.9052158 , 0.8957853 ,\n",
       "       0.8546862 , 0.9020623 , 0.9064642 , 0.86115766, 0.907965  ,\n",
       "       0.85588765, 0.91148484, 0.87404114, 0.8123533 , 0.9028607 ,\n",
       "       0.90696883, 0.89752287, 0.92848325, 0.8457143 , 0.8160455 ,\n",
       "       0.81963074, 0.89041114, 0.86125386, 0.92821175, 0.88534766,\n",
       "       0.8925787 , 0.93720835, 0.91011757, 0.84036714, 0.8815988 ,\n",
       "       0.84218264, 0.9576188 , 0.83140427, 0.9008031 , 0.8606466 ,\n",
       "       0.9050308 , 0.8171841 , 0.9053481 , 0.80682796, 0.3706334 ,\n",
       "       0.87583303, 0.8679653 , 0.9558147 , 0.8983481 , 0.8546272 ,\n",
       "       0.84576684, 0.89256775, 0.9332979 , 0.86570567, 0.9089619 ,\n",
       "       0.8623334 , 0.89365476, 0.8931066 , 0.8623956 , 0.8948079 ,\n",
       "       0.84665275, 0.8905788 , 0.8557842 , 0.89291424, 0.90498143,\n",
       "       0.91523993, 0.9014969 , 0.9237611 , 0.829541  , 0.9300411 ,\n",
       "       0.8608682 , 0.84562397, 0.895178  , 0.8211924 , 0.9222569 ,\n",
       "       0.8452472 , 0.86415845, 0.8961134 , 0.87351227, 0.8625414 ,\n",
       "       0.9423521 , 0.9192055 , 0.8447117 , 0.86605847, 0.17414762,\n",
       "       0.8413308 , 0.81279856, 0.89001554, 0.9181191 , 0.88312185,\n",
       "       0.8918923 , 0.92285645, 0.90218145, 0.8878144 , 0.9240814 ,\n",
       "       0.8676207 , 0.9140367 , 0.8970822 , 0.8902197 , 0.85999465,\n",
       "       0.41311353, 0.87957317, 0.850072  , 0.83332455, 0.9113273 ,\n",
       "       0.90931803, 0.88811547, 0.88831574, 0.94464177, 0.8709082 ,\n",
       "       0.905707  , 0.9258063 , 0.83668745, 0.923908  , 0.94400054,\n",
       "       0.893105  , 0.8712581 , 0.9279269 , 0.9032569 , 0.91828257,\n",
       "       0.91040635, 0.87974507, 0.8417782 , 0.96570194, 0.8737701 ,\n",
       "       0.8632181 , 0.8565091 , 0.8197477 , 0.88923025, 0.31277785,\n",
       "       0.91984355, 0.9015648 , 0.8745782 , 0.82651246, 0.90615976,\n",
       "       0.83866596, 0.7941187 , 0.8617799 , 0.8993736 , 0.8789901 ,\n",
       "       0.8074849 , 0.8651469 , 0.79385865, 0.9074964 , 0.8526359 ,\n",
       "       0.9195547 , 0.8239075 , 0.8573685 , 0.90989727, 0.85294914,\n",
       "       0.84308237, 0.897531  , 0.9211007 , 0.94510597, 0.8932511 ,\n",
       "       0.90792114, 0.8607042 , 0.79292715, 0.8642741 , 0.8872637 ,\n",
       "       0.8861971 , 0.913413  , 0.9299183 , 0.75024897, 0.84738326,\n",
       "       0.86299825, 0.9157644 , 0.8374662 , 0.94563645, 0.81301785,\n",
       "       0.8500089 , 0.9144313 , 0.9410184 , 0.8897411 , 0.85874027,\n",
       "       0.89645886, 0.8334701 , 0.87103   , 0.8764112 , 0.89176965,\n",
       "       0.9459387 , 0.9260616 , 0.891444  , 0.88576716, 0.9025373 ,\n",
       "       0.8340893 , 0.8275253 , 0.9245579 , 0.8769424 , 0.7869215 ,\n",
       "       0.9063748 , 0.88050836, 0.8635482 , 0.90578836, 0.89222914,\n",
       "       0.7888876 , 0.8703116 , 0.9370887 ], dtype=float32)>"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# LayerNorm 中的 gamma 参数，对应词向量的维度，每个值介于 0~1 之间\n",
    "params[3]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T08:53:39.592187Z",
     "start_time": "2020-05-07T08:53:39.577142Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tf.Variable 'tf_bert_for_pre_training_1/bert/embeddings/LayerNorm/beta:0' shape=(768,) dtype=float32, numpy=\n",
       "array([-2.59147063e-02, -1.95512995e-02,  2.42394563e-02,  8.90459269e-02,\n",
       "       -6.28105924e-02, -1.32586155e-02,  1.33676557e-02, -1.26008904e-02,\n",
       "        4.42890869e-03,  3.53448503e-02, -4.77544852e-02, -1.38501097e-02,\n",
       "        4.97434661e-03, -1.03460267e-01, -7.57153481e-02,  2.68115215e-02,\n",
       "       -2.06349045e-02, -4.36961651e-02, -5.63907251e-02, -8.65845978e-02,\n",
       "       -8.42638407e-03, -3.71674448e-02, -4.30996120e-02, -2.34397706e-02,\n",
       "        2.00961120e-02, -6.83468487e-03, -6.17916360e-02, -4.71808054e-02,\n",
       "        3.53978574e-03, -5.68182068e-03, -4.02727462e-02, -1.04445003e-01,\n",
       "       -3.08606084e-02, -4.73697111e-02, -5.13820536e-02,  5.25085628e-02,\n",
       "       -6.01501809e-03, -6.30762726e-02, -3.26319560e-02, -1.02944709e-02,\n",
       "       -2.61339303e-02,  3.29767801e-02,  2.35322025e-02, -3.78341340e-02,\n",
       "       -7.12791830e-02,  1.88004784e-02, -4.80748825e-02, -2.56555341e-02,\n",
       "       -1.05376057e-01,  4.46190359e-03, -2.86829676e-02, -5.92361577e-02,\n",
       "        1.23257134e-02,  1.25486761e-01, -5.69101050e-02, -7.56548420e-02,\n",
       "       -3.93328257e-02,  6.98591117e-03, -6.42135143e-02, -4.69061919e-02,\n",
       "       -2.11343355e-02, -4.01438288e-02, -7.74445087e-02, -5.10230027e-02,\n",
       "        2.99088638e-02,  2.66465787e-02, -4.11578938e-02, -9.22780335e-02,\n",
       "       -4.44925539e-02, -1.93255525e-02, -5.75817190e-02, -3.16675543e-03,\n",
       "        3.46963084e-03, -1.65544264e-02,  2.35265475e-02, -4.80383001e-02,\n",
       "        9.77970380e-03, -1.04416043e-01, -4.84367386e-02,  7.31969858e-03,\n",
       "       -1.97341107e-02, -4.20696139e-02, -7.46076331e-02, -8.53904814e-04,\n",
       "       -5.04036285e-02, -5.98558672e-02, -4.18946594e-02, -1.51765533e-02,\n",
       "       -6.51947930e-02,  1.39580853e-02, -5.30321859e-02, -2.67192498e-02,\n",
       "       -8.87626708e-02, -9.37363729e-02, -7.90804066e-03, -7.25183729e-03,\n",
       "       -6.76746666e-03,  1.19225895e-02, -1.07288710e-03,  5.00897877e-02,\n",
       "       -1.45441247e-02,  9.80450027e-03, -2.71886121e-02, -9.04692151e-03,\n",
       "       -2.61889342e-02, -5.12600467e-02,  6.20719139e-03, -4.59443871e-03,\n",
       "       -4.23969179e-02, -8.67994949e-02,  1.04734473e-01, -4.46505919e-02,\n",
       "       -1.20982470e-03,  4.89024557e-02, -6.63166344e-02, -6.59319460e-02,\n",
       "       -6.28694445e-02, -4.57712216e-03, -1.24088516e-02, -3.43277454e-02,\n",
       "       -3.11083179e-02,  6.15077019e-01, -5.72329834e-02, -3.16157527e-02,\n",
       "        3.66780832e-02, -1.23331556e-02, -4.17433567e-02,  7.66236708e-02,\n",
       "       -4.78936508e-02,  5.19876741e-03, -3.13039497e-02, -1.21015543e-03,\n",
       "        2.26243902e-02, -8.76605958e-02,  2.31059715e-02, -8.26322939e-03,\n",
       "       -3.99542740e-03, -2.66964789e-02, -2.74064485e-02,  1.29783466e-01,\n",
       "       -1.67593663e-03, -5.21626398e-02, -4.44818847e-02, -1.14191994e-02,\n",
       "       -7.65208974e-02,  6.31311655e-01, -2.79941596e-02,  1.86421350e-02,\n",
       "        1.44187156e-02,  1.12428693e-02, -1.57559812e-02, -3.28115630e-03,\n",
       "        7.84189627e-02, -2.13155635e-02, -1.77308954e-02, -1.71035584e-02,\n",
       "       -2.14106813e-02,  2.15788977e-03,  7.54900090e-03, -1.76642761e-01,\n",
       "       -6.41084858e-04, -4.87990193e-02, -1.07868062e-02,  2.03205757e-02,\n",
       "       -2.75001805e-02, -1.51495915e-02, -2.54338849e-02, -1.07448101e-01,\n",
       "        1.87617183e-01,  2.71392092e-02, -5.73437661e-02, -5.71253337e-02,\n",
       "       -7.70396292e-02, -6.82527549e-04, -2.89633498e-02, -5.55524588e-01,\n",
       "       -7.30627105e-02, -2.82024425e-02, -9.18840542e-02, -5.50532714e-02,\n",
       "       -9.96991806e-03, -2.35299878e-02,  2.23376956e-02,  1.17923897e-02,\n",
       "       -3.94024923e-02, -4.27180678e-02, -7.04451725e-02, -8.41934234e-02,\n",
       "       -1.58792101e-02, -3.86505499e-02, -9.61505249e-03,  2.35357750e-02,\n",
       "       -2.79329848e-02,  4.93699461e-02, -3.23601440e-03,  4.49049938e-03,\n",
       "       -1.43508650e-02, -4.74810190e-02,  5.63722365e-02,  2.59661004e-02,\n",
       "       -2.25627497e-02, -1.27177104e-01, -4.22202721e-02, -1.70455836e-02,\n",
       "        3.35474387e-02, -6.64497837e-02, -5.11258841e-02, -1.28927361e-02,\n",
       "        4.56995964e-02, -5.08452989e-02, -4.10658075e-03,  1.11875245e-02,\n",
       "       -9.10589844e-02, -3.15501131e-02, -3.36933285e-02, -8.18561763e-02,\n",
       "       -8.69288743e-02, -4.98861633e-02, -3.76106054e-02, -7.47280195e-02,\n",
       "       -8.26504752e-02, -1.67362264e-03, -3.97412777e-02, -4.03845534e-02,\n",
       "        2.59619374e-02, -1.10980719e-01, -5.42957820e-02, -5.12320222e-03,\n",
       "       -4.86672968e-02,  3.35472450e-02, -7.93868955e-03,  1.77247524e-02,\n",
       "        4.10612971e-02,  2.04178076e-02,  8.86159111e-03, -2.16183607e-02,\n",
       "       -4.01977971e-02, -1.05982721e-01, -3.66337150e-02,  1.26616787e-02,\n",
       "       -2.17129104e-02,  2.81185587e-03, -3.44928205e-02, -5.16855111e-03,\n",
       "        1.86376292e-02, -1.26860529e-01,  8.62286426e-03,  2.75256280e-02,\n",
       "       -4.04733755e-02,  1.81656070e-02, -5.88371977e-03, -1.64715089e-02,\n",
       "        1.73446629e-02, -9.91635919e-02, -5.72883524e-02, -5.23208082e-02,\n",
       "        1.67844146e-02, -4.90685627e-02, -4.73735929e-02, -5.97682111e-02,\n",
       "       -3.36971767e-02, -2.15151273e-02, -6.74388558e-02, -2.51901452e-03,\n",
       "       -4.53895628e-02, -6.30619898e-02, -6.69291476e-03,  7.17544323e-03,\n",
       "       -1.10627254e-02, -6.17827103e-03, -3.53327137e-04, -3.51528749e-02,\n",
       "       -4.29050699e-02, -1.46190338e-02,  3.98465805e-02, -2.77606472e-02,\n",
       "       -7.31405094e-02, -1.02777835e-02, -2.24731639e-02, -8.32810253e-03,\n",
       "       -4.70288247e-02, -3.78077701e-02, -1.42305891e-03, -1.50282532e-01,\n",
       "        1.43077243e-02,  1.97724421e-02, -6.95757475e-03, -8.72979034e-03,\n",
       "       -3.00346669e-02, -2.12163534e-02, -1.86339114e-02, -1.80113576e-02,\n",
       "       -1.86247975e-02, -7.37220868e-02, -1.14188511e-02, -5.43811060e-02,\n",
       "       -7.26022944e-02, -5.61642796e-02,  4.92536835e-03,  2.85652354e-02,\n",
       "       -3.21937166e-02,  3.32991704e-02,  9.25936550e-03, -1.41654322e-02,\n",
       "        6.24812255e-03, -9.18491110e-02, -6.16734885e-02, -1.97878852e-02,\n",
       "        8.02075207e-01,  3.83147853e-03,  7.31942710e-03, -2.78896070e-03,\n",
       "       -4.55389917e-02,  3.41698378e-02, -5.82249947e-02,  4.78237830e-02,\n",
       "       -3.03480811e-02, -9.36662871e-03, -2.21905634e-02, -4.20161448e-02,\n",
       "       -3.27412598e-02, -4.65735756e-02, -3.01698409e-02,  1.22395149e-02,\n",
       "       -4.05134819e-02, -5.18168211e-02, -4.18861769e-02, -6.09364435e-02,\n",
       "       -1.64757855e-02, -4.51483466e-02, -5.22162542e-02, -8.44340101e-02,\n",
       "        4.16170135e-02,  1.51905809e-02, -6.46860246e-03, -7.01757818e-02,\n",
       "       -4.51175570e-02, -1.16105145e-03, -2.93247141e-02,  1.99566558e-02,\n",
       "       -3.30624133e-02, -5.72409295e-03, -3.46434377e-02, -1.83490403e-02,\n",
       "       -3.82996574e-02,  4.23628800e-02, -2.92677525e-03,  1.65528040e-02,\n",
       "       -2.57653855e-02, -5.74955605e-02,  2.94352453e-02, -1.95881426e-02,\n",
       "       -5.32014892e-02, -1.53941959e-01,  6.03344338e-03,  3.98809016e-02,\n",
       "        6.91875909e-03, -6.43052533e-02, -5.96450036e-03, -5.33139072e-02,\n",
       "       -3.71079408e-02, -3.04919537e-02, -5.48997149e-02, -1.37763517e-02,\n",
       "       -4.42016535e-02, -1.35037629e-02,  1.98836648e-03, -6.26936853e-02,\n",
       "       -9.70829185e-03, -2.94803679e-02, -4.73043136e-02, -2.80816909e-02,\n",
       "       -5.37739545e-02, -1.51570039e-02,  3.73631455e-02, -7.89148081e-03,\n",
       "       -1.26123419e-02, -3.18188183e-02,  2.94974912e-02, -6.32378608e-02,\n",
       "       -4.41899896e-02,  8.25890452e-02,  3.42960167e-03, -5.58699295e-02,\n",
       "       -1.44547713e-03, -4.92737405e-02, -3.37157734e-02, -1.67123752e-03,\n",
       "       -7.88181201e-02,  1.34563423e-04, -4.26846333e-02, -5.50386496e-02,\n",
       "       -8.07035191e-04, -5.37923947e-02, -4.39084582e-02, -4.20862138e-02,\n",
       "       -2.02460997e-02, -9.82043426e-03,  1.15882121e-02, -5.23745781e-03,\n",
       "       -1.03680745e-01,  8.16783831e-02,  2.48023346e-02, -2.12966073e-02,\n",
       "       -9.79850162e-03, -5.97284101e-02,  1.55927148e-02, -2.75384448e-02,\n",
       "       -7.17958137e-02, -2.66819727e-02, -9.79927368e-03, -6.12811046e-03,\n",
       "       -1.35665946e-02,  4.60112914e-02, -3.69975008e-02, -2.99108382e-02,\n",
       "       -3.77897383e-03,  6.24454059e-02, -4.73711938e-02, -2.56408807e-02,\n",
       "       -3.35129574e-02,  3.30764651e-02, -3.42580932e-03, -6.40980899e-02,\n",
       "        6.24381378e-03, -2.59481976e-03, -2.05947682e-02,  5.80184534e-03,\n",
       "       -1.69520341e-02, -3.13396342e-02,  9.20129661e-03,  4.38074209e-03,\n",
       "       -2.99002007e-02, -4.68306094e-02, -3.29264365e-02, -4.33461964e-02,\n",
       "       -5.51926903e-02, -3.01706549e-02, -4.62184846e-02,  4.40612203e-03,\n",
       "       -9.88743827e-02, -4.51756008e-02, -1.11954734e-02, -2.79771779e-02,\n",
       "       -7.35729113e-02,  1.11695109e-02,  3.15282401e-03, -3.46051268e-02,\n",
       "        3.50805707e-02, -2.11692117e-02, -8.52309018e-02, -4.17998917e-02,\n",
       "       -7.97682628e-03, -3.46286520e-02, -5.74680790e-02, -4.71834689e-02,\n",
       "       -2.83477306e-02, -3.65725309e-02, -2.67941738e-03, -1.77789535e-02,\n",
       "       -1.46003058e-02, -1.03028573e-01,  3.50813987e-03, -2.55276468e-02,\n",
       "       -4.92869206e-02,  3.89129743e-02, -5.26351519e-02, -9.31726918e-02,\n",
       "       -4.18997817e-02, -4.43794951e-03, -4.40299250e-02, -9.74777713e-02,\n",
       "       -2.32472131e-03, -5.18855043e-02, -1.71922352e-02, -1.04530059e-01,\n",
       "       -4.29420210e-02,  1.84780527e-02,  1.25591148e-04, -1.82154123e-02,\n",
       "       -2.01127995e-02, -4.52954695e-02, -1.41935691e-01, -3.16051841e-02,\n",
       "       -6.34036660e-02, -3.76226790e-02, -6.01126105e-02, -1.33191384e-02,\n",
       "        1.11824730e-02, -1.35800391e-02, -2.45583542e-02,  2.45727152e-02,\n",
       "       -6.49485961e-02,  3.27384509e-02,  2.52443249e-03, -4.84996177e-02,\n",
       "       -2.24996969e-01, -2.69632600e-02, -1.04168197e-02, -4.81781550e-02,\n",
       "       -5.74718229e-02,  7.43242772e-03, -3.04077584e-02,  4.28020768e-02,\n",
       "        2.34942362e-02,  7.98728541e-02, -3.84980370e-03,  3.84835666e-03,\n",
       "       -2.96989307e-02,  1.06035899e-02, -3.29106711e-02,  5.08200228e-02,\n",
       "       -5.20973727e-02, -1.13555696e-02, -1.67075191e-02,  8.75620265e-03,\n",
       "       -2.10735970e-03, -7.08313882e-02,  4.18707589e-03, -6.08610883e-02,\n",
       "        9.92811099e-03, -1.74978897e-02, -4.15764712e-02, -4.81071509e-02,\n",
       "       -3.59360827e-04, -3.72996219e-02, -3.36154029e-02,  9.30645037e-03,\n",
       "       -2.87428889e-02,  3.38916272e-01,  1.55889569e-02, -1.20749138e-02,\n",
       "        6.60608662e-03, -5.28288521e-02,  1.56699657e-03,  4.60904790e-03,\n",
       "       -1.81966927e-02, -1.47303976e-02, -4.79129888e-02, -5.08909933e-02,\n",
       "       -4.07863222e-02,  5.87696552e-01, -6.40100315e-02, -3.28961872e-02,\n",
       "       -4.69895452e-02,  3.40833850e-02, -5.38305789e-02,  8.29242449e-03,\n",
       "       -2.83684433e-02, -7.95814991e-02, -3.34358886e-02, -1.56367552e-02,\n",
       "       -4.11969535e-02, -1.08068585e-02, -4.75606322e-02, -2.32116552e-03,\n",
       "       -7.94138089e-02, -6.00715801e-02, -4.90864888e-02,  2.95125991e-02,\n",
       "       -2.14496665e-02, -5.31493090e-02, -3.60760577e-02, -1.65713707e-03,\n",
       "       -6.23810589e-02, -6.65615350e-02, -2.94328574e-02, -4.55798022e-02,\n",
       "        1.00571290e-01, -4.50164005e-02, -3.46382111e-02,  2.19386201e-02,\n",
       "       -3.11825257e-02,  3.43853384e-02, -5.18128090e-02, -8.36137384e-02,\n",
       "       -5.71676046e-02,  1.23399158e-03, -2.07563881e-02, -2.61201952e-02,\n",
       "       -4.43493016e-02,  2.26286594e-02, -3.64989154e-02, -7.49337375e-02,\n",
       "       -5.31205647e-02, -3.31199765e-02, -2.89630797e-02, -7.96623304e-02,\n",
       "       -8.94981436e-03, -4.65447176e-03, -1.65564362e-02,  2.18574777e-02,\n",
       "       -2.89067980e-02, -3.78119648e-02, -5.48922643e-02, -3.49555314e-02,\n",
       "       -4.25876901e-02, -7.14248866e-02,  1.55242058e-02, -3.10118478e-02,\n",
       "       -6.15143254e-02,  2.82810448e-04, -1.82457138e-02, -5.77845685e-02,\n",
       "       -7.11344182e-02, -1.83645857e-03, -5.58455251e-02,  1.37628308e-02,\n",
       "        1.62172560e-02, -3.68420072e-02,  8.60932283e-03, -2.21234206e-02,\n",
       "       -3.40586081e-02,  1.34797599e-02,  2.14191955e-02, -5.87648824e-02,\n",
       "       -2.14329027e-02,  1.25384517e-02,  2.34335735e-02, -5.71606345e-02,\n",
       "       -4.88574058e-02, -3.14020887e-02, -3.36176790e-02,  1.57339722e-02,\n",
       "       -1.79668870e-02,  1.14784883e-02,  1.16164098e-02, -5.80758378e-02,\n",
       "       -3.04793622e-02, -7.34234322e-03, -1.03355702e-02, -6.38389140e-02,\n",
       "       -6.67905584e-02, -1.26117272e-02, -3.24039869e-02, -6.72282130e-02,\n",
       "       -5.36192320e-02, -2.73984466e-02, -7.78656304e-02,  5.17367721e-02,\n",
       "       -2.56132409e-02,  1.14970058e-02,  1.33768190e-02, -5.34092933e-02,\n",
       "       -4.90953140e-02, -2.29437891e-02,  2.50836741e-03, -2.59005595e-02,\n",
       "       -5.78779504e-02, -6.15348332e-02, -1.76010691e-02,  3.04938387e-02,\n",
       "        1.90077553e-04, -8.81273597e-02,  4.12105501e-01, -5.18569723e-02,\n",
       "       -5.35475202e-02,  1.53077906e-02, -1.17141366e-01, -2.22060829e-03,\n",
       "       -1.24771567e-03,  2.65556704e-02, -3.55066024e-02, -5.70061989e-02,\n",
       "        3.47856395e-02, -4.62054880e-03, -7.08354786e-02, -2.12868731e-02,\n",
       "       -1.00361787e-01, -8.36184993e-03, -1.02886476e-01, -4.19753045e-02,\n",
       "       -3.59605401e-05, -7.56751895e-02,  1.93956140e-02, -3.74097191e-02,\n",
       "        4.27462049e-02,  5.08783478e-03,  1.10963015e-02,  1.68699231e-02,\n",
       "       -4.52817343e-02, -3.01013365e-02,  4.73714154e-03, -6.16758056e-02,\n",
       "        4.09413315e-02, -3.16235535e-02, -9.79338121e-03,  1.27027566e-02,\n",
       "       -1.42883547e-02, -8.39611515e-02, -7.78975412e-02, -6.48192316e-02,\n",
       "       -3.27251740e-02, -1.63022429e-02, -2.34571528e-02, -1.12649705e-02,\n",
       "       -6.26866445e-02, -7.62585774e-02, -2.28348915e-02, -2.54387930e-02,\n",
       "       -2.42753215e-02, -4.01587710e-02,  1.89490300e-02, -5.80234602e-02,\n",
       "        9.17766020e-02, -6.65734634e-02, -2.88434159e-02, -1.10377684e-01,\n",
       "        1.01822903e-02,  5.36948442e-02, -5.38673066e-02, -7.22947437e-03,\n",
       "        1.90297309e-02, -1.33693498e-02, -6.80402443e-02, -8.52842256e-03,\n",
       "       -1.79472729e-03, -4.62749340e-02,  1.18112136e-02, -2.41993275e-02,\n",
       "       -1.19631253e-02, -4.98173945e-02, -3.20411175e-02, -3.51872444e-02,\n",
       "       -1.69001948e-02,  5.30058928e-02, -4.85737622e-02, -5.89649985e-03,\n",
       "        4.12623920e-02, -4.10187133e-02, -1.02243777e-02, -4.64050099e-02,\n",
       "        2.52548996e-02, -9.28399991e-03, -5.24849743e-02, -4.31740545e-02,\n",
       "       -5.21927811e-02,  1.93056930e-02,  4.41940427e-02, -6.95351958e-02,\n",
       "        2.60203779e-02, -4.71339226e-02, -4.47712988e-02,  2.41697971e-02,\n",
       "       -3.98667566e-02, -2.18660571e-02, -4.83010113e-02, -1.47544527e-02,\n",
       "        1.36605673e-03, -1.07842609e-01, -3.08051892e-02, -2.76597347e-02,\n",
       "       -4.81024981e-02, -9.18657333e-03,  7.92457815e-03, -7.75427148e-02,\n",
       "        4.55447435e-02, -2.20947526e-02,  1.23430658e-02, -4.74185012e-02,\n",
       "       -3.08901295e-02, -4.75923382e-02, -3.63420472e-02,  6.30968250e-04,\n",
       "       -4.90016267e-02, -2.99229976e-02,  2.13183165e-02,  3.05532180e-02],\n",
       "      dtype=float32)>"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# LayerNorm 中的 beta 参数\n",
    "params[4]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- 词嵌入层，共30522个单词，每个单词768维，参数量：23440896\n",
    "- 位置编码，最长512，每个位置编码768维，参数量：393216。通常位置编码是固定的，该部分参数不用训练\n",
    "- 句子编码，共2个句子，属于每个句子的编码768维，参数量:1536\n",
    "- 正则化中 gamma 和 beta 都是768维参数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 编码器"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T06:39:51.438355Z",
     "start_time": "2020-05-07T06:39:51.430885Z"
    }
   },
   "source": [
    "## 多头自注意力层\n",
    "<img src=\"../images/self-attn.png\" width=\"75%\">"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Attention(tf.keras.layers.Layer):\n",
    "    \"\"\"Performs multi-headed attention from `from_tensor` to `to_tensor`.\n",
    "  \n",
    "    This is an implementation of multi-headed attention based on \"Attention\n",
    "    is all you Need\". If `from_tensor` and `to_tensor` are the same, then\n",
    "    this is self-attention. Each timestep in `from_tensor` attends to the\n",
    "    corresponding sequence in `to_tensor`, and returns a fixed-with vector.\n",
    "  \n",
    "    This function first projects `from_tensor` into a \"query\" tensor and\n",
    "    `to_tensor` into \"key\" and \"value\" tensors. These are (effectively) a list\n",
    "    of tensors of length `num_attention_heads`, where each tensor is of shape\n",
    "    [batch_size, seq_length, size_per_head].\n",
    "  \n",
    "    Then, the query and key tensors are dot-producted and scaled. These are\n",
    "    softmaxed to obtain attention probabilities. The value tensors are then\n",
    "    interpolated by these probabilities, then concatenated back to a single\n",
    "    tensor and returned.\n",
    "  \n",
    "    In practice, the multi-headed attention are done with tf.einsum as follows:\n",
    "      Input_tensor: [BFD]\n",
    "      Wq, Wk, Wv: [DNH]\n",
    "      Q:[BFNH] = einsum('BFD,DNH->BFNH', Input_tensor, Wq)\n",
    "      K:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wk)\n",
    "      V:[BTNH] = einsum('BTD,DNH->BTNH', Input_tensor, Wv)\n",
    "      attention_scores:[BNFT] = einsum('BTNH,BFNH->BNFT', K, Q) / sqrt(H)\n",
    "      attention_probs:[BNFT] = softmax(attention_scores)\n",
    "      context_layer:[BFNH] = einsum('BNFT,BTNH->BFNH', attention_probs, V)\n",
    "      Wout:[DNH]\n",
    "      Output:[BFD] = einsum('BFNH,DNH>BFD', context_layer, Wout)\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, num_attention_heads=12, size_per_head=64,\n",
    "                 attention_probs_dropout_prob=0.0, initializer_range=0.02,\n",
    "                 backward_compatible=False, **kwargs):\n",
    "        super(Attention, self).__init__(**kwargs)\n",
    "        \n",
    "        # 注意力的头数\n",
    "        self.num_attention_heads = num_attention_heads\n",
    "        \n",
    "        # 每个头的维度\n",
    "        self.size_per_head = size_per_head\n",
    "        \n",
    "        self.attention_probs_dropout_prob = attention_probs_dropout_prob\n",
    "        self.initializer_range = initializer_range\n",
    "        self.backward_compatible = backward_compatible\n",
    "    \n",
    "    def build(self, unused_input_shapes):\n",
    "        \"\"\"Implements build() for the layer.\"\"\"\n",
    "        # 将输入转换成 query，key，value\n",
    "        self.query_dense = self._projection_dense_layer(\"query\")\n",
    "        self.key_dense = self._projection_dense_layer(\"key\")\n",
    "        self.value_dense = self._projection_dense_layer(\"value\")\n",
    "        self.attention_probs_dropout = tf.keras.layers.Dropout(\n",
    "            rate=self.attention_probs_dropout_prob)\n",
    "        super(Attention, self).build(unused_input_shapes)\n",
    "    \n",
    "    def reshape_to_matrix(self, input_tensor):\n",
    "        \"\"\"Reshape N > 2 rank tensor to rank 2 tensor for performance.\"\"\"\n",
    "        ndims = input_tensor.shape.ndims\n",
    "        if ndims < 2:\n",
    "            raise ValueError(\"Input tensor must have at least rank 2.\"\n",
    "                             \"Shape = %s\" % (input_tensor.shape))\n",
    "        if ndims == 2:\n",
    "            return input_tensor\n",
    "        \n",
    "        width = input_tensor.shape[-1]\n",
    "        output_tensor = tf.reshape(input_tensor, [-1, width])\n",
    "        return output_tensor\n",
    "    \n",
    "    def __call__(self, from_tensor, to_tensor, attention_mask=None, **kwargs):\n",
    "        inputs = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])\n",
    "        return super(Attention, self).__call__(inputs, **kwargs)\n",
    "    \n",
    "    def call(self, inputs):\n",
    "        \"\"\"Implements call() for the layer.\"\"\"\n",
    "        (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(\n",
    "            inputs)\n",
    "        \n",
    "        # Scalar dimensions referenced here:\n",
    "        #   B = batch size (number of sequences)\n",
    "        #   F = `from_tensor` sequence length\n",
    "        #   T = `to_tensor` sequence length\n",
    "        #   N = `num_attention_heads`\n",
    "        #   H = `size_per_head`\n",
    "        # `query_tensor` = [B, F, N ,H]\n",
    "        query_tensor = self.query_dense(from_tensor)\n",
    "        \n",
    "        # `key_tensor` = [B, T, N, H]\n",
    "        key_tensor = self.key_dense(to_tensor)\n",
    "        \n",
    "        # `value_tensor` = [B, T, N, H]\n",
    "        value_tensor = self.value_dense(to_tensor)\n",
    "        \n",
    "        # Take the dot product between \"query\" and \"key\" to get the raw\n",
    "        # attention scores.\n",
    "        attention_scores = tf.einsum(\"BTNH,BFNH->BNFT\", key_tensor,\n",
    "                                     query_tensor)\n",
    "        attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(\n",
    "            float(self.size_per_head)))\n",
    "        \n",
    "        if attention_mask is not None:\n",
    "            # `attention_mask` = [B, 1, F, T]\n",
    "            attention_mask = tf.expand_dims(attention_mask, axis=[1])\n",
    "            \n",
    "            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n",
    "            # masked positions, this operation will create a tensor which is 0.0 for\n",
    "            # positions we want to attend and -10000.0 for masked positions.\n",
    "            adder = (1.0 - tf.cast(attention_mask,\n",
    "                                   attention_scores.dtype)) * -10000.0\n",
    "            \n",
    "            # Since we are adding it to the raw scores before the softmax, this is\n",
    "            # effectively the same as removing these entirely.\n",
    "            attention_scores += adder\n",
    "        \n",
    "        # Normalize the attention scores to probabilities.\n",
    "        # `attention_probs` = [B, N, F, T]\n",
    "        attention_probs = tf.nn.softmax(attention_scores)\n",
    "        \n",
    "        # This is actually dropping out entire tokens to attend to, which might\n",
    "        # seem a bit unusual, but is taken from the original Transformer paper.\n",
    "        attention_probs = self.attention_probs_dropout(attention_probs)\n",
    "        \n",
    "        # `context_layer` = [B, F, N, H]\n",
    "        context_tensor = tf.einsum(\"BNFT,BTNH->BFNH\", attention_probs,\n",
    "                                   value_tensor)\n",
    "        \n",
    "        return context_tensor\n",
    "    \n",
    "    def _projection_dense_layer(self, name):\n",
    "        \"\"\"A helper to define a projection layer.\"\"\"\n",
    "        return Dense3D(num_attention_heads=self.num_attention_heads,\n",
    "            size_per_head=self.size_per_head,\n",
    "            kernel_initializer=get_initializer(self.initializer_range),\n",
    "            output_projection=False,\n",
    "            backward_compatible=self.backward_compatible, name=name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dense3D(tf.keras.layers.Layer):\n",
    "    \"\"\"A Dense Layer using 3D kernel with tf.einsum implementation.\n",
    "  \n",
    "    Attributes:\n",
    "      num_attention_heads: An integer, number of attention heads for each\n",
    "        multihead attention layer.\n",
    "      size_per_head: An integer, hidden size per attention head.\n",
    "      hidden_size: An integer, dimension of the hidden layer.\n",
    "      kernel_initializer: An initializer for the kernel weight.\n",
    "      bias_initializer: An initializer for the bias.\n",
    "      activation: An activation function to use. If nothing is specified, no\n",
    "        activation is applied.\n",
    "      use_bias: A bool, whether the layer uses a bias.\n",
    "      output_projection: A bool, whether the Dense3D layer is used for output\n",
    "        linear projection.\n",
    "      backward_compatible: A bool, whether the variables shape are compatible\n",
    "        with checkpoints converted from TF 1.x.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, num_attention_heads=12, size_per_head=72,\n",
    "                 kernel_initializer=None, bias_initializer=\"zeros\",\n",
    "                 activation=None, use_bias=True, output_projection=False,\n",
    "                 backward_compatible=False, **kwargs):\n",
    "        \"\"\"Inits Dense3D.\"\"\"\n",
    "        super(Dense3D, self).__init__(**kwargs)\n",
    "        self.num_attention_heads = num_attention_heads\n",
    "        self.size_per_head = size_per_head\n",
    "        # Total width of the projection across all heads.\n",
    "        self.hidden_size = num_attention_heads * size_per_head\n",
    "        self.kernel_initializer = kernel_initializer\n",
    "        self.bias_initializer = bias_initializer\n",
    "        self.activation = activation\n",
    "        self.use_bias = use_bias\n",
    "        self.output_projection = output_projection\n",
    "        self.backward_compatible = backward_compatible\n",
    "    \n",
    "    # 2D kernel shape matching variables in checkpoints converted from TF 1.x\n",
    "    # (used when backward_compatible=True).\n",
    "    @property\n",
    "    def compatible_kernel_shape(self):\n",
    "        if self.output_projection:\n",
    "            return [self.hidden_size, self.hidden_size]\n",
    "        return [self.last_dim, self.hidden_size]\n",
    "    \n",
    "    @property\n",
    "    def compatible_bias_shape(self):\n",
    "        return [self.hidden_size]\n",
    "    \n",
    "    # Native (non-compatible) kernel shape consumed by the einsums in call().\n",
    "    @property\n",
    "    def kernel_shape(self):\n",
    "        if self.output_projection:\n",
    "            return [self.num_attention_heads, self.size_per_head,\n",
    "                    self.hidden_size]\n",
    "        return [self.last_dim, self.num_attention_heads, self.size_per_head]\n",
    "    \n",
    "    @property\n",
    "    def bias_shape(self):\n",
    "        if self.output_projection:\n",
    "            return [self.hidden_size]\n",
    "        return [self.num_attention_heads, self.size_per_head]\n",
    "    \n",
    "    def build(self, input_shape):\n",
    "        \"\"\"Implements build() for the layer.\"\"\"\n",
    "        dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n",
    "        if not (dtype.is_floating or dtype.is_complex):\n",
    "            raise TypeError(\"Unable to build `Dense3D` layer with non-floating \"\n",
    "                            \"point (and non-complex) dtype %s\" % (dtype,))\n",
    "        input_shape = tf.TensorShape(input_shape)\n",
    "        if tf.compat.dimension_value(input_shape[-1]) is None:\n",
    "            raise ValueError(\"The last dimension of the inputs to `Dense3D` \"\n",
    "                             \"should be defined. Found `None`.\")\n",
    "        # last_dim is needed by the *_shape properties above.\n",
    "        self.last_dim = tf.compat.dimension_value(input_shape[-1])\n",
    "        self.input_spec = tf.keras.layers.InputSpec(min_ndim=3,\n",
    "            axes={-1: self.last_dim})\n",
    "        # Determines variable shapes.\n",
    "        if self.backward_compatible:\n",
    "            kernel_shape = self.compatible_kernel_shape\n",
    "            bias_shape = self.compatible_bias_shape\n",
    "        else:\n",
    "            kernel_shape = self.kernel_shape\n",
    "            bias_shape = self.bias_shape\n",
    "        \n",
    "        self.kernel = self.add_weight(\"kernel\", shape=kernel_shape,\n",
    "            initializer=self.kernel_initializer, dtype=self.dtype,\n",
    "            trainable=True)\n",
    "        if self.use_bias:\n",
    "            self.bias = self.add_weight(\"bias\", shape=bias_shape,\n",
    "                initializer=self.bias_initializer, dtype=self.dtype,\n",
    "                trainable=True)\n",
    "        else:\n",
    "            self.bias = None\n",
    "        super(Dense3D, self).build(input_shape)\n",
    "    \n",
    "    def call(self, inputs):\n",
    "        \"\"\"Implements ``call()`` for Dense3D.\n",
    "    \n",
    "        Args:\n",
    "          inputs: A float tensor of shape [batch_size, sequence_length, hidden_size]\n",
    "            when output_projection is False, otherwise a float tensor of shape\n",
    "            [batch_size, sequence_length, num_heads, dim_per_head].\n",
    "    \n",
    "        Returns:\n",
    "          The projected tensor with shape [batch_size, sequence_length, num_heads,\n",
    "            dim_per_head] when output_projection is False, otherwise [batch_size,\n",
    "            sequence_length, hidden_size].\n",
    "        \"\"\"\n",
    "        # Variables stored in checkpoint-compatible (2D) form are reshaped\n",
    "        # here to the 3D kernel / 2D bias shapes the einsums below expect.\n",
    "        if self.backward_compatible:\n",
    "            kernel = tf.keras.backend.reshape(self.kernel, self.kernel_shape)\n",
    "            bias = (tf.keras.backend.reshape(self.bias,\n",
    "                                             self.bias_shape) if self.use_bias else None)\n",
    "        else:\n",
    "            kernel = self.kernel\n",
    "            bias = self.bias\n",
    "        \n",
    "        # output_projection: [B, S, N, H] x [N, H, hidden] -> [B, S, hidden]\n",
    "        # otherwise:         [B, S, hidden] x [hidden, N, H] -> [B, S, N, H]\n",
    "        if self.output_projection:\n",
    "            ret = tf.einsum(\"abcd,cde->abe\", inputs, kernel)\n",
    "        else:\n",
    "            ret = tf.einsum(\"abc,cde->abde\", inputs, kernel)\n",
    "        if self.use_bias:\n",
    "            ret += bias\n",
    "        if self.activation is not None:\n",
    "            return self.activation(ret)\n",
    "        return ret"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dense2DProjection(tf.keras.layers.Layer):\n",
    "    \"\"\"A 2D projection layer with tf.einsum implementation.\n",
    "  \n",
    "    Maps the last dimension of a (min 3D) input to `output_size` through a\n",
    "    [last_dim, output_size] kernel plus a bias, optionally followed by an\n",
    "    activation. When `fp32_activation` is set and the layer dtype is\n",
    "    float16, the activation is evaluated in float32.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, output_size, kernel_initializer=None,\n",
    "                 bias_initializer=\"zeros\", activation=None,\n",
    "                 fp32_activation=False, **kwargs):\n",
    "        \"\"\"Inits Dense2DProjection.\n",
    "    \n",
    "        Args:\n",
    "          output_size: An integer, size of the projected last dimension.\n",
    "          kernel_initializer: An initializer for the kernel weight.\n",
    "          bias_initializer: An initializer for the bias.\n",
    "          activation: Optional activation function applied to the result.\n",
    "          fp32_activation: A bool, whether to run the activation in float32\n",
    "            when the layer dtype is float16.\n",
    "        \"\"\"\n",
    "        super(Dense2DProjection, self).__init__(**kwargs)\n",
    "        self.output_size = output_size\n",
    "        self.kernel_initializer = kernel_initializer\n",
    "        self.bias_initializer = bias_initializer\n",
    "        self.activation = activation\n",
    "        self.fp32_activation = fp32_activation\n",
    "    \n",
    "    def build(self, input_shape):\n",
    "        \"\"\"Creates the kernel and bias variables.\"\"\"\n",
    "        layer_dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n",
    "        if not (layer_dtype.is_floating or layer_dtype.is_complex):\n",
    "            raise TypeError(\"Unable to build `Dense2DProjection` layer with \"\n",
    "                            \"non-floating point (and non-complex) \"\n",
    "                            \"dtype %s\" % (layer_dtype,))\n",
    "        input_shape = tf.TensorShape(input_shape)\n",
    "        in_features = tf.compat.dimension_value(input_shape[-1])\n",
    "        if in_features is None:\n",
    "            raise ValueError(\"The last dimension of the inputs to \"\n",
    "                             \"`Dense2DProjection` should be defined. \"\n",
    "                             \"Found `None`.\")\n",
    "        self.input_spec = tf.keras.layers.InputSpec(min_ndim=3,\n",
    "                                                    axes={-1: in_features})\n",
    "        # Create kernel before bias so the layer's weight ordering is stable.\n",
    "        self.kernel = self.add_weight(\"kernel\",\n",
    "            shape=[in_features, self.output_size],\n",
    "            initializer=self.kernel_initializer, dtype=self.dtype,\n",
    "            trainable=True)\n",
    "        self.bias = self.add_weight(\"bias\", shape=[self.output_size],\n",
    "            initializer=self.bias_initializer, dtype=self.dtype, trainable=True)\n",
    "        super(Dense2DProjection, self).build(input_shape)\n",
    "    \n",
    "    def call(self, inputs):\n",
    "        \"\"\"Implements call() for Dense2DProjection.\n",
    "    \n",
    "        Args:\n",
    "          inputs: float Tensor of shape [batch, from_seq_length,\n",
    "            num_attention_heads, size_per_head].\n",
    "    \n",
    "        Returns:\n",
    "          A 3D Tensor.\n",
    "        \"\"\"\n",
    "        projected = tf.einsum(\"abc,cd->abd\", inputs, self.kernel) + self.bias\n",
    "        if self.activation is None:\n",
    "            return projected\n",
    "        if self.fp32_activation and self.dtype == tf.float16:\n",
    "            projected = tf.cast(projected, tf.float32)\n",
    "        return self.activation(projected)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 注意力层参数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T09:13:08.071191Z",
     "start_time": "2020-05-07T09:13:08.063673Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======注意力层参数=============\n",
      "encoder/layer_._0/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._0/attention/output/LayerNorm/beta:0 (768,) True\n"
     ]
    }
   ],
   "source": [
    "print(\"======注意力层参数=============\")\n",
    "# Parameters 5..14 are the attention sub-layer of encoder layer 0.\n",
    "for attn_param in params[5:15]:\n",
    "    short_name = '/'.join(attn_param.name.split('/')[2:])\n",
    "    print(short_name, attn_param.shape, attn_param.trainable)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "编码器，第一层，注意力的参数：\n",
    "- 将输入转换成 query、key、value 时，**三组** 权重+偏置 参数，分别为 (768,768) 和（768,)\n",
    "- 然后进行点积注意力运算后的输出，再经过密集层处理：权重+偏置 参数，分别为 (768,768) 和（768,)\n",
    "- 将上述 **输出和注意力的输入相加** 后，再进行层归一化（LayerNorm）， gamma 和 beta 都是768维参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T09:02:29.819311Z",
     "start_time": "2020-05-07T09:02:29.807556Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['embeddings/word_embeddings/weight:0',\n",
       " 'embeddings/position_embeddings/embeddings:0',\n",
       " 'embeddings/token_type_embeddings/embeddings:0',\n",
       " 'embeddings/LayerNorm/gamma:0',\n",
       " 'embeddings/LayerNorm/beta:0',\n",
       " 'encoder/layer_._0/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._0/attention/self/query/bias:0',\n",
       " 'encoder/layer_._0/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._0/attention/self/key/bias:0',\n",
       " 'encoder/layer_._0/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._0/attention/self/value/bias:0',\n",
       " 'encoder/layer_._0/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._0/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._0/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._0/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._0/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._0/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._0/output/dense/kernel:0',\n",
       " 'encoder/layer_._0/output/dense/bias:0',\n",
       " 'encoder/layer_._0/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._0/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._1/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._1/attention/self/query/bias:0',\n",
       " 'encoder/layer_._1/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._1/attention/self/key/bias:0',\n",
       " 'encoder/layer_._1/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._1/attention/self/value/bias:0',\n",
       " 'encoder/layer_._1/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._1/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._1/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._1/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._1/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._1/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._1/output/dense/kernel:0',\n",
       " 'encoder/layer_._1/output/dense/bias:0',\n",
       " 'encoder/layer_._1/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._1/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._2/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._2/attention/self/query/bias:0',\n",
       " 'encoder/layer_._2/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._2/attention/self/key/bias:0',\n",
       " 'encoder/layer_._2/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._2/attention/self/value/bias:0',\n",
       " 'encoder/layer_._2/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._2/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._2/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._2/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._2/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._2/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._2/output/dense/kernel:0',\n",
       " 'encoder/layer_._2/output/dense/bias:0',\n",
       " 'encoder/layer_._2/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._2/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._3/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._3/attention/self/query/bias:0',\n",
       " 'encoder/layer_._3/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._3/attention/self/key/bias:0',\n",
       " 'encoder/layer_._3/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._3/attention/self/value/bias:0',\n",
       " 'encoder/layer_._3/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._3/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._3/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._3/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._3/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._3/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._3/output/dense/kernel:0',\n",
       " 'encoder/layer_._3/output/dense/bias:0',\n",
       " 'encoder/layer_._3/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._3/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._4/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._4/attention/self/query/bias:0',\n",
       " 'encoder/layer_._4/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._4/attention/self/key/bias:0',\n",
       " 'encoder/layer_._4/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._4/attention/self/value/bias:0',\n",
       " 'encoder/layer_._4/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._4/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._4/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._4/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._4/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._4/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._4/output/dense/kernel:0',\n",
       " 'encoder/layer_._4/output/dense/bias:0',\n",
       " 'encoder/layer_._4/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._4/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._5/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._5/attention/self/query/bias:0',\n",
       " 'encoder/layer_._5/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._5/attention/self/key/bias:0',\n",
       " 'encoder/layer_._5/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._5/attention/self/value/bias:0',\n",
       " 'encoder/layer_._5/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._5/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._5/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._5/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._5/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._5/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._5/output/dense/kernel:0',\n",
       " 'encoder/layer_._5/output/dense/bias:0',\n",
       " 'encoder/layer_._5/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._5/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._6/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._6/attention/self/query/bias:0',\n",
       " 'encoder/layer_._6/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._6/attention/self/key/bias:0',\n",
       " 'encoder/layer_._6/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._6/attention/self/value/bias:0',\n",
       " 'encoder/layer_._6/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._6/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._6/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._6/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._6/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._6/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._6/output/dense/kernel:0',\n",
       " 'encoder/layer_._6/output/dense/bias:0',\n",
       " 'encoder/layer_._6/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._6/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._7/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._7/attention/self/query/bias:0',\n",
       " 'encoder/layer_._7/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._7/attention/self/key/bias:0',\n",
       " 'encoder/layer_._7/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._7/attention/self/value/bias:0',\n",
       " 'encoder/layer_._7/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._7/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._7/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._7/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._7/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._7/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._7/output/dense/kernel:0',\n",
       " 'encoder/layer_._7/output/dense/bias:0',\n",
       " 'encoder/layer_._7/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._7/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._8/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._8/attention/self/query/bias:0',\n",
       " 'encoder/layer_._8/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._8/attention/self/key/bias:0',\n",
       " 'encoder/layer_._8/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._8/attention/self/value/bias:0',\n",
       " 'encoder/layer_._8/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._8/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._8/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._8/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._8/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._8/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._8/output/dense/kernel:0',\n",
       " 'encoder/layer_._8/output/dense/bias:0',\n",
       " 'encoder/layer_._8/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._8/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._9/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._9/attention/self/query/bias:0',\n",
       " 'encoder/layer_._9/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._9/attention/self/key/bias:0',\n",
       " 'encoder/layer_._9/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._9/attention/self/value/bias:0',\n",
       " 'encoder/layer_._9/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._9/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._9/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._9/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._9/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._9/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._9/output/dense/kernel:0',\n",
       " 'encoder/layer_._9/output/dense/bias:0',\n",
       " 'encoder/layer_._9/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._9/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._10/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._10/attention/self/query/bias:0',\n",
       " 'encoder/layer_._10/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._10/attention/self/key/bias:0',\n",
       " 'encoder/layer_._10/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._10/attention/self/value/bias:0',\n",
       " 'encoder/layer_._10/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._10/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._10/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._10/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._10/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._10/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._10/output/dense/kernel:0',\n",
       " 'encoder/layer_._10/output/dense/bias:0',\n",
       " 'encoder/layer_._10/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._10/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._11/attention/self/query/kernel:0',\n",
       " 'encoder/layer_._11/attention/self/query/bias:0',\n",
       " 'encoder/layer_._11/attention/self/key/kernel:0',\n",
       " 'encoder/layer_._11/attention/self/key/bias:0',\n",
       " 'encoder/layer_._11/attention/self/value/kernel:0',\n",
       " 'encoder/layer_._11/attention/self/value/bias:0',\n",
       " 'encoder/layer_._11/attention/output/dense/kernel:0',\n",
       " 'encoder/layer_._11/attention/output/dense/bias:0',\n",
       " 'encoder/layer_._11/attention/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._11/attention/output/LayerNorm/beta:0',\n",
       " 'encoder/layer_._11/intermediate/dense/kernel:0',\n",
       " 'encoder/layer_._11/intermediate/dense/bias:0',\n",
       " 'encoder/layer_._11/output/dense/kernel:0',\n",
       " 'encoder/layer_._11/output/dense/bias:0',\n",
       " 'encoder/layer_._11/output/LayerNorm/gamma:0',\n",
       " 'encoder/layer_._11/output/LayerNorm/beta:0',\n",
       " 'pooler/dense/kernel:0',\n",
       " 'pooler/dense/bias:0',\n",
       " 'seq_relationship/kernel:0',\n",
       " 'seq_relationship/bias:0',\n",
       " 'predictions/bias:0',\n",
       " 'predictions/transform/dense/kernel:0',\n",
       " 'predictions/transform/dense/bias:0',\n",
       " 'predictions/transform/LayerNorm/gamma:0',\n",
       " 'predictions/transform/LayerNorm/beta:0']"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "['/'.join(param.name.split('/')[2:]) for param in model.weights]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 编码层"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "其层次结构如下：\n",
    "```\n",
    "1. attention_layer       :[batch,seq,hidden_size]-->[batch,seq,hidden_size]\n",
    "2. attention_output_dense  \n",
    "3. attention_dropout\n",
    "4. layer_norm\n",
    "5. intermediate_dense    :[batch,seq,hidden_size]-->[batch,seq,intermediate_size]\n",
    "6. output_dense          :[batch,seq,intermediate_size]-->[batch,seq,hidden_size]\n",
    "7. output_dropout\n",
    "8. layer_norm            \n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TransformerBlock(tf.keras.layers.Layer):\n",
    "    \"\"\"Single transformer layer.\n",
    "  \n",
    "    It has two sub-layers. The first is a multi-head self-attention mechanism, and\n",
    "    the second is a positionwise fully connected feed-forward network.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, hidden_size=768, num_attention_heads=12,\n",
    "                 intermediate_size=3072, intermediate_activation=\"gelu\",\n",
    "                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,\n",
    "                 initializer_range=0.02, backward_compatible=False,\n",
    "                 float_type=tf.float32, **kwargs):\n",
    "        \"\"\"Inits TransformerBlock.\"\"\"\n",
    "        super(TransformerBlock, self).__init__(**kwargs)\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_attention_heads = num_attention_heads\n",
    "        self.intermediate_size = intermediate_size\n",
    "        self.intermediate_activation = tf_utils.get_activation(\n",
    "            intermediate_activation)\n",
    "        self.hidden_dropout_prob = hidden_dropout_prob\n",
    "        self.attention_probs_dropout_prob = attention_probs_dropout_prob\n",
    "        self.initializer_range = initializer_range\n",
    "        self.backward_compatible = backward_compatible\n",
    "        self.float_type = float_type\n",
    "        \n",
    "        if self.hidden_size % self.num_attention_heads != 0:\n",
    "            raise ValueError(\n",
    "                \"The hidden size (%d) is not a multiple of the number of attention \"\n",
    "                \"heads (%d)\" % (self.hidden_size, self.num_attention_heads))\n",
    "        self.attention_head_size = int(\n",
    "            self.hidden_size / self.num_attention_heads)\n",
    "    \n",
    "    def build(self, unused_input_shapes):\n",
    "        \"\"\"Implements build() for the layer.\"\"\"\n",
    "        # Multi-head self-attention producing a per-head context tensor.\n",
    "        self.attention_layer = Attention(\n",
    "            num_attention_heads=self.num_attention_heads,\n",
    "            size_per_head=self.attention_head_size,\n",
    "            attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n",
    "            initializer_range=self.initializer_range,\n",
    "            backward_compatible=self.backward_compatible, name=\"self_attention\")\n",
    "        # output_projection=True: maps [batch, seq, heads, head_size] back to\n",
    "        # [batch, seq, hidden_size] (see Dense3D.call docstring).\n",
    "        self.attention_output_dense = Dense3D(\n",
    "            num_attention_heads=self.num_attention_heads,\n",
    "            size_per_head=int(self.hidden_size / self.num_attention_heads),\n",
    "            kernel_initializer=get_initializer(self.initializer_range),\n",
    "            output_projection=True,\n",
    "            backward_compatible=self.backward_compatible,\n",
    "            name=\"self_attention_output\")\n",
    "        self.attention_dropout = tf.keras.layers.Dropout(\n",
    "            rate=self.hidden_dropout_prob)\n",
    "        self.attention_layer_norm = (\n",
    "            tf.keras.layers.LayerNormalization(name=\"self_attention_layer_norm\",\n",
    "                axis=-1, epsilon=1e-12,\n",
    "                # We do layer norm in float32 for numeric stability.\n",
    "                dtype=tf.float32))\n",
    "        self.intermediate_dense = Dense2DProjection(\n",
    "            output_size=self.intermediate_size,\n",
    "            kernel_initializer=get_initializer(self.initializer_range),\n",
    "            activation=self.intermediate_activation,\n",
    "            # Uses float32 so that gelu activation is done in float32.\n",
    "            fp32_activation=True, name=\"intermediate\")\n",
    "        \n",
    "        self.output_dense = Dense2DProjection(output_size=self.hidden_size,\n",
    "            kernel_initializer=get_initializer(self.initializer_range),\n",
    "            name=\"output\")\n",
    "        self.output_dropout = tf.keras.layers.Dropout(\n",
    "            rate=self.hidden_dropout_prob)\n",
    "        self.output_layer_norm = tf.keras.layers.LayerNormalization(\n",
    "            name=\"output_layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n",
    "        super(TransformerBlock, self).build(unused_input_shapes)\n",
    "    \n",
    "    def common_layers(self):\n",
    "        \"\"\"Explicitly gets all layer objects inside a Transformer encoder block.\"\"\"\n",
    "        return [self.attention_layer, self.attention_output_dense,\n",
    "            self.attention_dropout, self.attention_layer_norm,\n",
    "            self.intermediate_dense, self.output_dense, self.output_dropout,\n",
    "            self.output_layer_norm]\n",
    "    \n",
    "    def __call__(self, input_tensor, attention_mask=None, **kwargs):\n",
    "        # Bundle the tensor and mask into a single packed input; call()\n",
    "        # unpacks them again via tf_utils.unpack_inputs.\n",
    "        inputs = tf_utils.pack_inputs([input_tensor, attention_mask])\n",
    "        return super(TransformerBlock, self).__call__(inputs, **kwargs)\n",
    "    \n",
    "    def call(self, inputs):\n",
    "        \"\"\"Implements call() for the layer.\"\"\"\n",
    "        (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n",
    "        attention_output = self.attention_layer(from_tensor=input_tensor,\n",
    "            to_tensor=input_tensor, attention_mask=attention_mask)\n",
    "        attention_output = self.attention_output_dense(attention_output)\n",
    "        attention_output = self.attention_dropout(attention_output)\n",
    "        # Use float32 in keras layer norm and the gelu activation in the\n",
    "        # intermediate dense layer for numeric stability\n",
    "        # First residual connection: add the block input before layer norm.\n",
    "        attention_output = self.attention_layer_norm(\n",
    "            input_tensor + attention_output)\n",
    "        if self.float_type == tf.float16:\n",
    "            attention_output = tf.cast(attention_output, tf.float16)\n",
    "        intermediate_output = self.intermediate_dense(attention_output)\n",
    "        if self.float_type == tf.float16:\n",
    "            intermediate_output = tf.cast(intermediate_output, tf.float16)\n",
    "        layer_output = self.output_dense(intermediate_output)\n",
    "        layer_output = self.output_dropout(layer_output)\n",
    "        # Use float32 in keras layer norm for numeric stability\n",
    "        # Second residual connection around the feed-forward sub-layer.\n",
    "        layer_output = self.output_layer_norm(layer_output + attention_output)\n",
    "        if self.float_type == tf.float16:\n",
    "            layer_output = tf.cast(layer_output, tf.float16)\n",
    "        return layer_output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 编码层参数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T09:25:13.265479Z",
     "start_time": "2020-05-07T09:25:13.258815Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======编码层参数=============\n",
      "encoder/layer_._0/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._0/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._0/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._0/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._0/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._0/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._0/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._0/output/LayerNorm/beta:0 (768,) True\n"
     ]
    }
   ],
   "source": [
    "# Show the parameters of one encoder layer (params indices 5..20).\n",
    "print(\"======编码层参数=============\")\n",
    "for param in params[5:21]:\n",
    "    short_name = '/'.join(param.name.split('/')[2:])\n",
    "    print(short_name, param.shape, param.trainable)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "上述注意力的输出后\n",
    "- 经过前向密集层：权重 (768, 3072) ，偏置 (3072,) \n",
    "- 密集层：权重 (3072,768) ，偏置 (768,) \n",
    "- 再次与注意力的输出相加后，做层归一化（LayerNorm）： gamma 和 beta 都是768维参数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 编码器\n",
    "由上一步的多层编码层组成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Transformer(tf.keras.layers.Layer):\n",
    "    \"\"\"Multi-headed, multi-layer Transformer from \"Attention is All You Need\".\n",
    "  \n",
    "    This is almost an exact implementation of the original Transformer encoder.\n",
    "  \n",
    "    See the original paper:\n",
    "    https://arxiv.org/abs/1706.03762\n",
    "  \n",
    "    Also see:\n",
    "    https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, num_hidden_layers=12, hidden_size=768,\n",
    "                 num_attention_heads=12, intermediate_size=3072,\n",
    "                 intermediate_activation=\"gelu\", hidden_dropout_prob=0.0,\n",
    "                 attention_probs_dropout_prob=0.0, initializer_range=0.02,\n",
    "                 backward_compatible=False, float_type=tf.float32, **kwargs):\n",
    "        \"\"\"Stores hyperparameters; weights are created later in build().\n",
    "    \n",
    "        The defaults correspond to the BERT-base configuration (12 layers,\n",
    "        hidden size 768, 12 heads, intermediate size 3072).\n",
    "    \n",
    "        Args:\n",
    "          num_hidden_layers: int, number of stacked TransformerBlock layers.\n",
    "          hidden_size: int, hidden dimension forwarded to every block.\n",
    "          num_attention_heads: int, attention heads per block.\n",
    "          intermediate_size: int, width of each block's feed-forward sublayer.\n",
    "          intermediate_activation: activation name or callable, resolved\n",
    "            through tf_utils.get_activation.\n",
    "          hidden_dropout_prob: float, dropout probability forwarded to blocks.\n",
    "          attention_probs_dropout_prob: float, dropout on attention\n",
    "            probabilities, forwarded to blocks.\n",
    "          initializer_range: float, initializer stddev forwarded to blocks.\n",
    "          backward_compatible: bool, forwarded unchanged to each block.\n",
    "          float_type: tf.DType, compute dtype forwarded to each block.\n",
    "          **kwargs: passed through to tf.keras.layers.Layer.\n",
    "        \"\"\"\n",
    "        super(Transformer, self).__init__(**kwargs)\n",
    "        self.num_hidden_layers = num_hidden_layers\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_attention_heads = num_attention_heads\n",
    "        self.intermediate_size = intermediate_size\n",
    "        self.intermediate_activation = tf_utils.get_activation(\n",
    "            intermediate_activation)\n",
    "        self.hidden_dropout_prob = hidden_dropout_prob\n",
    "        self.attention_probs_dropout_prob = attention_probs_dropout_prob\n",
    "        self.initializer_range = initializer_range\n",
    "        self.backward_compatible = backward_compatible\n",
    "        self.float_type = float_type\n",
    "    \n",
    "    def build(self, unused_input_shapes):\n",
    "        \"\"\"Implements build() for the layer.\"\"\"\n",
    "        # All blocks share identical hyperparameters; only the name differs\n",
    "        # (\"layer_0\" ... \"layer_<num_hidden_layers-1>\").\n",
    "        self.layers = []\n",
    "        for i in range(self.num_hidden_layers):\n",
    "            self.layers.append(TransformerBlock(hidden_size=self.hidden_size,\n",
    "                num_attention_heads=self.num_attention_heads,\n",
    "                intermediate_size=self.intermediate_size,\n",
    "                intermediate_activation=self.intermediate_activation,\n",
    "                hidden_dropout_prob=self.hidden_dropout_prob,\n",
    "                attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n",
    "                initializer_range=self.initializer_range,\n",
    "                backward_compatible=self.backward_compatible,\n",
    "                float_type=self.float_type, name=(\"layer_%d\" % i)))\n",
    "        super(Transformer, self).build(unused_input_shapes)\n",
    "    \n",
    "    def __call__(self, input_tensor, attention_mask=None, **kwargs):\n",
    "        # Pack the two tensors into one structure so Keras' standard\n",
    "        # __call__ machinery sees a single input; call() unpacks it again.\n",
    "        inputs = tf_utils.pack_inputs([input_tensor, attention_mask])\n",
    "        return super(Transformer, self).__call__(inputs=inputs, **kwargs)\n",
    "    \n",
    "    def call(self, inputs, return_all_layers=False):\n",
    "        \"\"\"Implements call() for the layer.\n",
    "    \n",
    "        Args:\n",
    "          inputs: packed inputs.\n",
    "          return_all_layers: bool, whether to return outputs of all layers inside\n",
    "            encoders.\n",
    "        Returns:\n",
    "          Output tensor of the last layer or a list of output tensors.\n",
    "        \"\"\"\n",
    "        # Unpack in the same order the tensors were packed in __call__.\n",
    "        unpacked_inputs = tf_utils.unpack_inputs(inputs)\n",
    "        input_tensor = unpacked_inputs[0]\n",
    "        attention_mask = unpacked_inputs[1]\n",
    "        output_tensor = input_tensor\n",
    "        \n",
    "        all_layer_outputs = []\n",
    "        # Chain the blocks: each layer consumes the previous layer's output;\n",
    "        # every intermediate output is kept for the return_all_layers case.\n",
    "        for layer in self.layers:\n",
    "            output_tensor = layer(output_tensor, attention_mask)\n",
    "            all_layer_outputs.append(output_tensor)\n",
    "        \n",
    "        if return_all_layers:\n",
    "            return all_layer_outputs\n",
    "        \n",
    "        return all_layer_outputs[-1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 编码器的参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T09:30:35.563924Z",
     "start_time": "2020-05-07T09:30:35.537288Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======编码器参数=============\n",
      "encoder/layer_._0/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._0/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._0/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._0/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._0/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._0/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._0/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._0/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._0/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._0/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._1/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._1/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._1/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._1/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._1/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._1/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._1/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._1/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._1/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._1/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._1/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._1/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._1/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._1/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._1/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._1/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._2/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._2/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._2/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._2/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._2/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._2/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._2/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._2/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._2/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._2/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._2/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._2/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._2/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._2/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._2/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._2/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._3/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._3/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._3/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._3/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._3/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._3/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._3/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._3/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._3/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._3/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._3/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._3/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._3/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._3/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._3/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._3/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._4/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._4/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._4/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._4/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._4/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._4/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._4/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._4/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._4/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._4/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._4/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._4/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._4/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._4/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._4/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._4/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._5/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._5/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._5/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._5/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._5/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._5/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._5/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._5/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._5/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._5/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._5/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._5/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._5/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._5/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._5/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._5/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._6/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._6/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._6/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._6/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._6/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._6/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._6/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._6/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._6/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._6/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._6/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._6/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._6/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._6/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._6/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._6/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._7/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._7/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._7/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._7/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._7/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._7/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._7/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._7/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._7/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._7/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._7/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._7/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._7/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._7/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._7/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._7/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._8/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._8/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._8/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._8/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._8/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._8/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._8/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._8/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._8/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._8/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._8/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._8/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._8/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._8/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._8/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._8/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._9/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._9/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._9/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._9/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._9/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._9/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._9/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._9/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._9/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._9/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._9/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._9/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._9/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._9/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._9/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._9/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._10/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._10/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._10/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._10/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._10/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._10/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._10/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._10/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._10/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._10/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._10/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._10/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._10/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._10/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._10/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._10/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._11/attention/self/query/kernel:0 (768, 768) True\n",
      "encoder/layer_._11/attention/self/query/bias:0 (768,) True\n",
      "encoder/layer_._11/attention/self/key/kernel:0 (768, 768) True\n",
      "encoder/layer_._11/attention/self/key/bias:0 (768,) True\n",
      "encoder/layer_._11/attention/self/value/kernel:0 (768, 768) True\n",
      "encoder/layer_._11/attention/self/value/bias:0 (768,) True\n",
      "encoder/layer_._11/attention/output/dense/kernel:0 (768, 768) True\n",
      "encoder/layer_._11/attention/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._11/attention/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._11/attention/output/LayerNorm/beta:0 (768,) True\n",
      "encoder/layer_._11/intermediate/dense/kernel:0 (768, 3072) True\n",
      "encoder/layer_._11/intermediate/dense/bias:0 (3072,) True\n",
      "encoder/layer_._11/output/dense/kernel:0 (3072, 768) True\n",
      "encoder/layer_._11/output/dense/bias:0 (768,) True\n",
      "encoder/layer_._11/output/LayerNorm/gamma:0 (768,) True\n",
      "encoder/layer_._11/output/LayerNorm/beta:0 (768,) True\n"
     ]
    }
   ],
   "source": [
    "# Show all encoder parameters (params indices 5..196 cover the 12 layers).\n",
    "# Banner fixed: \"编码器\" (encoder), not \"编码期\" (typo).\n",
    "print(\"======编码器参数=============\")\n",
    "for param in params[5:197]:\n",
    "    print('/'.join(param.name.split('/')[2:]), param.shape, param.trainable)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**相同结构的编码层重复12遍**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-07T09:33:50.284788Z",
     "start_time": "2020-05-07T09:33:50.278105Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pooler/dense/kernel:0 (768, 768) True\n",
      "pooler/dense/bias:0 (768,) True\n",
      "seq_relationship/kernel:0 (768, 2) True\n",
      "seq_relationship/bias:0 (2,) True\n",
      "predictions/bias:0 (30522,) True\n",
      "predictions/transform/dense/kernel:0 (768, 768) True\n",
      "predictions/transform/dense/bias:0 (768,) True\n",
      "predictions/transform/LayerNorm/gamma:0 (768,) True\n",
      "predictions/transform/LayerNorm/beta:0 (768,) True\n"
     ]
    }
   ],
   "source": [
    "# Remaining parameters (indices 197+): pooler and the two pre-training\n",
    "# heads (next-sentence `seq_relationship` and masked-LM `predictions`).\n",
    "for param in params[197:]:\n",
    "    short_name = '/'.join(param.name.split('/')[2:])\n",
    "    print(short_name, param.shape, param.trainable)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 辅助函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "lines_to_end_of_cell_marker": 2
   },
   "outputs": [],
   "source": [
    "# Weight-initializer helper.\n",
    "def get_initializer(initializer_range=0.02):\n",
    "    \"\"\"Builds a truncated-normal initializer with the given stddev.\n",
    "  \n",
    "    Args:\n",
    "      initializer_range: float, standard deviation of the truncated normal.\n",
    "  \n",
    "    Returns:\n",
    "      A `tf.keras.initializers.TruncatedNormal` instance with\n",
    "      stddev = `initializer_range`.\n",
    "    \"\"\"\n",
    "    stddev = initializer_range\n",
    "    return tf.keras.initializers.TruncatedNormal(stddev=stddev)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "jupytext": {
   "cell_metadata_filter": "-all",
   "formats": "py:light,ipynb",
   "notebook_metadata_filter": "-all"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {
    "height": "calc(100% - 180px)",
    "left": "10px",
    "top": "150px",
    "width": "165px"
   },
   "toc_section_display": true,
   "toc_window_display": true
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
