{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# BERT预训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "两种预训练任务：\n",
    "- `Mask LM`：将标记序列中一定数量的标记遮挡，然后预测该处的标记\n",
    "- `Next Sentence Prediction`：标记序列的两部分在真实文本中是不是连续的\n",
    "<img src=\"../images/pretrained_tasks.png\" width=\"100%\">"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-09T05:37:39.310148Z",
     "start_time": "2020-05-09T05:37:38.523776Z"
    }
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 获取预训练数据\n",
    "加载 [38-BERT创建训练数据(Tensorflow)](38-BERT创建训练数据(Tensorflow).ipynb) 中创建然后保存为 `TFRecord` 的预训练数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-09T05:37:42.940460Z",
     "start_time": "2020-05-09T05:37:42.928478Z"
    }
   },
   "outputs": [],
   "source": [
     "# Read TFRecord examples and convert dtypes tf.int64 --> tf.int32\n",
     "def decode_record(record, name_to_features):\n",
     "    \"\"\"Parse one serialized example; cast every int64 feature to int32.\"\"\"\n",
     "    example = tf.io.parse_single_example(record, name_to_features)\n",
     "    for name in list(example.keys()):\n",
     "        t = example[name]\n",
     "        if t.dtype == tf.int64:\n",
     "            t = tf.cast(t, tf.int32)\n",
     "        example[name] = t\n",
     "    return example\n",
     "\n",
     "\n",
     "# Read a single TFRecord file (helper; not used by create_pretrain_dataset)\n",
     "def single_file_dataset(input_file, name_to_features):\n",
     "    \"\"\"Build a decoded dataset from one TFRecord file (or 1-element list).\"\"\"\n",
     "    # For training, we want a lot of parallel reading and shuffling.\n",
     "    # For eval, we want no shuffling and parallel reading doesn't matter.\n",
     "    d = tf.data.TFRecordDataset(input_file)\n",
     "    d = d.map(lambda record: decode_record(record, name_to_features))\n",
     "\n",
     "    # When `input_file` is a path to a single file or a list\n",
     "    # containing a single path, disable auto sharding so that\n",
     "    # same input file is sent to all workers.\n",
     "    if isinstance(input_file, str) or len(input_file) == 1:\n",
     "        options = tf.data.Options()\n",
     "        options.experimental_distribute.auto_shard_policy = (\n",
     "            tf.data.experimental.AutoShardPolicy.OFF)\n",
     "        d = d.with_options(options)\n",
     "\n",
     "    return d\n",
     "\n",
     "\n",
     "def create_pretrain_dataset(input_patterns,\n",
     "                            seq_length,\n",
     "                            max_predictions_per_seq,\n",
     "                            batch_size,\n",
     "                            is_training=True,\n",
     "                            input_pipeline_context=None):\n",
     "    \"\"\"Create a repeating tf.data.Dataset of BERT pretraining batches.\n",
     "\n",
     "    Args:\n",
     "        input_patterns: list of glob patterns matching TFRecord files.\n",
     "        seq_length: fixed token length of every example.\n",
     "        max_predictions_per_seq: max number of masked positions per example.\n",
     "        batch_size: batch size; incomplete final batches are dropped.\n",
     "        is_training: if True, shuffle files and examples.\n",
     "        input_pipeline_context: optional distribution input context used to\n",
     "            shard the files across parallel input pipelines.\n",
     "\n",
     "    Returns:\n",
     "        A dataset of (features_dict, masked_lm_weights) pairs.\n",
     "    \"\"\"\n",
     "    # Feature spec of the serialized examples\n",
     "    name_to_features = {\n",
     "        'input_ids':\n",
     "        tf.io.FixedLenFeature([seq_length], tf.int64),\n",
     "        'input_mask':\n",
     "        tf.io.FixedLenFeature([seq_length], tf.int64),\n",
     "        'segment_ids':\n",
     "        tf.io.FixedLenFeature([seq_length], tf.int64),\n",
     "        'masked_lm_positions':\n",
     "        tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),\n",
     "        'masked_lm_ids':\n",
     "        tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),\n",
     "        'masked_lm_weights':\n",
     "        tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),\n",
     "        'next_sentence_labels':\n",
     "        tf.io.FixedLenFeature([1], tf.int64),\n",
     "    }\n",
     "\n",
     "    # List the matching files (shuffled when training)\n",
     "    dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training)\n",
     "\n",
     "    # Shard files across parallel input pipelines (multi-worker training)\n",
     "    if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:\n",
     "        dataset = dataset.shard(input_pipeline_context.num_input_pipelines,\n",
     "                                input_pipeline_context.input_pipeline_id)\n",
     "\n",
     "    dataset = dataset.repeat()\n",
     "\n",
     "    # Reshuffle the file names; the buffer covers every matched file\n",
     "    input_files = []\n",
     "    for input_pattern in input_patterns:\n",
     "        input_files.extend(tf.io.gfile.glob(input_pattern))\n",
     "    dataset = dataset.shuffle(len(input_files))\n",
     "\n",
     "    dataset = dataset.interleave(\n",
     "        tf.data.TFRecordDataset,\n",
     "        cycle_length=8,\n",
     "        num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
     "\n",
     "    # Parse record contents and downcast dtypes\n",
     "    decode_fn = lambda record: decode_record(record, name_to_features)\n",
     "    dataset = dataset.map(decode_fn,\n",
     "                          num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
     "\n",
     "    # Repackage each record as (features, label) for Keras\n",
     "    def _select_data_from_record(record):\n",
     "        x = {\n",
     "            'input_word_ids': record['input_ids'],\n",
     "            'input_mask': record['input_mask'],\n",
     "            'input_type_ids': record['segment_ids'],\n",
     "            'masked_lm_positions': record['masked_lm_positions'],\n",
     "            'masked_lm_ids': record['masked_lm_ids'],\n",
     "            'masked_lm_weights': record['masked_lm_weights'],\n",
     "            'next_sentence_labels': record['next_sentence_labels'],\n",
     "        }\n",
     "        # NOTE(review): y duplicates masked_lm_weights; the real losses are\n",
     "        # computed inside the model -- confirm against the training loop.\n",
     "        y = record['masked_lm_weights']\n",
     "        return (x, y)\n",
     "\n",
     "    dataset = dataset.map(_select_data_from_record,\n",
     "                          num_parallel_calls=tf.data.experimental.AUTOTUNE)\n",
     "    if is_training:\n",
     "        dataset = dataset.shuffle(100)\n",
     "    dataset = dataset.batch(batch_size, drop_remainder=True)\n",
     "    dataset = dataset.prefetch(1024)\n",
     "    return dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-09T07:03:22.284573Z",
     "start_time": "2020-05-09T07:03:22.133611Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_word_ids batch_size: 32 ; the 24th sample: \n",
      "\n",
      "tf.Tensor(\n",
      "[  101  1103  4458  1125  1178  6445   103  1103  5021 24177  1104  2106\n",
      "  1120 20013  2851   103  1105  1103  3433 21167  1106   170  7279  2305\n",
      "  1104  4044   103   117  1105  1103  4006  1104  6278 14726   117 14086\n",
      " 13624   117  1105  2964  3227   190 23826   103   103  1187  1103   103\n",
      "  5946  1116  1125  8589  1283  1103 11829   103   103 19943  1116  1196\n",
      "  1103  6493  3581   119   102  1103 12325  1104   103 12304   117   177\n",
      "  1183   103 10691  1941   119   102     0     0     0     0     0     0\n",
      "     0     0     0     0     0     0     0     0     0     0     0     0\n",
      "     0     0     0     0     0     0     0     0     0     0     0     0\n",
      "     0     0     0     0     0     0     0     0     0     0     0     0\n",
      "     0     0     0     0     0     0     0     0], shape=(128,), dtype=int32)\n",
      "================================================================================\n",
      "input_mask batch_size: 32 ; the 15th sample: \n",
      "\n",
      "tf.Tensor(\n",
      "[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
      " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0], shape=(128,), dtype=int32)\n",
      "================================================================================\n",
      "input_type_ids batch_size: 32 ; the 19th sample: \n",
      "\n",
      "tf.Tensor(\n",
      "[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n",
      " 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1], shape=(128,), dtype=int32)\n",
      "================================================================================\n",
      "masked_lm_positions batch_size: 32 ; the 16th sample: \n",
      "\n",
      "tf.Tensor(\n",
      "[  2   4   8   9  13  29  39  50  52  60  62  71  82  83 104 106 117 118\n",
      " 120   0], shape=(20,), dtype=int32)\n",
      "================================================================================\n",
      "masked_lm_ids batch_size: 32 ; the 0th sample: \n",
      "\n",
      "tf.Tensor(\n",
      "[10658   992  3087  1431 19729  1933  2953   119     0     0     0     0\n",
      "     0     0     0     0     0     0     0     0], shape=(20,), dtype=int32)\n",
      "================================================================================\n",
      "masked_lm_weights batch_size: 32 ; the 6th sample: \n",
      "\n",
      "tf.Tensor([1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.], shape=(20,), dtype=float32)\n",
      "================================================================================\n",
      "next_sentence_labels batch_size: 32 ; the 11th sample: \n",
      "\n",
      "tf.Tensor([1], shape=(1,), dtype=int32)\n",
      "================================================================================\n"
     ]
    }
   ],
   "source": [
    "input_files = \"../datasets/sample.record\"\n",
    "input_patterns = input_files.split(',')\n",
    "dataset = create_pretrain_dataset(input_patterns,\n",
    "                                  seq_length=128,\n",
    "                                  max_predictions_per_seq=20,\n",
    "                                  batch_size=32)\n",
    "\n",
    "for data in dataset:\n",
    "    import random\n",
    "    x, y = data\n",
    "    for k in x:\n",
    "        batch_size = x[k].shape[0]\n",
    "        ind = random.randint(0, batch_size)\n",
    "        print(k + f\" batch_size: {batch_size} ; the {ind}th sample: \\n\")\n",
    "        print(x[k][ind])\n",
    "        print(\"=\" * 80)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-09T07:06:14.987689Z",
     "start_time": "2020-05-09T07:06:14.975485Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "8\n",
      "10\n",
      "19\n",
      "8\n",
      "19\n",
      "19\n",
      "10\n",
      "12\n",
      "19\n",
      "6\n",
      "8\n",
      "19\n",
      "19\n",
      "8\n",
      "19\n",
      "10\n",
      "19\n",
      "19\n",
      "19\n",
      "19\n",
      "19\n",
      "19\n",
      "6\n",
      "19\n",
      "12\n",
      "19\n",
      "19\n",
      "9\n",
      "8\n",
      "19\n",
      "19\n",
      "19\n"
     ]
    }
   ],
   "source": [
    "# 一个批次数据中，每个序列中被遮挡的位置数量是不相同的\n",
    "for col in x['masked_lm_positions'].numpy()!=0:\n",
    "    print(sum(col))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 读取数据封装成函数\n",
    "\n",
    "def get_pretrain_dataset_fn(input_file_pattern, seq_length,\n",
    "                            max_predictions_per_seq, global_batch_size):\n",
    "    def _dataset_fn(ctx=None):\n",
    "        input_patterns = input_file_pattern.split(',')\n",
    "        batch_size = ctx.get_per_replica_batch_size(global_batch_size)\n",
    "        train_dataset = input_pipeline.create_pretrain_dataset(\n",
    "            input_patterns,\n",
    "            seq_length,\n",
    "            max_predictions_per_seq,\n",
    "            batch_size,\n",
    "            is_training=True,\n",
    "            input_pipeline_context=ctx,\n",
    "        )\n",
    "        return train_dataset\n",
    "\n",
    "    return _dataset_fn"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-05-09T06:22:51.328516Z",
     "start_time": "2020-05-09T06:22:51.326106Z"
    }
   },
   "source": [
    "### `Mask LM`\n",
    "- 从`transformer`编码器的输出，和 被遮挡标记位置列表，获取被遮挡标记的输出\n",
    "\n",
    "- 被遮挡标记的输出分布与 `transformer` 的 `embedding` 层矩阵乘法，获取预测输出，每个标记对应词汇表的概率分布"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# @tf.keras.utils.register_keras_serializable(package='Text')\n",
     "class MaskedLM(network.Network):\n",
     "    \"\"\"Masked language model network head for BERT modeling.\n",
     "\n",
     "  This network implements a masked language model based on the provided network.\n",
     "  It assumes that the network being passed has a \"get_embedding_table()\" method.\n",
     "\n",
     "  Attributes:\n",
     "    input_width: The innermost dimension of the input tensor to this network.\n",
     "    num_predictions: The number of predictions to make per sequence.\n",
     "    source_network: The network with the embedding layer to use for the\n",
     "      embedding layer.\n",
     "    activation: The activation, if any, for the dense layer in this network.\n",
     "    initializer: The initializer for the dense layer in this network. Defaults to\n",
     "      a Glorot uniform initializer.\n",
     "    output: The output style for this network. Can be either 'logits' or\n",
     "      'predictions'.\n",
     "  \"\"\"\n",
     "    def __init__(\n",
     "            self,\n",
     "            input_width,\n",
     "            num_predictions,\n",
     "            source_network,  # transformer encoder providing the embedding table\n",
     "            activation=None,\n",
     "            initializer='glorot_uniform',\n",
     "            output='logits',\n",
     "            **kwargs):\n",
     "\n",
     "        # Tie the output projection to the encoder's input embedding table\n",
     "        embedding_table = source_network.get_embedding_table()\n",
     "        vocab_size, hidden_size = embedding_table.shape\n",
     "\n",
     "        # Transformer output: (batch, seq_len, hidden_size)\n",
     "        sequence_data = tf.keras.layers.Input(shape=(None, input_width),\n",
     "                                              name='sequence_data',\n",
     "                                              dtype=tf.float32)\n",
     "        # Positions of the masked tokens within each sequence\n",
     "        masked_lm_positions = tf.keras.layers.Input(shape=(num_predictions, ),\n",
     "                                                    name='masked_lm_positions',\n",
     "                                                    dtype=tf.int32)\n",
     "\n",
     "        # Encoder outputs gathered at the masked positions:\n",
     "        # (batch * num_predictions, hidden_size)\n",
     "        masked_lm_input = tf.keras.layers.Lambda(\n",
     "            lambda x: self._gather_indexes(x[0], x[1]))(\n",
     "                [sequence_data, masked_lm_positions])\n",
     "\n",
     "        # Dense transform followed by layer normalization\n",
     "        lm_data = (tf.keras.layers.Dense(\n",
     "            hidden_size,\n",
     "            activation=activation,\n",
     "            kernel_initializer=initializer,\n",
     "            name='cls/predictions/transform/dense',\n",
     "        )(masked_lm_input))\n",
     "        lm_data = tf.keras.layers.LayerNormalization(\n",
     "            axis=-1, epsilon=1e-12,\n",
     "            name='cls/predictions/transform/LayerNorm')(lm_data)\n",
     "\n",
     "        # Dot product with every row of the embedding table -> vocab logits\n",
     "        lm_data = tf.keras.layers.Lambda(\n",
     "            lambda x: tf.matmul(x, embedding_table, transpose_b=True))(lm_data)\n",
     "        logits = Bias(initializer=tf.keras.initializers.Zeros(),\n",
     "                      name='cls/predictions/output_bias')(lm_data)\n",
     "\n",
     "        # We can't use the standard Keras reshape layer here, since it expects\n",
     "        # the input and output batch size to be the same.\n",
     "        reshape_layer = tf.keras.layers.Lambda(\n",
     "            lambda x: tf.reshape(x, [-1, num_predictions, vocab_size]))\n",
     "\n",
     "        # (batch, num_predictions, vocab_size)\n",
     "        self.logits = reshape_layer(logits)\n",
     "        \n",
     "        # log-softmax over the vocabulary gives log-probabilities\n",
     "        predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(\n",
     "            self.logits)\n",
     "\n",
     "        if output == 'logits':\n",
     "            output_tensors = self.logits\n",
     "        elif output == 'predictions':\n",
     "            output_tensors = predictions\n",
     "        else:\n",
     "            raise ValueError((\n",
     "                'Unknown `output` value \"%s\". `output` can be either \"logits\" or '\n",
     "                '\"predictions\"') % output)\n",
     "\n",
     "        super(MaskedLM,\n",
     "              self).__init__(inputs=[sequence_data, masked_lm_positions],\n",
     "                             outputs=output_tensors,\n",
     "                             **kwargs)\n",
     "\n",
     "    def get_config(self):\n",
     "        raise NotImplementedError(\n",
     "            'MaskedLM cannot be directly serialized at this '\n",
     "            'time. Please use it only in Layers or '\n",
     "            'functionally subclassed Models/Networks.')\n",
     "\n",
     "    def _gather_indexes(self, sequence_tensor, positions):\n",
     "        \"\"\"Gathers the vectors at the specific positions.\n",
     "\n",
     "    Args:\n",
     "        sequence_tensor: Sequence output of `BertModel` layer of shape\n",
     "          (`batch_size`, `seq_length`, num_hidden) where num_hidden is number of\n",
     "          hidden units of `BertModel` layer.\n",
     "        positions: Positions ids of tokens in sequence to mask for pretraining\n",
     "          of with dimension (batch_size, num_predictions) where\n",
     "          `num_predictions` is maximum number of tokens to mask out and predict\n",
     "          per each sequence.\n",
     "\n",
     "    Returns:\n",
     "        Masked out sequence tensor of shape (batch_size * num_predictions,\n",
     "        num_hidden).\n",
     "    \"\"\"\n",
     "        sequence_shape = tf_utils.get_shape_list(sequence_tensor,\n",
     "                                                 name='sequence_output_tensor')\n",
     "        batch_size, seq_length, width = sequence_shape\n",
     "\n",
     "        # Offset each row's positions by its row start in the flattened tensor\n",
     "        flat_offsets = tf.keras.backend.reshape(\n",
     "            tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])\n",
     "        flat_positions = tf.keras.backend.reshape(positions + flat_offsets,\n",
     "                                                  [-1])\n",
     "        flat_sequence_tensor = tf.keras.backend.reshape(\n",
     "            sequence_tensor, [batch_size * seq_length, width])\n",
     "        output_tensor = tf.gather(flat_sequence_tensor, flat_positions)\n",
     "\n",
     "        return output_tensor"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### `NSP`\n",
    "- 开始标记`[CLS]`对应的输出，在加一个二分类的全连接层"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# @tf.keras.utils.register_keras_serializable(package='Text')\n",
     "class Classification(network.Network):\n",
     "    \"\"\"Classification network head for BERT modeling.\n",
     "\n",
     "  This network implements a simple classifier head based on a dense layer.\n",
     "\n",
     "  Attributes:\n",
     "    input_width: The innermost dimension of the input tensor to this network.\n",
     "    num_classes: The number of classes that this network should classify to.\n",
     "    activation: The activation, if any, for the dense layer in this network.\n",
     "    initializer: The initializer for the dense layer in this network. Defaults to\n",
     "      a Glorot uniform initializer.\n",
     "    output: The output style for this network. Can be either 'logits' or\n",
     "      'predictions'.\n",
     "  \"\"\"\n",
     "    def __init__(self,\n",
     "                 input_width,\n",
     "                 num_classes,\n",
     "                 initializer='glorot_uniform',\n",
     "                 output='logits',\n",
     "                 **kwargs):\n",
     "        # NOTE(review): presumably disables Keras attribute tracking so plain\n",
     "        # attributes can be stored before super().__init__ runs -- confirm.\n",
     "        self._self_setattr_tracking = False\n",
     "        self._config_dict = {\n",
     "            'input_width': input_width,\n",
     "            'num_classes': num_classes,\n",
     "            'initializer': initializer,\n",
     "            'output': output,\n",
     "        }\n",
     "\n",
     "        # Input: the [CLS] token's output vector\n",
     "        cls_output = tf.keras.layers.Input(shape=(input_width, ),\n",
     "                                           name='cls_output',\n",
     "                                           dtype=tf.float32)\n",
     "        \n",
     "\n",
     "        # Dense classification layer producing the logits\n",
     "        self.logits = tf.keras.layers.Dense(\n",
     "            num_classes,\n",
     "            activation=None,\n",
     "            kernel_initializer=initializer,\n",
     "            name='predictions/transform/logits')(cls_output)\n",
     "        predictions = tf.keras.layers.Activation(tf.nn.log_softmax)(\n",
     "            self.logits)\n",
     "\n",
     "        if output == 'logits':\n",
     "            output_tensors = self.logits\n",
     "        elif output == 'predictions':\n",
     "            output_tensors = predictions\n",
     "        else:\n",
     "            raise ValueError((\n",
     "                'Unknown `output` value \"%s\". `output` can be either \"logits\" or '\n",
     "                '\"predictions\"') % output)\n",
     "\n",
     "        super(Classification, self).__init__(inputs=[cls_output],\n",
     "                                             outputs=output_tensors,\n",
     "                                             **kwargs)\n",
     "\n",
     "    def get_config(self):\n",
     "        return self._config_dict\n",
     "\n",
     "    @classmethod\n",
     "    def from_config(cls, config, custom_objects=None):\n",
     "        return cls(**config)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 整合了预训练任务的模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class BertPretrainer(tf.keras.Model):\n",
     "    \"\"\"Joint BERT pretraining model: encoder + Masked LM head + NSP head.\"\"\"\n",
     "    def __init__(\n",
     "            self,\n",
     "            network,  # transformer encoder\n",
     "            num_classes,\n",
     "            num_token_predictions,\n",
     "            activation=None,\n",
     "            output_activation=None,\n",
     "            initializer='glorot_uniform',\n",
     "            output='logits',\n",
     "            **kwargs):\n",
     "        self._self_setattr_tracking = False\n",
     "        self._config = {\n",
     "            'network': network,\n",
     "            'num_classes': num_classes,\n",
     "            'num_token_predictions': num_token_predictions,\n",
     "            'activation': activation,\n",
     "            'output_activation': output_activation,\n",
     "            'initializer': initializer,\n",
     "            'output': output,\n",
     "        }\n",
     "\n",
     "        # We want to use the inputs of the passed network as the inputs to this\n",
     "        # Model. To do this, we need to keep a copy of the network inputs for use\n",
     "        # when we construct the Model object at the end of init. (We keep a copy\n",
     "        # because we'll be adding another tensor to the copy later.)\n",
     "        network_inputs = network.inputs\n",
     "        # NOTE(review): relies on the stdlib `copy` module being imported elsewhere\n",
     "        inputs = copy.copy(network_inputs)\n",
     "\n",
     "        # Because we have a copy of inputs to create this Model object, we can\n",
     "        # invoke the Network object with its own input tensors to start the Model.\n",
     "\n",
     "        # cls_output is the [CLS] token's output, used for the NSP task;\n",
     "        # sequence_output holds the outputs of all tokens\n",
     "        sequence_output, cls_output = network(network_inputs)\n",
     "\n",
     "        sequence_output_length = sequence_output.shape.as_list()[1]\n",
     "        if sequence_output_length < num_token_predictions:\n",
     "            raise ValueError(\n",
     "                \"The passed network's output length is %s, which is less than the \"\n",
     "                'requested num_token_predictions %s.' %\n",
     "                (sequence_output_length, num_token_predictions))\n",
     "\n",
     "        # Masked LM task head\n",
     "        masked_lm_positions = tf.keras.layers.Input(\n",
     "            shape=(num_token_predictions, ),\n",
     "            name='masked_lm_positions',\n",
     "            dtype=tf.int32,\n",
     "        )\n",
     "        inputs.append(masked_lm_positions)\n",
     "\n",
     "        self.masked_lm = MaskedLM(\n",
     "            num_predictions=num_token_predictions,\n",
     "            input_width=sequence_output.shape[-1],\n",
     "            source_network=network,\n",
     "            activation=activation,\n",
     "            initializer=initializer,\n",
     "            output=output,\n",
     "            name='masked_lm',\n",
     "        )\n",
     "        lm_outputs = self.masked_lm([sequence_output, masked_lm_positions])\n",
     "\n",
     "        # NSP task head\n",
     "        self.classification = Classification(\n",
     "            input_width=cls_output.shape[-1],\n",
     "            num_classes=num_classes,\n",
     "            initializer=initializer,\n",
     "            output=output,\n",
     "            name='classification',\n",
     "        )\n",
     "        sentence_outputs = self.classification(cls_output)\n",
     "\n",
     "        super(BertPretrainer,\n",
     "              self).__init__(inputs=inputs,\n",
     "                             outputs=[lm_outputs, sentence_outputs],\n",
     "                             **kwargs)\n",
     "\n",
     "    def get_config(self):\n",
     "        return self._config\n",
     "\n",
     "    @classmethod\n",
     "    def from_config(cls, config, custom_objects=None):\n",
     "        return cls(**config)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "class BertPretrainLossAndMetricLayer(tf.keras.layers.Layer):\n",
     "    \"\"\"Returns layer that computes custom loss and metrics for pretraining.\"\"\"\n",
     "    def __init__(self, vocab_size, **kwargs):\n",
     "        super(BertPretrainLossAndMetricLayer, self).__init__(**kwargs)\n",
     "        self._vocab_size = vocab_size\n",
     "        self.config = {\n",
     "            'vocab_size': vocab_size,\n",
     "        }\n",
     "\n",
     "    def __call__(self,\n",
     "                 lm_output,\n",
     "                 sentence_output=None,\n",
     "                 lm_label_ids=None,\n",
     "                 lm_label_weights=None,\n",
     "                 sentence_labels=None,\n",
     "                 **kwargs):\n",
     "        \"\"\"Packs the five tensors into one input list before calling the layer.\"\"\"\n",
     "        # Order must match the unpacking indices in call() below\n",
     "        inputs = tf_utils.pack_inputs([\n",
     "            lm_output, sentence_output, lm_label_ids, lm_label_weights,\n",
     "            sentence_labels\n",
     "        ])\n",
     "        return super(BertPretrainLossAndMetricLayer,\n",
     "                     self).__call__(inputs, **kwargs)\n",
     "\n",
     "    def _add_metrics(self, lm_output, lm_labels, lm_label_weights,\n",
     "                     lm_example_loss, sentence_output, sentence_labels,\n",
     "                     next_sentence_loss):\n",
     "        \"\"\"Adds accuracy and loss metrics for both pretraining tasks.\"\"\"\n",
     "        # Accuracy counted over masked positions only, via lm_label_weights\n",
     "        masked_lm_accuracy = tf.keras.metrics.sparse_categorical_accuracy(\n",
     "            lm_labels, lm_output)\n",
     "        numerator = tf.reduce_sum(masked_lm_accuracy * lm_label_weights)\n",
     "        denominator = tf.reduce_sum(lm_label_weights) + 1e-5\n",
     "        masked_lm_accuracy = numerator / denominator\n",
     "        self.add_metric(masked_lm_accuracy,\n",
     "                        name='masked_lm_accuracy',\n",
     "                        aggregation='mean')\n",
     "\n",
     "        self.add_metric(lm_example_loss,\n",
     "                        name='lm_example_loss',\n",
     "                        aggregation='mean')\n",
     "\n",
     "        next_sentence_accuracy = tf.keras.metrics.sparse_categorical_accuracy(\n",
     "            sentence_labels, sentence_output)\n",
     "        self.add_metric(next_sentence_accuracy,\n",
     "                        name='next_sentence_accuracy',\n",
     "                        aggregation='mean')\n",
     "\n",
     "        self.add_metric(next_sentence_loss,\n",
     "                        name='next_sentence_loss',\n",
     "                        aggregation='mean')\n",
     "\n",
     "    def call(self, inputs):\n",
     "        \"\"\"Implements call() for the layer.\"\"\"\n",
     "        unpacked_inputs = tf_utils.unpack_inputs(inputs)\n",
     "        \n",
     "        # Masked LM output\n",
     "        lm_output = unpacked_inputs[0]\n",
     "        \n",
     "        # NSP output\n",
     "        sentence_output = unpacked_inputs[1]\n",
     "        \n",
     "        # Masked LM label ids\n",
     "        lm_label_ids = unpacked_inputs[2]\n",
     "        \n",
     "        # Masked LM weights (1.0 for real masked positions, 0.0 for padding)\n",
     "        lm_label_weights = tf.keras.backend.cast(unpacked_inputs[3],\n",
     "                                                 tf.float32)\n",
     "        # NSP labels (the original comment mislabeled these as outputs)\n",
     "        sentence_labels = unpacked_inputs[4]\n",
     "\n",
     "        # Masked LM loss\n",
     "        mask_label_loss = losses.weighted_sparse_categorical_crossentropy_loss(\n",
     "            labels=lm_label_ids,\n",
     "            predictions=lm_output,\n",
     "            weights=lm_label_weights)\n",
     "        \n",
     "        # NSP loss\n",
     "        sentence_loss = losses.weighted_sparse_categorical_crossentropy_loss(\n",
     "            labels=sentence_labels, predictions=sentence_output)\n",
     "        \n",
     "        # Total loss, broadcast to the batch dimension so Keras accepts it\n",
     "        loss = mask_label_loss + sentence_loss\n",
     "        batch_shape = tf.slice(tf.keras.backend.shape(sentence_labels), [0],\n",
     "                               [1])\n",
     "        # TODO(hongkuny): Avoids the hack and switches add_loss.\n",
     "        final_loss = tf.fill(batch_shape, loss)\n",
     "\n",
     "        self._add_metrics(lm_output, lm_label_ids, lm_label_weights,\n",
     "                          mask_label_loss, sentence_output, sentence_labels,\n",
     "                          sentence_loss)\n",
     "        return final_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 带损失层的完整模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pretrain_model(bert_config,\n",
    "                   seq_length,\n",
    "                   max_predictions_per_seq,\n",
    "                   initializer=None):\n",
    "    \"\"\"Build the full Keras model for BERT pretraining (encoder + loss layer).\n",
    "\n",
    "    Args:\n",
    "        bert_config: BertConfig with the transformer hyperparameters.\n",
    "        seq_length: length of each tokenized input sequence.\n",
    "        max_predictions_per_seq: maximum number of masked-LM predictions per\n",
    "          sequence. (fixed: was misspelled `max_predictions_pre_seq`, while\n",
    "          every use in the body said `max_predictions_per_seq` -> NameError)\n",
    "        initializer: optional weight initializer; defaults to\n",
    "          TruncatedNormal(stddev=bert_config.initializer_range).\n",
    "\n",
    "    Returns:\n",
    "        (keras_model, transformer_encoder): keras_model maps the pretraining\n",
    "        inputs to the combined MLM + NSP loss; transformer_encoder is the\n",
    "        core BERT encoder sub-network (useful for exporting checkpoints).\n",
    "    \"\"\"\n",
    "\n",
    "    # Model inputs:\n",
    "    input_word_ids = tf.keras.layers.Input(\n",
    "        shape=(seq_length, ),  # fixed: was undefined name `seq_lenght`\n",
    "        name=\"input_word_ids\",\n",
    "        dtype=tf.int32,\n",
    "    )\n",
    "    input_mask = tf.keras.layers.Input(\n",
    "        shape=(seq_length, ),  # fixed: `(seq_length)` is a scalar, not a tuple\n",
    "        name='input_mask',\n",
    "        dtype=tf.int32,\n",
    "    )\n",
    "    input_type_ids = tf.keras.layers.Input(\n",
    "        shape=(seq_length, ),\n",
    "        name='input_type_ids',\n",
    "        dtype=tf.int32,\n",
    "    )\n",
    "    masked_lm_positions = tf.keras.layers.Input(\n",
    "        shape=(max_predictions_per_seq, ),\n",
    "        name='masked_lm_positions',\n",
    "        dtype=tf.int32)\n",
    "    masked_lm_ids = tf.keras.layers.Input(\n",
    "        shape=(max_predictions_per_seq, ),\n",
    "        name='masked_lm_ids',\n",
    "        dtype=tf.int32,\n",
    "    )\n",
    "    masked_lm_weights = tf.keras.layers.Input(\n",
    "        shape=(max_predictions_per_seq, ),\n",
    "        name='masked_lm_weights',\n",
    "        dtype=tf.int32)\n",
    "    next_sentence_labels = tf.keras.layers.Input(\n",
    "        shape=(1, ),\n",
    "        name='next_sentence_labels',\n",
    "        dtype=tf.int32,\n",
    "    )\n",
    "\n",
    "    # Transformer encoder (the core BERT network).\n",
    "    transformer_encoder = _get_transformer_encoder(bert_config, seq_length)\n",
    "    if initializer is None:\n",
    "        initializer = tf.keras.initializers.TruncatedNormal(\n",
    "            stddev=bert_config.initializer_range)\n",
    "\n",
    "    # Heads for the two pretraining tasks (masked LM + next sentence).\n",
    "    pretrainer_model = BertPretrainer(\n",
    "        network=transformer_encoder,\n",
    "        num_classes=2,  # The next sentence prediction label has two classes.\n",
    "        num_token_predictions=max_predictions_per_seq,\n",
    "        initializer=initializer,\n",
    "        output='predictions',\n",
    "    )\n",
    "\n",
    "    # Task outputs.\n",
    "    lm_output, sentence_output = pretrainer_model(\n",
    "        [input_word_ids, input_mask, input_type_ids, masked_lm_positions])\n",
    "\n",
    "    # Loss layer: combines the MLM and NSP losses and records metrics.\n",
    "    pretrain_loss_layer = BertPretrainLossAndMetricLayer(\n",
    "        vocab_size=bert_config.vocab_size)\n",
    "    output_loss = pretrain_loss_layer(lm_output, sentence_output,\n",
    "                                      masked_lm_ids, masked_lm_weights,\n",
    "                                      next_sentence_labels)\n",
    "\n",
    "    # Full model: all pretraining inputs --> per-example combined loss.\n",
    "    keras_model = tf.keras.Model(inputs={\n",
    "        'input_word_ids': input_word_ids,\n",
    "        'input_mask': input_mask,\n",
    "        'input_type_ids': input_type_ids,\n",
    "        'masked_lm_positions': masked_lm_positions,\n",
    "        'masked_lm_ids': masked_lm_ids,\n",
    "        'masked_lm_weights': masked_lm_weights,\n",
    "        'next_sentence_labels': next_sentence_labels\n",
    "    },\n",
    "                                 outputs=output_loss)\n",
    "    return keras_model, transformer_encoder"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练过程"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_customized_training_loop(  \n",
    "        _sentinel=None,  \n",
    "        strategy=None,\n",
    "        model_fn=None,\n",
    "        loss_fn=None,\n",
    "        model_dir=None,\n",
    "        train_input_fn=None,\n",
    "        steps_per_epoch=None,\n",
    "        steps_per_loop=1,\n",
    "        epochs=1,\n",
    "        eval_input_fn=None,\n",
    "        eval_steps=None,\n",
    "        metric_fn=None,\n",
    "        init_checkpoint=None,\n",
    "        custom_callbacks=None,\n",
    "        run_eagerly=False,\n",
    "        sub_model_export_name=None):\n",
    "    \"\"\"Run BERT pretrain model training using low-level API.\n",
    "  \n",
    "    Arguments:\n",
    "        _sentinel: Used to prevent positional parameters. Internal, do not use.\n",
    "        strategy: Distribution strategy on which to run low level training loop.\n",
    "        model_fn: Function that returns a tuple (model, sub_model). Caller of this\n",
    "          function should add optimizer to the `model` via calling\n",
    "          `model.compile()` API or manually setting `model.optimizer` attribute.\n",
    "          Second element of the returned tuple(sub_model) is an optional sub model\n",
    "          to be used for initial checkpoint -- if provided.\n",
    "        loss_fn: Function with signature func(labels, logits) and returns a loss\n",
    "          tensor.\n",
    "        model_dir: Model directory used during training for restoring/saving model\n",
    "          weights.\n",
    "        train_input_fn: Function that returns a tf.data.Dataset used for training.\n",
    "        steps_per_epoch: Number of steps to run per epoch. At the end of each\n",
    "          epoch, model checkpoint will be saved and evaluation will be conducted\n",
    "          if evaluation dataset is provided.\n",
    "        steps_per_loop: Number of steps per graph-mode loop. In order to reduce\n",
    "          communication in eager context, training logs are printed every\n",
    "          steps_per_loop.\n",
    "        epochs: Number of epochs to train.\n",
    "        eval_input_fn: Function that returns evaluation dataset. If none,\n",
    "          evaluation is skipped.\n",
    "        eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`\n",
    "          is not none.\n",
    "        metric_fn: A metrics function that returns a Keras Metric object to record\n",
    "          evaluation result using evaluation dataset or with training dataset\n",
    "          after every epoch.\n",
    "        init_checkpoint: Optional checkpoint to load to `sub_model` returned by\n",
    "          `model_fn`.\n",
    "        custom_callbacks: A list of Keras Callbacks objects to run during\n",
    "          training. More specifically, `on_batch_begin()`, `on_batch_end()`,\n",
    "          methods are invoked during training.\n",
    "        run_eagerly: Whether to run model training in pure eager execution. This\n",
    "          should be disabled for TPUStrategy.\n",
    "        sub_model_export_name: If not None, will export `sub_model` returned by\n",
    "          `model_fn` into checkpoint files. The name of intermediate checkpoint\n",
    "          file is {sub_model_export_name}_step_{step}.ckpt and the last\n",
    "          checkpoint's name is {sub_model_export_name}.ckpt;\n",
    "          if None, `sub_model` will not be exported as checkpoint.\n",
    "  \n",
    "    Returns:\n",
    "        Trained model.\n",
    "  \n",
    "    Raises:\n",
    "        ValueError: (1) When model returned by `model_fn` does not have optimizer\n",
    "          attribute or when required parameters are set to none. (2) eval args are\n",
    "          not specified correctly. (3) metric_fn must be a callable if specified.\n",
    "          (4) sub_model_checkpoint_name is specified, but `sub_model` returned\n",
    "          by `model_fn` is None.\n",
    "    \"\"\"\n",
    "\n",
    "    if _sentinel is not None:\n",
    "        raise ValueError('only call `run_customized_training_loop()` '\n",
    "                         'with named arguments.')\n",
    "\n",
    "    required_arguments = [\n",
    "        strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn\n",
    "    ]\n",
    "    if [arg for arg in required_arguments if arg is None]:\n",
    "        raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '\n",
    "                         '`steps_per_loop` and `steps_per_epoch` are required '\n",
    "                         'parameters.')\n",
    "    if steps_per_loop > steps_per_epoch:\n",
    "        logging.error(\n",
    "            'steps_per_loop: %d is specified to be greater than '\n",
    "            ' steps_per_epoch: %d, we will use steps_per_epoch as'\n",
    "            ' steps_per_loop.', steps_per_loop, steps_per_epoch)\n",
    "        steps_per_loop = steps_per_epoch\n",
    "    assert tf.executing_eagerly()\n",
    "\n",
    "    if run_eagerly:\n",
    "        if steps_per_loop > 1:\n",
    "            raise ValueError(\n",
    "                'steps_per_loop is used for performance optimization. When you want '\n",
    "                'to run eagerly, you cannot leverage graph mode loop.')\n",
    "        if isinstance(strategy, tf.distribute.experimental.TPUStrategy):\n",
    "            raise ValueError(\n",
    "                'TPUStrategy should not run eagerly as it heavily replies on graph'\n",
    "                ' optimization for the distributed system.')\n",
    "\n",
    "    if eval_input_fn and (eval_steps is None or metric_fn is None):\n",
    "        raise ValueError(\n",
    "            '`eval_step` and `metric_fn` are required when `eval_input_fn ` '\n",
    "            'is not none.')\n",
    "    if metric_fn and not callable(metric_fn):\n",
    "        raise ValueError(\n",
    "            'if `metric_fn` is specified, metric_fn must be a callable.')\n",
    "\n",
    "    total_training_steps = steps_per_epoch * epochs\n",
    "\n",
    "    # To reduce unnecessary send/receive input pipeline operation, we place input\n",
    "    # pipeline ops in worker task.\n",
    "    train_iterator = _get_input_iterator(train_input_fn, strategy)\n",
    "\n",
    "    with distribution_utils.get_strategy_scope(strategy):\n",
    "        # To correctly place the model weights on accelerators,\n",
    "        # model and optimizer should be created in scope.\n",
    "        \n",
    "        # Full model (inputs --> loss) and the transformer encoder sub-model.\n",
    "        model, sub_model = model_fn()\n",
    "        if not hasattr(model, 'optimizer'):\n",
    "            raise ValueError('User should set optimizer attribute to model '\n",
    "                             'inside `model_fn`.')\n",
    "        if sub_model_export_name and sub_model is None:\n",
    "            raise ValueError('sub_model_export_name is specified as %s, but '\n",
    "                             'sub_model is None.' % sub_model_export_name)\n",
    "\n",
    "        # Optimizer must have been attached to the model by `model_fn`.\n",
    "        optimizer = model.optimizer\n",
    "        # A LossScaleOptimizer wrapper means mixed-precision loss scaling is\n",
    "        # in use, so gradients must be scaled/unscaled around the tape.\n",
    "        use_float16 = isinstance(\n",
    "            optimizer,\n",
    "            tf.keras.mixed_precision.experimental.LossScaleOptimizer)\n",
    "\n",
    "        if init_checkpoint:\n",
    "            logging.info(\n",
    "                'Checkpoint file %s found and restoring from '\n",
    "                'initial checkpoint for core model.', init_checkpoint)\n",
    "            checkpoint = tf.train.Checkpoint(model=sub_model)\n",
    "            checkpoint.restore(\n",
    "                init_checkpoint).assert_existing_objects_matched()\n",
    "            logging.info('Loading from checkpoint file completed')\n",
    "\n",
    "        train_loss_metric = tf.keras.metrics.Mean('training_loss',\n",
    "                                                  dtype=tf.float32)\n",
    "        eval_metrics = [metric_fn()] if metric_fn else []\n",
    "        # If evaluation is required, make a copy of metric as it will be used by\n",
    "        # both train and evaluation.\n",
    "        train_metrics = [\n",
    "            metric.__class__.from_config(metric.get_config())\n",
    "            for metric in eval_metrics\n",
    "        ]\n",
    "\n",
    "        # Create summary writers\n",
    "        summary_dir = os.path.join(model_dir, 'summaries')\n",
    "        eval_summary_writer = tf.summary.create_file_writer(\n",
    "            os.path.join(summary_dir, 'eval'))\n",
    "        if steps_per_loop >= _MIN_SUMMARY_STEPS:\n",
    "            # Only writes summary when the stats are collected sufficiently over\n",
    "            # enough steps.\n",
    "            train_summary_writer = tf.summary.create_file_writer(\n",
    "                os.path.join(summary_dir, 'train'))\n",
    "        else:\n",
    "            train_summary_writer = None\n",
    "\n",
    "        # Collects training variables.\n",
    "        training_vars = model.trainable_variables\n",
    "\n",
    "        def _replicated_step(inputs):\n",
    "            \"\"\"Replicated training step.\"\"\"\n",
    "\n",
    "            inputs, labels = inputs\n",
    "            with tf.GradientTape() as tape:\n",
    "                model_outputs = model(inputs, training=True)\n",
    "                loss = loss_fn(labels, model_outputs)\n",
    "                if use_float16:\n",
    "                    scaled_loss = optimizer.get_scaled_loss(loss)\n",
    "\n",
    "            if use_float16:\n",
    "                scaled_grads = tape.gradient(scaled_loss, training_vars)\n",
    "                grads = optimizer.get_unscaled_gradients(scaled_grads)\n",
    "            else:\n",
    "                grads = tape.gradient(loss, training_vars)\n",
    "            optimizer.apply_gradients(zip(grads, training_vars))\n",
    "            # For reporting, the metric takes the mean of losses.\n",
    "            train_loss_metric.update_state(loss)\n",
    "            for metric in train_metrics:\n",
    "                metric.update_state(labels, model_outputs)\n",
    "\n",
    "        @tf.function\n",
    "        def train_steps(iterator, steps):\n",
    "            \"\"\"Performs distributed training steps in a loop.\n",
    "      \n",
    "            Args:\n",
    "              iterator: the distributed iterator of training datasets.\n",
    "              steps: a tf.int32 integer tensor to specify number of steps to run\n",
    "                inside host training loop.\n",
    "      \n",
    "            Raises:\n",
    "              ValueError: Any of the arguments or tensor shapes are invalid.\n",
    "            \"\"\"\n",
    "            if not isinstance(steps, tf.Tensor):\n",
    "                raise ValueError(\n",
    "                    'steps should be an Tensor. Python object may cause '\n",
    "                    'retracing.')\n",
    "\n",
    "            for _ in tf.range(steps):\n",
    "                # NOTE: `experimental_run_v2` was renamed to `Strategy.run`\n",
    "                # in later TF 2.x releases.\n",
    "                strategy.experimental_run_v2(_replicated_step,\n",
    "                                             args=(next(iterator), ))\n",
    "\n",
    "        def train_single_step(iterator):\n",
    "            \"\"\"Performs a distributed training step.\n",
    "      \n",
    "            Args:\n",
    "              iterator: the distributed iterator of training datasets.\n",
    "      \n",
    "            Raises:\n",
    "              ValueError: Any of the arguments or tensor shapes are invalid.\n",
    "            \"\"\"\n",
    "            strategy.experimental_run_v2(_replicated_step,\n",
    "                                         args=(next(iterator), ))\n",
    "\n",
    "        def test_step(iterator):\n",
    "            \"\"\"Calculates evaluation metrics on distributed devices.\"\"\"\n",
    "            def _test_step_fn(inputs):\n",
    "                \"\"\"Replicated accuracy calculation.\"\"\"\n",
    "\n",
    "                inputs, labels = inputs\n",
    "                model_outputs = model(inputs, training=False)\n",
    "                for metric in eval_metrics:\n",
    "                    metric.update_state(labels, model_outputs)\n",
    "\n",
    "            strategy.experimental_run_v2(_test_step_fn,\n",
    "                                         args=(next(iterator), ))\n",
    "\n",
    "        if not run_eagerly:\n",
    "            train_single_step = tf.function(train_single_step)\n",
    "            test_step = tf.function(test_step)\n",
    "\n",
    "        def _run_evaluation(current_training_step, test_iterator):\n",
    "            \"\"\"Runs validation steps and aggregate metrics.\"\"\"\n",
    "            for _ in range(eval_steps):\n",
    "                test_step(test_iterator)\n",
    "\n",
    "            with eval_summary_writer.as_default():\n",
    "                for metric in eval_metrics + model.metrics:\n",
    "                    metric_value = _float_metric_value(metric)\n",
    "                    logging.info('Step: [%d] Validation %s = %f',\n",
    "                                 current_training_step, metric.name,\n",
    "                                 metric_value)\n",
    "                    tf.summary.scalar(metric.name,\n",
    "                                      metric_value,\n",
    "                                      step=current_training_step)\n",
    "                eval_summary_writer.flush()\n",
    "\n",
    "        def _run_callbacks_on_batch_begin(batch):\n",
    "            \"\"\"Runs custom callbacks at the start of every step.\"\"\"\n",
    "            if not custom_callbacks:\n",
    "                return\n",
    "            for callback in custom_callbacks:\n",
    "                callback.on_batch_begin(batch)\n",
    "\n",
    "        def _run_callbacks_on_batch_end(batch):\n",
    "            \"\"\"Runs custom callbacks at the end of every step.\"\"\"\n",
    "            if not custom_callbacks:\n",
    "                return\n",
    "            for callback in custom_callbacks:\n",
    "                callback.on_batch_end(batch)\n",
    "\n",
    "        # Training loop starts here.\n",
    "        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n",
    "        sub_model_checkpoint = tf.train.Checkpoint(\n",
    "            model=sub_model) if sub_model_export_name else None\n",
    "\n",
    "        latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)\n",
    "        if latest_checkpoint_file:\n",
    "            logging.info(\n",
    "                'Checkpoint file %s found and restoring from '\n",
    "                'checkpoint', latest_checkpoint_file)\n",
    "            checkpoint.restore(latest_checkpoint_file)\n",
    "            logging.info('Loading from checkpoint file completed')\n",
    "\n",
    "        # optimizer.iterations persists through checkpoints, so training\n",
    "        # resumes from the restored global step.\n",
    "        current_step = optimizer.iterations.numpy()\n",
    "        checkpoint_name = 'ctl_step_{step}.ckpt'\n",
    "\n",
    "        while current_step < total_training_steps:\n",
    "            # Training loss/metric are taking average over steps inside micro\n",
    "            # training loop. We reset their values before each round.\n",
    "            train_loss_metric.reset_states()\n",
    "            for metric in train_metrics + model.metrics:\n",
    "                metric.reset_states()\n",
    "\n",
    "            _run_callbacks_on_batch_begin(current_step)\n",
    "            # Runs several steps in the host while loop.\n",
    "            steps = steps_to_run(current_step, steps_per_epoch, steps_per_loop)\n",
    "\n",
    "            if steps == 1:\n",
    "                # TODO(zongweiz): merge with train_steps once tf.while_loop\n",
    "                # GPU performance bugs are fixed.\n",
    "                train_single_step(train_iterator)\n",
    "            else:\n",
    "                # Converts steps to a Tensor to avoid tf.function retracing.\n",
    "                train_steps(train_iterator,\n",
    "                            tf.convert_to_tensor(steps, dtype=tf.int32))\n",
    "            _run_callbacks_on_batch_end(current_step)\n",
    "            current_step += steps\n",
    "\n",
    "            train_loss = _float_metric_value(train_loss_metric)\n",
    "            # Updates training logging.\n",
    "            training_status = 'Train Step: %d/%d  / loss = %s' % (\n",
    "                current_step, total_training_steps, train_loss)\n",
    "\n",
    "            if train_summary_writer:\n",
    "                with train_summary_writer.as_default():\n",
    "                    tf.summary.scalar(train_loss_metric.name,\n",
    "                                      train_loss,\n",
    "                                      step=current_step)\n",
    "                    for metric in train_metrics + model.metrics:\n",
    "                        metric_value = _float_metric_value(metric)\n",
    "                        training_status += '  %s = %f' % (metric.name,\n",
    "                                                          metric_value)\n",
    "                        tf.summary.scalar(metric.name,\n",
    "                                          metric_value,\n",
    "                                          step=current_step)\n",
    "                    train_summary_writer.flush()\n",
    "            logging.info(training_status)\n",
    "\n",
    "            # Saves model checkpoints and run validation steps at every epoch end.\n",
    "            if current_step % steps_per_epoch == 0:\n",
    "                # To avoid repeated model saving, we do not save after the last\n",
    "                # step of training.\n",
    "                if current_step < total_training_steps:\n",
    "                    _save_checkpoint(checkpoint, model_dir,\n",
    "                                     checkpoint_name.format(step=current_step))\n",
    "                    if sub_model_export_name:\n",
    "                        _save_checkpoint(\n",
    "                            sub_model_checkpoint, model_dir,\n",
    "                            '%s_step_%d.ckpt' %\n",
    "                            (sub_model_export_name, current_step))\n",
    "                if eval_input_fn:\n",
    "                    logging.info('Running evaluation after step: %s.',\n",
    "                                 current_step)\n",
    "                    _run_evaluation(\n",
    "                        current_step,\n",
    "                        _get_input_iterator(eval_input_fn, strategy))\n",
    "                    # Re-initialize evaluation metric.\n",
    "                    for metric in eval_metrics + model.metrics:\n",
    "                        metric.reset_states()\n",
    "\n",
    "        _save_checkpoint(checkpoint, model_dir,\n",
    "                         checkpoint_name.format(step=current_step))\n",
    "        if sub_model_export_name:\n",
    "            _save_checkpoint(sub_model_checkpoint, model_dir,\n",
    "                             '%s.ckpt' % sub_model_export_name)\n",
    "\n",
    "        if eval_input_fn:\n",
    "            logging.info(\n",
    "                'Running final evaluation after training is complete.')\n",
    "            _run_evaluation(current_step,\n",
    "                            _get_input_iterator(eval_input_fn, strategy))\n",
    "\n",
    "        training_summary = {\n",
    "            'total_training_steps': total_training_steps,\n",
    "            'train_loss': _float_metric_value(train_loss_metric),\n",
    "        }\n",
    "        if eval_metrics:\n",
    "            # TODO(hongkuny): Cleans up summary reporting in text.\n",
    "            training_summary['last_train_metrics'] = _float_metric_value(\n",
    "                train_metrics[0])\n",
    "            training_summary['eval_metrics'] = _float_metric_value(\n",
    "                eval_metrics[0])\n",
    "\n",
    "        write_txt_summary(training_summary, summary_dir)\n",
    "\n",
    "        return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_customized_training(strategy, bert_config, max_seq_length,\n",
    "                            max_predictions_per_seq, model_dir,\n",
    "                            steps_per_epoch, steps_per_loop, epochs,\n",
    "                            initial_lr, warmup_steps, input_files,\n",
    "                            train_batch_size):\n",
    "    \"\"\"Run BERT pretrain model training using low-level API.\n",
    "\n",
    "    Args:\n",
    "        strategy: tf.distribute strategy the training runs under.\n",
    "        bert_config: BertConfig with the model hyperparameters.\n",
    "        max_seq_length: maximum tokenized sequence length.\n",
    "        max_predictions_per_seq: maximum masked-LM predictions per sequence.\n",
    "        model_dir: directory for checkpoints and summaries.\n",
    "        steps_per_epoch: training steps per epoch.\n",
    "        steps_per_loop: steps per host training loop (graph-mode batching).\n",
    "        epochs: number of epochs to train.\n",
    "        initial_lr: initial learning rate for the optimizer.\n",
    "        warmup_steps: number of learning-rate warmup steps.\n",
    "        input_files: TFRecord file pattern(s) with pretraining examples.\n",
    "        train_batch_size: global training batch size.\n",
    "\n",
    "    Returns:\n",
    "        The trained Keras model returned by the training loop.\n",
    "    \"\"\"\n",
    "\n",
    "    # Dataset factory over the TFRecord pretraining files.\n",
    "    train_input_fn = get_pretrain_dataset_fn(input_files, max_seq_length,\n",
    "                                             max_predictions_per_seq,\n",
    "                                             train_batch_size)\n",
    "\n",
    "    def _get_pretrain_model():\n",
    "        \"\"\"Gets a pretraining model with its optimizer attached.\"\"\"\n",
    "        pretrain_model, core_model = bert_models.pretrain_model(\n",
    "            bert_config, max_seq_length, max_predictions_per_seq)\n",
    "        pretrain_model.optimizer = optimization.create_optimizer(\n",
    "            initial_lr, steps_per_epoch * epochs, warmup_steps)\n",
    "        if FLAGS.fp16_implementation == 'graph_rewrite':\n",
    "            # Note: when flags_obj.fp16_implementation == \"graph_rewrite\", dtype as\n",
    "            # determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'\n",
    "            # which will ensure tf.compat.v2.keras.mixed_precision and\n",
    "            # tf.train.experimental.enable_mixed_precision_graph_rewrite do not double\n",
    "            # up.\n",
    "            pretrain_model.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(\n",
    "                pretrain_model.optimizer)\n",
    "        return pretrain_model, core_model\n",
    "\n",
    "    # Loss is averaged across replicas when FLAGS.scale_loss is set.\n",
    "    trained_model = model_training_utils.run_customized_training_loop(\n",
    "        strategy=strategy,\n",
    "        model_fn=_get_pretrain_model,\n",
    "        loss_fn=get_loss_fn(\n",
    "            loss_factor=1.0 /\n",
    "            strategy.num_replicas_in_sync if FLAGS.scale_loss else 1.0),\n",
    "        model_dir=model_dir,\n",
    "        train_input_fn=train_input_fn,\n",
    "        steps_per_epoch=steps_per_epoch,\n",
    "        steps_per_loop=steps_per_loop,\n",
    "        epochs=epochs,\n",
    "        sub_model_export_name='pretrained/bert_model')\n",
    "\n",
    "    return trained_model\n",
    "\n",
    "\n",
    "def run_bert_pretrain(strategy):\n",
    "    \"\"\"Runs BERT pre-training under the given distribution strategy.\n",
    "\n",
    "    Args:\n",
    "        strategy: tf.distribute strategy to train with; must be provided.\n",
    "\n",
    "    Returns:\n",
    "        The trained Keras model from `run_customized_training`.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if `strategy` is not specified.\n",
    "    \"\"\"\n",
    "\n",
    "    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n",
    "    if not strategy:\n",
    "        raise ValueError('Distribution strategy is not specified.')\n",
    "\n",
    "    # Runs customized training loop.\n",
    "    # fixed log message: 'distrubuted' typo and the missing space between the\n",
    "    # concatenated string fragments ('...distrubuted' + 'strategy.').\n",
    "    logging.info(\n",
    "        'Training using customized training loop TF 2.0 with distributed '\n",
    "        'strategy.')\n",
    "\n",
    "    return run_customized_training(strategy, bert_config, FLAGS.max_seq_length,\n",
    "                                   FLAGS.max_predictions_per_seq,\n",
    "                                   FLAGS.model_dir, FLAGS.num_steps_per_epoch,\n",
    "                                   FLAGS.steps_per_loop,\n",
    "                                   FLAGS.num_train_epochs, FLAGS.learning_rate,\n",
    "                                   FLAGS.warmup_steps, FLAGS.input_files,\n",
    "                                   FLAGS.train_batch_size)\n",
    "\n",
    "\n",
    "# This notebook targets the TF 2.x API surface used above.\n",
    "assert tf.version.VERSION.startswith('2.')\n",
    "\n",
    "# Fall back to a scratch directory when no model dir flag was given.\n",
    "if not FLAGS.model_dir:\n",
    "    FLAGS.model_dir = '/tmp/bert20/'\n",
    "# Build the distribution strategy (CPU/GPU/TPU) from the flags.\n",
    "strategy = distribution_utils.get_distribution_strategy(\n",
    "    distribution_strategy=FLAGS.distribution_strategy,\n",
    "    num_gpus=FLAGS.num_gpus,\n",
    "    tpu_address=FLAGS.tpu)\n",
    "if strategy:\n",
    "    print('***** Number of cores used : ', strategy.num_replicas_in_sync)\n",
    "\n",
    "# Kick off pretraining.\n",
    "run_bert_pretrain(strategy)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
