{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "colab": {
   "name": "Bert文本分类.ipynb",
   "provenance": [],
   "collapsed_sections": [],
   "toc_visible": true
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "6a09b3ffafad4334a73db05a5cc09bc9": {
     "model_module": "@jupyter-widgets/controls",
     "model_name": "VBoxModel",
     "state": {
      "_view_name": "VBoxView",
      "_dom_classes": [
       "widget-interact"
      ],
      "_model_name": "VBoxModel",
      "_view_module": "@jupyter-widgets/controls",
      "_model_module_version": "1.5.0",
      "_view_count": null,
      "_view_module_version": "1.5.0",
      "box_style": "",
      "layout": "IPY_MODEL_dbb9ea7853fe40f3ab7f54b9cf5aff9c",
      "_model_module": "@jupyter-widgets/controls",
      "children": [
       "IPY_MODEL_7a8d29ef7e4a45f2907b5765cdc6531b",
       "IPY_MODEL_3eee9de1adf64db8b34d59534ffb2346"
      ]
     }
    },
    "dbb9ea7853fe40f3ab7f54b9cf5aff9c": {
     "model_module": "@jupyter-widgets/base",
     "model_name": "LayoutModel",
     "state": {
      "_view_name": "LayoutView",
      "grid_template_rows": null,
      "right": null,
      "justify_content": null,
      "_view_module": "@jupyter-widgets/base",
      "overflow": null,
      "_model_module_version": "1.2.0",
      "_view_count": null,
      "flex_flow": null,
      "width": null,
      "min_width": null,
      "border": null,
      "align_items": null,
      "bottom": null,
      "_model_module": "@jupyter-widgets/base",
      "top": null,
      "grid_column": null,
      "overflow_y": null,
      "overflow_x": null,
      "grid_auto_flow": null,
      "grid_area": null,
      "grid_template_columns": null,
      "flex": null,
      "_model_name": "LayoutModel",
      "justify_items": null,
      "grid_row": null,
      "max_height": null,
      "align_content": null,
      "visibility": null,
      "align_self": null,
      "height": null,
      "min_height": null,
      "padding": null,
      "grid_auto_rows": null,
      "grid_gap": null,
      "max_width": null,
      "order": null,
      "_view_module_version": "1.2.0",
      "grid_template_areas": null,
      "object_position": null,
      "object_fit": null,
      "grid_auto_columns": null,
      "margin": null,
      "display": null,
      "left": null
     }
    },
    "7a8d29ef7e4a45f2907b5765cdc6531b": {
     "model_module": "@jupyter-widgets/controls",
     "model_name": "TextModel",
     "state": {
      "_view_name": "TextView",
      "style": "IPY_MODEL_98021f75eb4e40a3b1b72598f82bb19f",
      "_dom_classes": [],
      "description": "x",
      "_model_name": "TextModel",
      "placeholder": "​",
      "_view_module": "@jupyter-widgets/controls",
      "_model_module_version": "1.5.0",
      "value": " 科技",
      "_view_count": null,
      "disabled": false,
      "_view_module_version": "1.5.0",
      "continuous_update": true,
      "description_tooltip": null,
      "_model_module": "@jupyter-widgets/controls",
      "layout": "IPY_MODEL_6b564c5514814bf78874dfe6d683d607"
     }
    },
    "3eee9de1adf64db8b34d59534ffb2346": {
     "model_module": "@jupyter-widgets/output",
     "model_name": "OutputModel",
     "state": {
      "_view_name": "OutputView",
      "msg_id": "",
      "_dom_classes": [],
      "_model_name": "OutputModel",
      "outputs": [
       {
        "output_type": "display_data",
        "metadata": {
         "tags": []
        },
        "text/plain": "'分类结果： 科技'"
       }
      ],
      "_view_module": "@jupyter-widgets/output",
      "_model_module_version": "1.0.0",
      "_view_count": null,
      "_view_module_version": "1.0.0",
      "layout": "IPY_MODEL_ba773a74029947e6ba651ea469417e6f",
      "_model_module": "@jupyter-widgets/output"
     }
    },
    "98021f75eb4e40a3b1b72598f82bb19f": {
     "model_module": "@jupyter-widgets/controls",
     "model_name": "DescriptionStyleModel",
     "state": {
      "_view_name": "StyleView",
      "_model_name": "DescriptionStyleModel",
      "description_width": "",
      "_view_module": "@jupyter-widgets/base",
      "_model_module_version": "1.5.0",
      "_view_count": null,
      "_view_module_version": "1.2.0",
      "_model_module": "@jupyter-widgets/controls"
     }
    },
    "6b564c5514814bf78874dfe6d683d607": {
     "model_module": "@jupyter-widgets/base",
     "model_name": "LayoutModel",
     "state": {
      "_view_name": "LayoutView",
      "grid_template_rows": null,
      "right": null,
      "justify_content": null,
      "_view_module": "@jupyter-widgets/base",
      "overflow": null,
      "_model_module_version": "1.2.0",
      "_view_count": null,
      "flex_flow": null,
      "width": null,
      "min_width": null,
      "border": null,
      "align_items": null,
      "bottom": null,
      "_model_module": "@jupyter-widgets/base",
      "top": null,
      "grid_column": null,
      "overflow_y": null,
      "overflow_x": null,
      "grid_auto_flow": null,
      "grid_area": null,
      "grid_template_columns": null,
      "flex": null,
      "_model_name": "LayoutModel",
      "justify_items": null,
      "grid_row": null,
      "max_height": null,
      "align_content": null,
      "visibility": null,
      "align_self": null,
      "height": null,
      "min_height": null,
      "padding": null,
      "grid_auto_rows": null,
      "grid_gap": null,
      "max_width": null,
      "order": null,
      "_view_module_version": "1.2.0",
      "grid_template_areas": null,
      "object_position": null,
      "object_fit": null,
      "grid_auto_columns": null,
      "margin": null,
      "display": null,
      "left": null
     }
    }
   }
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "source": [],
    "metadata": {
     "collapsed": false
    }
   }
  }
 },
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "sxJbp3g0ulse",
    "colab_type": "text"
   },
   "source": [
    "## 安装依赖"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "E-XZtsaMbc3Q",
    "colab_type": "code",
    "outputId": "eff9b772-ceb3-4145-9185-ed8433a39d14",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 0
    }
   },
   "source": [
    "!pip install bert-tensorflow"
   ],
   "execution_count": 1,
   "outputs": [
    {
     "output_type": "stream",
     "text": [
      "Collecting bert-tensorflow\n",
      "\u001b[?25l  Downloading https://files.pythonhosted.org/packages/a6/66/7eb4e8b6ea35b7cc54c322c816f976167a43019750279a8473d355800a93/bert_tensorflow-1.0.1-py2.py3-none-any.whl (67kB)\n",
      "\r\u001b[K     |████▉                           | 10kB 17.7MB/s eta 0:00:01\r\u001b[K     |█████████▊                      | 20kB 1.6MB/s eta 0:00:01\r\u001b[K     |██████████████▋                 | 30kB 2.3MB/s eta 0:00:01\r\u001b[K     |███████████████████▍            | 40kB 1.6MB/s eta 0:00:01\r\u001b[K     |████████████████████████▎       | 51kB 1.9MB/s eta 0:00:01\r\u001b[K     |█████████████████████████████▏  | 61kB 2.2MB/s eta 0:00:01\r\u001b[K     |████████████████████████████████| 71kB 2.1MB/s \n",
      "\u001b[?25hRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from bert-tensorflow) (1.12.0)\n",
      "Installing collected packages: bert-tensorflow\n",
      "Successfully installed bert-tensorflow-1.0.1\n"
     ],
     "name": "stdout"
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "b--davW4uodj",
    "colab_type": "text"
   },
   "source": [
    "## 任务代码封装"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "aq7K_dnOu1FR",
    "colab_type": "text"
   },
   "source": [
    "### BertClassifier"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "WBJ532k7fPzA",
    "colab_type": "code",
    "outputId": "ab8d2168-8ad4-4403-b1a6-d360d9c8b7d4",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 100
    }
   },
    "source": [
     "import os\n",
     "import sys\n",
     "sys.path.append(os.path.dirname(os.getcwd()))\n",
     "import tensorflow as tf\n",
     "\n",
     "from bert import modeling\n",
     "from bert import optimization\n",
     "\n",
     "\n",
     "class BertClassifier(object):\n",
     "    \"\"\"BERT-based text classifier (TensorFlow 1.x graph mode).\n",
     "\n",
     "    Encodes the input with BERT, takes the pooled sentence-level output\n",
     "    and applies a single dense layer to produce class logits. The loss\n",
     "    and train_op are only built when is_training is True.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, config, is_training=True, num_train_step=None, num_warmup_step=None):\n",
     "        # config keys read here: bert_model_path, num_classes, learning_rate\n",
     "        self.__bert_config_path = os.path.join(config[\"bert_model_path\"], \"bert_config.json\")\n",
     "        self.__num_classes = config[\"num_classes\"]\n",
     "        self.__learning_rate = config[\"learning_rate\"]\n",
     "        self.__is_training = is_training\n",
     "        self.__num_train_step = num_train_step\n",
     "        self.__num_warmup_step = num_warmup_step\n",
     "\n",
     "        # placeholders: ids/masks/segments are [batch, seq_len]; labels are [batch]\n",
     "        self.input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_ids')\n",
     "        self.input_masks = tf.placeholder(dtype=tf.int32, shape=[None, None], name='input_mask')\n",
     "        self.segment_ids = tf.placeholder(dtype=tf.int32, shape=[None, None], name='segment_ids')\n",
     "        self.label_ids = tf.placeholder(dtype=tf.int32, shape=[None], name=\"label_ids\")\n",
     "\n",
     "        self.built_model()\n",
     "        self.init_saver()\n",
     "\n",
     "    def built_model(self):\n",
     "        \"\"\"Build the BERT encoder and the classification head.\"\"\"\n",
     "        bert_config = modeling.BertConfig.from_json_file(self.__bert_config_path)\n",
     "\n",
     "        model = modeling.BertModel(config=bert_config,\n",
     "                                   is_training=self.__is_training,\n",
     "                                   input_ids=self.input_ids,\n",
     "                                   input_mask=self.input_masks,\n",
     "                                   token_type_ids=self.segment_ids,\n",
     "                                   use_one_hot_embeddings=False)\n",
     "        # pooled per-sequence output; its last dimension is hidden_size\n",
     "        output_layer = model.get_pooled_output()\n",
     "\n",
     "        hidden_size = output_layer.shape[-1].value\n",
     "        if self.__is_training:\n",
     "            # I.e., 0.1 dropout\n",
     "            output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n",
     "\n",
     "        with tf.name_scope(\"output\"):\n",
     "            output_weights = tf.get_variable(\n",
     "                \"output_weights\", [self.__num_classes, hidden_size],\n",
     "                initializer=tf.truncated_normal_initializer(stddev=0.02))\n",
     "\n",
     "            output_bias = tf.get_variable(\n",
     "                \"output_bias\", [self.__num_classes], initializer=tf.zeros_initializer())\n",
     "\n",
     "            logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n",
     "            logits = tf.nn.bias_add(logits, output_bias)\n",
     "            self.predictions = tf.argmax(logits, axis=-1, name=\"predictions\")\n",
     "\n",
     "        if self.__is_training:\n",
     "\n",
     "            with tf.name_scope(\"loss\"):\n",
     "                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=self.label_ids)\n",
     "                self.loss = tf.reduce_mean(losses, name=\"loss\")\n",
     "\n",
     "            with tf.name_scope('train_op'):\n",
     "                # optimizer from the bert package; train/warmup step counts are passed through\n",
     "                self.train_op = optimization.create_optimizer(\n",
     "                    self.loss, self.__learning_rate, self.__num_train_step, self.__num_warmup_step, use_tpu=False)\n",
     "\n",
     "    def init_saver(self):\n",
     "        # saver over all global variables so the BERT weights are checkpointed too\n",
     "        self.saver = tf.train.Saver(tf.global_variables())\n",
     "\n",
     "    def train(self, sess, batch):\n",
     "        \"\"\"\n",
     "        Run one training step on a batch.\n",
     "        :param sess: TensorFlow session\n",
     "        :param batch: batch dict with input_ids/input_masks/segment_ids/label_ids\n",
     "        :return: loss and predictions\n",
     "        \"\"\"\n",
     "\n",
     "        feed_dict = {self.input_ids: batch[\"input_ids\"],\n",
     "                     self.input_masks: batch[\"input_masks\"],\n",
     "                     self.segment_ids: batch[\"segment_ids\"],\n",
     "                     self.label_ids: batch[\"label_ids\"]}\n",
     "\n",
     "        # run the optimizer together with loss and predictions\n",
     "        _, loss, predictions = sess.run([self.train_op, self.loss, self.predictions], feed_dict=feed_dict)\n",
     "        return loss, predictions\n",
     "\n",
     "    def eval(self, sess, batch):\n",
     "        \"\"\"\n",
     "        Evaluate the model on a batch (no parameter update).\n",
     "        :param sess: TensorFlow session\n",
     "        :param batch: batch dict with input_ids/input_masks/segment_ids/label_ids\n",
     "        :return: loss and predictions\n",
     "        \"\"\"\n",
     "        feed_dict = {self.input_ids: batch[\"input_ids\"],\n",
     "                     self.input_masks: batch[\"input_masks\"],\n",
     "                     self.segment_ids: batch[\"segment_ids\"],\n",
     "                     self.label_ids: batch[\"label_ids\"]}\n",
     "\n",
     "        loss, predictions = sess.run([self.loss, self.predictions], feed_dict=feed_dict)\n",
     "        return loss, predictions\n",
     "\n",
     "    def infer(self, sess, batch):\n",
     "        \"\"\"\n",
     "        Predict on new data (no labels required in the batch).\n",
     "        :param sess: TensorFlow session\n",
     "        :param batch: batch dict with input_ids/input_masks/segment_ids\n",
     "        :return: predictions\n",
     "        \"\"\"\n",
     "        feed_dict = {self.input_ids: batch[\"input_ids\"],\n",
     "                     self.input_masks: batch[\"input_masks\"],\n",
     "                     self.segment_ids: batch[\"segment_ids\"]}\n",
     "\n",
     "        predict = sess.run(self.predictions, feed_dict=feed_dict)\n",
     "\n",
     "        return predict\n"
    ],
   "execution_count": 2,
   "outputs": [
    {
     "output_type": "display_data",
     "data": {
      "text/html": [
       "<p style=\"color: red;\">\n",
       "The default version of TensorFlow in Colab will soon switch to TensorFlow 2.x.<br>\n",
       "We recommend you <a href=\"https://www.tensorflow.org/guide/migrate\" target=\"_blank\">upgrade</a> now \n",
       "or ensure your notebook will continue to use TensorFlow 1.x via the <code>%tensorflow_version 1.x</code> magic:\n",
       "<a href=\"https://colab.research.google.com/notebooks/tensorflow_version.ipynb\" target=\"_blank\">more info</a>.</p>\n"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {
      "tags": []
     }
    },
    {
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/optimization.py:87: The name tf.train.Optimizer is deprecated. Please use tf.compat.v1.train.Optimizer instead.\n",
      "\n"
     ],
     "name": "stdout"
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4eZTMa_Ru58Q",
    "colab_type": "text"
   },
   "source": [
    "### TrainData"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "5odF6bA0fjF5",
    "colab_type": "code",
    "colab": {}
   },
    "source": [
     "\n",
     "import os\n",
     "import json\n",
     "import random\n",
     "import sys\n",
     "sys.path.append(os.path.dirname(os.getcwd()))\n",
     "\n",
     "from bert import tokenization\n",
     "\n",
     "\n",
     "class TrainData(object):\n",
     "    \"\"\"Data pipeline: reads '<SEP>'-separated text/label files, converts\n",
     "    text to BERT WordPiece ids, pads to a fixed length and yields\n",
     "    shuffled batches.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, config):\n",
     "        # config keys read here: bert_model_path, output_path, sequence_length, batch_size\n",
     "        self.__vocab_path = os.path.join(config[\"bert_model_path\"], \"vocab.txt\")\n",
     "        self.__output_path = config[\"output_path\"]\n",
     "        if not os.path.exists(self.__output_path):\n",
     "            os.makedirs(self.__output_path)\n",
     "        self._sequence_length = config[\"sequence_length\"]  # every input is padded/truncated to this length\n",
     "        self._batch_size = config[\"batch_size\"]\n",
     "\n",
     "    @staticmethod\n",
     "    def read_data(file_path):\n",
     "        \"\"\"\n",
     "        Read the raw data file. Each line is '<SEP>'-separated: either\n",
     "        text_a<SEP>text_b<SEP>label (the two texts are concatenated) or\n",
     "        text<SEP>label.\n",
     "        :param file_path:\n",
     "        :return: text contents and labels, inputs = [], labels = []\n",
     "        \"\"\"\n",
     "        inputs = []\n",
     "        labels = []\n",
     "        with open(file_path, \"r\", encoding=\"utf8\") as fr:\n",
     "            for line in fr.readlines():\n",
     "                item = line.strip().split(\"<SEP>\")\n",
     "                if len(item) == 3:\n",
     "                    inputs.append(item[0] + item[1])\n",
     "                    labels.append(item[2])\n",
     "                else:\n",
     "                    # NOTE(review): assumes exactly 2 fields here; a line with\n",
     "                    # no '<SEP>' at all would raise IndexError on item[1]\n",
     "                    inputs.append(item[0])\n",
     "                    labels.append(item[1])\n",
     "\n",
     "        return inputs, labels\n",
     "\n",
     "    def trans_to_index(self, inputs):\n",
     "        \"\"\"\n",
     "        Convert raw texts to token-id representation.\n",
     "        :param inputs: list of raw text strings\n",
     "        :return: input ids, attention masks, segment ids (all unpadded)\n",
     "        \"\"\"\n",
     "        # note: the tokenizer is rebuilt on every call\n",
     "        tokenizer = tokenization.FullTokenizer(vocab_file=self.__vocab_path, do_lower_case=True)\n",
     "        input_ids = []\n",
     "        input_masks = []\n",
     "        segment_ids = []\n",
     "        for text in inputs:\n",
     "            text = tokenization.convert_to_unicode(text)\n",
     "            tokens = tokenizer.tokenize(text)\n",
     "            # wrap with BERT's special boundary tokens\n",
     "            tokens = [\"[CLS]\"] + tokens + [\"[SEP]\"]\n",
     "            input_id = tokenizer.convert_tokens_to_ids(tokens)\n",
     "            input_ids.append(input_id)\n",
     "            input_masks.append([1] * len(input_id))\n",
     "            segment_ids.append([0] * len(input_id))\n",
     "\n",
     "        return input_ids, input_masks, segment_ids\n",
     "\n",
     "    @staticmethod\n",
     "    def trans_label_to_index(labels, label_to_index):\n",
     "        \"\"\"\n",
     "        Convert string labels to integer ids.\n",
     "        :param labels: labels\n",
     "        :param label_to_index: label-to-index mapping\n",
     "        :return:\n",
     "        \"\"\"\n",
     "        labels_idx = [label_to_index[label] for label in labels]\n",
     "        return labels_idx\n",
     "\n",
     "    def padding(self, input_ids, input_masks, segment_ids):\n",
     "        \"\"\"\n",
     "        Pad (with 0) or truncate every sequence to self._sequence_length.\n",
     "        :param input_ids:\n",
     "        :param input_masks:\n",
     "        :param segment_ids:\n",
     "        :return:\n",
     "        \"\"\"\n",
     "        pad_input_ids, pad_input_masks, pad_segment_ids = [], [], []\n",
     "        for input_id, input_mask, segment_id in zip(input_ids, input_masks, segment_ids):\n",
     "            if len(input_id) < self._sequence_length:\n",
     "                pad_input_ids.append(input_id + [0] * (self._sequence_length - len(input_id)))\n",
     "                pad_input_masks.append(input_mask + [0] * (self._sequence_length - len(input_mask)))\n",
     "                pad_segment_ids.append(segment_id + [0] * (self._sequence_length - len(segment_id)))\n",
     "            else:\n",
     "                pad_input_ids.append(input_id[:self._sequence_length])\n",
     "                pad_input_masks.append(input_mask[:self._sequence_length])\n",
     "                pad_segment_ids.append(segment_id[:self._sequence_length])\n",
     "\n",
     "        return pad_input_ids, pad_input_masks, pad_segment_ids\n",
     "\n",
     "    def gen_data(self, file_path, is_training=True):\n",
     "        \"\"\"\n",
     "        Read a data file and produce fully indexed, padded model inputs.\n",
     "        When training, the label mapping is created and saved to\n",
     "        output_path; otherwise it is loaded from that saved file.\n",
     "        :param file_path:\n",
     "        :param is_training:\n",
     "        :return:\n",
     "        \"\"\"\n",
     "\n",
     "        # 1. read raw data\n",
     "        inputs, labels = self.read_data(file_path)\n",
     "        print(\"read finished\")\n",
     "\n",
     "        if is_training:\n",
     "            # NOTE(review): set() ordering is not deterministic, so label ids\n",
     "            # can differ between runs; the saved json is what keeps eval consistent\n",
     "            uni_label = list(set(labels))\n",
     "            label_to_index = dict(zip(uni_label, list(range(len(uni_label)))))\n",
     "            with open(os.path.join(self.__output_path, \"label_to_index.json\"), \"w\", encoding=\"utf8\") as fw:\n",
     "                json.dump(label_to_index, fw, indent=0, ensure_ascii=False)\n",
     "        else:\n",
     "            with open(os.path.join(self.__output_path, \"label_to_index.json\"), \"r\", encoding=\"utf8\") as fr:\n",
     "                label_to_index = json.load(fr)\n",
     "\n",
     "        # 2. convert inputs to indices\n",
     "        inputs_ids, input_masks, segment_ids = self.trans_to_index(inputs)\n",
     "        print(\"index transform finished\")\n",
     "\n",
     "        inputs_ids, input_masks, segment_ids = self.padding(inputs_ids, input_masks, segment_ids)\n",
     "\n",
     "        # 3. convert labels to indices\n",
     "        labels_ids = self.trans_label_to_index(labels, label_to_index)\n",
     "        print(\"label index transform finished\")\n",
     "\n",
     "        # print a few examples as a quick sanity check\n",
     "        for i in range(5):\n",
     "            print(\"line {}: *****************************************\".format(i))\n",
     "            print(\"input: \", inputs[i])\n",
     "            print(\"input_id: \", inputs_ids[i])\n",
     "            print(\"input_mask: \", input_masks[i])\n",
     "            print(\"segment_id: \", segment_ids[i])\n",
     "            print(\"label_id: \", labels_ids[i])\n",
     "\n",
     "        return inputs_ids, input_masks, segment_ids, labels_ids, label_to_index\n",
     "\n",
     "    def next_batch(self, input_ids, input_masks, segment_ids, label_ids):\n",
     "        \"\"\"\n",
     "        Shuffle the data and yield batch dicts; the trailing remainder\n",
     "        smaller than one batch is dropped.\n",
     "        :param input_ids:\n",
     "        :param input_masks:\n",
     "        :param segment_ids:\n",
     "        :param label_ids:\n",
     "        :return:\n",
     "        \"\"\"\n",
     "        z = list(zip(input_ids, input_masks, segment_ids, label_ids))\n",
     "        random.shuffle(z)\n",
     "        input_ids, input_masks, segment_ids, label_ids = zip(*z)\n",
     "\n",
     "        num_batches = len(input_ids) // self._batch_size\n",
     "\n",
     "        for i in range(num_batches):\n",
     "            start = i * self._batch_size\n",
     "            end = start + self._batch_size\n",
     "            batch_input_ids = input_ids[start: end]\n",
     "            batch_input_masks = input_masks[start: end]\n",
     "            batch_segment_ids = segment_ids[start: end]\n",
     "            batch_label_ids = label_ids[start: end]\n",
     "\n",
     "            yield dict(input_ids=batch_input_ids,\n",
     "                       input_masks=batch_input_masks,\n",
     "                       segment_ids=batch_segment_ids,\n",
     "                       label_ids=batch_label_ids)"
    ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "J498iLMHu94Q",
    "colab_type": "text"
   },
   "source": [
    "### metrics"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "gEMbeK8jfpx5",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "\"\"\"\n",
    "定义各类性能指标\n",
    "\"\"\"\n",
    "from sklearn.metrics import roc_auc_score\n",
    "\n",
    "\n",
    "def mean(item: list) -> float:\n",
    "    \"\"\"\n",
    "    计算列表中元素的平均值\n",
    "    :param item: 列表对象\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    res = sum(item) / len(item) if len(item) > 0 else 0\n",
    "    return res\n",
    "\n",
    "\n",
    "def accuracy(pred_y, true_y):\n",
    "    \"\"\"\n",
    "    计算二类和多类的准确率\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if isinstance(pred_y[0], list):\n",
    "        pred_y = [item[0] for item in pred_y]\n",
    "    corr = 0\n",
    "    for i in range(len(pred_y)):\n",
    "        if pred_y[i] == true_y[i]:\n",
    "            corr += 1\n",
    "    acc = corr / len(pred_y) if len(pred_y) > 0 else 0\n",
    "    return acc\n",
    "\n",
    "\n",
    "def binary_auc(pred_y, true_y):\n",
    "    \"\"\"\n",
    "    二类别的auc值\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    auc = roc_auc_score(true_y, pred_y)\n",
    "    return auc\n",
    "\n",
    "\n",
    "def binary_precision(pred_y, true_y, positive=1):\n",
    "    \"\"\"\n",
    "    二类的精确率计算\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :param positive: 正例的索引表示\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    corr = 0\n",
    "    pred_corr = 0\n",
    "    for i in range(len(pred_y)):\n",
    "        if pred_y[i] == positive:\n",
    "            pred_corr += 1\n",
    "            if pred_y[i] == true_y[i]:\n",
    "                corr += 1\n",
    "\n",
    "    prec = corr / pred_corr if pred_corr > 0 else 0\n",
    "    return prec\n",
    "\n",
    "\n",
    "def binary_recall(pred_y, true_y, positive=1):\n",
    "    \"\"\"\n",
    "    二类的召回率\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :param positive: 正例的索引表示\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    corr = 0\n",
    "    true_corr = 0\n",
    "    for i in range(len(pred_y)):\n",
    "        if true_y[i] == positive:\n",
    "            true_corr += 1\n",
    "            if pred_y[i] == true_y[i]:\n",
    "                corr += 1\n",
    "\n",
    "    rec = corr / true_corr if true_corr > 0 else 0\n",
    "    return rec\n",
    "\n",
    "\n",
    "def binary_f_beta(pred_y, true_y, beta=1.0, positive=1):\n",
    "    \"\"\"\n",
    "    二类的f beta值\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :param beta: beta值\n",
    "    :param positive: 正例的索引表示\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    precision = binary_precision(pred_y, true_y, positive)\n",
    "    recall = binary_recall(pred_y, true_y, positive)\n",
    "    try:\n",
    "        f_b = (1 + beta * beta) * precision * recall / (beta * beta * precision + recall)\n",
    "    except:\n",
    "        f_b = 0\n",
    "    return f_b\n",
    "\n",
    "\n",
    "def multi_precision(pred_y, true_y, labels):\n",
    "    \"\"\"\n",
    "    多类的精确率\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :param labels: 标签列表\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if isinstance(pred_y[0], list):\n",
    "        pred_y = [item[0] for item in pred_y]\n",
    "\n",
    "    precisions = [binary_precision(pred_y, true_y, label) for label in labels]\n",
    "    prec = mean(precisions)\n",
    "    return prec\n",
    "\n",
    "\n",
    "def multi_recall(pred_y, true_y, labels):\n",
    "    \"\"\"\n",
    "    多类的召回率\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :param labels: 标签列表\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if isinstance(pred_y[0], list):\n",
    "        pred_y = [item[0] for item in pred_y]\n",
    "\n",
    "    recalls = [binary_recall(pred_y, true_y, label) for label in labels]\n",
    "    rec = mean(recalls)\n",
    "    return rec\n",
    "\n",
    "\n",
    "def multi_f_beta(pred_y, true_y, labels, beta=1.0):\n",
    "    \"\"\"\n",
    "    多类的f beta值\n",
    "    :param pred_y: 预测结果\n",
    "    :param true_y: 真实结果\n",
    "    :param labels: 标签列表\n",
    "    :param beta: beta值\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if isinstance(pred_y[0], list):\n",
    "        pred_y = [item[0] for item in pred_y]\n",
    "\n",
    "    f_betas = [binary_f_beta(pred_y, true_y, beta, label) for label in labels]\n",
    "    f_beta = mean(f_betas)\n",
    "    return f_beta\n",
    "\n",
    "\n",
    "def get_binary_metrics(pred_y, true_y, f_beta=1.0):\n",
    "    \"\"\"\n",
    "    得到二分类的性能指标\n",
    "    :param pred_y:\n",
    "    :param true_y:\n",
    "    :param f_beta:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    acc = accuracy(pred_y, true_y)\n",
    "    auc = binary_auc(pred_y, true_y)\n",
    "    recall = binary_recall(pred_y, true_y)\n",
    "    precision = binary_precision(pred_y, true_y)\n",
    "    f_beta = binary_f_beta(pred_y, true_y, f_beta)\n",
    "    return acc, auc, recall, precision, f_beta\n",
    "\n",
    "\n",
    "def get_multi_metrics(pred_y, true_y, labels, f_beta=1.0):\n",
    "    \"\"\"\n",
    "    得到多分类的性能指标\n",
    "    :param pred_y:\n",
    "    :param true_y:\n",
    "    :param labels:\n",
    "    :param f_beta:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    acc = accuracy(pred_y, true_y)\n",
    "    recall = multi_recall(pred_y, true_y, labels)\n",
    "    precision = multi_precision(pred_y, true_y, labels)\n",
    "    f_beta = multi_f_beta(pred_y, true_y, labels, f_beta)\n",
    "    return acc, recall, precision, f_beta"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "BjAqefDKvBwS",
    "colab_type": "text"
   },
   "source": [
    "### Trainer"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "gDec6YeZddil",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "import os\n",
    "import time\n",
    "import sys\n",
    "sys.path.append(os.path.dirname(os.getcwd()))\n",
    "import tensorflow as tf\n",
    "from bert import modeling\n",
    "# from model import BertClassifier\n",
    "# from data_helper import TrainData\n",
    "# from metrics import mean, get_multi_metrics\n",
    "\n",
    "\n",
    "class Trainer(object):\n",
    "    def __init__(self, config:dict):\n",
    "        self.config = config\n",
    "        self.__bert_checkpoint_path = os.path.join(self.config[\"bert_model_path\"], \"bert_model.ckpt\")\n",
    "\n",
    "        # 加载数据集\n",
    "        self.data_obj = self.load_data()\n",
    "        self.t_in_ids, self.t_in_masks, self.t_seg_ids, self.t_lab_ids, lab_to_idx = self.data_obj.gen_data(\n",
    "            self.config[\"train_data\"])\n",
    "\n",
    "        self.e_in_ids, self.e_in_masks, self.e_seg_ids, self.e_lab_ids, lab_to_idx = self.data_obj.gen_data(\n",
    "            self.config[\"eval_data\"], is_training=False)\n",
    "        print(\"train data size: {}\".format(len(self.t_lab_ids)))\n",
    "        print(\"eval data size: {}\".format(len(self.e_lab_ids)))\n",
    "        self.label_list = [value for key, value in lab_to_idx.items()]\n",
    "        print(\"label numbers: \", len(self.label_list))\n",
    "\n",
    "        num_train_steps = int(\n",
    "            len(self.t_lab_ids) / self.config[\"batch_size\"] * self.config[\"epochs\"])\n",
    "        num_warmup_steps = int(num_train_steps * self.config[\"warmup_rate\"])\n",
    "        # 初始化模型对象\n",
    "        self.model = self.create_model(num_train_steps, num_warmup_steps)\n",
    "\n",
    "    def load_data(self):\n",
    "        \"\"\"\n",
    "        创建数据对象\n",
    "        :return:\n",
    "        \"\"\"\n",
    "        # 生成训练集对象并生成训练数据\n",
    "        data_obj = TrainData(self.config)\n",
    "        return data_obj\n",
    "\n",
    "    def create_model(self, num_train_step, num_warmup_step):\n",
    "        \"\"\"\n",
    "        根据config文件选择对应的模型，并初始化\n",
    "        :return:\n",
    "        \"\"\"\n",
    "        model = BertClassifier(config=self.config, num_train_step=num_train_step, num_warmup_step=num_warmup_step)\n",
    "        return model\n",
    "\n",
    "    def train(self):\n",
    "        \"\"\"Run the fine-tuning loop: restore pre-trained BERT weights, then\n",
    "        alternate training steps with periodic evaluation and checkpointing\n",
    "        (every ``checkpoint_every`` steps, saved under ``ckpt_model_path``).\n",
    "        \"\"\"\n",
    "        with tf.Session() as sess:\n",
    "            # Map trainable variables onto the pre-trained BERT checkpoint,\n",
    "            # then run the initializers: BERT weights come from the checkpoint,\n",
    "            # the new classifier head from its own initializers.\n",
    "            tvars = tf.trainable_variables()\n",
    "            (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(\n",
    "                tvars, self.__bert_checkpoint_path)\n",
    "            print(\"init bert model params\")\n",
    "            tf.train.init_from_checkpoint(self.__bert_checkpoint_path, assignment_map)\n",
    "            print(\"init bert model params done\")\n",
    "            sess.run(tf.variables_initializer(tf.global_variables()))\n",
    "\n",
    "            current_step = 0\n",
    "            start = time.time()\n",
    "            for epoch in range(self.config[\"epochs\"]):\n",
    "                print(\"----- Epoch {}/{} -----\".format(epoch + 1, self.config[\"epochs\"]))\n",
    "\n",
    "                for batch in self.data_obj.next_batch(self.t_in_ids, self.t_in_masks, self.t_seg_ids, self.t_lab_ids):\n",
    "                    loss, predictions = self.model.train(sess, batch)\n",
    "\n",
    "                    acc, recall, prec, f_beta = get_multi_metrics(pred_y=predictions, true_y=batch[\"label_ids\"],\n",
    "                                                                  labels=self.label_list)\n",
    "                    print(\"train: step: {}, loss: {}, acc: {}, recall: {}, precision: {}, f_beta: {}\".format(\n",
    "                        current_step, loss, acc, recall, prec, f_beta))\n",
    "\n",
    "                    current_step += 1\n",
    "                    if self.data_obj and current_step % self.config[\"checkpoint_every\"] == 0:\n",
    "                        # Periodic evaluation over the whole eval set.\n",
    "                        # BUG FIX: the old code also kept an `eval_aucs` list that\n",
    "                        # was never appended to, so the reported \"auc\" was always\n",
    "                        # the mean of an empty list; the dead list and the bogus\n",
    "                        # metric have been removed from the report.\n",
    "                        eval_losses = []\n",
    "                        eval_accs = []\n",
    "                        eval_recalls = []\n",
    "                        eval_precs = []\n",
    "                        eval_f_betas = []\n",
    "                        for eval_batch in self.data_obj.next_batch(self.e_in_ids, self.e_in_masks,\n",
    "                                                                   self.e_seg_ids, self.e_lab_ids):\n",
    "                            eval_loss, eval_predictions = self.model.eval(sess, eval_batch)\n",
    "\n",
    "                            eval_losses.append(eval_loss)\n",
    "\n",
    "                            acc, recall, prec, f_beta = get_multi_metrics(pred_y=eval_predictions,\n",
    "                                                                          true_y=eval_batch[\"label_ids\"],\n",
    "                                                                          labels=self.label_list)\n",
    "                            eval_accs.append(acc)\n",
    "                            eval_recalls.append(recall)\n",
    "                            eval_precs.append(prec)\n",
    "                            eval_f_betas.append(f_beta)\n",
    "                        print(\"\\n\")\n",
    "                        print(\"eval:  loss: {}, acc: {}, recall: {}, precision: {}, f_beta: {}\".format(\n",
    "                            mean(eval_losses), mean(eval_accs), mean(eval_recalls),\n",
    "                            mean(eval_precs), mean(eval_f_betas)))\n",
    "                        print(\"\\n\")\n",
    "\n",
    "                        if self.config[\"ckpt_model_path\"]:\n",
    "                            print(\"save check point.\")\n",
    "                            save_path = self.config[\"ckpt_model_path\"]\n",
    "                            if not os.path.exists(save_path):\n",
    "                                os.makedirs(save_path)\n",
    "                            model_save_path = os.path.join(save_path, self.config[\"model_name\"])\n",
    "                            self.model.saver.save(sess, model_save_path, global_step=current_step)\n",
    "                        else:\n",
    "                            print(\"no ckpt_model_path\")\n",
    "\n",
    "            end = time.time()\n",
    "            print(\"total train time: \", end - start)\n",
    "\n",
    "def run_trainer(config: dict):\n",
    "    \"\"\"Convenience entry point: build a Trainer from ``config`` and run it.\"\"\"\n",
    "    Trainer(config).train()"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "DBej1t4Tv3pq",
    "colab_type": "text"
   },
   "source": [
    "### Predictor"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "g4SWGleQvyhf",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "import json\n",
    "import os\n",
    "import sys\n",
    "sys.path.append(os.path.abspath(os.path.dirname(os.getcwd())))\n",
    "\n",
    "import tensorflow as tf\n",
    "from bert import tokenization\n",
    "\n",
    "\n",
    "class Predictor(object):\n",
    "    \"\"\"Loads a trained BertClassifier checkpoint and predicts labels for raw text.\"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        self.model = None\n",
    "        self.config = config\n",
    "\n",
    "        self.output_path = config[\"output_path\"]\n",
    "        self.vocab_path = os.path.join(config[\"bert_model_path\"], \"vocab.txt\")\n",
    "        self.label_to_index = self.load_vocab()\n",
    "        self.index_to_label = {value: key for key, value in self.label_to_index.items()}\n",
    "        self.word_vectors = None\n",
    "        self.sequence_length = self.config[\"sequence_length\"]\n",
    "\n",
    "        # Build the tokenizer once; the original re-created it inside\n",
    "        # sentence_to_idx on every predict() call, re-reading vocab.txt each time.\n",
    "        self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_path, do_lower_case=True)\n",
    "\n",
    "        # Build the inference graph\n",
    "        self.create_model()\n",
    "        # Restore the trained weights into the graph\n",
    "        self.load_graph()\n",
    "\n",
    "    def load_vocab(self):\n",
    "        \"\"\"Load the label -> index mapping written during training.\n",
    "\n",
    "        :return: dict mapping label string to integer index\n",
    "        \"\"\"\n",
    "        # Encoding specified explicitly: the labels are Chinese text and the\n",
    "        # platform-default codec is not guaranteed to decode them.\n",
    "        with open(os.path.join(self.output_path, \"label_to_index.json\"), \"r\", encoding=\"utf-8\") as f:\n",
    "            label_to_index = json.load(f)\n",
    "\n",
    "        return label_to_index\n",
    "\n",
    "    def padding(self, input_id, input_mask, segment_id):\n",
    "        \"\"\"Zero-pad or truncate the three id sequences to ``sequence_length``.\n",
    "\n",
    "        :param input_id: list of token ids\n",
    "        :param input_mask: list of 1s marking real tokens\n",
    "        :param segment_id: list of segment ids (all zeros for a single sentence)\n",
    "        :return: tuple of the three sequences, each exactly sequence_length long\n",
    "        \"\"\"\n",
    "        if len(input_id) < self.sequence_length:\n",
    "            pad_input_id = input_id + [0] * (self.sequence_length - len(input_id))\n",
    "            pad_input_mask = input_mask + [0] * (self.sequence_length - len(input_mask))\n",
    "            pad_segment_id = segment_id + [0] * (self.sequence_length - len(segment_id))\n",
    "        else:\n",
    "            pad_input_id = input_id[:self.sequence_length]\n",
    "            pad_input_mask = input_mask[:self.sequence_length]\n",
    "            pad_segment_id = segment_id[:self.sequence_length]\n",
    "\n",
    "        return pad_input_id, pad_input_mask, pad_segment_id\n",
    "\n",
    "    def sentence_to_idx(self, text):\n",
    "        \"\"\"Convert raw text to BERT input features (ids, mask, segment ids).\n",
    "\n",
    "        :param text: raw input string\n",
    "        :return: three single-element batches: [input_id], [input_mask], [segment_id]\n",
    "        \"\"\"\n",
    "        text = tokenization.convert_to_unicode(text)\n",
    "        tokens = self.tokenizer.tokenize(text)\n",
    "        tokens = [\"[CLS]\"] + tokens + [\"[SEP]\"]\n",
    "        input_id = self.tokenizer.convert_tokens_to_ids(tokens)\n",
    "        input_mask = [1] * len(input_id)\n",
    "        segment_id = [0] * len(input_id)\n",
    "\n",
    "        input_id, input_mask, segment_id = self.padding(input_id, input_mask, segment_id)\n",
    "\n",
    "        return [input_id], [input_mask], [segment_id]\n",
    "\n",
    "    def load_graph(self):\n",
    "        \"\"\"Create a session and restore the latest checkpoint into the model.\n",
    "\n",
    "        :raises ValueError: if no usable checkpoint exists at config[\"ckpt_model_path\"]\n",
    "        \"\"\"\n",
    "        self.sess = tf.Session()\n",
    "        ckpt = tf.train.get_checkpoint_state(self.config[\"ckpt_model_path\"])\n",
    "        if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n",
    "            print('Reloading model parameters..')\n",
    "            self.model.saver.restore(self.sess, ckpt.model_checkpoint_path)\n",
    "        else:\n",
    "            raise ValueError('No such file:[{}]'.format(self.config[\"ckpt_model_path\"]))\n",
    "\n",
    "    def create_model(self):\n",
    "        \"\"\"Instantiate the classifier (inference mode) selected by the config.\"\"\"\n",
    "        self.model = BertClassifier(config=self.config, is_training=False)\n",
    "\n",
    "    def predict(self, text):\n",
    "        \"\"\"Predict the label for a raw text string.\n",
    "\n",
    "        :param text: input sentence\n",
    "        :return: predicted label string\n",
    "        \"\"\"\n",
    "        input_ids, input_masks, segment_ids = self.sentence_to_idx(text)\n",
    "\n",
    "        prediction = self.model.infer(self.sess,\n",
    "                                      dict(input_ids=input_ids,\n",
    "                                           input_masks=input_masks,\n",
    "                                           segment_ids=segment_ids)).tolist()[0]\n",
    "        label = self.index_to_label[prediction]\n",
    "        return label\n",
    "\n"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ea5g0C1vt5t9",
    "colab_type": "text"
   },
   "source": [
    "## 前面的全部一梭子运行。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "5Z0yTDN2t8Qz",
    "colab_type": "text"
   },
   "source": [
    "## 下载模型和测试集"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "zt5xET56tx9h",
    "colab_type": "code",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 917
    },
    "outputId": "7d0eea9d-64de-4766-e0ec-70fb85eade03"
   },
   "source": [
    "# Create working directories; -p makes this idempotent so the cell is\n",
    "# safe under Restart & Run All (plain mkdir errors if the dirs exist).\n",
    "!mkdir -p output data ckpt_model bert_model\n",
    "\n",
    "# Pre-trained Chinese BERT model; -nc (no-clobber) skips the download if the\n",
    "# file is already present instead of saving a duplicate *.1 copy.\n",
    "!cd bert_model && wget -nc https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip\n",
    "!unzip -n -d bert_model/ bert_model/chinese_L-12_H-768_A-12.zip\n",
    "\n",
    "# Training / evaluation data (-nc: skip if already downloaded).\n",
    "!cd data && wget -nc https://github.com/Deali-Axy/bert-chinese-classifier/raw/master/data/train.txt\n",
    "!cd data && wget -nc https://github.com/Deali-Axy/bert-chinese-classifier/raw/master/data/test.txt"
   ],
   "execution_count": 7,
   "outputs": [
    {
     "output_type": "stream",
     "text": [
      "--2020-03-18 04:07:17--  https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip\n",
      "Resolving storage.googleapis.com (storage.googleapis.com)... 173.194.216.128, 2607:f8b0:400c:c12::80\n",
      "Connecting to storage.googleapis.com (storage.googleapis.com)|173.194.216.128|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 381892918 (364M) [application/zip]\n",
      "Saving to: ‘chinese_L-12_H-768_A-12.zip’\n",
      "\n",
      "chinese_L-12_H-768_ 100%[===================>] 364.20M   218MB/s    in 1.7s    \n",
      "\n",
      "2020-03-18 04:07:19 (218 MB/s) - ‘chinese_L-12_H-768_A-12.zip’ saved [381892918/381892918]\n",
      "\n",
      "Archive:  bert_model/chinese_L-12_H-768_A-12.zip\n",
      "   creating: bert_model/chinese_L-12_H-768_A-12/\n",
      "  inflating: bert_model/chinese_L-12_H-768_A-12/bert_model.ckpt.meta  \n",
      "  inflating: bert_model/chinese_L-12_H-768_A-12/bert_model.ckpt.data-00000-of-00001  \n",
      "  inflating: bert_model/chinese_L-12_H-768_A-12/vocab.txt  \n",
      "  inflating: bert_model/chinese_L-12_H-768_A-12/bert_model.ckpt.index  \n",
      "  inflating: bert_model/chinese_L-12_H-768_A-12/bert_config.json  \n",
      "--2020-03-18 04:07:28--  https://github.com/Deali-Axy/bert-chinese-classifier/raw/master/data/train.txt\n",
      "Resolving github.com (github.com)... 140.82.114.3\n",
      "Connecting to github.com (github.com)|140.82.114.3|:443... connected.\n",
      "HTTP request sent, awaiting response... 302 Found\n",
      "Location: https://raw.githubusercontent.com/Deali-Axy/bert-chinese-classifier/master/data/train.txt [following]\n",
      "--2020-03-18 04:07:29--  https://raw.githubusercontent.com/Deali-Axy/bert-chinese-classifier/master/data/train.txt\n",
      "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\n",
      "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 104544 (102K) [text/plain]\n",
      "Saving to: ‘train.txt’\n",
      "\n",
      "train.txt           100%[===================>] 102.09K  --.-KB/s    in 0.04s   \n",
      "\n",
      "2020-03-18 04:07:29 (2.71 MB/s) - ‘train.txt’ saved [104544/104544]\n",
      "\n",
      "--2020-03-18 04:07:31--  https://github.com/Deali-Axy/bert-chinese-classifier/raw/master/data/test.txt\n",
      "Resolving github.com (github.com)... 140.82.113.3\n",
      "Connecting to github.com (github.com)|140.82.113.3|:443... connected.\n",
      "HTTP request sent, awaiting response... 302 Found\n",
      "Location: https://raw.githubusercontent.com/Deali-Axy/bert-chinese-classifier/master/data/test.txt [following]\n",
      "--2020-03-18 04:07:31--  https://raw.githubusercontent.com/Deali-Axy/bert-chinese-classifier/master/data/test.txt\n",
      "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.0.133, 151.101.64.133, 151.101.128.133, ...\n",
      "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.0.133|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 35337 (35K) [text/plain]\n",
      "Saving to: ‘test.txt’\n",
      "\n",
      "test.txt            100%[===================>]  34.51K  --.-KB/s    in 0.01s   \n",
      "\n",
      "2020-03-18 04:07:31 (2.48 MB/s) - ‘test.txt’ saved [35337/35337]\n",
      "\n"
     ],
     "name": "stdout"
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ejhpdWQuyhX2",
    "colab_type": "text"
   },
   "source": [
    "## 配置"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "6oiBR1S6ykPm",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    "# Hyper-parameters and paths consumed by Trainer / TrainData.\n",
    "train_config={\n",
    "  \"model_name\": \"classifier\",\n",
    "  \"epochs\": 10,\n",
    "  \"checkpoint_every\": 10,  # evaluate and save a checkpoint every N train steps\n",
    "  \"eval_every\": 10,\n",
    "  \"learning_rate\": 5e-5,\n",
    "  \"sequence_length\": 128,  # max token length; longer inputs are truncated\n",
    "  \"batch_size\": 32,\n",
    "  \"num_classes\": 28,\n",
    "  \"warmup_rate\": 0.1,  # fraction of total train steps used for LR warmup\n",
    "  \"output_path\": \"output/\",\n",
    "  \"bert_model_path\": \"bert_model/chinese_L-12_H-768_A-12\",\n",
    "  \"train_data\": \"data/train.txt\",\n",
    "  \"eval_data\": \"data/test.txt\",\n",
    "  \"ckpt_model_path\": \"ckpt_model/\"\n",
    "}"
   ],
   "execution_count": 0,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "ip1nBKGot2KI",
    "colab_type": "text"
   },
   "source": [
    "## 训练\n"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "O5IxWgyDc8rx",
    "colab_type": "code",
    "outputId": "6161e0d2-c9e5-4886-9e3a-cd9942f161d4",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    }
   },
   "source": [
    "run_trainer(train_config)"
   ],
   "execution_count": 0,
   "outputs": [
    {
     "output_type": "stream",
     "text": [
      "read finished\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/tokenization.py:125: The name tf.gfile.GFile is deprecated. Please use tf.io.gfile.GFile instead.\n",
      "\n",
      "index transform finished\n",
      "label index transform finished\n",
      "line 0: *****************************************\n",
      "input:  方媛悲伤发文悼念离世婆婆，晒婆媳早前合照亲如母女   \n",
      "input_id:  [101, 3175, 2056, 2650, 839, 1355, 3152, 2656, 2573, 4895, 686, 2038, 2038, 8024, 3235, 2038, 2060, 3193, 1184, 1394, 4212, 779, 1963, 3678, 1957, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  23\n",
      "line 1: *****************************************\n",
      "input:  巴克利：MVP投票白痴才选詹姆斯 雄鹿第二好球员是米德尔顿   \n",
      "input_id:  [101, 2349, 1046, 1164, 8038, 9505, 2832, 4873, 4635, 4590, 2798, 6848, 6285, 1990, 3172, 7413, 7922, 5018, 753, 1962, 4413, 1447, 3221, 5101, 2548, 2209, 7561, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  15\n",
      "line 2: *****************************************\n",
      "input:  易烊千玺进入“班级群”，大家刷屏欢迎，粉丝却因“头像”吃醋不已   \n",
      "input_id:  [101, 3211, 4165, 1283, 4389, 6822, 1057, 100, 4408, 5277, 5408, 100, 8024, 1920, 2157, 1170, 2242, 3614, 6816, 8024, 5106, 692, 1316, 1728, 100, 1928, 1008, 100, 1391, 7005, 679, 2347, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  23\n",
      "line 3: *****************************************\n",
      "input:  张文宏没评上先进？原因不是可以乱猜的   \n",
      "input_id:  [101, 2476, 3152, 2131, 3766, 6397, 677, 1044, 6822, 8043, 1333, 1728, 679, 3221, 1377, 809, 744, 4339, 4638, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  0\n",
      "line 4: *****************************************\n",
      "input:  “梅姨案”被拐孩子申聪现状：养父母长期分居 家里有三个孩子   \n",
      "input_id:  [101, 100, 3449, 2007, 3428, 100, 6158, 2866, 2111, 2094, 4509, 5473, 4385, 4307, 8038, 1075, 4266, 3678, 7270, 3309, 1146, 2233, 2157, 7027, 3300, 676, 702, 2111, 2094, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  11\n",
      "read finished\n",
      "index transform finished\n",
      "label index transform finished\n",
      "line 0: *****************************************\n",
      "input:  崇祯皇帝误杀了一个人，等于自断了一条手臂，不然不会输得这样惨   \n",
      "input_id:  [101, 2300, 4875, 4640, 2370, 6428, 3324, 749, 671, 702, 782, 8024, 5023, 754, 5632, 3171, 749, 671, 3340, 2797, 5619, 8024, 679, 4197, 679, 833, 6783, 2533, 6821, 3416, 2673, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  2\n",
      "line 1: *****************************************\n",
      "input:  用古文笔法，以《山海经》视角看日本   \n",
      "input_id:  [101, 4500, 1367, 3152, 5011, 3791, 8024, 809, 517, 2255, 3862, 5307, 518, 6228, 6235, 4692, 3189, 3315, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  2\n",
      "line 2: *****************************************\n",
      "input:  古希腊雅典与斯巴达两强争霸之前，制度不同，国家气质也就不一了   \n",
      "input_id:  [101, 1367, 2361, 5572, 7414, 1073, 680, 3172, 2349, 6809, 697, 2487, 751, 7464, 722, 1184, 8024, 1169, 2428, 679, 1398, 8024, 1744, 2157, 3698, 6574, 738, 2218, 679, 671, 749, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  2\n",
      "line 3: *****************************************\n",
      "input:  晚天萧索，断蓬踪迹   \n",
      "input_id:  [101, 3241, 1921, 5854, 5164, 8024, 3171, 5908, 6679, 6839, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  2\n",
      "line 4: *****************************************\n",
      "input:  重庆渔民江中捞出废铁，论斤卖了65元，专家鉴定后：价值至少3亿   \n",
      "input_id:  [101, 7028, 2412, 3934, 3696, 3736, 704, 2937, 1139, 2426, 7188, 8024, 6389, 3165, 1297, 749, 8284, 1039, 8024, 683, 2157, 7063, 2137, 1400, 8038, 817, 966, 5635, 2208, 124, 783, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "segment_id:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_id:  20\n",
      "train data size: 1101\n",
      "eval data size: 368\n",
      "label numbers:  28\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/modeling.py:171: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n",
      "\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/modeling.py:409: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.\n",
      "\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/modeling.py:490: The name tf.assert_less_equal is deprecated. Please use tf.compat.v1.assert_less_equal instead.\n",
      "\n",
      "WARNING:tensorflow:\n",
      "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n",
      "For more information, please see:\n",
      "  * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n",
      "  * https://github.com/tensorflow/addons\n",
      "  * https://github.com/tensorflow/io (for I/O related ops)\n",
      "If you depend on functionality not listed there, please file an issue.\n",
      "\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/modeling.py:358: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/modeling.py:671: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use keras.layers.Dense instead.\n",
      "WARNING:tensorflow:From /tensorflow-1.15.0/python3.6/tensorflow_core/python/layers/core.py:187: Layer.apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Please use `layer.__call__` method instead.\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/optimization.py:27: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n",
      "\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/optimization.py:32: The name tf.train.polynomial_decay is deprecated. Please use tf.compat.v1.train.polynomial_decay instead.\n",
      "\n",
      "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/bert/optimization.py:70: The name tf.trainable_variables is deprecated. Please use tf.compat.v1.trainable_variables instead.\n",
      "\n",
      "WARNING:tensorflow:From /tensorflow-1.15.0/python3.6/tensorflow_core/python/ops/math_grad.py:1375: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use tf.where in 2.0, which has the same broadcast rule as np.where\n",
      "init bert model params\n",
      "init bert model params done\n",
      "----- Epoch 1/10 -----\n",
      "train: step: 0, loss: 3.3980650901794434, acc: 0.0625, recall: 0.03571428571428571, precision: 0.003105590062111801, f_beta: 0.005714285714285714\n",
      "train: step: 1, loss: 3.5460734367370605, acc: 0.0625, recall: 0.07142857142857142, precision: 0.013227513227513227, f_beta: 0.02040816326530612\n",
      "train: step: 2, loss: 3.3068253993988037, acc: 0.0625, recall: 0.03968253968253969, precision: 0.03720238095238095, f_beta: 0.009999999999999998\n",
      "train: step: 3, loss: 3.30191707611084, acc: 0.09375, recall: 0.03571428571428571, precision: 0.005357142857142857, f_beta: 0.009316770186335404\n",
      "train: step: 4, loss: 3.354055881500244, acc: 0.09375, recall: 0.0625, precision: 0.07857142857142858, f_beta: 0.049999999999999996\n",
      "train: step: 5, loss: 3.3095569610595703, acc: 0.03125, recall: 0.0071428571428571435, precision: 0.011904761904761904, f_beta: 0.008928571428571428\n",
      "train: step: 6, loss: 3.1946890354156494, acc: 0.125, recall: 0.047619047619047616, precision: 0.01768707482993197, f_beta: 0.017857142857142856\n",
      "train: step: 7, loss: 3.3505945205688477, acc: 0.0625, recall: 0.05357142857142857, precision: 0.006222943722943723, f_beta: 0.011054421768707483\n",
      "train: step: 8, loss: 3.13468074798584, acc: 0.15625, recall: 0.05357142857142857, precision: 0.025793650793650792, f_beta: 0.03084415584415584\n",
      "train: step: 9, loss: 3.129467725753784, acc: 0.1875, recall: 0.07142857142857142, precision: 0.06666666666666667, f_beta: 0.04829545454545454\n",
      "\n",
      "\n",
      "eval:  loss: 3.100596774708141, acc: 0.14204545454545456, auc: 0, recall: 0.07911255411255412, precision: 0.042692435802355815, f_beta: 0.042617118472733445\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 10, loss: 2.9081735610961914, acc: 0.21875, recall: 0.08333333333333334, precision: 0.05240683229813665, f_beta: 0.044897959183673466\n",
      "train: step: 11, loss: 2.935898780822754, acc: 0.21875, recall: 0.03571428571428571, precision: 0.011904761904761904, f_beta: 0.017857142857142856\n",
      "train: step: 12, loss: 2.846654176712036, acc: 0.21875, recall: 0.11904761904761904, precision: 0.10331632653061225, f_beta: 0.08273809523809524\n",
      "train: step: 13, loss: 2.1628499031066895, acc: 0.625, recall: 0.15178571428571427, precision: 0.12612781954887217, f_beta: 0.1313912661838929\n",
      "train: step: 14, loss: 2.3304877281188965, acc: 0.46875, recall: 0.20238095238095236, precision: 0.1463032581453634, f_beta: 0.15654761904761902\n",
      "train: step: 15, loss: 2.45156192779541, acc: 0.40625, recall: 0.2130952380952381, precision: 0.14418498168498167, f_beta: 0.1596938775510204\n",
      "train: step: 16, loss: 2.219454765319824, acc: 0.59375, recall: 0.26785714285714285, precision: 0.23469387755102042, f_beta: 0.23342490842490843\n",
      "train: step: 17, loss: 1.935562252998352, acc: 0.65625, recall: 0.29563492063492064, precision: 0.28095238095238095, f_beta: 0.2741071428571428\n",
      "train: step: 18, loss: 2.031770706176758, acc: 0.65625, recall: 0.22202380952380954, precision: 0.2398809523809524, f_beta: 0.21488095238095237\n",
      "train: step: 19, loss: 1.9162944555282593, acc: 0.6875, recall: 0.3928571428571428, precision: 0.3720238095238095, f_beta: 0.36445578231292514\n",
      "\n",
      "\n",
      "eval:  loss: 2.146301258694042, acc: 0.4971590909090909, auc: 0, recall: 0.2990530303030303, precision: 0.2474734590806019, f_beta: 0.2461803167971999\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 20, loss: 1.7381162643432617, acc: 0.78125, recall: 0.40773809523809523, precision: 0.37976190476190474, f_beta: 0.3825963718820861\n",
      "train: step: 21, loss: 1.9783741235733032, acc: 0.5, recall: 0.28273809523809523, precision: 0.2767857142857143, f_beta: 0.2648809523809524\n",
      "train: step: 22, loss: 1.7605961561203003, acc: 0.625, recall: 0.26785714285714285, precision: 0.2398809523809524, f_beta: 0.23394145537002678\n",
      "train: step: 23, loss: 1.7810711860656738, acc: 0.5, recall: 0.3071428571428571, precision: 0.3178571428571428, f_beta: 0.2983843537414966\n",
      "train: step: 24, loss: 1.4769631624221802, acc: 0.78125, recall: 0.30853174603174605, precision: 0.26746031746031745, f_beta: 0.2834467120181406\n",
      "train: step: 25, loss: 1.7830467224121094, acc: 0.625, recall: 0.26785714285714285, precision: 0.2308673469387755, f_beta: 0.22976190476190478\n",
      "train: step: 26, loss: 1.3705717325210571, acc: 0.78125, recall: 0.35714285714285715, precision: 0.29791666666666666, f_beta: 0.31388888888888883\n",
      "train: step: 27, loss: 1.7516348361968994, acc: 0.5625, recall: 0.3094387755102041, precision: 0.2833333333333333, f_beta: 0.25902014652014654\n",
      "train: step: 28, loss: 1.1800661087036133, acc: 0.84375, recall: 0.4255952380952381, precision: 0.45089285714285715, f_beta: 0.4318942961800105\n",
      "train: step: 29, loss: 1.4023427963256836, acc: 0.71875, recall: 0.306547619047619, precision: 0.31875000000000003, f_beta: 0.3017355660212803\n",
      "\n",
      "\n",
      "eval:  loss: 1.759208863431757, acc: 0.5653409090909091, auc: 0, recall: 0.3249458874458874, precision: 0.2792851989280561, f_beta: 0.27893639549483706\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 30, loss: 1.0877569913864136, acc: 0.8125, recall: 0.34523809523809523, precision: 0.2967261904761905, f_beta: 0.30892857142857144\n",
      "train: step: 31, loss: 1.6286101341247559, acc: 0.625, recall: 0.3482142857142857, precision: 0.32142857142857145, f_beta: 0.30453514739229026\n",
      "train: step: 32, loss: 0.9576531648635864, acc: 0.75, recall: 0.3678571428571429, precision: 0.2994047619047619, f_beta: 0.31428571428571433\n",
      "train: step: 33, loss: 0.9133843779563904, acc: 0.84375, recall: 0.4345238095238095, precision: 0.4047619047619047, f_beta: 0.4035714285714286\n",
      "----- Epoch 2/10 -----\n",
      "train: step: 34, loss: 1.2152464389801025, acc: 0.71875, recall: 0.44345238095238093, precision: 0.41369047619047616, f_beta: 0.4106137909709338\n",
      "train: step: 35, loss: 0.8681966066360474, acc: 0.75, recall: 0.2996031746031746, precision: 0.34523809523809523, f_beta: 0.3109943977591036\n",
      "train: step: 36, loss: 1.099592924118042, acc: 0.6875, recall: 0.36607142857142855, precision: 0.33333333333333337, f_beta: 0.33333333333333337\n",
      "train: step: 37, loss: 1.0755914449691772, acc: 0.71875, recall: 0.45535714285714285, precision: 0.37882653061224486, f_beta: 0.39189560439560445\n",
      "train: step: 38, loss: 1.133003830909729, acc: 0.78125, recall: 0.38690476190476186, precision: 0.3937074829931973, f_beta: 0.3805860805860806\n",
      "train: step: 39, loss: 1.0388154983520508, acc: 0.75, recall: 0.3380952380952381, precision: 0.28095238095238095, f_beta: 0.29126984126984123\n",
      "\n",
      "\n",
      "eval:  loss: 1.3528622605583884, acc: 0.6619318181818182, auc: 0, recall: 0.36705318491032773, precision: 0.36617965367965366, f_beta: 0.34921993590824757\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 40, loss: 1.0587565898895264, acc: 0.8125, recall: 0.45238095238095244, precision: 0.41964285714285715, f_beta: 0.4329931972789116\n",
      "train: step: 41, loss: 1.1530351638793945, acc: 0.78125, recall: 0.42857142857142855, precision: 0.39285714285714285, f_beta: 0.39846938775510204\n",
      "train: step: 42, loss: 0.912286639213562, acc: 0.75, recall: 0.431547619047619, precision: 0.41666666666666663, f_beta: 0.4118893011750155\n",
      "train: step: 43, loss: 1.0034897327423096, acc: 0.8125, recall: 0.3678571428571429, precision: 0.3571428571428571, f_beta: 0.35555555555555557\n",
      "train: step: 44, loss: 0.8146212100982666, acc: 0.8125, recall: 0.38392857142857145, precision: 0.35119047619047616, f_beta: 0.3520408163265306\n",
      "train: step: 45, loss: 0.8822551369667053, acc: 0.8125, recall: 0.3639455782312925, precision: 0.3392857142857143, f_beta: 0.3448717948717949\n",
      "train: step: 46, loss: 0.4767836630344391, acc: 0.90625, recall: 0.4166666666666667, precision: 0.380952380952381, f_beta: 0.3904761904761905\n",
      "train: step: 47, loss: 1.3038254976272583, acc: 0.65625, recall: 0.39710884353741494, precision: 0.369047619047619, f_beta: 0.35719954648526075\n",
      "train: step: 48, loss: 0.8437767624855042, acc: 0.75, recall: 0.32440476190476186, precision: 0.3452380952380953, f_beta: 0.3270408163265306\n",
      "train: step: 49, loss: 1.1698901653289795, acc: 0.71875, recall: 0.3333333333333333, precision: 0.3154761904761904, f_beta: 0.3120748299319728\n",
      "\n",
      "\n",
      "eval:  loss: 1.3497835831208662, acc: 0.6590909090909091, auc: 0, recall: 0.3710923005565862, precision: 0.371969696969697, f_beta: 0.35569025130713444\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 50, loss: 0.7894954085350037, acc: 0.8125, recall: 0.4035714285714286, precision: 0.38095238095238093, f_beta: 0.3876984126984127\n",
      "train: step: 51, loss: 0.7669597268104553, acc: 0.8125, recall: 0.3898809523809524, precision: 0.3541666666666667, f_beta: 0.3564625850340136\n",
      "train: step: 52, loss: 0.774948000907898, acc: 0.8125, recall: 0.42857142857142855, precision: 0.4255952380952381, f_beta: 0.4226035868893012\n",
      "train: step: 53, loss: 0.9611626267433167, acc: 0.78125, recall: 0.36904761904761907, precision: 0.3628246753246754, f_beta: 0.33996598639455783\n",
      "train: step: 54, loss: 0.6262866258621216, acc: 0.875, recall: 0.4970238095238095, precision: 0.4880952380952381, f_beta: 0.4829931972789115\n",
      "train: step: 55, loss: 1.2035330533981323, acc: 0.75, recall: 0.38571428571428573, precision: 0.3410714285714285, f_beta: 0.3556122448979592\n",
      "train: step: 56, loss: 0.831573486328125, acc: 0.8125, recall: 0.4321428571428571, precision: 0.38690476190476186, f_beta: 0.393452380952381\n",
      "train: step: 57, loss: 1.2871960401535034, acc: 0.65625, recall: 0.3339285714285714, precision: 0.3244047619047619, f_beta: 0.3121598639455782\n",
      "train: step: 58, loss: 0.7513810396194458, acc: 0.875, recall: 0.4642857142857143, precision: 0.41666666666666663, f_beta: 0.4333333333333334\n",
      "train: step: 59, loss: 1.0674928426742554, acc: 0.65625, recall: 0.33333333333333337, precision: 0.3392857142857143, f_beta: 0.327891156462585\n",
      "\n",
      "\n",
      "eval:  loss: 1.291904259811748, acc: 0.6534090909090909, auc: 0, recall: 0.35162337662337656, precision: 0.36408472479901044, f_beta: 0.34568645640074214\n",
      "\n",
      "\n",
      "save check point.\n",
      "WARNING:tensorflow:From /tensorflow-1.15.0/python3.6/tensorflow_core/python/training/saver.py:963: remove_checkpoint (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to delete files with this prefix.\n",
      "train: step: 60, loss: 0.7792361974716187, acc: 0.84375, recall: 0.4672619047619047, precision: 0.4613095238095238, f_beta: 0.4568722943722943\n",
      "train: step: 61, loss: 0.7662951946258545, acc: 0.84375, recall: 0.42857142857142855, precision: 0.4345238095238096, f_beta: 0.42738095238095236\n",
      "train: step: 62, loss: 0.9178655743598938, acc: 0.8125, recall: 0.42261904761904756, precision: 0.4047619047619047, f_beta: 0.40238095238095234\n",
      "train: step: 63, loss: 1.0597680807113647, acc: 0.78125, recall: 0.34523809523809523, precision: 0.32023809523809527, f_beta: 0.3308802308802309\n",
      "train: step: 64, loss: 0.784172773361206, acc: 0.90625, recall: 0.5178571428571429, precision: 0.4988095238095238, f_beta: 0.5007936507936508\n",
      "train: step: 65, loss: 1.096909999847412, acc: 0.75, recall: 0.3630952380952381, precision: 0.3696428571428571, f_beta: 0.3548185941043084\n",
      "train: step: 66, loss: 0.7011700868606567, acc: 0.8125, recall: 0.5238095238095238, precision: 0.5357142857142857, f_beta: 0.5226190476190476\n",
      "train: step: 67, loss: 1.1956334114074707, acc: 0.71875, recall: 0.3819444444444445, precision: 0.3392857142857143, f_beta: 0.3372023809523809\n",
      "----- Epoch 3/10 -----\n",
      "train: step: 68, loss: 0.7497074007987976, acc: 0.8125, recall: 0.42261904761904756, precision: 0.4154761904761905, f_beta: 0.4057359307359308\n",
      "train: step: 69, loss: 0.6587712168693542, acc: 0.90625, recall: 0.4642857142857143, precision: 0.45535714285714285, f_beta: 0.45918367346938777\n",
      "\n",
      "\n",
      "eval:  loss: 1.3846888975663618, acc: 0.6619318181818182, auc: 0, recall: 0.3365607606679035, precision: 0.33007627293341574, f_beta: 0.31770754498027226\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 70, loss: 0.7719834446907043, acc: 0.78125, recall: 0.4226190476190476, precision: 0.4642857142857143, f_beta: 0.42261904761904756\n",
      "train: step: 71, loss: 0.41077619791030884, acc: 0.96875, recall: 0.4166666666666667, precision: 0.4107142857142857, f_beta: 0.40952380952380946\n",
      "train: step: 72, loss: 0.5167752504348755, acc: 0.875, recall: 0.5392857142857143, precision: 0.5148809523809523, f_beta: 0.5145408163265306\n",
      "train: step: 73, loss: 0.8455445766448975, acc: 0.75, recall: 0.3761904761904762, precision: 0.3779761904761905, f_beta: 0.3520408163265306\n",
      "train: step: 74, loss: 0.6128225326538086, acc: 0.84375, recall: 0.4107142857142857, precision: 0.4047619047619047, f_beta: 0.4023809523809524\n",
      "train: step: 75, loss: 0.4067264199256897, acc: 0.90625, recall: 0.5357142857142857, precision: 0.5166666666666667, f_beta: 0.5246031746031746\n",
      "train: step: 76, loss: 0.5866496562957764, acc: 0.90625, recall: 0.48809523809523814, precision: 0.48214285714285715, f_beta: 0.48095238095238096\n",
      "train: step: 77, loss: 0.5500006675720215, acc: 0.84375, recall: 0.40714285714285714, precision: 0.3958333333333333, f_beta: 0.39387755102040817\n",
      "train: step: 78, loss: 0.7672551274299622, acc: 0.78125, recall: 0.42857142857142855, precision: 0.42857142857142855, f_beta: 0.4166666666666666\n",
      "train: step: 79, loss: 0.6397721171379089, acc: 0.90625, recall: 0.39285714285714285, precision: 0.3988095238095238, f_beta: 0.3872294372294372\n",
      "\n",
      "\n",
      "eval:  loss: 1.1199907389554111, acc: 0.6960227272727273, auc: 0, recall: 0.37721861471861473, precision: 0.3681199752628324, f_beta: 0.35632567288411443\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 80, loss: 0.6267534494400024, acc: 0.84375, recall: 0.48214285714285715, precision: 0.4464285714285715, f_beta: 0.452891156462585\n",
      "train: step: 81, loss: 0.7001509070396423, acc: 0.84375, recall: 0.5297619047619048, precision: 0.5327380952380952, f_beta: 0.5166666666666667\n",
      "train: step: 82, loss: 0.27130764722824097, acc: 0.96875, recall: 0.5357142857142857, precision: 0.5297619047619048, f_beta: 0.5324675324675324\n",
      "train: step: 83, loss: 0.2780088186264038, acc: 0.9375, recall: 0.4642857142857143, precision: 0.4375, f_beta: 0.44727891156462585\n",
      "train: step: 84, loss: 0.16292762756347656, acc: 0.96875, recall: 0.42142857142857143, precision: 0.42857142857142855, f_beta: 0.4246031746031746\n",
      "train: step: 85, loss: 0.311647891998291, acc: 0.9375, recall: 0.5535714285714286, precision: 0.5476190476190477, f_beta: 0.5416666666666667\n",
      "train: step: 86, loss: 0.5068833827972412, acc: 0.84375, recall: 0.41964285714285715, precision: 0.4345238095238096, f_beta: 0.4163265306122449\n",
      "train: step: 87, loss: 0.597405195236206, acc: 0.875, recall: 0.49489795918367346, precision: 0.4404761904761904, f_beta: 0.4555860805860806\n",
      "train: step: 88, loss: 0.33347374200820923, acc: 0.9375, recall: 0.38392857142857145, precision: 0.380952380952381, f_beta: 0.38061224489795914\n",
      "train: step: 89, loss: 0.6730923652648926, acc: 0.78125, recall: 0.30357142857142855, precision: 0.2773809523809524, f_beta: 0.28528911564625853\n",
      "\n",
      "\n",
      "eval:  loss: 1.1759852875362744, acc: 0.7045454545454546, auc: 0, recall: 0.37878014842300556, precision: 0.3776244588744589, f_beta: 0.36626937276287924\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 90, loss: 0.363048791885376, acc: 0.90625, recall: 0.5, precision: 0.5238095238095238, f_beta: 0.5047619047619049\n",
      "train: step: 91, loss: 0.8519927263259888, acc: 0.78125, recall: 0.3511904761904762, precision: 0.3482142857142857, f_beta: 0.34251700680272107\n",
      "train: step: 92, loss: 0.2781505286693573, acc: 0.96875, recall: 0.45535714285714285, precision: 0.4642857142857143, f_beta: 0.45918367346938777\n",
      "train: step: 93, loss: 0.846942126750946, acc: 0.8125, recall: 0.375, precision: 0.3720238095238096, f_beta: 0.36190476190476184\n",
      "train: step: 94, loss: 0.754914402961731, acc: 0.8125, recall: 0.44642857142857145, precision: 0.4279761904761905, f_beta: 0.428726035868893\n",
      "train: step: 95, loss: 0.7729130983352661, acc: 0.8125, recall: 0.4583333333333333, precision: 0.49107142857142855, f_beta: 0.4639455782312925\n",
      "train: step: 96, loss: 0.7593732476234436, acc: 0.8125, recall: 0.4107142857142857, precision: 0.37142857142857144, f_beta: 0.37636054421768705\n",
      "train: step: 97, loss: 0.37887144088745117, acc: 0.90625, recall: 0.41666666666666663, precision: 0.4107142857142857, f_beta: 0.4095238095238095\n",
      "train: step: 98, loss: 0.31818753480911255, acc: 0.9375, recall: 0.48214285714285715, precision: 0.47023809523809523, f_beta: 0.46904761904761905\n",
      "train: step: 99, loss: 0.3225332796573639, acc: 0.90625, recall: 0.44642857142857145, precision: 0.4571428571428572, f_beta: 0.4484126984126985\n",
      "\n",
      "\n",
      "eval:  loss: 1.456737832589583, acc: 0.6477272727272727, auc: 0, recall: 0.350974025974026, precision: 0.3497990105132962, f_beta: 0.3339100654035719\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 100, loss: 0.5244003534317017, acc: 0.8125, recall: 0.3988095238095238, precision: 0.38690476190476186, f_beta: 0.37976190476190474\n",
      "train: step: 101, loss: 0.6250932216644287, acc: 0.875, recall: 0.39285714285714285, precision: 0.36607142857142855, f_beta: 0.36989795918367346\n",
      "----- Epoch 4/10 -----\n",
      "train: step: 102, loss: 0.22861796617507935, acc: 0.9375, recall: 0.5, precision: 0.4928571428571429, f_beta: 0.49603174603174605\n",
      "train: step: 103, loss: 0.46146002411842346, acc: 0.875, recall: 0.4880952380952381, precision: 0.44642857142857145, f_beta: 0.4588435374149659\n",
      "train: step: 104, loss: 0.3055291175842285, acc: 0.9375, recall: 0.5, precision: 0.4880952380952381, f_beta: 0.4928571428571429\n",
      "train: step: 105, loss: 0.35921967029571533, acc: 0.90625, recall: 0.47023809523809523, precision: 0.4761904761904762, f_beta: 0.4666666666666667\n",
      "train: step: 106, loss: 0.20903149247169495, acc: 0.9375, recall: 0.5238095238095238, precision: 0.5166666666666667, f_beta: 0.5174603174603175\n",
      "train: step: 107, loss: 0.5577107667922974, acc: 0.90625, recall: 0.5238095238095238, precision: 0.5059523809523809, f_beta: 0.5095238095238096\n",
      "train: step: 108, loss: 0.15838295221328735, acc: 0.96875, recall: 0.5595238095238095, precision: 0.5625, f_beta: 0.5591836734693878\n",
      "\n",
      "\n",
      "eval:  loss: 1.3914656801657244, acc: 0.6761363636363636, auc: 0, recall: 0.35465367965367967, precision: 0.3669449598021027, f_beta: 0.3424883918390412\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 110, loss: 0.07642845064401627, acc: 1.0, recall: 0.4642857142857143, precision: 0.4642857142857143, f_beta: 0.4642857142857143\n",
      "train: step: 111, loss: 0.7537034153938293, acc: 0.875, recall: 0.35714285714285715, precision: 0.325, f_beta: 0.3363095238095238\n",
      "train: step: 112, loss: 0.43738460540771484, acc: 0.90625, recall: 0.5238095238095238, precision: 0.494047619047619, f_beta: 0.5023809523809523\n",
      "train: step: 113, loss: 0.39248114824295044, acc: 0.90625, recall: 0.5059523809523809, precision: 0.4880952380952381, f_beta: 0.49166666666666675\n",
      "train: step: 114, loss: 0.37989598512649536, acc: 0.90625, recall: 0.5, precision: 0.47619047619047616, f_beta: 0.4857142857142857\n",
      "train: step: 115, loss: 0.07591186463832855, acc: 1.0, recall: 0.6071428571428571, precision: 0.6071428571428571, f_beta: 0.6071428571428571\n",
      "train: step: 116, loss: 0.45337027311325073, acc: 0.90625, recall: 0.4107142857142857, precision: 0.42142857142857143, f_beta: 0.4126984126984127\n",
      "train: step: 117, loss: 0.261650025844574, acc: 0.9375, recall: 0.47023809523809523, precision: 0.5, f_beta: 0.48095238095238096\n",
      "train: step: 118, loss: 0.231988325715065, acc: 0.96875, recall: 0.5, precision: 0.48809523809523814, f_beta: 0.4928571428571429\n",
      "train: step: 119, loss: 0.3606150448322296, acc: 0.90625, recall: 0.5, precision: 0.47500000000000003, f_beta: 0.48412698412698413\n",
      "\n",
      "\n",
      "eval:  loss: 1.3119198083877563, acc: 0.7045454545454546, auc: 0, recall: 0.381976654298083, precision: 0.3757807668521954, f_beta: 0.3640796973589181\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 120, loss: 0.34489840269088745, acc: 0.9375, recall: 0.45238095238095244, precision: 0.44642857142857145, f_beta: 0.44523809523809516\n",
      "train: step: 121, loss: 0.42838889360427856, acc: 0.90625, recall: 0.4642857142857143, precision: 0.4732142857142857, f_beta: 0.4591836734693877\n",
      "train: step: 122, loss: 0.526075005531311, acc: 0.875, recall: 0.47619047619047616, precision: 0.4583333333333333, f_beta: 0.45595238095238094\n",
      "train: step: 123, loss: 0.27397996187210083, acc: 0.875, recall: 0.45833333333333337, precision: 0.46011904761904765, f_beta: 0.44331065759637184\n",
      "train: step: 124, loss: 0.35571131110191345, acc: 0.9375, recall: 0.38690476190476186, precision: 0.39285714285714285, f_beta: 0.38961038961038963\n",
      "train: step: 125, loss: 0.2640703320503235, acc: 0.96875, recall: 0.5357142857142857, precision: 0.5238095238095238, f_beta: 0.5285714285714286\n",
      "train: step: 126, loss: 0.5452972054481506, acc: 0.875, recall: 0.5196428571428572, precision: 0.49107142857142855, f_beta: 0.4977324263038549\n",
      "train: step: 127, loss: 0.3508843183517456, acc: 0.9375, recall: 0.5, precision: 0.5178571428571429, f_beta: 0.5\n",
      "train: step: 128, loss: 0.41138696670532227, acc: 0.90625, recall: 0.5535714285714286, precision: 0.5357142857142858, f_beta: 0.5372294372294372\n",
      "train: step: 129, loss: 0.281724214553833, acc: 0.90625, recall: 0.4095238095238095, precision: 0.4107142857142857, f_beta: 0.40555555555555556\n",
      "\n",
      "\n",
      "eval:  loss: 1.4492076960476963, acc: 0.6960227272727273, auc: 0, recall: 0.35424397031539884, precision: 0.3543444650587508, f_beta: 0.3389302436380358\n",
      "\n",
      "\n",
      "save check point.\n",
      "train: step: 130, loss: 0.43215274810791016, acc: 0.875, recall: 0.45238095238095244, precision: 0.47916666666666663, f_beta: 0.45680272108843534\n",
      "train: step: 131, loss: 0.3754275441169739, acc: 0.90625, recall: 0.43928571428571433, precision: 0.4375, f_beta: 0.43140589569161003\n",
      "train: step: 132, loss: 0.6692055463790894, acc: 0.84375, recall: 0.4642857142857143, precision: 0.44642857142857145, f_beta: 0.45238095238095244\n",
      "train: step: 133, loss: 0.38255077600479126, acc: 0.875, recall: 0.5625, precision: 0.5267857142857143, f_beta: 0.5374149659863946\n",
      "train: step: 134, loss: 0.131722554564476, acc: 0.96875, recall: 0.45535714285714285, precision: 0.4642857142857143, f_beta: 0.45918367346938777\n",
      "train: step: 135, loss: 0.3620125651359558, acc: 0.90625, recall: 0.42857142857142855, precision: 0.4375, f_beta: 0.419047619047619\n",
      "----- Epoch 5/10 -----\n"
     ],
     "name": "stdout"
    }
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "trxd9aLtv-Bn",
    "colab_type": "text"
   },
   "source": [
    "## 测试\n"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "BfsM3FMvwBRE",
    "colab_type": "code",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 86,
     "referenced_widgets": [
      "6a09b3ffafad4334a73db05a5cc09bc9",
      "dbb9ea7853fe40f3ab7f54b9cf5aff9c",
      "7a8d29ef7e4a45f2907b5765cdc6531b",
      "3eee9de1adf64db8b34d59534ffb2346",
      "98021f75eb4e40a3b1b72598f82bb19f",
      "6b564c5514814bf78874dfe6d683d607"
     ]
    },
    "outputId": "628a8a84-4f9f-42f2-b2d0-288983b00dd7"
   },
   "source": [
    "pip install ipywidgets\n",
    "jupyter nbextension enable --py widgetsnbextension\n",
    "# import tensorflow as tf\n",
    "# tf.get_variable_scope().reuse_variables()\n",
    "from ipywidgets import interact\n",
    "# predictor = Predictor(train_config)\n",
    "text = \"王一博被问：是否愿意为了肖战与全世界为敌？王一博的反应太真实\"\n",
    "res = predictor.predict(text)\n",
    "interact(lambda x: f'分类结果：{x}', x=res)"
   ],
   "execution_count": 31,
   "outputs": [
    {
     "output_type": "display_data",
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6a09b3ffafad4334a73db05a5cc09bc9",
       "version_minor": 0,
       "version_major": 2
      },
      "text/plain": [
       "interactive(children=(Text(value=' 科技', description='x'), Output()), _dom_classes=('widget-interact',))"
      ]
     },
     "metadata": {
      "tags": []
     }
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "<function __main__.<lambda>>"
      ]
     },
     "metadata": {
      "tags": []
     },
     "execution_count": 31
    }
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "GOtcS_F68hXU",
    "colab_type": "code",
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 419
    },
    "outputId": "5eb0b811-b55b-4dce-efd7-26e8c5f2962e"
   },
   "source": [
    "# !pip install qgrid\n",
    "# !jupyter nbextension enable --py --sys-prefix qgrid\n",
    "# !jupyter nbextension enable --py --sys-prefix widgetsnbextension\n",
    "\n",
    "import pandas as pd\n",
    "# import qgrid\n",
    "def build_df():\n",
    "    data = []\n",
    "    with open(train_config['eval_data'], 'r', encoding='utf-8') as f:\n",
    "        for line in f.readlines():\n",
    "            line = line.split('<SEP>')\n",
    "            data.append({\n",
    "                'content': line[0].strip(),\n",
    "                'class': line[2].strip()\n",
    "            })\n",
    "    return pd.DataFrame(data)\n",
    "\n",
    "# qgrid_widgets = qgrid.show_grid(build_df(), show_toolbar=True)\n",
    "# qgrid_widgets\n",
    "build_df()\n"
   ],
   "execution_count": 37,
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>content</th>\n",
       "      <th>class</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>崇祯皇帝误杀了一个人，等于自断了一条手臂，不然不会输得这样惨</td>\n",
       "      <td>历史</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>用古文笔法，以《山海经》视角看日本</td>\n",
       "      <td>历史</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>古希腊雅典与斯巴达两强争霸之前，制度不同，国家气质也就不一了</td>\n",
       "      <td>历史</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>晚天萧索，断蓬踪迹</td>\n",
       "      <td>历史</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>重庆渔民江中捞出废铁，论斤卖了65元，专家鉴定后：价值至少3亿</td>\n",
       "      <td>文化</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>363</th>\n",
       "      <td>记者探访双井商圈：饭馆分散就餐，商场无接触购物</td>\n",
       "      <td>社会</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>364</th>\n",
       "      <td>入境瞒报频发？唐山出了个“狠招儿”，叫好无数</td>\n",
       "      <td>社会</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>365</th>\n",
       "      <td>新加坡戴口罩女子在商场晕倒，其他人镇定地吃着饭，无人上前查看</td>\n",
       "      <td>社会</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>366</th>\n",
       "      <td>黄金位置户外大屏全天滚动播放，致敬东莞最美逆行者活动还在持续</td>\n",
       "      <td>社会</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>367</th>\n",
       "      <td>武汉小区“阴阳菜价”供货商被叫停，业主联系爱心菜曾被社区拒绝</td>\n",
       "      <td>社会</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>368 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                             content class\n",
       "0     崇祯皇帝误杀了一个人，等于自断了一条手臂，不然不会输得这样惨    历史\n",
       "1                  用古文笔法，以《山海经》视角看日本    历史\n",
       "2     古希腊雅典与斯巴达两强争霸之前，制度不同，国家气质也就不一了    历史\n",
       "3                          晚天萧索，断蓬踪迹    历史\n",
       "4    重庆渔民江中捞出废铁，论斤卖了65元，专家鉴定后：价值至少3亿    文化\n",
       "..                               ...   ...\n",
       "363          记者探访双井商圈：饭馆分散就餐，商场无接触购物    社会\n",
       "364           入境瞒报频发？唐山出了个“狠招儿”，叫好无数    社会\n",
       "365   新加坡戴口罩女子在商场晕倒，其他人镇定地吃着饭，无人上前查看    社会\n",
       "366   黄金位置户外大屏全天滚动播放，致敬东莞最美逆行者活动还在持续    社会\n",
       "367   武汉小区“阴阳菜价”供货商被叫停，业主联系爱心菜曾被社区拒绝    社会\n",
       "\n",
       "[368 rows x 2 columns]"
      ]
     },
     "metadata": {
      "tags": []
     },
     "execution_count": 37
    }
   ]
  }
 ]
}