{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 第 13 章 中文实体识别实战"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 13.1 人民日报实体识别"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 13.1.1 数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import operator\n",
    "import collections\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from typing import List, Dict, Tuple\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "\n",
    "%matplotlib inline\n",
    "\n",
    "def read_net_data(file_path: str) -> Tuple[List[List[str]], List[List[str]]]:\n",
    "    x_data, y_data = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        lines = f.read().splitlines()\n",
    "        x, y = [], []\n",
    "        for line in lines:\n",
    "            rows = line.split(' ')\n",
    "            if len(rows) == 1:\n",
    "                x_data.append(x)\n",
    "                y_data.append(y)\n",
    "                x = []\n",
    "                y = []\n",
    "            else:\n",
    "                x.append(rows[0])\n",
    "                y.append(rows[1])\n",
    "    return x_data, y_data\n",
    "\n",
    "\n",
    "train_x, train_y = read_net_data('data/peoples-daily-ner/example.train')\n",
    "valid_x, valid_y = read_net_data('data/peoples-daily-ner/example.dev')\n",
    "test_x, test_y = read_net_data('data/peoples-daily-ner/example.test')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Processor(object):\n",
    "\n",
    "    def build_token_dict(self, corpus: List[List[str]]):\n",
    "        \"\"\"\n",
    "        构建 token 字典，这个方法将会遍历分词后的语料，构建一个标记频率字典和标记与索引的映射字典\n",
    "\n",
    "        Args:\n",
    "            corpus: 所有分词后的语料\n",
    "        \"\"\"\n",
    "        token2idx = {\n",
    "            '<PAD>': 0,\n",
    "            '<UNK>': 1,\n",
    "            '<BOS>': 2,\n",
    "            '<EOS>': 3\n",
    "        }\n",
    "\n",
    "        token2count = {}\n",
    "        for sentence in corpus:\n",
    "            for token in sentence:\n",
    "                count = token2count.get(token, 0)\n",
    "                token2count[token] = count + 1\n",
    "        # 按照词频降序排序\n",
    "        sorted_token2count = sorted(token2count.items(),\n",
    "                                    key=operator.itemgetter(1),\n",
    "                                    reverse=True)\n",
    "        token2count = collections.OrderedDict(sorted_token2count)\n",
    "\n",
    "        for token in token2count.keys():\n",
    "            if token not in token2idx:\n",
    "                token2idx[token] = len(token2idx)\n",
    "        return token2idx, token2count\n",
    "\n",
    "    @staticmethod\n",
    "    def numerize_sequences(sequence: List[str],\n",
    "                           token2index: Dict[str, int]) -> List[int]:\n",
    "        \"\"\"\n",
    "        将分词后的标记（token）数组转换成对应的索引数组\n",
    "        如 ['我', '想', '睡觉'] -> [10, 313, 233]\n",
    "\n",
    "        Args:\n",
    "            sequence: 分词后的标记数组\n",
    "            token2index: 索引词典\n",
    "        Returns: 输入数据对应的索引数组\n",
    "        \"\"\"\n",
    "        token_result = []\n",
    "        for token in sequence:\n",
    "            token_index = token2index.get(token)\n",
    "            if token_index is None:\n",
    "                token_index = token2index['<UNK>']\n",
    "            token_result.append(token_index)\n",
    "        return token_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "p = Processor()\n",
    "\n",
    "# 使用全部语料构建输入词表\n",
    "p.token2idx, p.token2count = p.build_token_dict(train_x + valid_x + test_x)\n",
    "\n",
    "# 构建标签词表\n",
    "label2idx = {\n",
    "    '<PAD>': 0\n",
    "}\n",
    "\n",
    "all_label_data = train_y + valid_y + test_y\n",
    "for sequence in all_label_data:\n",
    "    for label in sequence:\n",
    "        if label not in label2idx:\n",
    "            label2idx[label] = len(label2idx)\n",
    "\n",
    "p.label2idx = label2idx\n",
    "p.idx2label = dict([(v, k) for k, v in p.label2idx.items()])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYsAAAD4CAYAAAAdIcpQAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy86wFpkAAAACXBIWXMAAAsTAAALEwEAmpwYAAAV2ElEQVR4nO3df9CdZX3n8fdnk4I/Wk2AZ1k2CZu4pjrRqUpTjGO3o7ALAR3DH9SBcZesm2lmtrFrd51RaGeWWZUZ2O2UyqzSzUoqdBwipbZkFKUp0nV2pvwIgkBAyiOgSQZMNAF36xQb/e4f54oe0+fhSp5zwvNj36+ZM899f6/rPvd1kUM+uX+c505VIUnSi/lHsz0ASdLcZ1hIkroMC0lSl2EhSeoyLCRJXYtnewAzddppp9XKlStnexiSNK/cf//9362qiePdbt6GxcqVK9m1a9dsD0OS5pUk35rJdp6GkiR1GRaSpC7DQpLUZVhIkroMC0lSl2EhSeoyLCRJXYaFJKnLsJAkdc3bb3CPYuXlX5yV/T599btmZb+SNCqPLCRJXYaFJKmrGxZJtiXZn+SRo+q/leQbSXYn+a9D9SuSTCZ5PMn5Q/X1rTaZ5PKh+qok97T655KcNK7JSZLG41iOLD4DrB8uJHknsAF4U1W9Afi9Vl8DXAK8oW3zqSSLkiwCPglcAKwBLm19Aa4Brq2q1wKHgE2jTkqSNF7dsKiqrwIHjyr/e+Dqqnqh9dnf6huA7VX1QlU9BUwCZ7fXZFU9WVU/BLYDG5IEOAe4tW1/I3DRaFOSJI3bTK9Z/CLwL9rpo/+V5FdafRmwZ6jf3labrn4q8FxVHT6qLkmaQ2Z66+xi4BRgHfArwC1JXjO2UU0jyWZgM8CZZ555oncnSWpmemSxF/h8DdwL/Bg4DdgHrBjqt7zVpqt/D1iSZPFR9SlV1daqWltVaycmjvupgJKkGZppWPw58E6AJL8InAR8F9gBXJLk5CSrgNXAvcB9wOp259NJDC6C76iqAu4CLm7vuxG4bYZjkiSdIN3TUEluBt4BnJZkL3AlsA3Y1m6n/SGwsf3FvzvJLcCjwGFgS1X9qL3PB4A7gEXAtqra3XbxEWB7ko8DDwA3jHF+kqQx6IZFVV06TdO/nqb/VcBVU9RvB26fov4kg7ulJElzlN/gliR1GRaSpC7DQpLUZVhIkroMC0lSl2EhSeoyLCRJXYaFJKnLsJAkdRkWkqQuw0KS1GVYSJK6DAtJUpdhIUnqMiwkSV2GhSSpqxsWSbYl2d+eind024eSVJLT2nqSXJdkMslDSc4a6rsxyRPttXGo/stJHm7bXJck45qcJGk8juXI4jPA+qOLSVYA5wHfHipfwOC526uBzcD1re8pDB7H+lYGT8W7MsnSts31wG8MbfcP9iVJml3dsKiqrwIHp2i6FvgwUEO1DcBNNXA3sCTJGcD5wM6qOlhVh4CdwPrW9qqqurs9w/sm4KKRZiRJGrsZXbNIsgHYV1VfP6ppGbBnaH1vq71Yfe8UdUnSHLL4eDdI8grgdxicgnpJJdnM4PQWZ5555ku9e0n6/9ZMjiz+ObAK+HqSp4HlwNeS/BNgH7BiqO/yVnux+vIp6lOqqq1Vtbaq1k5MTMxg6JKkmTjusKiqh6vqH1fVyqpayeDU0VlV9SywA7is3RW1Dni+qp4B7gDOS7K0Xdg+D7ijtX0/ybp2F9RlwG1jmpskaUyO5dbZm4G/Bl6XZG+STS/S/XbgSWAS+J/AbwJU1UHgY8B97fXRVqP1+XTb5pvAl2Y2FUnSidK9ZlFVl3baVw4tF7Blmn7bgG1T1HcBb+yNQ5I0e/wGtySpy7CQJHUZFpKkLsNCktRlWEiSugwLSVKXYSFJ6jIsJEldhoUkqcuwkCR1GRaSpC7DQpLUZVhIkroMC0lSl2EhSeoyLCRJXcfypLxtSfYneWSo9t+SfCPJQ0n+LMmSobYrkkwmeTzJ+UP1
9a02meTyofqqJPe0+ueSnDTG+UmSxuBYjiw+A6w/qrYTeGNV/RLwN8AVAEnWAJcAb2jbfCrJoiSLgE8CFwBrgEtbX4BrgGur6rXAIeDFHtsqSZoF3bCoqq8CB4+q/UVVHW6rdwPL2/IGYHtVvVBVTzF4rvbZ7TVZVU9W1Q+B7cCGJAHOAW5t298IXDTalCRJ4zaOaxb/DvhSW14G7Blq29tq09VPBZ4bCp4jdUnSHDJSWCT5XeAw8NnxDKe7v81JdiXZdeDAgZdil5IkRgiLJP8WeDfwvqqqVt4HrBjqtrzVpqt/D1iSZPFR9SlV1daqWltVaycmJmY6dEnScZpRWCRZD3wYeE9V/WCoaQdwSZKTk6wCVgP3AvcBq9udTycxuAi+o4XMXcDFbfuNwG0zm4ok6UQ5lltnbwb+Gnhdkr1JNgH/HfgFYGeSB5P8IUBV7QZuAR4FvgxsqaoftWsSHwDuAB4Dbml9AT4C/KckkwyuYdww1hlKkka2uNehqi6dojztX+hVdRVw1RT124Hbp6g/yeBuKUnSHOU3uCVJXYaFJKnLsJAkdRkWkqQuw0KS1GVYSJK6DAtJUpdhIUnqMiwkSV2GhSSpy7CQJHUZFpKkLsNCktRlWEiSugwLSVKXYSFJ6jqWJ+VtS7I/ySNDtVOS7EzyRPu5tNWT5Lokk0keSnLW0DYbW/8nkmwcqv9ykofbNtclybgnKUkazbEcWXwGWH9U7XLgzqpaDdzZ1gEuYPDc7dXAZuB6GIQLcCXwVgZPxbvySMC0Pr8xtN3R+5IkzbJuWFTVV4GDR5U3ADe25RuBi4bqN9XA3cCSJGcA5wM7q+pgVR0CdgLrW9urquruqirgpqH3kiTNETO9ZnF6VT3Tlp8FTm/Ly4A9Q/32ttqL1fdOUZckzSEjX+BuRwQ1hrF0JdmcZFeSXQcOHHgpdilJYuZh8Z12Con2c3+r7wNWDPVb3movVl8+RX1KVbW1qtZW1dqJiYkZDl2SdLxmGhY7gCN3NG0EbhuqX9builoHPN9OV90BnJdkabuwfR5wR2v7fpJ17S6oy4beS5I0RyzudUhyM/AO4LQkexnc1XQ1cEuSTcC3gPe27rcDFwKTwA+A9wNU1cEkHwPua/0+WlVHLpr/JoM7rl4OfKm9JElzSDcsqurSaZrOnaJvAVumeZ9twLYp6ruAN/bGIUmaPX6DW5LUZVhIkroMC0lSl2EhSeoyLCRJXYaFJKnLsJAkdRkWkqQuw0KS1GVYSJK6DAtJUpdhIUnqMiwkSV2GhSSpy7CQJHUZFpKkLsNCktQ1Ulgk+Y9Jdid5JMnNSV6WZFWSe5JMJvlckpNa35Pb+mRrXzn0Ple0+uNJzh9xTpKkMZtxWCRZBvwHYG1VvRFYBFwCXANcW1WvBQ4Bm9omm4BDrX5t60eSNW27NwDrgU8lWTTTcUmSxm/U01CLgZcnWQy8AngGOAe4tbXfCFzUlje0dVr7uUnS6tur6oWqegqYBM4ecVySpDGacVhU1T7g94BvMwiJ54H7geeq6nDrthdY1paXAXvatodb/1OH61Ns8zOSbE6yK8muAwcOzHTokqTjNMppqKUMjgpWAf8UeCWD00gnTFVtraq1VbV2YmLiRO5KkjRklNNQ/xJ4qqoOVNXfA58H3g4saaelAJYD+9ryPmAFQGt/NfC94foU20iS5oBRwuLbwLokr2jXHs4FHgXuAi5ufTYCt7XlHW2d1v6VqqpWv6TdLbUKWA3cO8K4JEljtrjfZWpVdU+SW4GvAYeBB4CtwBeB7Uk+3mo3tE1uAP44ySRwkMEdUFTV7iS3MAiaw8CWqvrRTMclSRq/GYcFQFVdCVx5VPlJpribqar+Dvj1ad7nKuCqUcYiSTpx/Aa3JKnLsJAkdRkWkqQuw0KS1GVYSJK6DAtJUpdhIUnqMiwkSV2GhSSpy7CQJHUZFpKkLsNCktRlWEiSugwLSVKXYSFJ6jIsJEldI4VFkiVJbk3yjSSPJXlb
klOS7EzyRPu5tPVNkuuSTCZ5KMlZQ++zsfV/IsnG6fcoSZoNox5ZfAL4clW9HngT8BhwOXBnVa0G7mzrABcweL72amAzcD1AklMYPG3vrQyesHflkYCRJM0NMw6LJK8Gfo32jO2q+mFVPQdsAG5s3W4ELmrLG4CbauBuYEmSM4DzgZ1VdbCqDgE7gfUzHZckafxGObJYBRwA/ijJA0k+neSVwOlV9Uzr8yxwelteBuwZ2n5vq01X/weSbE6yK8muAwcOjDB0SdLxGCUsFgNnAddX1VuAv+Wnp5wAqKoCaoR9/Iyq2lpVa6tq7cTExLjeVpLUMUpY7AX2VtU9bf1WBuHxnXZ6ifZzf2vfB6wY2n55q01XlyTNETMOi6p6FtiT5HWtdC7wKLADOHJH00bgtra8A7is3RW1Dni+na66AzgvydJ2Yfu8VpMkzRGLR9z+t4DPJjkJeBJ4P4MAuiXJJuBbwHtb39uBC4FJ4AetL1V1MMnHgPtav49W1cERxyVJGqORwqKqHgTWTtF07hR9C9gyzftsA7aNMhZJ0onjN7glSV2GhSSpy7CQJHUZFpKkLsNCktRlWEiSugwLSVKXYSFJ6jIsJEldhoUkqcuwkCR1GRaSpC7DQpLUZVhIkroMC0lSl2EhSeoaOSySLEryQJIvtPVVSe5JMpnkc+0peiQ5ua1PtvaVQ+9xRas/nuT8UcckSRqvcRxZfBB4bGj9GuDaqnotcAjY1OqbgEOtfm3rR5I1wCXAG4D1wKeSLBrDuCRJYzJSWCRZDrwL+HRbD3AOcGvrciNwUVve0NZp7ee2/huA7VX1QlU9xeAZ3WePMi5J0niNemTxB8CHgR+39VOB56rqcFvfCyxry8uAPQCt/fnW/yf1Kbb5GUk2J9mVZNeBAwdGHLok6VgtnumGSd4N7K+q+5O8Y2wjehFVtRXYCrB27dp6KfY5Tisv/+Ks7fvpq981a/uWNP/NOCyAtwPvSXIh8DLgVcAngCVJFrejh+XAvtZ/H7AC2JtkMfBq4HtD9SOGt5EkzQEzPg1VVVdU1fKqWsngAvVXqup9wF3Axa3bRuC2tryjrdPav1JV1eqXtLulVgGrgXtnOi5J0viNcmQxnY8A25N8HHgAuKHVbwD+OMkkcJBBwFBVu5PcAjwKHAa2VNWPTsC4JEkzNJawqKq/Av6qLT/JFHczVdXfAb8+zfZXAVeNYyySpPHzG9ySpC7DQpLUZVhIkroMC0lSl2EhSeoyLCRJXYaFJKnLsJAkdRkWkqQuw0KS1GVYSJK6DAtJUpdhIUnqMiwkSV2GhSSpy7CQJHXNOCySrEhyV5JHk+xO8sFWPyXJziRPtJ9LWz1JrksymeShJGcNvdfG1v+JJBun26ckaXaMcmRxGPhQVa0B1gFbkqwBLgfurKrVwJ1tHeACBs/XXg1sBq6HQbgAVwJvZfCEvSuPBIwkaW6YcVhU1TNV9bW2/H+Ax4BlwAbgxtbtRuCitrwBuKkG7gaWJDkDOB/YWVUHq+oQsBNYP9NxSZLGbyzXLJKsBN4C3AOcXlXPtKZngdPb8jJgz9Bme1ttuvpU+9mcZFeSXQcOHBjH0CVJx2DksEjy88CfAr9dVd8fbquqAmrUfQy939aqWltVaycmJsb1tpKkjpHCIsnPMQiKz1bV51v5O+30Eu3n/lbfB6wY2nx5q01XlyTNEaPcDRXgBuCxqvr9oaYdwJE7mjYCtw3VL2t3Ra0Dnm+nq+4AzkuytF3YPq/VJElzxOIRtn078G+Ah5M82Gq/A1wN3JJkE/At4L2t7XbgQmAS+AHwfoCqOpjkY8B9rd9Hq+rgCOOSJI3ZjMOiqv43kGmaz52ifwFbpnmvbcC2mY5FknRi+Q1uSVKXYSFJ6jIsJEldhoUkqcuwkCR1GRaSpC7DQpLUZVhIkroMC0lSl2EhSeoyLCRJXYaFJKnLsJAkdRkWkqSuUZ5noXlk5eVfnJX9Pn31u2Zlv5LGyyMLSVLXnAmLJOuT
PJ5kMsnlsz0eSdJPzYmwSLII+CRwAbAGuDTJmtkdlSTpiLlyzeJsYLKqngRIsh3YADw6q6PSyGbrWgl4vUQap7kSFsuAPUPre4G3Ht0pyWZgc1v9v0keP459nAZ8d8YjnJsW4pxgTPPKNWMYyfj4ZzW/LMR5HZnTP5vJxnMlLI5JVW0Fts5k2yS7qmrtmIc0qxbinGBhzmshzgmc13wy6pzmxDULYB+wYmh9eatJkuaAuRIW9wGrk6xKchJwCbBjlsckSWrmxGmoqjqc5APAHcAiYFtV7R7zbmZ0+mqOW4hzgoU5r4U4J3Be88lIc0pVjWsgkqQFaq6chpIkzWGGhSSpa8GHxXz+NSJJtiXZn+SRodopSXYmeaL9XNrqSXJdm+dDSc6avZFPL8mKJHcleTTJ7iQfbPX5Pq+XJbk3ydfbvP5Lq69Kck8b/+faDRwkObmtT7b2lbM6gReRZFGSB5J8oa0vhDk9neThJA8m2dVq8/ozCJBkSZJbk3wjyWNJ3jaueS3osFgAv0bkM8D6o2qXA3dW1WrgzrYOgzmubq/NwPUv0RiP12HgQ1W1BlgHbGl/JvN9Xi8A51TVm4A3A+uTrAOuAa6tqtcCh4BNrf8m4FCrX9v6zVUfBB4bWl8IcwJ4Z1W9eei7B/P9MwjwCeDLVfV64E0M/tzGM6+qWrAv4G3AHUPrVwBXzPa4jnMOK4FHhtYfB85oy2cAj7fl/wFcOlW/ufwCbgP+1UKaF/AK4GsMfgvBd4HFrf6TzyODO//e1pYXt36Z7bFPMZfl7S+Yc4AvAJnvc2rjexo47ajavP4MAq8Gnjr6v/m45rWgjyyY+teILJulsYzL6VX1TFt+Fji9Lc+7ubbTFG8B7mEBzKudrnkQ2A/sBL4JPFdVh1uX4bH/ZF6t/Xng1Jd0wMfmD4APAz9u66cy/+cEUMBfJLm//RohmP+fwVXAAeCP2mnDTyd5JWOa10IPiwWtBv8cmJf3Pif5eeBPgd+uqu8Pt83XeVXVj6rqzQz+NX428PrZHdFokrwb2F9V98/2WE6AX62qsxicitmS5NeGG+fpZ3AxcBZwfVW9BfhbfnrKCRhtXgs9LBbirxH5TpIzANrP/a0+b+aa5OcYBMVnq+rzrTzv53VEVT0H3MXgFM2SJEe+/Do89p/Mq7W/GvjeSzvSrrcD70nyNLCdwamoTzC/5wRAVe1rP/cDf8Yg3Of7Z3AvsLeq7mnrtzIIj7HMa6GHxUL8NSI7gI1teSODc/5H6pe1OxzWAc8PHXrOGUkC3AA8VlW/P9Q03+c1kWRJW345g+swjzEIjYtbt6PndWS+FwNfaf/qmzOq6oqqWl5VKxn8v/OVqnof83hOAElemeQXjiwD5wGPMM8/g1X1LLAnyeta6VwGj3kYz7xm+6LMS3DR50LgbxicP/7d2R7PcY79ZuAZ4O8Z/KthE4NzwHcCTwB/CZzS+obBnV/fBB4G1s72+KeZ068yOAx+CHiwvS5cAPP6JeCBNq9HgP/c6q8B7gUmgT8BTm71l7X1ydb+mtmeQ2d+7wC+sBDm1Mb/9fbafeTvhfn+GWxjfTOwq30O/xxYOq55+es+JEldC/00lCRpDAwLSVKXYSFJ6jIsJEldhoUkqcuwkCR1GRaSpK7/B/NLQMEv7N8eAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "max length: 574\n"
     ]
    }
   ],
   "source": [
    "seq_len_list = [len(seq) for seq in train_x]\n",
    "plt.figure()\n",
    "plt.hist(seq_len_list)\n",
    "plt.show()\n",
    "print(f\"max length: {max(seq_len_list)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 序列长度\n",
    "SEQ_LEN = 100\n",
    "\n",
    "def process_dataset(x_set, y_set):\n",
    "    x_set_num = [p.numerize_sequences(seq, p.token2idx) for seq in x_set]\n",
    "    y_set_num = [p.numerize_sequences(lab, p.label2idx) for lab in y_set]\n",
    "\n",
    "    # 补全序列长度\n",
    "    x_set_pad = pad_sequences(x_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "    y_set_pad = pad_sequences(y_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "\n",
    "    # 标签序列转换为 one-hot 表示\n",
    "    y_set_one = to_categorical(y_set_pad, len(p.label2idx))\n",
    "\n",
    "    return x_set_pad, y_set_one\n",
    "\n",
    "\n",
    "train_x_tensor, train_y_tensor = process_dataset(train_x, train_y)\n",
    "valid_x_tensor, valid_y_tensor = process_dataset(valid_x, valid_y)\n",
    "test_x_tensor, test_y_tensor = process_dataset(test_x, test_y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 13.1.2 训练模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"model\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "input_layer (InputLayer)     [(None, 100)]             0         \n",
      "_________________________________________________________________\n",
      "embedding_layer (Embedding)  (None, 100, 64)           286016    \n",
      "_________________________________________________________________\n",
      "bidirectional (Bidirectional (None, 100, 128)          66048     \n",
      "_________________________________________________________________\n",
      "dense (Dense)                (None, 100, 8)            1032      \n",
      "=================================================================\n",
      "Total params: 353,096\n",
      "Trainable params: 353,096\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "TOKEN_COUNT = len(p.token2idx)\n",
    "LABEL_COUNT = len(p.label2idx)\n",
    "EMBEDDING_DIM = 64\n",
    "HIDDEN_DIM = 64\n",
    "\n",
    "L = tf.keras.layers\n",
    "\n",
    "inputs = L.Input((SEQ_LEN, ), name='input_layer')\n",
    "embedding_layer = L.Embedding(input_dim=TOKEN_COUNT,\n",
    "                              output_dim=EMBEDDING_DIM,\n",
    "                              name='embedding_layer')\n",
    "bi_lstm_layer = L.Bidirectional(L.LSTM(HIDDEN_DIM,\n",
    "                                       return_sequences=True))\n",
    "dense_layer = L.Dense(LABEL_COUNT, activation=tf.nn.softmax)\n",
    "\n",
    "tensor = embedding_layer(inputs)\n",
    "tensor = bi_lstm_layer(tensor)\n",
    "outputs = dense_layer(tensor)\n",
    "\n",
    "model = tf.keras.Model(inputs, outputs)\n",
    "model.compile(optimizer='adam',\n",
    "              loss='categorical_crossentropy',\n",
    "              metrics=['accuracy'])\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "82/82 [==============================] - 32s 388ms/step - loss: 0.6112 - accuracy: 0.8441 - val_loss: 0.2482 - val_accuracy: 0.9462\n",
      "Epoch 2/10\n",
      "82/82 [==============================] - 30s 364ms/step - loss: 0.2195 - accuracy: 0.9478 - val_loss: 0.1900 - val_accuracy: 0.9488\n",
      "Epoch 3/10\n",
      "82/82 [==============================] - 31s 378ms/step - loss: 0.1688 - accuracy: 0.9508 - val_loss: 0.1517 - val_accuracy: 0.9530\n",
      "Epoch 4/10\n",
      "82/82 [==============================] - 31s 378ms/step - loss: 0.1420 - accuracy: 0.9551 - val_loss: 0.1310 - val_accuracy: 0.9583\n",
      "Epoch 5/10\n",
      "82/82 [==============================] - 31s 376ms/step - loss: 0.1219 - accuracy: 0.9611 - val_loss: 0.1105 - val_accuracy: 0.9641\n",
      "Epoch 6/10\n",
      "82/82 [==============================] - 29s 350ms/step - loss: 0.1031 - accuracy: 0.9667 - val_loss: 0.0936 - val_accuracy: 0.9698\n",
      "Epoch 7/10\n",
      "82/82 [==============================] - 27s 325ms/step - loss: 0.0877 - accuracy: 0.9721 - val_loss: 0.0793 - val_accuracy: 0.9748\n",
      "Epoch 8/10\n",
      "82/82 [==============================] - 27s 326ms/step - loss: 0.0758 - accuracy: 0.9759 - val_loss: 0.0697 - val_accuracy: 0.9776\n",
      "Epoch 9/10\n",
      "82/82 [==============================] - 27s 326ms/step - loss: 0.0675 - accuracy: 0.9783 - val_loss: 0.0624 - val_accuracy: 0.9798\n",
      "Epoch 10/10\n",
      "82/82 [==============================] - 26s 322ms/step - loss: 0.0610 - accuracy: 0.9802 - val_loss: 0.0569 - val_accuracy: 0.9815\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.keras.callbacks.History at 0x1456cc630>"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.fit(train_x_tensor,\n",
    "          train_y_tensor,\n",
    "          validation_data=(train_x_tensor, train_y_tensor),\n",
    "          epochs=10,\n",
    "          batch_size=256)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 13.1.3 评估序列标注"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "lost: 0.06724640882611789, accuracy: 0.9784641861915588\n"
     ]
    }
   ],
   "source": [
    "loss, acc = model.evaluate(test_x_tensor, test_y_tensor, batch_size=512, verbose=0)\n",
    "print(f\"lost: {loss}, accuracy: {acc}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from seqeval.metrics import classification_report\n",
    "\n",
    "def predict(sequences: List[List[str]]):\n",
    "    \"\"\"\n",
    "    预测序列标注结果\n",
    "    Args:\n",
    "        sequences: 文本序列数组\n",
    "    Returns:\n",
    "        文本序列的标注数组\n",
    "    \"\"\"\n",
    "    len_list = [len(seq) for seq in sequences]\n",
    "    x_set_num = [p.numerize_sequences(seq, p.token2idx) for seq in sequences]\n",
    "\n",
    "    # 补全序列长度\n",
    "    x_set_pad = pad_sequences(x_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "\n",
    "    predicts = model.predict(x_set_pad)\n",
    "    predict_labels = predicts.argmax(-1)\n",
    "\n",
    "    result = []\n",
    "    for index, label_idx in enumerate(predict_labels):\n",
    "        label_idx = label_idx[:len_list[index]]\n",
    "        labels = [p.idx2label[idx] for idx in label_idx]\n",
    "        result.append(labels)\n",
    "    return result\n",
    "\n",
    "# 由于模型只能处理一定长度的序列，需要对原始标签也进行截断\n",
    "y_true = [y[:SEQ_LEN] for y in test_y]\n",
    "y_pred = predict(test_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           precision    recall  f1-score   support\n",
      "\n",
      "      PER       0.51      0.61      0.55      1798\n",
      "      LOC       0.52      0.52      0.52      3431\n",
      "      ORG       0.31      0.43      0.36      2148\n",
      "\n",
      "micro avg       0.45      0.51      0.48      7377\n",
      "macro avg       0.46      0.51      0.48      7377\n",
      "\n"
     ]
    }
   ],
   "source": [
    "print(classification_report(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 13.2 BERT 迁移学习实体识别"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 13.2.1 tf.keras 加载 BERT 模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"model_2\"\n",
      "________________________________________________________________________________________________________________________\n",
      "Layer (type)                           Output Shape               Param #       Connected to                            \n",
      "========================================================================================================================\n",
      "Input-Token (InputLayer)               [(None, 100)]              0                                                     \n",
      "________________________________________________________________________________________________________________________\n",
      "Input-Segment (InputLayer)             [(None, 100)]              0                                                     \n",
      "________________________________________________________________________________________________________________________\n",
      "Embedding-Token (TokenEmbedding)       [(None, 100, 768), (21128, 16226304      Input-Token[0][0]                       \n",
      "________________________________________________________________________________________________________________________\n",
      "Embedding-Segment (Embedding)          (None, 100, 768)           1536          Input-Segment[0][0]                     \n",
      "________________________________________________________________________________________________________________________\n",
      "Embedding-Token-Segment (Add)          (None, 100, 768)           0             Embedding-Token[0][0]                   \n",
      "                                                                                Embedding-Segment[0][0]                 \n",
      "________________________________________________________________________________________________________________________\n",
      "Embedding-Position (PositionEmbedding) (None, 100, 768)           76800         Embedding-Token-Segment[0][0]           \n",
      "________________________________________________________________________________________________________________________\n",
      "Embedding-Dropout (Dropout)            (None, 100, 768)           0             Embedding-Position[0][0]                \n",
      "________________________________________________________________________________________________________________________\n",
      "Embedding-Norm (LayerNormalization)    (None, 100, 768)           1536          Embedding-Dropout[0][0]                 \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Embedding-Norm[0][0]                    \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-1-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Embedding-Norm[0][0]                    \n",
      "                                                                                Encoder-1-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-1-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-1-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-1-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-1-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-1-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-1-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-1-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-1-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-2-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-1-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-2-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-2-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-2-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-2-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-2-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-2-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-2-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-2-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-2-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-3-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-2-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-3-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-3-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-3-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-3-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-3-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-3-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-3-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-3-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-3-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-4-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-3-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-4-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-4-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-4-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-4-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-4-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-4-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-4-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-4-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-4-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-5-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-4-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-5-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-5-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-5-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-5-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-5-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-5-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-5-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-5-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-5-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-6-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-5-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-6-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-6-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-6-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-6-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-6-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-6-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-6-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-6-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-6-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-7-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-6-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-7-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-7-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-7-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-7-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-7-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-7-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-7-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-7-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-7-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-8-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-7-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-8-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-8-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-8-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-8-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-8-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-8-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-8-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-8-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-MultiHeadSelfAttention (Mult (None, 100, 768)           2362368       Encoder-8-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-MultiHeadSelfAttention-Dropo (None, 100, 768)           0             Encoder-9-MultiHeadSelfAttention[0][0]  \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-MultiHeadSelfAttention-Add ( (None, 100, 768)           0             Encoder-8-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-9-MultiHeadSelfAttention-Dropout\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-MultiHeadSelfAttention-Norm  (None, 100, 768)           1536          Encoder-9-MultiHeadSelfAttention-Add[0][\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-FeedForward (FeedForward)    (None, 100, 768)           4722432       Encoder-9-MultiHeadSelfAttention-Norm[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-FeedForward-Dropout (Dropout (None, 100, 768)           0             Encoder-9-FeedForward[0][0]             \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-FeedForward-Add (Add)        (None, 100, 768)           0             Encoder-9-MultiHeadSelfAttention-Norm[0]\n",
      "                                                                                Encoder-9-FeedForward-Dropout[0][0]     \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-9-FeedForward-Norm (LayerNorma (None, 100, 768)           1536          Encoder-9-FeedForward-Add[0][0]         \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-MultiHeadSelfAttention (Mul (None, 100, 768)           2362368       Encoder-9-FeedForward-Norm[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-MultiHeadSelfAttention-Drop (None, 100, 768)           0             Encoder-10-MultiHeadSelfAttention[0][0] \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-MultiHeadSelfAttention-Add  (None, 100, 768)           0             Encoder-9-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-10-MultiHeadSelfAttention-Dropou\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-MultiHeadSelfAttention-Norm (None, 100, 768)           1536          Encoder-10-MultiHeadSelfAttention-Add[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-FeedForward (FeedForward)   (None, 100, 768)           4722432       Encoder-10-MultiHeadSelfAttention-Norm[0\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-FeedForward-Dropout (Dropou (None, 100, 768)           0             Encoder-10-FeedForward[0][0]            \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-FeedForward-Add (Add)       (None, 100, 768)           0             Encoder-10-MultiHeadSelfAttention-Norm[0\n",
      "                                                                                Encoder-10-FeedForward-Dropout[0][0]    \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-10-FeedForward-Norm (LayerNorm (None, 100, 768)           1536          Encoder-10-FeedForward-Add[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-MultiHeadSelfAttention (Mul (None, 100, 768)           2362368       Encoder-10-FeedForward-Norm[0][0]       \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-MultiHeadSelfAttention-Drop (None, 100, 768)           0             Encoder-11-MultiHeadSelfAttention[0][0] \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-MultiHeadSelfAttention-Add  (None, 100, 768)           0             Encoder-10-FeedForward-Norm[0][0]       \n",
      "                                                                                Encoder-11-MultiHeadSelfAttention-Dropou\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-MultiHeadSelfAttention-Norm (None, 100, 768)           1536          Encoder-11-MultiHeadSelfAttention-Add[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-FeedForward (FeedForward)   (None, 100, 768)           4722432       Encoder-11-MultiHeadSelfAttention-Norm[0\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-FeedForward-Dropout (Dropou (None, 100, 768)           0             Encoder-11-FeedForward[0][0]            \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-FeedForward-Add (Add)       (None, 100, 768)           0             Encoder-11-MultiHeadSelfAttention-Norm[0\n",
      "                                                                                Encoder-11-FeedForward-Dropout[0][0]    \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-11-FeedForward-Norm (LayerNorm (None, 100, 768)           1536          Encoder-11-FeedForward-Add[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-MultiHeadSelfAttention (Mul (None, 100, 768)           2362368       Encoder-11-FeedForward-Norm[0][0]       \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-MultiHeadSelfAttention-Drop (None, 100, 768)           0             Encoder-12-MultiHeadSelfAttention[0][0] \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-MultiHeadSelfAttention-Add  (None, 100, 768)           0             Encoder-11-FeedForward-Norm[0][0]       \n",
      "                                                                                Encoder-12-MultiHeadSelfAttention-Dropou\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-MultiHeadSelfAttention-Norm (None, 100, 768)           1536          Encoder-12-MultiHeadSelfAttention-Add[0]\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-FeedForward (FeedForward)   (None, 100, 768)           4722432       Encoder-12-MultiHeadSelfAttention-Norm[0\n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-FeedForward-Dropout (Dropou (None, 100, 768)           0             Encoder-12-FeedForward[0][0]            \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-FeedForward-Add (Add)       (None, 100, 768)           0             Encoder-12-MultiHeadSelfAttention-Norm[0\n",
      "                                                                                Encoder-12-FeedForward-Dropout[0][0]    \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-12-FeedForward-Norm (LayerNorm (None, 100, 768)           1536          Encoder-12-FeedForward-Add[0][0]        \n",
      "________________________________________________________________________________________________________________________\n",
      "Encoder-Output (Concatenate)           (None, 100, 3072)          0             Encoder-9-FeedForward-Norm[0][0]        \n",
      "                                                                                Encoder-10-FeedForward-Norm[0][0]       \n",
      "                                                                                Encoder-11-FeedForward-Norm[0][0]       \n",
      "                                                                                Encoder-12-FeedForward-Norm[0][0]       \n",
      "========================================================================================================================\n",
      "Total params: 101,360,640\n",
      "Trainable params: 0\n",
      "Non-trainable params: 101,360,640\n",
      "________________________________________________________________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "# The TF_KERAS env var must be set BEFORE importing keras_bert so that it\n",
    "# uses tf.keras as its backend instead of standalone Keras.\n",
    "os.environ['TF_KERAS'] = '1'\n",
    "from keras_bert import load_trained_model_from_checkpoint\n",
    "\n",
    "# Fixed input sequence length fed to BERT (shorter inputs get padded).\n",
    "SEQ_LEN = 100\n",
    "# Pretrained Chinese BERT-Base checkpoint (12 layers, hidden size 768, 12 heads).\n",
    "BERT_PATH = 'data/bert/chinese_L-12_H-768_A-12'\n",
    "\n",
    "config_path = os.path.join(BERT_PATH, 'bert_config.json')\n",
    "checkpoint_path = os.path.join(BERT_PATH, 'bert_model.ckpt')\n",
    "\n",
    "# Load BERT as a frozen feature extractor: the summary below shows all params\n",
    "# non-trainable, and the last 4 encoder layers concatenated (100 x 3072 output).\n",
    "bert_model = load_trained_model_from_checkpoint(config_path,\n",
    "                                                checkpoint_path,\n",
    "                                                seq_len=SEQ_LEN, # sequence length\n",
    "                                                output_layer_num=4) # number of encoder layers to extract as features\n",
    "bert_model.summary(line_length=120)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import codecs\n",
    "# Dict is used in the numerize_sequences annotation below; importing it here\n",
    "# keeps this cell self-contained instead of relying on an earlier cell's import.\n",
    "from typing import Dict, List\n",
    "\n",
    "class BERTProcessor(object):\n",
    "\n",
    "    def read_vocab_list(self, bert_folder: str):\n",
    "        \"\"\"\n",
    "        Read the BERT vocabulary file and build the token -> index mapping.\n",
    "        Args:\n",
    "            bert_folder: path to the BERT model folder (must contain vocab.txt)\n",
    "        \"\"\"\n",
    "        dict_path = os.path.join(bert_folder, 'vocab.txt')\n",
    "        token_dict = {}\n",
    "        with codecs.open(dict_path, 'r', 'utf8') as reader:\n",
    "            for line in reader:\n",
    "                token = line.strip()\n",
    "                # The line number in vocab.txt is the token's BERT index.\n",
    "                token_dict[token] = len(token_dict)\n",
    "\n",
    "        self.token2idx = token_dict\n",
    "\n",
    "    def build_label_dict(self, y_data):\n",
    "        \"\"\"\n",
    "        Build the label vocabulary from the tag sequences.\n",
    "        Args:\n",
    "            y_data: list of label sequences\n",
    "        \"\"\"\n",
    "        # Reserve index 0 for the special [PAD] tag marking padded positions.\n",
    "        label2idx = {\n",
    "            '[PAD]': 0\n",
    "        }\n",
    "\n",
    "        for sequence in y_data:\n",
    "            for label in sequence:\n",
    "                if label not in label2idx:\n",
    "                    label2idx[label] = len(label2idx)\n",
    "\n",
    "        self.label2idx = label2idx\n",
    "        self.idx2label = dict([(v, k) for k, v in label2idx.items()])\n",
    "\n",
    "    def numerize_sequences(self, sequence: List[str], token2idx: Dict[str, int]) -> List[int]:\n",
    "        \"\"\"\n",
    "        Convert a sequence of tokens into the corresponding index sequence.\n",
    "        Args:\n",
    "            sequence: tokenized input sequence\n",
    "            token2idx: token -> index mapping to use for the lookup\n",
    "        Returns: index sequence for the input tokens\n",
    "        \"\"\"\n",
    "        token_result = []\n",
    "        for token in sequence:\n",
    "            token_index = token2idx.get(token)\n",
    "            if token_index is None:\n",
    "                # Fall back to the unknown-token index for OOV tokens.\n",
    "                token_index = token2idx['[UNK]']\n",
    "            token_result.append(token_index)\n",
    "        return token_result\n",
    "\n",
    "bert_processor = BERTProcessor()\n",
    "bert_processor.read_vocab_list(BERT_PATH)\n",
    "bert_processor.build_label_dict(train_y + test_y + valid_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[[ 0.39564452,  0.33022746, -0.43297228, ...,  0.661224  ,\n",
       "         -1.5728569 , -0.0946451 ],\n",
       "        [ 1.3865703 , -1.0209625 , -0.81014854, ...,  0.67395735,\n",
       "         -1.3157052 , -0.2783737 ],\n",
       "        [ 1.1564046 , -0.9684137 , -0.83766013, ...,  0.7626987 ,\n",
       "         -1.3007891 , -0.3555608 ],\n",
       "        ...,\n",
       "        [ 1.0103469 ,  0.5744371 , -0.2238759 , ...,  0.64004606,\n",
       "         -1.5287675 , -0.10786324],\n",
       "        [ 1.0362953 ,  0.6315569 , -0.38111922, ...,  0.65740514,\n",
       "         -1.5043037 , -0.10978415],\n",
       "        [ 0.96543396,  0.56763047, -0.2328559 , ...,  0.6472532 ,\n",
       "         -1.5195591 , -0.11089186]]], dtype=float32)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sentences = [[\n",
    "    '语', '言', '模', '型'\n",
    "]]\n",
    "# Use the BERT processor's vocabulary (not the corpus-built `p` processor from\n",
    "# the earlier section) so indices match the pretrained BERT embedding table.\n",
    "x_set_num = [bert_processor.numerize_sequences(seq, bert_processor.token2idx) for seq in sentences]\n",
    "x_set_pad = pad_sequences(x_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "# Single-sentence input, so all segment ids are zero.\n",
    "x_segments = np.zeros(x_set_pad.shape)\n",
    "\n",
    "# Output the feature tensor for the sentence\n",
    "bert_model.predict((x_set_pad, x_segments))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "HIDDEN_DIM = 64\n",
    "\n",
    "L = tf.keras.layers\n",
    "\n",
    "# Sequence-labeling head: a BiLSTM over the frozen BERT features followed by\n",
    "# a per-token softmax over the label vocabulary.\n",
    "bi_lstm_layer = L.Bidirectional(L.LSTM(HIDDEN_DIM,\n",
    "                                       return_sequences=True))\n",
    "# Use bert_processor's label dict (built in this section) rather than the\n",
    "# leftover `p` processor, so the output width matches the training labels.\n",
    "dense_layer = L.Dense(len(bert_processor.label2idx), activation=tf.nn.softmax)\n",
    "\n",
    "tensor = bi_lstm_layer(bert_model.output)\n",
    "outputs = dense_layer(tensor)\n",
    "\n",
    "transfer_model = tf.keras.Model(bert_model.inputs, outputs)\n",
    "transfer_model.compile(optimizer='adam',\n",
    "                       loss='categorical_crossentropy',\n",
    "                       metrics=['accuracy'])\n",
    "# transfer_model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sequence length (must match the seq_len BERT was loaded with)\n",
    "SEQ_LEN = 100\n",
    "\n",
    "def process_bert_dataset(x_set, y_set):\n",
    "    \"\"\"Convert token/label sequences into padded BERT input tensors.\"\"\"\n",
    "    # Use the BERT processor's vocab/label dicts (not the earlier `p` processor)\n",
    "    # so token ids align with the pretrained BERT embedding table.\n",
    "    x_set_num = [bert_processor.numerize_sequences(seq, bert_processor.token2idx) for seq in x_set]\n",
    "    y_set_num = [bert_processor.numerize_sequences(lab, bert_processor.label2idx) for lab in y_set]\n",
    "\n",
    "    # Pad/truncate every sequence to the fixed length\n",
    "    x_set_pad = pad_sequences(x_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "    y_set_pad = pad_sequences(y_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "\n",
    "    # Single-sentence input, so all segment ids are zero\n",
    "    x_segments = np.zeros(x_set_pad.shape)\n",
    "\n",
    "    # One-hot encode the label sequences\n",
    "    y_set_one = to_categorical(y_set_pad, len(bert_processor.label2idx))\n",
    "\n",
    "    return (x_set_pad, x_segments), y_set_one\n",
    "\n",
    "\n",
    "train_x_tensor, train_y_tensor = process_bert_dataset(train_x, train_y)\n",
    "valid_x_tensor, valid_y_tensor = process_bert_dataset(valid_x, valid_y)\n",
    "test_x_tensor, test_y_tensor = process_bert_dataset(test_x, test_y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/10\n",
      "11/41 [=======>......................] - ETA: 33:29 - loss: 0.3369 - accuracy: 0.4182"
     ]
    }
   ],
   "source": [
    "# Train only the BiLSTM + softmax head (the BERT layers are frozen).\n",
    "transfer_model.fit(train_x_tensor,\n",
    "                   train_y_tensor,\n",
    "                   validation_data=(valid_x_tensor, valid_y_tensor),\n",
    "                   batch_size=512,\n",
    "                   epochs=10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "def bert_predict(sequences: List[List[str]]) -> List[List[str]]:\n",
    "    \"\"\"\n",
    "    Predict NER label sequences for tokenized input sentences.\n",
    "    Args:\n",
    "        sequences: tokenized input sentences\n",
    "    Returns: predicted label sequence per input, truncated to the input length\n",
    "    \"\"\"\n",
    "    len_list = [len(seq) for seq in sequences]\n",
    "    # Use the BERT vocabulary (not the leftover `p` processor) so the indices\n",
    "    # match what the model was trained with.\n",
    "    x_set_num = [bert_processor.numerize_sequences(seq, bert_processor.token2idx) for seq in sequences]\n",
    "\n",
    "    # Pad/truncate to the model's fixed sequence length\n",
    "    x_set_pad = pad_sequences(x_set_num, SEQ_LEN, padding='post', truncating='post')\n",
    "    x_segments = np.zeros(x_set_pad.shape)\n",
    "\n",
    "    predicts = transfer_model.predict([x_set_pad, x_segments])\n",
    "    # Most probable label index at every position\n",
    "    predict_labels = predicts.argmax(-1)\n",
    "\n",
    "    result = []\n",
    "    for index, label_idx in enumerate(predict_labels):\n",
    "        # Truncate the predicted sequence to match the input length\n",
    "        label_idx = label_idx[:len_list[index]]\n",
    "        labels = [bert_processor.idx2label[idx] for idx in label_idx]\n",
    "        result.append(labels)\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Truncate ground truth to SEQ_LEN so it aligns with the model's predictions.\n",
    "y_true = [y[:SEQ_LEN] for y in test_y]\n",
    "y_pred = bert_predict(test_x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           precision    recall  f1-score   support\n",
      "\n",
      "      PER       0.88      0.90      0.89      1798\n",
      "      LOC       0.80      0.85      0.82      3431\n",
      "      ORG       0.67      0.74      0.70      2148\n",
      "\n",
      "micro avg       0.78      0.83      0.80      7377\n",
      "macro avg       0.78      0.83      0.81      7377\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): classification_report is not imported in any visible cell;\n",
    "# presumably it is seqeval.metrics.classification_report (entity-level PER/LOC/ORG\n",
    "# report, as shown in the output above) - confirm the import cell exists.\n",
    "print(classification_report(y_true, y_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Continue training the head for more epochs to improve the scores.\n",
    "transfer_model.fit(train_x_tensor,\n",
    "                   train_y_tensor,\n",
    "                   validation_data=(valid_x_tensor, valid_y_tensor),\n",
    "                   batch_size=512,\n",
    "                   epochs=40)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-evaluate on the test set after the additional training epochs.\n",
    "y_true = [y[:SEQ_LEN] for y in test_y]\n",
    "y_pred = bert_predict(test_x)\n",
    "print(classification_report(y_true, y_pred))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
