{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "code_folding": [
     44,
     49
    ]
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "unable to import 'smart_open.gcs', disabling that module\n",
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "import torch\n",
    "import gensim\n",
    "from gensim.test.utils import datapath, get_tmpfile\n",
    "from gensim.models import KeyedVectors\n",
    "from gensim.scripts.glove2word2vec import glove2word2vec\n",
    "from transformers import *\n",
    "import torch.utils.data as Data\n",
    "import collections\n",
    "import os\n",
    "import random\n",
    "import tarfile\n",
    "from torch import nn\n",
    "import torchtext.vocab as Vocab\n",
    "import pickle as pk\n",
    "import torch.nn.functional as F\n",
    "from IPython.display import display,HTML\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "from torch.nn.utils.rnn import pack_padded_sequence\n",
    "from torch.nn.utils.rnn import pad_packed_sequence\n",
    "from torch.nn.utils.rnn import pack_sequence\n",
    "from torch.nn import CrossEntropyLoss, MSELoss\n",
    "from torchcrf import CRF\n",
    "from sklearn import metrics\n",
    "import joblib\n",
    "import math\n",
    "device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "import argparse\n",
    "import glob\n",
    "import logging\n",
    "import unicodedata\n",
    "from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n",
    "from torch.utils.data.distributed import DistributedSampler\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "import torch.utils.data as Data\n",
    "import jieba\n",
    "import jieba.posseg as pseg\n",
    "import copy\n",
    "import time\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import random\n",
    "from sklearn.model_selection import KFold\n",
    "logger = logging.getLogger(__name__)\n",
    "logging.basicConfig(\n",
    "    format=\"%(asctime)s - %(levelname)s - %(name)s -   %(message)s\",\n",
    "    datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
    "    level=logging.INFO \n",
    ")\n",
    "ARG=collections.namedtuple('ARG',['train_batch_size',\n",
    " 'eval_batch_size',\n",
    " 'weight_decay',\n",
    " 'learning_rate',\n",
    " 'adam_epsilon',\n",
    " 'num_train_epochs',\n",
    " 'warmup_steps',\n",
    " 'gradient_accumulation_steps',\n",
    " 'save_steps',\n",
    " 'max_grad_norm',\n",
    " 'model_name_or_path',\n",
    " 'output_dir',\n",
    " 'seed',\n",
    " 'device',\n",
    " 'n_gpu',\n",
    " 'max_steps',\n",
    " 'output_mode',\n",
    "'fp16_opt_level',\n",
    "'fp16',\n",
    "'card_list'])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据预处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 工具函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "#后处理的时候考虑：\n",
    "#复杂类的value一定不能缺\n",
    "#复杂类的其他ob跟value公用一个sub\n",
    "#同一复杂类出现多次的时候怎么组织value和其他ob还是一个问题\n",
    "#后续可以考虑再训练一个model，给定复杂类的sub ob的情况下，预测出其他附属属性"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "code_folding": [
     0,
     16,
     40,
     55,
     73,
     83,
     106,
     121,
     141,
     167,
     171,
     176,
     182,
     184,
     208,
     257,
     370,
     477,
     528,
     554,
     583,
     613,
     712,
     745,
     812,
     845
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "def convert_text_to_ids(tokenizer, text, max_len=100):\n",
    "    if isinstance(text,str):\n",
    "        output=tokenizer.encode_plus(text,max_length=max_len,pad_to_max_length=True,return_tensors=\"pt\")\n",
    "        input_ids=output[\"input_ids\"].squeeze(0)\n",
    "        token_type_ids=output[\"token_type_ids\"].squeeze(0)\n",
    "        attention_mask=output[\"attention_mask\"].squeeze(0)\n",
    "    elif isinstance(text,list):\n",
    "        input_ids,token_type_ids,attention_mask=[],[],[]\n",
    "        for e in text:\n",
    "            output=tokenizer.encode_plus(e,max_length=max_len,pad_to_max_length=True,return_tensors=\"pt\")\n",
    "            input_ids.append(output[\"input_ids\"].squeeze(0))\n",
    "            token_type_ids.append(output[\"token_type_ids\"].squeeze(0))\n",
    "            attention_mask.append(output[\"attention_mask\"].squeeze(0))\n",
    "    else:\n",
    "        raise Exception('type error')\n",
    "    return torch.stack(input_ids).long(),torch.stack(token_type_ids).long(),torch.stack(attention_mask).long()        \n",
    "class RelDataset(Data.Dataset):\n",
    "    def __init__(self,examples):\n",
    "        self.input_ids=torch.stack([e['input_ids'] for e in examples]).long()\n",
    "        self.token_type_ids=torch.stack([e['token_type_ids'] for e in examples]).long()\n",
    "        self.attention_mask=torch.stack([e['attention_mask'] for e in examples]).long()\n",
    "        self.rel_label=torch.stack([e['rel_label'] for e in examples]).long()\n",
    "        self.postag=torch.stack([e['postag'] for e in examples]).long()\n",
    "        self.feature=torch.stack([e['feature'] for e in examples]).float()\n",
    "        if examples[0]['token_vec'] is None:\n",
    "            self.token_vec=np.zeros(len(examples))\n",
    "            self.word_vec=np.zeros(len(examples))\n",
    "            self.word_mask=np.zeros(len(examples))\n",
    "        else:\n",
    "            self.token_vec=np.stack([np.array(e['token_vec'])  for e in examples])\n",
    "            self.word_vec=np.stack([np.array(e['word_vec']) for e in examples])\n",
    "            self.word_mask=np.stack([np.array(e['word_mask']) for e in examples])\n",
    "        self.plan_label=np.stack([np.array(e['plan_label']) for e in examples])\n",
    "        self.token2docs=[e[\"token2doc\"] for e in examples]\n",
    "    def __len__(self):\n",
    "        return self.input_ids.shape[0]\n",
    "    def __getitem__(self,idx):\n",
    "        return self.input_ids[idx],self.attention_mask[idx],self.token_type_ids[idx],\\\n",
    "    self.rel_label[idx],self.postag[idx],self.feature[idx],self.token_vec[idx],self.word_vec[idx],\\\n",
    "    self.word_mask[idx],self.plan_label[idx],self.token2docs[idx]  \n",
    "class NerDataset(Data.Dataset):\n",
    "    def __init__(self,examples):\n",
    "        self.input_ids=torch.stack([e['input_ids'] for e in examples]).long()\n",
    "        self.token_type_ids=torch.stack([e['token_type_ids'] for e in examples]).long()\n",
    "        self.attention_mask=torch.stack([e['attention_mask'] for e in examples]).long()\n",
    "        self.rel_label=torch.stack([e['rel_label'] for e in examples]).long()\n",
    "        self.labels=torch.stack([e['labels'] for e in examples]).long()\n",
    "        self.postag=torch.stack([e['postag'] for e in examples]).long()\n",
    "        self.feature=torch.stack([e['feature'] for e in examples]).float()\n",
    "        self.token2docs=[e[\"token2doc\"] for e in examples]\n",
    "    def __len__(self):\n",
    "        return self.input_ids.shape[0]\n",
    "    def __getitem__(self,idx):\n",
    "        return self.input_ids[idx],self.attention_mask[idx],self.token_type_ids[idx],\\\n",
    "               self.rel_label[idx],self.labels[idx],self.postag[idx],self.feature[idx],self.token2docs[idx]  \n",
    "class NerDataset_pointer(Data.Dataset):\n",
    "    def __init__(self,examples):\n",
    "        self.input_ids=torch.stack([e['input_ids'] for e in examples]).long()\n",
    "        self.token_type_ids=torch.stack([e['token_type_ids'] for e in examples]).long()\n",
    "        self.attention_mask=torch.stack([e['attention_mask'] for e in examples]).long()\n",
    "        self.rel_label=torch.stack([e['rel_label'] for e in examples]).long()\n",
    "        self.labels=torch.stack([e['labels'] for e in examples]).long()\n",
    "        self.postag=torch.stack([e['postag'] for e in examples]).long()\n",
    "        self.feature=torch.stack([e['feature'] for e in examples]).float()\n",
    "        self.plan_labels=torch.stack([e['plan_labels'] for e in examples]).long()\n",
    "        self.token2docs=[e[\"token2doc\"] for e in examples]\n",
    "    def __len__(self):\n",
    "        return self.input_ids.shape[0]\n",
    "    def __getitem__(self,idx):\n",
    "        return self.input_ids[idx],self.attention_mask[idx],self.token_type_ids[idx],\\\n",
    "               self.rel_label[idx],self.labels[idx],self.postag[idx],self.feature[idx],\\\n",
    "               self.plan_labels[idx],self.token2docs[idx]  \n",
    "import unicodedata\n",
    "def _is_whitespace(char):\n",
    "    \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n",
    "    # \\t, \\n, and \\r are technically contorl characters but we treat them\n",
    "    # as whitespace since they are generally considered as such.\n",
    "    if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n",
    "        return True\n",
    "    cat = unicodedata.category(char)\n",
    "    if cat == \"Zs\":\n",
    "        return True\n",
    "    return False\n",
    "def is_chinese_char(cp):\n",
    "    \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n",
    "    # This defines a \"chinese character\" as anything in the CJK Unicode block:\n",
    "    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n",
    "    #\n",
    "    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n",
    "    # despite its name. The modern Korean Hangul alphabet is a different block,\n",
    "    # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n",
    "    # space-separated words, so they are not treated specially and handled\n",
    "    # like the all of the other languages.\n",
    "    if (\n",
    "        (cp >= 0x4E00 and cp <= 0x9FFF)\n",
    "        or (cp >= 0x3400 and cp <= 0x4DBF)  #\n",
    "        or (cp >= 0x20000 and cp <= 0x2A6DF)  #\n",
    "        or (cp >= 0x2A700 and cp <= 0x2B73F)  #\n",
    "        or (cp >= 0x2B740 and cp <= 0x2B81F)  #\n",
    "        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #\n",
    "        or (cp >= 0xF900 and cp <= 0xFAFF)\n",
    "        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #\n",
    "    ):  #\n",
    "        return True\n",
    "\n",
    "    return False\n",
    "def is_punctuation(char):\n",
    "    \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n",
    "    cp = ord(char)\n",
    "    # We treat all non-letter/number ASCII as punctuation.\n",
    "    # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n",
    "    # Punctuation class but we treat them as punctuation anyways, for\n",
    "    # consistency.\n",
    "    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):\n",
    "        return True\n",
    "    cat = unicodedata.category(char)\n",
    "    if cat.startswith(\"P\"):\n",
    "        return True\n",
    "    return False\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "def cut_buff(info):\n",
    "    buff=\"\"\n",
    "    tokens=[]\n",
    "    for e in info:\n",
    "        if (e.isdigit() and (buff.isdigit() or buff=='')):\n",
    "            buff+=e\n",
    "        elif (e.isalpha() and (buff.isalpha() or buff=='')):\n",
    "            buff+=e\n",
    "        else:\n",
    "            if buff!=\"\":\n",
    "                tokens.append(buff)\n",
    "            buff=\"\"\n",
    "            if not (e.isdigit() or e.isalpha()):\n",
    "                tokens.append(e)\n",
    "            else:\n",
    "                buff+=e\n",
    "\n",
    "    if buff!=\"\":\n",
    "        tokens.append(buff)\n",
    "    return tokens\n",
    "def build_tfidf_svd_matrix(texts,n_output,tfidf_vec=None,svd=None):\n",
    "    corpus=[]\n",
    "    for text in tqdm(texts):\n",
    "#         print(text)\n",
    "        words=word_segment(str(text['text']))\n",
    "#         print(words)\n",
    "        use_words=[]\n",
    "        for word in words:\n",
    "            if word in stop_words:\n",
    "                continue\n",
    "            use_words.append(word)\n",
    "#         print(use_words)\n",
    "        corpus.append(\" \".join(use_words))\n",
    "    print(len(corpus))\n",
    "    print(corpus[0])\n",
    "    if tfidf_vec is None:\n",
    "        tfidf_vec=TfidfVectorizer()\n",
    "        tfidf_matrix=tfidf_vec.fit_transform(corpus)\n",
    "    else:\n",
    "        tfidf_matrix=tfidf_vec.transform(corpus)\n",
    "    if svd is None:\n",
    "        svd=TruncatedSVD(n_components=n_output,n_iter=7,random_state=42)\n",
    "        tf_idf_svd=svd.fit_transform(tfidf_matrix)\n",
    "    else:\n",
    "        tf_idf_svd=svd.transform(tfidf_matrix)\n",
    "    return tf_idf_svd,tfidf_vec,svd\n",
    "def word_segment(sentence):\n",
    "    words=jieba.cut(sentence)\n",
    "    return \",\".join(words).split(\",\")\n",
    "stop_words=set()\n",
    "def load_stopwords():\n",
    "    with open(\"./middle_data/stopwords.txt\",\"r\",encoding=\"UTF-8\") as r:\n",
    "        for line in r.readlines():\n",
    "            stop_words.add(line.strip())\n",
    "load_stopwords()\n",
    "def remove_stopwords(word_list):\n",
    "    res=[]\n",
     "    for word in word_list:\n",
    "        if word not in stop_words:\n",
    "            res.append(word)\n",
    "    return ' '.join(res)\n",
    "def clean_text(string):\n",
    "    return string.replace(' ', '').replace('\\n', '').replace('\\u3000', '')\n",
    "def special_tokenize(text_raw,tokenizer):\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.extend(cut_buff(buff))\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.extend(cut_buff(buff))\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        for sub_token in sub_tokens:\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break \n",
    "    return tokens\n",
    "def _convert_example_to_cls_record(example,\n",
    "                               tokenizer):\n",
    "    if example.__contains__('spo_list'):\n",
    "        spo_list = example['spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.append(buff)\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.append(buff)\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels_list=[]\n",
    "    rel_labels=[]\n",
    "    for spo in spo_list:\n",
    "        for spo_object in spo['object'].keys():\n",
    "            labels = [0\n",
    "                  for i in range(len(tokens))]  # initialize tag\n",
    "            if not spo['predicate'] in special_rels:\n",
    "                rel_label=rels2id[spo[\"predicate\"]]\n",
    "            else:\n",
    "                rel_label=rels2id[spo[\"predicate\"]+\"_\"+spo_object] \n",
    "            rel_labels.append(rel_label)\n",
    "\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,rel_labels\n",
    "def _convert_example_to_record(example,\n",
    "                               tokenizer):\n",
    "    if example.__contains__('spo_list'):\n",
    "        spo_list = example['spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.extend(cut_buff(buff))\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.extend(cut_buff(buff))\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels_list=[[0\n",
    "              for i in range(len(tokens))] for i in range(len(id2rels))]\n",
    "    rel_labels=[]\n",
    "    for spo in spo_list:\n",
    "        for spo_object in spo['object'].keys():\n",
    "            if not spo['predicate'] in special_rels:\n",
    "                rel_label=rels2id[spo[\"predicate\"]]\n",
    "            else:\n",
    "                rel_label=rels2id[spo[\"predicate\"]+\"_\"+spo_object] \n",
    "            labels = labels_list[rel_label] #复杂类的不同part还是不会被归到一个，以后再讲\n",
    "            label_subject = label2ids['B-SUB']\n",
    "            label_object = label2ids['B-OBJ']\n",
    "            subject_sub_tokens = special_tokenize(spo['subject'],tokenizer)\n",
    "#             print(\"sub\",subject_sub_tokens)\n",
    "            object_sub_tokens = special_tokenize(spo['object'][\n",
    "                spo_object],tokenizer)\n",
    "            forbidden_index = None\n",
    "            if len(subject_sub_tokens) > len(object_sub_tokens):\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        labels[index]=label_subject\n",
    "                        for i in range(len(subject_sub_tokens) - 1):\n",
    "                            labels[index + i + 1]=label_subject+1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index]=label_object\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_object+1\n",
    "                            break\n",
    "                        # check if labeled already\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                subject_sub_tokens):\n",
    "                            labels[index]=label_object\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_object+1\n",
    "                            break\n",
    "\n",
    "            else:\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        labels[index]=label_object\n",
    "                        for i in range(len(object_sub_tokens) - 1):\n",
    "                            labels[index + i + 1]=label_object+1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index]=label_subject\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_subject+1\n",
    "                            break\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                object_sub_tokens):\n",
    "                            labels[index]=label_subject\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_subject+1\n",
    "                            break\n",
    "            labels_list[rel_label]=labels\n",
    "            if rel_label not in rel_labels:\n",
    "                rel_labels.append(rel_label)\n",
    "\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels\n",
    "def _convert_example_to_record_new(example,\n",
    "                               tokenizer):\n",
    "    if example.__contains__('spo_list'):\n",
    "        spo_list = example['spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.extend(cut_buff(buff))\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.extend(cut_buff(buff))\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels_list=[[[0] * len(id2labels)\n",
    "              for i in range(len(tokens))] for i in range(len(id2rels))]\n",
    "    rel_labels=[]\n",
    "    for spo in spo_list:\n",
    "        for spo_object in spo['object'].keys():\n",
    "            if not spo['predicate'] in special_rels:\n",
    "                rel_label=rels2id[spo[\"predicate\"]]\n",
    "            else:\n",
    "                rel_label=rels2id[spo[\"predicate\"]+\"_\"+spo_object] \n",
    "            labels = labels_list[rel_label] #复杂类的不同part还是不会被归到一个，以后再讲\n",
    "            label_subject = new_label2ids['B-SUB']\n",
    "            label_object = new_label2ids['B-OBJ']\n",
    "            subject_sub_tokens = special_tokenize(spo['subject'],tokenizer)\n",
    "#             print(\"sub\",subject_sub_tokens)\n",
    "            object_sub_tokens = special_tokenize(spo['object'][\n",
    "                spo_object],tokenizer)\n",
    "            forbidden_index = None\n",
    "            if len(subject_sub_tokens) > len(object_sub_tokens):\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        labels[index][label_subject]+=1\n",
    "                        labels[index + len(subject_sub_tokens)-1][label_subject+1]+=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_object]+=1\n",
    "                            labels[index + len(object_sub_tokens)-1][label_object+1]+=1\n",
    "                            break\n",
    "                        # check if labeled already\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                subject_sub_tokens):\n",
    "                            labels[index][label_object]+=1\n",
    "                            labels[index + len(object_sub_tokens)-1][label_object+1]+=1\n",
    "                            break\n",
    "\n",
    "            else:\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        labels[index][label_object]+=1\n",
    "                        labels[index + len(object_sub_tokens)-1][label_object+1]+=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_subject]+=1\n",
    "                            labels[index + len(subject_sub_tokens)-1][label_subject+1]+=1\n",
    "                            break\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                object_sub_tokens):\n",
    "                            labels[index][label_subject]+=1\n",
    "                            labels[index + len(subject_sub_tokens)-1][label_subject+1]+=1\n",
    "                            break\n",
    "            labels_list[rel_label]=labels\n",
    "            if rel_label not in rel_labels:\n",
    "                rel_labels.append(rel_label)\n",
    "\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels\n",
    "def create_cls_example(tokens,rel,token2doc,postag,tfidf_svd,word_vec,token_vec,plan_label,tokenizer):\n",
    "    \"\"\"Build one relation-classification example dict from a tokenized text.\n",
    "\n",
    "    Returns a dict with padded input_ids / token_type_ids / attention_mask\n",
    "    tensors of length max_len, a multi-hot rel_label over id2rels, POS-tag\n",
    "    ids, the tfidf_svd feature vector, the plan labels and the token->doc\n",
    "    index map.  The word_vec / token_vec inputs are currently unused (these\n",
    "    side features are disabled); the parameters are kept so existing call\n",
    "    sites keep working.\n",
    "    \"\"\"\n",
    "    # Word/token vector side-features are disabled in this variant.\n",
    "    word_vec=None\n",
    "    word_mask=None\n",
    "    tokenvec=None\n",
    "    # Truncate to leave room for [CLS] and [SEP]; only the first two\n",
    "    # token2doc sub-lists are token-aligned and need truncating.\n",
    "    if len(tokens)>max_len-2:\n",
    "        tokens=tokens[:(max_len-2)]\n",
    "        plan_label=plan_label[:(max_len-2)]\n",
    "        token2doc=[e[:(max_len-2)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "    # POS-tag id per token, bracketed with the [CLS]/[SEP] tag ids.\n",
    "    tag=[postag[token2doc[0][idx]] for idx in range(len(tokens))]\n",
    "    tag=[pos2id_BIO[tokenizer.cls_token]]+tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "    full_tokens=[tokenizer.cls_token]+tokens+[tokenizer.sep_token]\n",
    "    # One-hot 'O' row used for [CLS]/[SEP] positions and plan-label padding.\n",
    "    padding_O=[0]*len(id2kglabels)\n",
    "    padding_O[0]=1\n",
    "    full_plan_labels=[padding_O]+plan_label+[padding_O]\n",
    "    token_type_ids=[0]*len(full_tokens)\n",
    "    attention_mask=[1]*len(token_type_ids)\n",
    "    cur_len=len(full_tokens)\n",
    "    if cur_len<max_len:\n",
    "        full_tokens+=[tokenizer.pad_token]*(max_len-cur_len)\n",
    "        full_plan_labels+=[padding_O]*(max_len-cur_len)\n",
    "        attention_mask+=[0]*(max_len-cur_len)\n",
    "        token_type_ids+=[0]*(max_len-cur_len)\n",
    "        tag+=[pos2id_BIO[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "    full_ids=tokenizer.convert_tokens_to_ids(full_tokens)\n",
    "    # Multi-hot relation label; all zeros when the example has no relation.\n",
    "    # (The original duplicated the whole example dict in both branches.)\n",
    "    if len(rel)>0:\n",
    "        rel_label=(F.one_hot(torch.tensor(rel),len(id2rels)).sum(dim=0)!=0).long()\n",
    "    else:\n",
    "        rel_label=torch.zeros(len(id2rels)).long()\n",
    "    example={\"input_ids\":torch.tensor(full_ids,dtype=torch.long),\n",
    "             \"token_type_ids\":torch.tensor(token_type_ids,dtype=torch.long),\n",
    "             \"attention_mask\":torch.tensor(attention_mask,dtype=torch.long),\n",
    "             \"rel_label\":rel_label,\n",
    "             \"postag\":torch.tensor(tag).long(),\n",
    "             \"feature\":torch.tensor(tfidf_svd).float(),\n",
    "             \"token_vec\":tokenvec,\"word_vec\":word_vec,\"word_mask\":word_mask,\n",
    "             \"plan_label\":full_plan_labels,\"token2doc\":token2doc}\n",
    "    return example\n",
    "def create_example(tokens,rel,labels,token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc):\n",
    "        \"\"\"Build one sequence-labeling example: [CLS] text [SEP] rel_text [SEP].\n",
    "\n",
    "        Returns padded input_ids / token_type_ids / attention_mask / labels\n",
    "        tensors of length max_len, a one-hot rel_label over id2rels, POS-tag\n",
    "        ids for both segments, the tfidf_svd feature vector and the\n",
    "        token->doc index map.  Assumes the combined length already fits in\n",
    "        max_len (no truncation is performed here).\n",
    "        \"\"\"\n",
    "        # POS tags: text segment bracketed by [CLS]/[SEP] tag ids, then the\n",
    "        # relation-description segment followed by its own [SEP] tag id.\n",
    "        tag=[postag[token2doc[0][idx]] for idx in range(len(tokens))]\n",
    "        tag=[pos2id_BIO[tokenizer.cls_token]]+tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        rel_tag=[rel_postag[rel_token2doc[idx]] for idx in range(len(rel_text))]\n",
    "        rel_tag=rel_tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        tag=tag+rel_tag\n",
    "        second_token=rel_text\n",
    "        # Two segments: [0] the text with BIO labels, [1] the relation text\n",
    "        # labeled entirely as [category].\n",
    "        full_tokens=[[tokenizer.cls_token]+tokens+[tokenizer.sep_token],second_token+[tokenizer.sep_token]]\n",
    "        full_labels=[[label2ids[tokenizer.cls_token]]+labels+[label2ids[tokenizer.sep_token]],[label2ids[\"[category]\"]]*len(second_token)+[label2ids[tokenizer.sep_token]]]\n",
    "        token_type_ids=[0]*len(full_tokens[0])+[1]*len(full_tokens[1])\n",
    "        attention_mask=[1]*len(token_type_ids)\n",
    "        full_tokens=full_tokens[0]+full_tokens[1]\n",
    "        full_labels=full_labels[0]+full_labels[1]\n",
    "        cur_len=len(full_labels)\n",
    "        # Right-pad everything to max_len with the [PAD] token/label ids.\n",
    "        if cur_len<max_len:\n",
    "            full_tokens+=[tokenizer.pad_token]*(max_len-cur_len)\n",
    "            full_labels+=[label2ids[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "            attention_mask+=[0]*(max_len-cur_len)\n",
    "            token_type_ids+=[0]*(max_len-cur_len)\n",
    "            tag+=[pos2id_BIO[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "        full_ids=tokenizer.convert_tokens_to_ids(full_tokens)\n",
    "        example={\"input_ids\":torch.tensor(full_ids,dtype=torch.long),\"token_type_ids\":torch.tensor(token_type_ids,dtype=torch.long),\\\n",
    "                \"attention_mask\":torch.tensor(attention_mask,dtype=torch.long),\"labels\":torch.tensor(full_labels,dtype=torch.long),\n",
    "                \"rel_label\":F.one_hot(torch.tensor(rel),num_classes=len(id2rels)),\"postag\":torch.tensor(tag).long(),\"feature\":torch.tensor(tfidf_svd).float(),\\\n",
    "                 \"token2doc\":token2doc}\n",
    "        return example\n",
    "def create_example_new(tokens,rel,labels,token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc,plan_labels):\n",
    "        \"\"\"Variant of create_example that also carries plan_labels.\n",
    "\n",
    "        Same [CLS] text [SEP] rel_text [SEP] layout, but labels/plan_labels\n",
    "        are padded with the module-level padding_CLS / padding_SEP /\n",
    "        padding_category / padding_PAD rows instead of label2ids scalars, and\n",
    "        the returned dict gains a 'plan_labels' tensor.\n",
    "        \"\"\"\n",
    "        # POS tags for both segments, bracketed with special-token tag ids.\n",
    "        tag=[postag[token2doc[0][idx]] for idx in range(len(tokens))]\n",
    "        tag=[pos2id_BIO[tokenizer.cls_token]]+tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        rel_tag=[rel_postag[rel_token2doc[idx]] for idx in range(len(rel_text))]\n",
    "        rel_tag=rel_tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        tag=tag+rel_tag\n",
    "        second_token=rel_text\n",
    "        full_tokens=[[tokenizer.cls_token]+tokens+[tokenizer.sep_token],second_token+[tokenizer.sep_token]]\n",
    "        full_labels=[[padding_CLS]+labels+[padding_SEP],[padding_category]*len(second_token)+[padding_SEP]]\n",
    "        full_plan_labels=[[padding_CLS]+plan_labels+[padding_SEP],[padding_category]*len(second_token)+[padding_SEP]]\n",
    "        token_type_ids=[0]*len(full_tokens[0])+[1]*len(full_tokens[1])\n",
    "        attention_mask=[1]*len(token_type_ids)\n",
    "        full_tokens=full_tokens[0]+full_tokens[1]\n",
    "        full_labels=full_labels[0]+full_labels[1]\n",
    "        full_plan_labels=full_plan_labels[0]+full_plan_labels[1]\n",
    "        cur_len=len(full_labels)\n",
    "        # Right-pad everything to max_len.\n",
    "        if cur_len<max_len:\n",
    "            full_tokens+=[tokenizer.pad_token]*(max_len-cur_len)\n",
    "            full_labels+=[padding_PAD]*(max_len-cur_len)\n",
    "            full_plan_labels+=[padding_PAD]*(max_len-cur_len)\n",
    "            attention_mask+=[0]*(max_len-cur_len)\n",
    "            token_type_ids+=[0]*(max_len-cur_len)\n",
    "            tag+=[pos2id_BIO[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "        full_ids=tokenizer.convert_tokens_to_ids(full_tokens)\n",
    "        example={\"input_ids\":torch.tensor(full_ids,dtype=torch.long),\"token_type_ids\":torch.tensor(token_type_ids,dtype=torch.long),\\\n",
    "                 \"attention_mask\":torch.tensor(attention_mask,dtype=torch.long),\"labels\":torch.tensor(full_labels,dtype=torch.long),\n",
    "                \"rel_label\":F.one_hot(torch.tensor(rel),num_classes=len(id2rels)),\"postag\":torch.tensor(tag).long(),\"feature\":torch.tensor(tfidf_svd).float(),\\\n",
    "                 \"plan_labels\":torch.tensor(full_plan_labels,dtype=torch.long),\"token2doc\":token2doc}\n",
    "        return example\n",
    "def index_token(text_raw):\n",
    "    \"\"\"Align wordpiece tokens of text_raw to original character offsets.\n",
    "\n",
    "    Returns (tok_to_orig_start_index, tok_to_orig_end_index,\n",
    "    orig_to_tok_index): per wordpiece, the start/end character offsets of\n",
    "    the text piece it came from, and per text piece, the index of its first\n",
    "    wordpiece.  Uses the module-level `tokenizer`.\n",
    "    \"\"\"\n",
    "    # Split text: every CJK character / punctuation mark is its own piece;\n",
    "    # runs of other characters are buffered into a single piece.\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.append(buff)\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.append(buff)\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for token in sub_text:\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        # All wordpieces of this piece share the piece's character span.\n",
    "        # (An unreachable for/else `continue`/`break` tail was removed here:\n",
    "        # the inner loop has no break, so the else always continued.)\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index\n",
    "def _convert_example_to_cls_plan_labels(example,\n",
    "                               tokenizer):\n",
    "    \"\"\"Tag each wordpiece of example['text'] with one-hot B-/I- KG labels.\n",
    "\n",
    "    Builds a len(tokens) x len(id2kglabels) 0/1 matrix from\n",
    "    example['plan_spo_list'] (treated as empty when the key is absent).\n",
    "    For every spo, the first match of the subject and object token spans is\n",
    "    tagged: the start token gets B-<predicate>-SUB / B-<predicate>-OB and\n",
    "    the rest of the span gets the shared 'I' label.  The longer of the two\n",
    "    entities is matched first so the shorter one cannot start inside it.\n",
    "    \"\"\"\n",
    "    if example.__contains__('plan_spo_list'):\n",
    "        spo_list = example['plan_spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    # Split text: CJK characters and punctuation become single pieces,\n",
    "    # runs of other characters stay together in one buffered piece.\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.append(buff)\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.append(buff)\n",
    "    # Wordpiece-tokenize each piece; the char-offset alignment lists are\n",
    "    # built but not returned by this function.\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels = [[0] * len(id2kglabels)\n",
    "              for i in range(len(tokens))]  # initialize tag\n",
    "    for spo in spo_list:\n",
    "            label_subject = kglabels2id['B-'+spo['predicate']+'-SUB']\n",
    "            label_object = kglabels2id['B-'+spo['predicate']+'-OB']\n",
    "            subject_sub_tokens = tokenizer.tokenize(spo['subject'])\n",
    "            object_sub_tokens = tokenizer.tokenize(spo['object'])\n",
    "            forbidden_index = None\n",
    "            # Match the longer entity first; remember its start index so the\n",
    "            # shorter entity is not labeled starting inside that span.\n",
    "            if len(subject_sub_tokens) > len(object_sub_tokens):\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        labels[index][label_subject]=1\n",
    "                        for i in range(len(subject_sub_tokens) - 1):\n",
    "                            labels[index+i+1][kglabels2id['I']]=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_object]=1\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "                        # check if labeled already\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                subject_sub_tokens):\n",
    "                            labels[index][label_object]=1\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "\n",
    "            else:\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        labels[index][label_object]=1\n",
    "                        for i in range(len(object_sub_tokens) - 1):\n",
    "                            labels[index + i + 1][kglabels2id['I']]=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_subject]=1\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                object_sub_tokens):\n",
    "                            labels[index][label_subject]=1\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "    return labels\n",
    "def _convert_example_to_plan_labels(example,\n",
    "                               tokenizer):\n",
    "    \"\"\"Variant of _convert_example_to_cls_plan_labels with custom splitting.\n",
    "\n",
    "    Differences: buffered non-CJK runs are split via cut_buff(), and entity\n",
    "    strings are tokenized with special_tokenize() instead of\n",
    "    tokenizer.tokenize().\n",
    "\n",
    "    NOTE(review): label rows are sized len(new_id2kglabels) but indexed with\n",
    "    old-space kglabels2id ids (including kglabels2id['I'], which has no\n",
    "    counterpart in the new B/E scheme).  This looks inconsistent with\n",
    "    _convert_example_to_plan_labels_new -- confirm which label space the\n",
    "    downstream consumers of this function expect.\n",
    "    \"\"\"\n",
    "    if example.__contains__('plan_spo_list'):\n",
    "        spo_list = example['plan_spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    # Split text: CJK chars / punctuation are single pieces; buffered runs\n",
    "    # of other characters are further split by cut_buff().\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.extend(cut_buff(buff))\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.extend(cut_buff(buff))\n",
    "    # Wordpiece-tokenize each piece; the char-offset alignment lists are\n",
    "    # built but not returned by this function.\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels = [[0] * len(new_id2kglabels)\n",
    "              for i in range(len(tokens))]  # initialize tag\n",
    "    for spo in spo_list:\n",
    "            label_subject = kglabels2id['B-'+spo['predicate']+'-SUB']\n",
    "            label_object = kglabels2id['B-'+spo['predicate']+'-OB']\n",
    "            subject_sub_tokens = special_tokenize(spo['subject'],tokenizer)\n",
    "            object_sub_tokens = special_tokenize(spo['object'],tokenizer)\n",
    "            forbidden_index = None\n",
    "            # Match the longer entity first; remember its start index so the\n",
    "            # shorter entity is not labeled starting inside that span.\n",
    "            if len(subject_sub_tokens) > len(object_sub_tokens):\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        labels[index][label_subject]=1\n",
    "                        for i in range(len(subject_sub_tokens) - 1):\n",
    "                            labels[index+i+1][kglabels2id['I']]=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_object]=1\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "                        # check if labeled already\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                subject_sub_tokens):\n",
    "                            labels[index][label_object]=1\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "\n",
    "            else:\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        labels[index][label_object]=1\n",
    "                        for i in range(len(object_sub_tokens) - 1):\n",
    "                            labels[index + i + 1][kglabels2id['I']]=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_subject]=1\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                object_sub_tokens):\n",
    "                            labels[index][label_subject]=1\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1][kglabels2id['I']]=1\n",
    "                            break\n",
    "    return labels\n",
    "def _convert_example_to_plan_labels_new(example,\n",
    "                               tokenizer):\n",
    "    \"\"\"Like _convert_example_to_plan_labels but with a B/E (begin/end) scheme.\n",
    "\n",
    "    Rows are len(new_id2kglabels) wide and indexed with new_kglabels2id:\n",
    "    a span's first token gets B-<predicate>-SUB/-OB and its last token gets\n",
    "    E-<predicate>-SUB/-OB.  Counts are accumulated with += so repeated spo\n",
    "    entries can stack on the same position.\n",
    "    \"\"\"\n",
    "    if example.__contains__('plan_spo_list'):\n",
    "        spo_list = example['plan_spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    # Split text: CJK chars / punctuation are single pieces; buffered runs\n",
    "    # of other characters are further split by cut_buff().\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.extend(cut_buff(buff))\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.extend(cut_buff(buff))\n",
    "    # Wordpiece-tokenize each piece; the char-offset alignment lists are\n",
    "    # built but not returned by this function.\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels = [[0] * len(new_id2kglabels)\n",
    "              for i in range(len(tokens))]  # initialize tag\n",
    "    for spo in spo_list:\n",
    "            label_subject = new_kglabels2id['B-'+spo['predicate']+'-SUB']\n",
    "            label_object = new_kglabels2id['B-'+spo['predicate']+'-OB']\n",
    "            subject_sub_tokens = special_tokenize(spo['subject'],tokenizer)\n",
    "            object_sub_tokens = special_tokenize(spo['object'],tokenizer)\n",
    "            forbidden_index = None\n",
    "            # Match the longer entity first; remember its start index so the\n",
    "            # shorter entity is not labeled starting inside that span.\n",
    "            if len(subject_sub_tokens) > len(object_sub_tokens):\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        labels[index][label_subject]+=1\n",
    "                        labels[index+len(subject_sub_tokens)-1][new_kglabels2id['E-'+spo['predicate']+'-SUB']]+=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_object]+=1\n",
    "                            labels[index+len(object_sub_tokens)-1][new_kglabels2id['E-'+spo['predicate']+'-OB']]+=1\n",
    "                            break\n",
    "                        # check if labeled already\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                subject_sub_tokens):\n",
    "                            labels[index][label_object]+=1\n",
    "                            labels[index+len(object_sub_tokens)-1][new_kglabels2id['E-'+spo['predicate']+'-OB']]+=1\n",
    "                            break\n",
    "\n",
    "            else:\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        labels[index][label_object]+=1\n",
    "                        labels[index+len(object_sub_tokens)-1][new_kglabels2id['E-'+spo['predicate']+'-OB']]+=1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index][label_subject]+=1\n",
    "                            labels[index+len(subject_sub_tokens)-1][new_kglabels2id['E-'+spo['predicate']+'-SUB']]+=1\n",
    "                            break\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                object_sub_tokens):\n",
    "                            labels[index][label_subject]+=1\n",
    "                            labels[index+len(subject_sub_tokens)-1][new_kglabels2id['E-'+spo['predicate']+'-SUB']]+=1\n",
    "                            break\n",
    "    return labels\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 加载模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "06/18/2020 21:17:29 - INFO - transformers.tokenization_utils -   Model name './roberta-zh-wwm-pytorch/vocab.txt' not found in model shortcut name list (bert-base-uncased, bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese, bert-base-german-cased, bert-large-uncased-whole-word-masking, bert-large-cased-whole-word-masking, bert-large-uncased-whole-word-masking-finetuned-squad, bert-large-cased-whole-word-masking-finetuned-squad, bert-base-cased-finetuned-mrpc, bert-base-german-dbmdz-cased, bert-base-german-dbmdz-uncased, bert-base-finnish-cased-v1, bert-base-finnish-uncased-v1, bert-base-dutch-cased). Assuming './roberta-zh-wwm-pytorch/vocab.txt' is a path, a model identifier, or url to a directory containing tokenizer files.\n",
      "06/18/2020 21:17:29 - WARNING - transformers.tokenization_utils -   Calling BertTokenizer.from_pretrained() with the path to a single file or url is deprecated\n",
      "06/18/2020 21:17:29 - INFO - transformers.tokenization_utils -   loading file ./roberta-zh-wwm-pytorch/vocab.txt\n"
     ]
    }
   ],
   "source": [
    "# Runtime configuration: sequence length, device, and tokenizer source.\n",
    "do_lower_case=True\n",
    "max_len=256\n",
    "device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "# bert_dir=\"./bert-pytorch-chinese/\"\n",
    "# vocab=\"bert-base-chinese-vocab.txt\"\n",
    "# config_file=\"bert_config.json\"\n",
    "# Chinese RoBERTa-wwm checkpoint directory and its vocab file.\n",
    "bert_dir=\"./roberta-zh-wwm-pytorch/\"\n",
    "vocab=\"vocab.txt\"\n",
    "config_file=\"bert_config.json\"\n",
    "tokenizer=BertTokenizer.from_pretrained(os.path.join(bert_dir,vocab),do_lower_case=do_lower_case)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 读取数据构建映射表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "code_folding": [
     1,
     6,
     11,
     16,
     22,
     29,
     36,
     46,
     67
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading dict...\n"
     ]
    }
   ],
   "source": [
    "# Load train/test JSONL data and the relation schema, then build the\n",
    "# relation / label vocabularies used throughout the notebook.\n",
    "text_data=[]\n",
    "with open(\"./dataset/train_data/new_train_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        text_data.append(json.loads(d))\n",
    "test_text_data=[]\n",
    "with open(\"./dataset/test1_data/new_test1_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        test_text_data.append(json.loads(d))\n",
    "test2_text_data=[]\n",
    "with open(\"./dataset/test2_data/new_test2_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        test2_text_data.append(json.loads(d))\n",
    "schema=[]\n",
    "with open(\"./dataset/schema.json\",\"r\") as r:\n",
    "    raw_schema=r.readlines()\n",
    "    for d in raw_schema:\n",
    "        schema.append(json.loads(d))\n",
    "# Relation name set: single-object-slot predicates keep their name;\n",
    "# multi-slot predicates yield one relation per slot (predicate_slot).\n",
    "rels=set()\n",
    "special_rels=set()\n",
    "for e in schema:\n",
    "        if len(e['object_type'].keys())==1:\n",
    "            rels.add(e[\"predicate\"])\n",
    "        else:\n",
    "            special_rels.add(e[\"predicate\"])\n",
    "            for key in e['object_type'].keys():\n",
    "                rels.add(e['predicate']+\"_\"+key)\n",
    "# Build the id<->name mappings once and cache them in dict.pk so ids stay\n",
    "# stable across runs.\n",
    "if not os.path.exists(\"./dataset/dict.pk\"):\n",
    "    special_rels=list(special_rels)\n",
    "    id2rels=list(rels)\n",
    "    rels2id=dict([(rel,idx) for idx,rel in enumerate(id2rels)])\n",
    "    id2labels=[\"O\",\"B-OBJ\",\"I-OBJ\",\"B-SUB\",\"I-SUB\",\"[category]\",\"[SEP]\",\"[CLS]\",\"[PAD]\"]\n",
    "    label2ids=dict([ (label,idx) for idx,label in enumerate(id2labels)])\n",
    "    pk.dump([special_rels,id2rels,rels2id,id2labels,label2ids],open(\"./dataset/dict.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading dict...\")\n",
    "    special_rels,id2rels,rels2id,id2labels,label2ids=pk.load(open(\"./dataset/dict.pk\",\"rb\"))\n",
    "# KG label spaces: old scheme is O/I plus B-* tags; new scheme is O plus\n",
    "# B-*/E-* (begin/end) tags for each relation.\n",
    "id2kglabels=['O','I']+['B-'+e+\"-SUB\" for e in id2rels]+['B-'+e+\"-OB\" for e in id2rels]\n",
    "kglabels2id=dict([ (label,idx) for idx,label in enumerate(id2kglabels)])\n",
    "new_id2labels=[\"O\",\"B-OBJ\",\"E-OBJ\",\"B-SUB\",\"E-SUB\",\"[category]\",\"[SEP]\",\"[CLS]\",\"[PAD]\"]\n",
    "new_label2ids=dict([ (label,idx) for idx,label in enumerate(new_id2labels)])\n",
    "new_id2kglabels=['O']+['B-'+e+\"-SUB\" for e in id2rels]+['B-'+e+\"-OB\" for e in id2rels]+['E-'+e+\"-SUB\" for e in id2rels]+['E-'+e+\"-OB\" for e in id2rels]\n",
    "new_kglabels2id=dict([ (label,idx) for idx,label in enumerate(new_id2kglabels)])\n",
    "# Per relation id, the schema entries that define it; each slot of a\n",
    "# multi-slot predicate gets a deep copy keeping only its own object slot.\n",
    "id2reltype=[[] for i in range(len(id2rels))]\n",
    "for e in schema:\n",
    "    if len(e['object_type'].keys())==1:\n",
    "        rel=e[\"predicate\"]\n",
    "        ids=rels2id[rel]\n",
    "        id2reltype[ids].append(e)\n",
    "    else:\n",
    "        for key in e['object_type'].keys():\n",
    "            rel=e['predicate']+\"_\"+key\n",
    "            ids=rels2id[rel]\n",
    "            temp_e=copy.deepcopy(e)\n",
    "            poped_keys=[]\n",
    "            for k in temp_e['object_type'].keys():\n",
    "                if k!=key:\n",
    "                    poped_keys.append(k)\n",
    "            for k in poped_keys:\n",
    "                 temp_e['object_type'].pop(k)\n",
    "            id2reltype[ids].append(temp_e)\n",
    "id2schema=[e[0] for e in id2reltype]\n",
    "# Per relation: the tokenized subject_type,predicate,object_type description\n",
    "# text, its raw form, and the token-to-char alignment from index_token().\n",
    "id2rel_text=[[] for i in range(len(id2rels))]\n",
    "id2rel_rawtext=[[] for i in range(len(id2rels))]\n",
    "id2rel_token2text=[[] for i in range(len(id2rels))]\n",
    "for rel in range(len(id2rels)):\n",
    "    if id2rels[rel].split(\"_\")[0] not in special_rels:\n",
    "        cls_text=id2schema[rel]['subject_type']+\",\"+id2schema[rel]['predicate']+\",\"+id2schema[rel]['object_type']['@value']\n",
    "    else:\n",
    "        cls_text=id2schema[rel]['subject_type']+\",\"+id2schema[rel]['predicate']+\",\"+id2schema[rel]['object_type'][id2rels[rel].split(\"_\")[1]]\n",
    "    id2rel_text[rel]=tokenizer.tokenize(cls_text)\n",
    "    id2rel_rawtext[rel]=cls_text\n",
    "    id2rel_token2text[rel]=index_token(cls_text)[0]\n",
    "    assert len(id2rel_token2text[rel])==len(id2rel_text[rel])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "code_folding": [
     0,
     32
    ],
    "hidden": true,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# This cell generates POS tags for all relation/description strings.\n",
    "# NOTE: it depends on pos2id_BIO, which is built in section 1.4 (\"build the\n",
    "# POS-tag mapping\") below, so run that section once before running this cell.\n",
    "if not os.path.exists(\"./middle_data/rel_data_postag.pk\"):\n",
    "    jieba.enable_paddle() \n",
    "    jieba.enable_parallel(8)\n",
    "    rel_cut_words=[]\n",
    "    rel_cut_tags=[]\n",
    "    for idx in tqdm(range(len(id2rel_rawtext))):\n",
    "        words = pseg.lcut(id2rel_rawtext[idx],use_paddle=False) # jieba default (non-paddle) mode\n",
    "        new_words=[w for w,t in words]\n",
    "        new_tags=[t for w,t in words]\n",
    "        rel_cut_words.append([idx,new_words])\n",
    "        rel_cut_tags.append([idx,new_tags])\n",
    "    # Restore original order by the idx recorded with each result\n",
    "    # (parallel segmentation may interleave results).\n",
    "    rel_cut_words=[e[1] for e in sorted(rel_cut_words,key=lambda x:x[0])]\n",
    "    rel_cut_tags=[e[1] for e in sorted(rel_cut_tags,key=lambda x:x[0])]\n",
    "    rel_data_postag=[]\n",
    "    for idx in tqdm(range(len(id2rel_rawtext))):\n",
    "        # Segmentation must exactly cover the (stripped) raw text.\n",
    "        assert len(id2rel_rawtext[idx].strip())==len(\"\".join(rel_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in rel_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        pos_label=np.zeros(len(id2rel_rawtext[idx])).astype(np.int8)\n",
    "        # NOTE(review): the check below mixes stripped and unstripped indexing;\n",
    "        # it appears to assume the raw text has no leading whitespace -- confirm.\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (id2rel_rawtext[idx][b]==rel_cut_words[idx][i][0] or _is_whitespace(id2rel_rawtext[idx].strip()[b])\\\n",
    "                   or _is_whitespace(rel_cut_words[idx][i][0])) \\\n",
    "                    and (id2rel_rawtext[idx].strip()[e]==rel_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(id2rel_rawtext[idx].strip()[e])  or _is_whitespace(rel_cut_words[idx][i][-1]))\n",
    "            # BIO scheme: first char of the word gets the B-tag, the rest get I-tags.\n",
    "            pos_label[b+1:e+1]=pos2id_BIO['I-'+rel_cut_tags[idx][i]]\n",
    "            pos_label[b]=pos2id_BIO['B-'+rel_cut_tags[idx][i]]\n",
    "        rel_data_postag.append(pos_label)\n",
    "    pk.dump(rel_data_postag,open(\"./middle_data/rel_data_postag.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    rel_data_postag=pk.load(open(\"./middle_data/rel_data_postag.pk\",\"rb\"))\n",
    "# Hand-picked relation indices with special handling.\n",
    "# NOTE(review): the meaning of these magic ids comes from id2rels built in an\n",
    "# earlier cell -- verify against that mapping before changing anything here.\n",
    "special_major_idx=[2,4,22,32,54]\n",
    "special_affilate_idx=[[] for i in range(len(id2rels))]\n",
    "special_affilate_idx[2]=[5]\n",
    "special_affilate_idx[4]=[0]\n",
    "special_affilate_idx[22]=[51]\n",
    "special_affilate_idx[32]=[8]\n",
    "special_affilate_idx[54]=[6,11,14]\n",
    "special_pass_idx=[0,5,6,8,11,14,51]\n",
    "# One-hot padding label vectors over the new_id2labels label space.\n",
    "padding_O=[0]*len(new_id2labels)\n",
    "padding_O[0]=1\n",
    "padding_category=[0]*len(new_id2labels)\n",
    "padding_category[5]=1\n",
    "padding_SEP=[0]*len(new_id2labels)\n",
    "padding_SEP[6]=1\n",
    "padding_CLS=[0]*len(new_id2labels)\n",
    "padding_CLS[7]=1\n",
    "padding_PAD=[0]*len(new_id2labels)\n",
    "padding_PAD[8]=1 #for pointer ner"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 建立词性tag"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "code_folding": [
     0,
     17,
     20
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n",
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# Segment the training texts with jieba and build the POS-tag vocabulary\n",
    "# (id2pos / pos2id) plus per-character BIO POS labels (text_data_postag).\n",
    "if not os.path.exists(\"./middle_data/train_postag.pk\"):\n",
    "    postag_set=set()\n",
    "    jieba.enable_parallel(8)\n",
    "    train_cut_words=[]\n",
    "    train_cut_tags=[]\n",
    "    for idx in tqdm(range(len(text_data))):\n",
    "        words = pseg.lcut(text_data[idx]['text'],use_paddle=False) # jieba default (non-paddle) mode\n",
    "        new_words=[w for w,t in words]\n",
    "        new_tags=[t for w,t in words]\n",
    "        postag_set.update(new_tags)\n",
    "        train_cut_words.append([idx,new_words])\n",
    "        train_cut_tags.append([idx,new_tags])\n",
    "    # Restore original order (parallel segmentation may interleave results).\n",
    "    train_cut_words=[e[1] for e in sorted(train_cut_words,key=lambda x:x[0])]\n",
    "    train_cut_tags=[e[1] for e in sorted(train_cut_tags,key=lambda x:x[0])]\n",
    "    id2pos=list(postag_set)\n",
    "    pos2id=dict([(pos,idx) for idx,pos in enumerate(id2pos)])\n",
    "    pk.dump([id2pos,pos2id,train_cut_words,train_cut_tags],open(\"./middle_data/train_postag.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    id2pos,pos2id,train_cut_words,train_cut_tags=pk.load(open(\"./middle_data/train_postag.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/train_postag_BIO.pk\"):\n",
    "    # Expand the tag set to BIO form plus special tokens.\n",
    "    id2pos_BIO=['B-'+e for e in id2pos]+['I-'+e for e in id2pos]\n",
    "    id2pos_BIO.extend(['[CLS]','[SEP]','[PAD]'])\n",
    "    pos2id_BIO=dict([(pos,idx) for idx,pos in enumerate(id2pos_BIO)])\n",
    "\n",
    "    text_data_postag=[]\n",
    "    for idx in tqdm(range(len(text_data))):\n",
    "        # Segmentation must exactly cover the text.\n",
    "        assert len(text_data[idx]['text'])==len(\"\".join(train_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in train_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        pos_label=np.zeros(len(text_data[idx]['text'])).astype(np.int8)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (text_data[idx]['text'][b]==train_cut_words[idx][i][0] or _is_whitespace(text_data[idx]['text'][b])\\\n",
    "                   or _is_whitespace(train_cut_words[idx][i][0])) \\\n",
    "                    and (text_data[idx]['text'][e]==train_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(text_data[idx]['text'][e])  or _is_whitespace(train_cut_words[idx][i][-1]))\n",
    "            # BIO: first char gets the B-tag, remaining chars get I-tags.\n",
    "            pos_label[b+1:e+1]=pos2id_BIO['I-'+train_cut_tags[idx][i]]\n",
    "            pos_label[b]=pos2id_BIO['B-'+train_cut_tags[idx][i]]\n",
    "        text_data_postag.append(pos_label)\n",
    "    pk.dump([id2pos_BIO,pos2id_BIO,text_data_postag],open(\"./middle_data/train_postag_BIO.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    id2pos_BIO,pos2id_BIO,text_data_postag=pk.load(open(\"./middle_data/train_postag_BIO.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "code_folding": [
     0,
     14,
     17,
     37
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n",
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# Segment the test texts with jieba and build per-character BIO POS labels.\n",
    "# NOTE: depends on pos2id_BIO built by the training-set cell above.\n",
    "if not os.path.exists(\"./middle_data/test_postag.pk\"):\n",
    "    jieba.enable_paddle() \n",
    "    jieba.enable_parallel(8)\n",
    "    test_cut_words=[]\n",
    "    test_cut_tags=[]\n",
    "    for idx in tqdm(range(len(test_text_data))):\n",
    "        words = pseg.lcut(test_text_data[idx]['text'],use_paddle=False) # jieba default (non-paddle) mode\n",
    "        new_words=[w for w,t in words]\n",
    "        new_tags=[t for w,t in words]\n",
    "        test_cut_words.append([idx,new_words])\n",
    "        test_cut_tags.append([idx,new_tags])\n",
    "    # Restore original order (parallel segmentation may interleave results).\n",
    "    test_cut_words=[e[1] for e in sorted(test_cut_words,key=lambda x:x[0])]\n",
    "    test_cut_tags=[e[1] for e in sorted(test_cut_tags,key=lambda x:x[0])]\n",
    "    pk.dump([test_cut_words,test_cut_tags],open(\"./middle_data/test_postag.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_cut_words,test_cut_tags=pk.load(open(\"./middle_data/test_postag.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/test_postag_BIO.pk\"):\n",
    "\n",
    "    test_text_data_postag=[]\n",
    "    for idx in tqdm(range(len(test_text_data))):\n",
    "        # Segmentation must exactly cover the text.\n",
    "        assert len(test_text_data[idx]['text'])==len(\"\".join(test_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in test_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        pos_label=np.zeros(len(test_text_data[idx]['text'])).astype(np.int8)\n",
    "        # NOTE(review): the check below mixes stripped and unstripped indexing;\n",
    "        # it appears to assume texts have no leading whitespace -- confirm.\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (test_text_data[idx]['text'][b]==test_cut_words[idx][i][0] or _is_whitespace(test_text_data[idx]['text'].strip()[b])\\\n",
    "                   or _is_whitespace(test_cut_words[idx][i][0])) \\\n",
    "                    and (test_text_data[idx]['text'][e]==test_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(test_text_data[idx]['text'][e])  or _is_whitespace(test_cut_words[idx][i][-1]))\n",
    "            # BIO: first char gets the B-tag, remaining chars get I-tags.\n",
    "            pos_label[b+1:e+1]=pos2id_BIO['I-'+test_cut_tags[idx][i]]\n",
    "            pos_label[b]=pos2id_BIO['B-'+test_cut_tags[idx][i]]\n",
    "        test_text_data_postag.append(pos_label)\n",
    "    pk.dump(test_text_data_postag,open(\"./middle_data/test_postag_BIO.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_text_data_postag=pk.load(open(\"./middle_data/test_postag_BIO.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "code_folding": [
     0,
     14,
     17
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n",
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# Segment the second test set with jieba and build per-character BIO POS labels.\n",
    "# NOTE: depends on pos2id_BIO built by the training-set cell above.\n",
    "if not os.path.exists(\"./middle_data/test2_postag.pk\"):\n",
    "    jieba.enable_paddle() \n",
    "    jieba.enable_parallel(8)\n",
    "    test2_cut_words=[]\n",
    "    test2_cut_tags=[]\n",
    "    for idx in tqdm(range(len(test2_text_data))):\n",
    "        words = pseg.lcut(test2_text_data[idx]['text'],use_paddle=False) # jieba default (non-paddle) mode\n",
    "        new_words=[w for w,t in words]\n",
    "        new_tags=[t for w,t in words]\n",
    "        test2_cut_words.append([idx,new_words])\n",
    "        test2_cut_tags.append([idx,new_tags])\n",
    "    # Restore original order (parallel segmentation may interleave results).\n",
    "    test2_cut_words=[e[1] for e in sorted(test2_cut_words,key=lambda x:x[0])]\n",
    "    test2_cut_tags=[e[1] for e in sorted(test2_cut_tags,key=lambda x:x[0])]\n",
    "    pk.dump([test2_cut_words,test2_cut_tags],open(\"./middle_data/test2_postag.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test2_cut_words,test2_cut_tags=pk.load(open(\"./middle_data/test2_postag.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/test2_postag_BIO.pk\"):\n",
    "\n",
    "    test2_text_data_postag=[]\n",
    "    for idx in tqdm(range(len(test2_text_data))):\n",
    "        # Segmentation must exactly cover the text.\n",
    "        assert len(test2_text_data[idx]['text'])==len(\"\".join(test2_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in test2_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        pos_label=np.zeros(len(test2_text_data[idx]['text'])).astype(np.int8)\n",
    "        # NOTE(review): the check below mixes stripped and unstripped indexing;\n",
    "        # it appears to assume texts have no leading whitespace -- confirm.\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (test2_text_data[idx]['text'][b]==test2_cut_words[idx][i][0] or _is_whitespace(test2_text_data[idx]['text'].strip()[b])\\\n",
    "                   or _is_whitespace(test2_cut_words[idx][i][0])) \\\n",
    "                    and (test2_text_data[idx]['text'][e]==test2_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(test2_text_data[idx]['text'][e])  or _is_whitespace(test2_cut_words[idx][i][-1]))\n",
    "            # BIO: first char gets the B-tag, remaining chars get I-tags.\n",
    "            pos_label[b+1:e+1]=pos2id_BIO['I-'+test2_cut_tags[idx][i]]\n",
    "            pos_label[b]=pos2id_BIO['B-'+test2_cut_tags[idx][i]]\n",
    "        test2_text_data_postag.append(pos_label)\n",
    "    pk.dump(test2_text_data_postag,open(\"./middle_data/test2_postag_BIO.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test2_text_data_postag=pk.load(open(\"./middle_data/test2_postag_BIO.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 读取词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "05/20/2020 17:06:34 - INFO - gensim.utils -   loading Word2VecKeyedVectors object from ./Tencent_ChineseEmbedding/ChineseEmbedding.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "05/20/2020 17:07:30 - INFO - gensim.utils -   loading vectors from ./Tencent_ChineseEmbedding/ChineseEmbedding.bin.vectors.npy with mmap=r\n",
      "05/20/2020 17:07:30 - INFO - gensim.utils -   setting ignored attribute vectors_norm to None\n",
      "05/20/2020 17:07:30 - INFO - gensim.utils -   loaded ./Tencent_ChineseEmbedding/ChineseEmbedding.bin\n"
     ]
    }
   ],
   "source": [
    "# Load the Tencent word embeddings: convert the raw text file to gensim's\n",
    "# binary format once, then memory-map the binary on subsequent runs.\n",
    "if os.path.exists('./Tencent_ChineseEmbedding/ChineseEmbedding.bin'):\n",
    "    print(\"loading...\")\n",
    "    wv_from_text = gensim.models.KeyedVectors.load('./Tencent_ChineseEmbedding/ChineseEmbedding.bin', mmap='r')\n",
    "else:\n",
    "    file = './Tencent_ChineseEmbedding/Tencent_AILab_ChineseEmbedding.txt'\n",
    "    wv_from_text = KeyedVectors.load_word2vec_format(file, binary=False)\n",
    "    wv_from_text.init_sims(replace=True)\n",
    "    # Re-save the loaded vectors in binary form for fast reloads.\n",
    "    wv_from_text.save('./Tencent_ChineseEmbedding/ChineseEmbedding.bin')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "code_folding": [
     9
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading extra tokens vector file...\n",
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# Build the extra-token embedding rows and the word<->id mappings aligned\n",
    "# with the concatenated embedding matrix (ids 0-3 are reserved for\n",
    "# PAD/UNK/CLS/SEP; real words start at id 4).\n",
    "if not os.path.exists(\"./Tencent_ChineseEmbedding/extra_embedding.npy\"):\n",
    "    word2vec_dims=wv_from_text.vectors.shape[-1]\n",
    "    # Row 0 is an all-zero padding vector; rows 1-3 are random-normal vectors.\n",
    "    pad_vec=np.zeros([1,word2vec_dims],dtype=np.float32)\n",
    "    other_token_vec=np.random.normal(scale=0.02,size=[3,word2vec_dims])\n",
    "    extra_token_vec=np.concatenate([pad_vec,other_token_vec])\n",
    "    np.save(\"./Tencent_ChineseEmbedding/extra_embedding.npy\",extra_token_vec)\n",
    "else:\n",
    "    print(\"loading extra tokens vector file...\")\n",
    "    extra_token_vec=np.load(\"./Tencent_ChineseEmbedding/extra_embedding.npy\")\n",
    "if not os.path.exists(\"./Tencent_ChineseEmbedding/word_id.pk\"):\n",
    "    # NOTE(review): this branch reads `word2vec` (defined in a LATER cell) and\n",
    "    # `keys` (not defined anywhere in this notebook section) -- it only works\n",
    "    # under a specific out-of-order execution; confirm the intended run order.\n",
    "    word2id={}\n",
    "    id2word=[[] for i in range(word2vec.shape[0])]\n",
    "    for i,key in tqdm(enumerate(keys)):\n",
    "        # Offset by 4: ids 0-3 are reserved for the special tokens below.\n",
    "        assert (word2vec[4+i]==wv_from_text[key]).all()\n",
    "        word2id[key]=4+i\n",
    "        id2word[4+i]=key\n",
    "    id2word[0]=tokenizer.pad_token\n",
    "    word2id[tokenizer.pad_token]=0\n",
    "    id2word[1]=tokenizer.unk_token\n",
    "    word2id[tokenizer.unk_token]=1\n",
    "    id2word[2]=tokenizer.cls_token\n",
    "    word2id[tokenizer.cls_token]=2\n",
    "    id2word[3]=tokenizer.sep_token\n",
    "    word2id[tokenizer.sep_token]=3\n",
    "    # protocol=4 supports pickling objects larger than 4GB.\n",
    "    pk.dump([word2id,id2word],open(\"./Tencent_ChineseEmbedding/word_id.pk\",\"wb\"),protocol = 4)\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    word2id,id2word=pk.load(open(\"./Tencent_ChineseEmbedding/word_id.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Full embedding matrix: the 4 special-token rows followed by the Tencent vectors.\n",
    "word2vec=np.concatenate((extra_token_vec,wv_from_text.vectors),axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0,
     18
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Segment training texts with jieba (HMM off) and cache both the word lists\n",
    "# and the per-word embedding ids; then expand to per-character embedding ids.\n",
    "if not os.path.exists(\"./middle_data/train_wordvec.pk\"):\n",
    "    jieba.enable_parallel(8)\n",
    "    train_cut_words=[]\n",
    "    train_cut_vecs=[]\n",
    "    for idx in tqdm(range(len(text_data))):\n",
    "        word_list=jieba.lcut(text_data[idx]['text'],HMM=False,use_paddle=False)\n",
    "        # Words missing from word2id fall back to id 1 (the unknown token).\n",
    "        wvid_list=[ word2id.get(e,1)  for e in word_list ]\n",
    "        train_cut_words.append([idx,word_list])\n",
    "        train_cut_vecs.append([idx,wvid_list])\n",
    "    # Restore original order (parallel segmentation may interleave results).\n",
    "    train_cut_words=[e[1] for e in sorted(train_cut_words,key=lambda x:x[0])]\n",
    "    train_cut_vecs=[e[1] for e in sorted(train_cut_vecs,key=lambda x:x[0])]\n",
    "    pk.dump([train_cut_words,train_cut_vecs],open(\"./middle_data/train_wordvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    train_cut_words,train_cut_vecs=pk.load(open(\"./middle_data/train_wordvec.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/train_tokenvec.pk\"):\n",
    "    text_data_tokenvec=[]\n",
    "    for idx in tqdm(range(len(text_data))):\n",
    "        # Segmentation must exactly cover the text.\n",
    "        assert len(text_data[idx]['text'])==len(\"\".join(train_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in train_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        # Fix: use the builtin `int` -- the deprecated alias `np.int` was removed\n",
    "        # in NumPy 1.24 (it was an alias of the builtin, so the dtype is unchanged).\n",
    "        vec_label=np.zeros((len(text_data[idx]['text']))).astype(int)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (text_data[idx]['text'][b]==train_cut_words[idx][i][0] or _is_whitespace(text_data[idx]['text'][b])\\\n",
    "                   or _is_whitespace(train_cut_words[idx][i][0])) \\\n",
    "                    and (text_data[idx]['text'][e]==train_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(text_data[idx]['text'][e])  or _is_whitespace(train_cut_words[idx][i][-1]))\n",
    "            # Every character of a word shares that word's embedding id.\n",
    "            vec_label[b:e+1]=train_cut_vecs[idx][i]\n",
    "        text_data_tokenvec.append(vec_label)\n",
    "    pk.dump(text_data_tokenvec,open(\"./middle_data/train_tokenvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    text_data_tokenvec=pk.load(open(\"./middle_data/train_tokenvec.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0,
     15
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Segment test texts with jieba (HMM off) and cache word lists plus per-word\n",
    "# embedding ids; then expand to per-character embedding ids.\n",
    "if not os.path.exists(\"./middle_data/test_wordvec.pk\"):\n",
    "    jieba.enable_parallel(8)\n",
    "    test_cut_words=[]\n",
    "    test_cut_vecs=[]\n",
    "    for idx in tqdm(range(len(test_text_data))):\n",
    "        word_list=jieba.lcut(test_text_data[idx]['text'],HMM=False,use_paddle=False)\n",
    "        # Words missing from word2id fall back to id 1 (the unknown token).\n",
    "        wvid_list=[ word2id.get(e,1)  for e in word_list ]\n",
    "        test_cut_words.append([idx,word_list])\n",
    "        test_cut_vecs.append([idx,wvid_list])\n",
    "    # Restore original order (parallel segmentation may interleave results).\n",
    "    test_cut_words=[e[1] for e in sorted(test_cut_words,key=lambda x:x[0])]\n",
    "    test_cut_vecs=[e[1] for e in sorted(test_cut_vecs,key=lambda x:x[0])]\n",
    "    pk.dump([test_cut_words,test_cut_vecs],open(\"./middle_data/test_wordvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_cut_words,test_cut_vecs=pk.load(open(\"./middle_data/test_wordvec.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/test_tokenvec.pk\"):\n",
    "    test_text_data_tokenvec=[]\n",
    "    for idx in tqdm(range(len(test_text_data))):\n",
    "        # Segmentation must exactly cover the text.\n",
    "        assert len(test_text_data[idx]['text'])==len(\"\".join(test_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in test_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        # Fix: use the builtin `int` -- the deprecated alias `np.int` was removed\n",
    "        # in NumPy 1.24 (it was an alias of the builtin, so the dtype is unchanged).\n",
    "        vec_label=np.zeros((len(test_text_data[idx]['text']),)).astype(int)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (test_text_data[idx]['text'][b]==test_cut_words[idx][i][0] or _is_whitespace(test_text_data[idx]['text'][b])\\\n",
    "                   or _is_whitespace(test_cut_words[idx][i][0])) \\\n",
    "                    and (test_text_data[idx]['text'][e]==test_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(test_text_data[idx]['text'][e])  or _is_whitespace(test_cut_words[idx][i][-1]))\n",
    "            # Every character of a word shares that word's embedding id.\n",
    "            vec_label[b:e+1]=test_cut_vecs[idx][i]\n",
    "        test_text_data_tokenvec.append(vec_label)\n",
    "    pk.dump(test_text_data_tokenvec,open(\"./middle_data/test_tokenvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_text_data_tokenvec=pk.load(open(\"./middle_data/test_tokenvec.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0,
     12,
     15
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Segment the second test set with jieba (HMM off) and cache word lists plus\n",
    "# per-word embedding ids; then expand to per-character embedding ids.\n",
    "if not os.path.exists(\"./middle_data/test2_wordvec.pk\"):\n",
    "    jieba.enable_parallel(8)\n",
    "    test2_cut_words=[]\n",
    "    test2_cut_vecs=[]\n",
    "    for idx in tqdm(range(len(test2_text_data))):\n",
    "        word_list=jieba.lcut(test2_text_data[idx]['text'],HMM=False,use_paddle=False)\n",
    "        # Words missing from word2id fall back to id 1 (the unknown token).\n",
    "        wvid_list=[ word2id.get(e,1)  for e in word_list ]\n",
    "        test2_cut_words.append([idx,word_list])\n",
    "        test2_cut_vecs.append([idx,wvid_list])\n",
    "    # Restore original order (parallel segmentation may interleave results).\n",
    "    test2_cut_words=[e[1] for e in sorted(test2_cut_words,key=lambda x:x[0])]\n",
    "    test2_cut_vecs=[e[1] for e in sorted(test2_cut_vecs,key=lambda x:x[0])]\n",
    "    pk.dump([test2_cut_words,test2_cut_vecs],open(\"./middle_data/test2_wordvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test2_cut_words,test2_cut_vecs=pk.load(open(\"./middle_data/test2_wordvec.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/test2_tokenvec.pk\"):\n",
    "    test2_text_data_tokenvec=[]\n",
    "    for idx in tqdm(range(len(test2_text_data))):\n",
    "        # Segmentation must exactly cover the text.\n",
    "        assert len(test2_text_data[idx]['text'])==len(\"\".join(test2_cut_words[idx]))\n",
    "        # Character span [start, end] (inclusive) of each segmented word.\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in test2_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        # Fix: use the builtin `int` -- the deprecated alias `np.int` was removed\n",
    "        # in NumPy 1.24 (it was an alias of the builtin, so the dtype is unchanged).\n",
    "        vec_label=np.zeros((len(test2_text_data[idx]['text']),)).astype(int)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (test2_text_data[idx]['text'][b]==test2_cut_words[idx][i][0] or _is_whitespace(test2_text_data[idx]['text'][b])\\\n",
    "                   or _is_whitespace(test2_cut_words[idx][i][0])) \\\n",
    "                    and (test2_text_data[idx]['text'][e]==test2_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(test2_text_data[idx]['text'][e])  or _is_whitespace(test2_cut_words[idx][i][-1]))\n",
    "            # Every character of a word shares that word's embedding id.\n",
    "            vec_label[b:e+1]=test2_cut_vecs[idx][i]\n",
    "        test2_text_data_tokenvec.append(vec_label)\n",
    "    pk.dump(test2_text_data_tokenvec,open(\"./middle_data/test2_tokenvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test2_text_data_tokenvec=pk.load(open(\"./middle_data/test2_tokenvec.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## TFIDF-SVD"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "code_folding": [
     0,
     5
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# TF-IDF + truncated-SVD (16-dim) document features for the training texts,\n",
    "# cached on disk together with the fitted vectorizers.\n",
    "if os.path.exists(\"./middle_data/texts_tfidf_svd.pk\"):\n",
    "    print(\"loading...\")\n",
    "    texts_tfidf_svd=pk.load(open(\"./middle_data/texts_tfidf_svd.pk\",\"rb\"))\n",
    "else:\n",
    "    texts_tfidf_svd,tfidf_vectorizer,svd_vectorizer=build_tfidf_svd_matrix(text_data,16)\n",
    "    joblib.dump(tfidf_vectorizer,\"./middle_data/tfidf_vectorizer.sk.model\")\n",
    "    joblib.dump(svd_vectorizer,\"./middle_data/svd_vectorizer.sk.model\")\n",
    "    pk.dump(texts_tfidf_svd,open(\"./middle_data/texts_tfidf_svd.pk\",\"wb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "code_folding": [
     0,
     6
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# TF-IDF + SVD features for the test texts, reusing the fitted training\n",
    "# vectorizers loaded from disk.\n",
    "if os.path.exists(\"./middle_data/test_texts_tfidf_svd.pk\"):\n",
    "    print(\"loading...\")\n",
    "    test_texts_tfidf_svd=pk.load(open(\"./middle_data/test_texts_tfidf_svd.pk\",\"rb\"))\n",
    "else:\n",
    "    tfidf_vectorizer=joblib.load(\"./middle_data/tfidf_vectorizer.sk.model\")\n",
    "    svd_vectorizer=joblib.load(\"./middle_data/svd_vectorizer.sk.model\")\n",
    "    test_texts_tfidf_svd,test_tfidf_vectorizer,test_svd_vectorizer=build_tfidf_svd_matrix(test_text_data,16,tfidf_vectorizer,svd_vectorizer)\n",
    "    pk.dump(test_texts_tfidf_svd,open(\"./middle_data/test_texts_tfidf_svd.pk\",\"wb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "code_folding": [
     0,
     6
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# TF-IDF + SVD features for the second test set, reusing the fitted training\n",
    "# vectorizers loaded from disk.\n",
    "if os.path.exists(\"./middle_data/test2_texts_tfidf_svd.pk\"):\n",
    "    print(\"loading...\")\n",
    "    test2_texts_tfidf_svd=pk.load(open(\"./middle_data/test2_texts_tfidf_svd.pk\",\"rb\"))\n",
    "else:\n",
    "    tfidf_vectorizer=joblib.load(\"./middle_data/tfidf_vectorizer.sk.model\")\n",
    "    svd_vectorizer=joblib.load(\"./middle_data/svd_vectorizer.sk.model\")\n",
    "    test2_texts_tfidf_svd,test2_tfidf_vectorizer,test2_svd_vectorizer=build_tfidf_svd_matrix(test2_text_data,16,tfidf_vectorizer,svd_vectorizer)\n",
    "    pk.dump(test2_texts_tfidf_svd,open(\"./middle_data/test2_texts_tfidf_svd.pk\",\"wb\"))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 关系分类数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "code_folding": [],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# Build per-example relation \"plan\" labels for the training set.\n",
    "if not os.path.exists(\"./middle_data/train_plan_labels.pk\"):\n",
    "    train_plan_labels=[]\n",
    "    for e in tqdm(text_data):\n",
    "        train_plan_labels.append(_convert_example_to_cls_plan_labels(e,tokenizer))\n",
    "    # Rows with no active label get the null class (index 0) switched on.\n",
    "    for doc_labels in tqdm(train_plan_labels):\n",
    "        for row in doc_labels:\n",
    "            if sum(row)==0:\n",
    "                row[0]=1\n",
    "    pk.dump(train_plan_labels,open(\"./middle_data/train_plan_labels.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    train_plan_labels=pk.load(open(\"./middle_data/train_plan_labels.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# Tokenize each training example once and cache the tokens, relation labels\n",
    "# and the token<->document offset maps used downstream.\n",
    "if not os.path.exists(\"./middle_data/doc_cls_info.pk\"):\n",
    "    cls_doc_tokens=[]\n",
    "    cls_doc_rels=[]\n",
    "    cls_token2doc=[]\n",
    "    for example in tqdm(text_data):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,rel_labels=_convert_example_to_cls_record(example,tokenizer)\n",
    "        cls_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "        cls_doc_tokens.append(tokens)\n",
    "        cls_doc_rels.append(rel_labels)\n",
    "    pk.dump([cls_doc_rels,cls_doc_tokens,cls_token2doc],open(\"./middle_data/doc_cls_info.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    cls_doc_rels,cls_doc_tokens,cls_token2doc=pk.load(open(\"./middle_data/doc_cls_info.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "code_folding": [],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading example..\n"
     ]
    }
   ],
   "source": [
    "# Assemble classifier training examples from the cached per-document pieces.\n",
    "if not os.path.exists(\"./middle_data/cls_examples.pk\"):\n",
    "    cls_examples,train_wordvecs,train_tokenvecs=[],[],[]\n",
    "    # Word-vector features (args 6-7: train_cut_vecs[idx], text_data_tokenvec[idx])\n",
    "    # are disabled here: passed as None.\n",
    "    for idx,(tokens,rel,token2doc,postag) in tqdm(enumerate(zip(cls_doc_tokens,cls_doc_rels,cls_token2doc,text_data_postag))):\n",
    "        example=create_cls_example(tokens,rel,token2doc,postag,texts_tfidf_svd[idx],\n",
    "                                   None,None,train_plan_labels[idx],tokenizer)\n",
    "        cls_examples.append(example)\n",
    "    pk.dump(cls_examples,open(\"./middle_data/cls_examples.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    cls_examples=pk.load(open(\"./middle_data/cls_examples.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# 3-fold CV split; stitch the per-fold out-of-fold prediction logits back\n",
    "# together into one (n_examples, 55) matrix aligned with cls_examples.\n",
    "cls_examples=np.array(cls_examples)\n",
    "spliter=KFold(n_splits=3,shuffle=True,random_state=20)\n",
    "train_indexs=[]\n",
    "val_indexs=[]\n",
    "for idx,(train_index,val_index) in enumerate(spliter.split(cls_examples)):\n",
    "    train_indexs.append(train_index)\n",
    "    val_indexs.append(val_index)\n",
    "cls_logits=[np.load(\"./cross_results/cls_\"+str(fold)+\".npy\") for fold in range(3)]\n",
    "pred_cls_logits=np.zeros((len(cls_examples),55))\n",
    "for fold in range(3):\n",
    "    pred_cls_logits[val_indexs[fold]]=cls_logits[fold]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Quick fixed split for local experimentation.\n",
    "# NOTE(review): hard-coded 10000/10000 slice boundaries -- confirm dataset size.\n",
    "cls_train_dataset=RelDataset(cls_examples[:10000])\n",
    "cls_val_dataset=RelDataset(cls_examples[10000:20000])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Build per-example relation \"plan\" labels for the test set.\n",
    "if not os.path.exists(\"./middle_data/test_plan_labels.pk\"):\n",
    "    test_plan_labels=[]\n",
    "    for e in tqdm(test_text_data):\n",
    "        test_plan_labels.append(_convert_example_to_cls_plan_labels(e,tokenizer))\n",
    "    # Rows with no active label get the null class (index 0) switched on.\n",
    "    for doc_labels in tqdm(test_plan_labels):\n",
    "        for row in doc_labels:\n",
    "            if sum(row)==0:\n",
    "                row[0]=1\n",
    "    pk.dump(test_plan_labels,open(\"./middle_data/test_plan_labels.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_plan_labels=pk.load(open(\"./middle_data/test_plan_labels.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Tokenize each test example once and cache the tokens, relation labels and\n",
    "# the token<->document offset maps.\n",
    "if not os.path.exists(\"./middle_data/test_doc_cls_info.pk\"):\n",
    "    test_cls_doc_tokens=[]\n",
    "    test_cls_doc_rels=[]\n",
    "    test_cls_token2doc=[]\n",
    "    for example in tqdm(test_text_data):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,rel_labels=_convert_example_to_cls_record(example,tokenizer)\n",
    "        test_cls_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "        test_cls_doc_tokens.append(tokens)\n",
    "        test_cls_doc_rels.append(rel_labels)\n",
    "    pk.dump([test_cls_doc_rels,test_cls_doc_tokens,test_cls_token2doc],open(\"./middle_data/test_doc_cls_info.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_cls_doc_rels,test_cls_doc_tokens,test_cls_token2doc=pk.load(open(\"./middle_data/test_doc_cls_info.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading example..\n"
     ]
    }
   ],
   "source": [
    "# Assemble classification examples for the test set from tokens, relation\n",
    "# labels, index maps, POS tags, TF-IDF/SVD features and plan labels.\n",
    "# NOTE(review): the two None args are cut_vecs / tokenvec features -- the\n",
    "# test2 variant of this cell passes real vectors; confirm this is intended.\n",
    "if not os.path.exists(\"./middle_data/test_cls_examples.pk\"):\n",
    "    test_cls_examples=[]\n",
    "    #test_cut_vecs[idx],test_text_data_tokenvec[idx]\n",
    "    for idx,(tokens,rel,token2doc,postag) in tqdm(enumerate(zip(test_cls_doc_tokens,test_cls_doc_rels,test_cls_token2doc,test_text_data_postag))):   \n",
    "            example=create_cls_example(tokens,rel,token2doc,postag,test_texts_tfidf_svd[idx],\\\n",
    "                                       None,None,test_plan_labels[idx],tokenizer)\n",
    "            test_cls_examples.append(example)   \n",
    "    pk.dump(test_cls_examples,open(\"./middle_data/test_cls_examples.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    test_cls_examples=pk.load(open(\"./middle_data/test_cls_examples.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "cls_test_dataset=RelDataset(test_cls_examples[:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "#model ensemble 部分\n",
    "preds=[]\n",
    "\n",
    "for idx in range(3):\n",
    "    preds.append(np.load(\"./cross_results/test_cls_\"+str(idx)+\".npy\"))\n",
    "preds.append(np.load(\"./cross_results/test_cls_full.npy\"))\n",
    "preds=np.stack(preds,axis=2)\n",
    "preds=preds.mean(axis=-1)\n",
    "# preds=(preds>0.5).astype(np.int)\n",
    "pred_cls_logits=preds\n",
    "# cls_logits=[]\n",
    "# for idx in range(3):\n",
    "#     cls_logits.append(np.load(\"./cross_results/cls_\"+str(idx)+\".npy\"))\n",
    "# pred_cls_logits=np.zeros((len(cls_examples),55))\n",
    "# for idx in range(3):\n",
    "#     pred_cls_logits[val_indexs[idx]]=cls_logits[idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Same as the test-set plan-label cell, for the test2 split: build (or load\n",
    "# cached) plan labels, forcing index 0 on for all-zero label vectors.\n",
    "if not os.path.exists(\"./middle_data/test2_plan_labels.pk\"):\n",
    "    test2_plan_labels=[]\n",
    "    for e in tqdm(test2_text_data):\n",
    "        test2_plan_labels.append(_convert_example_to_cls_plan_labels(e,tokenizer))\n",
    "    for idx in tqdm(range(len(test2_plan_labels))):\n",
    "        for jdx in range(len(test2_plan_labels[idx])):\n",
    "            if sum(test2_plan_labels[idx][jdx])==0:\n",
    "                test2_plan_labels[idx][jdx][0]=1\n",
    "    pk.dump(test2_plan_labels,open(\"./middle_data/test2_plan_labels.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test2_plan_labels=pk.load(open(\"./middle_data/test2_plan_labels.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Tokenize test2 documents and cache tokens, relation labels and the\n",
    "# token<->original-text index maps (mirror of the test-set cell above).\n",
    "if not os.path.exists(\"./middle_data/test2_doc_cls_info.pk\"):\n",
    "    test2_cls_doc_tokens=[]\n",
    "    test2_cls_doc_rels=[]\n",
    "    test2_cls_token2doc=[]\n",
    "    for e in tqdm(test2_text_data):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,rel_labels=_convert_example_to_cls_record(e,tokenizer)\n",
    "        test2_cls_doc_tokens.append(tokens)\n",
    "        test2_cls_doc_rels.append(rel_labels)\n",
    "        test2_cls_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "    pk.dump([test2_cls_doc_rels,test2_cls_doc_tokens,test2_cls_token2doc],open(\"./middle_data/test2_doc_cls_info.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test2_cls_doc_rels,test2_cls_doc_tokens,test2_cls_token2doc=pk.load(open(\"./middle_data/test2_doc_cls_info.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Build classification examples for test2; unlike the test-set cell, this\n",
    "# passes real cut_vecs / tokenvec features instead of None.\n",
    "if not os.path.exists(\"./middle_data/test2_cls_examples.pk\"):\n",
    "    test2_cls_examples=[]\n",
    "    for idx,(tokens,rel,token2doc,postag) in tqdm(enumerate(zip(test2_cls_doc_tokens,test2_cls_doc_rels,test2_cls_token2doc,test2_text_data_postag))):   \n",
    "            example=create_cls_example(tokens,rel,token2doc,postag,test2_texts_tfidf_svd[idx],\\\n",
    "                                       test2_cut_vecs[idx],test2_text_data_tokenvec[idx],test2_plan_labels[idx],tokenizer)\n",
    "            test2_cls_examples.append(example)   \n",
    "    pk.dump(test2_cls_examples,open(\"./middle_data/test2_cls_examples.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    test2_cls_examples=pk.load(open(\"./middle_data/test2_cls_examples.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Ensemble for test2: average the 3 CV-fold predictions plus the\n",
    "# full-data model's predictions.\n",
    "preds=[]\n",
    "\n",
    "for idx in range(3):\n",
    "    preds.append(np.load(\"./cross_results/test2_cls_\"+str(idx)+\".npy\"))\n",
    "preds.append(np.load(\"./cross_results/test2_cls_full.npy\"))\n",
    "preds=np.stack(preds,axis=2)\n",
    "preds=preds.mean(axis=-1)\n",
    "# preds=(preds>0.5).astype(np.int)\n",
    "pred_cls_logits=preds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "cls_test2_dataset=RelDataset(test2_cls_examples[:])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 关系实体识别数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:7: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  import sys\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c0d654c0d6c34c8d99bd6a09e4099cd2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# NER training prep: for each training document, emit one record per gold\n",
    "# relation (tokens are duplicated per relation) along with that relation's\n",
    "# token label sequence and the token<->text index maps; cache to middle_data/.\n",
    "if not os.path.exists(\"./middle_data/doc_token_info_ner.pk\"):\n",
    "    ner_doc_tokens=[]\n",
    "    ner_doc_token_labels=[]\n",
    "    ner_doc_rels=[]\n",
    "    ner_token2doc=[]\n",
    "    ner_doc2doc=[]\n",
    "    for idx,example in tqdm(enumerate(text_data)):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels=_convert_example_to_record(example,tokenizer)\n",
    "        rel_list=[e['predicate'] for e in example['spo_list']]\n",
    "#         if len(set(rel_list))!=len(rel_list):\n",
    "#                  print(idx)\n",
    "        for i in range(len(rel_labels)):\n",
    "            ner_doc_tokens.append(tokens)\n",
    "            ner_doc_rels.append(rel_labels[i])\n",
    "            ner_doc_token_labels.append(labels_list[rel_labels[i]])\n",
    "            ner_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "            ner_doc2doc.append(idx)\n",
    "    pk.dump([ner_doc2doc,ner_doc_rels,ner_doc_tokens,ner_doc_token_labels,ner_token2doc],open(\"./middle_data/doc_token_info_ner.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading doc..\")\n",
    "    ner_doc2doc,ner_doc_rels,ner_doc_tokens,ner_doc_token_labels,ner_token2doc=pk.load(open(\"./middle_data/doc_token_info_ner.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "code_folding": [
     1
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3dfc292e611e4e33b08ac55808fcfb1f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Build (or load cached) windowed NER examples: each (doc, relation) record\n",
    "# is sliced into windows of max_len-3-len(rel_text) tokens; doc2example /\n",
    "# example2doc keep the mapping between records and their windows.\n",
    "if not os.path.exists(\"./middle_data/ner_examples_ner.pk\"):\n",
    "    ner_examples=[]\n",
    "    example2doc=[]\n",
    "    doc2example=[[] for i in range(len(ner_doc_tokens))]\n",
    "    for idx,(tokens,rel,labels,token2doc) in tqdm(enumerate(zip(ner_doc_tokens,ner_doc_rels,ner_doc_token_labels,ner_token2doc))):\n",
    "        tfidf_svd=texts_tfidf_svd[ner_doc2doc[idx]]\n",
    "        postag=text_data_postag[ner_doc2doc[idx]]\n",
    "        cur_idx=0\n",
    "        rel_text=id2rel_text[rel]\n",
    "        rel_postag=rel_data_postag[rel]\n",
    "        rel_token2doc=id2rel_token2text[rel]\n",
    "        while len(tokens)-cur_idx> (max_len-3-len(rel_text)):\n",
    "            temp_tokens=tokens[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_labels=labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "            # For training, drop window samples whose labels contain none of 3/4/1/2\n",
    "            if (1 in  temp_labels) or (2 in  temp_labels) or (3 in  temp_labels) or (4 in  temp_labels):\n",
    "                example2doc.append(idx)\n",
    "                ner_examples.append(create_example(temp_tokens,rel,temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))\n",
    "                doc2example[idx].append(len(ner_examples)-1)\n",
    "            cur_idx+=max_len-3-len(rel_text)\n",
    "        # Trailing partial window, same entity-label filter as above.\n",
    "        if cur_idx<len(tokens):\n",
    "            temp_tokens=tokens[cur_idx:]\n",
    "            temp_labels=labels[cur_idx:]\n",
    "            temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "            if (1 in  temp_labels) or (2 in  temp_labels) or (3 in  temp_labels) or (4 in  temp_labels):\n",
    "                example2doc.append(idx)\n",
    "                ner_examples.append(create_example(temp_tokens,rel,temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))    \n",
    "                doc2example[idx].append(len(ner_examples)-1)\n",
    "\n",
    "    pk.dump([doc2example,example2doc,ner_examples],open(\"./middle_data/ner_examples_ner.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    doc2example,example2doc,ner_examples=pk.load(open(\"./middle_data/ner_examples_ner.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "#交叉ensemble部分\n",
    "ner_examples=np.array(ner_examples)\n",
    "spliter=KFold(n_splits=3,shuffle=True,random_state=20)\n",
    "train_indexs=[]\n",
    "val_indexs=[]\n",
    "for idx,(train_index,val_index) in enumerate(spliter.split(ner_examples)):\n",
    "    train_indexs.append(train_index)\n",
    "    val_indexs.append(val_index)\n",
    "preds=[]\n",
    "logits=[]\n",
    "rel_logits=[]\n",
    "for idx in range(3):\n",
    "    preds.append(np.load(\"./cross_results/pred_\"+str(idx)+\".npy\"))\n",
    "    logits.append(np.load(\"./cross_results/logit_\"+str(idx)+\".npy\"))\n",
    "    rel_logits.append(np.load(\"./cross_results/rel_\"+str(idx)+\".npy\"))\n",
    "pred_token_pred=np.zeros((len(ner_examples),256))\n",
    "pred_token_logits=np.zeros((len(ner_examples),256,9))\n",
    "pred_rel_logits=np.zeros((len(ner_examples),55))\n",
    "for idx in range(3):\n",
    "    val_index=val_indexs[idx]\n",
    "    pred_token_pred[val_index]=preds[idx]\n",
    "    pred_token_logits[val_index]=logits[idx]\n",
    "    pred_rel_logits[val_index]=rel_logits[idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# NOTE(review): 268432 is a magic split point into ner_examples -- document\n",
    "# where it comes from (it does not follow from the visible cells).\n",
    "# for i in range(len(example2doc)):\n",
    "#     if ner_doc2doc[example2doc[i]]==180000:\n",
    "#         print(i)\n",
    "ner_train_dataset=NerDataset(ner_examples[:10000])\n",
    "ner_val_dataset=NerDataset(ner_examples[268432:])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 使用关系分类得到的结果进行序列标注(验证整体pipline模型效果及预测 测试集的时候使用)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "hidden": true
   },
   "source": [
    "### CRF"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true,
    "hidden": true
   },
   "source": [
    "#### val"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "preds=(preds>0.5).astype(np.int)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'out_label_ids' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-39-5ec5843bc1fe>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mpreds\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mout_label_ids\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mNameError\u001b[0m: name 'out_label_ids' is not defined"
     ]
    }
   ],
   "source": [
    "preds.shape,out_label_ids.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "val_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Build NER inputs from PREDICTED relations (pipeline validation, fold 0):\n",
    "# for each doc in val_index, take relations where the binarized pred is 1\n",
    "# (relation 0 handled separately because the rels>0 filter drops it) and\n",
    "# emit one record per predicted relation. Cached under middle_data/.\n",
    "if not os.path.exists(\"./middle_data/pred_token_info_ner_0.pk\"):\n",
    "    pred_doc_tokens=[]\n",
    "    pred_doc_token_labels=[]\n",
    "    pred_doc_rels=[]\n",
    "    pred_token2doc=[]\n",
    "    pred_doc2doc=[]\n",
    "    for idx,ind in tqdm(enumerate(val_index)):\n",
    "        example=text_data[ind]\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels=_convert_example_to_record(example,tokenizer)\n",
    "        rels=(preds[idx].astype(np.int8)*np.arange(preds.shape[1]))\n",
    "        rels=rels[rels>0]\n",
    "        rel_labels=rels\n",
    "        if preds[idx][0]==1:\n",
    "            rel_labels=np.array([0]+list(rel_labels))\n",
    "        for i in range(len(rel_labels)):\n",
    "            pred_doc_tokens.append(tokens)\n",
    "            pred_doc_rels.append(rel_labels[i])\n",
    "            pred_doc_token_labels.append(labels_list[rel_labels[i]])\n",
    "            pred_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "            pred_doc2doc.append(ind)\n",
    "    pk.dump([pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc],open(\"./middle_data/pred_token_info_ner_0.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading doc..\")\n",
    "    pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc=pk.load(open(\"./middle_data/pred_token_info_ner_0.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Window the predicted-relation records into NER examples (same slicing as\n",
    "# the training cell, but WITHOUT the entity-label filter: at inference time\n",
    "# every window is kept). Cached under middle_data/.\n",
    "if not os.path.exists(\"./middle_data/pred_examples_ner_0.pk\"):\n",
    "    pred_examples=[]\n",
    "    pred_example2doc=[]\n",
    "    pred_doc2example=[[] for i in range(len(pred_doc_tokens))]\n",
    "    for idx,(tokens,rel,labels,token2doc) in tqdm(enumerate(zip(pred_doc_tokens,pred_doc_rels,pred_doc_token_labels,pred_token2doc))):\n",
    "                tfidf_svd=texts_tfidf_svd[pred_doc2doc[idx]]\n",
    "                postag=text_data_postag[pred_doc2doc[idx]]\n",
    "                cur_idx=0\n",
    "                rel_text=id2rel_text[rel]\n",
    "                rel_postag=rel_data_postag[rel]\n",
    "                rel_token2doc=id2rel_token2text[rel]\n",
    "                while len(tokens)-cur_idx> (max_len-3-len(rel_text)):\n",
    "                    temp_tokens=tokens[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "                    temp_labels=labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "                    temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "                    pred_example2doc.append(idx)\n",
    "                    pred_examples.append(create_example(temp_tokens,int(rel),temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))\n",
    "                    pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "                    cur_idx+=max_len-3-len(rel_text)\n",
    "                if cur_idx<len(tokens):\n",
    "                    temp_tokens=tokens[cur_idx:]\n",
    "                    temp_labels=labels[cur_idx:]\n",
    "                    temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "                    pred_example2doc.append(idx)\n",
    "                    pred_examples.append(create_example(temp_tokens,int(rel),temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))    \n",
    "                    pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "    pk.dump([pred_doc2example,pred_example2doc,pred_examples],open(\"./middle_data/pred_examples_ner_0.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    pred_doc2example,pred_example2doc,pred_examples=pk.load(open(\"./middle_data/pred_examples_ner_0.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Wrap the pipeline-validation examples; trailing expression shows the size.\n",
    "ner_pred_dataset=NerDataset(pred_examples[:])\n",
    "len(ner_pred_dataset) #21678"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true,
    "hidden": true
   },
   "source": [
    "#### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "preds=np.random.rand(10468,55)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((10468, 55), 10468)"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "preds.shape,len(test_text_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "preds=(preds>0.5).astype(np.int)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:7: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  import sys\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c4b8a112d9d44d78b49b0c92a88bff77",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Build NER inputs from predicted relations for the FULL test set (same\n",
    "# logic as the fold-0 validation cell above); cached under middle_data/.\n",
    "if not os.path.exists(\"./middle_data/test_pred_token_info_ner.pk\"):\n",
    "    pred_doc_tokens=[]\n",
    "    pred_doc_token_labels=[]\n",
    "    pred_doc_rels=[]\n",
    "    pred_token2doc=[]\n",
    "    pred_doc2doc=[]\n",
    "    for idx,example in tqdm(enumerate(test_text_data)):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels=_convert_example_to_record(example,tokenizer)\n",
    "        rels=(preds[idx].astype(np.int8)*np.arange(preds.shape[1]))\n",
    "        rels=rels[rels>0]\n",
    "        rel_labels=rels\n",
    "        if preds[idx][0]==1:\n",
    "            rel_labels=np.array([0]+list(rel_labels))\n",
    "        for i in range(len(rel_labels)):\n",
    "            pred_doc_tokens.append(tokens)\n",
    "            pred_doc_rels.append(rel_labels[i])\n",
    "            pred_doc_token_labels.append(labels_list[rel_labels[i]])\n",
    "            pred_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "            pred_doc2doc.append(idx)\n",
    "    pk.dump([pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc],open(\"./middle_data/test_pred_token_info_ner.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading doc..\")\n",
    "    pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc=pk.load(open(\"./middle_data/test_pred_token_info_ner.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "eaab553fd4cc475aa57cf3cc879ab896",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Window test-set predicted-relation records into NER examples (no\n",
    "# entity-label filter at inference); cached under middle_data/.\n",
    "if not os.path.exists(\"./middle_data/test_pred_examples_ner.pk\"):\n",
    "    pred_examples=[]\n",
    "    pred_example2doc=[]\n",
    "    pred_doc2example=[[] for i in range(len(pred_doc_tokens))]\n",
    "    for idx,(tokens,rel,labels,token2doc) in tqdm(enumerate(zip(pred_doc_tokens,pred_doc_rels,pred_doc_token_labels,pred_token2doc))):\n",
    "                tfidf_svd=test_texts_tfidf_svd[pred_doc2doc[idx]]\n",
    "                postag=test_text_data_postag[pred_doc2doc[idx]]\n",
    "                cur_idx=0\n",
    "                rel_text=id2rel_text[rel]\n",
    "                rel_postag=rel_data_postag[rel]\n",
    "                rel_token2doc=id2rel_token2text[rel]\n",
    "                while len(tokens)-cur_idx> (max_len-3-len(rel_text)):\n",
    "                    temp_tokens=tokens[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "                    temp_labels=labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "                    temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "                    pred_example2doc.append(idx)\n",
    "                    pred_examples.append(create_example(temp_tokens,int(rel),temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))\n",
    "                    pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "                    cur_idx+=max_len-3-len(rel_text)\n",
    "                if cur_idx<len(tokens):\n",
    "                    temp_tokens=tokens[cur_idx:]\n",
    "                    temp_labels=labels[cur_idx:]\n",
    "                    temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "                    pred_example2doc.append(idx)\n",
    "                    pred_examples.append(create_example(temp_tokens,int(rel),temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))    \n",
    "                    pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "    pk.dump([pred_doc2example,pred_example2doc,pred_examples],open(\"./middle_data/test_pred_examples_ner.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    pred_doc2example,pred_example2doc,pred_examples=pk.load(open(\"./middle_data/test_pred_examples_ner.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "288440"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Wrap the test-set NER examples; trailing expression shows the size.\n",
    "ner_pred_dataset=NerDataset(pred_examples)\n",
    "len(ner_pred_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((18646, 55), 0)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#交叉 ensemble 部分\n",
    "pred_examples=np.array(pred_examples)\n",
    "pred_token_pred=np.zeros((len(pred_examples),256))\n",
    "pred_token_logits=np.zeros((len(pred_examples),256,9))\n",
    "pred_rel_logits=np.zeros((len(pred_examples),55))\n",
    "pred_token_pred=np.load(\"./cross_results/test_pred_full.npy\")\n",
    "pred_token_logits=np.load(\"./cross_results/test_logit_full.npy\")\n",
    "pred_rel_logits=np.load(\"./cross_results/test_rel_full.npy\")\n",
    "pred_rel_logits.shape,len(pred_examples)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading doc..\n"
     ]
    }
   ],
   "source": [
    "# Build NER inputs from predicted relations for the test2 split (mirror of\n",
    "# the test-set cell); cached under middle_data/.\n",
    "if not os.path.exists(\"./middle_data/test2_pred_token_info_ner.pk\"):\n",
    "    pred_doc_tokens=[]\n",
    "    pred_doc_token_labels=[]\n",
    "    pred_doc_rels=[]\n",
    "    pred_token2doc=[]\n",
    "    pred_doc2doc=[]\n",
    "    for idx,example in tqdm(enumerate(test2_text_data)):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels=_convert_example_to_record(example,tokenizer)\n",
    "        rels=(preds[idx].astype(np.int8)*np.arange(preds.shape[1]))\n",
    "        rels=rels[rels>0]\n",
    "        rel_labels=rels\n",
    "        if preds[idx][0]==1:\n",
    "            rel_labels=np.array([0]+list(rel_labels))\n",
    "        for i in range(len(rel_labels)):\n",
    "            pred_doc_tokens.append(tokens)\n",
    "            pred_doc_rels.append(rel_labels[i])\n",
    "            pred_doc_token_labels.append(labels_list[rel_labels[i]])\n",
    "            pred_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "            pred_doc2doc.append(idx)\n",
    "    pk.dump([pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc],open(\"./middle_data/test2_pred_token_info_ner.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading doc..\")\n",
    "    pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc=pk.load(open(\"./middle_data/test2_pred_token_info_ner.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading example..\n"
     ]
    }
   ],
   "source": [
    "# Window test2 predicted-relation records into NER examples (no\n",
    "# entity-label filter at inference); cached under middle_data/.\n",
    "if not os.path.exists(\"./middle_data/test2_pred_examples_ner.pk\"):\n",
    "    pred_examples=[]\n",
    "    pred_example2doc=[]\n",
    "    pred_doc2example=[[] for i in range(len(pred_doc_tokens))]\n",
    "    for idx,(tokens,rel,labels,token2doc) in tqdm(enumerate(zip(pred_doc_tokens,pred_doc_rels,pred_doc_token_labels,pred_token2doc))):\n",
    "                tfidf_svd=test2_texts_tfidf_svd[pred_doc2doc[idx]]\n",
    "                postag=test2_text_data_postag[pred_doc2doc[idx]]\n",
    "                cur_idx=0\n",
    "                rel_text=id2rel_text[rel]\n",
    "                rel_postag=rel_data_postag[rel]\n",
    "                rel_token2doc=id2rel_token2text[rel]\n",
    "                while len(tokens)-cur_idx> (max_len-3-len(rel_text)):\n",
    "                    temp_tokens=tokens[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "                    temp_labels=labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "                    temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "                    pred_example2doc.append(idx)\n",
    "                    pred_examples.append(create_example(temp_tokens,int(rel),temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))\n",
    "                    pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "                    cur_idx+=max_len-3-len(rel_text)\n",
    "                if cur_idx<len(tokens):\n",
    "                    temp_tokens=tokens[cur_idx:]\n",
    "                    temp_labels=labels[cur_idx:]\n",
    "                    temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "                    pred_example2doc.append(idx)\n",
    "                    pred_examples.append(create_example(temp_tokens,int(rel),temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc))    \n",
    "                    pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "    pk.dump([pred_doc2example,pred_example2doc,pred_examples],open(\"./middle_data/test2_pred_examples_ner.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    pred_doc2example,pred_example2doc,pred_examples=pk.load(open(\"./middle_data/test2_pred_examples_ner.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((214785, 55), 214785)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Cross-ensemble part: load the fused prediction arrays produced offline.\n",
    "pred_examples=np.array(pred_examples)\n",
    "# NOTE(review): the three np.zeros pre-allocations that used to sit here were\n",
    "# dead stores -- each array was immediately overwritten by np.load -- so they\n",
    "# have been removed.\n",
    "pred_token_pred=np.load(\"./cross_results/test2_pred_full.npy\")\n",
    "pred_token_logits=np.load(\"./cross_results/test2_logit_full.npy\")\n",
    "pred_rel_logits=np.load(\"./cross_results/test2_rel_full.npy\")\n",
    "# Sanity check: one relation-logit row per example.\n",
    "pred_rel_logits.shape,len(pred_examples)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "hidden": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "214785"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Wrap the generated examples in the NER dataset; length should equal len(pred_examples).\n",
    "ner_pred_dataset=NerDataset(pred_examples)\n",
    "len(ner_pred_dataset)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true,
    "hidden": true
   },
   "source": [
    "### POINTER"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true,
    "hidden": true
   },
   "source": [
    "#### val"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Build (or load from cache) pointer-style plan labels for the training texts.\n",
    "if os.path.exists(\"./middle_data/train_plan_labels_pointer.pk\"):\n",
    "    print(\"loading...\")\n",
    "    train_plan_labels=pk.load(open(\"./middle_data/train_plan_labels_pointer.pk\",\"rb\"))\n",
    "else:\n",
    "    train_plan_labels=[_convert_example_to_plan_labels_new(e,tokenizer) for e in tqdm(text_data)]\n",
    "    # Any all-zero label row gets class index 0 switched on as a fallback.\n",
    "    for doc_labels in tqdm(train_plan_labels):\n",
    "        for row in doc_labels:\n",
    "            if sum(row)==0:\n",
    "                row[0]=1\n",
    "    pk.dump(train_plan_labels,open(\"./middle_data/train_plan_labels_pointer.pk\",\"wb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Expand each validation document into one record per predicted relation,\n",
    "# keeping the token/offset mappings so spans can be mapped back to raw text.\n",
    "if not os.path.exists(\"./middle_data/pred_token_info_pointer.pk\"):\n",
    "    pred_doc_tokens=[]\n",
    "    pred_doc_token_labels=[]\n",
    "    pred_doc_rels=[]\n",
    "    pred_token2doc=[]\n",
    "    pred_doc2doc=[]\n",
    "    # text_data[180000:] is the held-out slice; preds is assumed to be aligned\n",
    "    # with it, i.e. preds[idx] corresponds to text_data[180000+idx] -- TODO confirm.\n",
    "    for idx,example in tqdm(enumerate(text_data[180000:])):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels=_convert_example_to_record_new(example,tokenizer)\n",
    "        # Convert the multi-hot relation row into a list of active relation ids.\n",
    "        rels=(preds[idx].astype(np.int8)*np.arange(preds.shape[1]))\n",
    "        rels=rels[rels>0]\n",
    "        rel_labels=rels\n",
    "        # Relation id 0 is dropped by the >0 filter above, so re-add it when predicted.\n",
    "        if preds[idx][0]==1:\n",
    "            rel_labels=np.array([0]+list(rel_labels))\n",
    "        # Emit one record per (document, predicted relation) pair.\n",
    "        for i in range(len(rel_labels)):\n",
    "            pred_doc_tokens.append(tokens)\n",
    "            pred_doc_rels.append(rel_labels[i])\n",
    "            pred_doc_token_labels.append(labels_list[rel_labels[i]])\n",
    "            pred_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "            pred_doc2doc.append(idx+180000)\n",
    "    # Token rows with no active label fall back to class index 0.\n",
    "    for idx in tqdm(range(len(pred_doc_token_labels))):\n",
    "        for jdx in range(len(pred_doc_token_labels[idx])):\n",
    "            if sum(pred_doc_token_labels[idx][jdx])==0:\n",
    "                pred_doc_token_labels[idx][jdx][0]=1\n",
    "    pk.dump([pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc],open(\"./middle_data/pred_token_info_pointer.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading doc..\")\n",
    "    pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc=pk.load(open(\"./middle_data/pred_token_info_pointer.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Number of (document, relation) records produced above.\n",
    "len(pred_doc_token_labels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Slide a window over each record so every chunk (plus special tokens and the\n",
    "# relation text) fits in max_len, emitting one model example per chunk.\n",
    "if not os.path.exists(\"./middle_data/pred_examples_pointer.pk\"):\n",
    "    pred_examples=[]\n",
    "    pred_example2doc=[]\n",
    "    pred_doc2example=[[] for i in range(len(pred_doc_tokens))]\n",
    "    for idx,(tokens,rel,labels,token2doc) in tqdm(enumerate(zip(pred_doc_tokens,pred_doc_rels,pred_doc_token_labels,pred_token2doc))):\n",
    "        tfidf_svd=texts_tfidf_svd[pred_doc2doc[idx]]\n",
    "        postag=text_data_postag[pred_doc2doc[idx]]\n",
    "        plan_labels=train_plan_labels[pred_doc2doc[idx]]\n",
    "        # Keep only the plan-label columns relevant to this relation (O plus the\n",
    "        # B/E markers of its object and subject), padded with 4 zero columns.\n",
    "        index=[0,new_kglabels2id['B-'+id2rels[rel]+\"-OB\"],new_kglabels2id['E-'+id2rels[rel]+\"-OB\"],\\\n",
    "           new_kglabels2id['B-'+id2rels[rel]+\"-SUB\"],new_kglabels2id['E-'+id2rels[rel]+\"-SUB\"]]\n",
    "        plan_labels=np.concatenate([np.array(plan_labels)[:,index],np.zeros((len(plan_labels),4)).astype(np.int64)],axis=1).tolist()\n",
    "        cur_idx=0\n",
    "        rel_text=id2rel_text[rel]\n",
    "        rel_postag=rel_data_postag[rel]\n",
    "        rel_token2doc=id2rel_token2text[rel]\n",
    "        # Full-size windows: 3 positions are reserved for special tokens.\n",
    "        while len(tokens)-cur_idx> (max_len-3-len(rel_text)):\n",
    "            temp_tokens=tokens[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_labels=labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_plan_labels=plan_labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "            # During training, samples whose labels contain none of 1/2/3/4 used to\n",
    "            # be dropped; the filter below stays disabled on the prediction path.\n",
    "            compare_labels=np.array(temp_labels).argmax(-1)\n",
    "#             if (1 in  compare_labels) or (2 in  compare_labels) or (3 in  compare_labels) or (4 in  compare_labels):\n",
    "            pred_example2doc.append(idx)\n",
    "            pred_examples.append(create_example_new(temp_tokens,rel,temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc,temp_plan_labels))\n",
    "            pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "            cur_idx+=max_len-3-len(rel_text)\n",
    "        # Trailing partial window (remainder shorter than a full window).\n",
    "        if cur_idx<len(tokens):\n",
    "            temp_tokens=tokens[cur_idx:]\n",
    "            temp_labels=labels[cur_idx:]\n",
    "            temp_plan_labels=plan_labels[cur_idx:]\n",
    "            temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "            compare_labels=np.array(temp_labels).argmax(-1)\n",
    "#             if (1 in  compare_labels) or (2 in  compare_labels) or (3 in  compare_labels) or (4 in  compare_labels):\n",
    "            pred_example2doc.append(idx)\n",
    "            pred_examples.append(create_example_new(temp_tokens,rel,temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc,temp_plan_labels))    \n",
    "            pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "    pk.dump([pred_doc2example,pred_example2doc,pred_examples],open(\"./middle_data/pred_examples_pointer.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    pred_doc2example,pred_example2doc,pred_examples=pk.load(open(\"./middle_data/pred_examples_pointer.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Total number of windowed examples generated above.\n",
    "len(pred_examples)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Pointer-style NER dataset over all generated validation examples.\n",
    "ner_pred_dataset=NerDataset_pointer(pred_examples[:])\n",
    "len(ner_pred_dataset) #21678"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true,
    "hidden": true
   },
   "source": [
    "#### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Build (or load from cache) pointer-style plan labels for the test texts.\n",
    "if os.path.exists(\"./middle_data/test_plan_labels_pointer.pk\"):\n",
    "    print(\"loading...\")\n",
    "    test_plan_labels=pk.load(open(\"./middle_data/test_plan_labels_pointer.pk\",\"rb\"))\n",
    "else:\n",
    "    test_plan_labels=[_convert_example_to_plan_labels_new(e,tokenizer) for e in tqdm(test_text_data)]\n",
    "    # Any all-zero label row gets class index 0 switched on as a fallback.\n",
    "    for doc_labels in tqdm(test_plan_labels):\n",
    "        for row in doc_labels:\n",
    "            if sum(row)==0:\n",
    "                row[0]=1\n",
    "    pk.dump(test_plan_labels,open(\"./middle_data/test_plan_labels_pointer.pk\",\"wb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Expand each test document into one record per predicted relation,\n",
    "# keeping the token/offset mappings so spans can be mapped back to raw text.\n",
    "if not os.path.exists(\"./middle_data/test_pred_token_info_pointer.pk\"):\n",
    "    pred_doc_tokens=[]\n",
    "    pred_doc_token_labels=[]\n",
    "    pred_doc_rels=[]\n",
    "    pred_token2doc=[]\n",
    "    pred_doc2doc=[]\n",
    "    # Assumes preds is aligned with test_text_data (preds[idx] <-> document idx) -- TODO confirm.\n",
    "    for idx,example in tqdm(enumerate(test_text_data)):\n",
    "        tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels=_convert_example_to_record_new(example,tokenizer)\n",
    "        # Convert the multi-hot relation row into a list of active relation ids.\n",
    "        rels=(preds[idx].astype(np.int8)*np.arange(preds.shape[1]))\n",
    "        rels=rels[rels>0]\n",
    "        rel_labels=rels\n",
    "        # Relation id 0 is dropped by the >0 filter above, so re-add it when predicted.\n",
    "        if preds[idx][0]==1:\n",
    "            rel_labels=np.array([0]+list(rel_labels))\n",
    "        # Emit one record per (document, predicted relation) pair.\n",
    "        for i in range(len(rel_labels)):\n",
    "            pred_doc_tokens.append(tokens)\n",
    "            pred_doc_rels.append(rel_labels[i])\n",
    "            pred_doc_token_labels.append(labels_list[rel_labels[i]])\n",
    "            pred_token2doc.append([tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index])\n",
    "            pred_doc2doc.append(idx)\n",
    "    # Token rows with no active label fall back to class index 0.\n",
    "    for idx in tqdm(range(len(pred_doc_token_labels))):\n",
    "        for jdx in range(len(pred_doc_token_labels[idx])):\n",
    "            if sum(pred_doc_token_labels[idx][jdx])==0:\n",
    "                pred_doc_token_labels[idx][jdx][0]=1\n",
    "    pk.dump([pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc],open(\"./middle_data/test_pred_token_info_pointer.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading doc..\")\n",
    "    pred_doc2doc,pred_doc_rels,pred_doc_tokens,pred_doc_token_labels,pred_token2doc=pk.load(open(\"./middle_data/test_pred_token_info_pointer.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Slide a window over each test record so every chunk (plus special tokens and\n",
    "# the relation text) fits in max_len, emitting one model example per chunk.\n",
    "if not os.path.exists(\"./middle_data/test_pred_examples_pointer.pk\"):\n",
    "    pred_examples=[]\n",
    "    pred_example2doc=[]\n",
    "    pred_doc2example=[[] for i in range(len(pred_doc_tokens))]\n",
    "    for idx,(tokens,rel,labels,token2doc) in tqdm(enumerate(zip(pred_doc_tokens,pred_doc_rels,pred_doc_token_labels,pred_token2doc))):\n",
    "        tfidf_svd=test_texts_tfidf_svd[pred_doc2doc[idx]]\n",
    "        postag=test_text_data_postag[pred_doc2doc[idx]]\n",
    "        plan_labels=test_plan_labels[pred_doc2doc[idx]]\n",
    "        # Keep only the plan-label columns relevant to this relation (O plus the\n",
    "        # B/E markers of its object and subject), padded with 4 zero columns.\n",
    "        index=[0,new_kglabels2id['B-'+id2rels[rel]+\"-OB\"],new_kglabels2id['E-'+id2rels[rel]+\"-OB\"],\\\n",
    "           new_kglabels2id['B-'+id2rels[rel]+\"-SUB\"],new_kglabels2id['E-'+id2rels[rel]+\"-SUB\"]]\n",
    "        plan_labels=np.concatenate([np.array(plan_labels)[:,index],np.zeros((len(plan_labels),4)).astype(np.int64)],axis=1).tolist()\n",
    "        cur_idx=0\n",
    "        rel_text=id2rel_text[rel]\n",
    "        rel_postag=rel_data_postag[rel]\n",
    "        rel_token2doc=id2rel_token2text[rel]\n",
    "        # Full-size windows: 3 positions are reserved for special tokens.\n",
    "        while len(tokens)-cur_idx> (max_len-3-len(rel_text)):\n",
    "            temp_tokens=tokens[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_labels=labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_plan_labels=plan_labels[cur_idx:cur_idx+max_len-3-len(rel_text)]\n",
    "            temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "            # During training, samples whose labels contain none of 1/2/3/4 used to\n",
    "            # be dropped; the filter below stays disabled on the prediction path.\n",
    "            compare_labels=np.array(temp_labels).argmax(-1)\n",
    "#             if (1 in  compare_labels) or (2 in  compare_labels) or (3 in  compare_labels) or (4 in  compare_labels):\n",
    "            pred_example2doc.append(idx)\n",
    "            pred_examples.append(create_example_new(temp_tokens,rel,temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc,temp_plan_labels))\n",
    "            pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "            cur_idx+=max_len-3-len(rel_text)\n",
    "        # Trailing partial window (remainder shorter than a full window).\n",
    "        if cur_idx<len(tokens):\n",
    "            temp_tokens=tokens[cur_idx:]\n",
    "            temp_labels=labels[cur_idx:]\n",
    "            temp_plan_labels=plan_labels[cur_idx:]\n",
    "            temp_token2doc=[e[cur_idx:cur_idx+max_len-3-len(rel_text)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "            compare_labels=np.array(temp_labels).argmax(-1)\n",
    "#             if (1 in  compare_labels) or (2 in  compare_labels) or (3 in  compare_labels) or (4 in  compare_labels):\n",
    "            pred_example2doc.append(idx)\n",
    "            pred_examples.append(create_example_new(temp_tokens,rel,temp_labels,temp_token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc,temp_plan_labels))    \n",
    "            pred_doc2example[idx].append(len(pred_examples)-1)\n",
    "    pk.dump([pred_doc2example,pred_example2doc,pred_examples],open(\"./middle_data/test_pred_examples_pointer.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading example..\")\n",
    "    pred_doc2example,pred_example2doc,pred_examples=pk.load(open(\"./middle_data/test_pred_examples_pointer.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Total number of windowed test examples generated above.\n",
    "len(pred_examples)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Pointer-style NER dataset over all generated test examples.\n",
    "ner_pred_dataset=NerDataset_pointer(pred_examples[:])\n",
    "len(ner_pred_dataset) #21678"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "# 关系分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Hyperparameters for the relation-classification fine-tuning run.\n",
    "hidden_dropout_prob = 0.1\n",
    "num_rel_labels = len(id2rels)\n",
    "num_token_labels=len(id2labels)\n",
    "learning_rate = 5e-5\n",
    "weight_decay = 0\n",
    "epochs = 3\n",
    "batch_size = 16\n",
    "adam_epsilon=1e-8  # matches the AdamW default"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {
    "code_folding": [],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# Training/eval configuration. NOTE(review): device is hard-coded to cuda:2\n",
    "# and card_list assumes 4 GPUs -- adjust for the local machine.\n",
    "output_dir=\"./output_test/\"\n",
    "device=torch.device(\"cuda:2\")\n",
    "args=ARG(train_batch_size=batch_size,eval_batch_size=batch_size,weight_decay=weight_decay,learning_rate=learning_rate,\n",
    "         adam_epsilon=adam_epsilon,num_train_epochs=epochs,warmup_steps=0,gradient_accumulation_steps=1,save_steps=313,\n",
    "         max_grad_norm=1.0,model_name_or_path=output_dir,output_dir=output_dir,seed=42,device=device,n_gpu=1,\n",
    "        max_steps=0,output_mode=\"classification\",fp16=False,fp16_opt_level='O1',card_list=[0,1,2,3])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "hidden": true
   },
   "source": [
    "## model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# class BertMulticlass_DGCNN(nn.Module):\n",
    "#     def __init__(self,bert_dir,config_file,num_rel_labels,hidden_dropout_prob,embed_dim,loss_type=True,use_feature=True,alpha=1):\n",
    "#         super(BertMulticlass_DGCNN,self).__init__()\n",
    "#         self.num_rel_labels = num_rel_labels\n",
    "#         self.alpha=alpha\n",
    "#         self.bert =BertModel.from_pretrained(bert_dir,config=os.path.join(bert_dir,config_file), \\\n",
    "#                                              hidden_dropout_prob=hidden_dropout_prob,output_hidden_states=True, output_attentions=True)\n",
    "#         self.dropout = nn.Dropout(hidden_dropout_prob)\n",
    "#         self.embed=nn.Embedding(len(id2pos_BIO),embed_dim)\n",
    "#         if use_feature:\n",
    "#             hidden_size=self.bert.pooler.dense.out_features+embed_dim\n",
    "#         else:\n",
    "#             hidden_size=self.bert.pooler.dense.out_features         \n",
    "#         self.DGCNN=nn.Sequential(GCNN_block(hidden_size,hidden_size,1,padding=0,dilation=1),\n",
    "#                    GCNN_block(hidden_size,hidden_size,3,padding=1,dilation=1),\n",
    "#                    GCNN_block(hidden_size,hidden_size,3,padding=2,dilation=2),\n",
    "#                    GCNN_block(hidden_size,hidden_size,3,padding=4,dilation=4))\n",
    "# #         self.dense_feature=nn.Sequential(nn.Linear(16,16),nn.ReLU())\n",
    "#         if use_feature:\n",
    "#             self.classifier=nn.Linear(hidden_size,num_rel_labels)\n",
    "#         else:\n",
    "#             self.classifier=nn.Linear(hidden_size,num_rel_labels)\n",
    "#         self.loss_type=loss_type\n",
    "#         self.use_feature=use_feature\n",
    "#     def forward(\n",
    "#         self,\n",
    "#         input_ids=None,\n",
    "#         attention_mask=None,\n",
    "#         token_type_ids=None,\n",
    "#         rel_label=None,\n",
    "#         feature=None,\n",
    "#         postag=None,\n",
    "        \n",
    "#     ):\n",
    "\n",
    "#         outputs = self.bert(\n",
    "#             input_ids,\n",
    "#             attention_mask=attention_mask,\n",
    "#             token_type_ids=token_type_ids,\n",
    "#         )\n",
    "\n",
    "#         sequence_output = outputs[0]\n",
    "#         if self.use_feature:\n",
    "#             postag_output=self.embed(postag)\n",
    "#             sequence_output=torch.cat([sequence_output,postag_output],dim=-1)\n",
    "#         sequence_output=F.adaptive_avg_pool1d(self.DGCNN([sequence_output.permute(0,2,1),attention_mask])[0],1).squeeze(-1)\n",
    "#         sequence_output = self.dropout(sequence_output)\n",
    "#         if self.use_feature:\n",
    "# #             feature=self.dense_feature(feature)\n",
    "# #             logits = self.classifier(torch.cat([feature,sequence_output],dim=-1))\n",
    "#             logits = self.classifier(sequence_output)\n",
    "#         else:\n",
    "#             logits = self.classifier(sequence_output)\n",
    "#         outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here\n",
    "\n",
    "#         if rel_label is not None:\n",
    "#             if self.num_rel_labels == 1:\n",
    "#                 #  We are doing regression\n",
    "#                 loss_fct = MSELoss()\n",
    "#                 loss = loss_fct(logits.view(-1), labels.view(-1))\n",
    "#             else:\n",
    "#                 if self.loss_type:\n",
    "#                     loss_fct = clsLoss(0.3,self.alpha)\n",
    "#                     loss = loss_fct(logits.view(-1, self.num_rel_labels), rel_label.view(-1,self.num_rel_labels))\n",
    "#                 else:\n",
    "#                     loss_fct = nn.BCEWithLogitsLoss(reduction=\"none\")\n",
    "#                     loss = ((loss_fct(logits.view(-1, self.num_rel_labels), rel_label.view(-1,self.num_rel_labels).float())).sum(dim=-1)).mean()\n",
    "#             outputs = (loss,) + outputs\n",
    "\n",
    "#         return outputs  # (loss), logits, (hidden_states), (attentions)\n",
    "# def train(args, train_dataset,val_dataset, model, tokenizer):\n",
    "# #     \"\"\" Train the model \"\"\"\n",
    "# #     if args.local_rank in [-1, 0]:\n",
    "# #         tb_writer = SummaryWriter()\n",
    "\n",
    "# #     args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n",
    "#     train_sampler = RandomSampler(train_dataset) # if args.local_rank == -1 else DistributedSampler(train_dataset)\n",
    "#     train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n",
    "\n",
    "#     if args.max_steps > 0:\n",
    "#         t_total = args.max_steps\n",
    "#         args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n",
    "#     else:\n",
    "#         t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n",
    "\n",
    "#     # Prepare optimizer and schedule (linear warmup and decay)\n",
    "#     no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "#     optimizer_grouped_parameters = [\n",
    "#         {\n",
    "#             \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n",
    "#             \"weight_decay\": args.weight_decay,\n",
    "#         },\n",
    "#         {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n",
    "#     ]\n",
    "\n",
    "#     optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n",
    "#     scheduler = get_linear_schedule_with_warmup(\n",
    "#         optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n",
    "#     )\n",
    "\n",
    "#     # Check if saved optimizer or scheduler states exist\n",
    "#     if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n",
    "#         os.path.join(args.model_name_or_path, \"scheduler.pt\")\n",
    "#     ):\n",
    "#         logger.info(\"  loading optimizer and scheduler...\")\n",
    "#         # Load in optimizer and scheduler states\n",
    "#         optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n",
    "# #         scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n",
    "#     else:\n",
    "#         logger.info(\"  No optimizer and scheduler,we build a new one\")        \n",
    "\n",
    "#     if args.fp16:\n",
    "#         try:\n",
    "#             from apex import amp\n",
    "#         except ImportError:\n",
    "#             raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n",
    "#         model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n",
    "\n",
    "#     # multi-gpu training (should be after apex fp16 initialization)\n",
    "#     if args.n_gpu > 1:\n",
    "#         model = torch.nn.DataParallel(model,device_ids=args.card_list)\n",
    "\n",
    "\n",
    "#     # Train!\n",
    "#     logger.info(\"***** Running training *****\")\n",
    "#     logger.info(\"  Num examples = %d\", len(train_dataset))\n",
    "#     logger.info(\"  Num Epochs = %d\", args.num_train_epochs)\n",
    "# #     logger.info(\"  Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n",
    "#     logger.info(\n",
    "#         \"  Total train batch size (w. parallel, distributed & accumulation) = %d\",\n",
    "#         args.train_batch_size\n",
    "#         * args.gradient_accumulation_steps\n",
    "#     )\n",
    "#     logger.info(\"  Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n",
    "#     logger.info(\"  Total optimization steps = %d\", t_total)\n",
    "\n",
    "#     global_step = 0\n",
    "#     epochs_trained = 0\n",
    "#     steps_trained_in_current_epoch = 0\n",
    "#     # Check if continuing training from a checkpoint\n",
    "#     if os.path.exists(args.model_name_or_path):\n",
    "#         # set global_step to global_step of last saved checkpoint from model path\n",
    "#         try:\n",
    "#             global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n",
    "#         except ValueError:\n",
    "#             global_step = 0\n",
    "#         epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "#         steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "\n",
    "#         logger.info(\"  Continuing training from checkpoint, will skip to saved global_step\")\n",
    "#         logger.info(\"  Continuing training from epoch %d\", epochs_trained)\n",
    "#         logger.info(\"  Continuing training from global step %d\", global_step)\n",
    "#         logger.info(\"  Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n",
    "\n",
    "#     tr_loss, logging_loss = 0.0, 0.0\n",
    "#     model.zero_grad()\n",
    "#     train_iterator = tqdm(range(\n",
    "#         epochs_trained, int(args.num_train_epochs)), desc=\"Epoch\")\n",
    "#     set_seed(args)  # Added here for reproductibility\n",
    "#     for _ in train_iterator:\n",
    "#         start=time.time()\n",
    "#         epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n",
    "\n",
    "#         for step, batch in enumerate(epoch_iterator):\n",
    "\n",
    "#             # Skip past any already trained steps if resuming training\n",
    "#             if steps_trained_in_current_epoch > 0:\n",
    "#                 if  (step + 1) % args.gradient_accumulation_steps == 0: \n",
    "#                         steps_trained_in_current_epoch -= 1\n",
    "#                 continue\n",
    "\n",
    "#             model.train()\n",
    "#             batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "#             inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"rel_label\": batch[3],\\\n",
    "#                      \"postag\":batch[4],\"feature\":batch[5]}\n",
    "# #             inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n",
    "#             inputs[\"token_type_ids\"]=batch[2]\n",
    "# #             if args.model_type != \"distilbert\":\n",
    "# #                 inputs[\"token_type_ids\"] = (\n",
    "# #                     batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n",
    "# #                 )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n",
    "#             outputs = model(**inputs)\n",
    "#             loss = outputs[0]  # model outputs are always tuple in transformers (see doc)\n",
    "\n",
    "#             if args.n_gpu > 1:\n",
    "#                 loss = loss.mean()  # mean() to average on multi-gpu parallel training\n",
    "#             if args.gradient_accumulation_steps > 1:\n",
    "#                 loss = loss / args.gradient_accumulation_steps\n",
    "\n",
    "#             if args.fp16:\n",
    "#                 with amp.scale_loss(loss, optimizer) as scaled_loss:\n",
    "#                     scaled_loss.backward()\n",
    "#             else:\n",
    "#                 loss.backward()\n",
    "#             logger.info(\"  step:%d loss %.3f\", step,loss.item())\n",
    "\n",
    "#             tr_loss += loss.item()\n",
    "#             if (step + 1) % args.gradient_accumulation_steps == 0:\n",
    "#                 if args.fp16:\n",
    "#                     torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n",
    "#                 else:\n",
    "#                     torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n",
    "\n",
    "#                 optimizer.step()\n",
    "#                 scheduler.step()  # Update learning rate schedule\n",
    "#                 model.zero_grad()\n",
    "#                 global_step += 1\n",
    "\n",
    "\n",
    "#                 if  args.save_steps > 0 and global_step % args.save_steps == 0:\n",
    "#                     # Save model checkpoint\n",
    "#                     results = evaluate(args, val_dataset,model, tokenizer)\n",
    "#                     output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n",
    "#                     if not os.path.exists(output_dir):\n",
    "#                         os.makedirs(output_dir)\n",
    "#                     model_to_save = (\n",
    "#                         model.module if hasattr(model, \"module\") else model\n",
    "#                     )  # Take care of distributed/parallel training\n",
    "#                     torch.save(model_to_save.state_dict(),os.path.join(output_dir,\"model.pt\"))\n",
    "#                     tokenizer.save_pretrained(output_dir)\n",
    "\n",
    "#                     torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n",
    "#                     logger.info(\"Saving model checkpoint to %s\", output_dir)\n",
    "\n",
    "#                     torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n",
    "#                     torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n",
    "#                     if args.fp16:\n",
    "#                         torch.save(amp.state_dict(),os.path.join(output_dir, \"amp.pt\"))\n",
    "#                     logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n",
    "\n",
    "#             if args.max_steps > 0 and global_step > args.max_steps:\n",
    "#                 epoch_iterator.close()\n",
    "#                 break\n",
    "#         print(time.time()-start)\n",
    "#         if args.max_steps > 0 and global_step > args.max_steps:\n",
    "#             train_iterator.close()\n",
    "#             break\n",
    "\n",
    "\n",
    "#     return global_step, tr_loss / global_step\n",
    "# def evaluate(args, eval_dataset,model, tokenizer,thre=0.5):\n",
    "#     eval_output_dir = args.output_dir \n",
    "\n",
    "#     results = {}\n",
    "# #         eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n",
    "\n",
    "#     if not os.path.exists(eval_output_dir) :\n",
    "#         os.makedirs(eval_output_dir)\n",
    "\n",
    "# #         args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n",
    "#     # Note that DistributedSampler samples randomly\n",
    "#     eval_sampler = SequentialSampler(eval_dataset)\n",
    "#     eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n",
    "\n",
    "#     # multi-gpu eval\n",
    "#     if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n",
    "#         model = torch.nn.DataParallel(model,device_ids=args.card_list)\n",
    "\n",
    "#     # Eval!\n",
    "#     logger.info(\"***** Running evaluation  *****\")\n",
    "#     logger.info(\"  Num examples = %d\", len(eval_dataset))\n",
    "#     logger.info(\"  Batch size = %d\", args.eval_batch_size)\n",
    "#     eval_loss = 0.0\n",
    "#     nb_eval_steps = 0\n",
    "#     preds = None\n",
    "#     out_label_ids = None\n",
    "#     rights,num=0,0\n",
    "#     recall_num,full_num=0,0\n",
    "#     precision_num,full_num_pre=0,0\n",
    "#     for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n",
    "#         model.eval()\n",
    "#         batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "\n",
    "#         with torch.no_grad():\n",
    "#             inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"rel_label\": batch[3],\\\n",
    "#                      \"postag\":batch[4],\"feature\":batch[5]}\n",
    "# #             inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n",
    "#             inputs[\"token_type_ids\"]=batch[2]\n",
    "# #                 if args.model_type != \"distilbert\":\n",
    "# #                     inputs[\"token_type_ids\"] = (\n",
    "# #                         batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n",
    "# #                     )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n",
    "#             outputs = model(**inputs)\n",
    "#             tmp_eval_loss, logits = outputs[:2]\n",
    "\n",
    "#             eval_loss += tmp_eval_loss.mean().item()\n",
    "#         rights+=( (F.sigmoid(logits).detach()>thre).long()==batch[3]).all(dim=-1).sum().detach().cpu().item()\n",
    "#         num+=batch[3].shape[0]\n",
    "#         recall_num+=(( (F.sigmoid(logits).detach()>thre).long()==1) & (batch[3]==1)).sum().detach().cpu().item()\n",
    "#         precision_num+=(( (F.sigmoid(logits).detach()>thre).long()==1) & (batch[3]==1)).sum().detach().cpu().item()\n",
    "#         full_num+=batch[3].sum().detach().cpu().item()\n",
    "#         full_num_pre+= (F.sigmoid(logits).detach()>thre).long().sum().detach().cpu().item()\n",
    "#         nb_eval_steps += 1\n",
    "#         if preds is None:\n",
    "#             preds = F.sigmoid(logits).detach().cpu().numpy()\n",
    "#             out_label_ids = inputs[\"rel_label\"].detach().cpu().numpy()\n",
    "# #             out_label_ids=inputs[\"labels\"].detach().cpu().numpy()\n",
    "#         else:\n",
    "#             preds = np.append(preds, F.sigmoid(logits).detach().cpu().numpy(), axis=0)\n",
    "#             out_label_ids = np.append(out_label_ids, inputs[\"rel_label\"].detach().cpu().numpy(), axis=0)\n",
    "# #             out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n",
    "\n",
    "#     eval_loss = eval_loss / nb_eval_steps\n",
    "#     if args.output_mode == \"classification\":\n",
    "#         preds = (preds>thre).astype(np.int8)\n",
    "#         out_label_ids=out_label_ids.astype(np.int8)\n",
    "#     elif args.output_mode == \"regression\":\n",
    "#         preds = np.squeeze(preds)\n",
    "#     result = {\"acc_class\":(preds==out_label_ids).sum(axis=0)/preds.shape[0],\"acc_sample\":rights/(num+0.001),\\\n",
    "#               \"recall_sample\":recall_num/(full_num+0.001),\"precision_sample\":precision_num/(full_num_pre+0.001)}\n",
    "#     results.update(result)\n",
    "\n",
    "#     output_eval_file = os.path.join(eval_output_dir,  \"eval_results.txt\")\n",
    "#     with open(output_eval_file, \"a\") as writer:\n",
    "#         logger.info(\"***** Eval results  *****\")\n",
    "#         for key in sorted(result.keys()):\n",
    "#             logger.info(\"  %s = %s\", key, str(result[key]))\n",
    "#             writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n",
    "\n",
    "#     return results,preds,out_label_ids\n",
    "# epoch_iterator=Data.DataLoader(cls_val_dataset,batch_size=2,shuffle=False)\n",
    "# for step, batch in enumerate(epoch_iterator):\n",
    "#     model_cls.train()\n",
    "#     tokenvec=batch[6].detach().cpu().numpy()\n",
    "#     batch[6]=torch.tensor(word2vec[tokenvec]).float()\n",
    "#     wordvec=batch[7].detach().cpu().numpy()\n",
    "#     batch[7]=torch.tensor(word2vec[wordvec]).float()\n",
    "# #     batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "#     inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"rel_label\": batch[3],\\\n",
    "#              \"postag\":batch[4],\"feature\":batch[5],\"tokenvec\":batch[6],\"wordvec\":batch[7],\"wordmask\":batch[8]}\n",
    "#     inputs[\"token_type_ids\"]=batch[2]\n",
    "#     outputs = model_cls(**inputs)\n",
    "#     outputs[0].backward()\n",
    "#     break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "code_folding": [
     0,
     14,
     33,
     54,
     165,
     213,
     219,
     393
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "class clsLoss(nn.Module):\n",
    "    def __init__(self,m,alpha):\n",
    "        super(clsLoss,self).__init__()\n",
    "        self.m=m\n",
    "        self.alpha=alpha\n",
    "    def forward(self,logits,targets):\n",
    "        y_hat=torch.sigmoid(logits)\n",
    "        y=targets\n",
    "        theta=(((y_hat-0.5)*(y.float()-0.5))<0) | ((y_hat>0.5-self.m) & (y_hat<0.5+self.m))\n",
    "        loss_p=-(theta.float()*y.float()*torch.log(y_hat+1e-5)).sum(dim=-1)/y.float().sum(dim=-1)\n",
    "        loss_n=-(theta.float()*(1-y).float()*torch.log((1-y_hat)+1e-5)).sum(dim=-1)/(1-y.float()).sum(dim=-1)\n",
    "#         loss_n=-torch.log(1-(theta.float()*(1-y).float()*y_hat).sum(dim=-1)/(1-y).float().sum(dim=-1))\n",
    "        loss=loss_p+self.alpha*loss_n\n",
    "        return loss.mean()\n",
    "class GCNN_block(nn.Module):\n",
    "    def __init__(self,input_channel,output_channel,kernel_size,padding,dilation=1):\n",
    "        super(GCNN_block,self).__init__()\n",
    "        self.input_channel=input_channel\n",
    "        self.output_channel=output_channel\n",
    "        self.conv1=nn.Conv1d(input_channel,output_channel,kernel_size,padding=padding,dilation=dilation)\n",
    "        self.conv2=nn.Conv1d(input_channel,output_channel,kernel_size,padding=padding,dilation=dilation)\n",
    "        if input_channel !=output_channel:\n",
    "            self.trans=nn.Conv1d(input_channel,output_channel,1)\n",
    "    def forward(self,args):\n",
    "        X,attention_mask=args[0],args[1]\n",
    "        X=X*attention_mask.unsqueeze(1).float()\n",
    "        gate=torch.sigmoid(self.conv2(X))\n",
    "        if self.input_channel==self.output_channel:\n",
    "            Y=X*(1-gate)+self.conv1(X)*gate\n",
    "        else:\n",
    "            Y=self.trans(X)*(1-gate)+self.conv1(X)*gate\n",
    "        Y=Y*attention_mask.unsqueeze(1).float()\n",
    "        return Y,attention_mask\n",
    "class Conditional_LayerNorm(nn.Module):\n",
    "    def __init__(self,features,conditional_dim,eps=1e-6):\n",
    "        super(Conditional_LayerNorm,self).__init__()\n",
    "        self.gamma=nn.Parameter(torch.ones(features))\n",
    "        self.beta=nn.Parameter(torch.zeros(features))\n",
    "        self.trans_gamma=nn.Linear(conditional_dim,self.gamma.shape[-1])\n",
    "        self.trans_beta=nn.Linear(conditional_dim,self.beta.shape[-1])\n",
    "        torch.nn.init.constant_(self.trans_gamma.weight,val=0)\n",
    "        torch.nn.init.constant_(self.trans_gamma.bias,val=0)\n",
    "        torch.nn.init.constant_(self.trans_beta.weight,val=0)\n",
    "        torch.nn.init.constant_(self.trans_beta.bias,val=0)\n",
    "        self.eps=eps\n",
    "    def forward(self,X,condition):\n",
    "        mean=X.mean(-1,keepdim=True)\n",
    "        std=X.std(-1,keepdim=True)\n",
    "        cond_gamma=self.trans_gamma(condition)\n",
    "        cond_beta=self.trans_beta(condition)\n",
    "        if condition.dim()<X.dim(): #condition是固定维度\n",
    "            return (self.gamma+cond_gamma).unsqueeze(1)*(X-mean)/(std+self.eps)+(self.beta+cond_beta).unsqueeze(1)\n",
    "        else:#condition是sequence\n",
    "            return (self.gamma+cond_gamma)*(X-mean)/(std+self.eps)+(self.beta+cond_beta)\n",
    "class BertMulticlass_DGCNN(nn.Module):\n",
    "    \"\"\"BERT encoder with a gated dilated-CNN (DGCNN) head for multi-label\n",
    "    relation classification.\n",
    "\n",
    "    Optional behaviour (use_* flags set at construction):\n",
    "      - use_feature: concatenate POS-tag embeddings onto the token states\n",
    "      - use_vec:     add projected 200-d pretrained token vectors to the token states\n",
    "      - use_word:    pool a 200-d word-vector sequence through its own DGCNN stack\n",
    "      - use_pool:    concatenate BERT's pooled [CLS] output before the classifier\n",
    "      - use_plan:    condition token states on plan_label via conditional LayerNorm\n",
    "    loss_type selects clsLoss (margin-filtered, True) vs. summed BCEWithLogits.\n",
    "    \"\"\"\n",
    "    def __init__(self, bert_dir, config_file, num_rel_labels, hidden_dropout_prob, embed_dim,\n",
    "                 loss_type=True, use_feature=True, use_vec=False, use_word=False,\n",
    "                 use_pool=False, use_plan=False, alpha=1):\n",
    "        super(BertMulticlass_DGCNN, self).__init__()\n",
    "        self.num_rel_labels = num_rel_labels\n",
    "        self.alpha = alpha  # clsLoss weight of the negative-label term\n",
    "        self.bert = BertModel.from_pretrained(bert_dir, config=os.path.join(bert_dir, config_file),\n",
    "                                              hidden_dropout_prob=hidden_dropout_prob,\n",
    "                                              output_hidden_states=True, output_attentions=True)\n",
    "        self.dropout = nn.Dropout(hidden_dropout_prob)\n",
    "        # POS-tag embedding table; id2pos_BIO is the module-level tag inventory.\n",
    "        self.embed = nn.Embedding(len(id2pos_BIO), embed_dim)\n",
    "        if use_feature:\n",
    "            hidden_size = self.bert.pooler.dense.out_features + embed_dim\n",
    "        else:\n",
    "            hidden_size = self.bert.pooler.dense.out_features\n",
    "        # Project 200-d pretrained token vectors onto the BERT hidden size so\n",
    "        # they can be added to sequence_output in forward().\n",
    "        if use_vec and use_feature:\n",
    "            self.trans = nn.Linear(200, hidden_size - embed_dim)\n",
    "        if use_vec and not use_feature:\n",
    "            self.trans = nn.Linear(200, hidden_size)\n",
    "        # Gated dilated-CNN stack over token states (dilations 1, 1, 2, 4).\n",
    "        self.DGCNN = nn.Sequential(GCNN_block(hidden_size, hidden_size, 1, padding=0, dilation=1),\n",
    "                                   GCNN_block(hidden_size, hidden_size, 3, padding=1, dilation=1),\n",
    "                                   GCNN_block(hidden_size, hidden_size, 3, padding=2, dilation=2),\n",
    "                                   GCNN_block(hidden_size, hidden_size, 3, padding=4, dilation=4))\n",
    "        logit_size = hidden_size\n",
    "        if use_word:\n",
    "            # Separate DGCNN stack for the 200-d word-vector sequence.\n",
    "            self.DGCNN_vec = nn.Sequential(GCNN_block(200, 200, 1, padding=0, dilation=1),\n",
    "                                           GCNN_block(200, 200, 3, padding=1, dilation=1),\n",
    "                                           GCNN_block(200, 200, 3, padding=2, dilation=2),\n",
    "                                           GCNN_block(200, 200, 3, padding=4, dilation=4))\n",
    "            logit_size += 200\n",
    "        if use_plan:\n",
    "            # plan_label is a 112-d condition for the conditional LayerNorm.\n",
    "            self.cond_ln = Conditional_LayerNorm(hidden_size, 112)\n",
    "        if use_pool:\n",
    "            logit_size += self.bert.pooler.dense.out_features\n",
    "        self.classifier = nn.Linear(logit_size, num_rel_labels)\n",
    "        self.loss_type = loss_type\n",
    "        self.use_feature = use_feature\n",
    "        self.use_vec = use_vec\n",
    "        self.use_word = use_word\n",
    "        self.use_pool = use_pool\n",
    "        self.use_plan = use_plan\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids=None,\n",
    "        attention_mask=None,\n",
    "        token_type_ids=None,\n",
    "        rel_label=None,\n",
    "        feature=None,\n",
    "        postag=None,\n",
    "        tokenvec=None,\n",
    "        wordvec=None,\n",
    "        wordmask=None,\n",
    "        plan_label=None\n",
    "    ):\n",
    "        \"\"\"Run BERT + DGCNN head.\n",
    "\n",
    "        Returns (loss,) + (logits, hidden_states, attentions) when rel_label\n",
    "        is given, otherwise (logits, hidden_states, attentions).\n",
    "        \"\"\"\n",
    "        outputs = self.bert(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            token_type_ids=token_type_ids,\n",
    "        )\n",
    "\n",
    "        sequence_output = outputs[0]\n",
    "        pool_output = outputs[1]\n",
    "        if self.use_vec:\n",
    "            tokenvec = self.trans(tokenvec)\n",
    "            sequence_output += tokenvec\n",
    "        if self.use_feature:\n",
    "            postag_output = self.embed(postag)\n",
    "            sequence_output = torch.cat([sequence_output, postag_output], dim=-1)\n",
    "        if self.use_plan:\n",
    "            sequence_output = self.cond_ln(sequence_output, plan_label)\n",
    "        # DGCNN expects (batch, channels, seq); average-pool result to (batch, hidden).\n",
    "        sequence_output = F.adaptive_avg_pool1d(self.DGCNN([sequence_output.permute(0, 2, 1), attention_mask])[0], 1).squeeze(-1)\n",
    "        sequence_output = self.dropout(sequence_output)\n",
    "        if self.use_word:\n",
    "            word_output = F.adaptive_avg_pool1d(self.DGCNN_vec([wordvec.permute(0, 2, 1), wordmask])[0], 1).squeeze(-1)\n",
    "            word_output = self.dropout(word_output)\n",
    "        logits_input = sequence_output\n",
    "        if self.use_word:\n",
    "            logits_input = torch.cat([logits_input, word_output], dim=-1)\n",
    "        if self.use_pool:\n",
    "            pool_output = self.dropout(pool_output)\n",
    "            logits_input = torch.cat([logits_input, pool_output], dim=-1)\n",
    "        logits = self.classifier(logits_input)\n",
    "        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here\n",
    "\n",
    "        if rel_label is not None:\n",
    "            if self.num_rel_labels == 1:\n",
    "                # Regression. Bug fix: the target here is rel_label (``labels``\n",
    "                # was undefined in this scope and raised NameError).\n",
    "                loss_fct = MSELoss()\n",
    "                loss = loss_fct(logits.view(-1), rel_label.view(-1))\n",
    "            else:\n",
    "                if self.loss_type:\n",
    "                    loss_fct = clsLoss(0.3, self.alpha)\n",
    "                    loss = loss_fct(logits.view(-1, self.num_rel_labels), rel_label.view(-1, self.num_rel_labels))\n",
    "                else:\n",
    "                    loss_fct = nn.BCEWithLogitsLoss(reduction=\"none\")\n",
    "                    loss = ((loss_fct(logits.view(-1, self.num_rel_labels), rel_label.view(-1, self.num_rel_labels).float())).sum(dim=-1)).mean()\n",
    "            outputs = (loss,) + outputs\n",
    "\n",
    "        return outputs  # (loss), logits, (hidden_states), (attentions)\n",
    "class BertMulticlass(BertPreTrainedModel):\n",
    "    \"\"\"BERT with a pooled-output linear head for multi-label classification.\n",
    "\n",
    "    With num_labels == 1 the head is trained as a regressor (MSE); otherwise\n",
    "    each label gets an independent BCE-with-logits term, summed per sample\n",
    "    and averaged over the batch.\n",
    "    \"\"\"\n",
    "    def __init__(self, config):\n",
    "        super().__init__(config)\n",
    "        self.num_labels = config.num_labels\n",
    "\n",
    "        self.bert = BertModel(config)\n",
    "        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n",
    "        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)\n",
    "\n",
    "        self.init_weights()\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids=None,\n",
    "        attention_mask=None,\n",
    "        token_type_ids=None,\n",
    "        position_ids=None,\n",
    "        head_mask=None,\n",
    "        inputs_embeds=None,\n",
    "        labels=None,\n",
    "    ):\n",
    "        \"\"\"Return (loss?, logits, hidden_states, attentions); loss only when labels are given.\"\"\"\n",
    "\n",
    "        outputs = self.bert(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            token_type_ids=token_type_ids,\n",
    "            position_ids=position_ids,\n",
    "            head_mask=head_mask,\n",
    "            inputs_embeds=inputs_embeds,\n",
    "        )\n",
    "\n",
    "        pooled_output = outputs[1]\n",
    "\n",
    "        pooled_output = self.dropout(pooled_output)\n",
    "        logits = self.classifier(pooled_output)\n",
    "\n",
    "        outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here\n",
    "\n",
    "        if labels is not None:\n",
    "            if self.num_labels == 1:\n",
    "                #  We are doing regression\n",
    "                loss_fct = MSELoss()\n",
    "                loss = loss_fct(logits.view(-1), labels.view(-1))\n",
    "            else:\n",
    "                # Multi-label: per-label BCE, summed per sample, mean over batch.\n",
    "                loss_fct = nn.BCEWithLogitsLoss(reduction=\"none\")\n",
    "                loss = ((loss_fct(logits.view(-1, self.num_labels), labels.view(-1,self.num_labels).float())).sum(dim=-1)).mean()\n",
    "            outputs = (loss,) + outputs\n",
    "\n",
    "        return outputs  # (loss), logits, (hidden_states), (attentions)\n",
    "def set_seed(args):\n",
    "    random.seed(args.seed)\n",
    "    np.random.seed(args.seed)\n",
    "    torch.manual_seed(args.seed)\n",
    "    if args.n_gpu > 0:\n",
    "        torch.cuda.manual_seed_all(args.seed)\n",
    "def train_cls(args, train_dataset,val_dataset, model, tokenizer):\n",
    "    \"\"\"Fine-tune ``model`` on ``train_dataset``; evaluate on ``val_dataset`` every args.save_steps.\n",
    "\n",
    "    Returns (global_step, mean training loss). Relies on module-level\n",
    "    ``word2vec`` (embedding matrix), ``logger``, ``AdamW``,\n",
    "    ``get_linear_schedule_with_warmup`` and ``evaluate_cls``. Batches appear\n",
    "    to be [input_ids, attention_mask, token_type_ids, rel_label, postag,\n",
    "    feature, tokenvec, wordvec, wordmask, plan_label, ...]; the last element\n",
    "    is dropped before moving tensors to args.device — TODO confirm layout\n",
    "    against the dataset builder.\n",
    "    \"\"\"\n",
    "#     \"\"\" Train the model \"\"\"\n",
    "#     if args.local_rank in [-1, 0]:\n",
    "#         tb_writer = SummaryWriter()\n",
    "\n",
    "#     args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n",
    "    train_sampler = RandomSampler(train_dataset) # if args.local_rank == -1 else DistributedSampler(train_dataset)\n",
    "    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n",
    "    model.train()\n",
    "    if args.max_steps > 0:\n",
    "        t_total = args.max_steps\n",
    "        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n",
    "    else:\n",
    "        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n",
    "\n",
    "    # Prepare optimizer and schedule (linear warmup and decay)\n",
    "    no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "    optimizer_grouped_parameters = [\n",
    "        {\n",
    "            \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": args.weight_decay,\n",
    "        },\n",
    "        {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n",
    "    ]\n",
    "\n",
    "    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n",
    "    scheduler = get_linear_schedule_with_warmup(\n",
    "        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n",
    "    )\n",
    "\n",
    "    # Check if saved optimizer or scheduler states exist\n",
    "    if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n",
    "        os.path.join(args.model_name_or_path, \"scheduler.pt\")\n",
    "    ):\n",
    "        logger.info(\"  loading optimizer and scheduler...\")\n",
    "        # Load in optimizer and scheduler states\n",
    "        # NOTE(review): only the optimizer state is restored; the scheduler\n",
    "        # reload below is deliberately commented out.\n",
    "        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n",
    "#         scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n",
    "    else:\n",
    "        logger.info(\"  No optimizer and scheduler,we build a new one\")        \n",
    "\n",
    "    if args.fp16:\n",
    "        try:\n",
    "            from apex import amp\n",
    "        except ImportError:\n",
    "            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n",
    "        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n",
    "\n",
    "    # multi-gpu training (should be after apex fp16 initialization)\n",
    "    if args.n_gpu > 1:\n",
    "        model = torch.nn.DataParallel(model,device_ids=args.card_list)\n",
    "\n",
    "\n",
    "    # Train!\n",
    "    logger.info(\"***** Running training *****\")\n",
    "    logger.info(\"  Num examples = %d\", len(train_dataset))\n",
    "    logger.info(\"  Num Epochs = %d\", args.num_train_epochs)\n",
    "#     logger.info(\"  Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n",
    "    logger.info(\n",
    "        \"  Total train batch size (w. parallel, distributed & accumulation) = %d\",\n",
    "        args.train_batch_size\n",
    "        * args.gradient_accumulation_steps\n",
    "    )\n",
    "    logger.info(\"  Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n",
    "    logger.info(\"  Total optimization steps = %d\", t_total)\n",
    "\n",
    "    global_step = 0\n",
    "    epochs_trained = 0\n",
    "    steps_trained_in_current_epoch = 0\n",
    "    # Check if continuing training from a checkpoint\n",
    "    if os.path.exists(args.model_name_or_path):\n",
    "        # set global_step to global_step of last saved checkpoint from model path\n",
    "        # (checkpoint dirs are named \"checkpoint-<step>\", see save block below)\n",
    "        try:\n",
    "            global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n",
    "        except ValueError:\n",
    "            global_step = 0\n",
    "        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "\n",
    "        logger.info(\"  Continuing training from checkpoint, will skip to saved global_step\")\n",
    "        logger.info(\"  Continuing training from epoch %d\", epochs_trained)\n",
    "        logger.info(\"  Continuing training from global step %d\", global_step)\n",
    "        logger.info(\"  Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n",
    "\n",
    "    tr_loss, logging_loss = 0.0, 0.0\n",
    "    model.zero_grad()\n",
    "    train_iterator = tqdm(range(\n",
    "        epochs_trained, int(args.num_train_epochs)), desc=\"Epoch\")\n",
    "    set_seed(args)  # Added here for reproductibility\n",
    "    for _ in train_iterator:\n",
    "        start=time.time()\n",
    "        epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n",
    "\n",
    "        for step, batch in enumerate(epoch_iterator):\n",
    "\n",
    "            # Skip past any already trained steps if resuming training\n",
    "            if steps_trained_in_current_epoch > 0:\n",
    "                if  (step + 1) % args.gradient_accumulation_steps == 0: \n",
    "                        steps_trained_in_current_epoch -= 1\n",
    "                continue\n",
    "\n",
    "            model.train()\n",
    "            # batch[6]/batch[7] carry embedding indices; map them through the\n",
    "            # module-level word2vec matrix unless the tensor is all-zero\n",
    "            # (i.e. pretrained vectors unused for this run).\n",
    "            if batch[6].sum()!=0:\n",
    "                tokenvec=batch[6].detach().cpu().numpy()\n",
    "                batch[6]=torch.tensor(word2vec[tokenvec]).float()\n",
    "                wordvec=batch[7].detach().cpu().numpy()\n",
    "                batch[7]=torch.tensor(word2vec[wordvec]).float()\n",
    "            else:\n",
    "                batch[6]=torch.tensor(batch[6])\n",
    "                batch[7]=torch.tensor(batch[7])\n",
    "            batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "            inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"rel_label\": batch[3],\\\n",
    "                      \"postag\":batch[4],\"feature\":batch[5],\"tokenvec\":batch[6],\"wordvec\":batch[7],\\\n",
    "                      \"wordmask\":batch[8],\"plan_label\":batch[9].float()}\n",
    "            inputs[\"token_type_ids\"]=batch[2]\n",
    "            outputs = model(**inputs)\n",
    "            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)\n",
    "\n",
    "            if args.n_gpu > 1:\n",
    "                loss = loss.mean()  # mean() to average on multi-gpu parallel training\n",
    "            if args.gradient_accumulation_steps > 1:\n",
    "                loss = loss / args.gradient_accumulation_steps\n",
    "\n",
    "            if args.fp16:\n",
    "                with amp.scale_loss(loss, optimizer) as scaled_loss:\n",
    "                    scaled_loss.backward()\n",
    "            else:\n",
    "                loss.backward()\n",
    "            logger.info(\"  step:%d loss %.3f\", step,loss.item())\n",
    "\n",
    "            tr_loss += loss.item()\n",
    "            if (step + 1) % args.gradient_accumulation_steps == 0:\n",
    "                if args.fp16:\n",
    "                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n",
    "                else:\n",
    "                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n",
    "\n",
    "                optimizer.step()\n",
    "                scheduler.step()  # Update learning rate schedule\n",
    "                model.zero_grad()\n",
    "                global_step += 1\n",
    "\n",
    "\n",
    "                if  args.save_steps > 0 and global_step % args.save_steps == 0:\n",
    "                    # Save model checkpoint\n",
    "                    results = evaluate_cls(args, val_dataset,model, tokenizer)\n",
    "                    output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n",
    "                    if not os.path.exists(output_dir):\n",
    "                        os.makedirs(output_dir)\n",
    "                    model_to_save = (\n",
    "                        model.module if hasattr(model, \"module\") else model\n",
    "                    )  # Take care of distributed/parallel training\n",
    "                    torch.save(model_to_save.state_dict(),os.path.join(output_dir,\"model.pt\"))\n",
    "                    tokenizer.save_pretrained(output_dir)\n",
    "\n",
    "                    torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n",
    "                    logger.info(\"Saving model checkpoint to %s\", output_dir)\n",
    "\n",
    "                    torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n",
    "                    torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n",
    "                    if args.fp16:\n",
    "                        torch.save(amp.state_dict(),os.path.join(output_dir, \"amp.pt\"))\n",
    "                    logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n",
    "\n",
    "            if args.max_steps > 0 and global_step > args.max_steps:\n",
    "                epoch_iterator.close()\n",
    "                break\n",
    "        print(time.time()-start)\n",
    "        if args.max_steps > 0 and global_step > args.max_steps:\n",
    "            train_iterator.close()\n",
    "            break\n",
    "\n",
    "\n",
    "    return global_step, tr_loss / global_step\n",
    "def evaluate_cls(args, eval_dataset,model, tokenizer, thre=0.5):\n",
    "    \"\"\"Evaluate ``model`` on ``eval_dataset`` using sigmoid threshold ``thre``.\n",
    "\n",
    "    Returns (results dict, thresholded predictions, gold label ids, per-batch\n",
    "    sigmoid scores). Metrics are appended to eval_results.txt in\n",
    "    args.output_dir. Uses the module-level ``word2vec`` matrix to map the\n",
    "    index tensors batch[6]/batch[7] to embedding vectors.\n",
    "    \"\"\"\n",
    "    eval_output_dir = args.output_dir \n",
    "\n",
    "    results = {}\n",
    "#         eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)\n",
    "\n",
    "    if not os.path.exists(eval_output_dir) :\n",
    "        os.makedirs(eval_output_dir)\n",
    "\n",
    "#         args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n",
    "    # Note that DistributedSampler samples randomly\n",
    "    eval_sampler = SequentialSampler(eval_dataset)\n",
    "    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n",
    "\n",
    "    # multi-gpu eval\n",
    "#         if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n",
    "#             model = torch.nn.DataParallel(model)\n",
    "\n",
    "    # Eval!\n",
    "    logger.info(\"***** Running evaluation *****\")\n",
    "    logger.info(\"  Num examples = %d\", len(eval_dataset))\n",
    "    logger.info(\"  Batch size = %d\", args.eval_batch_size)\n",
    "    eval_loss = 0.0\n",
    "    nb_eval_steps = 0\n",
    "    preds = None\n",
    "    full_logits=[]\n",
    "    out_label_ids = None\n",
    "    rights,num=0,0\n",
    "    recall_num,full_num=0,0\n",
    "    precision_num,full_num_pre=0,0\n",
    "    for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n",
    "        model.eval()\n",
    "        # Map embedding indices through the module-level word2vec matrix.\n",
    "        tokenvec=batch[6].detach().cpu().numpy()\n",
    "        batch[6]=torch.tensor(word2vec[tokenvec]).float()\n",
    "        wordvec=batch[7].detach().cpu().numpy()\n",
    "        batch[7]=torch.tensor(word2vec[wordvec]).float()\n",
    "        batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "\n",
    "\n",
    "        with torch.no_grad():\n",
    "\n",
    "            inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"rel_label\": batch[3],\\\n",
    "                      \"postag\":batch[4],\"feature\":batch[5],\"tokenvec\":batch[6],\"wordvec\":batch[7],\\\n",
    "                      \"wordmask\":batch[8],\"plan_label\":batch[9].float()}\n",
    "            inputs[\"token_type_ids\"]=batch[2]\n",
    "            outputs = model(**inputs)\n",
    "            tmp_eval_loss, logits = outputs[:2]\n",
    "\n",
    "            eval_loss += tmp_eval_loss.mean().item()\n",
    "        # Exact-match accuracy: all labels of a sample must be correct.\n",
    "        rights+=((torch.sigmoid(logits.detach())>thre).long()==batch[3]).all(dim=-1).sum().detach().cpu().item()\n",
    "        num+=batch[3].shape[0]\n",
    "        # recall_num and precision_num both count true positives (pred==1 and\n",
    "        # gold==1) — the shared numerator; only the denominators (full_num vs\n",
    "        # full_num_pre) differ between recall and precision.\n",
    "        recall_num+=(((torch.sigmoid(logits.detach())>thre).long()==1) & (batch[3]==1)).sum().detach().cpu().item()\n",
    "        precision_num+=(((torch.sigmoid(logits.detach())>thre).long()==1) & (batch[3]==1)).sum().detach().cpu().item()\n",
    "        full_num+=batch[3].sum().detach().cpu().item()\n",
    "        full_num_pre+=(torch.sigmoid(logits.detach())>thre).long().sum().detach().cpu().item()\n",
    "        nb_eval_steps += 1\n",
    "        full_logits.append((torch.sigmoid(logits.detach())).detach().cpu().numpy())\n",
    "        if preds is None:\n",
    "            preds = (torch.sigmoid(logits.detach())>thre).detach().cpu().numpy()\n",
    "            out_label_ids = inputs[\"rel_label\"].detach().cpu().numpy()\n",
    "#             out_label_ids=inputs[\"labels\"].detach().cpu().numpy()\n",
    "        else:\n",
    "            preds = np.append(preds, (torch.sigmoid(logits.detach())>thre).detach().cpu().numpy(), axis=0)\n",
    "            out_label_ids = np.append(out_label_ids, inputs[\"rel_label\"].detach().cpu().numpy(), axis=0)\n",
    "#             out_label_ids = np.append(out_label_ids, inputs[\"labels\"].detach().cpu().numpy(), axis=0)\n",
    "\n",
    "    eval_loss = eval_loss / nb_eval_steps\n",
    "    if args.output_mode == \"classification\":\n",
    "        preds = preds.astype(np.int8)\n",
    "        out_label_ids=out_label_ids.astype(np.int8)\n",
    "    elif args.output_mode == \"regression\":\n",
    "        preds = np.squeeze(preds)\n",
    "    # +0.001 in the denominators guards against division by zero.\n",
    "    result = {\"acc_class\":(preds==out_label_ids).sum(axis=0)/preds.shape[0],\"acc_sample\":rights/(num+0.001),\\\n",
    "              \"recall_sample\":recall_num/(full_num+0.001),\"precision_sample\":precision_num/(full_num_pre+0.001)}\n",
    "    results.update(result)\n",
    "\n",
    "    output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n",
    "    with open(output_eval_file, \"a\") as writer:\n",
    "        logger.info(\"***** Eval results *****\")\n",
    "        for key in sorted(result.keys()):\n",
    "            logger.info(\"  %s = %s\", key, str(result[key]))\n",
    "            writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n",
    "\n",
    "    return results,preds,out_label_ids,full_logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "hidden": true,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "06/18/2020 15:42:25 - INFO - transformers.configuration_utils -   loading configuration file ./roberta-zh-wwm-pytorch/bert_config.json\n",
      "06/18/2020 15:42:25 - INFO - transformers.configuration_utils -   Model config BertConfig {\n",
      "  \"_num_labels\": 2,\n",
      "  \"architectures\": null,\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"bos_token_id\": null,\n",
      "  \"directionality\": \"bidi\",\n",
      "  \"do_sample\": false,\n",
      "  \"early_stopping\": false,\n",
      "  \"eos_token_ids\": null,\n",
      "  \"finetuning_task\": null,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"id2label\": {\n",
      "    \"0\": \"LABEL_0\",\n",
      "    \"1\": \"LABEL_1\"\n",
      "  },\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"is_decoder\": false,\n",
      "  \"label2id\": {\n",
      "    \"LABEL_0\": 0,\n",
      "    \"LABEL_1\": 1\n",
      "  },\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"length_penalty\": 1.0,\n",
      "  \"max_length\": 20,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_beams\": 1,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"num_return_sequences\": 1,\n",
      "  \"output_attentions\": true,\n",
      "  \"output_hidden_states\": true,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": null,\n",
      "  \"pooler_fc_size\": 768,\n",
      "  \"pooler_num_attention_heads\": 12,\n",
      "  \"pooler_num_fc_layers\": 3,\n",
      "  \"pooler_size_per_head\": 128,\n",
      "  \"pooler_type\": \"first_token_transform\",\n",
      "  \"pruned_heads\": {},\n",
      "  \"repetition_penalty\": 1.0,\n",
      "  \"temperature\": 1.0,\n",
      "  \"top_k\": 50,\n",
      "  \"top_p\": 1.0,\n",
      "  \"torchscript\": false,\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_bfloat16\": false,\n",
      "  \"vocab_size\": 21128\n",
      "}\n",
      "\n",
      "06/18/2020 15:42:25 - INFO - transformers.modeling_utils -   loading weights file ./roberta-zh-wwm-pytorch/pytorch_model.bin\n"
     ]
    }
   ],
   "source": [
    "model_cls=BertMulticlass_DGCNN(bert_dir,config_file,num_rel_labels=num_rel_labels,\n",
    "                  hidden_dropout_prob=hidden_dropout_prob,embed_dim=128,loss_type=True,use_feature=True,use_vec=False,use_word=False,use_pool=True,use_plan=True,alpha=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "metadata": {
    "hidden": true,
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "set_seed(args)\n",
    "model_cls.to(args.device)\n",
    "global_step, tr_loss = train(args,cls_train_dataset,cls_val_dataset, model_cls, tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "hidden": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# ckpt=torch.load(\"./output_1_1_0_1_1_1_new/checkpoint-35988/model.pt\", map_location=lambda storage, loc: storage)\n",
    "# model_cls.load_state_dict(ckpt)\n",
    "# model_cls=model_cls.to(args.device)\n",
    "results,preds,out_label_ids,full_logits=evaluate(args,cls_test_dataset,model_cls,tokenizer,thre=0.5)\n",
    "#获取test集/val集(在进行两阶段模型综合性能测试时)的关系分类结果后，进入1.9构建结合句子和本阶段模型输出的分类结果的关系实体识别数据集ner_pred_dataset\n",
    "#用于二阶段模型\n",
    "\n",
    "# full_logits=np.concatenate(full_logits,axis=0)\n",
    "# np.save(\"./cross_results/test_cls_new.npy\",full_logits)\n",
    "# full_logits.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "#交叉ensemble部分\n",
    "preds=[]\n",
    "\n",
    "for idx in range(3):\n",
    "    preds.append(np.load(\"./cross_results/test_cls_\"+str(idx)+\".npy\"))\n",
    "preds.append(np.load(\"./cross_results/test_cls_full.npy\"))\n",
    "preds.append(np.load(\"./cross_results/test_cls_new.npy\"))\n",
    "preds=np.stack(preds,axis=2)\n",
    "preds=preds.mean(axis=-1)\n",
    "preds=(preds>0.5).astype(np.int)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 序列标注"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## models set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "hidden_dropout_prob = 0.1\n",
    "num_rel_labels = len(id2rels)\n",
    "num_token_labels=len(id2labels)\n",
    "learning_rate = 5e-5\n",
    "weight_decay = 0\n",
    "epochs = 3\n",
    "batch_size = 16\n",
    "adam_epsilon=1e-8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "code_folding": [
     0,
     76,
     84,
     138,
     144,
     308,
     407,
     426,
     447,
     523,
     687
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "class BERT_BiLSTM_CRF(nn.Module):\n",
    "    def __init__(self,bert_dir,config_file,num_rel_labels,num_token_labels,hidden_dropout_prob,need_birnn=True,need_cnn=False,rnn_dim=128):\n",
    "        super(BERT_BiLSTM_CRF, self).__init__()\n",
    "        self.num_rel_labels = num_rel_labels\n",
    "        self.num_token_labels= num_token_labels\n",
    "        self.bert =BertModel.from_pretrained(bert_dir,config=os.path.join(bert_dir,config_file), \\\n",
    "                                             hidden_dropout_prob=hidden_dropout_prob,output_hidden_states=True, output_attentions=True)\n",
    "        self.dropout = nn.Dropout(hidden_dropout_prob)\n",
    "        out_dim =self.bert.pooler.dense.out_features\n",
    "        hidden_size=out_dim\n",
    "        self.rel_classifier = nn.Linear(out_dim, self.num_rel_labels)\n",
    "        self.need_birnn = need_birnn\n",
    "        self.need_cnn=need_cnn\n",
    "        # 如果为False，则不要BiLSTM层\n",
    "        if need_birnn:\n",
    "            self.birnn = nn.LSTM(out_dim, rnn_dim, num_layers=1, bidirectional=True, batch_first=True)\n",
    "            out_dim = rnn_dim*2\n",
    "        if need_cnn:\n",
    "            self.DGCNN=nn.Sequential(GCNN_block(hidden_size,hidden_size,1,padding=0,dilation=1),\n",
    "                       GCNN_block(hidden_size,hidden_size,3,padding=1,dilation=1),\n",
    "                       GCNN_block(hidden_size,hidden_size,3,padding=2,dilation=2),\n",
    "                       GCNN_block(hidden_size,hidden_size,3,padding=4,dilation=4))\n",
    "            out_dim=hidden_size\n",
    "            #attention/batch norm+relu/ pooling/什么都不用\n",
    "        \n",
    "        self.hidden2tag = nn.Linear(out_dim, self.num_token_labels)\n",
    "        self.crf = CRF(self.num_token_labels, batch_first=True)\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids=None,\n",
    "        attention_mask=None,\n",
    "        token_type_ids=None,\n",
    "        labels=None,\n",
    "        label_tokens=None\n",
    "    ):\n",
    "\n",
    "        outputs = self.bert(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            token_type_ids=token_type_ids)\n",
    "\n",
    "        seq_output,pooled_output = outputs[0],outputs[1]\n",
    "        pooled_output = self.dropout(pooled_output)\n",
    "        emissions=self.tag_outputs(seq_output,attention_mask)\n",
    "        rel_logits = self.rel_classifier(pooled_output)\n",
    "        seq_logits = emissions\n",
    "        outputs = (rel_logits,seq_logits) + outputs[2:]  # add hidden states and attention if they are here\n",
    "        loss_full=None\n",
    "        if labels is not None:\n",
    "            loss_rel = nn.BCEWithLogitsLoss(reduction=\"none\")\n",
    "            l_r = ((loss_rel(rel_logits.view(-1, self.num_rel_labels), labels.view(-1,self.num_rel_labels).float())).sum(dim=-1)).mean()\n",
    "            loss_full=l_r\n",
    "\n",
    "        if label_tokens is not None:\n",
    "            l_t = -1*self.crf(emissions, label_tokens, mask=attention_mask.byte(),reduction='mean')\n",
    "            loss_full+=l_t\n",
    "        if loss_full is not None:\n",
    "            outputs = (loss_full,) + outputs\n",
    "        return outputs  # (loss), logits, (hidden_states), (attentions)\n",
    "\n",
    "    def tag_outputs(self, sequence_output,attention_mask):\n",
    "        if self.need_birnn:\n",
    "            sequence_output, _ = self.birnn(sequence_output)\n",
    "        if self.need_cnn:\n",
    "            sequence_output=self.DGCNN([sequence_output.permute(0,2,1),attention_mask])[0].permute(0,2,1)\n",
    "        sequence_output = self.dropout(sequence_output)\n",
    "        emissions = self.hidden2tag(sequence_output)\n",
    "        return emissions\n",
    "    \n",
    "    def predict(self, input_ids,attention_mask=None,token_type_ids=None):\n",
    "        outputs = self.bert(input_ids, attention_mask=attention_mask,token_type_ids=token_type_ids )\n",
    "        sequence_output = outputs[0]\n",
    "        emissions = self.tag_outputs(sequence_output,attention_mask)\n",
    "        return self.crf.decode(emissions, attention_mask.byte())\n",
    "    def logits_predict(self, emissions,attention_mask=None):\n",
    "        return self.crf.decode(emissions, attention_mask.byte())\n",
    "class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):\n",
    "    def __init__(self):\n",
    "            super(MaskedSoftmaxCELoss,self).__init__(reduction='none')\n",
    "    def forward(self,pred,label,mask=None):\n",
    "            loss=super(MaskedSoftmaxCELoss,self).forward((pred).contiguous().view(-1,pred.shape[-1]),(label).contiguous().view(-1).long()).view(label.shape)\n",
    "            if mask is not None:\n",
    "                loss*=mask.float().contiguous()\n",
    "            return loss\n",
    "class BertSeqLabeling(nn.Module):\n",
    "    def __init__(self,bert_dir,config_file,num_rel_labels,num_token_labels,hidden_dropout_prob):\n",
    "        super(BertSeqLabeling,self).__init__()\n",
    "        self.num_rel_labels = num_rel_labels\n",
    "        self.num_token_labels= num_token_labels\n",
    "        self.bert =BertModel.from_pretrained(bert_dir,config=os.path.join(bert_dir,config_file), \\\n",
    "                                             hidden_dropout_prob=hidden_dropout_prob,output_hidden_states=True, output_attentions=True)\n",
    "        self.dropout = nn.Dropout(hidden_dropout_prob)\n",
    "        self.rel_classifier = nn.Linear(self.bert.pooler.dense.out_features, self.num_rel_labels)\n",
    "        self.seq_classifier=nn.Linear(self.bert.pooler.dense.out_features,self.num_token_labels)\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids=None,\n",
    "        attention_mask=None,\n",
    "        token_type_ids=None,\n",
    "        position_ids=None,\n",
    "        head_mask=None,\n",
    "        inputs_embeds=None,\n",
    "        labels=None,\n",
    "        label_tokens=None,\n",
    "    ):\n",
    "\n",
    "        outputs = self.bert(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            token_type_ids=token_type_ids,\n",
    "            position_ids=position_ids,\n",
    "            head_mask=head_mask,\n",
    "            inputs_embeds=inputs_embeds,\n",
    "        )\n",
    "\n",
    "        seq_output,pooled_output = outputs[0],outputs[1]\n",
    "        pooled_output = self.dropout(pooled_output)\n",
    "        seq_output=self.dropout(seq_output)\n",
    "        rel_logits = self.rel_classifier(pooled_output)\n",
    "        seq_logits = self.seq_classifier(seq_output)\n",
    "        outputs = (rel_logits,seq_logits) + outputs[2:]  # add hidden states and attention if they are here\n",
    "        loss_full=None\n",
    "        if labels is not None:\n",
    "            loss_rel = nn.BCEWithLogitsLoss(reduction=\"none\")\n",
    "            l_r = ((loss_rel(rel_logits.view(-1, self.num_rel_labels), labels.view(-1,self.num_rel_labels).float())).sum(dim=-1)).mean()\n",
    "            loss_full=0.5*l_r\n",
    "\n",
    "        if label_tokens is not None:\n",
    "            loss_token =MaskedSoftmaxCELoss()\n",
    "            # Only keep active parts of the loss\n",
    "            if attention_mask is not None:\n",
    "                l_t = (loss_token(seq_logits, label_tokens,attention_mask)).sum(dim=-1).mean()\n",
    "            else:\n",
    "                l_t = loss_token(seq_logits, label_tokens).sum(dim=-1).mean()\n",
    "            loss_full+=l_t\n",
    "        if loss_full is not None:\n",
    "            outputs = (loss_full,) + outputs\n",
    "        return outputs  # (loss), logits, (hidden_states), (attentions)\n",
    "def set_seed(args):\n",
    "    random.seed(args.seed)\n",
    "    np.random.seed(args.seed)\n",
    "    torch.manual_seed(args.seed)\n",
    "    if args.n_gpu > 0:\n",
    "        torch.cuda.manual_seed_all(args.seed)\n",
    "def train_ner(args, train_dataset,val_dataset, model, tokenizer):\n",
    "#     \"\"\" Train the model \"\"\"\n",
    "#     if args.local_rank in [-1, 0]:\n",
    "#         tb_writer = SummaryWriter()\n",
    "\n",
    "#     args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n",
    "    train_sampler = RandomSampler(train_dataset) # if args.local_rank == -1 else DistributedSampler(train_dataset)\n",
    "    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n",
    "\n",
    "    if args.max_steps > 0:\n",
    "        t_total = args.max_steps\n",
    "        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n",
    "    else:\n",
    "        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n",
    "\n",
    "    # Prepare optimizer and schedule (linear warmup and decay)\n",
    "    no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "    optimizer_grouped_parameters = [\n",
    "        {\n",
    "            \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": args.weight_decay,\n",
    "        },\n",
    "        {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n",
    "    ]\n",
    "\n",
    "    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n",
    "    scheduler = get_linear_schedule_with_warmup(\n",
    "        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n",
    "    )\n",
    "\n",
    "    # Check if saved optimizer or scheduler states exist\n",
    "    if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n",
    "        os.path.join(args.model_name_or_path, \"scheduler.pt\")\n",
    "    ):\n",
    "        logger.info(\"  loading optimizer and scheduler...\")\n",
    "        # Load in optimizer and scheduler states\n",
    "        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n",
    "#         scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n",
    "    else:\n",
    "        logger.info(\"  No optimizer and scheduler,we build a new one\")        \n",
    "\n",
    "    if args.fp16:\n",
    "        try:\n",
    "            from apex import amp\n",
    "        except ImportError:\n",
    "            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n",
    "        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n",
    "\n",
    "    # multi-gpu training (should be after apex fp16 initialization)\n",
    "    if args.n_gpu > 1:\n",
    "        model = torch.nn.DataParallel(model,device_ids=args.card_list)\n",
    "\n",
    "\n",
    "    # Train!\n",
    "    logger.info(\"***** Running training *****\")\n",
    "    logger.info(\"  Num examples = %d\", len(train_dataset))\n",
    "    logger.info(\"  Num Epochs = %d\", args.num_train_epochs)\n",
    "#     logger.info(\"  Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n",
    "    logger.info(\n",
    "        \"  Total train batch size (w. parallel, distributed & accumulation) = %d\",\n",
    "        args.train_batch_size\n",
    "        * args.gradient_accumulation_steps\n",
    "#         * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n",
    "    )\n",
    "    logger.info(\"  Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n",
    "    logger.info(\"  Total optimization steps = %d\", t_total)\n",
    "\n",
    "    global_step = 0\n",
    "    epochs_trained = 0\n",
    "    steps_trained_in_current_epoch = 0\n",
    "    # Check if continuing training from a checkpoint\n",
    "    if os.path.exists(args.model_name_or_path):\n",
    "        # set global_step to global_step of last saved checkpoint from model path\n",
    "        try:\n",
    "            global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n",
    "        except ValueError:\n",
    "            global_step = 0\n",
    "        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "\n",
    "        logger.info(\"  Continuing training from checkpoint, will skip to saved global_step\")\n",
    "        logger.info(\"  Continuing training from epoch %d\", epochs_trained)\n",
    "        logger.info(\"  Continuing training from global step %d\", global_step)\n",
    "        logger.info(\"  Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n",
    "\n",
    "    tr_loss, logging_loss = 0.0, 0.0\n",
    "    model.zero_grad()\n",
    "    train_iterator = tqdm(range(\n",
    "        epochs_trained, int(args.num_train_epochs)), desc=\"Epoch\")\n",
    "    set_seed(args)  # Added here for reproductibility\n",
    "    for _ in train_iterator:\n",
    "        epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n",
    "        for step, batch in enumerate(epoch_iterator):\n",
    "\n",
    "            # Skip past any already trained steps if resuming training\n",
    "            if steps_trained_in_current_epoch > 0:\n",
    "                if  (step + 1) % args.gradient_accumulation_steps == 0: \n",
    "                        steps_trained_in_current_epoch -= 1\n",
    "                continue\n",
    "\n",
    "            model.train()\n",
    "            batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "            inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3],\"label_tokens\":batch[4]}\n",
    "            inputs[\"token_type_ids\"]=batch[2]\n",
    "#             if args.model_type != \"distilbert\":\n",
    "#                 inputs[\"token_type_ids\"] = (\n",
    "#                     batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n",
    "#                 )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n",
    "            outputs = model(**inputs)\n",
    "            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)\n",
    "\n",
    "            if args.n_gpu > 1:\n",
    "                loss = loss.mean()  # mean() to average on multi-gpu parallel training\n",
    "            if args.gradient_accumulation_steps > 1:\n",
    "                loss = loss / args.gradient_accumulation_steps\n",
    "\n",
    "            if args.fp16:\n",
    "                with amp.scale_loss(loss, optimizer) as scaled_loss:\n",
    "                    scaled_loss.backward()\n",
    "            else:\n",
    "                loss.backward()\n",
    "            logger.info(\"  step:%d loss %.3f\", step,loss.item())\n",
    "\n",
    "            tr_loss += loss.item()\n",
    "            if (step + 1) % args.gradient_accumulation_steps == 0:\n",
    "                if args.fp16:\n",
    "                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n",
    "                else:\n",
    "                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n",
    "\n",
    "                optimizer.step()\n",
    "                scheduler.step()  # Update learning rate schedule\n",
    "                model.zero_grad()\n",
    "                global_step += 1\n",
    "\n",
    "                if  args.save_steps > 0 and global_step % args.save_steps == 0:\n",
    "                    # Save model checkpoint\n",
    "                    results = evaluate_ner(args, val_dataset,model, tokenizer)\n",
    "                    output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n",
    "                    if not os.path.exists(output_dir):\n",
    "                        os.makedirs(output_dir)\n",
    "                    model_to_save = (\n",
    "                        model.module if hasattr(model, \"module\") else model\n",
    "                    )  # Take care of distributed/parallel training\n",
    "                    torch.save(model_to_save.state_dict(),os.path.join(output_dir,\"model.pt\"))\n",
    "                    tokenizer.save_pretrained(output_dir)\n",
    "\n",
    "                    torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n",
    "                    logger.info(\"Saving model checkpoint to %s\", output_dir)\n",
    "                    if args.fp16:\n",
    "                        torch.save(amp.state_dict(),os.path.join(output_dir, \"amp.pt\"))\n",
    "                    torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n",
    "                    torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n",
    "                    logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n",
    "\n",
    "            if args.max_steps > 0 and global_step > args.max_steps:\n",
    "                epoch_iterator.close()\n",
    "                break\n",
    "        if args.max_steps > 0 and global_step > args.max_steps:\n",
    "            train_iterator.close()\n",
    "            break\n",
    "\n",
    "\n",
    "    return global_step, tr_loss / global_step\n",
    "def evaluate_ner(args, eval_dataset,model, tokenizer, prefix=\"\"):\n",
    "    eval_output_dir = args.output_dir \n",
    "\n",
    "    results = {}\n",
    "\n",
    "    if not os.path.exists(eval_output_dir) :\n",
    "        os.makedirs(eval_output_dir)\n",
    "\n",
    "    eval_sampler = SequentialSampler(eval_dataset)\n",
    "    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=0)\n",
    "\n",
    "    # multi-gpu eval\n",
    "#         if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n",
    "#             model = torch.nn.DataParallel(model)\n",
    "\n",
    "    # Eval!\n",
    "    logger.info(\"***** Running evaluation {} *****\".format(prefix))\n",
    "    logger.info(\"  Num examples = %d\", len(eval_dataset))\n",
    "    logger.info(\"  Batch size = %d\", args.eval_batch_size)\n",
    "    eval_loss = 0.0\n",
    "    nb_eval_steps = 0\n",
    "    rel_preds=None\n",
    "    rel_labels=None\n",
    "    token_preds=None\n",
    "    token_labels=None\n",
    "    full_token_logits=[]\n",
    "    full_rel_logits=[]\n",
    "    for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n",
    "        model.eval()\n",
    "        batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "\n",
    "        with torch.no_grad():\n",
    "            inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3],\"label_tokens\":batch[4]}\n",
    "            inputs[\"token_type_ids\"]=batch[2]\n",
    "            temp_inputs={\"input_ids\": batch[0], \"attention_mask\": batch[1],\"token_type_ids\":batch[2]}\n",
    "#                 if args.model_type != \"distilbert\":\n",
    "#                     inputs[\"token_type_ids\"] = (\n",
    "#                         batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n",
    "#                     )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n",
    "            outputs = model(**inputs)\n",
    "            model_to_predict = (\n",
    "                model.module if hasattr(model, \"module\") else model\n",
    "            )  # Take care of distributed/parallel training\n",
    "            token_pred=model_to_predict.predict(**temp_inputs)\n",
    "            tmp_eval_loss, rel_logits, token_logits = outputs[:3]\n",
    "            \n",
    "            eval_loss += tmp_eval_loss.mean().item()\n",
    "        nb_eval_steps += 1\n",
    "        mask=batch[1].view(-1).detach().cpu().numpy()\n",
    "        rel_logits = F.softmax(rel_logits,dim=-1)\n",
    "        rel_pred =torch.argmax(rel_logits,dim=-1).detach().cpu().numpy()\n",
    "        rel_label=torch.argmax(inputs[\"labels\"],dim=-1).detach().cpu().numpy()\n",
    "        full_rel_logits.append(rel_logits.detach().cpu().numpy())\n",
    "        if rel_preds is None:\n",
    "            rel_preds=rel_pred\n",
    "        else:\n",
    "            rel_preds=np.append(rel_preds,rel_pred,axis=0)\n",
    "        if rel_labels is None:\n",
    "            rel_labels=rel_label\n",
    "        else:\n",
    "            rel_labels=np.append(rel_labels,rel_label,axis=0)     \n",
    "\n",
    "            \n",
    "        #token classify\n",
    "        token_logits = F.softmax(token_logits,dim=-1)\n",
    "        token_logits =token_logits.view(-1).detach().cpu().numpy()\n",
    "        full_token_logits.append(token_logits.reshape(-1,256,9))\n",
    "        for i in range(len(token_pred)):\n",
    "            token_pred[i]+=[label2ids['[PAD]']]*(max_len-len(token_pred[i]))\n",
    "        token_pred=np.array(token_pred).reshape(-1)*mask\n",
    "        token_label=inputs[\"label_tokens\"].view(-1).detach().cpu().numpy()*mask\n",
    "        if token_preds is None:\n",
    "            token_preds=token_pred\n",
    "        else:\n",
    "            token_preds=np.append(token_preds,token_pred,axis=0)\n",
    "        if token_labels is None:\n",
    "            token_labels=token_label\n",
    "        else:\n",
    "            token_labels=np.append(token_labels,token_label,axis=0) \n",
    "        \n",
    "\n",
    "\n",
    "    eval_loss = eval_loss / nb_eval_steps\n",
    "    print(\"准召报告：\")\n",
    "    df_precison_recall_f1=pd.DataFrame(metrics.precision_recall_fscore_support(rel_labels,rel_preds),index=['precison','recall','f1','number'])\n",
    "    display(HTML(df_precison_recall_f1.to_html()))\n",
    "    df_precison_recall_f2=pd.DataFrame(metrics.precision_recall_fscore_support(token_labels,token_preds),index=['precison','recall','f1','number'])\n",
    "    display(HTML(df_precison_recall_f2.to_html()))\n",
    "    result = {\"cls\":    df_precison_recall_f1,'ner':    df_precison_recall_f2}\n",
    "    results.update(result)\n",
    "\n",
    "    output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n",
    "    with open(output_eval_file, \"a\") as writer:\n",
    "        logger.info(\"***** Eval results {} *****\".format(prefix))\n",
    "        for key in sorted(result.keys()):\n",
    "            logger.info(\"  %s = %s\", key, str(result[key]))\n",
    "            writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n",
    "\n",
    "    return rel_labels,rel_preds,token_preds,token_labels,full_token_logits,full_rel_logits\n",
    "class GCNN_block(nn.Module):\n",
    "    def __init__(self,input_channel,output_channel,kernel_size,padding,dilation=1):\n",
    "        super(GCNN_block,self).__init__()\n",
    "        self.input_channel=input_channel\n",
    "        self.output_channel=output_channel\n",
    "        self.conv1=nn.Conv1d(input_channel,output_channel,kernel_size,padding=padding,dilation=dilation)\n",
    "        self.conv2=nn.Conv1d(input_channel,output_channel,kernel_size,padding=padding,dilation=dilation)\n",
    "        if input_channel !=output_channel:\n",
    "            self.trans=nn.Conv1d(input_channel,output_channel,1)\n",
    "    def forward(self,args):\n",
    "        X,attention_mask=args[0],args[1]\n",
    "        X=X*attention_mask.unsqueeze(1).float()\n",
    "        gate=torch.sigmoid(self.conv2(X))\n",
    "        if self.input_channel==self.output_channel:\n",
    "            Y=X*(1-gate)+self.conv1(X)*gate\n",
    "        else:\n",
    "            Y=self.trans(X)*(1-gate)+self.conv1(X)*gate\n",
    "        Y=Y*attention_mask.unsqueeze(1).float()\n",
    "        return Y,attention_mask\n",
    "class Conditional_LayerNorm(nn.Module):\n",
    "    def __init__(self,features,conditional_dim,eps=1e-6):\n",
    "        super(Conditional_LayerNorm,self).__init__()\n",
    "        self.gamma=nn.Parameter(torch.ones(features))\n",
    "        self.beta=nn.Parameter(torch.zeros(features))\n",
    "        self.trans_gamma=nn.Linear(conditional_dim,self.gamma.shape[-1])\n",
    "        self.trans_beta=nn.Linear(conditional_dim,self.beta.shape[-1])\n",
    "        torch.nn.init.constant_(self.trans_gamma.weight,val=0)\n",
    "        torch.nn.init.constant_(self.trans_gamma.bias,val=0)\n",
    "        torch.nn.init.constant_(self.trans_beta.weight,val=0)\n",
    "        torch.nn.init.constant_(self.trans_beta.bias,val=0)\n",
    "        self.eps=eps\n",
    "    def forward(self,X,condition):\n",
    "        mean=X.mean(-1,keepdim=True)\n",
    "        std=X.std(-1,keepdim=True)\n",
    "        cond_gamma=self.trans_gamma(condition)\n",
    "        cond_beta=self.trans_beta(condition)\n",
    "        if condition.dim()<X.dim(): #condition是固定维度\n",
    "            return (self.gamma+cond_gamma).unsqueeze(1)*(X-mean)/(std+self.eps)+(self.beta+cond_beta).unsqueeze(1)\n",
    "        else:#condition是sequence\n",
    "            return (self.gamma+cond_gamma)*(X-mean)/(std+self.eps)+(self.beta+cond_beta)\n",
    "class BERT_BiLSTM_POINTER(nn.Module):\n",
    "    def __init__(self,bert_dir,config_file,num_rel_labels,num_token_labels,hidden_dropout_prob,\\\n",
    "                 need_birnn=True,need_cnn=False,rnn_dim=128,use_plan=False,p=1):\n",
    "        super(BERT_BiLSTM_POINTER, self).__init__()\n",
    "        self.num_rel_labels = num_rel_labels\n",
    "        self.num_token_labels= 4\n",
    "        self.bert =BertModel.from_pretrained(bert_dir,config=os.path.join(bert_dir,config_file), \\\n",
    "                                             hidden_dropout_prob=hidden_dropout_prob,output_hidden_states=True, output_attentions=True)\n",
    "        self.dropout = nn.Dropout(hidden_dropout_prob)\n",
    "        out_dim =self.bert.pooler.dense.out_features\n",
    "        hidden_size=out_dim\n",
    "        self.rel_classifier = nn.Linear(out_dim, self.num_rel_labels)\n",
    "        self.need_birnn = need_birnn\n",
    "        self.need_cnn=need_cnn\n",
    "        self.p=p\n",
    "        self.use_plan=use_plan\n",
    "        if use_plan:\n",
    "            self.cond_ln=Conditional_LayerNorm(hidden_size,9)\n",
    "        if need_birnn:\n",
    "            self.birnn = nn.LSTM(out_dim, rnn_dim, num_layers=1, bidirectional=True, batch_first=True)\n",
    "            out_dim = rnn_dim*2\n",
    "        if need_cnn:\n",
    "            self.DGCNN=nn.Sequential(GCNN_block(hidden_size,hidden_size,1,padding=0,dilation=1),\n",
    "                       GCNN_block(hidden_size,hidden_size,3,padding=1,dilation=1),\n",
    "                       GCNN_block(hidden_size,hidden_size,3,padding=2,dilation=2),\n",
    "                       GCNN_block(hidden_size,hidden_size,3,padding=4,dilation=4))\n",
    "            out_dim=hidden_size\n",
    "        self.hidden2tag = nn.Linear(out_dim, self.num_token_labels)\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids=None,\n",
    "        attention_mask=None,\n",
    "        token_type_ids=None,\n",
    "        labels=None,\n",
    "        label_tokens=None,\n",
    "        plan_label=None\n",
    "    ):\n",
    "        label_tokens=(label_tokens[:,:,1:5]!=0).long()\n",
    "        outputs = self.bert(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            token_type_ids=token_type_ids)\n",
    "\n",
    "        seq_output,pooled_output = outputs[0],outputs[1]\n",
    "        pooled_output = self.dropout(pooled_output)\n",
    "        if self.use_plan:\n",
    "            plan_output=F.adaptive_avg_pool1d(plan_label.permute(0,2,1),1).squeeze(-1)\n",
    "            seq_output=self.cond_ln(seq_output,plan_output)\n",
    "        emissions=self.tag_outputs(seq_output,attention_mask)\n",
    "        rel_logits = self.rel_classifier(pooled_output)\n",
    "        seq_logits = emissions\n",
    "        outputs = (rel_logits,seq_logits) + outputs[2:]  # add hidden states and attention if they are here\n",
    "        loss_full=None\n",
    "        if labels is not None:\n",
    "            loss_rel = nn.BCEWithLogitsLoss(reduction=\"none\")\n",
    "            l_r = ((loss_rel(rel_logits.view(-1, self.num_rel_labels), labels.view(-1,self.num_rel_labels).float())).sum(dim=-1)).mean()\n",
    "            loss_full=l_r\n",
    "\n",
    "        if label_tokens is not None:\n",
    "            #(batch_size,seq_len,num_token_labels)\n",
    "            loss_token=nn.BCELoss(reduction='none')\n",
    "            l_t=(torch.sum((loss_token(torch.sigmoid(seq_logits)**self.p, label_tokens.float()))\\\n",
    "                 *attention_mask.float().unsqueeze(-1)))/input_ids.shape[0]\n",
    "            loss_full+=l_t\n",
    "        if loss_full is not None:\n",
    "            outputs = (loss_full,) + outputs\n",
    "        return outputs  # (loss), logits, (hidden_states), (attentions)\n",
    "\n",
    "    def tag_outputs(self, sequence_output,attention_mask):\n",
    "        if self.need_birnn:\n",
    "            sequence_output, _ = self.birnn(sequence_output)\n",
    "        if self.need_cnn:\n",
    "            sequence_output=self.DGCNN([sequence_output.permute(0,2,1),attention_mask])[0].permute(0,2,1)\n",
    "        sequence_output = self.dropout(sequence_output)\n",
    "        emissions = self.hidden2tag(sequence_output)\n",
    "        return emissions\n",
    "def train_pointer(args, train_dataset,val_dataset, model, tokenizer):\n",
    "#     \"\"\" Train the model \"\"\"\n",
    "#     if args.local_rank in [-1, 0]:\n",
    "#         tb_writer = SummaryWriter()\n",
    "\n",
    "#     args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n",
    "    train_sampler = RandomSampler(train_dataset) # if args.local_rank == -1 else DistributedSampler(train_dataset)\n",
    "    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n",
    "\n",
    "    if args.max_steps > 0:\n",
    "        t_total = args.max_steps\n",
    "        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n",
    "    else:\n",
    "        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n",
    "\n",
    "    # Prepare optimizer and schedule (linear warmup and decay)\n",
    "    no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "    optimizer_grouped_parameters = [\n",
    "        {\n",
    "            \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": args.weight_decay,\n",
    "        },\n",
    "        {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n",
    "    ]\n",
    "\n",
    "    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n",
    "    scheduler = get_linear_schedule_with_warmup(\n",
    "        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n",
    "    )\n",
    "\n",
    "    # Check if saved optimizer or scheduler states exist\n",
    "    if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n",
    "        os.path.join(args.model_name_or_path, \"scheduler.pt\")\n",
    "    ):\n",
    "        logger.info(\"  loading optimizer and scheduler...\")\n",
    "        # Load in optimizer and scheduler states\n",
    "        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n",
    "#         scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n",
    "    else:\n",
    "        logger.info(\"  No optimizer and scheduler,we build a new one\")        \n",
    "\n",
    "    if args.fp16:\n",
    "        try:\n",
    "            from apex import amp\n",
    "        except ImportError:\n",
    "            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n",
    "        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n",
    "\n",
    "    # multi-gpu training (should be after apex fp16 initialization)\n",
    "    if args.n_gpu > 1:\n",
    "        model = torch.nn.DataParallel(model,device_ids=args.card_list)\n",
    "\n",
    "\n",
    "    # Train!\n",
    "    logger.info(\"***** Running training *****\")\n",
    "    logger.info(\"  Num examples = %d\", len(train_dataset))\n",
    "    logger.info(\"  Num Epochs = %d\", args.num_train_epochs)\n",
    "#     logger.info(\"  Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n",
    "    logger.info(\n",
    "        \"  Total train batch size (w. parallel, distributed & accumulation) = %d\",\n",
    "        args.train_batch_size\n",
    "        * args.gradient_accumulation_steps\n",
    "#         * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n",
    "    )\n",
    "    logger.info(\"  Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n",
    "    logger.info(\"  Total optimization steps = %d\", t_total)\n",
    "\n",
    "    global_step = 0\n",
    "    epochs_trained = 0\n",
    "    steps_trained_in_current_epoch = 0\n",
    "    # Check if continuing training from a checkpoint\n",
    "    if os.path.exists(args.model_name_or_path):\n",
    "        # set global_step to global_step of last saved checkpoint from model path\n",
    "        try:\n",
    "            global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n",
    "        except ValueError:\n",
    "            global_step = 0\n",
    "        epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "        steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n",
    "\n",
    "        logger.info(\"  Continuing training from checkpoint, will skip to saved global_step\")\n",
    "        logger.info(\"  Continuing training from epoch %d\", epochs_trained)\n",
    "        logger.info(\"  Continuing training from global step %d\", global_step)\n",
    "        logger.info(\"  Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n",
    "\n",
    "    tr_loss, logging_loss = 0.0, 0.0\n",
    "    model.zero_grad()\n",
    "    train_iterator = tqdm(range(\n",
    "        epochs_trained, int(args.num_train_epochs)), desc=\"Epoch\")\n",
    "    set_seed(args)  # Added here for reproductibility\n",
    "    for _ in train_iterator:\n",
    "        epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\")\n",
    "        for step, batch in enumerate(epoch_iterator):\n",
    "\n",
    "            # Skip past any already trained steps if resuming training\n",
    "            if steps_trained_in_current_epoch > 0:\n",
    "                if  (step + 1) % args.gradient_accumulation_steps == 0: \n",
    "                        steps_trained_in_current_epoch -= 1\n",
    "                continue\n",
    "\n",
    "            model.train()\n",
    "            batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "            inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3],\"label_tokens\":batch[4],\"plan_label\":batch[7].float()}\n",
    "            inputs[\"token_type_ids\"]=batch[2]\n",
    "#             if args.model_type != \"distilbert\":\n",
    "#                 inputs[\"token_type_ids\"] = (\n",
    "#                     batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n",
    "#                 )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n",
    "            outputs = model(**inputs)\n",
    "            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)\n",
    "\n",
    "            if args.n_gpu > 1:\n",
    "                loss = loss.mean()  # mean() to average on multi-gpu parallel training\n",
    "            if args.gradient_accumulation_steps > 1:\n",
    "                loss = loss / args.gradient_accumulation_steps\n",
    "\n",
    "            if args.fp16:\n",
    "                with amp.scale_loss(loss, optimizer) as scaled_loss:\n",
    "                    scaled_loss.backward()\n",
    "            else:\n",
    "                loss.backward()\n",
    "            logger.info(\"  step:%d loss %.3f\", step,loss.item())\n",
    "\n",
    "            tr_loss += loss.item()\n",
    "            if (step + 1) % args.gradient_accumulation_steps == 0:\n",
    "                if args.fp16:\n",
    "                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n",
    "                else:\n",
    "                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n",
    "\n",
    "                optimizer.step()\n",
    "                scheduler.step()  # Update learning rate schedule\n",
    "                model.zero_grad()\n",
    "                global_step += 1\n",
    "\n",
    "                if  args.save_steps > 0 and global_step % args.save_steps == 0:\n",
    "                    # Save model checkpoint\n",
    "                    results = evaluate_pointer(args, val_dataset,model, tokenizer)\n",
    "                    output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n",
    "                    if not os.path.exists(output_dir):\n",
    "                        os.makedirs(output_dir)\n",
    "                    model_to_save = (\n",
    "                        model.module if hasattr(model, \"module\") else model\n",
    "                    )  # Take care of distributed/parallel training\n",
    "                    torch.save(model_to_save.state_dict(),os.path.join(output_dir,\"model.pt\"))\n",
    "                    tokenizer.save_pretrained(output_dir)\n",
    "\n",
    "                    torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n",
    "                    logger.info(\"Saving model checkpoint to %s\", output_dir)\n",
    "                    if args.fp16:\n",
    "                        torch.save(amp.state_dict(),os.path.join(output_dir, \"amp.pt\"))\n",
    "                    torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n",
    "                    torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n",
    "                    logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n",
    "\n",
    "            if args.max_steps > 0 and global_step > args.max_steps:\n",
    "                epoch_iterator.close()\n",
    "                break\n",
    "        if args.max_steps > 0 and global_step > args.max_steps:\n",
    "            train_iterator.close()\n",
    "            break\n",
    "\n",
    "\n",
    "    return global_step, tr_loss / global_step\n",
    "def evaluate_pointer(args, eval_dataset,model, tokenizer, thre=0.5,prefix=\"\"):\n",
    "    eval_output_dir = args.output_dir \n",
    "\n",
    "    results = {}\n",
    "\n",
    "    if not os.path.exists(eval_output_dir) :\n",
    "        os.makedirs(eval_output_dir)\n",
    "\n",
    "    eval_sampler = SequentialSampler(eval_dataset)\n",
    "    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=0)\n",
    "\n",
    "    # multi-gpu eval\n",
    "#         if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n",
    "#             model = torch.nn.DataParallel(model)\n",
    "\n",
    "    # Eval!\n",
    "    logger.info(\"***** Running evaluation {} *****\".format(prefix))\n",
    "    logger.info(\"  Num examples = %d\", len(eval_dataset))\n",
    "    logger.info(\"  Batch size = %d\", args.eval_batch_size)\n",
    "    eval_loss = 0.0\n",
    "    nb_eval_steps = 0\n",
    "    rel_preds=None\n",
    "    rel_labels=None\n",
    "    token_preds=None\n",
    "    token_labels=None\n",
    "    recall_num=np.zeros((4,))\n",
    "    precision_num=np.zeros((4,))\n",
    "    full_num=np.zeros((4,))\n",
    "    full_num_pre=np.zeros((4,))\n",
    "    for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n",
    "        model.eval()\n",
    "        batch = tuple(t.to(args.device) for t in batch[:-1])\n",
    "\n",
    "        with torch.no_grad():\n",
    "            inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3],\"label_tokens\":batch[4],\"plan_label\":batch[7].float()}\n",
    "            inputs[\"token_type_ids\"]=batch[2]\n",
    "#             temp_inputs={\"input_ids\": batch[0], \"attention_mask\": batch[1],\"token_type_ids\":batch[2]}\n",
    "#                 if args.model_type != \"distilbert\":\n",
    "#                     inputs[\"token_type_ids\"] = (\n",
    "#                         batch[2] if args.model_type in [\"bert\", \"xlnet\", \"albert\"] else None\n",
    "#                     )  # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids\n",
    "            outputs = model(**inputs)\n",
    "            tmp_eval_loss, rel_logits, token_logits = outputs[:3]\n",
    "            \n",
    "            eval_loss += tmp_eval_loss.mean().item()\n",
    "        nb_eval_steps += 1\n",
    "        mask=batch[1].unsqueeze(-1)\n",
    "        rel_pred = F.softmax(rel_logits,dim=-1)\n",
    "        rel_pred =torch.argmax(rel_pred,dim=-1).detach().cpu().numpy()\n",
    "        rel_label=torch.argmax(inputs[\"labels\"],dim=-1).detach().cpu().numpy()\n",
    "        if rel_preds is None:\n",
    "            rel_preds=rel_pred\n",
    "        else:\n",
    "            rel_preds=np.append(rel_preds,rel_pred,axis=0)\n",
    "        if rel_labels is None:\n",
    "            rel_labels=rel_label\n",
    "        else:\n",
    "            rel_labels=np.append(rel_labels,rel_label,axis=0)     \n",
    "\n",
    "            \n",
    "\n",
    "        recall_num+=((((torch.sigmoid(token_logits.detach())>thre).long()==1) & (batch[4].detach()[:,:,1:5]!=0)).long()*mask).sum(dim=[0,1]).detach().cpu().numpy()\n",
    "        precision_num+=((((torch.sigmoid(token_logits.detach())>thre).long()==1) & (batch[4].detach()[:,:,1:5]!=0)).long()*mask).sum(dim=[0,1]).detach().cpu().numpy()\n",
    "        full_num+=((batch[4].detach()[:,:,1:5]!=0).long()*mask).sum(dim=[0,1]).detach().cpu().numpy()\n",
    "        full_num_pre+=((torch.sigmoid(token_logits.detach())>thre).long()*mask).sum(dim=[0,1]).detach().cpu().numpy()\n",
    "#         token_pred=((torch.sigmoid(token_logits.detach())>thre).long()*mask).cpu().numpy().reshape(-1,4)\n",
    "        token_pred=((torch.sigmoid(token_logits.detach())*mask.float())).cpu().numpy().reshape(-1,4)\n",
    "        token_label=((batch[4].detach()[:,:,1:5]!=0).long()*mask).cpu().numpy().reshape(-1,4)\n",
    "        if token_preds is None:\n",
    "            token_preds=token_pred\n",
    "        else:\n",
    "            token_preds=np.append(token_preds,token_pred,axis=0)\n",
    "        if token_labels is None:\n",
    "            token_labels=token_label\n",
    "        else:\n",
    "            token_labels=np.append(token_labels,token_label,axis=0) \n",
    "    result = {\"recall_sample\":recall_num/(full_num+0.001),\"precision_sample\":precision_num/(full_num_pre+0.001)}\n",
    "    results.update(result)\n",
    "\n",
    "\n",
    "    eval_loss = eval_loss / nb_eval_steps\n",
    "    print(\"准召报告：\")\n",
    "    df_precison_recall_f1=pd.DataFrame(metrics.precision_recall_fscore_support(rel_labels,rel_preds),index=['precison','recall','f1','number'])\n",
    "    display(HTML(df_precison_recall_f1.to_html()))\n",
    "    result = {\"cls\":    df_precison_recall_f1}\n",
    "    results.update(result)\n",
    "\n",
    "    output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n",
    "    with open(output_eval_file, \"a\") as writer:\n",
    "        logger.info(\"***** Eval results {} *****\".format(prefix))\n",
    "        for key in sorted(results.keys()):\n",
    "            logger.info(\"  %s = %s\", key, str(results[key]))\n",
    "            writer.write(\"%s = %s\\n\" % (key, str(results[key])))\n",
    "\n",
    "    return rel_labels,rel_preds,token_preds,token_labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "hidden": true,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "06/18/2020 16:11:26 - INFO - transformers.configuration_utils -   loading configuration file ./roberta-zh-wwm-pytorch/bert_config.json\n",
      "06/18/2020 16:11:26 - INFO - transformers.configuration_utils -   Model config BertConfig {\n",
      "  \"_num_labels\": 2,\n",
      "  \"architectures\": null,\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"bos_token_id\": null,\n",
      "  \"directionality\": \"bidi\",\n",
      "  \"do_sample\": false,\n",
      "  \"early_stopping\": false,\n",
      "  \"eos_token_ids\": null,\n",
      "  \"finetuning_task\": null,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"id2label\": {\n",
      "    \"0\": \"LABEL_0\",\n",
      "    \"1\": \"LABEL_1\"\n",
      "  },\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"is_decoder\": false,\n",
      "  \"label2id\": {\n",
      "    \"LABEL_0\": 0,\n",
      "    \"LABEL_1\": 1\n",
      "  },\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"length_penalty\": 1.0,\n",
      "  \"max_length\": 20,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_beams\": 1,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"num_return_sequences\": 1,\n",
      "  \"output_attentions\": true,\n",
      "  \"output_hidden_states\": true,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": null,\n",
      "  \"pooler_fc_size\": 768,\n",
      "  \"pooler_num_attention_heads\": 12,\n",
      "  \"pooler_num_fc_layers\": 3,\n",
      "  \"pooler_size_per_head\": 128,\n",
      "  \"pooler_type\": \"first_token_transform\",\n",
      "  \"pruned_heads\": {},\n",
      "  \"repetition_penalty\": 1.0,\n",
      "  \"temperature\": 1.0,\n",
      "  \"top_k\": 50,\n",
      "  \"top_p\": 1.0,\n",
      "  \"torchscript\": false,\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_bfloat16\": false,\n",
      "  \"vocab_size\": 21128\n",
      "}\n",
      "\n",
      "06/18/2020 16:11:26 - INFO - transformers.modeling_utils -   loading weights file ./roberta-zh-wwm-pytorch/pytorch_model.bin\n"
     ]
    }
   ],
   "source": [
    "# model=BertSeqLabeling(bert_dir,config_file,num_rel_labels=num_rel_labels,num_token_labels=num_token_labels,\n",
    "#                                      hidden_dropout_prob=hidden_dropout_prob)\n",
    "model_ner=BERT_BiLSTM_CRF(bert_dir,config_file,num_rel_labels=num_rel_labels,num_token_labels=num_token_labels,\n",
    "                                     hidden_dropout_prob=hidden_dropout_prob, need_birnn=False,need_cnn=True)\n",
    "# model_ner=BERT_BiLSTM_POINTER(bert_dir,config_file,num_rel_labels=num_rel_labels,num_token_labels=num_token_labels,\n",
    "#                          hidden_dropout_prob=hidden_dropout_prob,need_birnn=False,need_cnn=True\\\n",
    "#                           ,use_plan=True,p=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "output_dir=\"./output_test/\"\n",
    "device=torch.device(\"cuda:2\")\n",
    "args=ARG(train_batch_size=batch_size*2,eval_batch_size=batch_size*4,weight_decay=weight_decay,learning_rate=learning_rate,\n",
    "         adam_epsilon=adam_epsilon,num_train_epochs=epochs,warmup_steps=0,gradient_accumulation_steps=1,save_steps=313,\n",
    "         max_grad_norm=1.0,model_name_or_path=output_dir,output_dir=output_dir,seed=42,device=device,n_gpu=2,\n",
    "        max_steps=0,output_mode=\"classification\",fp16=False,fp16_opt_level='O1',card_list=[2,3])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "model_ner.to(args.device)\n",
    "train_ner(args, ner_train_dataset,ner_val_dataset, model_ner, tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "#交叉ensemble部分\n",
    "\n",
    "ckpt=torch.load(\"./output_ner_0_1_old/checkpoint-67112/model.pt\", map_location=lambda storage, loc: storage)\n",
    "# full \"./output_ner_0_1_old/checkpoint-67112/model.pt\"\n",
    "# 0  \"./output_ner_0_1_0/checkpoint-23872/model.pt\"\n",
    "# 1 \"./output_ner_0_1_1/checkpoint-29840/model.pt\"\n",
    "#2  \"./output_ner_0_1_2/checkpoint-17904/model.pt\"\n",
    "model_ner.load_state_dict(ckpt)\n",
    "model_ner=model_ner.to(args.device)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "## 单model检验（只检验二阶段命名实体识别的准确度，跟一阶段model无关）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "results=evaluate(args,ner_val_dataset,model,tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "val_token_pred=results[2].reshape(-1,256)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "val_token_label=results[3].reshape(-1,256)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "text_labels=[np.zeros((len(text_data[ner_doc2doc[i]]['text']))) for i in range(len(ner_doc_token_labels))]\n",
    "for idx in tqdm(range(len(text_labels))):\n",
    "    token_labels=ner_doc_token_labels[idx]\n",
    "    tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index=ner_token2doc[idx]\n",
    "    for i in range(len(token_labels)):\n",
    "        begin,end=tok_to_orig_start_index[i],tok_to_orig_end_index[i]\n",
    "        text_labels[idx][begin:end+1]=token_labels[i]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "for t,l in zip(text_data[ner_doc2doc[example2doc[360000+12]]]['text'],text_labels[example2doc[360000+12]]):\n",
    "        print(t,\" \",id2labels[int(l)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "text_preds=[np.zeros((len(text_data[ner_doc2doc[i]]['text']),len(id2labels)))for i in range(len(ner_doc_token_labels))]\n",
    "for idx in tqdm(range(len(ner_val_dataset))):\n",
    "    text=ner_val_dataset[idx][0].numpy()\n",
    "    attention_mask=ner_val_dataset[idx][1].numpy()\n",
    "    token_labels=ner_val_dataset[idx][4].numpy()\n",
    "    tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index=ner_val_dataset[idx][5]\n",
    "    token_pred=val_token_pred[idx]\n",
    "    token_pred=token_pred[1:1+len(tok_to_orig_start_index)]\n",
    "    for i in range(len(token_pred)):\n",
    "        begin,end=tok_to_orig_start_index[i],tok_to_orig_end_index[i]\n",
    "        text_preds[example2doc[360000+idx]][begin:end+1,token_pred[i]]+=1\n",
    "text_preds=[e.argmax(axis=-1) for e in text_preds]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "for t,l in zip(text_data[ner_doc2doc[example2doc[360000]]]['text'],text_preds[example2doc[360000]]):\n",
    "        print(t,\" \",id2labels[l])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     1
    ],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "\n",
    "with open(\"val_output.txt\",\"w\") as w:\n",
    "    for idx in tqdm(range(example2doc[360000],len(text_preds))):\n",
    "        for token,label,pred in zip(text_data[ner_doc2doc[idx]]['text'],text_labels[idx],text_preds[idx]):\n",
    "            if _is_whitespace(token):\n",
    "                continue\n",
    "            w.write(token+\" \"+id2labels[int(label)]+\" \"+id2labels[int(pred)]+\"\\n\")\n",
    "        w.write(\"\\n\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## predict（使用一阶段输出的关系分类结果和句子一起构建ner_pred_dataset,再使用二阶段模型识别关系实体，完成关系抽取）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### POINTER preprocess"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(ner_pred_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "pred_results=evaluate_pointer(args,ner_pred_dataset,model_ner,tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pred_token_pred=pred_results[2].reshape(-1,256,4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pred_results[2].shape,pred_token_pred.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "pred_token_pred[0][0:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "len(pred_doc_token_labels),pred_example2doc[-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     1,
     58,
     62,
     63,
     78,
     88
    ]
   },
   "outputs": [],
   "source": [
    "\n",
    "def find_entities(token_label_pred,text,B_index,E_index,thre=0.5,max_count=2):\n",
    "    probs=[]\n",
    "    for i in range(len(text)):\n",
    "        counts=0\n",
    "        for j in range(i,len(text)):\n",
    "            if B_index==0 and j-i+1>20:\n",
    "                continue\n",
    "            if B_index==2 and j-i+1>25:\n",
    "                continue\n",
    "            if token_label_pred[i][B_index]*token_label_pred[j][E_index]>thre and counts<max_count:\n",
    "                probs.append([i,j,token_label_pred[i][B_index]*token_label_pred[j][E_index]])\n",
    "                counts+=1\n",
    "    probs=list(sorted(probs,key=lambda x:x[1]-x[0],reverse=True))\n",
    "    contains=[[] for i in range(len(probs))]\n",
    "    crosses=[]\n",
    "    for idx,e in enumerate(probs):\n",
    "        cross=[]\n",
    "        contained=[]\n",
    "        for jdx,t in enumerate(probs):\n",
    "            if jdx==idx:\n",
    "                continue\n",
    "            if t[0]<=e[0] and t[1]>=e[1]:\n",
    "                contained.append([jdx,t[1]-t[0]])\n",
    "            elif ((t[1]>=e[0] and t[0]<=e[0]) or (t[0]<=e[1] and t[1]>=e[1])) and not (t[0]>=e[0] and t[1]<=e[1]):\n",
    "                cross.append(jdx)\n",
    "        crosses.append(cross)\n",
    "        if len(contained)>0:\n",
    "            father=min(contained,key=lambda x:x[1]) \n",
    "            contains[father[0]].append(idx)\n",
    "    saved=np.ones((len(contains),))\n",
    "    for idx in range(len(contains)):\n",
    "        e=probs[idx]\n",
    "#         print([(text[int(e[0]):int(e[1])+1]),e[2]])\n",
    "#         print(\"contains\")\n",
    "        full_prob=probs[idx][2]\n",
    "        seperate_prob=0\n",
    "        for jdx in contains[idx]:\n",
    "            t=probs[jdx]\n",
    "#             print([(text[int(t[0]):int(t[1])+1]),t[2]])   \n",
    "            seperate_prob+=probs[jdx][2]\n",
    "        if seperate_prob>full_prob:\n",
    "            saved[idx]=0\n",
    "        else:\n",
    "            saved[contains[idx]]=0\n",
    "    for idx in range(len(crosses)):\n",
    "        full_prob=probs[idx][2]\n",
    "        cross_prob=0\n",
    "        for jdx in crosses[idx]:\n",
    "            if probs[jdx][2]>full_prob:\n",
    "                saved[idx]=0\n",
    "            else:\n",
    "                saved[jdx]=0 \n",
    "#     print([ (text[int(e[0]):int(e[1])+1],e[2],e[0],e[1])for e in probs]) #覆盖不可以 相交可以\n",
    "    probs=np.array(probs)[saved.astype(np.bool)]\n",
    "    entities=[text[int(e[0]):int(e[1])+1] for e in probs]\n",
    "    offsets=[[int(e[0]),int(e[1])+1] for e in probs]\n",
    "    return entities,offsets\n",
    "def find_subs_obs_pointer(token_label_pred,text,sub_thre=0.7,ob_thre=0.8,max_count=2):\n",
    "    obs,ob_offsets=find_entities(token_label_pred,text,0,1,ob_thre,max_count)\n",
    "    subs,sub_offsets=find_entities(token_label_pred,text,2,3,sub_thre,max_count-1)\n",
    "    return subs,obs,sub_offsets,ob_offsets\n",
    "def make_output_pointer(doc2pred_ner_doc,simple_thre,complex_thre,text_data,pred_info,soinfos):\n",
    "    \"\"\"Assemble SPO triples for every document into ``pred_info``.\n",
    "\n",
    "    For each document, decoded subjects and objects of each relation are\n",
    "    paired: small candidate sets take the full cross product, large ones\n",
    "    are matched by nearest character offset.  Relations listed in\n",
    "    ``special_major_idx`` additionally attach affiliate object slots\n",
    "    gathered from their ``special_affilate_idx`` relations.\n",
    "\n",
    "    Relies on module-level state: ``pred_doc_rels``, ``special_pass_idx``,\n",
    "    ``special_major_idx``, ``special_affilate_idx``, ``id2schema``,\n",
    "    ``id2rels``.  ``text_data`` is kept for interface compatibility.\n",
    "    \"\"\"\n",
    "    for ind,info in tqdm(enumerate(doc2pred_ner_doc)):\n",
    "        # relation id -> prediction index for this document\n",
    "        rels={}\n",
    "        for idx in info:\n",
    "            rels[pred_doc_rels[idx]]=idx\n",
    "        for idx in info:\n",
    "            label_pred=pred_doc_rels[idx]\n",
    "            if label_pred in special_pass_idx:\n",
    "                continue\n",
    "            elif label_pred not in special_major_idx:\n",
    "                subs,obs,sub_offsets,ob_offsets=soinfos[idx]\n",
    "                if (len(subs)*len(obs)<simple_thre) or len(subs)==1 or len(obs)==1:\n",
    "                    # few candidates: emit the full subject x object cross product\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            sub=sub.strip()\n",
    "                            ob=ob.strip()\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':sub}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                else:\n",
    "                    # many candidates: pair each object with its nearest subject\n",
    "                    for j,ob in enumerate(obs):\n",
    "                        ob_offset=ob_offsets[j]\n",
    "                        min_dis,sub_index=100000,-1\n",
    "                        for i,sub in enumerate(subs):\n",
    "                            sub=sub.strip()\n",
    "                            ob=ob.strip()\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            if abs(sub_offsets[i][0]-ob_offset[0])<min_dis:\n",
    "                                sub_index=i\n",
    "                                min_dis=abs(sub_offsets[i][0]-ob_offset[0])\n",
    "                        if sub_index!=-1:\n",
    "                            ob=ob.strip()\n",
    "                            if subs[sub_index]=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':subs[sub_index].strip()}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                    # TODO: subjects left without a matched object could get a second pass\n",
    "            else:\n",
    "                # relation with affiliate object slots attached to the subject\n",
    "                affilate_labels=special_affilate_idx[label_pred]\n",
    "                subs,obs,sub_offsets,ob_offsets=soinfos[idx]\n",
    "                subs2id=dict([(sub,i) for i,sub in enumerate(subs)])\n",
    "                subs_af=[[] for i in range(len(subs))]\n",
    "                # collect affiliate objects whose subject matches one of ours\n",
    "                for f in affilate_labels:\n",
    "                    if f in rels:\n",
    "                        af_idx=rels[f]\n",
    "                        af_subs,af_obs,af_sub_offsets,af_ob_offsets=soinfos[af_idx]\n",
    "                        for sub in af_subs:\n",
    "                            if sub in subs:\n",
    "                                subs_af[subs2id[sub]].append([af_obs,f,af_ob_offsets])\n",
    "                # NOTE: currently at most one affiliate value per slot is kept\n",
    "                if len(subs)*len(obs)<complex_thre or len(subs)==1 or len(obs)==1:\n",
    "                    # few candidates: cross product; attach the affiliate object\n",
    "                    # closest to each main object\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        sub_offset=sub_offsets[i]\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            sub=sub.strip()\n",
    "                            ob=ob.strip()\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':sub}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            if len(subs_af[i])==0:\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                            if len(subs_af[i])>=1:\n",
    "                                ob_offset=ob_offsets[j]\n",
    "                                for af_obs in subs_af[i]:\n",
    "                                    af_label_pred=af_obs[1]\n",
    "                                    af_min_dis,af_ob_index=100000,-1\n",
    "                                    for k,(af_ob,af_offset) in enumerate(zip(af_obs[0],af_obs[2])):\n",
    "                                        af_ob=af_ob.strip()\n",
    "                                        if af_ob=='':\n",
    "                                            continue\n",
    "                                        if abs(ob_offset[0]-af_offset[0])<af_min_dis:\n",
    "                                            af_ob_index=k\n",
    "                                            # FIX: previously assigned to min_dis, so the\n",
    "                                            # nearest-candidate search never narrowed and the\n",
    "                                            # last (not nearest) affiliate object was chosen\n",
    "                                            af_min_dis=abs(ob_offset[0]-af_offset[0])\n",
    "                                    if af_ob_index!=-1:\n",
    "                                        rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_obs[0][af_ob_index].strip()})\n",
    "                                        rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                else:\n",
    "                    # many candidates: nearest subject/object matching; the\n",
    "                    # affiliate slot simply takes the first affiliate object\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        sub_offset=sub_offsets[i]\n",
    "                        min_dis,ob_index=100000,-1\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            if abs(sub_offset[0]-ob_offsets[j][0])<min_dis:\n",
    "                                ob_index=j\n",
    "                                min_dis=abs(sub_offset[0]-ob_offsets[j][0])\n",
    "                        if ob_index!=-1:\n",
    "                            j=ob_index\n",
    "                            if subs[i]=='' or obs[j]=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':obs[j]},'subject':subs[i]}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            if len(subs_af[i])==0:\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                            if len(subs_af[i])>=1:\n",
    "                                for af_obs in subs_af[i]:\n",
    "                                    af_label_pred=af_obs[1]\n",
    "                                    rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "                                    for af_ob in af_obs[0]:\n",
    "                                        rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_ob})\n",
    "                                        break\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                    # TODO: objects left unmatched could be appended in a second pass"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregate sliding-window token predictions back onto each test document:\n",
    "# one (doc_len, 4) score array per prediction document.\n",
    "text_preds=[np.zeros((len(test_text_data[pred_doc2doc[i]]['text']),4))for i in range(len(pred_doc_token_labels))]\n",
    "for idx in tqdm(range(len(ner_pred_dataset))):\n",
    "    text=ner_pred_dataset[idx][0].numpy()\n",
    "    attention_mask=ner_pred_dataset[idx][1].numpy()\n",
    "    token_labels=ner_pred_dataset[idx][4].numpy()\n",
    "    # token <-> original-character alignment for this example\n",
    "    tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index=ner_pred_dataset[idx][-1]\n",
    "    token_pred=pred_token_pred[idx]\n",
    "    # skip position 0 (presumably the [CLS] slot -- TODO confirm) and trailing padding\n",
    "    token_pred=token_pred[1:1+len(tok_to_orig_start_index)]\n",
    "    for i in range(len(token_pred)):\n",
    "        begin,end=tok_to_orig_start_index[i],tok_to_orig_end_index[i]\n",
    "        text_preds[pred_example2doc[idx]][begin:end+1,:]+=token_pred[i] # sum or average? -- TODO decide"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     4
    ]
   },
   "outputs": [],
   "source": [
    "# Decode subject/object candidates for every test document, then drop\n",
    "# duplicate surface strings (first occurrence wins) for subs and obs alike.\n",
    "soinfos=[]\n",
    "for idx,token_label_pred in tqdm(enumerate(text_preds)):\n",
    "    soinfos.append(find_subs_obs_pointer(token_label_pred,test_text_data[pred_doc2doc[idx]]['text']))\n",
    "new_soinfos=[]\n",
    "# e = (subs, obs, sub_offsets, ob_offsets)\n",
    "for e in tqdm(soinfos):\n",
    "    new_sub=[]\n",
    "    new_sub_offset=[]\n",
    "    for idx,t in enumerate(e[0]):\n",
    "        # NOTE(review): the idx==0 branch is redundant -- membership in the\n",
    "        # empty prefix is always False -- but harmless\n",
    "        if idx==0:\n",
    "            new_sub.append(t)\n",
    "            new_sub_offset.append(e[2][idx])\n",
    "            continue\n",
    "        if t not in e[0][:idx]:\n",
    "            new_sub.append(t)\n",
    "            new_sub_offset.append(e[2][idx])\n",
    "    new_ob=[]\n",
    "    new_ob_offset=[]\n",
    "    for idx,t in enumerate(e[1]):\n",
    "        if idx==0:\n",
    "            new_ob.append(t)\n",
    "            new_ob_offset.append(e[3][idx])\n",
    "            continue\n",
    "        if t not in e[1][:idx]:\n",
    "            new_ob.append(t)\n",
    "            new_ob_offset.append(e[3][idx])\n",
    "    new_soinfos.append([new_sub,new_ob,new_sub_offset,new_ob_offset])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# idx=240\n",
    "# token_label_pred=text_preds[idx]\n",
    "# text=test_text_data[pred_doc2doc[idx]]['text']\n",
    "# print(id2rels[pred_doc_rels[idx]])\n",
    "# print(find_entities(token_label_pred,text,2,3,0.2,1))\n",
    "# print(find_entities(token_label_pred,text,0,1,0.3,2))\n",
    "# for t,l in zip(text,token_label_pred):\n",
    "#     print(t,np.array(new_id2labels)[((l>0.5)*np.arange(1,5))[np.where(((l>0.5)*np.arange(1,5)))]],l)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "special_rels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show every prediction record that contains at least one \"special\" relation.\n",
    "for entry in pred_info:\n",
    "    has_special=any(spo['predicate'] in special_rels for spo in entry['spo_list'])\n",
    "    if has_special:\n",
    "        print(entry)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build per-document prediction buckets and empty output records for the\n",
    "# test split, then fill pred_info['spo_list'] via make_output_pointer.\n",
    "doc2pred_ner_doc=[[] for i in range(len(test_text_data))]\n",
    "pred_info=[{} for i in range(len(test_text_data[:]))]\n",
    "for idx,e in enumerate(pred_info):\n",
    "    e['text']=test_text_data[idx]['text']\n",
    "    e['spo_list']=[]\n",
    "# group relation-level predictions by their source document\n",
    "for idx in range(len(pred_doc_rels[:])):\n",
    "    ind=pred_doc2doc[idx]\n",
    "    doc2pred_ner_doc[ind].append(idx)\n",
    "flag=False  # NOTE(review): appears unused in this cell -- leftover state?\n",
    "simple_thre=4\n",
    "complex_thre=2\n",
    "make_output_pointer(doc2pred_ner_doc,simple_thre,complex_thre,test_text_data,pred_info,new_soinfos)\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### val"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     1
    ]
   },
   "outputs": [],
   "source": [
    "# Validation-split twin of the test aggregation cell: fold sliding-window\n",
    "# token predictions back onto each document as a (doc_len, 4) score array.\n",
    "text_preds=[np.zeros((len(text_data[pred_doc2doc[i]]['text']),4))for i in range(len(pred_doc_token_labels))]\n",
    "for idx in tqdm(range(len(ner_pred_dataset))):\n",
    "    text=ner_pred_dataset[idx][0].numpy()\n",
    "    attention_mask=ner_pred_dataset[idx][1].numpy()\n",
    "    token_labels=ner_pred_dataset[idx][4].numpy()\n",
    "    # token <-> original-character alignment for this example\n",
    "    tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index=ner_pred_dataset[idx][-1]\n",
    "    token_pred=pred_token_pred[idx]\n",
    "    # skip position 0 (presumably the [CLS] slot -- TODO confirm) and trailing padding\n",
    "    token_pred=token_pred[1:1+len(tok_to_orig_start_index)]\n",
    "    for i in range(len(token_pred)):\n",
    "        begin,end=tok_to_orig_start_index[i],tok_to_orig_end_index[i]\n",
    "        text_preds[pred_example2doc[idx]][begin:end+1,:]+=token_pred[i] # sum or average? -- TODO decide"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 数据分析"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     8
    ]
   },
   "outputs": [],
   "source": [
    "# Corpus statistics: subject/object length distributions, unique mention\n",
    "# sets, and the longest subject/object together with their SPOs.\n",
    "max_sub=''\n",
    "max_sub_spo=''\n",
    "max_ob=''\n",
    "max_ob_spo=''\n",
    "sub_len=[]\n",
    "ob_len=[]\n",
    "subs=set()\n",
    "obs=set()\n",
    "for example in tqdm(text_data):\n",
    "    for spo in example['spo_list']:\n",
    "        sub_len.append(len(spo['subject']))\n",
    "        subs.add(spo['subject'])\n",
    "        obs.add(spo['object']['@value'])\n",
    "        ob_len.append(len(spo['object']['@value']))\n",
    "        if len(spo['subject'])>len(max_sub):\n",
    "            max_sub=spo['subject']\n",
    "            max_sub_spo=spo\n",
    "        if len(spo['object']['@value'])>len(max_ob):\n",
    "            max_ob=spo['object']['@value']\n",
    "            max_ob_spo=spo\n",
    "# NOTE(review): mid-cell expression -- not displayed (only the last line is)\n",
    "len(subs),len(obs)\n",
    "# for t,l in zip(text,token_label_pred):\n",
    "#     print(t,np.array(new_id2labels)[((l>0.5)*np.arange(1,5))[np.where(((l>0.5)*np.arange(1,5)))]],l)\n",
    "# Spot-check entity decoding on one validation document.\n",
    "idx=0\n",
    "token_label_pred=text_preds[idx]\n",
    "text=text_data[pred_doc2doc[idx]]['text']\n",
    "print(id2rels[pred_doc_rels[idx]])\n",
    "print(find_entities(token_label_pred,text,2,3,0.2,1))\n",
    "print(find_entities(token_label_pred,text,0,1,0.3,2))\n",
    "# find_subs_obs_pointer(token_label_pred,text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": [
     1,
     4
    ]
   },
   "outputs": [],
   "source": [
    "# Validation-split twin of the test dedup cell: decode subject/object\n",
    "# candidates per document and drop duplicate surface strings.\n",
    "soinfos=[]\n",
    "for idx,token_label_pred in tqdm(enumerate(text_preds)):\n",
    "    soinfos.append(find_subs_obs_pointer(token_label_pred,text_data[pred_doc2doc[idx]]['text']))\n",
    "new_soinfos=[]\n",
    "# e = (subs, obs, sub_offsets, ob_offsets)\n",
    "for e in tqdm(soinfos):\n",
    "    new_sub=[]\n",
    "    new_sub_offset=[]\n",
    "    for idx,t in enumerate(e[0]):\n",
    "        # NOTE(review): the idx==0 branch is redundant -- membership in the\n",
    "        # empty prefix is always False -- but harmless\n",
    "        if idx==0:\n",
    "            new_sub.append(t)\n",
    "            new_sub_offset.append(e[2][idx])\n",
    "            continue\n",
    "        if t not in e[0][:idx]:\n",
    "            new_sub.append(t)\n",
    "            new_sub_offset.append(e[2][idx])\n",
    "    new_ob=[]\n",
    "    new_ob_offset=[]\n",
    "    for idx,t in enumerate(e[1]):\n",
    "        if idx==0:\n",
    "            new_ob.append(t)\n",
    "            new_ob_offset.append(e[3][idx])\n",
    "            continue\n",
    "        if t not in e[1][:idx]:\n",
    "            new_ob.append(t)\n",
    "            new_ob_offset.append(e[3][idx])\n",
    "    new_soinfos.append([new_sub,new_ob,new_sub_offset,new_ob_offset])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Build buckets/records for the validation slice; 180000 appears to be the\n",
    "# train/validation split boundary -- TODO confirm against the data prep cells.\n",
    "doc2pred_ner_doc=[[] for i in range(len(text_data[:])-180000)]\n",
    "pred_info=[{} for i in range(len(text_data[:])-180000)]\n",
    "for idx,e in enumerate(pred_info):\n",
    "    e['text']=text_data[180000+idx]['text']\n",
    "    e['spo_list']=[]\n",
    "# group relation-level predictions by their source document (shifted by the split offset)\n",
    "for idx in range(len(pred_doc_rels[:])):\n",
    "    ind=pred_doc2doc[idx]\n",
    "    doc2pred_ner_doc[ind-180000].append(idx)\n",
    "flag=False  # NOTE(review): appears unused in this cell -- leftover state?\n",
    "simple_thre=4\n",
    "complex_thre=2\n",
    "make_output_pointer(doc2pred_ner_doc,simple_thre,complex_thre,text_data[180000:],pred_info,new_soinfos)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "code_folding": []
   },
   "outputs": [],
   "source": [
    "# Count exact-match SPO triples (dict equality) between predictions and\n",
    "# gold on the validation slice.  Prints every example -- heavy output.\n",
    "rel_nums,pred_nums=0,0\n",
    "right_num=0\n",
    "for idx,(pred,label) in enumerate(zip(pred_info,text_data[180000:])):\n",
    "        pred_nums+=len(pred['spo_list'])\n",
    "        rel_nums+=len(label['spo_list'])\n",
    "        print(idx)\n",
    "        print(\"text:\",label['text'])\n",
    "        print(\"real_spo_list:\",label['spo_list'])\n",
    "        print(\"pred_spo_list:\",pred['spo_list'])\n",
    "        for e in pred['spo_list']:\n",
    "            if e in label['spo_list']:\n",
    "                right_num+=1\n",
    "#         for e in label['spo_list']:\n",
    "#             if not e in pred['spo_list']:\n",
    "#                 print(idx)\n",
    "#                 print(e)\n",
    "#                 print(label['text'])\n",
    "#                 for t in label['spo_list']:\n",
    "#                     if t['predicate']==e['predicate']:\n",
    "#                         print(t)\n",
    "#                 print(\"=================================\")\n",
    "#                 for t in pred['spo_list']:\n",
    "#                     if t['predicate']==e['predicate']:\n",
    "#                         print(t)\n",
    "         "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "right_num,pred_nums,rel_nums"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Micro precision/recall/F1 over predicted vs. gold SPO triples.\n",
    "# NOTE(review): raises ZeroDivisionError when there are no predictions,\n",
    "# no gold triples, or precision+recall == 0.\n",
    "precision=right_num/pred_nums\n",
    "recall=right_num/rel_nums\n",
    "f1=2*precision*recall/(precision+recall)\n",
    "precision,recall,f1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### CRF preprocess"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "18727"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(ner_pred_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "pred_results=evaluate_ner(args,ner_pred_dataset,model_ner,tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "((18727, 256, 9), (18727, 256), (18727, 55))"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Cross-ensemble section: stack per-batch outputs and persist token/relation\n",
    "# logits and predictions for later ensembling.\n",
    "pred_rel_logits=np.concatenate(pred_results[-1],axis=0)\n",
    "pred_token_logits=np.concatenate(pred_results[-2],axis=0)\n",
    "pred_token_pred=pred_results[2].reshape(-1,256)\n",
    "# NOTE(review): mid-cell expression -- shapes are not displayed here\n",
    "pred_token_logits.shape,pred_token_pred.shape,pred_rel_logits.shape\n",
    "np.save(\"./cross_results/test_pred_new.npy\",pred_token_pred)\n",
    "np.save(\"./cross_results/test_logit_new.npy\",pred_token_logits)\n",
    "np.save(\"./cross_results/test_rel_new.npy\",pred_rel_logits)\n",
    "# the same arrays saved again under fold-indexed names\n",
    "np.save(\"./cross_results/pred_0.npy\",pred_token_pred)\n",
    "np.save(\"./cross_results/logit_0.npy\",pred_token_logits)\n",
    "np.save(\"./cross_results/rel_0.npy\",pred_rel_logits)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "code_folding": [
     0,
     13,
     61,
     75,
     98,
     248
    ]
   },
   "outputs": [],
   "source": [
    "def split_and_idx(text,token):\n",
    "    lists=[\"\"]\n",
    "    indexs=[]\n",
    "    for idx,c in enumerate(text):\n",
    "        if c==token:\n",
    "            lists.append(\"\")\n",
    "        else:\n",
    "            if lists[-1]=='':\n",
    "                indexs.append(idx)\n",
    "            lists[-1]+=c\n",
    "    lists=list(filter(lambda x:x!=\"\",lists))\n",
    "    final_lists=[[lists[idx],indexs[idx]] for idx in range(len(lists))]\n",
    "    return final_lists\n",
    "def throw_part(text,index):\n",
    "    \"\"\"Trim role/suffix words from a candidate entity mention.\n",
    "\n",
    "    ``text`` is the mention string and ``index`` its start offset in the\n",
    "    source sentence.  When a known role word (CEO, 子, 王, 哥, 姐) appears,\n",
    "    only the part after its last occurrence is kept (offset adjusted);\n",
    "    for trailing markers (等, 身边, 悉数) only the part before is kept\n",
    "    (offset unchanged).  Returns (trimmed_text, new_index).\n",
    "    \"\"\"\n",
    "# trim markers considered: CEO, 子, 王, 等, 哥, 姐\n",
    "#     if \"校长\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'长')[-1]\n",
    "#         return new_text,new_index+index\n",
    "#     if \"巨头\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'头')[-1]\n",
    "#         return new_text,new_index+index\n",
    "#     if \"嘉宾\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'宾')[-1]\n",
    "#         return new_text,new_index+index\n",
    "#     if \"人\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'人')[-1]\n",
    "#         return new_text,new_index+index\n",
    "#     if \"经理\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'理')[-1]\n",
    "#         return new_text,new_index+index\n",
    "#     if \"主席\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'席')[-1]\n",
    "#         return new_text,new_index+index\n",
    "#     if \"总裁\" in text:\n",
    "#         new_text,new_index=split_and_idx(text,'裁')[-1]\n",
    "#         return new_text,new_index+index\n",
    "    if \"CEO\" in text:\n",
    "        new_text,new_index=split_and_idx(text,'O')[-1]\n",
    "        return new_text,new_index+index\n",
    "    # 子 is only trimmed when it is not among the first two characters\n",
    "    if \"子\" in text and \"子\" not in text[:2]:\n",
    "        new_text,new_index=split_and_idx(text,'子')[-1]\n",
    "        return new_text,new_index+index\n",
    "    if \"王\" in text:\n",
    "        new_text,new_index=split_and_idx(text,'王')[-1]\n",
    "        return new_text,new_index+index\n",
    "    if \"哥\" in text:\n",
    "        new_text,new_index=split_and_idx(text,'哥')[-1]\n",
    "        return new_text,new_index+index\n",
    "    if \"姐\" in text:\n",
    "        new_text,new_index=split_and_idx(text,'姐')[-1]\n",
    "        return new_text,new_index+index\n",
    "    # trailing markers: keep the prefix, original offset stays valid\n",
    "    if \"等\" in text[1:]:\n",
    "        new_text,new_index=split_and_idx(text,'等')[0]\n",
    "        return new_text,index\n",
    "    if \"身边\" in text[1:]:\n",
    "        new_text,new_index=split_and_idx(text,'身')[0]\n",
    "        return new_text,index\n",
    "    if \"悉数\" in text[1:]:\n",
    "        new_text,new_index=split_and_idx(text,'悉')[0]\n",
    "        return new_text,index\n",
    "    return text,index\n",
    "def find_token(text,token):\n",
    "    if token==\"《\":\n",
    "        for idx in range(len(text)-1,-1,-1):\n",
    "            if text[idx]==\"》\":\n",
    "                return -1\n",
    "            if text[idx]==token:\n",
    "                return idx\n",
    "    if token==\"》\":\n",
    "        for idx in range(len(text)):\n",
    "            if text[idx]==\"《\":\n",
    "                return -1\n",
    "            if text[idx]==token:\n",
    "                return idx\n",
    "    return -1\n",
    "def find_subs_obs(token_label_pred,text,logits_pred,find_sub=False,find_ob=False,find=False):\n",
    "    subs=[]\n",
    "    sub_offsets=[]\n",
    "    obs=[]\n",
    "    ob_offsets=[]\n",
    "    index=0\n",
    "    flag=False\n",
    "    while index<token_label_pred.shape[0]:\n",
    "        if token_label_pred[index]==3:\n",
    "            subs.append(text[index])\n",
    "            index+=1\n",
    "            while(index<token_label_pred.shape[0] and (token_label_pred[index]==4 or \\\n",
    "                                                       (token_label_pred[index]==3 and token_label_pred[index-1]==3))):\n",
    "                subs[-1]+=(text[index])\n",
    "                index+=1\n",
    "            sub_offsets.append([index-len(subs[-1]),index])\n",
    "            if  find_sub and find_token(text[sub_offsets[-1][0]-10:sub_offsets[-1][0]],\"《\")!=-1 and \\\n",
    "                find_token(text[sub_offsets[-1][1]:sub_offsets[-1][1]+10],\"》\")!=-1:\n",
    "                    begin_index=find_token(text[sub_offsets[-1][0]-10:sub_offsets[-1][0]],\"《\")+ sub_offsets[-1][0]-10\n",
    "                    end_index=find_token(text[sub_offsets[-1][1]:sub_offsets[-1][1]+10],\"》\")+sub_offsets[-1][1]\n",
    "                    if (text[begin_index:end_index+1][1:-1].strip()!=subs[-1]):\n",
    "                        subs[-1]=text[begin_index:end_index+1][1:-1].strip()\n",
    "                        sub_offsets[-1]=[begin_index+1,end_index]\n",
    "            if find and index<token_label_pred.shape[0] and text[index]=='、':\n",
    "                sub=split_and_idx(text[index+1:],\"、\")\n",
    "                if len(sub)>1:\n",
    "                    flag=True\n",
    "                    for e in sub[:-1]:\n",
    "                        assert (e[0]==text[index+1+e[1]:index+1+e[1]+len(e[0])])\n",
    "\n",
    "                        if len(e[0])>20:\n",
    "                            flag=False\n",
    "                            break\n",
    "                        for c in e[0]:\n",
    "                             if is_punctuation(c):\n",
    "                                flag=False\n",
    "                                break\n",
    "                        if not flag:\n",
    "                            break\n",
    "                    if flag:\n",
    "                        for i,e in enumerate(sub):\n",
    "                            info='校长 巨头 嘉宾 人 经理 主席 总裁'.split()\n",
    "                            tag=True\n",
    "                            for t in info:\n",
    "                                if t in e[0]:\n",
    "                                    tag=False\n",
    "                                    break\n",
    "                            if not tag:\n",
    "                                continue\n",
    "\n",
    "                            if e[0].startswith(\"等人\") or \"母亲\" in e[0] or \"妻子\" in e[0]:\n",
    "                                continue\n",
    "\n",
    "                            if \"等\" in e[0][1:]:\n",
    "                             e[0]=e[0].split(\"等\")[0]\n",
    "                            if i==len(sub)-1:\n",
    "                                if len(e[0])<10 and sum(token_label_pred[index+1+e[1]:index+1+e[1]+len(e[0])])==0\\\n",
    "                                    and \"和\" not in e[0] and \"之\" not in e[0]:\n",
    "                                    new_sub,ind=throw_part(e[0],e[1])\n",
    "                                    assert new_sub==text[index+1+ind:index+1+ind+len(new_sub)]\n",
    "                                    if new_sub not in subs:\n",
    "                                        subs.append(new_sub)\n",
    "                                        sub_offsets.append([index+1+ind,index+1+ind+len(new_sub)])\n",
    "\n",
    "                            elif sum(token_label_pred[index+1+e[1]:index+1+e[1]+len(e[0])])==0 and \"和\" not in e[0]\\\n",
    "                                and \"还有\" not in e[0] and \"第一\" not in e[0]:\n",
    "\n",
    "                                new_sub,ind=throw_part(e[0],e[1])\n",
    "                                assert new_sub==text[index+1+ind:index+1+ind+len(new_sub)]\n",
    "                                if new_sub not in subs:\n",
    "                                    subs.append(new_sub)\n",
    "                                    sub_offsets.append([index+1+ind,index+1+ind+len(new_sub)])        \n",
    "            index-=1\n",
    "        if index<token_label_pred.shape[0] and token_label_pred[index]==1:\n",
    "            obs.append(text[index])\n",
    "            index+=1\n",
    "            while(index<token_label_pred.shape[0] and (token_label_pred[index]==2 or \\\n",
    "                                                       (token_label_pred[index]==1 and token_label_pred[index-1]==1))):\n",
    "                obs[-1]+=(text[index])\n",
    "                index+=1\n",
    "            ob_offsets.append([index-len(obs[-1]),index])\n",
    "            if  find_ob and find_token(text[ob_offsets[-1][0]-10:ob_offsets[-1][0]],\"《\")!=-1 and \\\n",
    "                find_token(text[ob_offsets[-1][1]:ob_offsets[-1][1]+10],\"》\")!=-1:\n",
    "                    begin_index=find_token(text[ob_offsets[-1][0]-10:ob_offsets[-1][0]],\"《\")+ ob_offsets[-1][0]-10\n",
    "                    end_index=find_token(text[ob_offsets[-1][1]:ob_offsets[-1][1]+10],\"》\")+ob_offsets[-1][1]\n",
    "                    if (text[begin_index:end_index+1][1:-1].strip()!=obs[-1]):\n",
    "                        obs[-1]=text[begin_index:end_index+1][1:-1].strip()\n",
    "                        ob_offsets[-1]=[begin_index+1,end_index]\n",
    "            if find and index<token_label_pred.shape[0] and text[index]=='、':\n",
    "                sub=split_and_idx(text[index+1:],\"、\")\n",
    "                if len(sub)>1:\n",
    "                    flag=True\n",
    "                    for e in sub[:-1]:\n",
    "                        assert (e[0]==text[index+1+e[1]:index+1+e[1]+len(e[0])])\n",
    "\n",
    "                        if len(e[0])>20:\n",
    "                            flag=False\n",
    "                            break\n",
    "                        for c in e[0]:\n",
    "                             if is_punctuation(c):\n",
    "                                flag=False\n",
    "                                break\n",
    "                        if not flag:\n",
    "                            break\n",
    "                    if flag:\n",
    "\n",
    "                        for i,e in enumerate(sub):\n",
    "                            info='校长 巨头 嘉宾 人 经理 主席 总裁 导演'.split()\n",
    "                            tag=True\n",
    "                            for t in info:\n",
    "                                if t in e[0]:\n",
    "                                    tag=False\n",
    "                                    break\n",
    "                            if not tag:\n",
    "                                continue\n",
    "\n",
    "                            if e[0].startswith(\"等人\") or \"母亲\" in e[0] or \"妻子\" in e[0]:\n",
    "                                continue\n",
    "                            if \"等\" in e[0][1:]:\n",
    "                                 e[0]=e[0].split(\"等\")[0]\n",
    "                            if i==len(sub)-1:\n",
    "\n",
    "                                if len(e[0])<10 and sum(token_label_pred[index+1+e[1]:index+1+e[1]+len(e[0])])==0:\n",
    "                                    new_ob,ind=throw_part(e[0],e[1])\n",
    "                                    assert new_ob==text[index+1+ind:index+1+ind+len(new_ob)]\n",
    "                                    if new_ob not in obs:\n",
    "                                        obs.append(new_ob)\n",
    "                                        ob_offsets.append([index+1+ind,index+1+ind+len(new_ob)])\n",
    "\n",
    "                            elif sum(token_label_pred[index+1+e[1]:index+1+e[1]+len(e[0])])==0 and len(e[0])<10:\n",
    "                                new_ob,ind=throw_part(e[0],e[1])\n",
    "                                assert new_ob==text[index+1+ind:index+1+ind+len(new_ob)]\n",
    "                                if new_ob not in obs:\n",
    "                                    obs.append(new_ob)\n",
    "                                    ob_offsets.append([index+1+ind,index+1+ind+len(new_ob)])\n",
    "\n",
    "#                         print(\"==================\")\n",
    "            index-=1\n",
    "        index+=1\n",
    "    new_subs=[]\n",
    "    new_sub_offsets=[]\n",
    "    sub_logits=[]\n",
    "    for idx,sub in enumerate(subs):\n",
    "        if sub in new_subs:\n",
    "            continue\n",
    "        new_subs.append(sub)\n",
    "        new_sub_offsets.append(sub_offsets[idx])\n",
    "        sub_logits.append(logits_pred[sub_offsets[idx][0]:sub_offsets[idx][1]])\n",
    "    new_obs=[]\n",
    "    new_ob_offsets=[]\n",
    "    ob_logits=[]\n",
    "    for idx,ob in enumerate(obs):\n",
    "        if ob in new_obs:\n",
    "            continue\n",
    "        new_obs.append(ob)\n",
    "        new_ob_offsets.append(ob_offsets[idx])\n",
    "        ob_logits.append(logits_pred[ob_offsets[idx][0]:ob_offsets[idx][1]])\n",
    "    subs=new_subs\n",
    "    sub_offsets=new_sub_offsets\n",
    "    obs=new_obs\n",
    "    ob_offsets=new_ob_offsets\n",
    "    if len(set(subs))!=len(subs) or len(set(obs))!=len(obs):\n",
    "            print(\"subs\",subs)\n",
    "            print(sub_offsets)\n",
    "            print(\"obs\",obs)\n",
    "            print(ob_offsets)\n",
    "    if len(subs)==0:\n",
    "        subs.append('')\n",
    "        sub_offsets.append([0,0])\n",
    "    if len(obs)==0:\n",
    "        obs.append('')\n",
    "        ob_offsets.append([0,0])\n",
    "    return subs,obs,sub_offsets,ob_offsets,sub_logits,ob_logits\n",
    "def make_output(doc2pred_ner_doc,simple_thre,complex_thre,text_data,pred_info,pred_doc_rels):\n",
    "    \"\"\"Assemble predicted SPO triples for every document into pred_info (in place).\n",
    "\n",
    "    For each relation predicted on a document, subjects/objects are extracted with\n",
    "    find_subs_obs and then paired: exhaustively (cartesian product) when the\n",
    "    candidate count is below simple_thre/complex_thre or either side is a\n",
    "    singleton, otherwise by nearest character-offset matching. Relations in\n",
    "    special_major_idx additionally merge affiliated object slots taken from their\n",
    "    special_affilate_idx relations into the same spo entry.\n",
    "    NOTE(review): depends on notebook globals text_preds, text_logits, id2schema,\n",
    "    id2rels, special_pass_idx, special_major_idx, special_affilate_idx.\n",
    "    \"\"\"\n",
    "    for ind,info in tqdm(enumerate(doc2pred_ner_doc)):\n",
    "        rels={}\n",
    "        pred_info[ind]['spo_logits']=[]\n",
    "        # map predicted relation id -> ner-doc index (used for affiliated relations)\n",
    "        for idx in (info):\n",
    "            rels[pred_doc_rels[idx]]=idx\n",
    "        for idx in info:\n",
    "            token_label_pred=text_preds[idx]\n",
    "            label_pred=pred_doc_rels[idx]\n",
    "            logits_pred=text_logits[idx]\n",
    "            text=text_data[ind]['text']\n",
    "            if label_pred in special_pass_idx:\n",
    "                continue\n",
    "            elif label_pred not in special_major_idx:\n",
    "                # ordinary relation: a single '@value' object slot\n",
    "                find_sub_hat,find_ob_hat=False,False\n",
    "                sub_type=id2schema[label_pred]['subject_type']\n",
    "                for key,value in id2schema[label_pred]['object_type'].items():\n",
    "                    ob_type=id2schema[label_pred]['object_type'][key]\n",
    "                # work-like entity types (films/albums/songs/books/...) get the 《》 handling\n",
    "                if sub_type in '影视作品 音乐专辑 歌曲 图书作品 电视综艺 作品 文学作品'.split(\" \"):\n",
    "                    find_sub_hat=True\n",
    "                if ob_type in '影视作品 音乐专辑 歌曲 图书作品 电视综艺 作品 文学作品'.split(\" \"):\n",
    "                    find_ob_hat=True     \n",
    "\n",
    "                subs,obs,sub_offsets,ob_offsets,sub_logits,ob_logits=find_subs_obs(token_label_pred,text,logits_pred,find_sub_hat,find_ob_hat)\n",
    "#                 if ind==980:\n",
    "#                     print(subs)\n",
    "#                     print(obs)\n",
    "                # few candidates (or a singleton side): emit the full cartesian product\n",
    "                if (len(subs)*len(obs)<simple_thre) or len(subs)==1 or len(obs)==1:\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            sub=sub.strip()\n",
    "                            ob=ob.strip()\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':sub}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                            logits_dict={'object':ob_logits[j],'subject':sub_logits[i]}\n",
    "                            pred_info[ind]['spo_logits'].append(logits_dict)\n",
    "                else:\n",
    "                    # many candidates: pair each ob with the nearest sub by start offset\n",
    "                    for j,ob in enumerate(obs):\n",
    "    #                     sub_offset=sub_offsets[i]\n",
    "    #                     min_dis,ob_index=100000,-1\n",
    "                        ob_offset=ob_offsets[j]\n",
    "                        min_dis,sub_index=100000,-1\n",
    "                        for i,sub in enumerate(subs):\n",
    "                            sub=sub.strip()\n",
    "                            ob=ob.strip()\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            if abs(sub_offsets[i][0]-ob_offset[0])<min_dis:\n",
    "                                sub_index=i\n",
    "                                min_dis=abs(sub_offsets[i][0]-ob_offset[0])\n",
    "                        if sub_index!=-1:\n",
    "                            ob=ob.strip()\n",
    "                            if subs[sub_index]=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':subs[sub_index].strip()}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                            logits_dict={'object':ob_logits[j],'subject':sub_logits[sub_index]}\n",
    "                            pred_info[ind]['spo_logits'].append(logits_dict)\n",
    "                    # TODO: if some subs end up without a matched ob, run another pass\n",
    "            else:\n",
    "                # special relation: merge affiliated object slots into the same spo entry\n",
    "                affilate_labels=special_affilate_idx[label_pred]\n",
    "                find_sub_hat,find_ob_hat=False,False\n",
    "                sub_type=id2schema[label_pred]['subject_type']\n",
    "                for key,value in id2schema[label_pred]['object_type'].items():\n",
    "                    ob_type=id2schema[label_pred]['object_type'][key]\n",
    "                if sub_type in '影视作品 音乐专辑 歌曲 图书作品 电视综艺 作品 文学作品'.split(\" \"):\n",
    "                    find_sub_hat=True\n",
    "                if ob_type in '影视作品 音乐专辑 歌曲 图书作品 电视综艺 作品 文学作品'.split(\" \"):\n",
    "                    find_ob_hat=True     \n",
    "\n",
    "                subs,obs,sub_offsets,ob_offsets,sub_logits,ob_logits=find_subs_obs(token_label_pred,text,logits_pred,find_sub_hat,find_ob_hat)\n",
    "                subs2id=dict([(sub,i) for i,sub in enumerate(subs)])\n",
    "                subs_af=[[] for i in range(len(subs))]\n",
    "                for f in affilate_labels:\n",
    "                    if f in rels:\n",
    "                        af_idx=rels[f]\n",
    "                        af_token_label_pred=text_preds[af_idx]\n",
    "                        af_text=text_data[ind]['text']\n",
    "                        sub_type=id2schema[f]['subject_type']\n",
    "                        for key,value in id2schema[f]['object_type'].items():\n",
    "                            ob_type=id2schema[f]['object_type'][key]\n",
    "                        if sub_type in '影视作品 音乐专辑 歌曲 图书作品 电视综艺 作品 文学作品'.split(\" \"):\n",
    "                            find_sub_hat=True\n",
    "                        if ob_type in '影视作品 音乐专辑 歌曲 图书作品 电视综艺 作品 文学作品'.split(\" \"):\n",
    "                            find_ob_hat=True     \n",
    "                        af_subs,af_obs,af_sub_offsets,af_ob_offsets,af_sub_logits,af_ob_logits=find_subs_obs(af_token_label_pred,af_text,logits_pred,find_sub_hat,find_ob_hat)\n",
    "                        for sub in af_subs:\n",
    "                            if sub in subs:\n",
    "                                subs_af[subs2id[sub]].append([af_obs,f,af_ob_offsets])\n",
    "                # currently only the first/nearest affiliated match is kept\n",
    "                if len(subs)*len(obs)<complex_thre or len(subs)==1 or len(obs)==1 :\n",
    "                    # affiliated obs are matched to the nearest main ob by offset\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        sub_offset=sub_offsets[i]\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            sub=sub.strip()\n",
    "                            ob=ob.strip()\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':sub}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            logits_dict={'object':ob_logits[j],'subject':sub_logits[i]}\n",
    "                            if len(subs_af[i])==0:\n",
    "                                    pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                                    pred_info[ind]['spo_logits'].append(logits_dict)\n",
    "                            if len(subs_af[i])>=1:\n",
    "                                ob_offset=ob_offsets[j]\n",
    "                                for af_obs in subs_af[i]:\n",
    "                                    af_label_pred=af_obs[1]\n",
    "                                    af_min_dis,af_ob_index=100000,-1\n",
    "                                    for k,(af_ob,af_offset) in enumerate(zip(af_obs[0],af_obs[2])):\n",
    "                                        af_ob=af_ob.strip()\n",
    "                                        if af_ob=='':\n",
    "                                            continue\n",
    "#                                         rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_ob})\n",
    "#                                         rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "#                                         pred_info[ind]['spo_list'].append(copy.deepcopy(rel_dict))\n",
    "                                        if abs(ob_offset[0]-af_offset[0])<af_min_dis:\n",
    "                                            af_ob_index=k\n",
    "                                            # fix: update af_min_dis (was min_dis) so the nearest candidate wins\n",
    "                                            af_min_dis=abs(ob_offset[0]-af_offset[0])\n",
    "                                    if af_ob_index!=-1:\n",
    "                                        rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_obs[0][af_ob_index].strip()})\n",
    "                                        rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                                pred_info[ind]['spo_logits'].append(logits_dict)\n",
    "                else:\n",
    "                    # here we still just take the first affiliated ob per slot\n",
    "                    for i,sub in enumerate(subs):\n",
    "\n",
    "                        sub_offset=sub_offsets[i]\n",
    "                        min_dis,ob_index=100000,-1\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            if abs(sub_offset[0]-ob_offsets[j][0])<min_dis:\n",
    "                                ob_index=j\n",
    "                                min_dis=abs(sub_offset[0]-ob_offsets[j][0])\n",
    "                        if ob_index!=-1:\n",
    "                            j=ob_index\n",
    "                            if subs[i]=='' or obs[j]=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':obs[j]},'subject':subs[i]}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            logits_dict={'object':ob_logits[j],'subject':sub_logits[i]}\n",
    "                            if len(subs_af[i])==0:\n",
    "                                    pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                                    pred_info[ind]['spo_logits'].append(logits_dict)\n",
    "                            if len(subs_af[i])>=1:\n",
    "#                                 ob_offset=ob_offsets[j]\n",
    "#                                 for af_obs in subs_af[i]:\n",
    "#                                     af_label_pred=af_obs[1]\n",
    "#                                     af_min_dis,af_ob_index=100000,-1\n",
    "#                                     for k,(af_ob,af_offset) in enumerate(zip(af_obs[0],af_obs[2])):\n",
    "#                                         if af_ob=='':\n",
    "#                                             continue\n",
    "#                                         if abs(ob_offset[0]-af_offset[0])<af_min_dis:\n",
    "#                                             af_ob_index=k\n",
    "#                                             min_dis=abs(ob_offset[0]-af_offset[0])\n",
    "#                                     if af_ob_index!=-1:\n",
    "#                                         rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_obs[0][af_ob_index]})\n",
    "#                                         rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "\n",
    "#                                 pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                                for af_obs in subs_af[i]:\n",
    "                                    af_label_pred=af_obs[1]\n",
    "                                    rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "                                    for af_ob in af_obs[0]:\n",
    "                                        rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_ob})\n",
    "                                        break\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict) \n",
    "                                pred_info[ind]['spo_logits'].append(logits_dict)\n",
    "                    # TODO: if some obs remain unmatched at the end, add them as well"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### val"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {
    "code_folding": []
   },
   "outputs": [],
   "source": [
    "# Reassemble per-example token predictions/logits back onto the original\n",
    "# character positions of each validation document; for I-like tags (2, 4) the\n",
    "# loop searches backwards for the matching B tag and fills any gap in between.\n",
    "text_preds=[np.zeros((len(text_data[ner_doc2doc[i]]['text']),len(id2labels)))for i in range(len(ner_doc_token_labels))]\n",
    "text_logits=[np.zeros((len(text_data[ner_doc2doc[i]]['text']),len(id2labels)))for i in range(len(ner_doc_token_labels))]\n",
    "for idx in tqdm(range(len(ner_examples))):\n",
    "    example=ner_examples[idx]\n",
    "    text=example['input_ids'].numpy()\n",
    "    attention_mask=example['attention_mask'].numpy()\n",
    "    token_labels=example['labels'].numpy()\n",
    "    tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index=example['token2doc']\n",
    "    # fix: np.int is a deprecated alias of the builtin int (removed in numpy>=1.24);\n",
    "    # use the explicit np.int64 dtype instead\n",
    "    token_pred=pred_token_pred[idx].astype(np.int64)\n",
    "    token_pred=token_pred[1:1+len(tok_to_orig_start_index)]\n",
    "    logits_pred=pred_token_logits[idx][1:1+len(tok_to_orig_start_index)]\n",
    "    for i in range(len(token_pred)):\n",
    "        begin,end=tok_to_orig_start_index[i],tok_to_orig_end_index[i]\n",
    "        text_logits[example2doc[idx]][begin:end+1]=logits_pred[i]\n",
    "        if token_pred[i]!=0:\n",
    "            text_preds[example2doc[idx]][begin:end+1,token_pred[i]]+=1\n",
    "            if token_pred[i]==2 or token_pred[i]==4:\n",
    "                B_index=-1\n",
    "                # scan backwards for the nearest position tagged with the B class (pred-1)\n",
    "                for k in range(0,-begin-1,-1):\n",
    "                    if text_preds[example2doc[idx]][k+begin,token_pred[i]-1]!=0:\n",
    "                        B_index=k+begin\n",
    "                        break\n",
    "#                 if idx==489:\n",
    "#                     print(i,B_index,text_data[180321]['text'][B_index])\n",
    "                if B_index!=-1:\n",
    "                    for e in range(B_index+1,begin):\n",
    "                        if text_preds[example2doc[idx]][e,token_pred[i]]==0:\n",
    "                            text_preds[example2doc[idx]][e,token_pred[i]]+=1\n",
    "                else:\n",
    "                    print(idx)\n",
    "#         if idx==489:\n",
    "#             print(text_preds[487])\n",
    "text_preds=[(e!=0).astype(np.int32).argmax(axis=-1) for e in text_preds]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {
    "code_folding": [
     4,
     7
    ]
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b93d995b3b7d4cb4b997290ead402241",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:250: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "a4f5dfa285e94ea98193df8eaaeaddcc",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Build per-document prediction containers for the validation run and group\n",
    "# ner-doc indices by their source document, then run make_output.\n",
    "text_data=np.array(text_data)\n",
    "ind2idx=dict([(e,idx) for idx,e in enumerate(range(len(text_data)))])  # NOTE(review): identity map, appears unused here\n",
    "doc2pred_ner_doc=[[] for i in range(len(text_data))]\n",
    "pred_info=[{} for i in range(len(text_data))]\n",
    "for idx,e in tqdm(enumerate(pred_info)):\n",
    "    e['text']=text_data[idx]['text']\n",
    "    e['spo_list']=[]\n",
    "for idx in range(len(ner_doc_rels[:])):\n",
    "    ind=ner_doc2doc[idx]\n",
    "    doc2pred_ner_doc[ind].append(idx)\n",
    "flag=False  # NOTE(review): appears unused in this cell\n",
    "simple_thre=1000\n",
    "complex_thre=1000   # generate as many entity pairs as possible at this stage\n",
    "make_output(doc2pred_ner_doc,simple_thre,complex_thre,text_data,pred_info,ner_doc_rels)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach cls logits and convert spo logit arrays to plain Python lists so the\n",
    "# records are JSON-serializable; also carry over the gold plan_spo_list.\n",
    "for idx in tqdm(range(len(pred_info))):\n",
    "    pred_info[idx]['cls_logits']=list(pred_cls_logits[idx])\n",
    "    for spo in pred_info[idx]['spo_logits']:\n",
    "        spo['object']=spo['object'].tolist()\n",
    "        spo['subject']=spo['subject'].tolist()\n",
    "    pred_info[idx]['plan_spo_list']=text_data[idx]['plan_spo_list']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Dump the prediction records as JSON lines for the downstream classifier stage.\n",
    "with open(\"./cross_results/train_info.json\",\"w\") as w:\n",
    "    for e in tqdm(pred_info):\n",
    "        w.write(json.dumps(e, ensure_ascii=False)+'\\n')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 139,
   "metadata": {
    "code_folding": [
     2
    ],
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:3: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  This is separate from the ipykernel package so we can avoid doing imports until\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f14e70a5b2974e32aabfaf75a50ef321",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Count exact-match spo triples between predictions and gold labels\n",
    "# (dict equality, so all keys of each spo must match).\n",
    "rel_nums,pred_nums=0,0\n",
    "right_num=0\n",
    "for idx,(pred,label) in tqdm(enumerate(zip(pred_info,text_data))):\n",
    "        pred_nums+=len(pred['spo_list'])\n",
    "        rel_nums+=len(label['spo_list'])\n",
    "#         print(idx)\n",
    "#         print(\"text:\",label['text'])\n",
    "#         print(\"real_spo_list:\",label['spo_list'])\n",
    "#         print(\"pred_spo_list:\",pred['spo_list'])\n",
    "        for e in pred['spo_list']:\n",
    "            if e in label['spo_list']:\n",
    "                right_num+=1\n",
    "#         for e in label['spo_list']:\n",
    "#             if not e in pred['spo_list']:\n",
    "#                 print(idx)\n",
    "#                 print(e)\n",
    "#                 print(label['text'])\n",
    "#                 for t in label['spo_list']:\n",
    "#                     if t['predicate']==e['predicate']:\n",
    "#                         print(t)\n",
    "#                 print(\"=================================\")\n",
    "#                 for t in pred['spo_list']:\n",
    "#                     if t['predicate']==e['predicate']:\n",
    "#                         print(t)\n",
    "         "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 140,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(278336, 401495, 348534)"
      ]
     },
     "execution_count": 140,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# raw counts: (correct, predicted, gold)\n",
    "right_num,pred_nums,rel_nums"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 141,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(0.6932489819300365, 0.7985906683422564, 0.7422006349087835)"
      ]
     },
     "execution_count": 141,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Micro precision / recall / F1 over exact-match spo triples.\n",
    "# NOTE(review): assumes pred_nums, rel_nums and precision+recall are non-zero\n",
    "# (no zero-division guard).\n",
    "precision=right_num/pred_nums\n",
    "recall=right_num/rel_nums\n",
    "f1=2*precision*recall/(precision+recall)\n",
    "precision,recall,f1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 246,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cross-validation ensemble section (currently disabled)\n",
    "# preds=[]\n",
    "# logits=[]\n",
    "# rel_logits=[]\n",
    "# for idx in range(3):\n",
    "#     preds.append(np.load(\"./cross_results/test_pred_\"+str(idx)+\".npy\"))\n",
    "#     logits.append(np.load(\"./cross_results/test_logit_\"+str(idx)+\".npy\"))\n",
    "#     rel_logits.append(np.load(\"./cross_results/test_rel_\"+str(idx)+\".npy\"))\n",
    "# preds.append(np.load(\"./cross_results/test_pred_full.npy\"))\n",
    "# logits.append(np.load(\"./cross_results/test_logit_full.npy\"))\n",
    "# rel_logits.append(np.load(\"./cross_results/test_rel_full.npy\"))\n",
    "# preds=np.stack(preds,axis=2)\n",
    "\n",
    "# pred_token_pred=np.zeros((len(pred_examples),256))\n",
    "# for idx,pred in tqdm(enumerate(preds)):\n",
    "#     for j,token in enumerate(pred):\n",
    "#         token_pred=np.argmax(np.bincount(token))\n",
    "#         if token_pred==0 and (token>0).sum()>=2 and ((not (np.bincount(token)>1).any()) or (np.bincount(token)==2).sum()==2):\n",
    "#             if (not (np.bincount(token)>1).any()):\n",
    "#                 print(token)\n",
    "#                 token_pred=token[-1]\n",
    "#                 print(token_pred)\n",
    "#             else:\n",
    "#                 token_pred=token.sum()//2\n",
    "\n",
    "#         pred_token_pred[idx][j]=token_pred\n",
    "# pred_token_pred=pred_token_pred.astype(np.int)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "code_folding": [],
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:3: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  This is separate from the ipykernel package so we can avoid doing imports until\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "33975615c29d4ef8a2a6d2f589613a1c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=18646), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Reassemble token predictions/logits onto document character positions for the\n",
    "# test split; for I-like tags (2, 4) the backwards scan bridges the gap to the\n",
    "# matching B tag.\n",
    "# NOTE(review): near-duplicate of the validation cell above — consider\n",
    "# extracting a shared function.\n",
    "text_preds=[np.zeros((len(test_text_data[pred_doc2doc[i]]['text']),len(id2labels)))for i in range(len(pred_doc_token_labels[:]))]\n",
    "text_logits=[np.zeros((len(test_text_data[pred_doc2doc[i]]['text']),len(id2labels)))for i in range(len(pred_doc_token_labels))]\n",
    "for idx in tqdm(range(len(ner_pred_dataset))):\n",
    "    text=ner_pred_dataset[idx][0].numpy()\n",
    "    attention_mask=ner_pred_dataset[idx][1].numpy()\n",
    "    token_labels=ner_pred_dataset[idx][4].numpy()\n",
    "    tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index=ner_pred_dataset[idx][-1]\n",
    "    token_pred=pred_token_pred[idx]\n",
    "    token_pred=token_pred[1:1+len(tok_to_orig_start_index)]\n",
    "    logits_pred=pred_token_logits[idx][1:1+len(tok_to_orig_start_index)]\n",
    "    for i in range(len(token_pred)):\n",
    "        begin,end=tok_to_orig_start_index[i],tok_to_orig_end_index[i]\n",
    "        text_logits[pred_example2doc[idx]][begin:end+1]=logits_pred[i]\n",
    "        if token_pred[i]!=0:\n",
    "            text_preds[pred_example2doc[idx]][begin:end+1,token_pred[i]]+=1\n",
    "            if token_pred[i]==2 or token_pred[i]==4:\n",
    "                B_index=-1\n",
    "                for k in range(0,-begin-1,-1):\n",
    "                    if text_preds[pred_example2doc[idx]][k+begin,token_pred[i]-1]!=0:\n",
    "                        B_index=k+begin\n",
    "                        break\n",
    "#                 if idx==489:\n",
    "#                     print(i,B_index,text_data[180321]['text'][B_index])\n",
    "                if B_index!=-1:\n",
    "                    for e in range(B_index+1,begin):\n",
    "                        if text_preds[pred_example2doc[idx]][e,token_pred[i]]==0:\n",
    "                            text_preds[pred_example2doc[idx]][e,token_pred[i]]+=1\n",
    "#                 else:\n",
    "#                     print(idx)\n",
    "#         if idx==489:\n",
    "#             print(text_preds[487])\n",
    "text_preds=[(e!=0).astype(np.int32).argmax(axis=-1) for e in text_preds]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {
    "code_folding": [],
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:250: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b46dbb76849e4a8aaf23400a4ffb62d0",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Build test-split prediction containers and run make_output with much tighter\n",
    "# thresholds (4) than on the validation run (1000).\n",
    "\n",
    "doc2pred_ner_doc=[[] for i in range(len(test_text_data))]\n",
    "pred_info=[{} for i in range(len(test_text_data))]\n",
    "for idx,e in enumerate(pred_info):\n",
    "    e['text']=test_text_data[idx]['text']\n",
    "    e['spo_list']=[]\n",
    "for idx in range(len(pred_doc_rels)):\n",
    "    ind=pred_doc2doc[idx]\n",
    "    doc2pred_ner_doc[ind].append(idx)\n",
    "flag=False  # NOTE(review): appears unused in this cell\n",
    "simple_thre=4\n",
    "complex_thre=4\n",
    "make_output(doc2pred_ner_doc,simple_thre,complex_thre,test_text_data,pred_info,pred_doc_rels)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:1: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6975c39543384ad7b069b4853c40cd51",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=10468), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Convert spo logit arrays to plain lists for the test predictions\n",
    "# (cls_logits line disabled: no classifier logits on the test split here).\n",
    "for idx in tqdm(range(len(pred_info))):\n",
    "#     pred_info[idx]['cls_logits']=list(pred_cls_logits[idx])\n",
    "    for spo in pred_info[idx]['spo_logits']:\n",
    "        spo['object']=spo['object'].tolist()\n",
    "        spo['subject']=spo['subject'].tolist()\n",
    "    pred_info[idx]['plan_spo_list']=test_text_data[idx]['plan_spo_list']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##### 分类器筛选 (classifier-based filtering)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import lightgbm as lgb\n",
    "lgb_clf=lgb.Booster(model_file=\"./model.txt\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data=pred_info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {
    "code_folding": []
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7c9b785cfd6b40b5918f7bc36fac4dbd",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "train_X=[]\n",
    "train_Y=[]\n",
    "full_rel_nums=np.zeros((len(train_data)))\n",
    "rel_index=[]\n",
    "for i,e in tqdm(enumerate(train_data)):\n",
    "#     cls_logits=np.array(e['cls_logits'])\n",
    "    triple_num=len(e['spo_list'])\n",
    "    rels=set()\n",
    "    entities_num=0\n",
    "    for spo in e['spo_list']:\n",
    "        rels.add(spo['predicate'])\n",
    "        entities_num+=(1+len(spo['object']))\n",
    "    rel_num=len(rels)\n",
    "    for idx in range(len(e['spo_list'])):\n",
    "        spo=e['spo_list'][idx]\n",
    "        spo_logits=e['spo_logits'][idx]\n",
    "        sub_ob_logits=np.concatenate([np.array(spo_logits['subject'])[0],np.array(spo_logits['subject'])[-1],\\\n",
    "                              np.array(spo_logits['object'])[0],np.array(spo_logits['object'])[-1]])\n",
    "        predicate=spo['predicate'] if spo['predicate'] not in special_rels else spo['predicate']+'_@value'\n",
    "        sub=spo['subject']\n",
    "        sub_len=len(sub)\n",
    "        ob=spo['object']['@value']\n",
    "        ob_len=len(ob)\n",
    "        show_time=0\n",
    "        for plan_spo in e['plan_spo_list']:\n",
    "            if plan_spo['predicate']==predicate and plan_spo['subject']==sub and plan_spo['object']==ob:\n",
    "                show_time+=1\n",
    "                break\n",
    "        feature=np.concatenate([sub_ob_logits,np.array([show_time,rel_num,entities_num,triple_num]),np.array([sub_len,ob_len])])\n",
    "        label=1\n",
    "        train_X.append(feature)\n",
    "        train_Y.append(label)\n",
    "        rel_index.append([i,spo])\n",
    "train_X=np.array(train_X)\n",
    "train_Y=np.array(train_Y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(23083, 42)"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_X.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "val_pre=lgb_clf.predict(train_X[:])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(22326, 23083)"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "((val_pre[:]>0.1)==1).sum(),(train_Y[:]==1).sum() #P"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:2: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "df3842926ddd4c01a48e714eba153f08",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=23083), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "num=0\n",
    "for idx in tqdm(range(len(val_pre))):\n",
    "    if val_pre[idx]<=0.1:\n",
    "        text_idx=rel_index[idx][0]\n",
    "        spo=rel_index[idx][1]\n",
    "        pred_info[text_idx]['spo_list'].remove(spo)\n",
    "#         print(spo)\n",
    "#         print(pred_info[text_idx]['text'])\n",
    "#         num+=1\n",
    "#         if num>=20:\n",
    "#             break\n",
    "#         print(\"=\"*10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "for idx in range(len(pred_info)):\n",
    "    pred_info[idx].pop('spo_logits')\n",
    "#     pred_info[idx].pop('cls_logits')\n",
    "    pred_info[idx].pop('plan_spo_list')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### test后处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "22326"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "nums=0\n",
    "for e in pred_info:\n",
    "    nums+=len(e['spo_list'])\n",
    "nums"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open(\"./predict_res.json\",\"w\") as w:\n",
    "    for e in pred_info:\n",
    "        w.write(json.dumps(e, ensure_ascii=False)+'\\n')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "194917"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_data=[]\n",
    "pred_num=0\n",
    "with open(\"./frzhu/test2_res.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        pred_data.append(json.loads(d))\n",
    "        pred_num+=len(pred_data[-1]['spo_list'])\n",
    "pred_num"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "heading_collapsed": true
   },
   "source": [
    "##### 知识库匹配"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "# similarity_matrix=np.zeros((len(id2rels),len(id2rels)))\n",
    "# for i in range(len(id2rels)):\n",
    "#     for j in range(len(id2rels)):\n",
    "#         similarity_matrix[i][j]=wv_from_text.similarity(id2rels[i].split(\"_\")[0],id2rels[j].split(\"_\")[0])\n",
    "# df=pd.DataFrame(similarity_matrix,index=id2rels,columns=id2rels)\n",
    "# infos=[]\n",
    "# for idx,col in enumerate(df.columns):\n",
    "#     temp=df.iloc[:,idx:idx+1].sort_values(by=col,ascending=False)\n",
    "#     new_temp=temp[temp[col]<1-1e-3]\n",
    "#     infos.append(new_temp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {
    "code_folding": [
     6
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:7: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  import sys\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "118b721f266c4333b13cd65fd90feafd",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=1, bar_style='info', max=1), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "id2pair_pre='编剧 导演 主演 作词 作曲 制片人 父亲 母亲 妻子 丈夫'.split(\" \")\n",
    "pair_pre_list=[['编剧','导演'],['编剧','导演','主演','制片人'],['导演','主演'],['作词', '作曲'],['作词', '作曲'],['导演','制片人'],\\\n",
    "              ['父亲','母亲'],['父亲','母亲'],['妻子','丈夫'],['妻子','丈夫']]\n",
    "pair_pre2id=dict([(e,idx) for idx,e in enumerate(id2pair_pre)])\n",
    "subs_predicate=dict()\n",
    "obs_predicate=dict()\n",
    "for idx,e in tqdm(enumerate(text_data)):\n",
    "    for spo in e['spo_list']:\n",
    "        pred_list=subs_predicate.get(spo['subject'],dict([(e,0) for idx,e in enumerate(id2pair_pre)]))\n",
    "        if spo['predicate'] in pred_list:\n",
    "            pred_list[spo['predicate']]+=1\n",
    "            subs_predicate[spo['subject']]=pred_list\n",
    "        pred_list=obs_predicate.get(spo['object']['@value'],dict([(e,0) for idx,e in enumerate(id2pair_pre)]))\n",
    "        if spo['predicate'] in pred_list:\n",
    "            pred_list[spo['predicate']]+=1\n",
    "            obs_predicate[spo['object']['@value']]=pred_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists(\"./dataset/test1_data/new_test1_data.json\"):\n",
    "    spo_corpus=[]\n",
    "    for idx,e in tqdm(enumerate(text_data)):\n",
    "        for spo in e['spo_list']:\n",
    "            if spo['predicate'] in special_rels:\n",
    "                for key,value in spo['object'].items():\n",
    "                    rel=spo['predicate']+\"_\"+key\n",
    "                    ob=value\n",
    "                    sub=spo['subject']\n",
    "                    sample={'predicate':rel,'object':ob,'subject':sub}\n",
    "                    spo_corpus.append(sample)\n",
    "            else:\n",
    "                for key,value in spo['object'].items():\n",
    "                    rel=spo['predicate']\n",
    "                    ob=value\n",
    "                    sub=spo['subject']\n",
    "                    sample={'predicate':rel,'object':ob,'subject':sub}\n",
    "                    spo_corpus.append(sample)\n",
    "    for idx,e in tqdm(enumerate(test_text_data)):\n",
    "        self_corpus=[]\n",
    "        for spo in spo_corpus:\n",
    "            if spo['subject'] in e['text']  and spo['object'] in e['text'] :\n",
     "                if (spo['subject']!='' and spo['object']!='') and spo not in self_corpus:\n",
    "                    self_corpus.append(spo)\n",
    "        e['plan_spo_list']=e.get('plan_spo_list',[])+self_corpus\n",
    "    with open(\"./dataset/test1_data/new_test1_data.json\",\"w\") as w:\n",
    "        for e in test_text_data:\n",
    "            w.write(json.dumps(e, ensure_ascii=False)+'\\n')\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    new_test_text_data=[]\n",
    "    with open(\"./dataset/test1_data/new_test1_data.json\",\"r\") as r:\n",
    "        raw_data=r.readlines()\n",
    "        for d in raw_data:\n",
    "            new_test_text_data.append(json.loads(d))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {
    "code_folding": [
     0
    ],
    "hidden": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists(\"./dataset/test2_data/new_test2_data.json\"):\n",
    "    spo_corpus=[]\n",
    "    for idx,e in tqdm(enumerate(text_data)):\n",
    "        for spo in e['spo_list']:\n",
    "            if spo['predicate'] in special_rels:\n",
    "                for key,value in spo['object'].items():\n",
    "                    rel=spo['predicate']+\"_\"+key\n",
    "                    ob=value\n",
    "                    sub=spo['subject']\n",
    "                    sample={'predicate':rel,'object':ob,'subject':sub}\n",
    "                    spo_corpus.append(sample)\n",
    "            else:\n",
    "                for key,value in spo['object'].items():\n",
    "                    rel=spo['predicate']\n",
    "                    ob=value\n",
    "                    sub=spo['subject']\n",
    "                    sample={'predicate':rel,'object':ob,'subject':sub}\n",
    "                    spo_corpus.append(sample)\n",
    "    for idx,e in tqdm(enumerate(test2_text_data)):\n",
    "        self_corpus=[]\n",
    "        for spo in spo_corpus:\n",
    "            if spo['subject'] in e['text']  and spo['object'] in e['text'] :\n",
     "                if (spo['subject']!='' and spo['object']!='') and spo not in self_corpus:\n",
    "                    self_corpus.append(spo)\n",
    "        e['plan_spo_list']=e.get('plan_spo_list',[])+self_corpus\n",
    "    with open(\"./dataset/test2_data/new_test2_data.json\",\"w\") as w:\n",
    "        for e in test2_text_data:\n",
    "            w.write(json.dumps(e, ensure_ascii=False)+'\\n')\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    new_test2_text_data=[]\n",
    "    with open(\"./dataset/test2_data/new_test2_data.json\",\"r\") as r:\n",
    "        raw_data=r.readlines()\n",
    "        for d in raw_data:\n",
    "            new_test2_text_data.append(json.loads(d))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "hidden": true
   },
   "source": [
    "###### 知识库长词替换及空缺填充"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {
    "code_folding": [
     4
    ],
    "hidden": true,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "41cf8f3a5d8d4cea8d87620bf348c828",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=101311), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "def cross_contain(a,b):\n",
    "    if a.find(b)!=-1 or b.find(a)!=-1:\n",
    "        return True\n",
    "    return False\n",
    "for idx in tqdm(range(len(pred_data))):\n",
    "    for spo in new_test2_text_data[idx]['plan_spo_list']:\n",
    "            if spo['predicate'].split(\"_\")[0] not in special_rels:\n",
    "                        ob_type='@value'\n",
    "            else:\n",
    "                        ob_type=spo['predicate'].split(\"_\")[1]\n",
    "            rel_dict={'object':{ob_type:spo['object']},'subject':spo['subject']}\n",
    "            rel_dict.update(id2schema[rels2id[spo['predicate']]])\n",
    "            if rel_dict not in pred_data[idx]['spo_list']:\n",
    "                rel=rel_dict['predicate']\n",
    "\n",
    "                flag=True\n",
    "                for t in pred_data[idx]['spo_list']:\n",
    "                    if ob_type not in t['object']:\n",
    "                        continue\n",
    "                    if t['predicate']==rel and rel in ['丈夫','妻子'] and t['subject']==rel_dict['object']['@value']\\\n",
    "                                and t['object']['@value']==rel_dict['subject']:\n",
    "                                t['subject']=rel_dict['object']['@value']\n",
    "                                t['object']['@value']=rel_dict['subject']\n",
    "                    if t['predicate']==rel and cross_contain(rel_dict['subject'],t['subject']) \\\n",
    "                    and cross_contain(rel_dict['object'][ob_type],t['object'][ob_type]):\n",
    "                            flag=False\n",
    "                            if len(rel_dict['subject'])+len(rel_dict['object'][ob_type])>len(t['subject'])+len(t['object'][ob_type]):\n",
    "                                    t['subject']=rel_dict['subject']\n",
    "                                    t['object'][ob_type]=rel_dict['object'][ob_type]\n",
    "                            break\n",
    "                if flag:\n",
    "                    # \n",
    "                    #'董事长',,'歌手',\n",
    "                    if rel in ['父亲','母亲','校长','作者','董事长','主持人','歌手','主演']\\\n",
    "                    and  rel_dict not in pred_data[idx]['spo_list'] \\\n",
    "                    and len(rel_dict['subject'])!=1 and len(rel_dict['object']['@value'])!=1:\n",
    "#                         print(rel_dict)\n",
    "#                         print(pred_data[idx])\n",
    "#                         print(\"======================\")\n",
    "                            pred_data[idx]['spo_list'].append(copy.deepcopy(rel_dict))\n",
    "#     if len(pred_data[idx]['spo_list'])==0:\n",
    "#         for spo in new_test2_text_data[idx]['plan_spo_list']:\n",
    "#                 if spo['predicate'].split(\"_\")[0] not in special_rels:\n",
    "#                             ob_type='@value'\n",
    "#                             rel_dict={'object':{ob_type:spo['object']},'subject':spo['subject']}\n",
    "#                             rel_dict.update(id2schema[rels2id[spo['predicate']]])\n",
    "#                             if ob_type=='@value':\n",
    "#                                 pred_data[idx]['spo_list'].append(rel_dict)\n",
    "#                 else:\n",
    "#                     ob_type=spo['predicate'].split(\"_\")[1]\n",
    "#                     if ob_type=='@value':\n",
    "#                         rel_dict={'object':{ob_type:spo['object']},'subject':spo['subject']}\n",
    "#                         rel_dict.update(id2schema[rels2id[spo['predicate']]])\n",
    "#                         print(rel_dict)\n",
    "#                         flag=True\n",
    "#                         for e in new_test2_text_data[idx]['plan_spo_list']:\n",
    "#                             if e['predicate'].split(\"_\")[0] in special_rels:\n",
    "#                                 e_ob_type=e['predicate'].split(\"_\")[1]\n",
    "#                                 if e['predicate'].split(\"_\")[0]==spo['predicate'].split(\"_\")[0] and e_ob_type!='@value':\n",
    "#                                     print(\"affiliate\",e)\n",
    "#                                     flag=False\n",
    "#                                     rel_dict['object'].update({e_ob_type:e['object']})\n",
    "#                                     rel_dict['object_type'].update(copy.deepcopy(id2schema[rels2id[e['predicate']]]['object_type']))\n",
    "#                                     print(rel_dict)\n",
    "#                                     pred_data[idx]['spo_list'].append(copy.deepcopy(rel_dict))\n",
    "#                         if flag:\n",
    "#                             print(rel_dict)\n",
    "#                             pred_data[idx]['spo_list'].append(copy.deepcopy(rel_dict))\n",
    "#         print(\"==================\")\n",
    "                        \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "code_folding": [],
    "hidden": true
   },
   "outputs": [],
   "source": [
    "for idx in range(len(pred_data)):\n",
    "    remove_spo=[]\n",
    "    for j,t in enumerate(pred_data[idx]['spo_list']):\n",
    "        for k,spo in enumerate(pred_data[idx]['spo_list']):\n",
    "            if k==j:\n",
    "                continue\n",
    "            if t['predicate'] not in special_rels and  t['predicate']==spo['predicate'] and cross_contain(spo['subject'],t['subject']) \\\n",
    "            and cross_contain(spo['object']['@value'],t['object']['@value']):\n",
    "                    if len(spo['subject'])+len(spo['object']['@value'])>len(t['subject'])+len(t['object']['@value']):\n",
    "                                        remove_spo.append(t)\n",
    "                    else:\n",
    "                        remove_spo.append(spo)\n",
    "    if len(remove_spo)>0:\n",
    "        print(pred_data[idx]['spo_list'])\n",
    "        for e in remove_spo:\n",
    "            print(e)\n",
    "            if e in pred_data[idx]['spo_list']:\n",
    "                pred_data[idx]['spo_list'].remove(e)\n",
    "        print(\"after\")\n",
    "        print(pred_data[idx]['spo_list'])\n",
    "\n",
    "        print(\"===========================\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "hidden": true
   },
   "source": [
    "###### 一致性检验"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "code_folding": [],
    "hidden": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "for idx in tqdm(range(len(pred_data))):\n",
    "    removing_spo=[]\n",
    "    for spo in pred_data[idx]['spo_list']:\n",
    "        try:\n",
    "            pred_list=obs_predicate.get(spo['object']['@value'],dict())\n",
     "        except (KeyError, TypeError):\n",
     "            print(spo)\n",
     "            removing_spo.append(spo)\n",
     "            continue\n",
    "        if len(pred_list)>0 and spo['predicate']  in pred_list:\n",
    "            if pred_list[spo['predicate']]==0:\n",
    "                num=0\n",
    "                for e in pair_pre_list[pair_pre2id[spo['predicate']]]:\n",
    "                    if e!=spo['predicate']:\n",
    "                        num+=pred_list[e]\n",
    "                if (spo['predicate'] in ['编剧','导演','主演','作词','作曲','制片人'] and num>13) or (spo['predicate'] in ['父亲','母亲','妻子','丈夫'] and num>0):\n",
    "                      print(idx)\n",
    "                      print(spo)\n",
    "                      print(pred_list)\n",
    "                      print(pred_data[idx])\n",
    "                      removing_spo.append(spo)\n",
    "                      print(pred_data[idx])\n",
    "    for spo in removing_spo:\n",
    "        pred_data[idx]['spo_list'].remove(spo)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "code_folding": [],
    "hidden": true,
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "for idx,e in enumerate(pred_data):\n",
    "    for spo in e['spo_list']:\n",
     "        while spo['subject'] and spo['subject'][0]=='《' and ( spo['subject'][-1]=='》' or  spo['subject'].find(\"》\")==-1):\n",
    "            print(\"sub\",spo['subject'])\n",
    "            spo['subject']=spo['subject'].replace(\"《\",\"\").replace(\"》\",\"\")\n",
    "            print(\"sub\",spo['subject'])         \n",
    "            break\n",
    "        poped_keys=[]\n",
    "        for key,value in spo['object'].items():\n",
    "            if len(value)==0:\n",
    "                print(spo)\n",
    "                poped_keys.append(key)\n",
    "            if len(poped_keys)>0:\n",
    "                for t in poped_keys:\n",
    "                    print(t)\n",
    "                    spo['object'].pop(t)\n",
    "                    spo['object_type'].pop(t)\n",
    "                print(idx,pred_data[idx])\n",
    "                break\n",
    "            while value[0]=='《' and (value[-1]=='》' or value.find(\"》\")==-1):\n",
    "                print(\"ob\",key,value)\n",
    "                spo['object'][key]=value.replace(\"《\",\"\").replace(\"》\",\"\")\n",
    "                print(\"ob\",spo['object'][key])        \n",
    "                break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 86,
   "metadata": {
    "hidden": true
   },
   "outputs": [],
   "source": [
    "with open(\"./frzhu/test2_res kg.json\",\"w\") as w:\n",
    "    for e in pred_data:\n",
    "        w.write(json.dumps(e, ensure_ascii=False)+'\\n')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 对比"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 104,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "309022"
      ]
     },
     "execution_count": 104,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_data_1=[]\n",
    "pred_num_1=0\n",
    "with open(\"./test2 predict_res kg.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        pred_data_1.append(json.loads(d))\n",
    "        pred_num_1+=len(pred_data_1[-1]['spo_list'])\n",
    "pred_num_1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 101,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "226369"
      ]
     },
     "execution_count": 101,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_data_2=[]\n",
    "pred_num_2=0\n",
    "with open(\"./frzhu/test2_res kg.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        pred_data_2.append(json.loads(d))\n",
    "        pred_num_2+=len(pred_data_2[-1]['spo_list'])\n",
    "pred_num_2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 94,
   "metadata": {},
   "outputs": [],
   "source": [
    "def cross_contain(a,b):\n",
    "    if a.find(b)!=-1 or b.find(a)!=-1:\n",
    "        return True\n",
    "    return False\n",
    "for idx in range(len(pred_data_1)):\n",
    "    for spo in pred_data_2[idx]['spo_list']:\n",
    "        flag=True\n",
    "        for temp in pred_data_1[idx]['spo_list']:\n",
    "            if cross_contain(temp['subject'],spo['subject']) or cross_contain(temp['object']['@value'],spo['object']['@value'])\\\n",
    "              or len(spo['subject'])>10 or len(spo['object']['@value'])>10:\n",
    "                flag=False\n",
    "                break\n",
    "        if flag:\n",
    "            pred_data_1[idx]['spo_list'].append(spo)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open(\"./test2 predict_res kg combine.json\",\"w\") as w:\n",
    "    for e in pred_data_1:\n",
    "        w.write(json.dumps(e, ensure_ascii=False)+'\\n')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 102,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "310078"
      ]
     },
     "execution_count": 102,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_data_2=[]\n",
    "pred_num_2=0\n",
    "with open(\"./test2 predict_res kg combine.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        pred_data_2.append(json.loads(d))\n",
    "        pred_num_2+=len(pred_data_2[-1]['spo_list'])\n",
    "pred_num_2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "for idx in range(len(pred_data_1)):\n",
    "    for spo in pred_data_2[idx]['spo_list']:\n",
    "        if spo['predicate']  in special_rels:\n",
    "            print(spo)\n",
    "#     if not len(pred_data_1[idx]['spo_list'])==len(pred_data_2[idx]['spo_list']):\n",
    "#         print(idx)\n",
    "#         print(pred_data_1[idx]['text'])\n",
    "#         print(pred_data_1[idx]['spo_list'])\n",
    "#         print(\"===========================\")\n",
    "#         print(pred_data_2[idx]['spo_list'])\n",
    "#         print(\"-------------------------------\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fjw",
   "language": "python",
   "name": "fjw"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
