{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "code_folding": [
     50,
     55
    ]
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "unable to import 'smart_open.gcs', disabling that module\n"
     ]
    }
   ],
   "source": [
    "import torchtext\n",
    "import gensim\n",
    "from gensim.test.utils import datapath, get_tmpfile\n",
    "from gensim.models import KeyedVectors\n",
    "from gensim.scripts.glove2word2vec import glove2word2vec\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "import json\n",
    "import torch\n",
    "from transformers import *\n",
    "from transformers import AdamW\n",
    "import torch.utils.data as Data\n",
    "import collections\n",
    "import os\n",
    "import random\n",
    "import tarfile\n",
    "import torch\n",
    "from torch import nn\n",
    "import torchtext.vocab as Vocab\n",
    "import pickle as pk\n",
    "import copy\n",
    "import time\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import random\n",
    "import torch.nn.functional as F\n",
    "from IPython.display import display,HTML\n",
    "import os\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "from torch.nn.utils.rnn import pack_padded_sequence\n",
    "from torch.nn.utils.rnn import pad_packed_sequence\n",
    "from torch.nn.utils.rnn import pack_sequence\n",
    "from torch.nn import CrossEntropyLoss, MSELoss\n",
    "from torchcrf import CRF\n",
    "from sklearn import metrics\n",
    "import joblib\n",
    "import math\n",
    "device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "import argparse\n",
    "import glob\n",
    "import json\n",
    "import logging\n",
    "import unicodedata\n",
    "from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n",
    "from torch.utils.data.distributed import DistributedSampler\n",
    "from tqdm import tqdm_notebook as tqdm\n",
    "import torch.utils.data as Data\n",
    "import jieba\n",
    "import jieba.posseg as pseg\n",
    "import numpy as np\n",
    "logger = logging.getLogger(__name__)\n",
    "logging.basicConfig(\n",
    "    format=\"%(asctime)s - %(levelname)s - %(name)s -   %(message)s\",\n",
    "    datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
    "    level=logging.INFO \n",
    ")\n",
    "ARG=collections.namedtuple('ARG',['train_batch_size',\n",
    " 'eval_batch_size',\n",
    " 'weight_decay',\n",
    " 'learning_rate',\n",
    " 'adam_epsilon',\n",
    " 'num_train_epochs',\n",
    " 'warmup_steps',\n",
    " 'gradient_accumulation_steps',\n",
    " 'save_steps',\n",
    " 'max_grad_norm',\n",
    " 'model_name_or_path',\n",
    " 'output_dir',\n",
    " 'seed',\n",
    " 'device',\n",
    " 'n_gpu',\n",
    " 'max_steps',\n",
    " 'output_mode',\n",
    "'fp16_opt_level',\n",
    "'fp16',\n",
    "'card_list'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/29/2020 23:33:43 - INFO - transformers.tokenization_utils -   Model name './roberta-zh-wwm-pytorch/vocab.txt' not found in model shortcut name list (bert-base-uncased, bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, bert-base-multilingual-cased, bert-base-chinese, bert-base-german-cased, bert-large-uncased-whole-word-masking, bert-large-cased-whole-word-masking, bert-large-uncased-whole-word-masking-finetuned-squad, bert-large-cased-whole-word-masking-finetuned-squad, bert-base-cased-finetuned-mrpc, bert-base-german-dbmdz-cased, bert-base-german-dbmdz-uncased, bert-base-finnish-cased-v1, bert-base-finnish-uncased-v1, bert-base-dutch-cased). Assuming './roberta-zh-wwm-pytorch/vocab.txt' is a path, a model identifier, or url to a directory containing tokenizer files.\n",
      "04/29/2020 23:33:43 - WARNING - transformers.tokenization_utils -   Calling BertTokenizer.from_pretrained() with the path to a single file or url is deprecated\n",
      "04/29/2020 23:33:43 - INFO - transformers.tokenization_utils -   loading file ./roberta-zh-wwm-pytorch/vocab.txt\n"
     ]
    }
   ],
   "source": [
    "do_lower_case=True\n",
    "max_len=256\n",
    "device=torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "# bert_dir=\"./bert-pytorch-chinese/\"\n",
    "# vocab=\"bert-base-chinese-vocab.txt\"\n",
    "# config_file=\"bert_config.json\"\n",
    "bert_dir=\"./roberta-zh-wwm-pytorch/\"\n",
    "vocab=\"vocab.txt\"\n",
    "config_file=\"bert_config.json\"\n",
    "tokenizer=BertTokenizer.from_pretrained(os.path.join(bert_dir,vocab),do_lower_case=do_lower_case)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "code_folding": [
     0,
     16,
     30,
     46,
     56,
     79,
     95,
     121,
     125,
     130,
     136,
     138,
     249,
     298,
     325,
     351,
     381,
     412
    ]
   },
   "outputs": [],
   "source": [
    "def convert_text_to_ids(tokenizer, text, max_len=100):\n",
    "    if isinstance(text,str):\n",
    "        output=tokenizer.encode_plus(text,max_length=max_len,pad_to_max_length=True,return_tensors=\"pt\")\n",
    "        input_ids=output[\"input_ids\"].squeeze(0)\n",
    "        token_type_ids=output[\"token_type_ids\"].squeeze(0)\n",
    "        attention_mask=output[\"attention_mask\"].squeeze(0)\n",
    "    elif isinstance(text,list):\n",
    "        input_ids,token_type_ids,attention_mask=[],[],[]\n",
    "        for e in text:\n",
    "            output=tokenizer.encode_plus(e,max_length=max_len,pad_to_max_length=True,return_tensors=\"pt\")\n",
    "            input_ids.append(output[\"input_ids\"].squeeze(0))\n",
    "            token_type_ids.append(output[\"token_type_ids\"].squeeze(0))\n",
    "            attention_mask.append(output[\"attention_mask\"].squeeze(0))\n",
    "    else:\n",
    "        raise Exception('type error')\n",
    "    return torch.stack(input_ids).long(),torch.stack(token_type_ids).long(),torch.stack(attention_mask).long()        \n",
    "class RelDataset(Data.Dataset):\n",
    "    def __init__(self,examples):\n",
    "        self.input_ids=torch.stack([e['input_ids'] for e in examples]).long()\n",
    "        self.token_type_ids=torch.stack([e['token_type_ids'] for e in examples]).long()\n",
    "        self.attention_mask=torch.stack([e['attention_mask'] for e in examples]).long()\n",
    "        self.rel_label=torch.stack([e['rel_label'] for e in examples]).long()\n",
    "        self.postag=torch.stack([e['postag'] for e in examples]).long()\n",
    "        self.feature=torch.stack([e['feature'] for e in examples]).float()\n",
    "        self.token2docs=[e[\"token2doc\"] for e in examples]\n",
    "    def __len__(self):\n",
    "        return self.input_ids.shape[0]\n",
    "    def __getitem__(self,idx):\n",
    "        return self.input_ids[idx],self.attention_mask[idx],self.token_type_ids[idx],\\\n",
    "               self.rel_label[idx],self.postag[idx],self.feature[idx],self.token2docs[idx]  \n",
    "class NerDataset(Data.Dataset):\n",
    "    def __init__(self,examples):\n",
    "        self.input_ids=torch.stack([e['input_ids'] for e in examples]).long()\n",
    "        self.token_type_ids=torch.stack([e['token_type_ids'] for e in examples]).long()\n",
    "        self.attention_mask=torch.stack([e['attention_mask'] for e in examples]).long()\n",
    "        self.rel_label=torch.stack([e['rel_label'] for e in examples]).long()\n",
    "        self.labels=torch.stack([e['labels'] for e in examples]).long()\n",
    "        self.postag=torch.stack([e['postag'] for e in examples]).long()\n",
    "        self.feature=torch.stack([e['feature'] for e in examples]).float()\n",
    "        self.token2docs=[e[\"token2doc\"] for e in examples]\n",
    "    def __len__(self):\n",
    "        return self.input_ids.shape[0]\n",
    "    def __getitem__(self,idx):\n",
    "        return self.input_ids[idx],self.attention_mask[idx],self.token_type_ids[idx],\\\n",
    "               self.rel_label[idx],self.labels[idx],self.postag[idx],self.feature[idx],self.token2docs[idx]  \n",
    "import unicodedata\n",
    "def _is_whitespace(char):\n",
    "    \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n",
    "    # \\t, \\n, and \\r are technically contorl characters but we treat them\n",
    "    # as whitespace since they are generally considered as such.\n",
    "    if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n",
    "        return True\n",
    "    cat = unicodedata.category(char)\n",
    "    if cat == \"Zs\":\n",
    "        return True\n",
    "    return False\n",
    "def is_chinese_char(cp):\n",
    "    \"\"\"Checks whether CP is the codepoint of a CJK character.\"\"\"\n",
    "    # This defines a \"chinese character\" as anything in the CJK Unicode block:\n",
    "    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n",
    "    #\n",
    "    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n",
    "    # despite its name. The modern Korean Hangul alphabet is a different block,\n",
    "    # as is Japanese Hiragana and Katakana. Those alphabets are used to write\n",
    "    # space-separated words, so they are not treated specially and handled\n",
    "    # like the all of the other languages.\n",
    "    if (\n",
    "        (cp >= 0x4E00 and cp <= 0x9FFF)\n",
    "        or (cp >= 0x3400 and cp <= 0x4DBF)  #\n",
    "        or (cp >= 0x20000 and cp <= 0x2A6DF)  #\n",
    "        or (cp >= 0x2A700 and cp <= 0x2B73F)  #\n",
    "        or (cp >= 0x2B740 and cp <= 0x2B81F)  #\n",
    "        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #\n",
    "        or (cp >= 0xF900 and cp <= 0xFAFF)\n",
    "        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #\n",
    "    ):  #\n",
    "        return True\n",
    "\n",
    "    return False\n",
    "def is_punctuation(char):\n",
    "    \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n",
    "    cp = ord(char)\n",
    "    # We treat all non-letter/number ASCII as punctuation.\n",
    "    # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n",
    "    # Punctuation class but we treat them as punctuation anyways, for\n",
    "    # consistency.\n",
    "    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):\n",
    "        return True\n",
    "    cat = unicodedata.category(char)\n",
    "    if cat.startswith(\"P\"):\n",
    "        return True\n",
    "    return False\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.decomposition import TruncatedSVD\n",
    "\n",
    "def build_tfidf_svd_matrix(texts,n_output,tfidf_vec=None,svd=None):\n",
    "    corpus=[]\n",
    "    for text in tqdm(texts):\n",
    "#         print(text)\n",
    "        words=word_segment(str(text['text']))\n",
    "#         print(words)\n",
    "        use_words=[]\n",
    "        for word in words:\n",
    "            if word in stop_words:\n",
    "                continue\n",
    "            use_words.append(word)\n",
    "#         print(use_words)\n",
    "        corpus.append(\" \".join(use_words))\n",
    "    print(len(corpus))\n",
    "    print(corpus[0])\n",
    "    if tfidf_vec is None:\n",
    "        tfidf_vec=TfidfVectorizer()\n",
    "        tfidf_matrix=tfidf_vec.fit_transform(corpus)\n",
    "    else:\n",
    "        tfidf_matrix=tfidf_vec.transform(corpus)\n",
    "    if svd is None:\n",
    "        svd=TruncatedSVD(n_components=n_output,n_iter=7,random_state=42)\n",
    "        tf_idf_svd=svd.fit_transform(tfidf_matrix)\n",
    "    else:\n",
    "        tf_idf_svd=svd.transform(tfidf_matrix)\n",
    "    return tf_idf_svd,tfidf_vec,svd\n",
    "def word_segment(sentence):\n",
    "    words=jieba.cut(sentence)\n",
    "    return \",\".join(words).split(\",\")\n",
    "stop_words=set()\n",
    "def load_stopwords():\n",
    "    with open(\"./middle_data/stopwords.txt\",\"r\",encoding=\"UTF-8\") as r:\n",
    "        for line in r.readlines():\n",
    "            stop_words.add(line.strip())\n",
    "load_stopwords()\n",
    "def remove_stopwords(word_list):\n",
    "    res=[]\n",
    "    for word in word_lists:\n",
    "        if word not in stop_words:\n",
    "            res.append(word)\n",
    "    return ' '.join(res)\n",
    "def clean_text(string):\n",
    "    return string.replace(' ', '').replace('\\n', '').replace('\\u3000', '')\n",
    "def _convert_example_to_record(example,\n",
    "                               tokenizer):\n",
    "    if example.__contains__('spo_list'):\n",
    "        spo_list = example['spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.append(buff)\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.append(buff)\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels_list=[[0\n",
    "              for i in range(len(tokens))] for i in range(len(id2rels))]\n",
    "    rel_labels=[]\n",
    "    for spo in spo_list:\n",
    "        for spo_object in spo['object'].keys():\n",
    "            if not spo['predicate'] in special_rels:\n",
    "                rel_label=rels2id[spo[\"predicate\"]]\n",
    "            else:\n",
    "                rel_label=rels2id[spo[\"predicate\"]+\"_\"+spo_object] \n",
    "            labels = labels_list[rel_label] #复杂类的不同part还是不会被归到一个，以后再讲\n",
    "            label_subject = label2ids['B-SUB']\n",
    "            label_object = label2ids['B-OBJ']\n",
    "            subject_sub_tokens = tokenizer.tokenize(spo['subject'])\n",
    "            object_sub_tokens = tokenizer.tokenize(spo['object'][\n",
    "                spo_object])\n",
    "            forbidden_index = None\n",
    "            if len(subject_sub_tokens) > len(object_sub_tokens):\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        labels[index]=label_subject\n",
    "                        for i in range(len(subject_sub_tokens) - 1):\n",
    "                            labels[index + i + 1]=label_subject+1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index]=label_object\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_object+1\n",
    "                            break\n",
    "                        # check if labeled already\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                subject_sub_tokens):\n",
    "                            labels[index]=label_object\n",
    "                            for i in range(len(object_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_object+1\n",
    "                            break\n",
    "\n",
    "            else:\n",
    "                for index in range(\n",
    "                        len(tokens) - len(object_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            object_sub_tokens)] == object_sub_tokens:\n",
    "                        labels[index]=label_object\n",
    "                        for i in range(len(object_sub_tokens) - 1):\n",
    "                            labels[index + i + 1]=label_object+1\n",
    "                        forbidden_index = index\n",
    "                        break\n",
    "\n",
    "                for index in range(\n",
    "                        len(tokens) - len(subject_sub_tokens) + 1):\n",
    "                    if tokens[index:index + len(\n",
    "                            subject_sub_tokens)] == subject_sub_tokens:\n",
    "                        if forbidden_index is None:\n",
    "                            labels[index]=label_subject\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_subject+1\n",
    "                            break\n",
    "                        elif index < forbidden_index or index >= forbidden_index + len(\n",
    "                                object_sub_tokens):\n",
    "                            labels[index]=label_subject\n",
    "                            for i in range(len(subject_sub_tokens) - 1):\n",
    "                                labels[index + i + 1]=label_subject+1\n",
    "                            break\n",
    "            labels_list[rel_label]=labels\n",
    "            if rel_label not in rel_labels:\n",
    "                rel_labels.append(rel_label)\n",
    "\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,labels_list,rel_labels\n",
    "def _convert_example_to_cls_record(example,\n",
    "                               tokenizer):\n",
    "    if example.__contains__('spo_list'):\n",
    "        spo_list = example['spo_list']\n",
    "    else:\n",
    "        spo_list = []\n",
    "    text_raw = example['text']\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.append(buff)\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.append(buff)\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    #  find all entities and tag them with corresponding \"B\"/\"I\" labels\n",
    "    labels_list=[]\n",
    "    rel_labels=[]\n",
    "    for spo in spo_list:\n",
    "        for spo_object in spo['object'].keys():\n",
    "            labels = [0\n",
    "                  for i in range(len(tokens))]  # initialize tag\n",
    "            if not spo['predicate'] in special_rels:\n",
    "                rel_label=rels2id[spo[\"predicate\"]]\n",
    "            else:\n",
    "                rel_label=rels2id[spo[\"predicate\"]+\"_\"+spo_object] \n",
    "            rel_labels.append(rel_label)\n",
    "\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index,tokens,rel_labels\n",
    "def create_cls_example(tokens,rel,token2doc,postag,tfidf_svd,tokenizer):\n",
    "        if len(tokens)>max_len-2:\n",
    "                tokens=tokens[:(max_len-2)]\n",
    "                token2doc=[e[:(max_len-2)] if ind<2 else e for ind,e in enumerate(token2doc)]\n",
    "        tag=[postag[token2doc[0][idx]] for idx in range(len(tokens))]\n",
    "        tag=[pos2id_BIO[tokenizer.cls_token]]+tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        full_tokens=[tokenizer.cls_token]+tokens+[tokenizer.sep_token]\n",
    "        token_type_ids=[0]*len(full_tokens)\n",
    "        attention_mask=[1]*len(token_type_ids)\n",
    "        cur_len=len(full_tokens)\n",
    "        if cur_len<max_len:\n",
    "            full_tokens+=[tokenizer.pad_token]*(max_len-cur_len)\n",
    "            attention_mask+=[0]*(max_len-cur_len)\n",
    "            token_type_ids+=[0]*(max_len-cur_len)\n",
    "            tag+=[pos2id_BIO[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "        full_ids=tokenizer.convert_tokens_to_ids(full_tokens)\n",
    "        if len(rel)>0:\n",
    "            example={\"input_ids\":torch.tensor(full_ids,dtype=torch.long),\"token_type_ids\":torch.tensor(token_type_ids,dtype=torch.long),\\\n",
    "                    \"attention_mask\":torch.tensor(attention_mask,dtype=torch.long),\n",
    "                    \"rel_label\":(F.one_hot(torch.tensor(rel),len(id2rels)).sum(dim=0)!=0).long(),\n",
    "                     \"postag\":torch.tensor(tag).long(),\"feature\":torch.tensor(tfidf_svd).float(),\"token2doc\":token2doc}\n",
    "        else:\n",
    "            example={\"input_ids\":torch.tensor(full_ids,dtype=torch.long),\"token_type_ids\":torch.tensor(token_type_ids,dtype=torch.long),\\\n",
    "                    \"attention_mask\":torch.tensor(attention_mask,dtype=torch.long),\n",
    "                    \"rel_label\":(torch.zeros(len(id2rels))).long(),\\\n",
    "                     \"postag\":torch.tensor(tag).long(),\"feature\":torch.tensor(tfidf_svd).float(),\"token2doc\":token2doc}  \n",
    "        return example\n",
    "def create_example(tokens,rel,labels,token2doc,tokenizer,rel_text,tfidf_svd,postag,rel_postag,rel_token2doc):\n",
    "        tag=[postag[token2doc[0][idx]] for idx in range(len(tokens))]\n",
    "        tag=[pos2id_BIO[tokenizer.cls_token]]+tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        rel_tag=[rel_postag[rel_token2doc[idx]] for idx in range(len(rel_text))]\n",
    "        rel_tag=rel_tag+[pos2id_BIO[tokenizer.sep_token]]\n",
    "        tag=tag+rel_tag\n",
    "        second_token=rel_text\n",
    "        full_tokens=[[tokenizer.cls_token]+tokens+[tokenizer.sep_token],second_token+[tokenizer.sep_token]]\n",
    "        full_labels=[[label2ids[tokenizer.cls_token]]+labels+[label2ids[tokenizer.sep_token]],[label2ids[\"[category]\"]]*len(second_token)+[label2ids[tokenizer.sep_token]]]\n",
    "        token_type_ids=[0]*len(full_tokens[0])+[1]*len(full_tokens[1])\n",
    "        attention_mask=[1]*len(token_type_ids)\n",
    "        full_tokens=full_tokens[0]+full_tokens[1]\n",
    "        full_labels=full_labels[0]+full_labels[1]\n",
    "        cur_len=len(full_labels)\n",
    "        if cur_len<max_len:\n",
    "            full_tokens+=[tokenizer.pad_token]*(max_len-cur_len)\n",
    "            full_labels+=[label2ids[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "            attention_mask+=[0]*(max_len-cur_len)\n",
    "            token_type_ids+=[0]*(max_len-cur_len)\n",
    "            tag+=[pos2id_BIO[tokenizer.pad_token]]*(max_len-cur_len)\n",
    "        full_ids=tokenizer.convert_tokens_to_ids(full_tokens)\n",
    "        example={\"input_ids\":torch.tensor(full_ids,dtype=torch.long),\"token_type_ids\":torch.tensor(token_type_ids,dtype=torch.long),\\\n",
    "                \"attention_mask\":torch.tensor(attention_mask,dtype=torch.long),\"labels\":torch.tensor(full_labels,dtype=torch.long),\n",
    "                \"rel_label\":F.one_hot(torch.tensor(rel),num_classes=len(id2rels)),\"postag\":torch.tensor(tag).long(),\"feature\":torch.tensor(tfidf_svd).float(),\\\n",
    "                 \"token2doc\":token2doc}\n",
    "        return example\n",
    "def index_token(text_raw):\n",
    "    sub_text = []\n",
    "    buff = \"\"\n",
    "    for char in text_raw:\n",
    "        if is_chinese_char(ord(char)) or is_punctuation(char):\n",
    "            if buff != \"\":\n",
    "                sub_text.append(buff)\n",
    "                buff = \"\"\n",
    "            sub_text.append(char)\n",
    "        else:\n",
    "            buff += char\n",
    "    if buff != \"\":\n",
    "        sub_text.append(buff)\n",
    "    tok_to_orig_start_index = []\n",
    "    tok_to_orig_end_index = []\n",
    "    orig_to_tok_index = []\n",
    "    tokens = []\n",
    "    text_tmp = ''\n",
    "    for (i, token) in enumerate(sub_text):\n",
    "        orig_to_tok_index.append(len(tokens))\n",
    "        sub_tokens = tokenizer.tokenize(token)\n",
    "        text_tmp += token\n",
    "        for sub_token in sub_tokens:\n",
    "            tok_to_orig_start_index.append(len(text_tmp) - len(token))\n",
    "            tok_to_orig_end_index.append(len(text_tmp) - 1)\n",
    "            tokens.append(sub_token)\n",
    "        else:\n",
    "            continue\n",
    "        break\n",
    "    return tok_to_orig_start_index,tok_to_orig_end_index,orig_to_tok_index\n",
    "def find_subs_obs(token_label_pred,text):\n",
    "    subs=[]\n",
    "    sub_offsets=[]\n",
    "    obs=[]\n",
    "    ob_offsets=[]\n",
    "    index=0\n",
    "    while index<token_label_pred.shape[0]:\n",
    "        if token_label_pred[index]==3:\n",
    "            subs.append(text[index])\n",
    "            index+=1\n",
    "            while(index<token_label_pred.shape[0] and (token_label_pred[index]==4 or token_label_pred[index]==3)):\n",
    "                subs[-1]+=(text[index])\n",
    "                index+=1\n",
    "            sub_offsets.append([index-len(subs[-1]),index])\n",
    "            index-=1\n",
    "        if index<token_label_pred.shape[0] and token_label_pred[index]==1:\n",
    "            obs.append(text[index])\n",
    "            index+=1\n",
    "            while(index<token_label_pred.shape[0] and (token_label_pred[index]==2 or token_label_pred[index]==1)):\n",
    "                obs[-1]+=(text[index])\n",
    "                index+=1\n",
    "            ob_offsets.append([index-len(obs[-1]),index])\n",
    "            index-=1\n",
    "        index+=1\n",
    "    if len(subs)==0:\n",
    "        subs.append('')\n",
    "        sub_offsets.append([0,0])\n",
    "    if len(obs)==0:\n",
    "        obs.append('')\n",
    "        ob_offsets.append([0,0])\n",
    "    return subs,obs,sub_offsets,ob_offsets\n",
    "def make_output(doc2pred_ner_doc,simple_thre,complex_thre,text_data,pred_info):\n",
    "    for ind,info in tqdm(enumerate(doc2pred_ner_doc)):\n",
    "        rels={}\n",
    "        for idx in (info):\n",
    "            rels[pred_doc_rels[idx]]=idx\n",
    "        for idx in info:\n",
    "            token_label_pred=text_preds[idx]\n",
    "            label_pred=pred_doc_rels[idx]\n",
    "            text=text_data[ind]['text']\n",
    "            if label_pred in special_pass_idx:\n",
    "                continue\n",
    "            elif label_pred not in special_major_idx:\n",
    "                subs,obs,sub_offsets,ob_offsets=find_subs_obs(token_label_pred,text)\n",
    "                if (len(subs)*len(obs)<simple_thre) or len(subs)==1 or len(obs)==1:\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':sub}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                else:\n",
    "                    for j,ob in enumerate(obs):\n",
    "    #                     sub_offset=sub_offsets[i]\n",
    "    #                     min_dis,ob_index=100000,-1\n",
    "                        ob_offset=ob_offsets[j]\n",
    "                        min_dis,sub_index=100000,-1\n",
    "                        for i,sub in enumerate(subs):\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            if abs(sub_offsets[i][0]-ob_offset[0])<min_dis:\n",
    "                                sub_index=i\n",
    "                                min_dis=abs(sub_offsets[i][0]-ob_offset[0])\n",
    "                        if sub_index!=-1:\n",
    "                            if subs[sub_index]=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':subs[sub_index]}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            pred_info[ind]['spo_list'].append(rel_dict)\n",
    "            else:\n",
    "                affilate_labels=special_affilate_idx[label_pred]\n",
    "                subs,obs,sub_offsets,ob_offsets=find_subs_obs(token_label_pred,text)\n",
    "                subs2id=dict([(sub,i) for i,sub in enumerate(subs)])\n",
    "                subs_af=[[] for i in range(len(subs))]\n",
    "                for f in affilate_labels:\n",
    "                    if f in rels:\n",
    "                        af_idx=rels[f]\n",
    "                        af_token_label_pred=text_preds[af_idx]\n",
    "                        af_text=text_data[ind]['text']\n",
    "                        af_subs,af_obs,af_sub_offsets,af_ob_offsets=find_subs_obs(af_token_label_pred,af_text)\n",
    "                        for sub in af_subs:\n",
    "                            if sub in subs:\n",
    "                                subs_af[subs2id[sub]].append([af_obs,f,af_ob_offsets])\n",
    "                #现在只取第一个\n",
    "                if len(subs)*len(obs)<complex_thre :\n",
    "                    for i,sub in enumerate(subs):\n",
    "                        sub_offset=sub_offsets[i]\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':ob},'subject':sub}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            if len(subs_af[i])==0:\n",
    "                                    pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                            if len(subs_af[i])>=1:\n",
    "                                ob_offset=ob_offsets[j]\n",
    "                                for af_obs in subs_af[i]:\n",
    "                                    af_label_pred=af_obs[1]\n",
    "                                    af_min_dis,af_ob_index=100000,-1\n",
    "                                    for k,(af_ob,af_offset) in enumerate(zip(af_obs[0],af_obs[2])):\n",
    "                                        if af_ob=='':\n",
    "                                            continue\n",
    "                                        if abs(ob_offset[0]-af_offset[0])<af_min_dis:\n",
    "                                            af_ob_index=k\n",
    "                                            af_min_dis=abs(ob_offset[0]-af_offset[0])\n",
    "                                    if af_ob_index!=-1:\n",
    "                                        rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_obs[0][af_ob_index]})\n",
    "                                        rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                else:\n",
    "                    for i,sub in enumerate(subs):\n",
    "\n",
    "                        sub_offset=sub_offsets[i]\n",
    "                        min_dis,ob_index=100000,-1\n",
    "                        for j,ob in enumerate(obs):\n",
    "                            if sub=='' or ob=='':\n",
    "                                continue\n",
    "                            if abs(sub_offset[0]-ob_offsets[j][0])<min_dis:\n",
    "                                ob_index=j\n",
    "                                min_dis=abs(sub_offset[0]-ob_offsets[j][0])\n",
    "                        if ob_index!=-1:\n",
    "                            j=ob_index\n",
    "                            if subs[i]=='' or obs[j]=='':\n",
    "                                continue\n",
    "                            rel_dict={'object':{'@value':obs[j]},'subject':subs[i]}\n",
    "                            rel_dict.update(copy.deepcopy(id2schema[label_pred]))\n",
    "                            if len(subs_af[i])==0:\n",
    "                                    pred_info[ind]['spo_list'].append(rel_dict)\n",
    "                            if len(subs_af[i])>=1:\n",
    "                                for af_obs in subs_af[i]:\n",
    "                                    af_label_pred=af_obs[1]\n",
    "                                    rel_dict['object_type'].update(copy.deepcopy(id2schema[af_label_pred]['object_type']))\n",
    "                                    for af_ob in af_obs[0]:\n",
    "                                        rel_dict['object'].update({id2rels[af_label_pred].split(\"_\")[1]:af_ob})\n",
    "                                        break\n",
    "                                pred_info[ind]['spo_list'].append(rel_dict) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "code_folding": [
     1,
     6,
     11,
     17,
     22,
     28,
     35,
     42,
     46,
     67,
     76,
     107
    ]
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "171293\n",
      "191967\n",
      "201977\n",
      "loading dict...\n",
      "loading...\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "([('票房_@value', ['票房_inArea']),\n",
       "  ('上映时间_@value', ['上映时间_inArea']),\n",
       "  ('饰演_@value', ['饰演_inWork']),\n",
       "  ('配音_@value', ['配音_inWork']),\n",
       "  ('获奖_@value', ['获奖_inWork', '获奖_onDate', '获奖_period'])],\n",
       " ['上映时间_inArea',\n",
       "  '票房_inArea',\n",
       "  '获奖_inWork',\n",
       "  '配音_inWork',\n",
       "  '获奖_onDate',\n",
       "  '获奖_period',\n",
       "  '饰演_inWork'])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "text_data=[]\n",
    "with open(\"./dataset/train_data/train_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        text_data.append(json.loads(d))\n",
    "print(len(text_data))\n",
    "with open(\"./dataset/dev_data/dev_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        text_data.append(json.loads(d))\n",
    "print(len(text_data))\n",
    "with open(\"./dataset/sample_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        text_data.append(json.loads(d))\n",
    "print(len(text_data))\n",
    "test_text_data=[]\n",
    "with open(\"./dataset/test1_data/test1_data.json\",\"r\") as r:\n",
    "    raw_data=r.readlines()\n",
    "    for d in raw_data:\n",
    "        test_text_data.append(json.loads(d))\n",
    "schema=[]\n",
    "with open(\"./dataset/schema.json\",\"r\") as r:\n",
    "    raw_schema=r.readlines()\n",
    "    for d in raw_schema:\n",
    "        schema.append(json.loads(d))\n",
    "rels=set()\n",
    "special_rels=set()\n",
    "for e in schema:\n",
    "        if len(e['object_type'].keys())==1:\n",
    "            rels.add(e[\"predicate\"])\n",
    "        else:\n",
    "            special_rels.add(e[\"predicate\"])\n",
    "            for key in e['object_type'].keys():\n",
    "                rels.add(e['predicate']+\"_\"+key)\n",
    "if not os.path.exists(\"./dataset/dict.pk\"):\n",
    "    special_rels=list(special_rels)\n",
    "    id2rels=list(rels)\n",
    "    rels2id=dict([(rel,idx) for idx,rel in enumerate(id2rels)])\n",
    "    id2labels=[\"O\",\"B-OBJ\",\"I-OBJ\",\"B-SUB\",\"I-SUB\",\"[category]\",\"[SEP]\",\"[CLS]\",\"[PAD]\"]\n",
    "    label2ids=dict([ (label,idx) for idx,label in enumerate(id2labels)])\n",
    "    pk.dump([special_rels,id2rels,rels2id,id2labels,label2ids],open(\"./dataset/dict.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading dict...\")\n",
    "    special_rels,id2rels,rels2id,id2labels,label2ids=pk.load(open(\"./dataset/dict.pk\",\"rb\"))\n",
    "id2reltype=[[] for i in range(len(id2rels))]\n",
    "for e in schema:\n",
    "    if len(e['object_type'].keys())==1:\n",
    "        rel=e[\"predicate\"]\n",
    "        ids=rels2id[rel]\n",
    "        id2reltype[ids].append(e)\n",
    "    else:\n",
    "        for key in e['object_type'].keys():\n",
    "            rel=e['predicate']+\"_\"+key\n",
    "            ids=rels2id[rel]\n",
    "            temp_e=copy.deepcopy(e)\n",
    "            poped_keys=[]\n",
    "            for k in temp_e['object_type'].keys():\n",
    "                if k!=key:\n",
    "                    poped_keys.append(k)\n",
    "            for k in poped_keys:\n",
    "                 temp_e['object_type'].pop(k)\n",
    "            id2reltype[ids].append(temp_e)\n",
    "id2schema=[e[0] for e in id2reltype]\n",
    "id2rel_text=[[] for i in range(len(id2rels))]\n",
    "id2rel_rawtext=[[] for i in range(len(id2rels))]\n",
    "id2rel_token2text=[[] for i in range(len(id2rels))]\n",
    "for rel in range(len(id2rels)):\n",
    "    if id2rels[rel].split(\"_\")[0] not in special_rels:\n",
    "        cls_text=id2schema[rel]['subject_type']+\",\"+id2schema[rel]['predicate']+\",\"+id2schema[rel]['object_type']['@value']\n",
    "    else:\n",
    "        cls_text=id2schema[rel]['subject_type']+\",\"+id2schema[rel]['predicate']+\",\"+id2schema[rel]['object_type'][id2rels[rel].split(\"_\")[1]]\n",
    "    id2rel_text[rel]=tokenizer.tokenize(cls_text)\n",
    "    id2rel_rawtext[rel]=cls_text\n",
    "    id2rel_token2text[rel]=index_token(cls_text)[0]\n",
    "    assert len(id2rel_token2text[rel])==len(id2rel_text[rel])\n",
    "if not os.path.exists(\"./middle_data/rel_data_postag.pk\"):\n",
    "    jieba.enable_paddle() \n",
    "    jieba.enable_parallel(8)\n",
    "    rel_cut_words=[]\n",
    "    rel_cut_tags=[]\n",
    "    for idx in tqdm(range(len(id2rel_rawtext))):\n",
    "        words = pseg.lcut(id2rel_rawtext[idx],use_paddle=True) #jieba默认模式\n",
    "        new_words=[w for w,t in words]\n",
    "        new_tags=[t for w,t in words]\n",
    "        rel_cut_words.append([idx,new_words])\n",
    "        rel_cut_tags.append([idx,new_tags])\n",
    "    rel_cut_words=[e[1] for e in sorted(rel_cut_words,key=lambda x:x[0])]\n",
    "    rel_cut_tags=[e[1] for e in sorted(rel_cut_tags,key=lambda x:x[0])]\n",
    "    rel_data_postag=[]\n",
    "    for idx in tqdm(range(len(id2rel_rawtext))):\n",
    "        assert len(id2rel_rawtext[idx].strip())==len(\"\".join(rel_cut_words[idx]))\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in rel_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        pos_label=np.zeros(len(id2rel_rawtext[idx])).astype(np.int8)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (id2rel_rawtext[idx][b]==rel_cut_words[idx][i][0] or _is_whitespace(id2rel_rawtext[idx].strip()[b])\\\n",
    "                   or _is_whitespace(rel_cut_words[idx][i][0])) \\\n",
    "                    and (id2rel_rawtext[idx].strip()[e]==rel_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(id2rel_rawtext[idx].strip()[e])  or _is_whitespace(rel_cut_words[idx][i][-1]))\n",
    "            pos_label[b+1:e+1]=pos2id_BIO['I-'+rel_cut_tags[idx][i]]\n",
    "            pos_label[b]=pos2id_BIO['B-'+rel_cut_tags[idx][i]]\n",
    "        rel_data_postag.append(pos_label)\n",
    "    pk.dump(rel_data_postag,open(\"./middle_data/rel_data_postag.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    rel_data_postag=pk.load(open(\"./middle_data/rel_data_postag.pk\",\"rb\"))\n",
    "special_major_idx=[2,4,22,32,54]\n",
    "special_affilate_idx=[[] for i in range(len(id2rels))]\n",
    "special_affilate_idx[2]=[5]\n",
    "special_affilate_idx[4]=[0]\n",
    "special_affilate_idx[22]=[51]\n",
    "special_affilate_idx[32]=[8]\n",
    "special_affilate_idx[54]=[6,11,14]\n",
    "special_pass_idx=[0,5,6,8,11,14,51]\n",
    "[(id2rels[e],[id2rels[t] for t in special_affilate_idx[e]]) for e in special_major_idx],[id2rels[e] for e in special_pass_idx]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/29/2020 23:33:54 - INFO - gensim.utils -   loading Word2VecKeyedVectors object from ./Tencent_ChineseEmbedding/ChineseEmbedding.bin\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/29/2020 23:34:16 - INFO - gensim.utils -   loading vectors from ./Tencent_ChineseEmbedding/ChineseEmbedding.bin.vectors.npy with mmap=r\n",
      "04/29/2020 23:34:16 - INFO - gensim.utils -   setting ignored attribute vectors_norm to None\n",
      "04/29/2020 23:34:16 - INFO - gensim.utils -   loaded ./Tencent_ChineseEmbedding/ChineseEmbedding.bin\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists('./Tencent_ChineseEmbedding/ChineseEmbedding.bin'):\n",
    "    file = './Tencent_ChineseEmbedding/Tencent_AILab_ChineseEmbedding.txt'\n",
    "    wv_from_text = KeyedVectors.load_word2vec_format(file, binary=False)\n",
    "    wv_from_text.init_sims(replace=True)\n",
    "    # 重新保存加载变量为二进制形式\n",
    "    wv_from_text.save('./Tencent_ChineseEmbedding/ChineseEmbedding.bin')\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    wv_from_text = gensim.models.KeyedVectors.load('./Tencent_ChineseEmbedding/ChineseEmbedding.bin', mmap='r')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading extra tokens vector file...\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists(\"./Tencent_ChineseEmbedding/extra_embedding.npy\"):\n",
    "    word2vec_dims=wv_from_text.vectors.shape[-1]\n",
    "    pad_vec=np.zeros([1,word2vec_dims],dtype=np.float32)\n",
    "    other_token_vec=np.random.normal(scale=0.02,size=[3,word2vec_dims])\n",
    "    extra_token_vec=np.concatenate([pad_vec,other_token_vec])\n",
    "    np.save(\"./Tencent_ChineseEmbedding/extra_embedding.npy\",extra_token_vec)\n",
    "else:\n",
    "    print(\"loading extra tokens vector file...\")\n",
    "    extra_token_vec=np.load(\"./Tencent_ChineseEmbedding/extra_embedding.npy\")\n",
    "word2vec=np.concatenate([extra_token_vec,wv_from_text.vectors])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "code_folding": [
     15
    ]
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    }
   ],
   "source": [
    "# if not os.path.exists('./Tencent_ChineseEmbedding/usr_dict.txt'):\n",
    "#     keys=[]\n",
    "#     for key in tqdm(wv_from_text.vocab.keys()):\n",
    "#         keys.append(key)\n",
    "#     with open(\"./Tencent_ChineseEmbedding/usr_dict.txt\",\"w\") as w:\n",
    "#         for key in tqdm(keys):\n",
    "#             w.write(key+\"\\n\")\n",
    "# else:\n",
    "#     print(\"loading...\")\n",
    "#     keys=[]\n",
    "#     with open(\"./Tencent_ChineseEmbedding/usr_dict.txt\",\"r\") as r:\n",
    "#         for key in tqdm(r.readlines()):\n",
    "#             keys.append(key[:-1])\n",
    "import jieba\n",
    "# jieba.load_userdict(\"./Tencent_ChineseEmbedding/usr_dict.txt\")\n",
    "if not os.path.exists(\"./Tencent_ChineseEmbedding/word_id.pk\"):\n",
    "    word2id={}\n",
    "    id2word=[[] for i in range(word2vec.shape[0])]\n",
    "    for i,key in tqdm(enumerate(keys)):\n",
    "        assert (word2vec[4+i]==wv_from_text[key]).all()\n",
    "        word2id[key]=4+i\n",
    "        id2word[4+i]=key\n",
    "    id2word[0]=tokenizer.pad_token\n",
    "    word2id[tokenizer.pad_token]=0\n",
    "    id2word[1]=tokenizer.unk_token\n",
    "    word2id[tokenizer.unk_token]=1\n",
    "    id2word[2]=tokenizer.cls_token\n",
    "    word2id[tokenizer.cls_token]=2\n",
    "    id2word[3]=tokenizer.sep_token\n",
    "    word2id[tokenizer.sep_token]=3\n",
    "    pk.dump([word2id,id2word],open(\"./Tencent_ChineseEmbedding/word_id.pk\",\"wb\"),protocol = 4)\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    word2id,id2word=pk.load(open(\"./Tencent_ChineseEmbedding/word_id.pk\",\"rb\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "code_folding": [
     0
    ]
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:21: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "655c4fd64d2b45d7b4d91a2bc956be26",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=201977), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists(\"./middle_data/train_wordvec.pk\"):\n",
    "    jieba.enable_parallel(8)\n",
    "    train_cut_words=[]\n",
    "    train_cut_vecs=[]\n",
    "    for idx in tqdm(range(len(text_data))):\n",
    "        word_list=jieba.lcut(text_data[idx]['text'],HMM=False,use_paddle=False)\n",
    "        wvid_list=[ word2id.get(e,1)  for e in word_list ]\n",
    "        train_cut_words.append([idx,word_list])\n",
    "        train_cut_vecs.append([idx,wvid_list])\n",
    "    train_cut_words=[e[1] for e in sorted(train_cut_words,key=lambda x:x[0])]\n",
    "    train_cut_vecs=[e[1] for e in sorted(train_cut_vecs,key=lambda x:x[0])]\n",
    "#     train_cut_wvs=[]\n",
    "#     for e in tqdm(train_cut_vecs):\n",
    "#         train_cut_wvs.append(np.array([word2vec[t] for t in e]))\n",
    "    pk.dump([train_cut_words,train_cut_vecs],open(\"./middle_data/train_wordvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    train_cut_words,train_cut_vecs=pk.load(open(\"./middle_data/train_wordvec.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/train_tokenvec.pk\"):\n",
    "    text_data_tokenvec=[]\n",
    "    for idx in tqdm(range(len(text_data))):\n",
    "        assert len(text_data[idx]['text'])==len(\"\".join(train_cut_words[idx]))\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in train_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        vec_label=np.zeros((len(text_data[idx]['text']))).astype(np.int64)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (text_data[idx]['text'][b]==train_cut_words[idx][i][0] or _is_whitespace(text_data[idx]['text'][b])\\\n",
    "                   or _is_whitespace(train_cut_words[idx][i][0])) \\\n",
    "                    and (text_data[idx]['text'][e]==train_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(text_data[idx]['text'][e])  or _is_whitespace(train_cut_words[idx][i][-1]))\n",
    "            vec_label[b:e+1]=train_cut_vecs[idx][i]\n",
    "        text_data_tokenvec.append(vec_label)\n",
    "    pk.dump(text_data_tokenvec,open(\"./middle_data/train_tokenvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    text_data_tokenvec=pk.load(open(\"./middle_data/train_tokenvec.pk\",\"rb\"))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "code_folding": [
     0
    ]
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:5: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "804163e8b07b4e81b81399c88d4c7c61",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=10468), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/huangweilin/anaconda3/envs/fjw/lib/python3.6/site-packages/ipykernel_launcher.py:18: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0\n",
      "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6a0e433a51e047c6be6caa50ea31dba2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=10468), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "if not os.path.exists(\"./middle_data/test_wordvec.pk\"):\n",
    "    jieba.enable_parallel(8)\n",
    "    test_cut_words=[]\n",
    "    test_cut_vecs=[]\n",
    "    for idx in tqdm(range(len(test_text_data))):\n",
    "        word_list=jieba.lcut(test_text_data[idx]['text'],HMM=False,use_paddle=False)\n",
    "        wvid_list=[ word2id.get(e,1)  for e in word_list ]\n",
    "        test_cut_words.append([idx,word_list])\n",
    "        test_cut_vecs.append([idx,wvid_list])\n",
    "    test_cut_words=[e[1] for e in sorted(test_cut_words,key=lambda x:x[0])]\n",
    "    test_cut_vecs=[e[1] for e in sorted(test_cut_vecs,key=lambda x:x[0])]\n",
    "    pk.dump([test_cut_words,test_cut_vecs],open(\"./middle_data/test_wordvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_cut_words,test_cut_vecs=pk.load(open(\"./middle_data/test_wordvec.pk\",\"rb\"))\n",
    "if not os.path.exists(\"./middle_data/test_tokenvec.pk\"):\n",
    "    test_text_data_tokenvec=[]\n",
    "    for idx in tqdm(range(len(test_text_data))):\n",
    "        assert len(test_text_data[idx]['text'])==len(\"\".join(test_cut_words[idx]))\n",
    "        indexs=[]\n",
    "        cur_length=0\n",
    "        for e in test_cut_words[idx]:\n",
    "            indexs.append([cur_length,cur_length+len(e)-1])\n",
    "            cur_length+=len(e)\n",
    "        vec_label=np.zeros((len(test_text_data[idx]['text']),)).astype(np.int64)\n",
    "        for i,(b,e) in enumerate(indexs):\n",
    "            assert (test_text_data[idx]['text'][b]==test_cut_words[idx][i][0] or _is_whitespace(test_text_data[idx]['text'][b])\\\n",
    "                   or _is_whitespace(test_cut_words[idx][i][0])) \\\n",
    "                    and (test_text_data[idx]['text'][e]==test_cut_words[idx][i][-1] \\\n",
    "                         or _is_whitespace(test_text_data[idx]['text'][e])  or _is_whitespace(test_cut_words[idx][i][-1]))\n",
    "            vec_label[b:e+1]=test_cut_vecs[idx][i]\n",
    "        test_text_data_tokenvec.append(vec_label)\n",
    "    pk.dump(test_text_data_tokenvec,open(\"./middle_data/test_tokenvec.pk\",\"wb\"))\n",
    "else:\n",
    "    print(\"loading...\")\n",
    "    test_text_data_tokenvec=pk.load(open(\"./middle_data/test_tokenvec.pk\",\"rb\"))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "fjw",
   "language": "python",
   "name": "fjw"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
