{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Additional (non-standard) packages required by this notebook:\n",
     "- keras_bert\n",
     "- textdistance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "#! -*- coding:utf-8 -*-\n",
     "# Standard-library imports.\n",
     "import os\n",
     "import re\n",
     "import gc\n",
     "import sys\n",
     "import json\n",
     "import codecs\n",
     "import random\n",
     "import warnings\n",
     "# Third-party scientific stack.\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "import textdistance\n",
     "from tqdm import tqdm\n",
     "import tensorflow as tf\n",
     "from random import choice\n",
     "import matplotlib.pyplot as plt\n",
     "from collections import Counter\n",
     "from sklearn.metrics import f1_score, accuracy_score\n",
     "from sklearn.model_selection import KFold\n",
     "from sklearn.preprocessing import LabelEncoder\n",
     "from sklearn.model_selection import StratifiedKFold\n",
     "from sklearn.model_selection import train_test_split\n",
     "\n",
     "# Keras / BERT. NOTE(review): star imports pollute the namespace (Input, Dense,\n",
     "# Lambda, EarlyStopping, ... all come from these two lines); kept as-is.\n",
     "import keras.backend as K\n",
     "from keras.layers import *\n",
     "from keras.callbacks import *\n",
     "from keras.models import Model\n",
     "from keras.optimizers import Adam\n",
     "from keras.initializers import glorot_uniform\n",
     "from keras_bert import load_trained_model_from_checkpoint, Tokenizer\n",
     "\n",
     "from keras_tqdm import TQDMNotebookCallback\n",
     "\n",
     "# Enable Series/DataFrame.progress_apply and fix all RNG seeds for reproducibility.\n",
     "tqdm.pandas()\n",
     "seed = 2019\n",
     "random.seed(seed)\n",
     "tf.set_random_seed(seed)\n",
     "np.random.seed(seed)\n",
     "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 尝试数据增强 (data-augmentation experiments — references below)\n",
    "- https://zhuanlan.zhihu.com/p/74514486?utm_source=qq&utm_medium=social&utm_oi=909699489177157632\n",
    "- https://arxiv.org/pdf/1906.06045.pdf\n",
    "- https://arxiv.org/pdf/1901.11196.pdf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10556/10556 [05:19<00:00, 33.09it/s] \n",
      "100%|██████████| 9997/9997 [06:10<00:00, 26.98it/s]\n",
      "100%|██████████| 10556/10556 [00:00<00:00, 35681.79it/s]\n",
      "100%|██████████| 9997/9997 [00:00<00:00, 39985.29it/s]\n",
      "100%|██████████| 10556/10556 [00:00<00:00, 22468.28it/s]\n",
      "100%|██████████| 9997/9997 [00:00<00:00, 21368.82it/s]\n",
      "100%|██████████| 10556/10556 [00:00<00:00, 19836.12it/s]\n",
      "100%|██████████| 9997/9997 [00:00<00:00, 22107.09it/s]\n",
      "100%|██████████| 10556/10556 [00:00<?, ?it/s]\n",
      "100%|██████████| 9997/9997 [00:00<00:00, 638667.80it/s]\n",
      "100%|██████████| 10556/10556 [00:00<00:00, 675666.48it/s]\n",
      "100%|██████████| 9997/9997 [00:00<00:00, 640237.85it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "samples's num with empty entity: 0\n"
     ]
    }
   ],
   "source": [
    "data_path = './data/'\n",
    "train = pd.read_csv(data_path + 'Round2_train.csv', encoding='utf-8')\n",
    "test = pd.read_csv(data_path + 'round2_test.csv', encoding='utf-8')\n",
    "\n",
    "train_preliminary = pd.read_csv('../../RoundOne/FinanceInformation/data/Train_Data.csv', encoding='utf-8')\n",
    "train = pd.concat([train, train_preliminary], axis=0, ignore_index=True)\n",
    "train = train.drop_duplicates(['title', 'text', 'entity', 'negative', 'key_entity'])\n",
    "\n",
    "def duplicate_entity(entity):\n",
    "    def is_empty(x):\n",
    "        return (x != '') & (x != ' ')\n",
    "\n",
    "    if entity is np.nan:\n",
    "        return entity\n",
    "    else:\n",
    "        entity = filter(is_empty, entity.split(';'))\n",
    "        return ';'.join(list(set(entity)))\n",
    "\n",
    "train['entity'] = train['entity'].apply(lambda index: duplicate_entity(index))\n",
    "test['entity'] = test['entity'].apply(lambda index: duplicate_entity(index))\n",
    "\n",
    "test['text'] = test.apply(lambda index: index.title if index.text is np.nan else index.text, axis=1)\n",
    "\n",
    "train.fillna('', inplace=True)\n",
    "train['title'] = train['title'].map(lambda index: index.replace(' ', ''))\n",
    "train['text'] = train['text'].map(lambda index: index.replace(' ', ''))\n",
    "train['title_len'] = train['title'].map(lambda index: len(index))\n",
    "train['text_len'] = train['text'].map(lambda index: len(index))\n",
    "\n",
    "test.fillna('', inplace=True)\n",
    "test['title'] = test['title'].map(lambda index: index.replace(' ', ''))\n",
    "test['text'] = test['text'].map(lambda index: index.replace(' ', ''))\n",
    "test['title_len'] = test['title'].map(lambda index: len(index))\n",
    "test['text_len'] = test['text'].map(lambda index: len(index))\n",
    "\n",
    "distance = textdistance.Levenshtein(external=False)\n",
    "train['distance'] = train.progress_apply(lambda index: distance(index.title, index.text), axis=1)   # distance   similarity\n",
    "test['distance'] = test.progress_apply(lambda index: distance(index.title, index.text), axis=1)   # distance   similarity\n",
    "\n",
    "train['title_in_text'] = train.progress_apply(lambda index: 1 if index.text.find(index.title) != -1 else 0, axis=1)\n",
    "test['title_in_text'] = test.progress_apply(lambda index: 1 if index.text.find(index.title) != -1 else 0, axis=1)\n",
    "\n",
    "train['text'] = train.progress_apply(lambda index: index.title + ';' + index.text if (index.title_len != 0) & (index.distance > 200) & (index.title_in_text != 1) else index.text, axis=1)\n",
    "test['text'] = test.progress_apply(lambda index: index.title + ';' + index.text if (index.title_len != 0) & (index.distance > 200) & (index.title_in_text != 1) else index.text, axis=1)\n",
    "\n",
    "train['text'] = train.progress_apply(lambda index: index.title + ';' + index.text if index.title_len + index.text_len < 512 else index.text, axis=1)\n",
    "test['text'] = test.progress_apply(lambda index: index.title + ';' + index.text if index.title_len + index.text_len < 512 else index.text, axis=1)\n",
    "\n",
    "train.drop(['title_len', 'distance', 'title_in_text', 'text_len'], axis=1, inplace=True)\n",
    "test.drop(['title_len', 'distance', 'title_in_text', 'text_len'], axis=1, inplace=True)\n",
    "\n",
    "train['entity_len'] = train['entity'].progress_apply(lambda index: len(index))\n",
    "test['entity_len'] = test['entity'].progress_apply(lambda index: len(index))\n",
    "\n",
    "# 替换实体链长度超过512的样本\n",
    "train['entity'] = train.apply(lambda index: '' if index.entity_len > 509 else index.entity, axis=1)\n",
    "test['entity'] = test.apply(lambda index: '' if index.entity_len > 509 else index.entity, axis=1)\n",
    "\n",
    "train['entity_len'] = train['entity'].progress_apply(lambda index: len(index))\n",
    "test['entity_len'] = test['entity'].progress_apply(lambda index: len(index))\n",
    "\n",
    "\n",
    "# 增加实体替换\n",
    "count = 0\n",
    "def get_content(x,y):\n",
    "    global count\n",
    "    try:\n",
    "        if y == '':  # y == ''  ??  str(y)=='nan'\n",
    "            return x\n",
    "        y=y.split(\";\")\n",
    "        y = sorted(y, key=lambda i:len(i),reverse=True)\n",
    "        for i in y:\n",
    "            x = '实体词'.join(x.split(i))\n",
    "        return x\n",
    "    except:\n",
    "        count += 1\n",
    "        return y\n",
    "\n",
    "print(\"samples's num with empty entity:\", count)\n",
    "train['text'] = list(map(lambda x,y: get_content(x,y),train['text'], train['entity']))\n",
    "test['text'] = list(map(lambda x,y: get_content(x,y),test['text'], test['entity']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "# Exploratory scratch (intentionally disabled): key-entity length / count statistics.\n",
     "# train['entity_len'] = train['key_entity'].apply(lambda index: len(index))\n",
     "# train['entity_num'] = train['key_entity'].apply(lambda index: index.count(';') + 1)\n",
     "# train[train['entity_len'] > 512]\n",
     "# train['entity_num'].max()\n",
     "\n",
     "# train['entity_len'].value_counts().reset_index().sort_values(['index'], ascending=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "maxlen = 509\n",
     "bert_path = 'E:/chinese_wwm_ext_L-12_H-768_A-12/'     # chinese_L-12_H-768_A-12    chinese_wwm_ext_L-12_H-768_A-12\n",
     "\n",
     "config_path = bert_path + 'bert_config.json'\n",
     "# checkpoint_path = '/export/home/liuyuzhong/kaggle/bert/chinese_L-12_H-768_A-12/bert_model.ckpt'\n",
     "checkpoint_path = bert_path + 'bert_model.ckpt'\n",
     "dict_path = bert_path + 'vocab.txt'\n",
     "\n",
     "# Build the BERT vocabulary: token -> sequential integer id.\n",
     "token_dict = {}\n",
     "with codecs.open(dict_path, 'r', 'utf8') as reader:\n",
     "    for line in reader:\n",
     "        token = line.strip()\n",
     "        token_dict[token] = len(token_dict)  # assign each token a sequential id\n",
     "\n",
     "class OurTokenizer(Tokenizer):\n",
     "    # Character-level tokenizer: never splits or merges characters.\n",
     "    def _tokenize(self, text):\n",
     "        R = []\n",
     "        for c in text:\n",
     "            if c in self._token_dict:\n",
     "                R.append(c)\n",
     "            elif self._is_space(c):\n",
     "                R.append('[unused1]') # represent whitespace with the untrained [unused1] token\n",
     "            else:\n",
     "                R.append('[UNK]') # all remaining characters map to [UNK]\n",
     "        return R\n",
     "\n",
     "tokenizer = OurTokenizer(token_dict)\n",
     "\n",
     "def seq_padding(X, padding=0):\n",
     "    # Right-pad each sequence in X with `padding` up to the longest length in the batch.\n",
     "    L = [len(x) for x in X]\n",
     "    ML = max(L)\n",
     "    return np.array([np.concatenate([x, [padding] * (ML - len(x))]) if len(x) < ML else x for x in X])\n",
     "\n",
     "class data_generator:\n",
     "    # Infinite batch generator over (entity, entity_len, text, one-hot label) rows.\n",
     "    def __init__(self, data, batch_size=4, shuffle=True):    # alternative batch_size: 8\n",
     "        self.data = data\n",
     "        self.batch_size = batch_size\n",
     "        self.shuffle = shuffle\n",
     "        self.steps = len(self.data) // self.batch_size\n",
     "        if len(self.data) % self.batch_size != 0:\n",
     "            self.steps += 1\n",
     "    def __len__(self):\n",
     "        return self.steps\n",
     "    def __iter__(self):\n",
     "        # Loops forever, as fit_generator expects; yields ([token_ids, segment_ids], labels).\n",
     "        while True:\n",
     "            idxs = list(range(len(self.data)))\n",
     "            \n",
     "            if self.shuffle:\n",
     "                np.random.shuffle(idxs)\n",
     "            \n",
     "            X1, X2, Y = [], [], []\n",
     "            for i in idxs:\n",
     "                d = self.data[i]\n",
     "                first_text = d[0]\n",
     "                # Truncate the text so entity (d[1] chars) + text stay within maxlen.\n",
     "                second_text = d[2][:maxlen - d[1]]\n",
     "                x1, x2 = tokenizer.encode(first=first_text, second=second_text)   # , max_len=512\n",
     "                y = d[3]\n",
     "                X1.append(x1)\n",
     "                X2.append(x2)\n",
     "                Y.append([y])\n",
     "                # Emit a batch when full, or at the end of the epoch (possibly short).\n",
     "                if len(X1) == self.batch_size or i == idxs[-1]:\n",
     "                    X1 = seq_padding(X1)\n",
     "                    X2 = seq_padding(X2, padding=0)\n",
     "                    Y = seq_padding(Y)\n",
     "                    yield [X1, X2], Y[:, 0, :]\n",
     "                    [X1, X2, Y] = [], [], []\n",
     "\n",
     "from keras.metrics import top_k_categorical_accuracy\n",
     "from keras.metrics import categorical_accuracy\n",
     "\n",
     "def acc_top2(y_true, y_pred):\n",
     "    # Top-2 accuracy; defined but not wired into model.compile below.\n",
     "    return top_k_categorical_accuracy(y_true, y_pred, k=2)\n",
     "\n",
     "\n",
     "def f1_metric(y_true, y_pred):\n",
     "    # F1 computed on Keras tensors per batch (Keras averages it across batches).\n",
     "    def recall(y_true, y_pred):\n",
     "        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n",
     "        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n",
     "        recall = true_positives / (possible_positives + K.epsilon())\n",
     "        return recall\n",
     "\n",
     "    def precision(y_true, y_pred):\n",
     "        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n",
     "        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n",
     "        precision = true_positives / (predicted_positives + K.epsilon())\n",
     "        return precision\n",
     "    precision = precision(y_true, y_pred)\n",
     "    recall = recall(y_true, y_pred)\n",
     "    return 2*((precision*recall)/(precision+recall+K.epsilon()))\n",
     "                    \n",
     "\n",
     "def build_bert(nclass):\n",
     "    # Softmax classifier on the [CLS] vector of pre-trained BERT; all BERT layers fine-tuned.\n",
     "    bert_model = load_trained_model_from_checkpoint(config_path, checkpoint_path, seq_len=None)\n",
     "\n",
     "    for l in bert_model.layers:\n",
     "#         print(l)\n",
     "        l.trainable = True\n",
     "\n",
     "    x1_in = Input(shape=(None,))\n",
     "    x2_in = Input(shape=(None,))\n",
     "\n",
     "    x = bert_model([x1_in, x2_in])\n",
     "    x = Lambda(lambda x: x[:, 0])(x)  # take the [CLS] position embedding\n",
     "    p = Dense(nclass, activation='softmax', kernel_initializer=glorot_uniform(seed=seed))(x)\n",
     "\n",
     "    model = Model([x1_in, x2_in], p)\n",
     "    model.compile(loss='categorical_crossentropy', \n",
     "                  optimizer=Adam(1e-5),                # lr: 5e-5   3e-5   2e-5    epoch: 3, 4    batch_size: 16, 32    \n",
     "                  metrics=['accuracy', f1_metric])\n",
     "    print(model.summary())\n",
     "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "from keras.utils import to_categorical\n",
    "\n",
    "DATA_LIST = []\n",
    "for data_row in train.iloc[:].itertuples():\n",
    "    DATA_LIST.append((data_row.entity, data_row.entity_len, data_row.text, to_categorical(data_row.negative, 2)))\n",
    "DATA_LIST = np.array(DATA_LIST)\n",
    "\n",
    "DATA_LIST_TEST = []\n",
    "for data_row in test.iloc[:].itertuples():\n",
    "    DATA_LIST_TEST.append((data_row.entity, data_row.entity_len, data_row.text, to_categorical(0, 2)))\n",
    "DATA_LIST_TEST = np.array(DATA_LIST_TEST)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def run_cv(nfold, data, data_label, data_test):\n",
     "    \"\"\"Stratified K-fold BERT fine-tuning.\n",
     "\n",
     "    Returns (out-of-fold train probabilities, test probabilities SUMMED over\n",
     "    folds — divide by nfold for the mean). NOTE(review): `data_label` is\n",
     "    unused; stratification reads the global train['negative'] instead.\n",
     "    \"\"\"\n",
     "    kf = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed).split(data, train['negative'])\n",
     "    train_model_pred = np.zeros((len(data), 2))\n",
     "    test_model_pred = np.zeros((len(data_test), 2))\n",
     "\n",
     "    for i, (train_fold, test_fold) in enumerate(kf):\n",
     "        X_train, X_valid, = data[train_fold, :], data[test_fold, :]\n",
     "\n",
     "        model = build_bert(2)\n",
     "        # Halve LR / stop early when val accuracy plateaus; keep only each fold's best weights.\n",
     "        early_stopping = EarlyStopping(monitor='val_acc', patience=3)\n",
     "        plateau = ReduceLROnPlateau(monitor=\"val_acc\", verbose=1, mode='max', factor=0.5, patience=1)\n",
     "        checkpoint = ModelCheckpoint('./model/' + str(i) + '.hdf5', monitor='val_acc', \n",
     "                                         verbose=2, save_best_only=True, mode='max',save_weights_only=True)\n",
     "        \n",
     "        train_D = data_generator(X_train, shuffle=True)\n",
     "        valid_D = data_generator(X_valid, shuffle=False)\n",
     "        test_D = data_generator(data_test, shuffle=False)\n",
     "\n",
     "        model.fit_generator(\n",
     "            train_D.__iter__(),\n",
     "            steps_per_epoch=len(train_D),   # one full pass over train_D per epoch\n",
     "            epochs=5,\n",
     "            validation_data=valid_D.__iter__(),\n",
     "            validation_steps=len(valid_D),\n",
     "            callbacks=[TQDMNotebookCallback(), early_stopping, plateau, checkpoint],\n",
     "            verbose=2\n",
     "        )\n",
     "        \n",
     "        # Restore the best checkpoint of this fold before predicting.\n",
     "        model.load_weights('./model/' + str(i) + '.hdf5')\n",
     "        \n",
     "#         # return model\n",
     "#         train_model_pred[test_fold, :] =  model.predict_generator(valid_D.__iter__(), steps=len(valid_D),verbose=0)\n",
     "#         test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D),verbose=0)\n",
     "        \n",
     "        # Out-of-fold predictions + per-fold metrics; test predictions accumulate (sum) across folds.\n",
     "        val = model.predict_generator(valid_D.__iter__(), steps=len(valid_D),verbose=0)\n",
     "        train_model_pred[test_fold, :] = val\n",
     "        print('{}th f1_score:{}'.format(i+1, f1_score(train['negative'].values[test_fold], [np.argmax(index) for index in val])))\n",
     "        print('{}th accuracy:{}'.format(i+1, accuracy_score(train['negative'].values[test_fold], [np.argmax(index) for index in val])))\n",
     "        test_model_pred += model.predict_generator(test_D.__iter__(), steps=len(test_D),verbose=0)\n",
     "        \n",
     "        # Free GPU memory between folds (Keras sessions persist otherwise).\n",
     "        del model; gc.collect()\n",
     "        K.clear_session()\n",
     "        \n",
     "    return train_model_pred, test_model_pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "model_2 (Model)                 (None, None, 768)    101677056   input_1[0][0]                    \n",
      "                                                                 input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "lambda_1 (Lambda)               (None, 768)          0           model_2[1][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 2)            1538        lambda_1[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 101,678,594\n",
      "Trainable params: 101,678,594\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n",
      "None\n",
      "Epoch 1/5\n",
      " - 485s - loss: 0.1425 - acc: 0.9491 - f1_metric: 0.9491 - val_loss: 0.1385 - val_acc: 0.9536 - val_f1_metric: 0.9536\n",
      "\n",
      "Epoch 00001: val_acc improved from -inf to 0.95360, saving model to ./model/0.hdf5\n",
      "Epoch 2/5\n",
      " - 476s - loss: 0.0701 - acc: 0.9739 - f1_metric: 0.9739 - val_loss: 0.1212 - val_acc: 0.9621 - val_f1_metric: 0.9621\n",
      "\n",
      "Epoch 00002: val_acc improved from 0.95360 to 0.96212, saving model to ./model/0.hdf5\n",
      "Epoch 3/5\n",
      " - 475s - loss: 0.0413 - acc: 0.9846 - f1_metric: 0.9846 - val_loss: 0.1764 - val_acc: 0.9536 - val_f1_metric: 0.9536\n",
      "\n",
      "Epoch 00003: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-06.\n",
      "\n",
      "Epoch 00003: val_acc did not improve from 0.96212\n",
      "Epoch 4/5\n",
      " - 471s - loss: 0.0147 - acc: 0.9953 - f1_metric: 0.9953 - val_loss: 0.2763 - val_acc: 0.9508 - val_f1_metric: 0.9508\n",
      "\n",
      "Epoch 00004: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-06.\n",
      "\n",
      "Epoch 00004: val_acc did not improve from 0.96212\n",
      "Epoch 5/5\n",
      " - 471s - loss: 0.0065 - acc: 0.9976 - f1_metric: 0.9976 - val_loss: 0.2092 - val_acc: 0.9635 - val_f1_metric: 0.9635\n",
      "\n",
      "Epoch 00005: val_acc improved from 0.96212 to 0.96354, saving model to ./model/0.hdf5\n",
      "\n",
      "1th f1_score:0.970976253298153\n",
      "1th accuracy:0.9635416666666666\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "model_2 (Model)                 (None, None, 768)    101677056   input_1[0][0]                    \n",
      "                                                                 input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "lambda_1 (Lambda)               (None, 768)          0           model_2[1][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 2)            1538        lambda_1[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 101,678,594\n",
      "Trainable params: 101,678,594\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n",
      "None\n",
      "Epoch 1/5\n",
      " - 482s - loss: 0.1543 - acc: 0.9435 - f1_metric: 0.9435 - val_loss: 0.1091 - val_acc: 0.9650 - val_f1_metric: 0.9650\n",
      "\n",
      "Epoch 00001: val_acc improved from -inf to 0.96496, saving model to ./model/1.hdf5\n",
      "Epoch 2/5\n",
      " - 473s - loss: 0.0801 - acc: 0.9728 - f1_metric: 0.9728 - val_loss: 0.1048 - val_acc: 0.9692 - val_f1_metric: 0.9692\n",
      "\n",
      "Epoch 00002: val_acc improved from 0.96496 to 0.96922, saving model to ./model/1.hdf5\n",
      "Epoch 3/5\n",
      " - 472s - loss: 0.0430 - acc: 0.9851 - f1_metric: 0.9851 - val_loss: 0.1048 - val_acc: 0.9692 - val_f1_metric: 0.9692\n",
      "\n",
      "Epoch 00003: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-06.\n",
      "\n",
      "Epoch 00003: val_acc did not improve from 0.96922\n",
      "Epoch 4/5\n",
      " - 471s - loss: 0.0194 - acc: 0.9929 - f1_metric: 0.9929 - val_loss: 0.1119 - val_acc: 0.9702 - val_f1_metric: 0.9702\n",
      "\n",
      "Epoch 00004: val_acc improved from 0.96922 to 0.97017, saving model to ./model/1.hdf5\n",
      "Epoch 5/5\n",
      " - 472s - loss: 0.0088 - acc: 0.9966 - f1_metric: 0.9966 - val_loss: 0.1665 - val_acc: 0.9616 - val_f1_metric: 0.9616\n",
      "\n",
      "Epoch 00005: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-06.\n",
      "\n",
      "Epoch 00005: val_acc did not improve from 0.97017\n",
      "\n",
      "2th f1_score:0.9759450171821304\n",
      "2th accuracy:0.9701704545454546\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "model_2 (Model)                 (None, None, 768)    101677056   input_1[0][0]                    \n",
      "                                                                 input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "lambda_1 (Lambda)               (None, 768)          0           model_2[1][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 2)            1538        lambda_1[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 101,678,594\n",
      "Trainable params: 101,678,594\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n",
      "None\n",
      "Epoch 1/5\n",
      " - 485s - loss: 0.1611 - acc: 0.9412 - f1_metric: 0.9412 - val_loss: 0.0874 - val_acc: 0.9687 - val_f1_metric: 0.9687\n",
      "\n",
      "Epoch 00001: val_acc improved from -inf to 0.96874, saving model to ./model/2.hdf5\n",
      "Epoch 2/5\n",
      " - 472s - loss: 0.0808 - acc: 0.9723 - f1_metric: 0.9723 - val_loss: 0.0924 - val_acc: 0.9706 - val_f1_metric: 0.9706\n",
      "\n",
      "Epoch 00002: val_acc improved from 0.96874 to 0.97063, saving model to ./model/2.hdf5\n",
      "Epoch 3/5\n",
      " - 471s - loss: 0.0525 - acc: 0.9809 - f1_metric: 0.9809 - val_loss: 0.1154 - val_acc: 0.9678 - val_f1_metric: 0.9678\n",
      "\n",
      "Epoch 00003: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-06.\n",
      "\n",
      "Epoch 00003: val_acc did not improve from 0.97063\n",
      "Epoch 4/5\n",
      " - 472s - loss: 0.0220 - acc: 0.9921 - f1_metric: 0.9921 - val_loss: 0.1142 - val_acc: 0.9706 - val_f1_metric: 0.9706\n",
      "\n",
      "Epoch 00004: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-06.\n",
      "\n",
      "Epoch 00004: val_acc did not improve from 0.97063\n",
      "Epoch 5/5\n",
      " - 474s - loss: 0.0108 - acc: 0.9951 - f1_metric: 0.9951 - val_loss: 0.1104 - val_acc: 0.9739 - val_f1_metric: 0.9739\n",
      "\n",
      "Epoch 00005: val_acc improved from 0.97063 to 0.97395, saving model to ./model/2.hdf5\n",
      "\n",
      "3th f1_score:0.978999618174876\n",
      "3th accuracy:0.9739459971577451\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "model_2 (Model)                 (None, None, 768)    101677056   input_1[0][0]                    \n",
      "                                                                 input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "lambda_1 (Lambda)               (None, 768)          0           model_2[1][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 2)            1538        lambda_1[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 101,678,594\n",
      "Trainable params: 101,678,594\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n",
      "None\n",
      "Epoch 1/5\n",
      " - 484s - loss: 0.1491 - acc: 0.9484 - f1_metric: 0.9484 - val_loss: 0.0950 - val_acc: 0.9673 - val_f1_metric: 0.9673\n",
      "\n",
      "Epoch 00001: val_acc improved from -inf to 0.96731, saving model to ./model/3.hdf5\n",
      "Epoch 2/5\n",
      " - 472s - loss: 0.0732 - acc: 0.9750 - f1_metric: 0.9750 - val_loss: 0.0997 - val_acc: 0.9645 - val_f1_metric: 0.9645\n",
      "\n",
      "Epoch 00002: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-06.\n",
      "\n",
      "Epoch 00002: val_acc did not improve from 0.96731\n",
      "Epoch 3/5\n",
      " - 474s - loss: 0.0304 - acc: 0.9896 - f1_metric: 0.9896 - val_loss: 0.1007 - val_acc: 0.9730 - val_f1_metric: 0.9730\n",
      "\n",
      "Epoch 00003: val_acc improved from 0.96731 to 0.97300, saving model to ./model/3.hdf5\n",
      "Epoch 4/5\n",
      " - 474s - loss: 0.0130 - acc: 0.9956 - f1_metric: 0.9956 - val_loss: 0.1402 - val_acc: 0.9664 - val_f1_metric: 0.9664\n",
      "\n",
      "Epoch 00004: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-06.\n",
      "\n",
      "Epoch 00004: val_acc did not improve from 0.97300\n",
      "Epoch 5/5\n",
      " - 471s - loss: 0.0068 - acc: 0.9980 - f1_metric: 0.9980 - val_loss: 0.1411 - val_acc: 0.9702 - val_f1_metric: 0.9702\n",
      "\n",
      "Epoch 00005: ReduceLROnPlateau reducing learning rate to 1.249999968422344e-06.\n",
      "\n",
      "Epoch 00005: val_acc did not improve from 0.97300\n",
      "\n",
      "4th f1_score:0.9782857142857141\n",
      "4th accuracy:0.9729985788725722\n",
      "__________________________________________________________________________________________________\n",
      "Layer (type)                    Output Shape         Param #     Connected to                     \n",
      "==================================================================================================\n",
      "input_1 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "input_2 (InputLayer)            (None, None)         0                                            \n",
      "__________________________________________________________________________________________________\n",
      "model_2 (Model)                 (None, None, 768)    101677056   input_1[0][0]                    \n",
      "                                                                 input_2[0][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "lambda_1 (Lambda)               (None, 768)          0           model_2[1][0]                    \n",
      "__________________________________________________________________________________________________\n",
      "dense_1 (Dense)                 (None, 2)            1538        lambda_1[0][0]                   \n",
      "==================================================================================================\n",
      "Total params: 101,678,594\n",
      "Trainable params: 101,678,594\n",
      "Non-trainable params: 0\n",
      "__________________________________________________________________________________________________\n",
      "None\n",
      "Epoch 1/5\n",
      " - 488s - loss: 0.1514 - acc: 0.9460 - f1_metric: 0.9460 - val_loss: 0.1201 - val_acc: 0.9602 - val_f1_metric: 0.9602\n",
      "\n",
      "Epoch 00001: val_acc improved from -inf to 0.96019, saving model to ./model/4.hdf5\n",
      "Epoch 2/5\n",
      " - 473s - loss: 0.0754 - acc: 0.9732 - f1_metric: 0.9732 - val_loss: 0.1035 - val_acc: 0.9616 - val_f1_metric: 0.9616\n",
      "\n",
      "Epoch 00002: val_acc improved from 0.96019 to 0.96161, saving model to ./model/4.hdf5\n",
      "Epoch 3/5\n",
      " - 474s - loss: 0.0369 - acc: 0.9869 - f1_metric: 0.9869 - val_loss: 0.1307 - val_acc: 0.9664 - val_f1_metric: 0.9664\n",
      "\n",
      "Epoch 00003: val_acc improved from 0.96161 to 0.96635, saving model to ./model/4.hdf5\n",
      "Epoch 4/5\n",
      " - 473s - loss: 0.0278 - acc: 0.9901 - f1_metric: 0.9901 - val_loss: 0.1181 - val_acc: 0.9716 - val_f1_metric: 0.9716\n",
      "\n",
      "Epoch 00004: val_acc improved from 0.96635 to 0.97156, saving model to ./model/4.hdf5\n",
      "Epoch 5/5\n",
      " - 473s - loss: 0.0191 - acc: 0.9915 - f1_metric: 0.9915 - val_loss: 0.1238 - val_acc: 0.9706 - val_f1_metric: 0.9706\n",
      "\n",
      "Epoch 00005: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-06.\n",
      "\n",
      "Epoch 00005: val_acc did not improve from 0.97156\n",
      "\n",
      "5th f1_score:0.9772727272727272\n",
      "5th accuracy:0.9715639810426541\n"
     ]
    }
   ],
   "source": [
    "# Run 5-fold cross-validation (run_cv is defined earlier in the notebook) and\n",
    "# cache the out-of-fold train probabilities and test probabilities to disk.\n",
    "train_model_pred, test_model_pred = run_cv(5, DATA_LIST, None, DATA_LIST_TEST)\n",
    "# NOTE(review): 'negtive' is a typo in the filename, kept as-is because other\n",
    "# cells/notebooks may load these exact paths -- confirm before renaming.\n",
    "np.save('weights/train_bert_negtive_prob_1106_extend_trainSet.npy', train_model_pred)\n",
    "np.save('weights/test_bert_negtive_prob_1106_extend_trainSet.npy', test_model_pred)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10556/10556 [00:30<00:00, 344.85it/s]\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaEAAAEXCAYAAAAEO/uqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3Xl8VOXZ//HPNUnYUbZQ2SyooKxG\nSaEuRanUorUslaeCG1CXPo9VH7FardbauvyoVav1KdWiVbRWUVEIKlWrUEQrGkQW2TQsQgqyKQgB\nQpK5fn+cyZANZoSQk0m+79crr8w5554z1z2TzHfuc86cY+6OiIhIGCJhFyAiIvWXQkhEREKjEBIR\nkdAohEREJDQKIRERCY1CSEREQqMQknrLzI43s4/MbIeZXRt2PSL1kUJI6rNfAP9y9+bu/pCZDTSz\nWWa23czWhF2cSH2gEJL67JvAkjLTBcDjwI3hlCNS/5jOmCD1kZnNBM4AioBi4GR3/yS2bBDwmLt3\nDq9CkfpBIyGpl9z9u8Ac4Gp3b1YaQCJSsxRCIiISGoWQiIiERiEkIiKhSQ+7AJHawswiQAMgI5i0\nRkDU3feGW5lI3aWRkMg+A4DdwAzg6NjtN0KtSKSO0yHaIiISGo2EREQkNAohEREJjUJIRERCoxAS\nEZHQhHaIdps2bbxz584Hdd9t27bRokWL6i2ollOf6wf1uX44lD5/+OGHW9w9s5pLCk1oIdS5c2fm\nzZt3UPfNyclh6NCh1VxR7aY+1w/qc/1wKH02s8+quZxQJdwcZ2aPm9kmM/t4P8vNzB4yszwzW2Rm\nJ1d/mSIiUhcls09oEjD4AMvPAbrGfq4EHj70skREpD5IGELu/jbwxQGaDAWe8sBcoIWZtauuAkVK\nrVmzhsaNG5OVlRWf99prr3H88cdz3HHH8bvf/e6A958yZQpmFt8MvHXrVgYOHEizZs24+uqrq7zP\nkCFD6NWrV3z6tttuo0+fPmRlZXH22Wezfv16AJYvX84pp5xCw4YNue++++Lt9+zZQ79+/TjxxBPp\n2bMnt99+e3zZZZddxoknnkifPn0YMWIEO3fuBGDcuHFkZWWRlZVFt27dyu07SEtLiy8bMmRIfP7M\nmTM5+eST6dWrF6NHj6a4uLhcP3Jzc0lLS2PKlCnxeTfddBO9evWiV69ePPfcc/H5b731FieffDJZ\nWVmcfvrp5OXlATBp0iQyMzPjj//YY48lXJe7c+utt9KtWze6d+/OQw89tN/XpyrJvL6fffYZZ511\nFn369OHMM88kPz8fgFmzZsVrzcrKolGjRkybNu2Add17773x9r169SItLY0vvvjigLWMGTOGLl26\nxO+3YMECINjkVvq3kp2dzTvvvBO/z+bNmzn77LPp3r07PXr0YM2aNeX6dM0119CsWbP4dNnnHuhh\nZpeXLjOze8zs49jPBWXmTzKz1Wa2IPaTFZt/gpm9Z2aFZnZD2cc1s3FmtiS2rmdjp67CzL5rZvNj\n8580s/TY/KGxLWALzGyemZ0em39sbN7OBC9x8GIk+gE6Ax/vZ9krwOllpt8CshOts2/fvn6wpk2b\ndtD3TVXqs/vq1au9Z8+e8eni4mI/5phjfOXKlV5YWOh9+vTxJUuWVLmur776yr/zne94//79PTc3\n193dd+7c6XPmzPGHH37Yf/azn1W6z4svvuijRo0q95jbt2+P3/7jH//oP/3pT93dfePGjf7BBx/4\nLbfc4vfee2+8TTQa9R07dri7+969e71fv37+3nvvVVrXuHHjfPz48ZX6/NBDD/nYsWPj002bNq1U\nZ0lJiXfs2NFXrFjh7u633XabP/bYY+Wep4EDB/o555zjL7zwgru7v/LKKz5o0CAvKirynTt3et++\nfeP1dO3a1ZcuXeru7hMmTPDRo0e7u/
sTTzxR5fN0oHU9/vjjfskll3hJSUn8eapof3/byb6+I0aM\n8EmTJrm7+1tvveUXX3xxpTZbt271li1bekFBQdJ1TZ8+3QcOHJiwltGjR8ef17J27Njh0WjU3d0X\nLlzoxx9/fHxZz549/Y033oi3K63L3T03N9cvvvjicq912ecemOf73m9/APyTYP9+U2AecERs2SRg\nhFd+z24LfAu4G7ihzPwOwGqgcWz6eWAMwWBlHdAtNv8O4LLY7WbsO/NOH2B5hcfaWfHxK/5Ux4EJ\nVsW8Ks8FZGZXEmyyIzMzk5ycnIN+0EO5b6qq733euHEjO3bsiM9bvnw5RxxxBIsXL2bx4sX06dOH\n8ePHM2LEiErreeyxxxgwYADTpk1j9uzZ/Oc//4kvW7p0KatXry73WLt37+aOO+7gqquu4t13363y\nuc/NzWXLli3llq1cuZLGjRtX2b6wsJAtW7YwZ84cNm7cGJ/v7ixbtoytW7fSvXv3cvedMGECo0aN\nis8rKSmptO7t27dTXFzMsmXLWLZsGY0aNeLPf/4zbdq0AWD69Ol07dqVTz/9lNzcXDIyMpg6dSpt\n27bl1VdfBaBFixbceeednH766RQUFPDKK6/wySef8N5777F7925ycnKYP39+pecJOOC6xo8fz/XX\nX8/LL79c6fkoq6rnK9nX9/3332fw4MHk5OTg7rz44ouV2rz++uv07t2bf/7znwBJ1XX//ffTu3dv\ncnJyDljL2rVr48/r/ixfvpyCggJycnJYt24d0WiUXbt2Vep3SUkJt99+O9dffz1TpkyJL9/fcw/0\nAGa7ezFQbGYLCXafPL+/Wtx9E7DJzH5QxeJ0oLGZFQFNgPVAa6DQ91348Z/AL4G/unvZkU5T9vPe\nf0CJUsoTj4T+AowqM70CaJdonRoJfT3qc+WR0AsvvOCXXXZZfPqpp56q8pP6/Pnz/Uc/+pG7u59x\nxhnxkVCpqj7hX3fddf7SSy9Vekx391tuucU7duzoPXv29E2bNpVbdvvtt5cbCbkHn6JPPPFEb9q0\nqf/iF78ot2zMmDHetm1bP/PMM72goKBcn9esWeNHHXWUFxcXx+elpaV53759vX///j516lR3D0Zb\nRx99dLxf1157rffq1cvd3fPz833AgAFeXFxc7hP766+/7qeeeqoXFBT45s2bvUuXLn7fffe5u/vb\nb7/trVq18g4dOnj37t3jo5onnnjCjzrqKO/du7eff/75vnbt2oTratWqld91113et29fHzx4sH/y\nySeVXp/9/W0n+/qOGjXKH3zwQXcPRq+Ab9mypVybgQMH+ssvvxyfTlRXQUGBt2zZ0rdu3ZqwltGj\nR3u3bt28d+/eft111/mePXvi7V566SU//vjjvWXLlv7vf//b3d2nTp3q2dnZPnz4cM/KyvIbbrgh\n/ho/+OCD/oc//MHdvdJIqPS5J9g90smD99uzgXdjgdEGWAX83PeNhFYAi4AHgIZe/r37N5QZCcXm\n/S+wE9gM/D02z4DPiG3hAv4ILC5zn+HA8lhdp1RYX8KRUHV8WXU6cGnsKLlvA9vdfUM1rFfkgLyK\nk++alR+YR6NRxo0bx/3335/0ehcsWEBeXh7Dhw+vcvndd9/NunXruOiii/jTn/6UcH1paWksWLCA\n/Px8PvjgAz7+eN+Bpk888QTr16+ne/fu5falAEyePJkRI0aQlpYWn7d27VrmzZvHM888w3XXXcfK\nlSsxMyZPnsy4cePo168fzZs3Jz092Mhx3XXXcc8995RbB8DZZ5/Nueeey6mnnsqoUaM45ZRT4vd5\n4IEHmDFjBvn5+YwdO5brr78egB/+8IesWbOGRYsWMWjQIEaPHp1wXYWFhTRq1Ih58+ZxxRVX8JOf\n/CTh81UqmdcX4L777mP27NmcdNJJzJ49mw4dOsQfH2DDhg0sXryY73//+/F5iep6+eWXOe2002jV\nql
XCWsaPH8/y5cvJzc3liy++4J577om3GT58OMuXL2fatGncdtttABQXF7N06VLuu+8+cnNzWbVq\nFZMmTWL9+vW88MILXHPNNZUeq+xzD+wAnozV9QbBWd//DTwLvAeU7hD8JXACwaa3VsBNlVZcvj8t\nCfbxdwHaA03N7GIPOj8SeMDMPog9fnyno7tPdfcTgGHAnQd6jKok3BxnZs8CZwJtzCwfuJ3geiu4\n+yMET8C5QB6wCxj7dYtIZdGoUxSNUlTiFJdEicb+VotKohSVRIlGocSdkqhTHI3iDlH3Sr+D+wW/\no1HH2bfcHZZvM1p8sploNFjXvuWx++xnnVF3KPcYAbPg403wO5gIpm3f/Niy4LYRMYhU8SZQVun/\nqhOrPT7fK7VJ5MPNhn+0b7PZpg2fs2NPMTkLgnmrdjXkw6V58ek3PlgKNItPAxTs+Ir5CxfR79Tv\nALBt62bOPuc8bn3wcY7reSIAH639kjVbCnh10QYiBq+++Br/fj+Xozp0oqSkhG1fbCGr32k8+Lep\nRCx4PiJmHH/aYG64/ELOHX0tkUjwvK3ftpsmRREWrNtGpMzzF9wvQq/sU/jbCzlc0fab8ec6YsaZ\ng4fylwkPcvFl/81/tu0mYvD035/ld/c/wOYdhcG6zGjSog3bdxXRpn0nTvvOAN77YB7tO3XmpOx+\nvDnrX0TMePONN1ix4hPcnXnz5jFy5EgAtmzZwowZM0hPT2fYsGHceuut3HrrrQBceOGFdO3alc2b\nN7Nw4UL69+8PwAUXXMDgwcHBsa1bt44/r1dccQU33bTvPa2qdQF07NiR888/HwjekMeOTf7toWPH\njqxbty4+nZ+fT/v27Su1a9++PS+99BIAO3fu5MUXX+TII4+ML3/++ecZPnx4uc1lieqaPHkyo0aN\nSqqWdu2C47AaNmzI2LFjyx2YUmrAgAGsXLmSLVu20LFjR7p06cIxxxwDwLBhw5g7dy5HHXUUeXl5\nHHfccQDs2rWL4447jry8vHLPPcEopW/phLvfTbB/BzN7Bvg0Nr90MFBoZk8A5Q5CqMIgYLW7b46t\n6yXgVOBpd38P+E5s/tlAt4p3dve3YwcktHH3LQkeKy5hCLn7qATLHfhZsg9YU6JRZ8eeYr7aU8Se\nohIKi6Ns21XE1oJCdhYWs3NPMQWFxewovb23mJ2FJezZW8LekijF0SjFJR4LkyBgiqLB9L750XJv\n7IdXGg8v+6CmHqyWSONveQviU8XbN7Jp+27+d3Iwz6OwfulyrnrkNdKbt2bDc5Np88MbmTN5Qbm1\ntPrp3+K3v3rmZhoOvIz/W+ywOGi3c/Fa9n6+mY+fmR9r1ZvmP/lr/DFtym/Z9t1bGPNELkVf/IeM\nVh2CdX34MoXWigsmzo2vf9u8dVhGY6aVvAtAya7tWCSNSKNmRIsK2fR8Dkf0H8Hk+2dTvG0DGS3b\n4+5sm/U40JiV89P57fyZFG3NZ+NnG7jqzQLsrTeDde3ZSSS9IZaeQcmu7Xz+6pvMangKv1r4GiUF\n20hr2gIvLmLTlNs54pQL6PLLGXDBn+IfHiKv/IG0rv25aV5Dbs59FS8sIKPpkRRuWsW6N//Nkq6X\nEPngQz77fAu9rptEk8yObPnwH2yPtOa0381k71dbadSiNYbxxZI5WIuODP3TOzRIg0jRbo5s0ZKd\nG1byz3c+4JgRv2DBjGW0P3EAv/rzswwcMpJPF7xP6/adeeq9NcGO5FgIv7v8C2696zRunfBs8Kqb\nkRYxLKM9C5cs56nXP+Codu3565NP85sH/sL7q7aSFgnaRMz4atsXtG3TmqaNMrjv/93JyIsvZduu\nvUQiRsP0CM888yx33X03RSVRANIjxrBhw5g5cyY/+clPmD17Nt267XtP3b59O7Nnz+bpp5+Oz/vW\nt77Fp59+yurVq+nQoQOTJ0/mmWeeAYKRVrt27XB3pk2bFj+aMi8v
j2OPPRYzY/78+ezdu5fWrVvT\nsmVLCgoK2Lx5M5mZmcycOZPs7Gx+8IMf8Pnnn8cfs1mzZvEjE0sfI6YFsCx4Ci0NaOHuW82sD8HB\nAW/ElrVz9w0WDNmGAVV+17OMtcC3zawJwbW0ziI40AEza+vum8ysIcGIqjT0jgNWurvHviPaANia\n4HHKSfkrq27btZepH/2H/3y5m3Vf7iL/y91s3lHI1oK9lCRICDNo1iCdpg3TadYo+N04I0LzjHQy\n0iKkR4yM9AgZESM9LUJGWoSMNCM9EiEj3ciIREhPs3LzI7GBQnC/CJFIMIJIiwTtSj/Rln4CjkQq\nTJuVG4VEDCIR4505cxgwYABmwT9R2U/k+27vW1fpPygQr6H0U3kwQtk3UvHY6An2syw2P1qmXaLn\nFfaNtEqPXSkdfQW3DzyiAnjzzTcZNGhQrC4nf+1nXP6vprz28zPibWadNIG7fnUT0ZIo11x+KT8b\ndykOPPi7u+iVdRKDBpff93rhnBb88uKT6Z0VfKf6jL49Kd7xFSV7i9iz4SMee3Yqx3Q9IRhBAuvW\nfsY1M5vwwlWnEnW48b8v5bPZeZhF6NGhIzc+/giZ32jH5k0bueJH36No5w4sEqFg2T/468tvsyF/\nHffeci0lJSVEolFGDPkhI3/6U4pLovxy7G/ZVbADd6dP1x5cftPdLP80j6ysLJ6f+CZFQ8/nx8N7\nx1+HFQvn8eS9N2EWwaNRLrziWk4992wcmPLweD6ePguPOuf8cBQDfjQiNoreN2qenNucE05oS69T\nO1NYuIc//2+w875B42aMueV+2nbphDsce/VveXvy3WARGjU9gvOv+g1HtG3N239/mqUfziYSSaNh\nsyP5/lW/pVGTBhTs2sWM8VcG2/cbNuGbQ27klY83saeohF3tzmLui/fx/BN/wRo0ovX3f8avc5aU\ne00KN2xn25bd/PKlxZX+BvyUsVw2ajh4lGa9v8evZm+H2XPZNudpGhzVlSZd+1Ow/B22vf0kYDTq\n1ItW3/sfptwRHIBQvH0jny/9lMv/uQt78x/x9WaU9GXj7+7l6lvuJKNhE04cdQMjJ75Hs4YZrJ37\nKu169uf/3l5H80bpNG2QRrNGGYy54U4GfHcQRKP814WX0rrjsewsLOaiiy5i8+bNuDtZWVk88sgj\nALz44os89dRTZGRk0LhxY5577jnMjLS0NMaMGcNZZ51Vun+cK6644oD/Cw899BDTp08v3czYFjiv\ntCvAnNj/01fAxR4cpADwdzPLJPi3WwD8N4CZHUXsKDogambXAT3c/X0zmwLMJ9jc9hEwMbauG83s\nPIIj5R5295mx+ecT7I4pIgiuC7yqbZcHENpF7bKzs/1QTttz+lmD+f1rK3huXjBEzkgzOrduSqdW\nTchs1pDM5g1p0SSDIxpl0LhBGhlpEVo2yaB1swY0b5RBs4bpNM5IIxJJ/GZYG+jUJsH3hM4777xy\n+1Tqmrr2Ors7hcVRCouCrQtA/EONE3zS+fm4axk06Gy+f+55OMHm5pKoU1RS+jtKcdQpiW32jkY9\nNu2x9rC7KNiKUVgSJRp1orFN4HuKSirUA0VRpzC2daSwuISdhSXs3FNEQWFJsGWkMHZ7TxFFJYnf\nH5s0SOPoVk3o0qYpx2Q2JbtzK045pjWNMtL2e59DPG3Ph+6efVB3rmFmttPdmx2oTUqOhApLYOyk\nXJZv2MF/9e3I8JM60K9LK9LTdFLwuiwtLY3t27eX+0Kg1G5mRqOMtAO+IY8Y8oNaG7yFxSUUFJaw\nc08xO2LhtLOwiB17iuOb9T//ag/rvtjF8s938MbSjZTMWkmzhul894S2nNW9LWd0y6RFkwZhd6VG\nmdmxwIvAxkRtUzKE/rXBWJS/nccuzWZQj2+EXY7UkE6dOpXbOSxyuDVMT6NhehqtmiYXInuKSnhv\n5VZeWbSB2Z9sYvrC9WSkGaNP
6czPzz6exg32H8Z1ibuvBLISNiRFQ+iDzRFOPba1AkhEapVGGWkM\nPKEtA09oSzTqLMzfxtNz1/LYO6tZuXknEy/NJkNbbMpJuWfjy4K9bNlj9O/SOnFjEZGQRCLGSUe3\n5P4fn8idw3oxa8Vmxs9YHnZZtU7KhdDqrQUAtGy6/1NkiIjUJpd8+5v8OLsjT7//GTv2FIVdTq2S\nciG0Z29wtEu3bzQPuRIRkeSN6NuJvcVR3vk06e9x1gspF0JfxT5FNG+UkruzRKSeyurUggZpwdk0\nZJ+UC6G9seP2G6anXOkiUo81SI9wQrvmfLx+e9il1Cop906+78u1qfElUxGRUt84ohFbd+4Nu4xa\nJeVCqFQSZ30REalVmjZIY9feksQN65HUDaGwCxAR+ZqaNExXCFWQciEU0qnuREQOWZOMNHbtLU7c\nsB5JvRCKXaEmmbMwi4jUJqUjoZq7BEztl3ohFHvxFEEikmqaxM4dVxQNuZBaJHVDSCkkIimmaSyE\nCrVbKC71Qij22zQWEpEU07hB8CX7vRoJxaVeCHnpPqGQCxER+Zo0Eqos9UIo7AJERA5S6fWENBLa\nJ+VCCO0TEpEU1bRhsDmusERvYKVSLoR0iLaIpKr0SPC+VaJNOnGpF0I6RFtEUlREH54rSb0Qiv3W\naykiqaY0hHTml31SL4TiIyGlkIikltIPz8qgfVIvhNAh2iKSmhRClaVeCGmfkIikqNItONoct0/q\nhVDpDaWQiKSYSOwdVxm0T8qFUOlHCO0TEpFUEz8wIeQ6apOUCyEdHSciqar0bUub4/ZJvRDSPiER\nSVH6kn1lKRdCpfRiikiqKX3b0kXt9kkqhMxssJmtMLM8M7u5iuVHm9ksM/vIzBaZ2bnVX2rANY4V\nkRSlMyZUljCEzCwNmACcA/QARplZjwrNfgU87+4nASOBP1d3oaX2XU9IRCS1REpHQuGWUaskMxLq\nB+S5+yp33wtMBoZWaOPAEbHbRwLrq6/ECg+ks2iLSIrSUb2VWaLNW2Y2Ahjs7pfHpi8B+rv71WXa\ntAPeAFoCTYFB7v5hFeu6ErgSIDMzs++jjz76tQv+1wZj6po0xn+rmCbpX/vuIiKh2bIH7vwonYuO\nK6Ff5sHtWhg2bNiH7p5dzaWFJpm38aqiu+KzNwqY5O73m9kpwN/MrJe7lxt1uvtEYCJAdna2Dx1a\ncUCV2OY5q5i6ZhnnnnsuRzbO+Nr3T1U5OTkczPOVytTn+qE+9Xnt1l3c+dEsgHrT50SS2RyXD3Qq\nM92RypvbLgOeB3D394BGQJvqKHB/tDlORCT1JRNCuUBXM+tiZg0IDjyYXqHNWuAsADPrThBCm6uz\n0FL6npCISN2RMITcvRi4GngdWEZwFNwSM7vDzIbEmv0cuMLMFgLPAmP8MB1LrSuriojUHUnt2nf3\nGcCMCvN+Xeb2UuC06i1tf7UEvxVBIiKpL+XOmKBzx4mI1B0pF0J9OhzJGe2ipEdSrnQREakg5b5p\nc+pxbdjcOUqDdIWQiEiq0zu5iIiERiEkIiKhUQiJiEhoFEIiIhIahZCIiIRGISQiIqFRCImI1DRd\nIDpOISQiUkN0ppfKFEIiIhIahZCIiIRGISQiIqFRCImISGgUQiIiEhqFkIiIhEYhJCIioVEIiYhI\naBRCIiISGoWQiIiERiEkIiKhUQiJiEhoFEIiIhIahZCIiIRGISQiUsN0OaF9FEIiIhIahZCIiIRG\nISQiIqFRCImISGgUQiIiEpqkQsjMBpvZCjPLM7Ob99Pmx2a21MyWmNkz1VumiIjURemJGphZGjAB\n+B6QD+Sa2XR3X1qmTVfgl8Bp7v6lmbU9XAWLiEjdkcxIqB+Q5+6r3H0vMBkYWqHNFcAEd/8SwN03\nVW+ZIiJSFyUTQh2AdWWm82PzyuoGdDOzd81srpkNrq4CRUSk7kq4OQ6wKuZV/MJvOtAVOBPoCM
wx\ns17uvq3cisyuBK4EyMzMJCcn52sXXOpQ7puq1Of6QX2uu7bugdK33frS50SSCaF8oFOZ6Y7A+ira\nzHX3ImC1ma0gCKXcso3cfSIwESA7O9uHDq24VS85OTk5HOx9U5X6XD+oz3Xbui92ccdHswDqTZ8T\nSWZzXC7Q1cy6mFkDYCQwvUKbacBAADNrQ7B5blV1FioiInVPwhBy92LgauB1YBnwvLsvMbM7zGxI\nrNnrwFYzWwrMAm50962Hq2gREakbktkch7vPAGZUmPfrMrcduD72IyIikhSdMUFEREKjEBIRkdAo\nhEREapguarePQkhEpIZYVd+6rOcUQiIiEhqFkIiIhEYhJCIioVEIiYhIaBRCIiISGoWQiIiERiEk\nIiKhUQiJiEhoFEIiIhIahZCIiIRGISQiIqFRCImISGgUQiIiEhqFkIiIhEYhJCIioVEIiYjUENMF\nhSpRCImISGgUQiIiEhqFkIiIhEYhJCIioVEIiYhIaBRCIiISGoWQiIiERiEkIiKhUQiJiEhoFEIi\nIhIahZCIiIRGISQiIqFJKoTMbLCZrTCzPDO7+QDtRpiZm1l29ZUoIiJ1VcIQMrM0YAJwDtADGGVm\nPapo1xy4Fni/uosUEZG6KZmRUD8gz91XufteYDIwtIp2dwK/B/ZUY30iInWOe9gV1B7pSbTpAKwr\nM50P9C/bwMxOAjq5+ytmdsP+VmRmVwJXAmRmZpKTk/P1K445lPumKvW5flCf664vC6H0bbe+9DmR\nZEKoqqswxXPczCLAA8CYRCty94nARIDs7GwfOrSqAVViOTk5HOx9U5X6XD+oz3Xb+m27+c38mQD1\nps+JJLM5Lh/oVGa6I7C+zHRzoBfwLzNbA3wbmK6DE0REJJFkQigX6GpmXcysATASmF660N23u3sb\nd+/s7p2BucAQd593WCoWEZE6I2EIuXsxcDXwOrAMeN7dl5jZHWY25HAXKCIidVcy+4Rw9xnAjArz\nfr2ftmceelkiIlIf6IwJIiISGoWQiIiERiEkIiKhUQiJiEhoFEIiIhIahZCIiIRGISQiIqFRCImI\nSGgUQiIiEhqFkIiIhEYhJCIioVEIiYjUEKvq6mz1nEJIRERCoxASEZHQKIRERCQ0CiEREQmNQkhE\nREKjEBIRkdAohEREJDQKIRERCY1CSEREQqMQEhGR0CiEREQkNAohEREJjUJIRERCoxASEZHQKIRE\nRGqYh11ALaIQEhGpIYYuKFSRQkhEREKjEBIRkdAohEREJDQKIRERCU1SIWRmg81shZnlmdnNVSy/\n3syWmtkiM3vLzL5Z/aWKiEhdkzCEzCwNmACcA/QARplZjwrNPgKy3b0PMAX4fXUXKiIidU8yI6F+\nQJ67r3L3vcBkYGjZBu4+y924pB3SAAAJXElEQVR3xSbnAh2rt0wREamLzP3AX5sysxHAYHe/PDZ9\nCdDf3a/eT/s/AZ+7+11VLLsSuBIgMzOz76OPPnqI5YuIpI5thXD7/HQuOKaEU79xcF9ZHTZs2Ifu\nnl3NpYUmPYk2VX27qspnz8wuBrKBM6pa7u4TgYkA2dnZPnTo0KqaJZSTk8PB3jdVqc/1g/pct32+\nfQ+3z38LoN70OZFkQigf6FRmuiOwvmIjMxsE3Aqc4e6F1VOeiIjUZcnsE8oFuppZFzNrAIwEppdt\nYGYnAX8Bhrj7puovU0RE6qKEIeTuxcDVwOvAMuB5d19iZneY2ZBYs3uBZsALZrbAzKbvZ3UiIiJx\nyWyOw91nADMqzPt1mduDqrkuERGpB3TGBBERCY1CSEREQqMQEhGpIabLCVWiEBIRkdAohEREJDQK\nIRERCY1CSEREQqMQEhGR0CiEREQkNAohEREJjUJIRERCoxASEZHQKIRERCQ0CiEREQmNQkhEREKj\nEBIRkdAohEREJDQKIRERCY1CSESkhnnYBdQiCiERkRqia9
pVphASEZHQKIRERCQ0CiEREQmNQkhE\nREKjEBIRkdAohEREJDQKIRERCY1CSEREQqMQEhGR0CiEREQkNAohEREJTVIhZGaDzWyFmeWZ2c1V\nLG9oZs/Flr9vZp2ru1AREal7EoaQmaUBE4BzgB7AKDPrUaHZZcCX7n4c8ABwT3UXKiIidU8yI6F+\nQJ67r3L3vcBkYGiFNkOBJ2O3pwBnmZlOGCsiIgdk7ge+soWZjQAGu/vlselLgP7ufnWZNh/H2uTH\nplfG2mypsK4rgSsBMjMz+z766KPV2RcRkVqtoAgeW5HGd9tH6d3q4K4qNGzYsA/dPbuaSwtNehJt\nqhrRVHz2kmmDu08EJgJkZ2f70KEVB1TJycnJ4WDvm6rU5/pBfa77LqT+9flAktkclw90KjPdEVi/\nvzZmlg4cCXxRHQWKiEjdlUwI5QJdzayLmTUARgLTK7SZDoyO3R4BzPRE2/lERKTeS7g5zt2Lzexq\n4HUgDXjc3ZeY2R3APHefDvwV+JuZ5RGMgEYezqJFRKRuSGafEO4+A5hRYd6vy9zeA/xX9ZYmIiJ1\nnc6YICIioVEIiYhIaBRCIiISGoWQiIiEJuEZEw7bA5ttBj47yLu3AbYkbFW3qM/1g/pcPxxKn7/p\n7pnVWUyYQguhQ2Fm8+rSaSuSoT7XD+pz/VAf+7w/2hwnIiKhUQiJiEhoUjWEJoZdQAjU5/pBfa4f\n6mOfq5SS+4RERKRuSNWRkIiI1AEKIRERCU2tDiEzG2xmK8wsz8xurmJ5QzN7Lrb8fTPrXPNVVq8k\n+ny9mS01s0Vm9paZfTOMOqtToj6XaTfCzNzMUv7Q1mT6bGY/jr3WS8zsmZqusbol8bd9tJnNMrOP\nYn/f54ZRZ3Uxs8fNbFPsytNVLTczeyj2fCwys5NrusZawd1r5Q/BZSNWAscADYCFQI8Kba4CHond\nHgk8F3bdNdDngUCT2O3/qQ99jrVrDrwNzAWyw667Bl7nrsBHQMvYdNuw666BPk8E/id2uwewJuy6\nD7HPA4CTgY/3s/xc4B8EV6b+NvB+2DWH8VObR0L9gDx3X+Xue4HJQMXr4Q4FnozdngKcZWZVXWo8\nVSTss7vPcvddscm5BFe6TWXJvM4AdwK/B/bUZHGHSTJ9vgKY4O5fArj7phqusbol02cHjojdPpLK\nV3BOKe7+Nge+wvRQ4CkPzAVamFm7mqmu9qjNIdQBWFdmOj82r8o27l4MbAda10h1h0cyfS7rMoJP\nUqksYZ/N7CSgk7u/UpOFHUbJvM7dgG5m9q6ZzTWzwTVW3eGRTJ9/A1xsZvkE1y+7pmZKC83X/X+v\nk5K6qF1IqhrRVDyePJk2qSTp/pjZxUA2cMZhrejwO2CfzSwCPACMqamCakAyr3M6wSa5MwlGu3PM\nrJe7bzvMtR0uyfR5FDDJ3e83s1MIrtbcy92jh7+8UNS196+DUptHQvlApzLTHak8PI+3MbN0giH8\ngYa/tV0yfcbMBgG3AkPcvbCGajtcEvW5OdAL+JeZrSHYdj49xQ9OSPZvO8fdi9x9NbCCIJRSVTJ9\nvgx4HsDd3wMaEZzos65K6v+9rqvNIZQLdDWzLmbWgODAg+kV2kwHRsdujwBmemyPX4pK2OfYpqm/\nEARQqu8ngAR9dvft7t7G3Tu7e2eC/WBD3H1eOOVWi2T+tqcRHISCmbUh2Dy3qkarrF7J9HktcBaA\nmXUnCKHNNVplzZoOXBo7Su7bwHZ33xB2UTWt1m6Oc/diM7saeJ3gyJrH3X2Jmd0BzHP36cBfCYbs\neQQjoJHhVXzokuzzvUAz4IXYMRhr3X1IaEUfoiT7XKck2efXgbPNbClQAtzo7lvDq/rQJNnnnwOP\nmtk4gs1SY1L5Q6WZPUuwObVNbD/X7UAGgLs/QrDf61wgD9gFjA2n0nDptD0iIhKa2rw5TkRE6jiF\nkIiIhEYhJCIioVEIiY
hIaBRCIiISGoWQiIiERiEk9Y6ZdTazCw/h/s/GTr0/zsxOMLMFscsPHFud\ndYrUBwohqY86AwcVQmZ2FHCqu/dx9weAYQSn1znJ3VdWY40i9YK+rCp1hpldCtxA8G37RQRnGnjF\n3afElu9092ZmNhfoDqwGnoyFScV1NQIeJjhJbDFwvbvPMrNFBOdwWwFMJbimUwnwCXAewbnPOhKc\nFeBOd3/uMHZZJOXV2tP2iHwdZtaT4KSup7n7FjNrBfxhP81vBm5w9/MOsMqfAbh7bzM7AXjDzLoB\nQwiCLSv2uAbsdPf7zOx8YL27/yC27Mhq6ZxIHabNcVJXfBeY4u5bANz9UM+mfjrwt9i6lgOfEZxE\n9EAWA4PM7B4z+467bz/EGkTqPIWQ1BVG5WuxFBP7G4+NWBp8zfV9Le7+CdCXIIzGm9mvv+46ROob\nhZDUFW8BPzaz1gCxzXFrCEIBgkspZ8Ru7yC4TtGBvA1cFFtXN+Bogv1A+2Vm7YFd7v40cB9w8tfu\nhUg9o31CUifELgtwNzDbzEqAj4CbgBwz+4AgpApizRcBxWa2kOBKnpUOTAD+DDxiZosJRlRj3L0w\ndvmM/ekN3GtmUaCI4KAFETkAHR0nIiKh0eY4EREJjTbHSb1mZt8H7qkwe7W7Dw+jHpH6RpvjREQk\nNNocJyIioVEIiYhIaBRCIiISGoWQiIiE5v8D00I10EvHKYkAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x1dd32225ef0>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "0.4143133759498596"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn import metrics\n",
    "\n",
    "def get_best_F1(pred, true_labels):\n",
    "    \"\"\"Scan candidate thresholds, plot F1 vs. cutoff, and return the best cutoff.\n",
    "\n",
    "    pred: 1-D array of predicted positive-class probabilities.\n",
    "    true_labels: binary ground-truth labels aligned with pred.\n",
    "    Returns the threshold that maximizes F1 on (pred, true_labels).\n",
    "    \"\"\"\n",
    "    f1_scores = []\n",
    "    cut_offs = []\n",
    "    # np.unique sorts AND dedupes: duplicate probabilities produce identical\n",
    "    # binarizations, so skipping them changes nothing except runtime.\n",
    "    # (Was: np.sort(pred), which recomputed F1 once per sample.)\n",
    "    for threshold in tqdm(np.unique(pred)):\n",
    "        pred_binary = (pred >= threshold) * 1\n",
    "        f1_scores.append(metrics.f1_score(y_true=true_labels, y_pred=pred_binary))\n",
    "        cut_offs.append(threshold)\n",
    "    max_index = f1_scores.index(max(f1_scores))\n",
    "    best_cutoff = cut_offs[max_index]\n",
    "    best_f1 = f1_scores[max_index]\n",
    "    max_tag = '[' + str(best_cutoff) + ' , ' + str(best_f1) + ']'\n",
    "    plt.plot(cut_offs, f1_scores, label='F1')\n",
    "    plt.xlabel('cut_offs')\n",
    "    plt.title('f1 ')\n",
    "    plt.annotate(max_tag, xy=(best_cutoff, best_f1), xytext=(best_cutoff, best_f1))\n",
    "    plt.grid(True, linewidth=1)\n",
    "    plt.show()\n",
    "    return best_cutoff\n",
    "\n",
    "get_best_F1(train_model_pred[:, 1], train['negative'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "validate f1 score: 0.9762845849802371\n",
      "validate acc score: 0.9704433497536946\n"
     ]
    }
   ],
   "source": [
    "# Binarize OOF probabilities at a fixed cutoff and report train-set metrics.\n",
    "THRESHOLD = 0.500\n",
    "validate = [1 if prob > THRESHOLD else 0 for prob in train_model_pred[:, 1]]\n",
    "score = f1_score(train['negative'], validate)\n",
    "print('validate f1 score:', score)\n",
    "\n",
    "# Column renamed 'ture' -> 'true'; accuracy_score is already imported in the\n",
    "# top import cell, so the in-cell re-import was removed.\n",
    "res = pd.DataFrame({'true': list(train['negative']), 'pred': validate})\n",
    "print('validate acc score:', accuracy_score(res['true'], res['pred']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "validate f1 score: 0.9734732824427481\n",
      "validate acc score: 0.96525\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): this cell is an exact duplicate of the validation cell above\n",
    "# (kept only for its earlier recorded output) -- consider deleting it.\n",
    "THRESHOLD = 0.500\n",
    "validate = [1 if prob > THRESHOLD else 0 for prob in train_model_pred[:, 1]]\n",
    "score = f1_score(train['negative'], validate)\n",
    "print('validate f1 score:', score)\n",
    "\n",
    "# Column renamed 'ture' -> 'true'; accuracy_score already imported at the top.\n",
    "res = pd.DataFrame({'true': list(train['negative']), 'pred': validate})\n",
    "print('validate acc score:', accuracy_score(res['true'], res['pred']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Persist the train set together with its 0.5-cutoff predictions for error analysis.\n",
    "train['predict'] = validate\n",
    "# index=False (was index=None): to_csv documents a bool here; the explicit\n",
    "# boolean has the same effect and matches the documented API.\n",
    "train.to_csv('weights/train_samples_with_predict_label.csv', encoding='utf-8', index=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "- chinese_wwm_ext_L   entities + text + clean\n",
    "```\n",
    "validate f1 score: 0.9768953068592058\n",
    "validate acc score: 0.9737947589517904\n",
    "```\n",
    "\n",
    "- chinese_wwm_ext_L   entities + text + clean + Mask\n",
    "\n",
    "```\n",
    "validate f1 score: 0.9775524981897176\n",
    "validate acc score: 0.9751950390078016\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>13002</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>13006</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>13009</td>\n",
       "      <td>0</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>13011</td>\n",
       "      <td>0</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>13015</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9992</td>\n",
       "      <td>22996</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9993</td>\n",
       "      <td>22997</td>\n",
       "      <td>0</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9994</td>\n",
       "      <td>22998</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9995</td>\n",
       "      <td>22999</td>\n",
       "      <td>0</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9996</td>\n",
       "      <td>23000</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>9997 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         id  negative key_entity\n",
       "0     13002         1           \n",
       "1     13006         1           \n",
       "2     13009         0           \n",
       "3     13011         0           \n",
       "4     13015         1           \n",
       "...     ...       ...        ...\n",
       "9992  22996         1           \n",
       "9993  22997         0           \n",
       "9994  22998         1           \n",
       "9995  22999         0           \n",
       "9996  23000         1           \n",
       "\n",
       "[9997 rows x 3 columns]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "store over\n"
     ]
    }
   ],
   "source": [
    "# Hard-label the test set by argmax over the two class probabilities and\n",
    "# write the submission file (key_entity left blank -- handled elsewhere).\n",
    "test_prob = np.argmax(test_model_pred, axis=1)\n",
    "# .copy() prevents SettingWithCopyWarning: test[['id']] may be a view of `test`.\n",
    "test_index = test[['id']].copy()\n",
    "test_index['negative'] = test_prob\n",
    "test_index['key_entity'] = ''  # scalar broadcast instead of a list comprehension\n",
    "test_index\n",
    "test_index.to_csv('./submit/sub_emotion_withMask_1108_extend_trainSet.csv', encoding='utf-8', index=False)\n",
    "print('store over')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>13002</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>13006</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>13015</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>6</td>\n",
       "      <td>13018</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7</td>\n",
       "      <td>13019</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9988</td>\n",
       "      <td>22991</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9991</td>\n",
       "      <td>22995</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9992</td>\n",
       "      <td>22996</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9994</td>\n",
       "      <td>22998</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9996</td>\n",
       "      <td>23000</td>\n",
       "      <td>1</td>\n",
       "      <td></td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>7857 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         id  negative key_entity\n",
       "0     13002         1           \n",
       "1     13006         1           \n",
       "4     13015         1           \n",
       "6     13018         1           \n",
       "7     13019         1           \n",
       "...     ...       ...        ...\n",
       "9988  22991         1           \n",
       "9991  22995         1           \n",
       "9992  22996         1           \n",
       "9994  22998         1           \n",
       "9996  23000         1           \n",
       "\n",
       "[7857 rows x 3 columns]"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the rows predicted as negative sentiment (label 1).\n",
    "test_index.query('negative == 1')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
