{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.chdir('../')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from config import conf\n",
    "import pandas as pd\n",
    "import os\n",
    "from os.path import join\n",
    "import re\n",
    "import numpy as np\n",
    "from data_utils.basic_data import load_basic_dataset\n",
    "FEATURE_ROOT_DIR = conf.get('linux_dir', 'feature_root_dir')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[01;34m/home/njuciairs/wangshuai/competitions/finance_features_round2/\u001b[00m\r\n",
      "├── \u001b[01;34mbinary_class_cross_roberta\u001b[00m\r\n",
      "│   ├── feature_split 1\r\n",
      "│   ├── feature_split 2\r\n",
      "│   ├── feature_split 3\r\n",
      "│   ├── feature_split 4\r\n",
      "│   ├── feature_split 5\r\n",
      "│   ├── feature_split 6\r\n",
      "│   ├── feature_split 7\r\n",
      "│   ├── feature_split 8\r\n",
      "│   ├── feature_split 9\r\n",
      "│   ├── test_features_round2_version 1\r\n",
      "│   ├── test_features_round2_version 2\r\n",
      "│   ├── test_features_round2_version 3\r\n",
      "│   ├── test_features_round2_version 4\r\n",
      "│   ├── test_features_round2_version 5\r\n",
      "│   ├── test_features_round2_version 6\r\n",
      "│   ├── test_features_round2_version 7\r\n",
      "│   ├── test_features_round2_version 8\r\n",
      "│   └── test_features_round2_version 9\r\n",
      "├── \u001b[01;34mmulti_class_cross1\u001b[00m\r\n",
      "│   ├── feature_split 1\r\n",
      "│   ├── feature_split 2\r\n",
      "│   ├── feature_split 3\r\n",
      "│   ├── feature_split 4\r\n",
      "│   ├── feature_split 5\r\n",
      "│   ├── feature_split 6\r\n",
      "│   ├── feature_split 7\r\n",
      "│   ├── feature_split 8\r\n",
      "│   ├── feature_split 9\r\n",
      "│   ├── test_features_round2_version 1\r\n",
      "│   ├── test_features_round2_version 2\r\n",
      "│   ├── test_features_round2_version 3\r\n",
      "│   ├── test_features_round2_version 4\r\n",
      "│   ├── test_features_round2_version 5\r\n",
      "│   ├── test_features_round2_version 6\r\n",
      "│   ├── test_features_round2_version 7\r\n",
      "│   ├── test_features_round2_version 8\r\n",
      "│   └── test_features_round2_version 9\r\n",
      "├── \u001b[01;34mmulti_class_cross_roberta\u001b[00m\r\n",
      "│   ├── feature_split 1\r\n",
      "│   ├── feature_split 2\r\n",
      "│   ├── feature_split 3\r\n",
      "│   ├── feature_split 4\r\n",
      "│   ├── feature_split 5\r\n",
      "│   ├── feature_split 6\r\n",
      "│   ├── feature_split 7\r\n",
      "│   ├── feature_split 8\r\n",
      "│   ├── feature_split 9\r\n",
      "│   ├── test_features_round2_version 1\r\n",
      "│   ├── test_features_round2_version 2\r\n",
      "│   ├── test_features_round2_version 3\r\n",
      "│   ├── test_features_round2_version 4\r\n",
      "│   ├── test_features_round2_version 5\r\n",
      "│   ├── test_features_round2_version 6\r\n",
      "│   ├── test_features_round2_version 7\r\n",
      "│   ├── test_features_round2_version 8\r\n",
      "│   └── test_features_round2_version 9\r\n",
      "├── \u001b[01;34msenti_entity_goodremove\u001b[00m\r\n",
      "│   ├── feature_split 1\r\n",
      "│   ├── feature_split 2\r\n",
      "│   ├── feature_split 3\r\n",
      "│   ├── feature_split 4\r\n",
      "│   ├── feature_split 5\r\n",
      "│   ├── feature_split 6\r\n",
      "│   ├── feature_split 7\r\n",
      "│   ├── feature_split 8\r\n",
      "│   ├── feature_split 9\r\n",
      "│   ├── test_features_round2_version_goodremove 1\r\n",
      "│   ├── test_features_round2_version_goodremove 2\r\n",
      "│   ├── test_features_round2_version_goodremove 3\r\n",
      "│   ├── test_features_round2_version_goodremove 4\r\n",
      "│   ├── test_features_round2_version_goodremove 5\r\n",
      "│   ├── test_features_round2_version_goodremove 6\r\n",
      "│   ├── test_features_round2_version_goodremove 7\r\n",
      "│   ├── test_features_round2_version_goodremove 8\r\n",
      "│   └── test_features_round2_version_goodremove 9\r\n",
      "└── \u001b[01;34msenti_entity_goodremove_full\u001b[00m\r\n",
      "    ├── feature_split 1\r\n",
      "    ├── feature_split 2\r\n",
      "    ├── feature_split 3\r\n",
      "    ├── feature_split 4\r\n",
      "    ├── feature_split 5\r\n",
      "    ├── test_features_round2_version_goodremove 1\r\n",
      "    ├── test_features_round2_version_goodremove 2\r\n",
      "    ├── test_features_round2_version_goodremove 3\r\n",
      "    ├── test_features_round2_version_goodremove 4\r\n",
      "    └── test_features_round2_version_goodremove 5\r\n",
      "\r\n",
      "5 directories, 82 files\r\n"
     ]
    }
   ],
   "source": [
    "!tree $FEATURE_ROOT_DIR"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2366\n"
     ]
    }
   ],
   "source": [
    "#查看各个文件中包含的条目数量是否一致\n",
    "model1_split_length = len(pd.read_csv(join(FEATURE_ROOT_DIR,'multi_class_cross1','feature_split 1')))\n",
    "model2_split_length = len(pd.read_csv(join(FEATURE_ROOT_DIR,'multi_class_cross_roberta','feature_split 1')))\n",
    "model3_split_length = len(pd.read_csv(join(FEATURE_ROOT_DIR,'senti_entity_goodremove_full','feature_split 1')))\n",
    "\n",
    "assert model1_split_length ==model2_split_length\n",
    "print(model1_split_length)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "#提取multi_class_cross1新feature\n",
    "def get_feature_for_multi_class_cross1(split_id):\n",
    "    from data_utils.bert_multi_class_data import get_train_val_data_loader,load_train_val_dataset_cross,TestEntityDataset\n",
    "    val_df, train_df  = load_train_val_dataset_cross(test_number=split_id, cross_number=9)\n",
    "    test_df = val_df\n",
    "    test_dataset = TestEntityDataset(test_df, max_len=400)\n",
    "    texts = [sample.text for sample in test_dataset]\n",
    "    entity = [t.split('[SEP]')[0][5:] for t in texts]\n",
    "    feature_df = pd.read_csv(join(FEATURE_ROOT_DIR,'multi_class_cross1','feature_split %d'%split_id))\n",
    "    feature_df['key_entity'] = entity\n",
    "    feature_df['predict_features'] = feature_df['predict_features'].map(lambda x:eval(re.search(r'(\\[.*\\])',x).group()))\n",
    "    return feature_df\n",
    "\n",
    "def get_test_feature_for_multi_class_cross1(split_id):\n",
    "    from data_utils.bert_multi_class_data import TestEntityDataset\n",
    "    from data_utils.basic_data import load_basic_dataset\n",
    "    test_df = load_basic_dataset('test')\n",
    "    test_dataset = TestEntityDataset(test_df, max_len=400)\n",
    "    texts = [sample.text for sample in test_dataset]\n",
    "    entity = [t.split('[SEP]')[0][5:] for t in texts]\n",
    "    feature_df = pd.read_csv(join(FEATURE_ROOT_DIR,'multi_class_cross1','test_features_round2_version %d'%split_id))\n",
    "    feature_df['key_entity'] = entity\n",
    "    feature_df['predict_test_features'] = feature_df['predict_test_features'].map(lambda x:eval(re.search(r'(\\[.*\\])',x).group()))\n",
    "    return feature_df.sort_values(['id','key_entity']).reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "#提取BertSentiEntity_cross新feature\n",
    "def get_feature_for_BertSentiEntity_cross(split_id):\n",
    "    feature_df = pd.read_csv(join(FEATURE_ROOT_DIR,'senti_entity_goodremove_full','feature_split %d'%split_id))\n",
    "    feature_df['negative'] = feature_df['negative'].map(lambda x:eval(x))\n",
    "    feature_df['predict'] = feature_df['predict'].map(lambda x:eval(x))\n",
    "    feature_df['entity_list'] = feature_df['entity_list'].map(lambda x:eval(x))\n",
    "    return feature_df\n",
    "\n",
    "def get_test_feature_for_BertSentiEntity_cross(split_id):\n",
    "    feature_df = pd.read_csv(join(FEATURE_ROOT_DIR,'senti_entity_goodremove_full','test_features_round2_version_goodremove %d'%split_id))\n",
    "    feature_df['negative'] = feature_df['negative'].map(lambda x:eval(x))\n",
    "    feature_df['predict'] = feature_df['predict'].map(lambda x:eval(x))\n",
    "    feature_df['entity_list'] = feature_df['entity_list'].map(lambda x:eval(x))\n",
    "    return feature_df.sort_values(['id']).reset_index(drop=True)\n",
    "# df = get_test_feature_for_BertSentiEntity_cross(1)\n",
    "# df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>entity</th>\n",
       "      <th>mc_predict</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>13001</td>\n",
       "      <td>北京华赢凯来资产管理有限公司</td>\n",
       "      <td>[-4.195833333333334, -3.116511111111111, 6.595...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>13001</td>\n",
       "      <td>华赢凯来</td>\n",
       "      <td>[-3.8221555555555558, -3.475477777777778, 6.69...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>13002</td>\n",
       "      <td>米袋</td>\n",
       "      <td>[-3.8972333333333333, -3.413866666666666, 6.71...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>13002</td>\n",
       "      <td>米袋计划</td>\n",
       "      <td>[-4.139588888888889, -3.2134666666666667, 6.6718]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>13003</td>\n",
       "      <td>易通</td>\n",
       "      <td>[-3.730388888888889, -3.3769666666666667, 6.47...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>33584</td>\n",
       "      <td>22999</td>\n",
       "      <td>京东白条</td>\n",
       "      <td>[8.301277777777777, -3.6993444444444443, -3.69...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>33585</td>\n",
       "      <td>22999</td>\n",
       "      <td>京东金融</td>\n",
       "      <td>[8.295011111111112, -3.6586111111111115, -3.72...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>33586</td>\n",
       "      <td>23000</td>\n",
       "      <td>宜贷网</td>\n",
       "      <td>[-3.937344444444444, -3.179866666666667, 6.435...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>33587</td>\n",
       "      <td>23000</td>\n",
       "      <td>有房</td>\n",
       "      <td>[-4.004755555555556, 6.6848888888888895, -3.00...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>33588</td>\n",
       "      <td>23000</td>\n",
       "      <td>贷网</td>\n",
       "      <td>[-4.251022222222222, -2.6024222222222217, 6.13...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>33589 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "          id          entity  \\\n",
       "0      13001  北京华赢凯来资产管理有限公司   \n",
       "1      13001            华赢凯来   \n",
       "2      13002              米袋   \n",
       "3      13002            米袋计划   \n",
       "4      13003              易通   \n",
       "...      ...             ...   \n",
       "33584  22999            京东白条   \n",
       "33585  22999            京东金融   \n",
       "33586  23000             宜贷网   \n",
       "33587  23000              有房   \n",
       "33588  23000              贷网   \n",
       "\n",
       "                                              mc_predict  \n",
       "0      [-4.195833333333334, -3.116511111111111, 6.595...  \n",
       "1      [-3.8221555555555558, -3.475477777777778, 6.69...  \n",
       "2      [-3.8972333333333333, -3.413866666666666, 6.71...  \n",
       "3      [-4.139588888888889, -3.2134666666666667, 6.6718]  \n",
       "4      [-3.730388888888889, -3.3769666666666667, 6.47...  \n",
       "...                                                  ...  \n",
       "33584  [8.301277777777777, -3.6993444444444443, -3.69...  \n",
       "33585  [8.295011111111112, -3.6586111111111115, -3.72...  \n",
       "33586  [-3.937344444444444, -3.179866666666667, 6.435...  \n",
       "33587  [-4.004755555555556, 6.6848888888888895, -3.00...  \n",
       "33588  [-4.251022222222222, -2.6024222222222217, 6.13...  \n",
       "\n",
       "[33589 rows x 3 columns]"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#new_feature_sample = {(id,entity):(multiclass1_predict,roberta_predict,sentientity_predict,sentientity_sentence)}\n",
    "#整合新的feature\n",
    "class NewFeatureSample:\n",
    "    def __init__(self,id,entity,mc_predict=None,se_entity=None,se_sentence=None):\n",
    "        self.id = id\n",
    "        self.entity = entity\n",
    "        self.mc_predict = mc_predict\n",
    "        self.se_entity = se_entity\n",
    "        self.se_sentence = se_sentence\n",
    "    def get_tuple(self):\n",
    "        return (self.id,self.entity,self.mc_predict,self.se_entity,self.se_sentence)\n",
    "    def __str__(self):\n",
    "        return str((self.id,self.entity,self.mc_predict,self.se_entity,self.se_sentence))\n",
    "    def __repr__(self):\n",
    "        return self.__str__()\n",
    "    \n",
    "def make_mc_features(): \n",
    "    new_feature_samples = {}\n",
    "    for i in range(1,10):\n",
    "        df_mc = get_feature_for_multi_class_cross1(i)\n",
    "        for id,predict_features,key_entity in df_mc.values:\n",
    "            key = (str(id),key_entity) \n",
    "            if key not in new_feature_samples:\n",
    "                new_feature_samples[key] = NewFeatureSample(id=id,entity=key_entity,mc_predict=predict_features)\n",
    "            else:\n",
    "                new_feature_samples[key].mc_predict = predict_features\n",
    "    \n",
    "    mc_rb_features = pd.DataFrame([sample.get_tuple() for sample in new_feature_samples.values()],columns=['id','entity','mc_predict','se_entity','se_sentence'])\n",
    "    mc_rb_features_df = mc_rb_features[['id','entity','mc_predict']]\n",
    "    return new_feature_samples,mc_rb_features_df\n",
    "def make_mc_test_features(): \n",
    "    new_feature_samples = {}\n",
    "    df_mc = get_test_feature_for_multi_class_cross1(1)\n",
    "    values =[get_test_feature_for_multi_class_cross1(i)['predict_test_features'].values.tolist() for i in range(1,10)]\n",
    "    df_mc['predict_test_features'] = np.mean(values,axis=0).tolist()\n",
    "    for id,predict_features,key_entity in df_mc.values:\n",
    "        key = (str(id),key_entity) \n",
    "        if key not in new_feature_samples:\n",
    "            new_feature_samples[key] = NewFeatureSample(id=id,entity=key_entity,mc_predict=predict_features)\n",
    "        else:\n",
    "            new_feature_samples[key].mc_predict = predict_features\n",
    "            \n",
    "\n",
    "    mc_features = pd.DataFrame([sample.get_tuple() for sample in new_feature_samples.values()],columns=['id','entity','mc_predict','se_entity','se_sentence'])\n",
    "    mc_test_features_df = mc_features[['id','entity','mc_predict']]\n",
    "    return new_feature_samples,mc_test_features_df\n",
    "_,mc_test_features_df = make_mc_test_features()\n",
    "mc_test_features_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "def make_mc_se_features():\n",
    "    new_feature_samples,_ = make_mc_features()\n",
    "    for i in range(1,6):\n",
    "        se_df = get_feature_for_BertSentiEntity_cross(i)\n",
    "        for id,negative,se_entity_logits,entity_list in se_df.values:\n",
    "            for entity in entity_list:\n",
    "                key = (str(id),entity)\n",
    "                if key not in  new_feature_samples:\n",
    "                    new_feature_samples[key] = NewFeatureSample(id,entity,se_sentence=negative)\n",
    "                else:\n",
    "                    new_feature_samples[key].se_sentence = negative\n",
    "            for entity_logit,entity in zip(se_entity_logits,entity_list):\n",
    "                key = (str(id),entity)\n",
    "                if key not in  new_feature_samples:\n",
    "                    new_feature_samples[key] = NewFeatureSample(id,entity,se_entity==entity_logit)\n",
    "                else:\n",
    "                    new_feature_samples[key].se_entity = entity_logit\n",
    "    mc_se_df = pd.DataFrame([sample.get_tuple() for sample in new_feature_samples.values()],columns=['id','entity','mc_predict','se_entity','se_sentence'])\n",
    "    return new_feature_samples,mc_se_df\n",
    "\n",
    "def make_mc_se_test_features():\n",
    "    new_feature_samples,_ = make_mc_test_features()\n",
    "    se_df = get_test_feature_for_BertSentiEntity_cross(1)\n",
    "    se_df['negative'] = np.mean(np.array([get_test_feature_for_BertSentiEntity_cross(i)['negative'].values.tolist() for i in range(1,6)]),axis=0).tolist()\n",
    "    \n",
    "    mean_rs = []\n",
    "    values = [get_test_feature_for_BertSentiEntity_cross(i)['predict'].values.tolist() for i in range(1,6) ]\n",
    "    for i in range(len(values[0])):\n",
    "        lists = [rs[i] for rs in values]\n",
    "        mean_rs.append(np.mean(lists,axis=0).tolist())\n",
    "    se_df['predict'] = mean_rs\n",
    "    \n",
    "    for id,negative,se_entity_logits,entity_list in se_df.values:\n",
    "        for entity in entity_list:\n",
    "            key = (str(id),entity)\n",
    "            if key not in  new_feature_samples:\n",
    "                new_feature_samples[key] = NewFeatureSample(id,entity,se_sentence=negative)\n",
    "            else:\n",
    "                new_feature_samples[key].se_sentence = negative\n",
    "        for entity_logit,entity in zip(se_entity_logits,entity_list):\n",
    "            key = (str(id),entity)\n",
    "            if key not in  new_feature_samples:\n",
    "                new_feature_samples[key] = NewFeatureSample(id,entity,se_entity==entity_logit)\n",
    "            else:\n",
    "                new_feature_samples[key].se_entity = entity_logit\n",
    "    mc_se_test_df = pd.DataFrame([sample.get_tuple() for sample in new_feature_samples.values()],columns=['id','entity','mc_predict','se_entity','se_sentence'])\n",
    "    return new_feature_samples,mc_se_test_df\n",
    "_,mc_se_test_df = make_mc_se_test_features()\n",
    "mc_se_test_df = mc_se_test_df.dropna()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "_,mc_se_df = make_mc_se_features()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "_,mc_df = make_mc_features()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>entity</th>\n",
       "      <th>mc_predict</th>\n",
       "      <th>se_entity</th>\n",
       "      <th>se_sentence</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>1267</td>\n",
       "      <td>资易贷（北京）金融信息服务有限公司</td>\n",
       "      <td>[-4.9175, -2.4626, 6.554]</td>\n",
       "      <td>[-4.284051895141602, 5.177525997161865]</td>\n",
       "      <td>[-5.652933597564697, 5.833582878112793]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>1267</td>\n",
       "      <td>小资钱包</td>\n",
       "      <td>[-5.2723, -2.2209, 6.5282]</td>\n",
       "      <td>[-4.356371879577637, 5.301202774047852]</td>\n",
       "      <td>[-5.652933597564697, 5.833582878112793]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>1267</td>\n",
       "      <td>恒丰银行</td>\n",
       "      <td>[-5.0347, 6.5708, -1.7464]</td>\n",
       "      <td>[3.0878849029541016, -3.0265259742736816]</td>\n",
       "      <td>[-5.652933597564697, 5.833582878112793]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>730</td>\n",
       "      <td>京东白条</td>\n",
       "      <td>[8.0109, -4.0164, -3.8582]</td>\n",
       "      <td>[5.7095465660095215, -5.585907459259033]</td>\n",
       "      <td>[6.109175205230713, -5.691971302032471]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>5</td>\n",
       "      <td>9966</td>\n",
       "      <td>网络小额贷款</td>\n",
       "      <td>[7.985, -3.8485, -4.0364]</td>\n",
       "      <td>[5.712749481201172, -6.114411354064941]</td>\n",
       "      <td>[5.938594818115234, -5.711514472961426]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>21489</td>\n",
       "      <td>9223</td>\n",
       "      <td>资易贷（北京）金融信息服务有限公司</td>\n",
       "      <td>[-4.5641, -2.4793, 6.5552]</td>\n",
       "      <td>[-4.347775459289551, 5.041008472442627]</td>\n",
       "      <td>[-5.523403167724609, 5.714493751525879]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>21490</td>\n",
       "      <td>9223</td>\n",
       "      <td>小资钱包</td>\n",
       "      <td>[-4.4554, -2.6234, 6.5931]</td>\n",
       "      <td>[-4.36795711517334, 5.213492393493652]</td>\n",
       "      <td>[-5.523403167724609, 5.714493751525879]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>21492</td>\n",
       "      <td>12874</td>\n",
       "      <td>软银</td>\n",
       "      <td>[-4.6111, -2.2138, 6.4147]</td>\n",
       "      <td>[-5.987604141235352, 5.552385330200195]</td>\n",
       "      <td>[-5.774433135986328, 6.579286575317383]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>21494</td>\n",
       "      <td>12874</td>\n",
       "      <td>宜贷网</td>\n",
       "      <td>[-4.4034, -2.4136, 6.4491]</td>\n",
       "      <td>[-5.640784740447998, 4.967833995819092]</td>\n",
       "      <td>[-5.774433135986328, 6.579286575317383]</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>21498</td>\n",
       "      <td>3593</td>\n",
       "      <td>京易商通科技股份有限公司</td>\n",
       "      <td>[-4.926, -1.9056, 6.113]</td>\n",
       "      <td>[-4.30443811416626, 4.3878068923950195]</td>\n",
       "      <td>[-6.4133620262146, 6.590082168579102]</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>18393 rows × 5 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "          id             entity                  mc_predict  \\\n",
       "0       1267  资易贷（北京）金融信息服务有限公司   [-4.9175, -2.4626, 6.554]   \n",
       "1       1267               小资钱包  [-5.2723, -2.2209, 6.5282]   \n",
       "2       1267               恒丰银行  [-5.0347, 6.5708, -1.7464]   \n",
       "4        730               京东白条  [8.0109, -4.0164, -3.8582]   \n",
       "5       9966             网络小额贷款   [7.985, -3.8485, -4.0364]   \n",
       "...      ...                ...                         ...   \n",
       "21489   9223  资易贷（北京）金融信息服务有限公司  [-4.5641, -2.4793, 6.5552]   \n",
       "21490   9223               小资钱包  [-4.4554, -2.6234, 6.5931]   \n",
       "21492  12874                 软银  [-4.6111, -2.2138, 6.4147]   \n",
       "21494  12874                宜贷网  [-4.4034, -2.4136, 6.4491]   \n",
       "21498   3593       京易商通科技股份有限公司    [-4.926, -1.9056, 6.113]   \n",
       "\n",
       "                                       se_entity  \\\n",
       "0        [-4.284051895141602, 5.177525997161865]   \n",
       "1        [-4.356371879577637, 5.301202774047852]   \n",
       "2      [3.0878849029541016, -3.0265259742736816]   \n",
       "4       [5.7095465660095215, -5.585907459259033]   \n",
       "5        [5.712749481201172, -6.114411354064941]   \n",
       "...                                          ...   \n",
       "21489    [-4.347775459289551, 5.041008472442627]   \n",
       "21490     [-4.36795711517334, 5.213492393493652]   \n",
       "21492    [-5.987604141235352, 5.552385330200195]   \n",
       "21494    [-5.640784740447998, 4.967833995819092]   \n",
       "21498    [-4.30443811416626, 4.3878068923950195]   \n",
       "\n",
       "                                   se_sentence  \n",
       "0      [-5.652933597564697, 5.833582878112793]  \n",
       "1      [-5.652933597564697, 5.833582878112793]  \n",
       "2      [-5.652933597564697, 5.833582878112793]  \n",
       "4      [6.109175205230713, -5.691971302032471]  \n",
       "5      [5.938594818115234, -5.711514472961426]  \n",
       "...                                        ...  \n",
       "21489  [-5.523403167724609, 5.714493751525879]  \n",
       "21490  [-5.523403167724609, 5.714493751525879]  \n",
       "21492  [-5.774433135986328, 6.579286575317383]  \n",
       "21494  [-5.774433135986328, 6.579286575317383]  \n",
       "21498    [-6.4133620262146, 6.590082168579102]  \n",
       "\n",
       "[18393 rows x 5 columns]"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mc_se_df = mc_se_df.dropna()\n",
    "mc_se_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = load_basic_dataset('train')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "#根据处理得到的mc_se_df制作新的数据集\n",
    "features = mc_se_df[['mc_predict','se_entity','se_sentence']].apply(lambda x:x[0]+x[1]+x[2],axis=1)\n",
    "features = np.array(features.values.tolist())\n",
    "#id,entity list\n",
    "indexes = list(zip(mc_se_df['id'].values,mc_se_df['entity'].values))\n",
    "#labels\n",
    "labels = [(index[1] in str(df[df['id'] ==int(index[0]) ]['key_entity'].values[0]).split(';')) for index in indexes]\n",
    "sentiment_labels = [df[df['id'] ==int(index[0]) ]['negative'].values[0] for index in indexes]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# split_index =int( 0.8*len(features))\n",
    "# val_features = features[split_index:]\n",
    "# val_indexes = indexes[split_index:]\n",
    "# features = features[:split_index]\n",
    "# indexes = indexes[:split_index]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle as pkl\n",
    "with open('round2/tmp/two_model_features.pkl','wb') as f:\n",
    "    pkl.dump((mc_se_df,mc_se_test_df),f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_features = mc_se_test_df[['mc_predict','se_entity','se_sentence']].apply(lambda x:x[0]+x[1]+x[2],axis=1)\n",
    "test_features = np.array(test_features.values.tolist())\n",
    "test_indexes =  list(zip(mc_se_test_df['id'].values,mc_se_test_df['entity'].values))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-2.104490210049705e-16\n",
      "1.0000000000000024\n",
      "0.9358695652173913\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "      Benign       0.95      0.89      0.92       785\n",
      "   Malignant       0.92      0.97      0.95      1055\n",
      "\n",
      "    accuracy                           0.94      1840\n",
      "   macro avg       0.94      0.93      0.93      1840\n",
      "weighted avg       0.94      0.94      0.94      1840\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/njuciairs/anaconda3/envs/tftorch/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n",
      "  FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "#拆分训练集和测试集（train_test_split是存在与sklearn中的函数）\n",
    "X_train,X_test,y_train,y_test = train_test_split(features,labels,train_size=0.9)\n",
    "#train为训练数据,test为测试数据,examDf为源数据,train_size 规定了训练数据的占比\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "sc = StandardScaler()\n",
    "sc.fit(X_train)\n",
    "X_train_std = sc.transform(X_train)\n",
    "X_test_std = sc.transform(X_test)\n",
    "x_inferece_std = sc.transform(test_features)\n",
    "\n",
    "print (np.mean(X_train_std))\n",
    "print (np.var(X_train_std))\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "#from sklearn.ensemble import  AdaBoostClassifier as LogisticRegression\n",
    "lr = LogisticRegression()\n",
    "lr.fit(X_train_std,y_train)\n",
    "from sklearn.metrics import *\n",
    "print(lr.score(X_test_std,y_test))\n",
    "y_result = lr.predict(X_test_std)\n",
    "print(classification_report(y_test,y_result,target_names=['Benign','Malignant']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict per-entity labels for the inference set and aggregate them\n",
    "# into one row per document id.\n",
    "final_rs = lr.predict(x_inferece_std)\n",
    "final_rs = [index + (predict_class,) for index, predict_class in zip(test_indexes, final_rs)]\n",
    "entities_by_id = {}\n",
    "for doc_id, entity, predicted in final_rs:\n",
    "    entities_by_id.setdefault(doc_id, [])\n",
    "    if predicted == True:\n",
    "        entities_by_id[doc_id].append(entity)\n",
    "rows = []\n",
    "for doc_id, kept in entities_by_id.items():\n",
    "    if kept:\n",
    "        rows.append((doc_id, 1, ';'.join(kept)))\n",
    "    else:\n",
    "        rows.append((doc_id, 0, np.nan))\n",
    "rs_df = pd.DataFrame(rows, columns=['id','negative','key_entity'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "#去重：把更短的去掉\n",
    "import numpy as np\n",
    "def has_longer_entity(entity_str):\n",
    "    \"\"\"\n",
    "    确定字符串里是否有重复实体\n",
    "    :param entity_str:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if not isinstance(entity_str, str):\n",
    "        return False\n",
    "    entities = entity_str.split(';')\n",
    "    states = np.ones(len(entities))\n",
    "    for i, e in enumerate(entities):\n",
    "        for p in entities:\n",
    "            if e in p and len(e) < len(p):\n",
    "                return True\n",
    "    return False\n",
    "def get_preprocessed_entities(title, text, entities, max_text_length, entity_start_char, entity_end_char):\n",
    "    \"\"\"\n",
    "    Collapse overlapping entity mentions to their longest span in the text.\n",
    "    Finds every entity of the ';'-joined `entities` inside the cleaned,\n",
    "    truncated text, absorbs spans contained in longer spans, and returns\n",
    "    the ';'-joined set of the longest matched surface forms. Returns\n",
    "    `entities` unchanged when nothing matches; NaN (float) is passed through.\n",
    "    `entity_start_char` / `entity_end_char` are unused (kept for interface\n",
    "    compatibility with existing callers).\n",
    "    \"\"\"\n",
    "    if isinstance(entities, float):  # NaN key_entity\n",
    "        return entities\n",
    "\n",
    "    # Prepend the title unless the text already (roughly) starts with it.\n",
    "    if not text_startwith_title(title, text, 0.9):\n",
    "        text = title + '\\n' + text\n",
    "    text = text[:max_text_length]\n",
    "    # Blank out characters outside the CJK range / kept punctuation so\n",
    "    # matching is not thrown off by markup or Latin noise.\n",
    "    pattern = re.compile(r\"[^\\u4e00-\\u9fa5@?（）【】《》“”‘’#？()[];:；：。.、]\")\n",
    "    text = re.sub(pattern, ' ', text)\n",
    "    index_pairs = []\n",
    "    for entity in entities.split(';'):\n",
    "        # re.escape handles ALL regex metacharacters; the original escaped\n",
    "        # only ?, *, ( and ), so entities containing e.g. '[' or '+' broke.\n",
    "        index_pairs += [m.span() for m in re.finditer(re.escape(entity), text)]\n",
    "    index_pairs.sort()\n",
    "    if len(index_pairs) == 0:\n",
    "        return entities\n",
    "    # For each start offset record the min/max end offset seen.\n",
    "    # NOTE(review): min(prev_start, start) compares a previous END offset\n",
    "    # with a START offset; since end > start it collapses endmin to start.\n",
    "    # Preserved byte-for-byte from the original -- confirm this is intended.\n",
    "    index_range_dict = {}\n",
    "    for start, end in index_pairs:\n",
    "        if start not in index_range_dict:\n",
    "            index_range_dict[start] = (end, end)\n",
    "        else:\n",
    "            prev_start, prev_end = index_range_dict[start]\n",
    "            index_range_dict[start] = (min(prev_start, start), max(prev_end, end))\n",
    "    index_pairs = []\n",
    "    for k, (endmin, endmax) in index_range_dict.items():\n",
    "        index_pairs.append((k, endmin))\n",
    "        index_pairs.append((k, endmax))\n",
    "    index_pairs = list(set(index_pairs))\n",
    "    index_pairs.sort()\n",
    "    # Absorb each span into the widest span that contains it.\n",
    "    maxrange_index_pairs = set()\n",
    "    for start1, end1 in index_pairs:\n",
    "        for start2, end2 in index_pairs:\n",
    "            if (start1 >= start2) and (end1 <= end2):\n",
    "                start1, end1 = start2, end2\n",
    "        maxrange_index_pairs.add((start1, end1))\n",
    "    # Slice the surviving spans back out of the text. (The original also\n",
    "    # computed an unused `conflict_pairs` list and an unused `off`; removed.)\n",
    "    pair_entities = []\n",
    "    for start, end in maxrange_index_pairs:\n",
    "        pair_entities.append(text[start:end])\n",
    "    return ';'.join(set(pair_entities))\n",
    "def text_startwith_title(title,text,threshold):\n",
    "    if isinstance(title,float):\n",
    "        return True\n",
    "    title_set = set(title)\n",
    "    text_set = set(str(text)[:len(title)])\n",
    "    common = len(title_set& text_set)/len(title_set)\n",
    "    return common > threshold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the training split via the project data helper.\n",
    "train_df = load_basic_dataset('train')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep only rows whose entity list contains a shorter alias of a longer\n",
    "# entity. .copy() gives an independent frame so a later column assignment\n",
    "# does not raise SettingWithCopyWarning on a view of train_df.\n",
    "repetition_df = train_df[train_df['entity'].map(has_longer_entity)].copy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'易捷金融;宜贷网'"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity-check the span-merging on a single example row (id == 9).\n",
    "title, text, entities = repetition_df[repetition_df['id']==9][['title','text','entity']].values[0]\n",
    "get_preprocessed_entities(title, text, entities, max_text_length=400, entity_start_char='tmp', entity_end_char='tmp')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/njuciairs/anaconda3/envs/tftorch/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  \"\"\"Entry point for launching an IPython kernel.\n"
     ]
    }
   ],
   "source": [
    "# Recompute the de-duplicated (\"max range\") key entities for each row.\n",
    "# Named-column access replaces the fragile positional x[1]/x[2]/x[3] indexing.\n",
    "repetition_df['max_range_key'] = repetition_df.apply(\n",
    "    lambda x: get_preprocessed_entities(x['title'], x['text'], x['entity'],\n",
    "                                        max_text_length=800, entity_start_char='tmp', entity_end_char='tmp'),\n",
    "    axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>title</th>\n",
       "      <th>text</th>\n",
       "      <th>entity</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "      <th>max_range_key</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>8</td>\n",
       "      <td>NaN</td>\n",
       "      <td>旺旺贷跑路！深圳警方确定投资人被骗！</td>\n",
       "      <td>旺贷;旺旺贷</td>\n",
       "      <td>1</td>\n",
       "      <td>旺旺贷</td>\n",
       "      <td>旺旺贷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>9</td>\n",
       "      <td>宜贷网易捷金融岂止假标，供链贷诈骗1.03亿，阴阳合同，拿出借人的钱去线下放高利贷，收天价砍...</td>\n",
       "      <td>宜贷网易捷金融岂止假标，供链贷诈骗1.03亿，阴阳合同，拿出借人的钱去线下放高利贷，收天价砍...</td>\n",
       "      <td>宜贷网(沪);易捷金融;宜贷网</td>\n",
       "      <td>1</td>\n",
       "      <td>易捷金融;宜贷网</td>\n",
       "      <td>易捷金融;宜贷网</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>6</td>\n",
       "      <td>16</td>\n",
       "      <td>NaN</td>\n",
       "      <td>钱宝网张小雷因涉嫌集资诈骗罪被提起公诉未兑付本金达300亿元</td>\n",
       "      <td>钱宝网;钱宝</td>\n",
       "      <td>1</td>\n",
       "      <td>钱宝网</td>\n",
       "      <td>钱宝网</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7</td>\n",
       "      <td>23</td>\n",
       "      <td>NaN</td>\n",
       "      <td>◆6月30日至7月1日，昆明市中级人民法院公开审理昆明泛亚有色金属交易所股份有限公司等4家公...</td>\n",
       "      <td>泛亚有色金属交易所;昆明泛亚有色金属交易所股份有限公司</td>\n",
       "      <td>1</td>\n",
       "      <td>昆明泛亚有色金属交易所股份有限公司</td>\n",
       "      <td>昆明泛亚有色金属交易所股份有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9</td>\n",
       "      <td>29</td>\n",
       "      <td>????#新闻资讯[超话]##小资钱包涉嫌诈骗[超话]##扫黑除恶[超话]# (北京)资易贷...</td>\n",
       "      <td>????#新闻资讯[超话]##小资钱包涉嫌诈骗[超话]##扫黑除恶[超话]# (北京)资易贷...</td>\n",
       "      <td>小资钱包;资易贷金融信息服务有限公司;易贷金融</td>\n",
       "      <td>1</td>\n",
       "      <td>小资钱包;资易贷金融信息服务有限公司</td>\n",
       "      <td>资易贷金融信息服务有限公司;小资钱包</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7995</td>\n",
       "      <td>12996</td>\n",
       "      <td>NaN</td>\n",
       "      <td>】广州华军合创股东孙新财涉嫌传销和非法融资被通缉公司已注销</td>\n",
       "      <td>合创;华军合创</td>\n",
       "      <td>1</td>\n",
       "      <td>华军合创</td>\n",
       "      <td>华军合创</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7996</td>\n",
       "      <td>12997</td>\n",
       "      <td>NaN</td>\n",
       "      <td>&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;&amp;nbsp;6月29日，旺财猫提现困难</td>\n",
       "      <td>旺财猫;财猫</td>\n",
       "      <td>1</td>\n",
       "      <td>旺财猫</td>\n",
       "      <td>旺财猫</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7997</td>\n",
       "      <td>12998</td>\n",
       "      <td>NaN</td>\n",
       "      <td>91．惠卡世纪涉嫌非法集资的深圳惠卡世纪有限公司资金紧张，又遭遇投资人挤兑，公司竟然发布公告...</td>\n",
       "      <td>深圳惠卡世纪有限公司;惠卡世纪</td>\n",
       "      <td>1</td>\n",
       "      <td>惠卡世纪</td>\n",
       "      <td>惠卡世纪;深圳惠卡世纪有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7998</td>\n",
       "      <td>12999</td>\n",
       "      <td>NaN</td>\n",
       "      <td>平台曝光平台名称:君来投平台网址:http://www.junlt.com/曝光原因:逾期，...</td>\n",
       "      <td>君来投;来投</td>\n",
       "      <td>1</td>\n",
       "      <td>君来投</td>\n",
       "      <td>君来投</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>7999</td>\n",
       "      <td>13000</td>\n",
       "      <td>①爱钱进、点融、广信贷等7个平台出借人退出慢②陆金所代销产品逾期...</td>\n",
       "      <td>原标题：爱钱进、点融等7个平台出借人退出慢 虽然行业在近期趋于平稳，但很多平台债转退出慢的问...</td>\n",
       "      <td>爱钱进;点融网;人人聚财;钱牛牛;东方金钰;神州长城;陆金所;凯迪生态;厚本金融;广信贷;点...</td>\n",
       "      <td>1</td>\n",
       "      <td>神州长城;陆金所;爱钱进;凯迪生态;厚本金融;广信贷;人人聚财;点融;钱牛牛;龙力生物;东方...</td>\n",
       "      <td>厚本金融;陆金所;达人贷;广信贷;爱钱进;点融;钱牛牛;人人聚财</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>3170 rows × 7 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         id                                              title  \\\n",
       "3         8                                                NaN   \n",
       "4         9  宜贷网易捷金融岂止假标，供链贷诈骗1.03亿，阴阳合同，拿出借人的钱去线下放高利贷，收天价砍...   \n",
       "6        16                                                NaN   \n",
       "7        23                                                NaN   \n",
       "9        29  ????#新闻资讯[超话]##小资钱包涉嫌诈骗[超话]##扫黑除恶[超话]# (北京)资易贷...   \n",
       "...     ...                                                ...   \n",
       "7995  12996                                                NaN   \n",
       "7996  12997                                                NaN   \n",
       "7997  12998                                                NaN   \n",
       "7998  12999                                                NaN   \n",
       "7999  13000                ①爱钱进、点融、广信贷等7个平台出借人退出慢②陆金所代销产品逾期...   \n",
       "\n",
       "                                                   text  \\\n",
       "3                                    旺旺贷跑路！深圳警方确定投资人被骗！   \n",
       "4     宜贷网易捷金融岂止假标，供链贷诈骗1.03亿，阴阳合同，拿出借人的钱去线下放高利贷，收天价砍...   \n",
       "6                        钱宝网张小雷因涉嫌集资诈骗罪被提起公诉未兑付本金达300亿元   \n",
       "7     ◆6月30日至7月1日，昆明市中级人民法院公开审理昆明泛亚有色金属交易所股份有限公司等4家公...   \n",
       "9     ????#新闻资讯[超话]##小资钱包涉嫌诈骗[超话]##扫黑除恶[超话]# (北京)资易贷...   \n",
       "...                                                 ...   \n",
       "7995                      】广州华军合创股东孙新财涉嫌传销和非法融资被通缉公司已注销   \n",
       "7996        &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;6月29日，旺财猫提现困难   \n",
       "7997  91．惠卡世纪涉嫌非法集资的深圳惠卡世纪有限公司资金紧张，又遭遇投资人挤兑，公司竟然发布公告...   \n",
       "7998  平台曝光平台名称:君来投平台网址:http://www.junlt.com/曝光原因:逾期，...   \n",
       "7999  原标题：爱钱进、点融等7个平台出借人退出慢 虽然行业在近期趋于平稳，但很多平台债转退出慢的问...   \n",
       "\n",
       "                                                 entity  negative  \\\n",
       "3                                                旺贷;旺旺贷         1   \n",
       "4                                       宜贷网(沪);易捷金融;宜贷网         1   \n",
       "6                                                钱宝网;钱宝         1   \n",
       "7                           泛亚有色金属交易所;昆明泛亚有色金属交易所股份有限公司         1   \n",
       "9                               小资钱包;资易贷金融信息服务有限公司;易贷金融         1   \n",
       "...                                                 ...       ...   \n",
       "7995                                            合创;华军合创         1   \n",
       "7996                                             旺财猫;财猫         1   \n",
       "7997                                    深圳惠卡世纪有限公司;惠卡世纪         1   \n",
       "7998                                             君来投;来投         1   \n",
       "7999  爱钱进;点融网;人人聚财;钱牛牛;东方金钰;神州长城;陆金所;凯迪生态;厚本金融;广信贷;点...         1   \n",
       "\n",
       "                                             key_entity  \\\n",
       "3                                                   旺旺贷   \n",
       "4                                              易捷金融;宜贷网   \n",
       "6                                                   钱宝网   \n",
       "7                                     昆明泛亚有色金属交易所股份有限公司   \n",
       "9                                    小资钱包;资易贷金融信息服务有限公司   \n",
       "...                                                 ...   \n",
       "7995                                               华军合创   \n",
       "7996                                                旺财猫   \n",
       "7997                                               惠卡世纪   \n",
       "7998                                                君来投   \n",
       "7999  神州长城;陆金所;爱钱进;凯迪生态;厚本金融;广信贷;人人聚财;点融;钱牛牛;龙力生物;东方...   \n",
       "\n",
       "                         max_range_key  \n",
       "3                                  旺旺贷  \n",
       "4                             易捷金融;宜贷网  \n",
       "6                                  钱宝网  \n",
       "7                    昆明泛亚有色金属交易所股份有限公司  \n",
       "9                   资易贷金融信息服务有限公司;小资钱包  \n",
       "...                                ...  \n",
       "7995                              华军合创  \n",
       "7996                               旺财猫  \n",
       "7997                   惠卡世纪;深圳惠卡世纪有限公司  \n",
       "7998                               君来投  \n",
       "7999  厚本金融;陆金所;达人贷;广信贷;爱钱进;点融;钱牛牛;人人聚财  \n",
       "\n",
       "[3170 rows x 7 columns]"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the recomputed keys next to the labelled key_entity column.\n",
    "repetition_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "#去重：把更短的去掉\n",
    "import numpy as np\n",
    "def remove_short_entity_by_long(entity_str):\n",
    "    \"\"\"\n",
    "    除去key_entity中同一实体的较短名称\n",
    "    :param entity_str:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if not isinstance(entity_str, str):\n",
    "        return entity_str\n",
    "    entities = entity_str.split(';')\n",
    "    states = np.ones(len(entities))\n",
    "    for i, e in enumerate(entities):\n",
    "        for p in entities:\n",
    "            if e in p and len(e) < len(p):\n",
    "                print('removed %s by %s'%(e,p))\n",
    "                states[i] = 0\n",
    "    rs = []\n",
    "    for i, e in enumerate(entities):\n",
    "        if states[i] == 1:\n",
    "            rs.append(e)\n",
    "    rs = ';'.join(rs)\n",
    "    return rs\n",
    "\n",
    "# When True, a pair where BOTH aliases appear in key_entity keeps both.\n",
    "good_remove = False\n",
    "def get_trans_map():\n",
    "    \"\"\"\n",
    "    Build a lookup resolving pairs of overlapping entity aliases.\n",
    "    For every row of the training data and every pair (src, e) of entities\n",
    "    from that row's entity list where one contains the other, record which\n",
    "    of the two appears in the row's key_entity. Keys are 'a-b' / 'b-a'\n",
    "    strings; the value is the alias to keep.\n",
    "    NOTE(review): the loop below rebinds `srcs`/`dests` (shadowing the\n",
    "    Series of the same names), and `both_existed_log` is accumulated but\n",
    "    never returned -- presumably leftover debugging.\n",
    "    \"\"\"\n",
    "    from data_utils.basic_data import load_basic_dataset\n",
    "    train_df = load_basic_dataset('train')\n",
    "    srcs = train_df['entity'].map(lambda x: list(str(x).split(';')))\n",
    "    dests = train_df['key_entity'].map(lambda x: list(str(x).split(';')))\n",
    "    trans_map = {}\n",
    "    both_existed_log = ''\n",
    "    for srcs, dests in list(zip(srcs, dests)):\n",
    "        for src in srcs:\n",
    "            if src == '':\n",
    "                continue\n",
    "            for e in srcs:\n",
    "                if e == '':\n",
    "                    continue\n",
    "                # Only substring-related, distinct pairs get a mapping.\n",
    "                if (src in e or e in src) and e != src:\n",
    "                    if src in dests:\n",
    "                        trans_map[src + '-' + e] = src\n",
    "                        trans_map[e + '-' + src] = src\n",
    "                    # If e is also in dests this overwrites the entry above.\n",
    "                    if e in dests:\n",
    "                        trans_map[src + '-' + e] = e\n",
    "                        trans_map[e + '-' + src] = e\n",
    "                    if good_remove:\n",
    "                        if src in dests and e in dests:\n",
    "                            trans_map[src + '-' + e] = e + ';' + src\n",
    "                            trans_map[e + '-' + src] = e + ';' + src\n",
    "                            both_existed_log += e + ';' + src\n",
    "                            print('both existsed:', e + ';' + src)\n",
    "    return trans_map\n",
    "def trans_keys(trans_map, entity_str):\n",
    "    if not isinstance(entity_str, str):\n",
    "        return entity_str\n",
    "    es = list(filter(lambda x: str(x).strip() != '', entity_str.split(';')))\n",
    "    rs = set()\n",
    "    for e in es:\n",
    "        finded = False\n",
    "        for y in es:\n",
    "            if e + '-' + y in trans_map and e != y:\n",
    "                rs.add(trans_map[e + '-' + y])\n",
    "                finded = True\n",
    "        if not finded:\n",
    "            rs.add(e)\n",
    "    if len(rs) > 0:\n",
    "        rs = ';'.join(list(rs))\n",
    "    else:\n",
    "        rs = np.nan\n",
    "    return rs\n",
    "# Build the alias-resolution map once from the training data.\n",
    "trans_map = get_trans_map()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sort both frames by id so predictions line up with the raw test rows.\n",
    "test_df = load_basic_dataset('test').sort_values(['id'])\n",
    "rs_df = rs_df.sort_values(['id'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach title/text/entity columns to the predictions (default inner join on id).\n",
    "rs_df = rs_df.merge(test_df,on=['id'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "      <th>title</th>\n",
       "      <th>text</th>\n",
       "      <th>entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>13001</td>\n",
       "      <td>1</td>\n",
       "      <td>北京华赢凯来资产管理有限公司;华赢凯来</td>\n",
       "      <td>NaN</td>\n",
       "      <td>北京华赢凯来资产管理有限公司（下称‘华赢凯来’）因涉嫌非法集资，已依法立案侦查，目前包括白某...</td>\n",
       "      <td>华赢凯来;北京华赢凯来资产管理有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>13002</td>\n",
       "      <td>1</td>\n",
       "      <td>米袋计划</td>\n",
       "      <td>NaN</td>\n",
       "      <td>26日，有投资者爆料称，米袋计划跑路了</td>\n",
       "      <td>米袋;米袋计划</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>13003</td>\n",
       "      <td>1</td>\n",
       "      <td>易通贷</td>\n",
       "      <td>NaN</td>\n",
       "      <td>平台曝光平台名称:易通贷平台网址:http://www.etongdai.com曝光原因:无...</td>\n",
       "      <td>易通贷;易通;通贷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>13004</td>\n",
       "      <td>1</td>\n",
       "      <td>e租宝</td>\n",
       "      <td>NaN</td>\n",
       "      <td>e租宝事件曝光自融猫腻融资租赁类p2p放缓扩张</td>\n",
       "      <td>租宝;e租宝</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>13005</td>\n",
       "      <td>1</td>\n",
       "      <td>宜湃网;宜贷网</td>\n",
       "      <td>????成都怎么了？#成都七中#，学生吃“猪食”，成都！#宜贷网# 宜贷网，4亿，成都！#宜...</td>\n",
       "      <td>????成都怎么了？#成都七中#，学生吃猪食，成都！#宜贷网# 宜贷网，40亿，成都！#宜湃...</td>\n",
       "      <td>宜贷网(沪);宜贷网;宜湃网</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "      id  negative           key_entity  \\\n",
       "0  13001         1  北京华赢凯来资产管理有限公司;华赢凯来   \n",
       "1  13002         1                 米袋计划   \n",
       "2  13003         1                  易通贷   \n",
       "3  13004         1                  e租宝   \n",
       "4  13005         1              宜湃网;宜贷网   \n",
       "\n",
       "                                               title  \\\n",
       "0                                                NaN   \n",
       "1                                                NaN   \n",
       "2                                                NaN   \n",
       "3                                                NaN   \n",
       "4  ????成都怎么了？#成都七中#，学生吃“猪食”，成都！#宜贷网# 宜贷网，4亿，成都！#宜...   \n",
       "\n",
       "                                                text               entity  \n",
       "0  北京华赢凯来资产管理有限公司（下称‘华赢凯来’）因涉嫌非法集资，已依法立案侦查，目前包括白某...  华赢凯来;北京华赢凯来资产管理有限公司  \n",
       "1                                26日，有投资者爆料称，米袋计划跑路了              米袋;米袋计划  \n",
       "2  平台曝光平台名称:易通贷平台网址:http://www.etongdai.com曝光原因:无...            易通贷;易通;通贷  \n",
       "3                            e租宝事件曝光自融猫腻融资租赁类p2p放缓扩张               租宝;e租宝  \n",
       "4  ????成都怎么了？#成都七中#，学生吃猪食，成都！#宜贷网# 宜贷网，40亿，成都！#宜湃...       宜贷网(沪);宜贷网;宜湃网  "
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Quick look at the merged prediction frame.\n",
    "rs_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Post-process predicted key entities: collapse shorter aliases into the\n",
    "# longest matching span in the text. Named-column access replaces the\n",
    "# fragile positional x[0]/x[1]/x[2] indexing.\n",
    "# NOTE(review): str() turns a NaN title into the string 'nan', so the\n",
    "# float check inside text_startwith_title never triggers here -- confirm.\n",
    "# Alternative (alias-map based) post-processing:\n",
    "# rs_df['key_entity'] = rs_df['key_entity'].map(lambda x: trans_keys(trans_map,x)).map(remove_short_entity_by_long)\n",
    "rs_df['key_entity'] = rs_df[['title','text','key_entity']].apply(\n",
    "    lambda x: get_preprocessed_entities(str(x['title']), str(x['text']), x['key_entity'],\n",
    "                                        max_text_length=400, entity_start_char='tmp', entity_end_char='tmp'),\n",
    "    axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "      <th>title</th>\n",
       "      <th>text</th>\n",
       "      <th>entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>13001</td>\n",
       "      <td>1</td>\n",
       "      <td>华赢凯来;北京华赢凯来资产管理有限公司</td>\n",
       "      <td>NaN</td>\n",
       "      <td>北京华赢凯来资产管理有限公司（下称‘华赢凯来’）因涉嫌非法集资，已依法立案侦查，目前包括白某...</td>\n",
       "      <td>华赢凯来;北京华赢凯来资产管理有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>13002</td>\n",
       "      <td>1</td>\n",
       "      <td>米袋计划</td>\n",
       "      <td>NaN</td>\n",
       "      <td>26日，有投资者爆料称，米袋计划跑路了</td>\n",
       "      <td>米袋;米袋计划</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>13003</td>\n",
       "      <td>1</td>\n",
       "      <td>易通贷</td>\n",
       "      <td>NaN</td>\n",
       "      <td>平台曝光平台名称:易通贷平台网址:http://www.etongdai.com曝光原因:无...</td>\n",
       "      <td>易通贷;易通;通贷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>13004</td>\n",
       "      <td>1</td>\n",
       "      <td>e租宝</td>\n",
       "      <td>NaN</td>\n",
       "      <td>e租宝事件曝光自融猫腻融资租赁类p2p放缓扩张</td>\n",
       "      <td>租宝;e租宝</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>13005</td>\n",
       "      <td>1</td>\n",
       "      <td>宜湃网;宜贷网</td>\n",
       "      <td>????成都怎么了？#成都七中#，学生吃“猪食”，成都！#宜贷网# 宜贷网，4亿，成都！#宜...</td>\n",
       "      <td>????成都怎么了？#成都七中#，学生吃猪食，成都！#宜贷网# 宜贷网，40亿，成都！#宜湃...</td>\n",
       "      <td>宜贷网(沪);宜贷网;宜湃网</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9992</td>\n",
       "      <td>22996</td>\n",
       "      <td>1</td>\n",
       "      <td>金银岛</td>\n",
       "      <td>踩雷金银岛的不仅是P2P金联储出借人,还有国投瑞银、九州证券、财通资管等一众知名金融机构</td>\n",
       "      <td>&lt;p style=background-color: transparent;box-si...</td>\n",
       "      <td>金银岛;九州瀚海;金联储;九州证券;九鼎集团</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9993</td>\n",
       "      <td>22997</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>P2P网贷发展稳中向好：微贷网、极光金融、人人贷、有利网</td>\n",
       "      <td>各种互联网项目，新手可操作，几乎都是0门槛 自从进入2019年以来，P2P网贷行业一直...</td>\n",
       "      <td>微贷网;小牛在线;极光金融;人人贷;宜人贷;轻易贷;有利网</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9994</td>\n",
       "      <td>22998</td>\n",
       "      <td>1</td>\n",
       "      <td>上海聚胜投资管理有限公司;聚胜财富</td>\n",
       "      <td>NaN</td>\n",
       "      <td>2018年7月9日，上海市公安局虹口分局根据群众报案，对上海聚胜投资管理有限公司(聚胜财富理...</td>\n",
       "      <td>聚胜财富;上海聚胜投资管理有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9995</td>\n",
       "      <td>22999</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>????所有的标准都是京东白条提现·信用卡可以套现吗·京东白条提现为了不爱的人准备的，当你遇...</td>\n",
       "      <td>????所有的标准都是京东白条提现·信用卡可以套现吗·京东白条提现为了不爱的人准备的，当你遇...</td>\n",
       "      <td>京东白条;京东金融</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9996</td>\n",
       "      <td>23000</td>\n",
       "      <td>1</td>\n",
       "      <td>宜贷网</td>\n",
       "      <td>NaN</td>\n",
       "      <td>宜贷网业务多样，爆雷前有房贷、供链贷、车贷等业务，非常不专注，这导致风控容易出现大问题，供链...</td>\n",
       "      <td>有房;贷网;宜贷网</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>9997 rows × 6 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         id  negative           key_entity  \\\n",
       "0     13001         1  华赢凯来;北京华赢凯来资产管理有限公司   \n",
       "1     13002         1                 米袋计划   \n",
       "2     13003         1                  易通贷   \n",
       "3     13004         1                  e租宝   \n",
       "4     13005         1              宜湃网;宜贷网   \n",
       "...     ...       ...                  ...   \n",
       "9992  22996         1                  金银岛   \n",
       "9993  22997         0                  NaN   \n",
       "9994  22998         1    上海聚胜投资管理有限公司;聚胜财富   \n",
       "9995  22999         0                  NaN   \n",
       "9996  23000         1                  宜贷网   \n",
       "\n",
       "                                                  title  \\\n",
       "0                                                   NaN   \n",
       "1                                                   NaN   \n",
       "2                                                   NaN   \n",
       "3                                                   NaN   \n",
       "4     ????成都怎么了？#成都七中#，学生吃“猪食”，成都！#宜贷网# 宜贷网，4亿，成都！#宜...   \n",
       "...                                                 ...   \n",
       "9992       踩雷金银岛的不仅是P2P金联储出借人,还有国投瑞银、九州证券、财通资管等一众知名金融机构   \n",
       "9993                       P2P网贷发展稳中向好：微贷网、极光金融、人人贷、有利网   \n",
       "9994                                                NaN   \n",
       "9995  ????所有的标准都是京东白条提现·信用卡可以套现吗·京东白条提现为了不爱的人准备的，当你遇...   \n",
       "9996                                                NaN   \n",
       "\n",
       "                                                   text  \\\n",
       "0     北京华赢凯来资产管理有限公司（下称‘华赢凯来’）因涉嫌非法集资，已依法立案侦查，目前包括白某...   \n",
       "1                                   26日，有投资者爆料称，米袋计划跑路了   \n",
       "2     平台曝光平台名称:易通贷平台网址:http://www.etongdai.com曝光原因:无...   \n",
       "3                               e租宝事件曝光自融猫腻融资租赁类p2p放缓扩张   \n",
       "4     ????成都怎么了？#成都七中#，学生吃猪食，成都！#宜贷网# 宜贷网，40亿，成都！#宜湃...   \n",
       "...                                                 ...   \n",
       "9992   <p style=background-color: transparent;box-si...   \n",
       "9993  　  各种互联网项目，新手可操作，几乎都是0门槛 自从进入2019年以来，P2P网贷行业一直...   \n",
       "9994  2018年7月9日，上海市公安局虹口分局根据群众报案，对上海聚胜投资管理有限公司(聚胜财富理...   \n",
       "9995  ????所有的标准都是京东白条提现·信用卡可以套现吗·京东白条提现为了不爱的人准备的，当你遇...   \n",
       "9996  宜贷网业务多样，爆雷前有房贷、供链贷、车贷等业务，非常不专注，这导致风控容易出现大问题，供链...   \n",
       "\n",
       "                             entity  \n",
       "0               华赢凯来;北京华赢凯来资产管理有限公司  \n",
       "1                           米袋;米袋计划  \n",
       "2                         易通贷;易通;通贷  \n",
       "3                            租宝;e租宝  \n",
       "4                    宜贷网(沪);宜贷网;宜湃网  \n",
       "...                             ...  \n",
       "9992         金银岛;九州瀚海;金联储;九州证券;九鼎集团  \n",
       "9993  微贷网;小牛在线;极光金融;人人贷;宜人贷;轻易贷;有利网  \n",
       "9994              聚胜财富;上海聚胜投资管理有限公司  \n",
       "9995                      京东白条;京东金融  \n",
       "9996                      有房;贷网;宜贷网  \n",
       "\n",
       "[9997 rows x 6 columns]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the merged prediction frame (9997 rows x 6 columns per the output above).\n",
    "rs_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "# (disabled) earlier snapshot of the stacked result; kept for provenance\n",
    "# rs_df.to_csv('round2/tmp/stack_on_twomodels_保留bothexisted.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-3.6698078053665883e-16\n",
      "0.9999999999999984\n",
      "0.9739130434782609\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "      Benign       0.96      0.95      0.96       544\n",
      "   Malignant       0.98      0.98      0.98      1296\n",
      "\n",
      "    accuracy                           0.97      1840\n",
      "   macro avg       0.97      0.97      0.97      1840\n",
      "weighted avg       0.97      0.97      0.97      1840\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/njuciairs/anaconda3/envs/tftorch/lib/python3.6/site-packages/sklearn/linear_model/logistic.py:432: FutureWarning: Default solver will be changed to 'lbfgs' in 0.22. Specify a solver to silence this warning.\n",
      "  FutureWarning)\n"
     ]
    }
   ],
   "source": [
    "# Train and evaluate a logistic-regression sentiment classifier on the\n",
    "# stacked features, and standardise the round-2 inference features with\n",
    "# the same fitted scaler for the prediction cell below.\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import classification_report\n",
    "\n",
    "# Hold out 10% for evaluation; a fixed seed makes the split (and the\n",
    "# reported scores) reproducible under Restart & Run All.\n",
    "X_train, X_test, y_train, y_test = train_test_split(\n",
    "    features, sentiment_labels, train_size=0.9, random_state=42)\n",
    "\n",
    "# Fit the scaler on the training split only, then apply the identical\n",
    "# transform to the held-out and inference sets (no train/test leakage).\n",
    "sc = StandardScaler()\n",
    "sc.fit(X_train)\n",
    "X_train_std = sc.transform(X_train)\n",
    "X_test_std = sc.transform(X_test)\n",
    "x_inferece_std = sc.transform(test_features)  # name kept as-is (typo): later cells reference it\n",
    "\n",
    "# Sanity check: standardised training data should have mean ~0, variance ~1.\n",
    "print(np.mean(X_train_std))\n",
    "print(np.var(X_train_std))\n",
    "\n",
    "# Explicit solver silences the sklearn 0.22 FutureWarning seen in stderr.\n",
    "lr = LogisticRegression(solver='lbfgs')\n",
    "lr.fit(X_train_std, y_train)\n",
    "print(lr.score(X_test_std, y_test))\n",
    "y_result = lr.predict(X_test_std)\n",
    "print(classification_report(y_test, y_result, target_names=['Benign', 'Malignant']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict a sentiment for every (id, entity) inference pair, then reduce to\n",
    "# one label per document id by majority vote (mean >= 0.5 -> negative = 1).\n",
    "final_rs = lr.predict(x_inferece_std)\n",
    "# Attach each predicted class to its (id, entity) index tuple.\n",
    "final_rs = [index+(predict_class,) for index,predict_class in zip(test_indexes,final_rs)]\n",
    "senti_dict ={}\n",
    "for id,entity,senti in final_rs:\n",
    "    if id not in senti_dict:\n",
    "        senti_dict[id] = []\n",
    "    senti_dict[id].append(senti)\n",
    "# Collapse the per-entity votes into a single 0/1 document label.\n",
    "for id,sentis in senti_dict.items():\n",
    "    senti_dict[id] = int(np.mean(sentis) >= 0.5)\n",
    "# Rebuild the submission rows, logging wherever the vote overrides the\n",
    "# previous `negative` value from rs_df.\n",
    "items = []\n",
    "for id,negative,key_entity in rs_df[['id','negative','key_entity']].values:\n",
    "    items.append((id,senti_dict[id],key_entity))\n",
    "    if negative != senti_dict[id]:\n",
    "        print(\"convert %d to %d \"%(negative,senti_dict[id]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the result frame and reconcile polarity with entities:\n",
    "# a row predicted non-negative must not carry key entities, so blank them.\n",
    "rs_df = pd.DataFrame(items,columns=['id','negative','key_entity'])\n",
    "positive_sentences_df = rs_df[rs_df['negative']==0]\n",
    "negative_sentences_df = rs_df[rs_df['negative']==1]\n",
    "# key_entity is either a ';'-joined string or NaN (a float), so the\n",
    "# isinstance(x, float) tests below act as NaN checks.\n",
    "positive_have_negentity_df = positive_sentences_df[positive_sentences_df['key_entity'].map(lambda x:not isinstance(x,float))]\n",
    "negative_have_none_entity_df = negative_sentences_df[negative_sentences_df['key_entity'].map(lambda x:isinstance(x,float))]\n",
    "rs_df.loc[rs_df['id'].isin(positive_have_negentity_df['id'].values),'key_entity'] = np.nan"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>13001</td>\n",
       "      <td>1</td>\n",
       "      <td>华赢凯来;北京华赢凯来资产管理有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>13002</td>\n",
       "      <td>1</td>\n",
       "      <td>米袋计划</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>13003</td>\n",
       "      <td>1</td>\n",
       "      <td>易通贷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>13004</td>\n",
       "      <td>1</td>\n",
       "      <td>e租宝</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>13005</td>\n",
       "      <td>1</td>\n",
       "      <td>宜湃网;宜贷网</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9992</td>\n",
       "      <td>22996</td>\n",
       "      <td>1</td>\n",
       "      <td>金银岛</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9993</td>\n",
       "      <td>22997</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9994</td>\n",
       "      <td>22998</td>\n",
       "      <td>1</td>\n",
       "      <td>上海聚胜投资管理有限公司;聚胜财富</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9995</td>\n",
       "      <td>22999</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>9996</td>\n",
       "      <td>23000</td>\n",
       "      <td>1</td>\n",
       "      <td>宜贷网</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>9997 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "         id  negative           key_entity\n",
       "0     13001         1  华赢凯来;北京华赢凯来资产管理有限公司\n",
       "1     13002         1                 米袋计划\n",
       "2     13003         1                  易通贷\n",
       "3     13004         1                  e租宝\n",
       "4     13005         1              宜湃网;宜贷网\n",
       "...     ...       ...                  ...\n",
       "9992  22996         1                  金银岛\n",
       "9993  22997         0                  NaN\n",
       "9994  22998         1    上海聚胜投资管理有限公司;聚胜财富\n",
       "9995  22999         0                  NaN\n",
       "9996  23000         1                  宜贷网\n",
       "\n",
       "[9997 rows x 3 columns]"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Final submission frame: id / negative / key_entity.\n",
    "rs_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "removed 华赢凯来 by 北京华赢凯来资产管理有限公司\n",
      "removed 团贷 by 团贷网\n",
      "removed 温商贷 by 浙江温商贷\n",
      "removed 融宝网 by 陕西融宝网信息技术有限公司\n",
      "removed 东方恒正 by 北京东方恒正科贸有限公司\n",
      "removed 红岭创投 by ????红岭创投\n",
      "removed 汇凯鑫 by 深圳市汇凯鑫投资管理有限公司\n",
      "removed 汇凯鑫 by 汇凯鑫创投\n",
      "removed 中能国投 by 北京中能国投投资管理有限公司\n",
      "removed 国美 by 国美在线金融\n",
      "removed 小沙僧理财 by 上海宜勇资产管理有限公司（小沙僧理财）\n",
      "removed 微积金 by 微积金互联网金融服务（上海）有限公司（微积金）\n",
      "removed 乐视 by 乐视金融\n",
      "removed 宜信 by 宜信普惠信息咨询有限公司\n",
      "removed 宜信 by 宜信惠民投资管理\n",
      "removed 国厚资产 by 深圳市国厚资产管理有限公司\n",
      "removed 发财猪 by 深圳发财猪金融服务有限公司\n",
      "removed 华夏万家 by 华夏万家金融服务外包有限公司\n",
      "removed 新华金典 by 新华金典财富管理股份有限公司\n",
      "removed 小资钱包 by 海淀区小资钱包公司(资易贷平台)\n",
      "removed 小牛投资 by 深圳市小牛投资管理有限公司\n",
      "removed 好易贷 by 神州好易贷\n",
      "removed 壹佰金融 by 深圳壹佰金融服务有限公司\n",
      "removed 荷包金融 by 深圳市荷包金融理财公司\n",
      "removed 小资钱包 by 海淀区小资钱包公司(资易贷平台)\n",
      "removed 永利宝 by 上海永利宝\n",
      "removed 金钰珠宝 by 深圳市东方金钰珠宝实业有限公司\n",
      "removed 达飞云贷 by ????达飞云贷\n",
      "removed 银票网 by 上海鸿翔银票网互联网金融信息服务有限公司\n",
      "removed 智融会 by ????智融会\n",
      "removed 信和财富 by ????信和财富\n",
      "removed 善林 by 善林金融\n",
      "removed 口贷网 by ????口贷网\n",
      "removed 贝格富 by 海南贝格富科技有限公司\n",
      "removed 坚固环球 by MS坚固环球\n",
      "removed 牛牛通宝 by 圳市牛牛通宝科技文化产业有限公司\n",
      "removed 菠菜理财 by 菠菜理财金融平台\n",
      "removed 盐商集团 by 上海盐商集团有限公司\n",
      "removed 杉德 by 杉德支付网络服务发展有限公司\n",
      "removed 中银投资 by 大同市南郊区中银投资担保有限公司\n",
      "removed 股易贷 by 股易贷金融网\n",
      "removed 东方金钰 by 深圳东方金钰珠宝\n",
      "removed 草根 by 草根投资\n",
      "removed 及贷 by 及贷分期\n",
      "removed 金钰珠宝 by 深圳市东方金钰珠宝实业有限公司\n",
      "removed 抱财网 by  抱财网\n",
      "removed 易极付 by ????重庆易极付公司\n",
      "removed 宜贷 by 宜贷网\n",
      "removed 京东 by 京东商城\n",
      "removed 安盈智选3期 by 安盈智选3期债权资产理财计划\n",
      "removed 囿文美丽汇 by 北京囿文美丽汇健康管理有限责任公司\n",
      "removed 中润天下 by 中润天下企业管理中心\n",
      "removed 京东 by 京东商城\n",
      "removed 华夏万家 by 华夏万家金服\n",
      "removed 金储宝 by 杭州金储宝\n",
      "removed 投之家 by 深圳投之家金融信息服务有限公司\n",
      "removed 利民网 by 关于深圳利民网金融信息服务有限公司\n",
      "removed 温商贷 by 浙江温商贷\n",
      "removed 天风天财 by 天风天财金融信息服务有限公司\n",
      "removed 上海捷量 by 上海捷量投资管理有限公司\n",
      "removed 小沙僧理财 by 上海宜勇资产管理有限公司（小沙僧理财）\n",
      "removed 微积金 by 微积金互联网金融服务（上海）有限公司（微积金）\n",
      "removed 平安金融 by 平安金融公司\n",
      "removed 壹佰金融 by 深圳壹佰金融服务有限公司\n",
      "removed 卡卡贷 by ????卡卡贷\n",
      "removed 软银 by 软银中国\n",
      "removed 菠菜理财 by 菠菜理财 ?????\n",
      "removed 丰盛金 by 丰盛金融投资有限公司\n",
      "removed 万科 by 万科借鹏金\n",
      "removed 宜贷 by 宜贷网\n",
      "removed 金豆包 by  金豆包\n",
      "removed 中信资本 by 中信资本深圳资产管理有限公司\n",
      "removed 黎明国际 by 黎明国际工贸\n",
      "removed 陨石地带 by 北京陨石地带信息技术有限公司\n",
      "removed 贝格富 by 海南贝格富科技有限公司\n",
      "removed 中融嘉盛 by 中融嘉盛投资管理有限公司\n",
      "removed 富乐投资 by 海南富乐投资有限公司\n",
      "removed 商易贷 by 河南商易贷电子商务有限公司\n",
      "removed 浙联储 by 宁波浙联储金融服务外包有限公司\n",
      "removed 中润天下 by 中润天下企业管理中心\n",
      "removed 微创金服 by 小野鸡微创金服\n",
      "removed 泛亚 by 泛亚有色金属交易所股份有限公司\n",
      "removed 中大财富 by 广州中大财富\n",
      "removed 蓝天格锐 by 蓝天格锐电子科技有限公司\n",
      "removed 微积金 by 微积金互联网金融服务(上海)有限公司\n",
      "removed 华夏万家 by 华夏万家金服\n",
      "removed 汇富帮 by 天津汇富帮信息科技有限公司\n",
      "removed 善心汇 by 深圳市善心汇文化传播有限公司\n",
      "removed 聚胜财富 by 聚胜财富投资有限公司\n",
      "removed 华夏万家 by 华夏万家金服\n",
      "removed 云联惠 by 广东云联惠网络科技有限公司\n",
      "removed 华赢凯来 by 北京华赢凯来资产管理有限公司\n",
      "removed 耀盛投资 by 耀盛投资管理集团\n",
      "removed 陨石地带 by 北京陨石地带信息技术有限公司\n",
      "removed 银易贷 by 江苏银易贷网络科技有限公司\n",
      "removed 善林金融 by 善林金融信息服务有限公司\n",
      "removed 添上星 by 添上星理财\n",
      "removed 酷宝盒 by 天酷宝盒\n",
      "removed 小牛投资 by 深圳市小牛投资管理有限公司\n",
      "removed 子昂众盈 by 珠海子昂众盈投资管理有限公司\n",
      "removed 通金投资 by 上海通金投资\n",
      "removed 壹宝贷 by  壹宝贷\n",
      "removed 壹佰金融 by 深圳壹佰金融服务有限公司\n",
      "removed 央金所 by  央金所\n",
      "removed 宜贷 by 宜贷网\n",
      "removed 宝多多 by 宝多多投资管理有限公司网\n",
      "removed 河南睿信 by 河南睿信网络科技有限公司\n",
      "removed 京东 by 京东商城\n",
      "removed 前沿财富 by  前沿财富\n",
      "removed 饿了么 by 饿了么未来餐厅\n",
      "removed 华夏信财 by 上海互金机构华夏信财\n",
      "removed 乐投财富 by 乐投财富投资管理公司\n",
      "removed 酷宝盒 by 南京天雄酷宝盒投资公司\n",
      "removed 念钱安 by 念钱安理财\n",
      "removed 钱内助 by 钱内助金融信息服务有限公司\n",
      "removed 邑民金融 by 邑民金融平台\n",
      "removed 信和 by 信和财富\n",
      "removed 甜菜金融 by ????甜菜金融\n",
      "removed 小牛资本 by 小牛资本集团\n",
      "removed 华赢凯来 by 北京华赢凯来资产管理有限公司\n",
      "removed 微贷 by 微贷网\n",
      "removed 联宝 by 江苏联宝订单信息科技发展有限公司\n",
      "removed 挖财 by 挖财宝\n",
      "removed 顺发投资 by 云南顺发投资管理有限公司\n",
      "removed 玖富普惠 by 玖富普惠理财\n",
      "removed 善心汇 by 深圳市善心汇文化公司\n",
      "removed 善心汇 by 深圳善心汇\n",
      "removed 小牛新财富 by ????小牛新财富天津公司\n",
      "removed 银湖网 by 银湖网络科技有限公司\n",
      "removed 云返汽车 by 广东云返汽车销售有限公司\n",
      "removed 贝格富 by 海南贝格富科技有限公司\n",
      "removed 银票网 by 上海鸿翔银票网互联网金融信息服务有限公司\n",
      "removed 宜贷 by 宜贷网\n",
      "removed 温商贷 by 浙江温商贷\n",
      "removed 团贷 by 团贷网\n"
     ]
    }
   ],
   "source": [
    "# Post-process the predicted entities: normalise aliases via trans_map, then\n",
    "# drop entities subsumed by a longer one in the same row (removals are\n",
    "# printed by remove_short_entity_by_long, as seen in the output).\n",
    "rs_df['key_entity'] = rs_df['key_entity'].map(lambda x: trans_keys(trans_map,x)).map(remove_short_entity_by_long)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the reconciled submission (sentiment and entities decided\n",
    "# independently, new de-duplication scheme); output path kept as-is.\n",
    "rs_df.to_csv('round2/tmp/stack_on_twomodels_sentiment和entity单独判断_新去重方式.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: no cell of rs_df should still contain a raw tuple\n",
    "# (a tuple would mean an unpacking step upstream failed silently).\n",
    "# Fixed: removed the unused `types` set, repaired the broken indentation,\n",
    "# and stopped shadowing the builtin `id`.\n",
    "for row_id, negative, key_entity in rs_df.values:\n",
    "    for value in (row_id, negative, key_entity):\n",
    "        if isinstance(value, tuple):\n",
    "            print([row_id, negative, key_entity])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rows predicted non-negative that still carried entities (their\n",
    "# key_entity in rs_df was reset to NaN earlier).\n",
    "positive_have_negentity_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the raw test set to inspect the conflicting documents.\n",
    "test_df = load_basic_dataset('test')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show raw texts that were predicted negative but received no entity.\n",
    "test_df[test_df['id'].isin(negative_have_none_entity_df['id'].values)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use another model's predictions to patch negative texts that ended up\n",
    "# with no entity.\n",
    "other_df = pd.read_csv('round2/tmp/bert_attention_singlemodel_rs.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preview how the other model labelled the conflicting ids.\n",
    "other_df[other_df['id'].isin(positive_have_negentity_df['id'].values)].head(20)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Resolve conflicting rows by falling back to the other model's output:\n",
    "# both conflict cases ('non-negative text with entities' and 'negative\n",
    "# text without entities') take the bert-attention single-model row.\n",
    "# Fixed: merged the two identical if/elif branches, precomputed the id set\n",
    "# instead of scanning `.values` arrays per row, and stopped shadowing `id`.\n",
    "conflict_ids = set(positive_have_negentity_df['id'].values) | set(negative_have_none_entity_df['id'].values)\n",
    "items = []\n",
    "for row_id, negative, key in rs_df.values:\n",
    "    if row_id in conflict_ids:\n",
    "        row_id, negative, key = other_df[other_df['id'] == row_id].values[0]\n",
    "    items.append((row_id, negative, key))\n",
    "no_nan_conflict_df = pd.DataFrame(items, columns=['id', 'negative', 'key_entity'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the conflict-resolved submission.\n",
    "no_nan_conflict_df.to_csv('round2/tmp/stack_on_twomodels_no_nan_conflict.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Spot-check: previously entity-less negative ids should now carry the\n",
    "# other model's entities.\n",
    "no_nan_conflict_df[no_nan_conflict_df['id'].isin(negative_have_none_entity_df['id'].values)].head(20)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
