{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.chdir('../')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/njuciairs/wangshuai/test/FinancialNagetiveEntityJudge'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "os.getcwd()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from evaluation.evaluate import evaluate\n",
    "from data_utils.basic_data import load_train_val_dataset,load_basic_dataset\n",
    "from results_process.regulizer import remove_nine,remove_short_entity\n",
    "from results_process.utils import load_model_rs\n",
    "from results_process.bert_entity_model import reduce_rs_by_id\n",
    "from functools import reduce\n",
    "import numpy as np\n",
    "from data_utils.bert_multi_class_data import get_train_val_data_loader, get_test_loader,TestEntityDataset\n",
    "import pandas as pd\n",
    "from collections import Counter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "raw_df = load_model_rs(model_name='multi_class_cross1',version_id=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# cross-trained model 1: multi_class_cross1\n",
     "# cross-trained model 2: BertSentiEntity_cross"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "dfs = [load_model_rs(model_name='multi_class_cross1',version_id=i) for i in range(1,10)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_df = load_basic_dataset(split='test')\n",
    "test_dataset = TestEntityDataset(test_df, max_len=400)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Recover the entity name from each test sample's text: the segment before\n",
     "# the first [SEP], minus a 5-character prefix (presumably a fixed marker\n",
     "# added by TestEntityDataset — TODO confirm against its implementation).\n",
     "texts = [sample.text for sample in test_dataset]\n",
     "entity = [t.split('[SEP]')[0][5:] for t in texts]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Attach the recovered entity names to every model's prediction frame.\n",
     "# NOTE(review): assumes each df's row order matches `test_dataset` — verify.\n",
     "for df in dfs:\n",
     "    df['key_entity'] = entity"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "group = pd.concat(dfs).groupby(['id','key_entity'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Majority-vote ensemble: for each (id, key_entity) group across the 9\n",
     "# cross-validation models, keep the most frequent predicted label.\n",
     "rs_list = []\n",
     "for (id,key),df in group:\n",
     "    # NOTE(review): `id` shadows the builtin here.\n",
     "    label = Counter(df['predict_labels']).most_common(1)[0][0]\n",
     "    rs_list.append((id,key,label))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "raw_df = pd.DataFrame(rs_list,columns=['id','key_entity','predict_label'])[['id','predict_label','key_entity']]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "# raw_df['key_entity'] = entity"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Regroup predictions by document: rs_map maps id -> ([labels], [entities]).\n",
     "# Unpacking order matches raw_df's columns (id, predict_label, key_entity).\n",
     "rs_map = {}\n",
     "for id,label,entity in raw_df.values:\n",
     "    # NOTE(review): `id` shadows the builtin, and `entity` clobbers the\n",
     "    # entity list built in an earlier cell (only safe in a linear run).\n",
     "    if id not in rs_map:\n",
     "        rs_map[id] = ([label],[entity])\n",
     "    else:\n",
     "        rs_map[id][0].append(label)\n",
     "        rs_map[id][1].append(entity)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Collapse per-entity labels into one row per document:\n",
     "#   negative  = 1 when the mean label is >= 1 (labels appear to be 0/1/2),\n",
     "#   key_entity = ';'-joined entities whose label == 2, NaN when none qualify\n",
     "#                or the document is not negative.\n",
     "items = []\n",
     "for k,v in rs_map.items():\n",
     "    labels,entities = v\n",
     "    senti = int(np.mean(labels) >= 1)\n",
     "    keys = []\n",
     "    for l,e in zip(labels,entities):\n",
     "        if l==2:\n",
     "            keys.append(e)\n",
     "    key_entity = ';'.join(keys)\n",
     "    # NOTE(review): a doc can end up negative==1 with key_entity == NaN when\n",
     "    # no entity scored 2; later cells patch such inconsistencies.\n",
     "    if len(keys)==0 or senti==0:\n",
     "        key_entity = np.nan\n",
     "    items.append((k,senti,key_entity))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>00049297</td>\n",
       "      <td>1</td>\n",
       "      <td>小资钱包;资易贷;资易贷金融信息服务有限公司</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>000b8b75</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>0012d20a</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>0033ebe3</td>\n",
       "      <td>1</td>\n",
       "      <td>联璧金融</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4</td>\n",
       "      <td>003b1540</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4995</td>\n",
       "      <td>ffa46c98</td>\n",
       "      <td>1</td>\n",
       "      <td>小资钱包;资易贷</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4996</td>\n",
       "      <td>ffc0005d</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4997</td>\n",
       "      <td>ffd1497a</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4998</td>\n",
       "      <td>fff09e68</td>\n",
       "      <td>0</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>4999</td>\n",
       "      <td>fffe28dd</td>\n",
       "      <td>1</td>\n",
       "      <td>黑火金融</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5000 rows × 3 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "            id  negative              key_entity\n",
       "0     00049297         1  小资钱包;资易贷;资易贷金融信息服务有限公司\n",
       "1     000b8b75         0                     NaN\n",
       "2     0012d20a         0                     NaN\n",
       "3     0033ebe3         1                    联璧金融\n",
       "4     003b1540         0                     NaN\n",
       "...        ...       ...                     ...\n",
       "4995  ffa46c98         1                小资钱包;资易贷\n",
       "4996  ffc0005d         0                     NaN\n",
       "4997  ffd1497a         0                     NaN\n",
       "4998  fff09e68         0                     NaN\n",
       "4999  fffe28dd         1                    黑火金融\n",
       "\n",
       "[5000 rows x 3 columns]"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "raw_df = pd.DataFrame(items,columns=['id','negative','key_entity'])\n",
    "raw_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
     "# De-duplicate: drop the shorter of two overlapping entity names.\n",
     "import numpy as np\n",
     "def remove_short_entity_by_long(entity_str):\n",
     "    \"\"\"\n",
     "    Remove shorter aliases of the same entity from a ';'-joined key_entity\n",
     "    string: an entity is dropped when it is a strict substring of a longer\n",
     "    entity in the same string. Non-string input (e.g. NaN) passes through.\n",
     "    :param entity_str: ';'-separated entity names, or a non-str such as NaN\n",
     "    :return: ';'-joined surviving entities (original order), or the input\n",
     "             unchanged when it is not a str\n",
     "    \"\"\"\n",
     "    if not isinstance(entity_str, str):\n",
     "        return entity_str\n",
     "    entities = entity_str.split(';')\n",
     "    # states[i] == 1 means entities[i] survives; O(n^2) pairwise check.\n",
     "    states = np.ones(len(entities))\n",
     "    for i, e in enumerate(entities):\n",
     "        for p in entities:\n",
     "            if e in p and len(e) < len(p):\n",
     "                print('removed %s by %s'%(e,p))\n",
     "                states[i] = 0\n",
     "    rs = []\n",
     "    for i, e in enumerate(entities):\n",
     "        if states[i] == 1:\n",
     "            rs.append(e)\n",
     "    rs = ';'.join(rs)\n",
     "    return rs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
     "def get_trans_map():\n",
     "    \"\"\"\n",
     "    Build a canonicalization map from the training set: for each pair of\n",
     "    overlapping entity names (one a substring of the other) co-occurring in\n",
     "    a row's `entity` column, map both 'a-b' and 'b-a' to whichever of the\n",
     "    two appears in that row's `key_entity` column.\n",
     "    \"\"\"\n",
     "    from data_utils.basic_data import load_basic_dataset\n",
     "    train_df = load_basic_dataset('train')\n",
     "    srcs = train_df['entity'].map(lambda x :list(str(x).split(';')))\n",
     "    dests =  train_df['key_entity'].map(lambda x :list(str(x).split(';')))\n",
     "    trans_map = {}\n",
     "    # NOTE(review): the loop targets shadow the outer `srcs`/`dests` Series.\n",
     "    for srcs,dests in list(zip(srcs,dests)):\n",
     "        for src in srcs:\n",
     "            if src == '':\n",
     "                continue\n",
     "            for e in srcs:\n",
     "                if e== '':\n",
     "                    continue\n",
     "                if (src in e or e in src) and e!=src:\n",
     "                    if src in dests:\n",
     "                        trans_map[src+'-'+e] = src\n",
     "                        trans_map[e+'-'+src] = src\n",
     "                    # when both names are key entities, this check wins.\n",
     "                    if e in dests:\n",
     "                        trans_map[src+'-'+e] = e\n",
     "                        trans_map[e+'-'+src] = e\n",
     "    return trans_map\n",
     "def trans_keys(trans_map,entity_str):\n",
     "    \"\"\"\n",
     "    Canonicalize a ';'-joined entity string via `trans_map`: any pair of\n",
     "    entities with a known mapping collapses to the canonical name; unmapped\n",
     "    entities pass through. Returns NaN when nothing survives and returns\n",
     "    non-string input (e.g. NaN) unchanged.\n",
     "    \"\"\"\n",
     "    if not isinstance(entity_str,str):\n",
     "        return entity_str\n",
     "    es = list(filter(lambda x:str(x).strip()!='',entity_str.split(';')))\n",
     "    rs = set()\n",
     "    for e in es:\n",
     "        finded = False\n",
     "        for y in es:\n",
     "            if e+'-'+y in trans_map and e!=y:\n",
     "                rs.add(trans_map[e+'-'+y])\n",
     "                finded = True\n",
     "        if not finded:\n",
     "            rs.add(e)\n",
     "    if len(rs) > 0:\n",
     "        # NOTE(review): set iteration order is arbitrary, so the joined\n",
     "        # output order is not deterministic across runs.\n",
     "        rs = ';'.join(list(rs))\n",
     "    else:\n",
     "        rs = np.nan\n",
     "    return rs\n",
     "trans_map = get_trans_map()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df = raw_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df['key_entity'] = rs_df['key_entity'].map(lambda x: trans_keys(trans_map,x)).map(remove_short_entity_by_long).map(remove_nine)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df.to_csv('evaluation/tmp/multi_class_cross1-9_1013.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df['key_entity'] = 'ssss'\n",
    "rs_df.to_csv('evaluation/tmp/entity1.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9728144750000001"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "0.38912579000 /0.4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.9390498833333333"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(0.95255572000 - 0.38912579000)/0.6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.964390925"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "0.38575637000  / 0.4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.924355"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(0.94036937000 - 0.38575637000) /0.6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "178.04537499999995"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "5000 * (1-0.964390925)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.924355"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(0.94036937000 - 0.38575637000)/0.6"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Patch inconsistent rows: an empty-string key_entity means no key entity\n",
     "# survived, so force negative=0 and normalize the value to NaN.\n",
     "rs_df.loc[rs_df['key_entity'].map(lambda x:str(x).strip()==''),'negative'] =0\n",
     "rs_df.loc[rs_df['key_entity'].map(lambda x:str(x).strip()==''),'key_entity'] =np.nan"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df.to_csv('evaluation/tmp/entity3_20191004.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>key_entity</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "Empty DataFrame\n",
       "Columns: [id, negative, key_entity]\n",
       "Index: []"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "space_sub = rs_df[rs_df['key_entity'].map(lambda x:str(x).strip()=='')]\n",
    "space_sub"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>id</th>\n",
       "      <th>negative</th>\n",
       "      <th>predict</th>\n",
       "      <th>entity_list</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>790</td>\n",
       "      <td>e9abc6b0</td>\n",
       "      <td>1</td>\n",
       "      <td>[0, 0, 0]</td>\n",
       "      <td>['蚂蚁金服', '花呗', '大学生贷']</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "           id  negative    predict             entity_list\n",
       "790  e9abc6b0         1  [0, 0, 0]  ['蚂蚁金服', '花呗', '大学生贷']"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "raw_df[raw_df['id']=='e9abc6b0']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### 以下是对结果的分析 (Analysis of the results below)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_df = load_basic_dataset(split ='test')\n",
    "raw_df = load_model_rs(model_name='BertSentiEntity',version_id=1)\n",
    "raw_rs_df = reduce_rs_by_id(raw_df)\n",
    "raw_rs_df['key_entity'] = raw_rs_df['key_entity'].map(remove_short_entity_by_long)\n",
    "a = raw_rs_df[raw_rs_df.id.isin(space_sub['id'].values)].sort_values('id')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "b = test_df[test_df.id.isin(space_sub['id'].values)].sort_values('id')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "37"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "len(torch.Tensor([1,2,3]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([3])"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a = torch.Tensor([1,2,3])\n",
    "b = torch.Tensor([1,0,3])\n",
    "a.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dsdc\n",
      "yes\n"
     ]
    }
   ],
   "source": [
    "\n",
    "class TestDataset(Dataset):\n",
    "    def __init__(self, df, max_len=300):\n",
    "        self.x = list(self.make_samples(df, max_len))\n",
    "        self.len = len(self.samples)\n",
    "\n",
    "    def make_samples(self, df, max_len):\n",
    "        texts = df['text'].values\n",
    "        tiltles = df['title'].values\n",
    "        estrs = df['entity'].values\n",
    "        ids = df['id'].values\n",
    "        for i, estr in enumerate(estrs):\n",
    "            for e in estr.split(';'):\n",
    "                yield TextEntitySample(ids[i], texts[i], tiltles[i], e, max_len)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.samples[index]\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.len"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
