{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Move up to the repository root so the package imports below resolve\n",
    "# and so relative output paths (e.g. 'round2/tmp/...') work.\n",
    "import os\n",
    "os.chdir('../')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Project and third-party imports for evaluation and results post-processing.\n",
    "from evaluation.evaluate import evaluate\n",
    "from data_utils.basic_data import load_train_val_dataset,load_basic_dataset\n",
    "from results_process.regulizer import remove_nine,remove_short_entity\n",
    "from results_process.utils import load_model_rs\n",
    "from results_process.bert_entity_model import reduce_rs_by_id\n",
    "from functools import reduce\n",
    "import numpy as np\n",
    "\n",
    "import pandas as pd\n",
    "from collections import Counter\n",
    "from glob import glob\n",
    "from os.path import join\n",
    "# NOTE(review): hardcoded absolute path -- only works on this one machine;\n",
    "# consider reading it from an environment variable or config file.\n",
    "MODEL_ROOT = '/home/njuciairs/wangshuai/competitions/finacial_models_round2'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Deduplicate: when one entity name contains another, drop the shorter one.\n",
    "import numpy as np\n",
    "def remove_short_entity_by_long(entity_str):\n",
    "    \"\"\"\n",
    "    Remove the shorter alias of the same entity from a ';'-joined key_entity string.\n",
    "    :param entity_str: ';'-separated entity names; non-str inputs (e.g. NaN) are returned unchanged\n",
    "    :return: ';'-joined entities with strict substrings of longer entities removed\n",
    "    \"\"\"\n",
    "    if not isinstance(entity_str, str):\n",
    "        return entity_str\n",
    "    entities = entity_str.split(';')\n",
    "    # states[i] == 1 means entities[i] is kept\n",
    "    states = np.ones(len(entities))\n",
    "    for i, e in enumerate(entities):\n",
    "        for p in entities:\n",
    "            # e is a strict substring of some longer name p -> drop e\n",
    "            if e in p and len(e) < len(p):\n",
    "                print('removed %s by %s'%(e,p))\n",
    "                states[i] = 0\n",
    "    rs = []\n",
    "    for i, e in enumerate(entities):\n",
    "        if states[i] == 1:\n",
    "            rs.append(e)\n",
    "    rs = ';'.join(rs)\n",
    "    return rs\n",
    "\n",
    "# When True, pairs where BOTH aliases appear in key_entity map to both names.\n",
    "good_remove = False\n",
    "def get_trans_map():\n",
    "    \"\"\"\n",
    "    Build an alias-translation map from the training data.\n",
    "\n",
    "    For each training row, every pair of distinct overlapping entity names\n",
    "    (one a substring of the other) in the 'entity' column is mapped -- under\n",
    "    the keys 'a-b' and 'b-a' -- to whichever name the row's 'key_entity'\n",
    "    column actually uses.\n",
    "    :return: dict mapping 'src-dst' pair keys to the preferred entity string\n",
    "    \"\"\"\n",
    "    from data_utils.basic_data import load_basic_dataset\n",
    "    train_df = load_basic_dataset('train')\n",
    "    srcs = train_df['entity'].map(lambda x: list(str(x).split(';')))\n",
    "    dests = train_df['key_entity'].map(lambda x: list(str(x).split(';')))\n",
    "    trans_map = {}\n",
    "    both_existed_log = ''\n",
    "    # NOTE(review): the loop targets shadow the outer srcs/dests Series; this\n",
    "    # works because list(zip(...)) is materialized first, but it is fragile.\n",
    "    for srcs, dests in list(zip(srcs, dests)):\n",
    "        for src in srcs:\n",
    "            if src == '':\n",
    "                continue\n",
    "            for e in srcs:\n",
    "                if e == '':\n",
    "                    continue\n",
    "                # distinct names where one contains the other\n",
    "                if (src in e or e in src) and e != src:\n",
    "                    if src in dests:\n",
    "                        trans_map[src + '-' + e] = src\n",
    "                        trans_map[e + '-' + src] = src\n",
    "                    # when both names are in dests, this overwrites the mapping\n",
    "                    # set just above (later branches win)\n",
    "                    if e in dests:\n",
    "                        trans_map[src + '-' + e] = e\n",
    "                        trans_map[e + '-' + src] = e\n",
    "                    if good_remove:\n",
    "                        if src in dests and e in dests:\n",
    "                            trans_map[src + '-' + e] = e + ';' + src\n",
    "                            trans_map[e + '-' + src] = e + ';' + src\n",
    "                            both_existed_log += e + ';' + src\n",
    "                            print('both existsed:', e + ';' + src)\n",
    "    return trans_map\n",
    "def trans_keys(trans_map, entity_str):\n",
    "    \"\"\"\n",
    "    Rewrite a ';'-joined entity string using the alias map from get_trans_map().\n",
    "    :param trans_map: dict of 'a-b' pair keys -> preferred entity name\n",
    "    :param entity_str: ';'-separated entities; non-str inputs are returned unchanged\n",
    "    :return: ';'-joined translated entities (deduplicated via a set),\n",
    "             or np.nan when nothing remains\n",
    "    \"\"\"\n",
    "    if not isinstance(entity_str, str):\n",
    "        return entity_str\n",
    "    es = list(filter(lambda x: str(x).strip() != '', entity_str.split(';')))\n",
    "    rs = set()\n",
    "    for e in es:\n",
    "        finded = False\n",
    "        for y in es:\n",
    "            # translate e when it forms a known alias pair with another entity y\n",
    "            if e + '-' + y in trans_map and e != y:\n",
    "                rs.add(trans_map[e + '-' + y])\n",
    "                finded = True\n",
    "        if not finded:\n",
    "            rs.add(e)\n",
    "    if len(rs) > 0:\n",
    "        rs = ';'.join(list(rs))\n",
    "    else:\n",
    "        rs = np.nan\n",
    "    return rs\n",
    "trans_map = get_trans_map()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "dfs = [pd.read_csv(p) for p in glob(join(MODEL_ROOT,'senti_entity_goodremove_full','*','*','raw_rs.csv')) ] "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregate per-entity predictions by document id: rows sharing an id are\n",
    "# merged so the negative votes and entity predictions can be combined later.\n",
    "rs_dict = {}\n",
    "for id,negative,predict,entity_list in dfs[0].values:\n",
    "    # NOTE(review): eval() on CSV contents executes arbitrary code -- acceptable\n",
    "    # only because these files are produced by our own pipeline; prefer\n",
    "    # ast.literal_eval for untrusted data.\n",
    "    predict = eval(predict)\n",
    "    entity_list = eval(entity_list)\n",
    "    item = [[negative],predict,entity_list]\n",
    "\n",
    "    if id not in rs_dict:\n",
    "        rs_dict[id] = item\n",
    "    else:\n",
    "        in_item = rs_dict[id]\n",
    "        in_item[0] += item[0]\n",
    "        in_item[1] += item[1]\n",
    "        in_item[2] += item[2]\n",
    "        # BUG FIX: was `rs_dict[id] = item`, which discarded the accumulated\n",
    "        # lists and kept only the latest record; store the merged entry instead.\n",
    "        rs_dict[id] = in_item"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Majority-vote the document-level negative label and keep every entity\n",
    "# whose per-entity prediction is positive; empty entity sets become NaN.\n",
    "items = []\n",
    "for doc_id, (negatives, predicts, entities) in rs_dict.items():\n",
    "    negative = int(np.mean(negatives) >= 0.5)\n",
    "    key_entities = [entity for pred, entity in zip(predicts, entities) if pred == 1]\n",
    "    kestr = ';'.join(key_entities) if key_entities else np.nan\n",
    "    items.append((doc_id, negative, kestr))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df = pd.DataFrame(items,columns=['id','negative','key_entity'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "rs_df.to_csv('round2/tmp/bert_attention_singlemodel_rs.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
