{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "os.chdir('../')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/njuciairs/wangshuai/test/FinancialNagetiveEntityJudge'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: confirm the working directory is now the project root\n",
    "os.getcwd()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "from evaluation.evaluate import evaluate\n",
    "from data_utils.basic_data import load_train_val_dataset\n",
    "from results_process.regulizer import remove_nine,remove_short_entity\n",
    "from results_process.utils import load_model_rs\n",
    "from results_process.bert_entity_model import reduce_rs_by_id\n",
    "# Deduplicate: drop the shorter of overlapping entity names\n",
    "import numpy as np\n",
    "def remove_short_entity_by_long(entity_str):\n",
    "    \"\"\"\n",
    "    除去key_entity中同一实体的较短名称\n",
    "    :param entity_str:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    if not isinstance(entity_str, str):\n",
    "        return entity_str\n",
    "    entities = entity_str.split(';')\n",
    "    states = np.ones(len(entities))\n",
    "    for i, e in enumerate(entities):\n",
    "        for p in entities:\n",
    "            if e in p and len(e) < len(p):\n",
    "                states[i] = 0\n",
    "    rs = []\n",
    "    for i, e in enumerate(entities):\n",
    "        if states[i] == 1:\n",
    "            rs.append(e)\n",
    "    rs = ';'.join(rs)\n",
    "    return rs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the train/val split and load the saved per-entity model predictions\n",
    "_, val_dataset = load_train_val_dataset(split_ratio=0.8)\n",
    "val_dataset = val_dataset.sort_values(['id'])\n",
    "raw_val_df = load_model_rs(model_name='bert_one_entity',version_id=1,file_name='raw_val_rs.csv')\n",
    "# NOTE(review): presumably collapses per-entity rows into one prediction row\n",
    "# per id -- confirm against results_process.bert_entity_model.reduce_rs_by_id\n",
    "val_rs = reduce_rs_by_id(raw_val_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sentiment_f1 0.9550359712230215\n",
      "entity_f1 0.8735059760956175\n",
      "total_score 0.9061179741465791\n"
     ]
    }
   ],
   "source": [
    "#对模型的预测结果直接计算分数\n",
    "sentiment_f1, entity_f1, total_score = evaluate(val_dataset,val_rs)\n",
    "print('sentiment_f1', sentiment_f1)\n",
    "print('entity_f1', entity_f1)\n",
    "print('total_score', total_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sentiment_f1 0.9550359712230215\n",
      "entity_f1 0.9085239085239085\n",
      "total_score 0.9271287336035537\n"
     ]
    }
   ],
   "source": [
    "remove_short_rs = val_rs\n",
    "remove_short_rs['key_entity'] = remove_short_rs['key_entity'].map(remove_short_entity_by_long)\n",
    "sentiment_f1, entity_f1, total_score = evaluate(val_dataset,remove_short_rs)\n",
    "print('sentiment_f1', sentiment_f1)\n",
    "print('entity_f1', entity_f1)\n",
    "print('total_score', total_score)\n",
    "val_rs = remove_short_rs\n",
    "val_dataset = val_dataset.sort_values('id')\n",
    "val_rs = val_rs.sort_values('id')\n",
    "val_dataset['predict_negative'] = val_rs['negative'].values\n",
    "val_dataset['predict_key'] = val_rs['key_entity'].values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "senti_badcase = val_dataset[val_dataset[['negative','predict_negative']].apply(lambda x: x[0] !=x[1],axis=1)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist sentiment badcases for manual inspection\n",
    "senti_badcase.to_csv('evaluation/tmp/senti_badcase.csv',index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def comparee_entity(estr1,estr2):\n",
    "    e1s = set(str(estr1).split(';'))\n",
    "    e2s = set(str(estr2).split(';'))\n",
    "    if len(e1s.union(e2s)) !=  len(e1s.intersection(e2s)):\n",
    "        return True\n",
    "    return False\n",
    "# Entity badcases: predicted entity set differs from the labeled set.\n",
    "# Label-based row access replaces positional x[0]/x[1] Series indexing,\n",
    "# which is deprecated for label-indexed Series in pandas 2.x.\n",
    "entity_badcase = val_dataset[val_dataset[['key_entity','predict_key']].apply(lambda x: comparee_entity(x['key_entity'],x['predict_key']),axis=1)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist entity badcases for manual inspection\n",
    "entity_badcase.to_csv('evaluation/tmp/entity_badcase.csv',index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
