{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step---0\n",
      "step---200\n",
      "step---400\n",
      "test               precision    recall  f1-score   support\n",
      "\n",
      "           0       1.00      1.00      1.00   7955959\n",
      "           1       0.91      0.94      0.92    554364\n",
      "           2       0.77      0.76      0.77    165749\n",
      "           3       0.91      0.93      0.92    554364\n",
      "           4       0.93      0.87      0.90    494486\n",
      "\n",
      "    accuracy                           0.98   9724922\n",
      "   macro avg       0.90      0.90      0.90   9724922\n",
      "weighted avg       0.98      0.98      0.98   9724922\n",
      "\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "using bert predict a sentence\n",
    "1. using CPU to run needs 0.5 seconds (just count predict seconds),  while using gpu cause 0.02 seconds.\n",
    "2. The parameter is huge.\n",
    "3. load model needs 10+ seconds.\n",
    "'''\n",
    "from operator import index\n",
    "import os\n",
    "import numpy as np \n",
    "from sklearn.metrics import precision_score, recall_score, f1_score, classification_report\n",
    "import sklearn \n",
    "import warnings\n",
    "import sklearn.exceptions\n",
    "warnings.filterwarnings(\"ignore\", category=sklearn.exceptions.UndefinedMetricWarning)\n",
    "import time\n",
    "\n",
    "import torch\n",
    "from torch.utils.data import Dataset,DataLoader\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "from model.LSTM_CRF import NERLSTM_CRF\n",
    "import random\n",
    "\n",
    "\n",
    "from sklearn.metrics import precision_score, recall_score, f1_score, classification_report\n",
    "\n",
    "# Fix all RNG seeds (python, numpy, torch CPU/GPU) for reproducibility.\n",
    "seed = 42\n",
    "random.seed(seed)\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)\n",
    "torch.backends.cudnn.deterministic = True\n",
    "\n",
    "DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # which cuda index to use depends on the server's GPU load\n",
    "# DEVICE = torch.device('cpu')\n",
    "model_path = 'save_dict/BEST_F1_0.pth'\n",
    "\n",
    "\n",
    "def load_dict(dict_path):\n",
    "    vocab = {}\n",
    "    i = 0\n",
    "    for line in open(dict_path, 'r', encoding='utf-8'):\n",
    "        key = line.strip('\\n')\n",
    "        vocab[key] = i\n",
    "        i += 1\n",
    "    return vocab, {v: k for k, v in vocab.items()}\n",
    "\n",
    "\n",
    "# Load the preprocessed test split. NOTE(review): torch.load without\n",
    "# map_location will fail on a CPU-only machine if '../test_data' was\n",
    "# saved from GPU tensors — confirm where the file is produced.\n",
    "test = torch.load('../test_data')\n",
    "\n",
    "\n",
    "# Vocabulary and tag-set lookups (token -> id and id -> token).\n",
    "word2id, id2word = load_dict('../word2id.txt')\n",
    "tag2id, id2tag = load_dict('../tag2id.txt')\n",
    "\n",
    "\n",
    "\n",
    "def convert_tokens_2_id(tokens_list, token2id_dict):\n",
    "    '''\n",
    "    tokens_list should be [a,b,c,d,e,f,g]\n",
    "    '''\n",
    "    new_tokens_list = []\n",
    "    for i in tokens_list:\n",
    "        if i not in token2id_dict.keys():\n",
    "            i = 'OOV'\n",
    "        new_tokens_list.append(token2id_dict.get(i, 0))\n",
    "    return new_tokens_list\n",
    "\n",
    "# create Dataset\n",
    "class NERDataset(Dataset):\n",
    "    def __init__(self,data):\n",
    "        self.data = data\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        return self.data[index][0],self.data[index][1]\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "# dataloader ：padding \n",
    "def padding(data):\n",
    "    src,tgt = zip(*data)\n",
    "\n",
    "    src_lens = [len(t) for t in src]\n",
    "    tgt_lens = [len(g) for g in tgt]\n",
    "    src_pad = torch.zeros(len(src), max(src_lens)).long()\n",
    "    for i, s in enumerate(src):\n",
    "        end = src_lens[i]\n",
    "        src_pad[i, :end] = torch.LongTensor(s[:end])\n",
    "\n",
    "    tgt_pad = torch.zeros(len(tgt), max(tgt_lens)).long()\n",
    "    for i, t in enumerate(tgt):\n",
    "        end = tgt_lens[i]\n",
    "        tgt_pad[i, :end] = torch.LongTensor(t[:end])\n",
    "    return src_pad,tgt_pad\n",
    "\n",
    "\n",
    "test_dataset = NERDataset(test)\n",
    "print(len(test_dataset))\n",
    "test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=False, collate_fn=padding)\n",
    "\n",
    "\n",
    "# Hyper-parameters — must match the values the checkpoint was trained with.\n",
    "DROP_OUT = 0.2\n",
    "EMBED_DIM = 100\n",
    "HIDDEN_DIM = 200\n",
    "\n",
    "model = NERLSTM_CRF(EMBED_DIM, HIDDEN_DIM, DROP_OUT, word2id, tag2id)\n",
    "# map_location makes the checkpoint loadable even when it was saved on a\n",
    "# different device than DEVICE (e.g. a GPU checkpoint on a CPU-only box).\n",
    "model.load_state_dict(torch.load(model_path, map_location=DEVICE))\n",
    "model.to(DEVICE)\n",
    "# BUG FIX: eval mode disables dropout (DROP_OUT=0.2); without this the\n",
    "# model predicts with dropout active and the reported metrics are noisy.\n",
    "model.eval()\n",
    "\n",
    "\n",
    "preds, labels = [], []\n",
    "\n",
    "# Evaluate batch by batch. The model is expected to return, for each\n",
    "# sequence in the batch, a list of predicted tag ids — TODO confirm\n",
    "# against NERLSTM_CRF's forward/decode implementation.\n",
    "for step, batch in enumerate(test_dataloader):\n",
    "    with torch.no_grad():\n",
    "        logits = model(batch[0].to(DEVICE))\n",
    "        # Progress indicator every 200 batches.\n",
    "        if step % 200 == 0:\n",
    "            print('step---%s' % step)\n",
    "        label_ids = batch[1].to('cpu').numpy()\n",
    "        # Flatten per-sequence predictions/labels into flat token lists.\n",
    "        # NOTE(review): if the decoder skips padding positions while\n",
    "        # label_ids keep them, preds/labels could misalign — confirm.\n",
    "        # (Inner loops renamed: the original reused `i` and shadowed the\n",
    "        # outer batch counter.)\n",
    "        for seq_pred in logits:\n",
    "            preds.extend(seq_pred)\n",
    "        for seq_gold in label_ids:\n",
    "            labels.extend(seq_gold)\n",
    "\n",
    "# Token-level macro metrics plus the full per-class report.\n",
    "precision = precision_score(labels, preds, average='macro')\n",
    "recall = recall_score(labels, preds, average='macro')\n",
    "f1 = f1_score(labels, preds, average='macro')\n",
    "report = classification_report(labels, preds)\n",
    "print('test', report)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "74fe859eaf5d00bce5565637797e820181e41cef3326bb2f5cfc02a88bf2f28b"
  },
  "kernelspec": {
   "display_name": "Python 3.7.13 ('py37')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.13"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
