{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 加载环境\n",
    "import os\n",
    "import re\n",
    "import logging\n",
    "import json\n",
    "from glob import glob\n",
    "from tqdm import tqdm\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "from transformers import BertTokenizer\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "\n",
    "from ignite.engine import Engine, Events\n",
    "from ignite.metrics import RunningAverage\n",
    "from ignite.handlers import ModelCheckpoint, EarlyStopping, global_step_from_engine\n",
    "from ignite.contrib.handlers import ProgressBar"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def set_seed(seed: int):\n",
    "    \"\"\"\n",
    "    Set the seed in ``random``, ``numpy`` and ``torch`` for reproducible behavior.\n",
    "\n",
    "    Args:\n",
    "        seed (:obj:`int`): The seed to set.\n",
    "    \"\"\"\n",
    "    # Imported locally: previously this cell depended on ``import random``\n",
    "    # from a *later* cell, which breaks a fresh-kernel partial run.\n",
    "    import random\n",
    "\n",
    "    random.seed(seed)\n",
    "    np.random.seed(seed)\n",
    "\n",
    "    torch.manual_seed(seed)\n",
    "    # No-op on CPU-only builds, so safe to call unconditionally.\n",
    "    torch.cuda.manual_seed_all(seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "from model import NER_Model\n",
    "from utils import cmed_ner_metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# stdlib imports used later in the notebook -- kept at the top of the cell\n",
    "# (they used to sit *after* the logging setup, hiding the dependency).\n",
    "import time\n",
    "import random\n",
    "\n",
    "logger = logging.getLogger(__name__)\n",
    "# Setup logging: timestamped INFO-level messages.\n",
    "logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',\n",
    "                    datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "class cCKSNER:\n",
    "    \"\"\"Chinese clinical NER pipeline (CCKS-style data): data prep, training, prediction and export.\"\"\"\n",
    "\n",
    "    def __init__(self, dataset, max_split_len=500, max_seq_len=512,\n",
    "                 per_gpu_batch_size=8, embed_size=300,\n",
    "                 model_name_or_path=\"bert-base-chinese\", no_cuda=False,\n",
    "                 dense_layer_type=\"linear\", dropout=0.5, embed_type=\"random\",\n",
    "                 vector_file=\"\", bert_lr=1e-5, crf_lr=1e-3, patience=3,\n",
    "                 output_dir=\"results\", n_saved=3, max_epochs=100):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            dataset: directory containing train.txt / dev.txt / test.txt (JSON lines).\n",
    "            max_split_len: texts at least this long are split at punctuation first.\n",
    "            max_seq_len: fixed padded sequence length fed to the model.\n",
    "            per_gpu_batch_size: per-device batch size (total = n_gpu * this).\n",
    "            embed_type: embedding source; a value containing 'bert' selects the\n",
    "                pretrained encoder at ``model_name_or_path``.\n",
    "            bert_lr / crf_lr: learning rates for BERT params vs. everything else.\n",
    "            patience: early-stopping patience in dev evaluations.\n",
    "            n_saved: number of best checkpoints kept by ModelCheckpoint.\n",
    "        \"\"\"\n",
    "\n",
    "        set_seed(42)\n",
    "\n",
    "        self.train_path = os.path.join(dataset, \"train.txt\")\n",
    "        self.dev_path = os.path.join(dataset, \"dev.txt\")\n",
    "        self.test_path = os.path.join(dataset, \"test.txt\")\n",
    "        self.max_split_len = max_split_len\n",
    "        self.max_seq_len = max_seq_len\n",
    "\n",
    "        # Chinese entity-type names -> short tags used inside the BIES labels.\n",
    "        self.str2tag = {\n",
    "            \"疾病和诊断\": \"Dis\",\n",
    "            \"解剖部位\": \"Body\",\n",
    "            \"实验室检验\": \"Test\",\n",
    "            \"影像检查\": \"CT\",\n",
    "            \"药物\": \"Drug\",\n",
    "            \"手术\": \"Sur\"\n",
    "        }\n",
    "        self.tag2str = { self.str2tag[k] : k for k in self.str2tag.keys()}\n",
    "\n",
    "        # Label inventory: CRF control labels plus B/I/E/S variants per entity tag.\n",
    "        self.label_list = ['<pad>', '<start>', '<end>', 'O']\n",
    "        for tag in self.str2tag.values():\n",
    "            for pre in ['B-', 'I-', 'E-', 'S-']:\n",
    "                self.label_list.append(pre + tag)\n",
    "\n",
    "        self.bert_tokenizer = BertTokenizer.from_pretrained(model_name_or_path)\n",
    "\n",
    "        self.per_gpu_batch_size = per_gpu_batch_size\n",
    "        self.embed_size = embed_size\n",
    "        self.no_cuda = no_cuda\n",
    "\n",
    "        self.dense_layer_type = dense_layer_type\n",
    "        self.dropout = dropout\n",
    "        self.embed_type = embed_type\n",
    "        self.model_name_or_path = model_name_or_path\n",
    "        self.vector_file = vector_file\n",
    "        self.bert_lr = bert_lr\n",
    "        self.crf_lr = crf_lr\n",
    "        self.patience = patience\n",
    "        self.n_saved = n_saved\n",
    "        self.max_epochs = max_epochs\n",
    "\n",
    "        # n_gpu is only used to scale the effective batch size; it is clamped to\n",
    "        # at least 1 so CPU-only runs still get a valid batch size.\n",
    "        device = torch.device(\"cuda\" if torch.cuda.is_available() and not self.no_cuda else \"cpu\")\n",
    "        self.n_gpu = max(torch.cuda.device_count() if not self.no_cuda else 1, 1)\n",
    "        self.device = device\n",
    "\n",
    "        # Model name encodes embedding type and head, e.g. 'bert-base-chinese_linear_crf'.\n",
    "        if 'bert' not in self.embed_type:\n",
    "            model_name = \"{}_{}_crf\".format(self.embed_type, self.dense_layer_type)\n",
    "        else:\n",
    "            embed_type = os.path.split(self.model_name_or_path)[-1]\n",
    "            model_name = \"{}_{}_crf\".format(embed_type, self.dense_layer_type)\n",
    "\n",
    "        self.model_name = model_name\n",
    "        # self.output_dir = output_dir\n",
    "        self.output_dir = \"{}/{}\".format(output_dir, model_name)\n",
    "\n",
    "    def evaluation(self, gold_file, start_time, train_time):\n",
    "        \"\"\"\n",
    "        Score the predictions for ``gold_file`` and write a report file.\n",
    "\n",
    "        Compares the prediction file written by ``export_results`` against the\n",
    "        gold annotations using ``cmed_ner_metric``, then writes the scores,\n",
    "        timing information and all hyper-parameters to\n",
    "        'score_<split>_<model>.txt' in the output directory.\n",
    "\n",
    "        Args:\n",
    "            gold_file: path to the gold-standard JSON-lines file.\n",
    "            start_time: wall-clock time (``time.time()``) when the run started.\n",
    "            train_time: seconds spent training, reported separately.\n",
    "        \"\"\"\n",
    "        predix = os.path.split(gold_file)[-1].replace(\".txt\", \"\")\n",
    "        pre_file = os.path.join(self.output_dir, '{}_{}.txt'.format(predix, self.model_name))\n",
    "        score_file = os.path.join(self.output_dir, 'score_{}_{}.txt'.format(predix, self.model_name))\n",
    "        with open(score_file, 'w', encoding=\"utf-8\") as w:\n",
    "            # Fixed NameError: was ``cher_ner_metric`` but the import is ``cmed_ner_metric``.\n",
    "            res = cmed_ner_metric(pre_file, gold_file, self.str2tag)\n",
    "            w.write(\"overall_s:\\t{}\".format(res['overall_s']))\n",
    "            w.write(\"\\n\")\n",
    "            w.write(\"{}\".format(res['detial_s']))\n",
    "            w.write(\"\\n\")\n",
    "            w.write(\"message:\\n{}\".format(res['message']))\n",
    "            w.write(\"\\n\")\n",
    "            w.write(\"train time cost:\\t {:.2f} s\".format(train_time))\n",
    "            w.write(\"\\n\")\n",
    "            w.write(\"time cost:\\t {:.2f} s\".format(time.time() - start_time - train_time))\n",
    "            w.write(\"\\n\")\n",
    "            w.write(\"args:\\n{}\".format('\\n'.join(['%s:%s' % item for item in self.__dict__.items()])))\n",
    "\n",
    "    def export_results(self, unlabel_path):\n",
    "        \"\"\"\n",
    "        Tag an unlabeled JSON-lines file and export the predictions.\n",
    "\n",
    "        Writes one JSON object per text ({\"originalText\", \"entities\"}) to\n",
    "        '<split>_<model>.txt' and a flat (text_id, entity, type, span) table to\n",
    "        'tmp_entities_<split>_<model>.csv' in the output directory.\n",
    "\n",
    "        NOTE(review): the prediction file is opened in append mode, so\n",
    "        re-running without deleting the old file accumulates duplicate lines.\n",
    "        \"\"\"\n",
    "        X, cut_his, originalTexts = self.get_X(unlabel_path)\n",
    "        y_pred = self.predict(X)\n",
    "\n",
    "        entity_data = []\n",
    "        predix = os.path.split(unlabel_path)[-1].replace(\".txt\", \"\")\n",
    "\n",
    "        # Re-assemble per-document label sequences from the chunked predictions.\n",
    "        X_align, y_align = originalTexts, self.alignment_X_y(originalTexts, cut_his, y_pred)\n",
    "\n",
    "        for i, (text, y) in enumerate(tqdm(zip(X_align, y_align), desc=\"Decoding\")):\n",
    "            entities = []\n",
    "            for k, label in enumerate(y):\n",
    "                if \"-\" in label:\n",
    "                    tag_1 = label.split(\"-\")[0]\n",
    "                    tag_2 = label.split(\"-\")[1]\n",
    "                    ## Single-character entity (S-*)\n",
    "                    if tag_1 == \"S\":\n",
    "                        start_pos = k\n",
    "                        end_pos = k + 1\n",
    "                        entity = text[start_pos: end_pos]\n",
    "                        label_type = self.tag2str[tag_2]\n",
    "                        tempObj = {\n",
    "                            \"start_pos\": start_pos,\n",
    "                            \"end_pos\": end_pos,\n",
    "                            \"label_type\": label_type\n",
    "                        }\n",
    "                        entities.append(tempObj)\n",
    "                        entity_data.append((i + 1, entity, label_type, start_pos, end_pos))\n",
    "\n",
    "                    ## Multi-character entity: extend a B-* span over I-* tags up to\n",
    "                    ## the matching E-*; the span is kept even if E-* never appears.\n",
    "                    ## (Removed the dead commented-out block and its unused has_e flag.)\n",
    "                    if tag_1 == \"B\":\n",
    "                        start_pos = k\n",
    "                        end_pos = k + 1\n",
    "                        for j in range(start_pos + 1, len(y)):\n",
    "                            if y[j] == \"I-\" + tag_2:\n",
    "                                end_pos += 1\n",
    "                            elif y[j] == 'E-' + tag_2:\n",
    "                                end_pos += 1\n",
    "                                break\n",
    "                            else:\n",
    "                                break\n",
    "                        entity = text[start_pos: end_pos]\n",
    "                        label_type = self.tag2str[tag_2]\n",
    "                        tempObj = {\n",
    "                            \"start_pos\": start_pos,\n",
    "                            \"end_pos\": end_pos,\n",
    "                            \"label_type\": label_type\n",
    "                        }\n",
    "                        entities.append(tempObj)\n",
    "                        entity_data.append((i + 1, entity, label_type, start_pos, end_pos))\n",
    "\n",
    "            with open(os.path.join(self.output_dir, '{}_{}.txt'.format(predix, self.model_name)), 'a', encoding=\"utf-8\") as f:\n",
    "                s = json.dumps({\n",
    "                    \"originalText\": text,\n",
    "                    \"entities\": entities\n",
    "                }, ensure_ascii=False)\n",
    "                f.write(s)\n",
    "                f.write(\"\\n\")\n",
    "\n",
    "        tempDF = pd.DataFrame(data=entity_data, columns=['text_id', 'entity', 'label_type', 'start_pos', 'end_pos'])\n",
    "        tempDF.to_csv(os.path.join(self.output_dir, \"tmp_entities_{}_{}.csv\".format(predix, self.model_name)), index=False)\n",
    "\n",
    "    def alignment_X_y(self, originalTexts, cut_his, y_pred):\n",
    "        \"\"\"\n",
    "        Stitch per-chunk predictions back into one label sequence per text.\n",
    "\n",
    "        ``cut_his[i]`` is either a single index into ``y_pred`` (the text was\n",
    "        not split) or a list of indices whose predictions are concatenated in\n",
    "        order. Asserts that every label sequence matches its text length.\n",
    "        \"\"\"\n",
    "        y_align = []\n",
    "        for doc_idx, doc_text in enumerate(originalTexts):\n",
    "            chunk_ref = cut_his[doc_idx]\n",
    "            if isinstance(chunk_ref, int):\n",
    "                labels = y_pred[chunk_ref]\n",
    "            else:\n",
    "                labels = []\n",
    "                for chunk_idx in chunk_ref:\n",
    "                    labels.extend(y_pred[chunk_idx])\n",
    "\n",
    "            assert len(doc_text) == len(labels), 'i:{};text_len:{};while label_len:{}'.format(doc_idx, len(doc_text), len(labels))\n",
    "            y_align.append(labels)\n",
    "\n",
    "        assert len(originalTexts) == len(y_align)\n",
    "        return y_align\n",
    "\n",
    "    def train(self):\n",
    "        \"\"\"\n",
    "        Train the NER model with ignite: AdamW with separate BERT/CRF learning\n",
    "        rates, dev-set early stopping, TensorBoard logging, and top-k\n",
    "        checkpointing into ``self.output_dir``.\n",
    "        \"\"\"\n",
    "        ## train data\n",
    "        train_X, train_y, _ = self.get_X_y(self.train_path)\n",
    "        train_input_ids, train_input_mask_ids, train_label_ids, train_label_mask_ids = self.get_X_y_ids(train_X, train_y)\n",
    "\n",
    "        ## dev data\n",
    "        dev_X, dev_y, _ = self.get_X_y(self.dev_path)\n",
    "        dev_input_ids, dev_input_mask_ids, dev_label_ids, dev_label_mask_ids = self.get_X_y_ids(dev_X, dev_y)\n",
    "\n",
    "        train_ds = TensorDataset(train_input_ids, train_input_mask_ids, train_label_ids, train_label_mask_ids)\n",
    "        dev_ds = TensorDataset(dev_input_ids, dev_input_mask_ids, dev_label_ids, dev_label_mask_ids)\n",
    "\n",
    "        batch_size = self.n_gpu * self.per_gpu_batch_size\n",
    "        train_iter = DataLoader(train_ds, batch_size=batch_size, shuffle=True, drop_last=True)\n",
    "        # NOTE(review): the dev loader also uses shuffle=True/drop_last=True, so\n",
    "        # part of the dev set may never be scored -- confirm this is intended.\n",
    "        dev_iter = DataLoader(dev_ds, batch_size=batch_size, shuffle=True, drop_last=True)\n",
    "\n",
    "        model = NER_Model(vocab_size=self.bert_tokenizer.vocab_size, embed_size=self.embed_size,\n",
    "                          num_tags=len(self.label_list), max_len=self.max_seq_len, device=self.device,\n",
    "                          dense_layer_type=self.dense_layer_type, dropout=self.dropout, embed_type=self.embed_type,\n",
    "                          model_name_or_path=self.model_name_or_path, vector_file=self.vector_file)\n",
    "\n",
    "        model.to(self.device)\n",
    "\n",
    "        if self.n_gpu > 1:\n",
    "            model = torch.nn.DataParallel(model)\n",
    "\n",
    "        logger.info(\"model.named_parameters()\")\n",
    "        for n, p in model.named_parameters():\n",
    "            logger.info(n)\n",
    "\n",
    "        # Two parameter groups: encoder params whose name contains 'bert'\n",
    "        # (lr=bert_lr) and everything else, e.g. CRF/dense layers (lr=crf_lr).\n",
    "        parameters = [{\n",
    "            \"params\": [p for n, p in model.named_parameters() if \"bert\" in n],\n",
    "            \"lr\": self.bert_lr\n",
    "        }, {\n",
    "            \"params\": [p for n, p in model.named_parameters() if \"bert\" not in n],\n",
    "            \"lr\": self.crf_lr\n",
    "        }]\n",
    "\n",
    "        optimizer = torch.optim.AdamW(parameters, lr=self.crf_lr)\n",
    "\n",
    "        tb_writer = SummaryWriter()\n",
    "\n",
    "        def train_fn(engine, batch):\n",
    "            # One optimization step; returns (loss, masked token accuracy).\n",
    "            model.train()\n",
    "            optimizer.zero_grad()\n",
    "            batch = tuple(t.to(self.device) for t in batch)\n",
    "            labels = batch[2]\n",
    "\n",
    "            inputs = {\n",
    "                \"token_ids\": batch[0],\n",
    "                \"input_masks\": batch[1],\n",
    "                \"label_ids\": labels,\n",
    "            }\n",
    "\n",
    "            loss, sequence_tags = model(**inputs)\n",
    "\n",
    "            score = (sequence_tags == labels).float().detach().cpu().numpy()\n",
    "\n",
    "            # Token accuracy restricted to positions that are neither 'O' nor '<pad>'.\n",
    "            condition_1 = (labels != self.label_list.index(\"O\")).detach().cpu().numpy()\n",
    "            condition_2 = (labels != self.label_list.index(\"<pad>\")).detach().cpu().numpy()\n",
    "            patten = np.logical_and(condition_1, condition_2)\n",
    "            score = score[patten].mean()\n",
    "\n",
    "            if self.n_gpu > 1:\n",
    "                loss = loss.mean()\n",
    "\n",
    "            ## tensorboard\n",
    "            global_step = global_step_from_engine(engine)(engine, engine.last_event_name)\n",
    "            # tb_writer.add_scalar('learning_rate', scheduler.get_lr()[0], global_step)\n",
    "            tb_writer.add_scalar('train_loss', loss.item(), global_step)\n",
    "            tb_writer.add_scalar('train_score', score.item(), global_step)\n",
    "\n",
    "            loss.backward()\n",
    "            torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)\n",
    "            optimizer.step()\n",
    "            return loss.item(), score\n",
    "\n",
    "        trainer = Engine(train_fn)\n",
    "        RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'loss')\n",
    "        RunningAverage(output_transform=lambda x: x[1]).attach(trainer, 'score')\n",
    "\n",
    "        def dev_fn(engine, batch):\n",
    "            # Same computation as train_fn but without gradient/optimizer updates.\n",
    "            model.eval()\n",
    "            optimizer.zero_grad()\n",
    "            with torch.no_grad():\n",
    "                batch = tuple(t.to(self.device) for t in batch)\n",
    "                labels = batch[2]\n",
    "\n",
    "                inputs = {\n",
    "                    \"token_ids\": batch[0],\n",
    "                    \"input_masks\": batch[1],\n",
    "                    \"label_ids\": labels,\n",
    "                }\n",
    "\n",
    "                loss, sequence_tags = model(**inputs)\n",
    "\n",
    "            score = (sequence_tags == labels).float().detach().cpu().numpy()\n",
    "\n",
    "            condition_1 = (labels != self.label_list.index(\"O\")).detach().cpu().numpy()\n",
    "            condition_2 = (labels != self.label_list.index(\"<pad>\")).detach().cpu().numpy()\n",
    "            patten = np.logical_and(condition_1, condition_2)\n",
    "            score = score[patten].mean()\n",
    "\n",
    "            if self.n_gpu > 1:\n",
    "                loss = loss.mean()\n",
    "\n",
    "            ## tensorboard\n",
    "            global_step = global_step_from_engine(engine)(engine, engine.last_event_name)\n",
    "            # tb_writer.add_scalar('learning_rate', scheduler.get_lr()[0], global_step)\n",
    "            tb_writer.add_scalar('dev_loss', loss.item(), global_step)\n",
    "            tb_writer.add_scalar('dev_score', score.item(), global_step)\n",
    "\n",
    "            return loss.item(), score\n",
    "\n",
    "        dev_evaluator = Engine(dev_fn)\n",
    "        RunningAverage(output_transform=lambda x: x[0]).attach(dev_evaluator, 'loss')\n",
    "        RunningAverage(output_transform=lambda x: x[1]).attach(dev_evaluator, 'score')\n",
    "\n",
    "        pbar = ProgressBar(persist=True, bar_format=\"\")\n",
    "        pbar.attach(trainer, ['loss', 'score'])\n",
    "        pbar.attach(dev_evaluator, ['loss', 'score'])\n",
    "\n",
    "        # Early-stopping metric: higher score and lower loss are both better.\n",
    "        def score_fn(engine):\n",
    "            loss = engine.state.metrics['loss']\n",
    "            score = engine.state.metrics['score']\n",
    "            '''\n",
    "            if score < 0.5:\n",
    "                logger.info(\"Too low to learn!\")\n",
    "                trainer.terminate()\n",
    "            '''\n",
    "\n",
    "            return score / (loss + 1e-12)\n",
    "\n",
    "        handler = EarlyStopping(patience=self.patience, score_function=score_fn, trainer=trainer)\n",
    "        dev_evaluator.add_event_handler(Events.COMPLETED, handler)\n",
    "\n",
    "        @trainer.on(Events.EPOCH_COMPLETED)\n",
    "        def log_dev_results(engine):\n",
    "            # Run a full dev pass after every training epoch.\n",
    "            dev_evaluator.run(dev_iter)\n",
    "            dev_metrics = dev_evaluator.state.metrics\n",
    "            avg_score = dev_metrics['score']\n",
    "            avg_loss = dev_metrics['loss']\n",
    "            logger.info(\n",
    "                \"Validation Results - Epoch: {}  Avg score: {:.2f} Avg loss: {:.2f}\"\n",
    "                    .format(engine.state.epoch, avg_score, avg_loss))\n",
    "\n",
    "        def model_score(engine):\n",
    "            score = engine.state.metrics['score']\n",
    "            return score\n",
    "\n",
    "\n",
    "        # Keep the n_saved best checkpoints ranked by the dev 'score' metric.\n",
    "        checkpointer = ModelCheckpoint(self.output_dir, \"cehr_ner\", n_saved=self.n_saved,\n",
    "                                       create_dir=True, score_name=\"model_score\",\n",
    "                                       score_function=model_score,\n",
    "                                       global_step_transform=global_step_from_engine(trainer),\n",
    "                                       require_empty=False)\n",
    "\n",
    "        # Save the unwrapped model when DataParallel is in use.\n",
    "        dev_evaluator.add_event_handler(Events.COMPLETED, checkpointer,\n",
    "                                        {self.model_name: model.module if hasattr(model, 'module') else model})\n",
    "\n",
    "        # Clear cuda cache between training/testing\n",
    "        def empty_cuda_cache(engine):\n",
    "            torch.cuda.empty_cache()\n",
    "            import gc\n",
    "            gc.collect()\n",
    "\n",
    "        trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)\n",
    "        dev_evaluator.add_event_handler(Events.COMPLETED, empty_cuda_cache)\n",
    "\n",
    "        trainer.run(train_iter, max_epochs=self.max_epochs)\n",
    "\n",
    "    def predict(self, X):\n",
    "        \"\"\"\n",
    "        Predict label sequences for tokenized texts ``X`` by checkpoint vote.\n",
    "\n",
    "        Every saved checkpoint matching this model name is loaded in turn; the\n",
    "        final tag at each position is the mode over all checkpoints' outputs.\n",
    "\n",
    "        Returns:\n",
    "            list of label-string lists, one per text, restricted to positions\n",
    "            whose label mask is not -100 (i.e. real tokens).\n",
    "\n",
    "        Raises:\n",
    "            FileNotFoundError: when no checkpoint file is found (previously\n",
    "            this crashed later with an opaque torch error on an empty tensor).\n",
    "        \"\"\"\n",
    "        all_input_ids, all_input_mask_ids, all_label_ids, all_label_mask_ids = self.get_X_y_ids(X)\n",
    "        dataset = TensorDataset(all_input_ids, all_input_mask_ids, all_label_ids)\n",
    "\n",
    "        batch_size = self.n_gpu * self.per_gpu_batch_size\n",
    "        dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False)\n",
    "        model = NER_Model(vocab_size=self.bert_tokenizer.vocab_size, embed_size=self.embed_size,\n",
    "                  num_tags=len(self.label_list), max_len=self.max_seq_len, device=self.device,\n",
    "                  dense_layer_type=self.dense_layer_type, dropout=self.dropout, embed_type=self.embed_type,\n",
    "                  model_name_or_path=self.model_name_or_path, vector_file=self.vector_file)\n",
    "\n",
    "        model.to(self.device)\n",
    "\n",
    "        y_preds = []\n",
    "\n",
    "        checkpoint_paths = glob(os.path.join(self.output_dir, '*{}*.pt*'.format(self.model_name)))\n",
    "        if not checkpoint_paths:\n",
    "            raise FileNotFoundError(\"No checkpoint matching '{}' found in {}\".format(self.model_name, self.output_dir))\n",
    "\n",
    "        for model_state_path in checkpoint_paths:\n",
    "            # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts.\n",
    "            model.load_state_dict(torch.load(model_state_path, map_location=self.device))\n",
    "            y_pred = self.single_predict(model, dataloader)\n",
    "            y_preds.append(y_pred)\n",
    "\n",
    "        y_preds = torch.tensor(y_preds)\n",
    "        # Majority vote across checkpoints for every (text, position) pair.\n",
    "        y_pred = torch.mode(y_preds, dim=0).values\n",
    "        y_pred = y_pred.numpy()\n",
    "\n",
    "        preds_list = [[] for _ in range(all_label_mask_ids.shape[0])]\n",
    "\n",
    "        for i in range(all_label_mask_ids.shape[0]):\n",
    "            for j in range(all_label_mask_ids.shape[1]):\n",
    "                if all_label_mask_ids[i, j] != -100:\n",
    "                    preds_list[i].append(self.label_list[y_pred[i][j]])\n",
    "        return preds_list\n",
    "\n",
    "    def single_predict(self, model, dataloader):\n",
    "        \"\"\"\n",
    "        Run one model over ``dataloader`` and collect all predicted tag ids.\n",
    "\n",
    "        Returns:\n",
    "            np.ndarray of shape (num_examples, seq_len), or None when the\n",
    "            dataloader is empty (mirrors the previous behavior).\n",
    "        \"\"\"\n",
    "        if self.n_gpu > 1:\n",
    "            model = torch.nn.DataParallel(model)\n",
    "\n",
    "        model.eval()\n",
    "        batch_preds = []\n",
    "        with torch.no_grad():\n",
    "            for batch in tqdm(dataloader, desc=\"Predicting\"):\n",
    "                batch = tuple(t.to(self.device) for t in batch)\n",
    "                inputs = {\n",
    "                    \"token_ids\": batch[0],\n",
    "                    \"input_masks\": batch[1],\n",
    "                }\n",
    "                _, sequence_tags = model(**inputs)\n",
    "\n",
    "                batch_preds.append(sequence_tags.detach().cpu().numpy())\n",
    "\n",
    "        # Concatenate once at the end instead of np.append per batch,\n",
    "        # which re-copied the whole array every iteration (O(n^2)).\n",
    "        if not batch_preds:\n",
    "            return None\n",
    "        return np.concatenate(batch_preds, axis=0)\n",
    "\n",
    "    def get_X_y_ids(self, X, y=None):\n",
    "        \"\"\"\n",
    "        Convert character lists (and optional label lists) to padded id tensors.\n",
    "\n",
    "        Args:\n",
    "            X: list of character lists, one per text.\n",
    "            y: optional list of BIES label lists aligned with ``X``. When None\n",
    "               (prediction time) dummy '<pad>' labels are generated and the\n",
    "               label mask is derived from the input ids instead.\n",
    "\n",
    "        Returns:\n",
    "            (input_ids, input_mask_ids, label_ids, label_mask_ids) LongTensors,\n",
    "            each of shape (len(X), max_seq_len). Mask value 100 marks real\n",
    "            tokens; -100 marks [CLS]/[SEP]/padding positions.\n",
    "        \"\"\"\n",
    "        all_input_ids = []\n",
    "        all_label_ids = []\n",
    "        all_input_mask_ids = []\n",
    "        all_label_mask_ids = []\n",
    "        for i, X_ in enumerate(tqdm(X, desc=\"Tokens to ids\")):\n",
    "            text = list(map(str.lower, X_))\n",
    "            # encode() adds special tokens, so the unpadded length exceeds len(text).\n",
    "            input_ids = self.bert_tokenizer.encode(text=text)\n",
    "            input_mask_ids = [1] * len(input_ids)\n",
    "            padding_len = self.max_seq_len - len(input_ids)\n",
    "            input_ids += [self.bert_tokenizer.pad_token_id] * padding_len\n",
    "            input_mask_ids += [0] * padding_len\n",
    "\n",
    "            if y is not None:\n",
    "                # Explicit branch instead of the old bare ``except:`` fallback,\n",
    "                # which could silently swallow real alignment errors.\n",
    "                y_ = ['<start>'] + y[i] + ['<end>']\n",
    "                y_ += ['<pad>'] * padding_len\n",
    "                label_mask_id = [-100] + [100] * len(y[i]) + [-100]\n",
    "                label_mask_id += [-100] * padding_len\n",
    "            else:\n",
    "                y_ = ['<start>', '<end>'] + ['<pad>'] * (self.max_seq_len - 2)\n",
    "                label_mask_id = [-100 if idx in [\n",
    "                    self.bert_tokenizer.pad_token_id,\n",
    "                    self.bert_tokenizer.cls_token_id,\n",
    "                    self.bert_tokenizer.sep_token_id\n",
    "                ] else 100 for idx in input_ids]\n",
    "\n",
    "            label_ids = list(map(self.label_list.index, y_))\n",
    "\n",
    "            assert len(input_ids) == len(input_mask_ids) == len(label_ids) == len(label_mask_id) == self.max_seq_len\n",
    "            all_input_ids.append(input_ids)\n",
    "            all_input_mask_ids.append(input_mask_ids)\n",
    "            all_label_ids.append(label_ids)\n",
    "            all_label_mask_ids.append(label_mask_id)\n",
    "\n",
    "            if i == 0:\n",
    "                logger.info(\"tokens:\\n{}\".format(text))\n",
    "                logger.info(\"token_ids: \\n{}\".format(input_ids))\n",
    "                logger.info(\"labels:\\n{}\".format(y_))\n",
    "                logger.info(\"label_ids: \\n{}\".format(label_ids))\n",
    "\n",
    "        all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)\n",
    "        all_input_mask_ids = torch.tensor(all_input_mask_ids, dtype=torch.long)\n",
    "        all_label_ids = torch.tensor(all_label_ids, dtype=torch.long)\n",
    "        all_label_mask_ids = torch.tensor(all_label_mask_ids, dtype=torch.long)\n",
    "        return all_input_ids, all_input_mask_ids, all_label_ids, all_label_mask_ids\n",
    "\n",
    "    def get_X_y(self, file_path):\n",
    "        \"\"\"\n",
    "        Read a labeled JSON-lines file and build BIES-tagged character sequences.\n",
    "\n",
    "        Texts of at least ``max_split_len`` characters are split at punctuation\n",
    "        first, and entity offsets are remapped onto the resulting chunks.\n",
    "\n",
    "        Returns:\n",
    "            (X, y, vocab_df): character lists, aligned label lists, and a\n",
    "            deduplicated DataFrame of (entity, label_type) pairs.\n",
    "        \"\"\"\n",
    "        X = []\n",
    "        y = []\n",
    "        entity_data = []\n",
    "        flag = 0\n",
    "        with open(file_path, 'r', encoding=\"utf8\") as reader:\n",
    "            for i, line in enumerate(tqdm(reader.readlines(), desc=\"Read {}\".format(file_path))):\n",
    "                tmpObj = json.loads(line)\n",
    "                originalText = tmpObj['originalText']\n",
    "                text = self.clean_text(originalText)\n",
    "                entities = tmpObj['entities']\n",
    "                if len(text) < self.max_split_len:\n",
    "                    X_ = list(text)\n",
    "                    y_ = ['O'] * len(X_)\n",
    "                    for entity in entities:\n",
    "                        start_pos = entity['start_pos']\n",
    "                        end_pos = entity['end_pos']\n",
    "                        label_type = entity['label_type']\n",
    "                        tag = self.str2tag[label_type]\n",
    "                        if end_pos - start_pos > 1:\n",
    "                            y_[start_pos] = 'B-' + tag\n",
    "                            # Renamed to ``pos``: the old code shadowed the outer\n",
    "                            # enumerate() loop variable ``i`` here.\n",
    "                            for pos in range(start_pos+1, end_pos-1):\n",
    "                                y_[pos] = 'I-' + tag\n",
    "                            y_[end_pos - 1] = 'E-' + tag\n",
    "                        else:\n",
    "                            y_[start_pos] = 'S-' + tag\n",
    "\n",
    "                        entity_data.append((text[start_pos : end_pos], label_type))\n",
    "                    X.append(X_)\n",
    "                    y.append(y_)\n",
    "\n",
    "                else:\n",
    "                    # split text at punctuation and remap entity offsets\n",
    "                    dot_index_list = self.get_dot_index(text)\n",
    "\n",
    "                    X_list, y_list, entity_data_ = self.get_short_text_label(text, dot_index_list, entities)\n",
    "\n",
    "                    assert len(text) == sum(map(len, X_list))\n",
    "\n",
    "                    # Log the first few split examples for manual inspection.\n",
    "                    if flag < 3:\n",
    "                        logger.info(\"full text:\\n{}\".format(text))\n",
    "                        X_list_str = list(map(\"\".join, X_list))\n",
    "                        logger.info(\"short texts:\\n{}\".format(\"\\n\".join(X_list_str)))\n",
    "                        flag += 1\n",
    "\n",
    "                    X.extend(X_list)\n",
    "                    y.extend(y_list)\n",
    "                    entity_data.extend(entity_data_)\n",
    "\n",
    "        vocab_df = pd.DataFrame(data=entity_data, columns=['entity', 'label_type'])\n",
    "        vocab_df.drop_duplicates(inplace=True, ignore_index=True)\n",
    "        assert len(X) == len(y)\n",
    "        return X, y, vocab_df\n",
    "\n",
    "    def get_X(self, unlabeled_file):\n",
    "        \"\"\"\n",
    "        Read an unlabeled JSON-lines file and tokenize into character lists.\n",
    "\n",
    "        Long texts are split at punctuation. ``cut_his`` records, per original\n",
    "        text id, either the single index into ``X`` (unsplit text) or the list\n",
    "        of indices of its chunks, so ``alignment_X_y`` can re-assemble the\n",
    "        predictions later.\n",
    "\n",
    "        Returns:\n",
    "            (X, cut_his, originalTexts)\n",
    "        \"\"\"\n",
    "        X = []\n",
    "        cut_his = {}\n",
    "        originalTexts = []\n",
    "        print_flag = 0\n",
    "        with open(unlabeled_file, 'r', encoding='utf-8') as f:\n",
    "            for text_id, line in enumerate(tqdm(f.readlines(), desc=\"Reading {}\".format(unlabeled_file))):\n",
    "                tempObj = json.loads(line)\n",
    "                originalText = tempObj['originalText']\n",
    "                originalTexts.append(originalText)\n",
    "\n",
    "                text = self.clean_text(originalText)\n",
    "                if len(text) < self.max_split_len:\n",
    "                    X.append(list(text))\n",
    "                    cut_his[text_id] = len(X) - 1\n",
    "                else:\n",
    "                    # split text\n",
    "                    dot_index_list = self.get_dot_index(text)\n",
    "                    # ``flag`` is the chunk start offset inside the full text.\n",
    "                    flag = 0\n",
    "                    text_id_list = []\n",
    "                    if print_flag < 3:\n",
    "                        logger.info(\"full text:\\n{}\".format(text))\n",
    "\n",
    "                    for i, do_index in enumerate(dot_index_list):\n",
    "                        short_text = text[flag: do_index + 1]\n",
    "                        if print_flag < 3:\n",
    "                            logger.info(\"short texts:\\n{}\".format(short_text))\n",
    "                        # print(\"Short text:{}\".format(short_text))\n",
    "                        X_ = list(short_text)\n",
    "                        X.append(X_)\n",
    "                        text_id_list.append(len(X) - 1)\n",
    "                        flag = do_index + 1\n",
    "\n",
    "                    # Only the first few split documents are logged above.\n",
    "                    print_flag += 1\n",
    "\n",
    "                    cut_his[text_id] = text_id_list\n",
    "        return X, cut_his, originalTexts\n",
    "\n",
    "    def get_short_text_label(self, text, dot_index_list, entities):\n",
    "        \"\"\"\n",
    "        Project entity annotations onto punctuation-split chunks of ``text``.\n",
    "\n",
    "        ``flag`` tracks the current chunk's start offset inside the full text;\n",
    "        entity positions are shifted by -flag and only the parts that fall\n",
    "        inside the current chunk are labeled. NOTE(review): an entity that\n",
    "        crosses a split boundary is therefore labeled only partially in each\n",
    "        chunk -- confirm this is acceptable.\n",
    "\n",
    "        Returns:\n",
    "            (X, y, entity_data): chunk character lists, aligned BIES labels,\n",
    "            and the (surface, label_type) pairs actually recovered in chunks.\n",
    "        \"\"\"\n",
    "        X = []\n",
    "        y = []\n",
    "        flag = 0\n",
    "        entity_data = []\n",
    "        for i, dot_index in enumerate(dot_index_list):\n",
    "            short_text = text[flag : dot_index+1]\n",
    "            X_ = list(short_text)\n",
    "            y_ = [\"O\"] * len(X_)\n",
    "\n",
    "            for entity in entities:\n",
    "                start_pos = entity[\"start_pos\"]\n",
    "                end_pos = entity[\"end_pos\"]\n",
    "                label_type = entity[\"label_type\"]\n",
    "                tag = self.str2tag[label_type]\n",
    "\n",
    "                # k = entity start relative to this chunk; negative or >= len(y_)\n",
    "                # means the position lies outside the chunk and is skipped.\n",
    "                k = start_pos - flag\n",
    "                en_list = []\n",
    "                if end_pos - start_pos > 1:\n",
    "                    if k >= 0 and k < len(y_):\n",
    "                        y_[k] = 'B-' + tag\n",
    "                        en_list.append(X_[k])\n",
    "\n",
    "                    for j in range(start_pos + 1, end_pos - 1):\n",
    "                        j = j - flag\n",
    "                        if j >= 0 and j < len(y_):\n",
    "                            y_[j] = 'I-' + tag\n",
    "                            en_list.append(X_[j])\n",
    "                    e = end_pos - 1 - flag\n",
    "                    if e >= 0 and e < len(y_):\n",
    "                        y_[e] = 'E-' + tag\n",
    "                        en_list.append(X_[e])\n",
    "                else:\n",
    "                    # Single-character entity.\n",
    "                    if k >= 0 and k < len(y_):\n",
    "                        y_[k] = 'S-' + tag\n",
    "                        en_list.append(X_[k])\n",
    "\n",
    "                # Record the surface string actually covered by this chunk.\n",
    "                if len(en_list) > 0:\n",
    "                    entity_data.append((\"\".join(en_list), label_type))\n",
    "\n",
    "            flag = dot_index + 1\n",
    "\n",
    "            X.append(X_)\n",
    "            y.append(y_)\n",
    "        return X, y, entity_data\n",
    "\n",
    "    def get_dot_index(self, text):\n",
    "        \"\"\"Compute split positions so every chunk is at most ``self.max_split_len`` chars.\n",
    "\n",
    "        Repeatedly looks at a window of ``max_split_len`` characters, takes\n",
    "        the last punctuation mark inside the window as the split point, and\n",
    "        continues on the remainder. Falls back to a hard cut at the window\n",
    "        end when the window contains one punctuation mark or none.\n",
    "\n",
    "        Returns:\n",
    "            list of absolute indices into ``text``; the final element is\n",
    "            ``len(text)`` so the tail is always covered.\n",
    "        \"\"\"\n",
    "        flag = 0  # absolute offset of the current window start\n",
    "        text_ = text\n",
    "        dot_index_list = []\n",
    "\n",
    "        while (len(text_) > self.max_split_len):\n",
    "            text_ = text_[:self.max_split_len]\n",
    "            index_list = []\n",
    "            # NOTE(review): '|' inside a character class is a literal, so this\n",
    "            # also matches '|'; the intended set is , ， ; ； 。 、\n",
    "            for match in re.finditer(\"[,|，|;|；|。|、]\", text_):\n",
    "                index_list.append(match.span()[0])\n",
    "\n",
    "            index_list.sort()\n",
    "            if len(index_list) > 1:\n",
    "                last_dot = index_list.pop()  # split at the last mark in the window\n",
    "            else:\n",
    "                # 0 or 1 marks in the window: hard cut at the window end.\n",
    "                # (A single mark is ignored -- possibly intentional; TODO confirm.)\n",
    "                last_dot = len(text_)\n",
    "            dot_index_list.append(last_dot + flag)\n",
    "            text_ = text[(last_dot + flag) :]  # remainder still starts at the split char\n",
    "            flag += last_dot\n",
    "\n",
    "        dot_index_list.append(len(text))\n",
    "        return dot_index_list\n",
    "\n",
    "    def clean_text(self, text):\n",
    "        def special2n(string):\n",
    "            string = string.replace(r\"\\n\", \"\")\n",
    "            return re.sub(\"[ |\\t|\\r|\\n|\\\\\\|\\u0004]\", \"_\", string)\n",
    "\n",
    "        def strQ2B(ustr):\n",
    "            \"全角转半角\"\n",
    "            rstr = \"\"\n",
    "            for uchar in ustr:\n",
    "                inside_code = ord(uchar)\n",
    "                # 全角空格直接转换\n",
    "                if inside_code == 12288:\n",
    "                    inside_code = 32\n",
    "                # 全角字符（除空格）根据关系转化\n",
    "                elif (inside_code >= 65281 and inside_code <= 65374):\n",
    "                    inside_code -= 65248\n",
    "\n",
    "                rstr += chr(inside_code)\n",
    "            return rstr\n",
    "\n",
    "        return strQ2B(special2n(text)).lower()\n",
    "\n",
    "    def explore_dataset(self, file_path):\n",
    "        \"\"\"Read a JSON-lines NER dataset and write a small stats summary.\n",
    "\n",
    "        Each line of ``file_path`` must be a JSON object with keys\n",
    "        'originalText' and 'entities' (a list of dicts with 'start_pos',\n",
    "        'end_pos', 'label_type'). Writes the label-type distribution and the\n",
    "        text-length distribution to a sibling ``.desc`` file and logs both.\n",
    "\n",
    "        Args:\n",
    "            file_path: path to the ``.txt`` JSON-lines dataset file.\n",
    "        \"\"\"\n",
    "        text_list = []\n",
    "        entity_list = []\n",
    "        with open(file_path, 'r', encoding=\"utf8\") as reader:\n",
    "            for i, line in enumerate(tqdm(reader.readlines(), desc=\"Read {}\".format(file_path))):\n",
    "                try:\n",
    "                    tmpObj = json.loads(line)\n",
    "                except json.decoder.JSONDecodeError:\n",
    "                    # Report the bad line (1-based) and skip it; previously\n",
    "                    # execution fell through and reused the stale tmpObj from\n",
    "                    # the prior iteration (NameError on the first line).\n",
    "                    print(i+1, line)\n",
    "                    continue\n",
    "                originalText = tmpObj['originalText']\n",
    "                entities = tmpObj['entities']\n",
    "                text_list.append(originalText)\n",
    "                for en_obj in entities:\n",
    "                    start_pos = en_obj['start_pos']\n",
    "                    end_pos = en_obj['end_pos']\n",
    "                    label_type = en_obj['label_type']\n",
    "                    entity_list.append((label_type, originalText[start_pos : end_pos]))\n",
    "\n",
    "        text_df = pd.DataFrame(data=text_list, columns=['text'])\n",
    "        entity_df = pd.DataFrame(data=entity_list, columns=['label_type', 'entity'])\n",
    "        # Swap only the extension; str.replace(\"txt\", \"desc\") would hit the\n",
    "        # first \"txt\" anywhere in the path (e.g. a directory name).\n",
    "        desc_path = os.path.splitext(file_path)[0] + \".desc\"\n",
    "\n",
    "        label_type_desc = entity_df['label_type'].value_counts()\n",
    "        text_lens = text_df['text'].str.len().describe()\n",
    "        with open(desc_path, 'w', encoding=\"utf-8\") as writer:\n",
    "            writer.write(\"label_type distribution:\\n{}\".format(label_type_desc))\n",
    "            writer.write(\"\\n\\n\")\n",
    "            writer.write(\"text len distribution:\\n{}\".format(text_lens))\n",
    "        logger.info(\"label_type distribution:\\n{}\".format(label_type_desc))\n",
    "        logger.info(\"text len distribution:\\n{}\".format(text_lens))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def clean_cache():\n",
    "    torch.cuda.empty_cache()\n",
    "    import gc\n",
    "    gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Read datasets/CCKS2019_NER\\train.txt:   0%|                                                                | 0/1000 [00:00<?, ?it/s]06/21/2024 06:55:59 - INFO - __main__ -   full text:\n",
      ",患者因罹患“胃癌”于2013-10-29在我院予行全麻上胃癌根治术,,术中见:腹腔内腹水,腹膜无转移,肝脏未触及明显转移性灶,肿瘤位于胃体、胃底部,小弯侧偏后壁,约5*4*2cm大小,肿瘤已侵达浆膜外,第1、3组淋巴结肿大,肿瘤尚能活动,经探查决定行全胃切除,空肠j字代胃术。手术顺利,术后积极予相关对症支持治疗;,后病理示:胃底、体小弯侧低分化腺癌,部分为印戒细胞癌图像,蕈伞型,面积5.2*3.5cm,局部侵达粘膜上层,并于少数腺管内查见癌栓。双端切线及另送“近端切线”未查见癌。呈三组(5/13个)淋巴结癌转移。一组(7个)、四组(13个)、五组(1个)、六组(4个)淋巴结未查见癌。,癌组织免疫组化染色示:ercc1(+)、β-tubulin-iii(+)、ts(-)、rrm1(-)、topoii阳性细胞数约20%、cerbb-2(2+)_。依据患者病情及肿瘤病理与分期继续术后辅助性化疗指征存在,患者及家属拒绝化疗。自术后出院以来,患者一般情况保持良好;无发热,偶有恶心,无呕吐,无反酸、嗳气,无明显进食不适,偶有进食后轻微腹胀,无腹痛。现患者为行进一步复查并必要时适当处理而再来我院就诊,门诊依情以“胃恶性肿瘤术后”收入院。目前患者精神及情绪状态良好,食欲较术前明显减少,饮食可,夜间睡眠后;今8个月体重减轻18kg。\n",
      "06/21/2024 06:55:59 - INFO - __main__ -   short texts:\n",
      ",患者因罹患“胃癌”于2013-10-29在我院予行全麻上胃癌根治术,,术中见:腹腔内腹水,腹膜无转移,肝脏未触及明显转移性灶,肿瘤位于胃体、胃底部,小弯侧偏后壁,约5*4*2cm大小,肿瘤已侵达浆膜外,第1、3组淋巴结肿大,肿瘤尚能活动,经探查决定行全胃切除,空肠j字代胃术。手术顺利,术后积极予相关对症支持治疗;,后病理示:胃底、体小弯侧低分化腺癌,部分为印戒细胞癌图像,蕈伞型,面积5.2*3.5cm,局部侵达粘膜上层,并于少数腺管内查见癌栓。双端切线及另送“近端切线”未查见癌。呈三组(5/13个)淋巴结癌转移。一组(7个)、四组(13个)、五组(1个)、六组(4个)淋巴结未查见癌。,癌组织免疫组化染色示:ercc1(+)、β-tubulin-iii(+)、ts(-)、rrm1(-)、topoii阳性细胞数约20%、cerbb-2(2+)_。依据患者病情及肿瘤病理与分期继续术后辅助性化疗指征存在,患者及家属拒绝化疗。自术后出院以来,患者一般情况保持良好;无发热,偶有恶心,无呕吐,无反酸、嗳气,无明显进食不适,偶有进食后轻微腹胀,无腹痛。\n",
      "现患者为行进一步复查并必要时适当处理而再来我院就诊,门诊依情以“胃恶性肿瘤术后”收入院。目前患者精神及情绪状态良好,食欲较术前明显减少,饮食可,夜间睡眠后;今8个月体重减轻18kg。\n",
      "06/21/2024 06:55:59 - INFO - __main__ -   full text:\n",
      ",缘于入院前5月余因“反复返酸、嗳气7年”就诊我院,,行胃镜检查示:“1.胃窦癌2.慢性萎缩性胃炎”,,病理示:“胃管状腺癌ii-iii级”,下腹部平扫+,增强示:“胃肿瘤,伴腹腔、腹膜后多发淋巴结肿大”,考虑肿瘤局部较晚期,遂于2015.04.18、2015.05.11、2015.06.02、2015.06.29、2015.08.02予“安素泰210mgivgttd1+奥沙利铂(艾恒)150mgivgttd2+希罗达1000mgbidpod1-14”方案新辅助化疗,辅以止吐、制酸、保胃、保肝、营养支持等治疗。化疗后未见明显骨髓抑制,一般情况尚可。并于入院前1月余(2015.09.01)全麻上行“腹腔镜上根治性远端胃大部切除术”。术顺,术后恢复可,,术后病理:(远端胃),:胃体及胃窦小弯侧早期隆起型(0-i型)低黏附性癌,印戒细胞癌为主,部分为低分化腺癌及未分化癌,浸润脉管及神经组织,脉管内见癌栓,侵及黏膜上层。标本下切端、上切端及另送(下切端),均未见癌浸润。找到小弯侧淋巴结0/13个,大弯侧淋巴结2/10个,幽门下淋巴结1/4个,幽门上淋巴结5/6个,另送(胃右动脉根部)淋巴结2/8个,(胃壁)淋巴结0/4个,见癌转移。,免疫组化染色结果:ck(pan)(+++),e-cadherin(+++),cdx-2(+++),cga(+),cd56(-),sy(-),her-2(-)。门诊拟“胃窦术后化疗”收入院。自发病以来,精神、睡眠可,食欲一般,大小便如常,体重无明显变化。\n",
      "06/21/2024 06:55:59 - INFO - __main__ -   short texts:\n",
      ",缘于入院前5月余因“反复返酸、嗳气7年”就诊我院,,行胃镜检查示:“1.胃窦癌2.慢性萎缩性胃炎”,,病理示:“胃管状腺癌ii-iii级”,下腹部平扫+,增强示:“胃肿瘤,伴腹腔、腹膜后多发淋巴结肿大”,考虑肿瘤局部较晚期,遂于2015.04.18、2015.05.11、2015.06.02、2015.06.29、2015.08.02予“安素泰210mgivgttd1+奥沙利铂(艾恒)150mgivgttd2+希罗达1000mgbidpod1-14”方案新辅助化疗,辅以止吐、制酸、保胃、保肝、营养支持等治疗。化疗后未见明显骨髓抑制,一般情况尚可。并于入院前1月余(2015.09.01)全麻上行“腹腔镜上根治性远端胃大部切除术”。术顺,术后恢复可,,术后病理:(远端胃),:胃体及胃窦小弯侧早期隆起型(0-i型)低黏附性癌,印戒细胞癌为主,部分为低分化腺癌及未分化癌,浸润脉管及神经组织,脉管内见癌栓,侵及黏膜上层。标本下切端、上切端及另送(下切端),均未见癌浸润。找到小弯侧淋巴结0/13个,大弯侧淋巴结2/10个,幽门下淋巴结1/4个,幽门上淋巴结5/6个,\n",
      "另送(胃右动脉根部)淋巴结2/8个,(胃壁)淋巴结0/4个,见癌转移。,免疫组化染色结果:ck(pan)(+++),e-cadherin(+++),cdx-2(+++),cga(+),cd56(-),sy(-),her-2(-)。门诊拟“胃窦术后化疗”收入院。自发病以来,精神、睡眠可,食欲一般,大小便如常,体重无明显变化。\n",
      "06/21/2024 06:55:59 - INFO - __main__ -   full text:\n",
      ",缘于入院前3月因“直肠癌”就诊我院,于2016年11月10日在全麻上行腹腔镜上直肠根治术(dixon手术),探查腹腔内无明显出血,没有明显粘连,无明显腹水,肿瘤位于腹膜返折下5cm对系膜侧,侵及浆膜层,未形成肠梗阻,近端无扩张,周围未见明显肿大淋巴结。术顺,术后予预防感染、制酸、补液、营养支持、增强免疫力等处理,术后恢复可。术后病理(201641027),:(直肠),:直肠隆起型中分化管状腺癌,小灶区为黏液腺癌,侵犯肠壁全层,脉管见癌侵犯,未见明确神经侵犯,手术标本下、上切端及另送(下切端)、(上切端)未见癌浸润。找到肠周淋巴结3/14枚及另送(肠系膜上动脉根部)淋巴结0/4枚,(中间组)淋巴结1/9枚,见癌转移。,免疫组化:肿瘤细胞示ck7(-),ck20(+++),ki67(60%+)。mlh-1(++),msh-2(+++),msh-6(+++),pms-2(+)提示该病例肿瘤微卫星稳定(mss),请结合临床。检测到kras基因存在g13d突变。未检测到nras基因热点突变。未检测到braf基因v600e突变。于2016-12-10予“奥沙利铂200mgivgttd1+卡培他滨1500mgbidd1-d14”方案化疗,此次为化疗再次就诊我院,门诊拟直肠癌术后化疗收入院。自下次出院以来精神、睡眠、饮食可,无腹痛、腹胀、发热,大小便正常,体重较前无明显变化。\n",
      "06/21/2024 06:55:59 - INFO - __main__ -   short texts:\n",
      ",缘于入院前3月因“直肠癌”就诊我院,于2016年11月10日在全麻上行腹腔镜上直肠根治术(dixon手术),探查腹腔内无明显出血,没有明显粘连,无明显腹水,肿瘤位于腹膜返折下5cm对系膜侧,侵及浆膜层,未形成肠梗阻,近端无扩张,周围未见明显肿大淋巴结。术顺,术后予预防感染、制酸、补液、营养支持、增强免疫力等处理,术后恢复可。术后病理(201641027),:(直肠),:直肠隆起型中分化管状腺癌,小灶区为黏液腺癌,侵犯肠壁全层,脉管见癌侵犯,未见明确神经侵犯,手术标本下、上切端及另送(下切端)、(上切端)未见癌浸润。找到肠周淋巴结3/14枚及另送(肠系膜上动脉根部)淋巴结0/4枚,(中间组)淋巴结1/9枚,见癌转移。,免疫组化:肿瘤细胞示ck7(-),ck20(+++),ki67(60%+)。mlh-1(++),msh-2(+++),msh-6(+++),pms-2(+)提示该病例肿瘤微卫星稳定(mss),请结合临床。检测到kras基因存在g13d突变。未检测到nras基因热点突变。未检测到braf基因v600e突变。\n",
      "于2016-12-10予“奥沙利铂200mgivgttd1+卡培他滨1500mgbidd1-d14”方案化疗,此次为化疗再次就诊我院,门诊拟直肠癌术后化疗收入院。自下次出院以来精神、睡眠、饮食可,无腹痛、腹胀、发热,大小便正常,体重较前无明显变化。\n",
      "Read datasets/CCKS2019_NER\\train.txt: 100%|███████████████████████████████████████████████████| 1000/1000 [00:00<00:00, 2442.92it/s]\n",
      "Tokens to ids:   0%|                                                                                       | 0/1280 [00:00<?, ?it/s]06/21/2024 06:56:00 - INFO - __main__ -   tokens:\n",
      "[',', '患', '者', '3', '月', '前', '因', '“', '直', '肠', '癌', '”', '于', '在', '我', '院', '于', '全', '麻', '上', '行', '直', '肠', '癌', '根', '治', '术', '(', 'd', 'i', 'x', 'o', 'n', '术', ')', ',', '手', '术', '过', '程', '顺', '利', ',', '术', '后', '给', '予', '抗', '感', '染', '及', '营', '养', '支', '持', '治', '疗', ',', '患', '者', '恢', '复', '好', ',', '切', '口', '愈', '合', '良', '好', '。', ',', '术', '后', '病', '理', '示', ':', '直', '肠', '腺', '癌', '(', '中', '低', '度', '分', '化', ')', ',', '浸', '润', '溃', '疡', '型', ',', '面', '积', '3', '.', '5', '*', '2', 'c', 'm', ',', '侵', '达', '外', '膜', '。', '双', '端', '切', '线', '另', '送', '“', '近', '端', '”', '、', '“', '远', '端', '”', '及', '环', '周', '底', '部', '切', '除', '面', '未', '查', '见', '癌', '。', '肠', '壁', '一', '站', '(', '1', '0', '个', ')', '、', '中', '间', '组', '(', '8', '个', ')', '淋', '巴', '结', '未', '查', '见', '癌', '。', ',', '免', '疫', '组', '化', '染', '色', '示', ':', 'e', 'r', 'c', 'c', '1', '弥', '漫', '(', '+', ')', '、', 't', 's', '少', '部', '分', '弱', '(', '+', ')', '、', 's', 'y', 'n', '(', '-', ')', '、', 'c', 'g', 'a', '(', '-', ')', '。', '术', '后', '查', '无', '化', '疗', '禁', '忌', '后', '给', '予', '3', '周', '期', '化', '疗', ',', ',', '方', '案', '为', ':', '奥', '沙', '利', '铂', '1', '5', '0', 'm', 'g', '_', 'd', '1', ',', '亚', '叶', '酸', '钙', '0', '.', '3', 'g', '+', '替', '加', '氟', '1', '.', '0', 'g', '_', 'd', '2', '-', 'd', '6', ',', '同', '时', '给', '与', '升', '白', '细', '胞', '、', '护', '肝', '、', '止', '吐', '、', '免', '疫', '增', '强', '治', '疗', ',', '患', '者', '副', '反', '应', '轻', '。', '院', '外', '期', '间', '患', '者', '一', '般', '情', '况', '好', ',', '无', '恶', '心', ',', '无', '腹', '痛', '腹', '胀', '胀', '不', '适', ',', '无', '现', '患', '者', '为', '行', '复', '查', '及', '化', '疗', '再', '次', '来', '院', '就', '诊', ',', '门', '诊', '以', '“', '直', '肠', '癌', '术', '后', '”', '收', '入', '院', '。', '_', '_', '_', '近', '期', '患', '者', '精', '神', '可', ',', '饮', '食', '可', ',', '大', '便', '正', '常', ',', '小', '便', '正', '常', ',', '近', '期', '体', '重', '无', '明', '显', '变', '化', '。']\n",
      "06/21/2024 06:56:00 - INFO - __main__ -   token_ids: \n",
      "[101, 117, 2642, 5442, 124, 3299, 1184, 1728, 100, 4684, 5499, 4617, 100, 754, 1762, 2769, 7368, 754, 1059, 7937, 677, 6121, 4684, 5499, 4617, 3418, 3780, 3318, 113, 146, 151, 166, 157, 156, 3318, 114, 117, 2797, 3318, 6814, 4923, 7556, 1164, 117, 3318, 1400, 5314, 750, 2834, 2697, 3381, 1350, 5852, 1075, 3118, 2898, 3780, 4545, 117, 2642, 5442, 2612, 1908, 1962, 117, 1147, 1366, 2689, 1394, 5679, 1962, 511, 117, 3318, 1400, 4567, 4415, 4850, 131, 4684, 5499, 5593, 4617, 113, 704, 856, 2428, 1146, 1265, 114, 117, 3863, 3883, 3971, 4550, 1798, 117, 7481, 4916, 124, 119, 126, 115, 123, 145, 155, 117, 909, 6809, 1912, 5606, 511, 1352, 4999, 1147, 5296, 1369, 6843, 100, 6818, 4999, 100, 510, 100, 6823, 4999, 100, 1350, 4384, 1453, 2419, 6956, 1147, 7370, 7481, 3313, 3389, 6224, 4617, 511, 5499, 1880, 671, 4991, 113, 122, 121, 702, 114, 510, 704, 7313, 5299, 113, 129, 702, 114, 3900, 2349, 5310, 3313, 3389, 6224, 4617, 511, 117, 1048, 4554, 5299, 1265, 3381, 5682, 4850, 131, 147, 160, 145, 145, 122, 2477, 4035, 113, 116, 114, 510, 162, 161, 2208, 6956, 1146, 2483, 113, 116, 114, 510, 161, 167, 156, 113, 118, 114, 510, 145, 149, 143, 113, 118, 114, 511, 3318, 1400, 3389, 3187, 1265, 4545, 4881, 2555, 1400, 5314, 750, 124, 1453, 3309, 1265, 4545, 117, 117, 3175, 3428, 711, 131, 1952, 3763, 1164, 7189, 122, 126, 121, 155, 149, 142, 146, 122, 117, 762, 1383, 7000, 7159, 121, 119, 124, 149, 116, 3296, 1217, 3703, 122, 119, 121, 149, 142, 146, 123, 118, 146, 127, 117, 1398, 3198, 5314, 680, 1285, 4635, 5301, 5528, 510, 2844, 5498, 510, 3632, 1402, 510, 1048, 4554, 1872, 2487, 3780, 4545, 117, 2642, 5442, 1199, 1353, 2418, 6768, 511, 7368, 1912, 3309, 7313, 2642, 5442, 671, 5663, 2658, 1105, 1962, 117, 3187, 2626, 2552, 117, 3187, 5592, 4578, 5592, 5515, 5515, 679, 6844, 117, 3187, 4385, 2642, 5442, 711, 6121, 1908, 3389, 1350, 1265, 4545, 1086, 3613, 3341, 7368, 2218, 6402, 117, 7305, 6402, 809, 100, 4684, 5499, 4617, 3318, 1400, 100, 3119, 1057, 7368, 511, 142, 142, 
142, 6818, 3309, 2642, 5442, 5125, 4868, 1377, 117, 7650, 7608, 1377, 117, 1920, 912, 3633, 2382, 117, 2207, 912, 3633, 2382, 117, 6818, 3309, 860, 7028, 3187, 3209, 3227, 1359, 1265, 511, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "06/21/2024 06:56:00 - INFO - __main__ -   labels:\n",
      "['<start>', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Dis', 'I-Dis', 'E-Dis', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'E-Sur', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'E-Dis', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'E-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Drug', 'I-Drug', 'I-Drug', 'E-Drug', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Drug', 'I-Drug', 'I-Drug', 'E-Drug', 'O', 'O', 'O', 'O', 'O', 'B-Drug', 'I-Drug', 'E-Drug', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'E-Dis', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', '<end>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>']\n",
      "06/21/2024 06:56:00 - INFO - __main__ -   label_ids: \n",
      "[1, 3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 20, 21, 21, 22, 3, 3, 3, 3, 3, 3, 3, 3, 3, 20, 21, 21, 22, 3, 3, 3, 3, 3, 20, 21, 22, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11, 3, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "Tokens to ids: 100%|██████████████████████████████████████████████████████████████████████████| 1280/1280 [00:00<00:00, 1736.87it/s]\n",
      "Read datasets/CCKS2019_NER\\dev.txt:   0%|                                                                   | 0/379 [00:00<?, ?it/s]06/21/2024 06:56:01 - INFO - __main__ -   full text:\n",
      ",患者于2011年9月29日在我院因“子宫内膜癌ii期”在全麻上行“广泛全子宫切除+两侧附件切除+盆腔淋巴结清扫+腹主动脉旁淋巴结活检术”,术中探查见盆腹腔未见腹水,子宫增大,约10*8*7cm,饱满,两侧附件未见异常,盆腔及腹主动脉旁淋巴结未及肿大。术程顺利,,术后病理回报:腹水未见癌;(全子宫+两附件)送检子宫大小为10*6*4cm,宫腔内见菜花样肿物大小为5*4*3cm,灰黄质硬,浸润浅肌层;镜上中至低分化子宫内膜样腺癌,部分呈鳞状分化,浸润子宫浅肌层,未累及宫颈管;右输卵管系膜内见子宫内膜异位;两附件、阴道残端、淋巴结未见癌;,免疫组化:er(+),pr(-)。,术后诊断:子宫内膜样腺癌ia1期。因肿瘤为中至低分化且大小为5*4*3cm,术后有化疗指征。于2011年10月11日、11月16日行tp(泰素+伯尔定)方案化疗2程,化疗后出现轻度恶心、呕吐,伴脱发,无骨痛及四肢麻木等不适,白细胞最低降至2.7×109/l,未处理可自行升至正常。自发病以来,精神、食欲、睡眠良好,无腹痛及腹胀,无腰酸,大小便正常。体重较下次化疗增加3kg。,既往化疗及肿瘤标志物情况:,化疗药物毒副反应:。\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   short texts:\n",
      ",患者于2011年9月29日在我院因“子宫内膜癌ii期”在全麻上行“广泛全子宫切除+两侧附件切除+盆腔淋巴结清扫+腹主动脉旁淋巴结活检术”,术中探查见盆腹腔未见腹水,子宫增大,约10*8*7cm,饱满,两侧附件未见异常,盆腔及腹主动脉旁淋巴结未及肿大。术程顺利,,术后病理回报:腹水未见癌;(全子宫+两附件)送检子宫大小为10*6*4cm,宫腔内见菜花样肿物大小为5*4*3cm,灰黄质硬,浸润浅肌层;镜上中至低分化子宫内膜样腺癌,部分呈鳞状分化,浸润子宫浅肌层,未累及宫颈管;右输卵管系膜内见子宫内膜异位;两附件、阴道残端、淋巴结未见癌;,免疫组化:er(+),pr(-)。,术后诊断:子宫内膜样腺癌ia1期。因肿瘤为中至低分化且大小为5*4*3cm,术后有化疗指征。于2011年10月11日、11月16日行tp(泰素+伯尔定)方案化疗2程,化疗后出现轻度恶心、呕吐,伴脱发,无骨痛及四肢麻木等不适,白细胞最低降至2.7×109/l,未处理可自行升至正常。自发病以来,精神、食欲、睡眠良好,无腹痛及腹胀,无腰酸,大小便正常。体重较下次化疗增加3kg。,既往化疗及肿瘤标志物情况:,\n",
      "化疗药物毒副反应:。\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   full text:\n",
      ",患者于2010年10月因_\"上腹痛伴大便习惯改变_\"外院行肠镜,活检病理示中分化腺癌;,外院b超示:盆腔内2个巨大团块,考虑卵巢来源可能性大;盆腔大量积液。来我院就诊,考虑乙状结肠癌伴不完全梗阻、盆腔肿物,2010-10-25我院行_\"子宫切除+两附件切除+dixon术+大网膜切除术开腹恶性肿瘤特殊治疗术_\",术中5-fu1000mg肠腔化疗,门静脉5-fu250mg化疗,中人氟安800mg腹腔化疗,,病理回报:乙状结肠中分化腺癌,两侧卵巢转移瘤,中央组淋巴结转移4/15,pt4bn2m1。2010-11-10我院复查ct提示肝s5、6段肝转移。遂于2010-11-16至2011-3-11行forfiri方案化疗7程,4程、6程复查ct疗效评价为sd,后于2011-4-6转外科行_\"肝转移瘤切除术_\",,术后病理:符合肠癌肝转移。后于2011-05-20至2011-08-05继续forfiri方案化疗4程。后定期复查至2012-02-21我院ct提示右下肺及右上肺结节、左前胸壁结节,考虑转移瘤,两肺多发结节结节状小空洞影未排除转移。考虑肿瘤复发,于2012-03-01始行folfox方案6程,3,程后复查ct疗效评价:sd。,6程后2012-7-10我院ct:肝s6、8、2病灶,可疑转移瘤,建议mr检查。脾内低密度影,可疑转移瘤。,疗效评价:sd。现为进一步诊治入院,患者自觉无不适,胃纳、睡眠可,二便如常。\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   short texts:\n",
      ",患者于2010年10月因_\"上腹痛伴大便习惯改变_\"外院行肠镜,活检病理示中分化腺癌;,外院b超示:盆腔内2个巨大团块,考虑卵巢来源可能性大;盆腔大量积液。来我院就诊,考虑乙状结肠癌伴不完全梗阻、盆腔肿物,2010-10-25我院行_\"子宫切除+两附件切除+dixon术+大网膜切除术开腹恶性肿瘤特殊治疗术_\",术中5-fu1000mg肠腔化疗,门静脉5-fu250mg化疗,中人氟安800mg腹腔化疗,,病理回报:乙状结肠中分化腺癌,两侧卵巢转移瘤,中央组淋巴结转移4/15,pt4bn2m1。2010-11-10我院复查ct提示肝s5、6段肝转移。遂于2010-11-16至2011-3-11行forfiri方案化疗7程,4程、6程复查ct疗效评价为sd,后于2011-4-6转外科行_\"肝转移瘤切除术_\",,术后病理:符合肠癌肝转移。后于2011-05-20至2011-08-05继续forfiri方案化疗4程。后定期复查至2012-02-21我院ct提示右下肺及右上肺结节、左前胸壁结节,考虑转移瘤,两肺多发结节结节状小空洞影未排除转移。考虑肿瘤复发,\n",
      "于2012-03-01始行folfox方案6程,3,程后复查ct疗效评价:sd。,6程后2012-7-10我院ct:肝s6、8、2病灶,可疑转移瘤,建议mr检查。脾内低密度影,可疑转移瘤。,疗效评价:sd。现为进一步诊治入院,患者自觉无不适,胃纳、睡眠可,二便如常。\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   full text:\n",
      ",患者因_\"体检发现子宫肌瘤_\"于2008年5月就诊我院,_2008年5月4日于我院行全宫两附件切除附件切除术,,术后病理为:子宫平滑肌肉瘤,术后于2008.5.9~2008.8.7行epi+ifo+dtic方案化疗四程(均为静脉用药),化疗过程顺利。出院后定期复查,2011-6-2胸片检查提示左肺结节影,考虑转移瘤可能性大,遂进一步ct检查,,提示:子宫肉瘤术后改变,未见明确复发征。两肺多发结节,考虑肺转移瘤。于2011-6-27、2011-7-22、2011-8-19、2011-9-23、2011-10-25及2011-11-22予“ia+dtic”化疗六程,第一次化疗过程顺利,第二次化疗的最后一天,患者诉胸闷,予暂停化疗药物。化疗后出现轻度胃肠道反应。第六次化疗后白细胞最低降至2.5×109/l,升白细胞治疗后好转,,患者于第六次化疗前复查ct提示:两肺转移瘤较前无明显改变;2012-2-28入院予“里葆多40mg_d1+匹服平3.0_d1-2+安维汀300mgd1”化疗一程,wbc最低至2.3×109/l,升白治疗后好转。现患者再次入院化疗,精神、睡眠好,食欲欠佳,大小便正常。近期体重无明显改变。,既往化疗及肿瘤标志物情况:,化疗药物毒副反应:。\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   short texts:\n",
      ",患者因_\"体检发现子宫肌瘤_\"于2008年5月就诊我院,_2008年5月4日于我院行全宫两附件切除附件切除术,,术后病理为:子宫平滑肌肉瘤,术后于2008.5.9~2008.8.7行epi+ifo+dtic方案化疗四程(均为静脉用药),化疗过程顺利。出院后定期复查,2011-6-2胸片检查提示左肺结节影,考虑转移瘤可能性大,遂进一步ct检查,,提示:子宫肉瘤术后改变,未见明确复发征。两肺多发结节,考虑肺转移瘤。于2011-6-27、2011-7-22、2011-8-19、2011-9-23、2011-10-25及2011-11-22予“ia+dtic”化疗六程,第一次化疗过程顺利,第二次化疗的最后一天,患者诉胸闷,予暂停化疗药物。化疗后出现轻度胃肠道反应。第六次化疗后白细胞最低降至2.5×109/l,升白细胞治疗后好转,,患者于第六次化疗前复查ct提示:两肺转移瘤较前无明显改变;2012-2-28入院予“里葆多40mg_d1+匹服平3.0_d1-2+安维汀300mgd1”化疗一程,wbc最低至2.3×109/l,升白治疗后好转。现患者再次入院化疗,精神、睡眠好,食欲欠佳,大小便正常。\n",
      "近期体重无明显改变。,既往化疗及肿瘤标志物情况:,化疗药物毒副反应:。\n",
      "Read datasets/CCKS2019_NER\\dev.txt: 100%|███████████████████████████████████████████████████████| 379/379 [00:00<00:00, 1225.01it/s]\n",
      "Tokens to ids:   0%|                                                                                        | 0/407 [00:00<?, ?it/s]06/21/2024 06:56:01 - INFO - __main__ -   tokens:\n",
      "[',', '患', '者', '2', '0', '0', '8', '年', '9', '月', '3', '日', '因', '“', '腹', '胀', ',', '发', '现', '腹', '部', '包', '块', '”', '在', '我', '院', '腹', '科', '行', '手', '术', '探', '查', ',', '术', '中', '见', '盆', '腹', '腔', '肿', '物', ',', '与', '肠', '管', '及', '子', '宫', '关', '系', '密', '切', ',', '遂', '行', '“', '全', '子', '宫', '左', '附', '件', '切', '除', '+', '盆', '腔', '肿', '物', '切', '除', '+', '右', '半', '结', '肠', '切', '除', '+', 'd', 'i', 'x', 'o', 'n', '术', '”', ',', '术', '后', '病', '理', '示', '颗', '粒', '细', '胞', '瘤', ',', '诊', '断', '为', '颗', '粒', '细', '胞', '瘤', 'i', 'i', 'i', 'c', '期', ',', '术', '后', '自', '2', '0', '0', '8', '年', '1', '1', '月', '起', '行', 'b', 'e', 'p', '方', '案', '化', '疗', '共', '4', '程', ',', '末', '次', '化', '疗', '时', '间', '为', '2', '0', '0', '9', '年', '3', '月', '2', '6', '日', '。', '之', '后', '患', '者', '定', '期', '复', '查', ',', '2', '0', '1', '5', '-', '6', '-', '1', ',', '复', '查', 'c', 't', '示', ':', '髂', '嵴', '水', '平', '上', '腹', '部', 'l', '5', '腰', '椎', '前', '见', '软', '组', '织', '肿', '块', ',', '大', '小', '约', '3', '0', 'm', 'm', '×', '4', '5', 'm', 'm', ',', '密', '度', '欠', '均', '匀', ',', '边', '界', '尚', '清', '楚', ',', '轻', '度', '强', '化', '。', '查', '肿', '瘤', '标', '志', '物', '均', '正', '常', '。', '于', '2', '0', '1', '5', '-', '7', '-', '6', '行', '剖', '腹', '探', '查', '+', '膀', '胱', '旁', '肿', '物', '切', '除', '+', '骶', '前', '肿', '物', '切', '除', '+', '肠', '表', '面', '肿', '物', '切', '除', '术', ',', '术', '程', '顺', '利', ',', ',', '术', '后', '病', '理', '示', ':', '膀', '胱', '旁', '肿', '物', '及', '骶', '前', '肿', '物', '符', '合', '颗', '粒', '细', '胞', '瘤', '。', '于', '2', '0', '1', '5', '-', '7', '-', '1', '3', '、', '8', '-', '1', '4', '给', '予', '泰', '素', '2', '4', '0', 'm', 'g', '+', '伯', '尔', '定', '6', '0', '0', 'm', 'g', '化', '疗', '2', '程', ',', '过', '程', '顺', '利', '。', '出', '院', '至', '今', ',', '无', '发', '热', ',', '无', '腹', '痛', '、', '腹', '胀', ',', '有', '脱', '发', ',', '现', '返', '院', '复', '诊', ',', '拟', '行', '再', '次', '化', '疗', '收', '入', '院', '。', '起', '病', '以', '来', ',', '精', '神', '、', '胃', '纳', '、', 
'睡', '眠', '可', ',', '大', '小', '便', '正', '常', ',', '体', '重', '无', '明', '显', '改', '变', '。']\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   token_ids: \n",
      "[101, 117, 2642, 5442, 123, 121, 121, 129, 2399, 130, 3299, 124, 3189, 1728, 100, 5592, 5515, 117, 1355, 4385, 5592, 6956, 1259, 1779, 100, 1762, 2769, 7368, 5592, 4906, 6121, 2797, 3318, 2968, 3389, 117, 3318, 704, 6224, 4658, 5592, 5579, 5514, 4289, 117, 680, 5499, 5052, 1350, 2094, 2151, 1068, 5143, 2166, 1147, 117, 6876, 6121, 100, 1059, 2094, 2151, 2340, 7353, 816, 1147, 7370, 116, 4658, 5579, 5514, 4289, 1147, 7370, 116, 1381, 1288, 5310, 5499, 1147, 7370, 116, 146, 151, 166, 157, 156, 3318, 100, 117, 3318, 1400, 4567, 4415, 4850, 7578, 5108, 5301, 5528, 4606, 117, 6402, 3171, 711, 7578, 5108, 5301, 5528, 4606, 151, 151, 151, 145, 3309, 117, 3318, 1400, 5632, 123, 121, 121, 129, 2399, 122, 122, 3299, 6629, 6121, 144, 147, 158, 3175, 3428, 1265, 4545, 1066, 125, 4923, 117, 3314, 3613, 1265, 4545, 3198, 7313, 711, 123, 121, 121, 130, 2399, 124, 3299, 123, 127, 3189, 511, 722, 1400, 2642, 5442, 2137, 3309, 1908, 3389, 117, 123, 121, 122, 126, 118, 127, 118, 122, 117, 1908, 3389, 145, 162, 4850, 131, 7762, 100, 3717, 2398, 677, 5592, 6956, 154, 126, 5587, 3491, 1184, 6224, 6763, 5299, 5302, 5514, 1779, 117, 1920, 2207, 5276, 124, 121, 155, 155, 190, 125, 126, 155, 155, 117, 2166, 2428, 3612, 1772, 1258, 117, 6804, 4518, 2213, 3926, 3504, 117, 6768, 2428, 2487, 1265, 511, 3389, 5514, 4606, 3403, 2562, 4289, 1772, 3633, 2382, 511, 754, 123, 121, 122, 126, 118, 128, 118, 127, 6121, 1189, 5592, 2968, 3389, 116, 5598, 5537, 3178, 5514, 4289, 1147, 7370, 116, 7758, 1184, 5514, 4289, 1147, 7370, 116, 5499, 6134, 7481, 5514, 4289, 1147, 7370, 3318, 117, 3318, 4923, 7556, 1164, 117, 117, 3318, 1400, 4567, 4415, 4850, 131, 5598, 5537, 3178, 5514, 4289, 1350, 7758, 1184, 5514, 4289, 5016, 1394, 7578, 5108, 5301, 5528, 4606, 511, 754, 123, 121, 122, 126, 118, 128, 118, 122, 124, 510, 129, 118, 122, 125, 5314, 750, 3805, 5162, 123, 125, 121, 155, 149, 116, 843, 2209, 2137, 127, 121, 121, 155, 149, 1265, 4545, 123, 4923, 117, 6814, 4923, 7556, 1164, 511, 1139, 7368, 
5635, 791, 117, 3187, 1355, 4178, 117, 3187, 5592, 4578, 510, 5592, 5515, 117, 3300, 5564, 1355, 117, 4385, 6819, 7368, 1908, 6402, 117, 2877, 6121, 1086, 3613, 1265, 4545, 3119, 1057, 7368, 511, 6629, 4567, 809, 3341, 117, 5125, 4868, 510, 5517, 5287, 510, 4717, 4697, 1377, 117, 1920, 2207, 912, 3633, 2382, 117, 860, 7028, 3187, 3209, 3227, 3121, 1359, 511, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   labels:\n",
      "['<start>', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'B-Body', 'E-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'E-Sur', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'E-Dis', 'O', 'O', 'O', 'O', 'B-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'E-Dis', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-CT', 'E-CT', 'O', 'O', 'B-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'I-Body', 'E-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'I-Sur', 'E-Sur', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Body', 'E-Body', 'O', 'O', 'O', 'O', 'B-Body', 'E-Body', 'O', 'O', 'O', 'O', 'B-Dis', 'I-Dis', 'I-Dis', 'I-Dis', 'E-Dis', 'O', 'O', 'O', 'O', 'O', 
'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Drug', 'E-Drug', 'O', 'O', 'O', 'O', 'O', 'O', 'B-Drug', 'I-Drug', 'E-Drug', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'S-Body', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', '<end>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>']\n",
      "06/21/2024 06:56:01 - INFO - __main__ -   label_ids: \n",
      "[1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11, 3, 3, 3, 3, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11, 3, 3, 3, 3, 3, 11, 3, 3, 8, 10, 3, 3, 3, 3, 3, 3, 3, 3, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 3, 3, 3, 3, 3, 3, 3, 4, 5, 5, 5, 6, 3, 3, 3, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 16, 18, 3, 3, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 8, 10, 3, 3, 3, 3, 8, 10, 3, 3, 3, 3, 4, 5, 5, 5, 6, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 20, 22, 3, 3, 3, 3, 3, 3, 20, 21, 22, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11, 3, 3, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 11, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "Tokens to ids: 100%|████████████████████████████████████████████████████████████████████████████| 407/407 [00:00<00:00, 1466.43it/s]\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   model.named_parameters()\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.embeddings.word_embeddings.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.embeddings.position_embeddings.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.embeddings.token_type_embeddings.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.embeddings.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.embeddings.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.self.query.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.self.query.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.self.key.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.self.key.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.self.value.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.self.value.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.output.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.intermediate.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.intermediate.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.output.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.output.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.0.output.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.self.query.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.self.query.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.self.key.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.self.key.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.self.value.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.self.value.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.output.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.intermediate.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.intermediate.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.output.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.output.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.1.output.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.self.query.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.self.query.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.self.key.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.self.key.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.self.value.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.self.value.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.output.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.intermediate.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.intermediate.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.output.dense.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.output.LayerNorm.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.2.output.LayerNorm.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.self.query.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.self.query.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.self.key.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.self.key.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.self.value.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.self.value.bias\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.output.dense.weight\n",
      "06/21/2024 06:56:03 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.3.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.4.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.5.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.6.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.7.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.8.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.9.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.10.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.self.query.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.self.query.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.self.key.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.self.key.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.self.value.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.self.value.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.attention.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.intermediate.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.intermediate.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.output.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.output.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.output.LayerNorm.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.encoder.layer.11.output.LayerNorm.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.pooler.dense.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   embedding_layer.bert_layer.pooler.dense.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_ih_l0\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_hh_l0\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_ih_l0\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_hh_l0\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_ih_l0_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_hh_l0_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_ih_l0_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_hh_l0_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_ih_l1\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_hh_l1\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_ih_l1\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_hh_l1\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_ih_l1_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.weight_hh_l1_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_ih_l1_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   gru.bias_hh_l1_reverse\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   dense_layer.weight\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   dense_layer.bias\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   crf.start_transitions\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   crf.end_transitions\n",
      "06/21/2024 06:56:04 - INFO - __main__ -   crf.transitions\n",
      "06/21/2024 06:56:04 - INFO - ignite.engine.engine.Engine -   Engine run starting with max_epochs=5.\n",
      "D:\\projects\\medbert\\model\\crf.py:157: UserWarning: where received a uint8 condition tensor. This behavior is deprecated and will be removed in a future version of PyTorch. Use a boolean condition instead. (Triggered internally at C:\\cb\\pytorch_1000000000000\\work\\aten\\src\\ATen\\native\\TensorCompare.cpp:530.)\n",
      "  score = torch.where(mask[i].unsqueeze(1), next_score, score)\n",
      "Epoch [1/5]:  14%|███████▉                                               | 23/160 [11:06<1:09:53, 30.61s/it, loss=2.66, score=0.008]06/21/2024 07:08:12 - ERROR - ignite.engine.engine.Engine -   Engine run is terminating due to exception: \n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[10], line 12\u001b[0m\n\u001b[0;32m      5\u001b[0m model_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mu\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mpretrained_models/bert-base-chinese/\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[0;32m      6\u001b[0m ccks_ner \u001b[38;5;241m=\u001b[39m cCKSNER(dataset\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdatasets/CCKS2019_NER\u001b[39m\u001b[38;5;124m\"\u001b[39m, embed_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbert\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m      7\u001b[0m                    dense_layer_type\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mgru\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m      8\u001b[0m                    model_name_or_path\u001b[38;5;241m=\u001b[39mmodel_path,\n\u001b[0;32m      9\u001b[0m                    bert_lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m5e-5\u001b[39m, crf_lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1e-3\u001b[39m, output_dir\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mresults/ccks_ner\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[0;32m     10\u001b[0m                    per_gpu_batch_size\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m8\u001b[39m, patience\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m5\u001b[39m, max_epochs\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m5\u001b[39m)\n\u001b[1;32m---> 12\u001b[0m \u001b[43mccks_ner\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     13\u001b[0m train_time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mtime() \u001b[38;5;241m-\u001b[39m start_time\n\u001b[0;32m     14\u001b[0m ccks_ner\u001b[38;5;241m.\u001b[39mexport_results(ccks_ner\u001b[38;5;241m.\u001b[39mdev_path)\n",
      "Cell \u001b[1;32mIn[8], line 343\u001b[0m, in \u001b[0;36mcCKSNER.train\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    340\u001b[0m trainer\u001b[38;5;241m.\u001b[39madd_event_handler(Events\u001b[38;5;241m.\u001b[39mEPOCH_COMPLETED, empty_cuda_cache)\n\u001b[0;32m    341\u001b[0m dev_evaluator\u001b[38;5;241m.\u001b[39madd_event_handler(Events\u001b[38;5;241m.\u001b[39mCOMPLETED, empty_cuda_cache)\n\u001b[1;32m--> 343\u001b[0m \u001b[43mtrainer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtrain_iter\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mmax_epochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmax_epochs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\ignite\\engine\\engine.py:889\u001b[0m, in \u001b[0;36mEngine.run\u001b[1;34m(self, data, max_epochs, epoch_length)\u001b[0m\n\u001b[0;32m    886\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39mdataloader \u001b[38;5;241m=\u001b[39m data\n\u001b[0;32m    888\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minterrupt_resume_enabled:\n\u001b[1;32m--> 889\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_internal_run\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    890\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m    891\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_internal_run_legacy()\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\ignite\\engine\\engine.py:932\u001b[0m, in \u001b[0;36mEngine._internal_run\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    930\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_internal_run_generator \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_internal_run_as_gen()\n\u001b[0;32m    931\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 932\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mnext\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_internal_run_generator\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    933\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mStopIteration\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m out:\n\u001b[0;32m    934\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_internal_run_generator \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\ignite\\engine\\engine.py:990\u001b[0m, in \u001b[0;36mEngine._internal_run_as_gen\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    988\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataloader_iter \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m    989\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlogger\u001b[38;5;241m.\u001b[39merror(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mEngine run is terminating due to exception: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m--> 990\u001b[0m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_handle_exception\u001b[49m\u001b[43m(\u001b[49m\u001b[43me\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    992\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataloader_iter \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m    993\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\ignite\\engine\\engine.py:644\u001b[0m, in \u001b[0;36mEngine._handle_exception\u001b[1;34m(self, e)\u001b[0m\n\u001b[0;32m    642\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fire_event(Events\u001b[38;5;241m.\u001b[39mEXCEPTION_RAISED, e)\n\u001b[0;32m    643\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m--> 644\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m e\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\ignite\\engine\\engine.py:956\u001b[0m, in \u001b[0;36mEngine._internal_run_as_gen\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m    953\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataloader_iter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m    954\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_setup_engine()\n\u001b[1;32m--> 956\u001b[0m epoch_time_taken \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run_once_on_dataset_as_gen()\n\u001b[0;32m    958\u001b[0m \u001b[38;5;66;03m# time is available for handlers but must be updated after fire\u001b[39;00m\n\u001b[0;32m    959\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39mtimes[Events\u001b[38;5;241m.\u001b[39mEPOCH_COMPLETED\u001b[38;5;241m.\u001b[39mname] \u001b[38;5;241m=\u001b[39m epoch_time_taken\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\ignite\\engine\\engine.py:1077\u001b[0m, in \u001b[0;36mEngine._run_once_on_dataset_as_gen\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m   1074\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fire_event(Events\u001b[38;5;241m.\u001b[39mITERATION_STARTED)\n\u001b[0;32m   1075\u001b[0m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maybe_terminate_or_interrupt()\n\u001b[1;32m-> 1077\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstate\u001b[38;5;241m.\u001b[39moutput \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_process_function\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstate\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbatch\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m   1078\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_fire_event(Events\u001b[38;5;241m.\u001b[39mITERATION_COMPLETED)\n\u001b[0;32m   1079\u001b[0m \u001b[38;5;28;01myield from\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maybe_terminate_or_interrupt()\n",
      "Cell \u001b[1;32mIn[8], line 246\u001b[0m, in \u001b[0;36mcCKSNER.train.<locals>.train_fn\u001b[1;34m(engine, batch)\u001b[0m\n\u001b[0;32m    243\u001b[0m tb_writer\u001b[38;5;241m.\u001b[39madd_scalar(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrain_loss\u001b[39m\u001b[38;5;124m'\u001b[39m, loss\u001b[38;5;241m.\u001b[39mitem(), global_step)\n\u001b[0;32m    244\u001b[0m tb_writer\u001b[38;5;241m.\u001b[39madd_scalar(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtrain_score\u001b[39m\u001b[38;5;124m'\u001b[39m, score\u001b[38;5;241m.\u001b[39mitem(), global_step)\n\u001b[1;32m--> 246\u001b[0m \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    247\u001b[0m torch\u001b[38;5;241m.\u001b[39mnn\u001b[38;5;241m.\u001b[39mutils\u001b[38;5;241m.\u001b[39mclip_grad_norm_(model\u001b[38;5;241m.\u001b[39mparameters(), \u001b[38;5;241m3.0\u001b[39m)\n\u001b[0;32m    248\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mstep()\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\torch\\_tensor.py:525\u001b[0m, in \u001b[0;36mTensor.backward\u001b[1;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[0;32m    515\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    516\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[0;32m    517\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[0;32m    518\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    523\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[0;32m    524\u001b[0m     )\n\u001b[1;32m--> 525\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    526\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[0;32m    527\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\torch\\autograd\\__init__.py:267\u001b[0m, in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[0;32m    262\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[0;32m    264\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[0;32m    265\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[0;32m    266\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[1;32m--> 267\u001b[0m \u001b[43m_engine_run_backward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    268\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    269\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    270\u001b[0m \u001b[43m    \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    271\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    272\u001b[0m \u001b[43m    \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    273\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    274\u001b[0m \u001b[43m    \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    275\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\miniconda3\\envs\\pytorch_env\\Lib\\site-packages\\torch\\autograd\\graph.py:744\u001b[0m, in \u001b[0;36m_engine_run_backward\u001b[1;34m(t_outputs, *args, **kwargs)\u001b[0m\n\u001b[0;32m    742\u001b[0m     unregister_hooks \u001b[38;5;241m=\u001b[39m _register_logging_hooks_on_whole_graph(t_outputs)\n\u001b[0;32m    743\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 744\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[0;32m    745\u001b[0m \u001b[43m        \u001b[49m\u001b[43mt_outputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[0;32m    746\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001b[39;00m\n\u001b[0;32m    747\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m    748\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m attach_logging_hooks:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "if __name__ == '__main__':\n",
    "    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\n",
    "\n",
    "    start_time = time.time()\n",
    "    model_path = u'pretrained_models/bert-base-chinese/'\n",
    "    ccks_ner = cCKSNER(dataset=\"datasets/CCKS2019_NER\", embed_type=\"bert\",\n",
    "                       dense_layer_type=\"gru\",\n",
    "                       model_name_or_path=model_path,\n",
    "                       bert_lr=5e-5, crf_lr=1e-3, output_dir=\"results/ccks_ner\",\n",
    "                       per_gpu_batch_size=8, patience=5, max_epochs=5)\n",
    "\n",
    "    ccks_ner.train()\n",
    "    train_time = time.time() - start_time\n",
    "    ccks_ner.export_results(ccks_ner.dev_path)\n",
    "    ccks_ner.evaluation(ccks_ner.dev_path, start_time, train_time)\n",
    "    train_time = time.time() - start_time\n",
    "    ccks_ner.export_results(ccks_ner.test_path)\n",
    "    ccks_ner.evaluation(ccks_ner.test_path, start_time, train_time)\n",
    "\n",
    "    clean_cache()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch_env",
   "language": "python",
   "name": "pytorch_env"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
