{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a2015d83-93c0-453e-a17a-8b8649cfa232",
   "metadata": {},
   "source": [
    "# 加载数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "d2345665-c390-4085-ba6d-031ce3140370",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch import nn\n",
    "import torch\n",
    "from torch.optim import Adam \n",
    "from torch.optim import AdamW\n",
    "from transformers import BertModel, BertTokenizer\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from tqdm import tqdm\n",
    "import pandas as pd\n",
    "import ast\n",
    "import json"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c8a3b622-16ba-4beb-b008-ac79a9b3700f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class NERDataset(Dataset):\n",
    "    \"\"\"Token-level NER dataset built from a CSV with 'text' and 'info_list' columns.\"\"\"\n",
    "\n",
    "    def __init__(self, csv_file, tokenizer, max_length=128):\n",
    "        # Load the CSV; rows must provide 'text' and 'info_list'.\n",
    "        self.df = pd.read_csv(csv_file)\n",
    "        self.tokenizer = tokenizer\n",
    "        self.max_length = max_length\n",
    "\n",
    "        # Collect entity types and build the label <-> id mappings ('O' gets id 0).\n",
    "        self.labels = self.get_unique_labels()\n",
    "        self.label2id = {label: idx for idx, label in enumerate(['O'] + self.labels)}\n",
    "        self.id2label = {idx: label for label, idx in self.label2id.items()}\n",
    "\n",
    "    def get_unique_labels(self):\n",
    "        \"\"\"Scan every row and return the sorted list of unique entity types.\"\"\"\n",
    "        unique_labels = set()\n",
    "        for row in self.df['info_list']:\n",
    "            # 'info_list' holds a Python-literal string; skip only rows that\n",
    "            # fail to parse instead of a bare except that hides real errors.\n",
    "            try:\n",
    "                entities = ast.literal_eval(row)\n",
    "            except (ValueError, SyntaxError):\n",
    "                continue\n",
    "            for entity_group in entities:\n",
    "                for entity in entity_group:\n",
    "                    unique_labels.add(entity['type'])\n",
    "        return sorted(unique_labels)\n",
    "\n",
    "    def encode_text_and_labels(self, text, info_list):\n",
    "        \"\"\"Tokenize `text` and build an aligned label-id tensor of length max_length.\"\"\"\n",
    "        encoding = self.tokenizer(\n",
    "            text,\n",
    "            max_length=self.max_length,\n",
    "            padding='max_length',\n",
    "            truncation=True,\n",
    "            return_tensors='pt'\n",
    "        )\n",
    "\n",
    "        # Every position starts as 0, i.e. the 'O' label.\n",
    "        labels = torch.zeros(self.max_length, dtype=torch.long)\n",
    "\n",
    "        try:\n",
    "            entities = ast.literal_eval(info_list)\n",
    "            for entity_group in entities:\n",
    "                for entity in entity_group:\n",
    "                    start_idx = entity['offset'][0]\n",
    "                    end_idx = entity['offset'][1]\n",
    "                    entity_type = entity['type']\n",
    "\n",
    "                    # Approximate char->token alignment: tokenize the prefix and\n",
    "                    # count its tokens (encode() adds [CLS]/[SEP], hence the -1).\n",
    "                    # NOTE(review): offset_mapping from a fast tokenizer would be\n",
    "                    # exact; this heuristic can drift on subword splits.\n",
    "                    token_start = len(self.tokenizer.encode(text[:start_idx])) - 1\n",
    "                    token_end = len(self.tokenizer.encode(text[:end_idx])) - 1\n",
    "\n",
    "                    if token_start < self.max_length and token_end < self.max_length:\n",
    "                        labels[token_start:token_end+1] = self.label2id[entity_type]\n",
    "\n",
    "        except Exception as e:\n",
    "            print(f\"Error processing text: {text}\")\n",
    "            print(f\"Error: {e}\")\n",
    "\n",
    "        return {\n",
    "            'input_ids': encoding['input_ids'].squeeze(0),\n",
    "            'attention_mask': encoding['attention_mask'].squeeze(0),\n",
    "            'labels': labels\n",
    "        }\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.df)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        row = self.df.iloc[idx]\n",
    "        return self.encode_text_and_labels(row['text'], row['info_list'])\n",
    "\n",
    "\n",
    "def create_data_loaders(csv_file, tokenizer, batch_size=32, val_ratio=0.1,\n",
    "                        num_workers=4, seed=42):\n",
    "    \"\"\"\n",
    "    Create train/validation DataLoaders plus the label <-> id mappings.\n",
    "\n",
    "    `seed` fixes the random train/val split so results are reproducible\n",
    "    across kernel restarts; `num_workers` is now configurable.\n",
    "    \"\"\"\n",
    "    dataset = NERDataset(csv_file, tokenizer)\n",
    "\n",
    "    # val_ratio of the data goes to validation, the rest to training.\n",
    "    val_size = int(len(dataset) * val_ratio)\n",
    "    train_size = len(dataset) - val_size\n",
    "\n",
    "    # Seeded generator -> identical split on every run.\n",
    "    generator = torch.Generator().manual_seed(seed)\n",
    "    train_dataset, val_dataset = torch.utils.data.random_split(\n",
    "        dataset, [train_size, val_size], generator=generator\n",
    "    )\n",
    "\n",
    "    train_loader = DataLoader(\n",
    "        train_dataset,\n",
    "        batch_size=batch_size,\n",
    "        shuffle=True,\n",
    "        num_workers=num_workers\n",
    "    )\n",
    "\n",
    "    val_loader = DataLoader(\n",
    "        val_dataset,\n",
    "        batch_size=batch_size,\n",
    "        shuffle=False,\n",
    "        num_workers=num_workers\n",
    "    )\n",
    "\n",
    "    return train_loader, val_loader, dataset.label2id, dataset.id2label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "82da4ad4-0c0b-4870-a50b-fb8baa91cbdc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "标签映射: {'O': 0, '人物': 1, '地理位置': 2, '组织机构': 3}\n",
      "标签总数: 4\n"
     ]
    }
   ],
   "source": [
    "# Load the tokenizer from a local BERT checkpoint (hardcoded absolute path).\n",
    "tokenizer = BertTokenizer.from_pretrained(\"/root/autodl-tmp/model/bert\")\n",
    "\n",
    "# Build the train/validation loaders and the label <-> id mappings.\n",
    "train_loader, val_loader, label2id, id2label = create_data_loaders(\n",
    "    csv_file='/root/autodl-tmp/ner/data/train.csv',\n",
    "    tokenizer=tokenizer,\n",
    "    batch_size=64\n",
    ")\n",
    "\n",
    "# Show the discovered label mapping and its size.\n",
    "print(f\"标签映射: {label2id}\")\n",
    "print(f\"标签总数: {len(label2id)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "e9b72ad7-cfff-4b06-871d-dc676ce1a496",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Batch的形状:\n",
      "输入 IDs: torch.Size([64, 128])\n",
      "注意力掩码: torch.Size([64, 128])\n",
      "标签: torch.Size([64, 128])\n",
      "tensor([0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n",
      "        0, 0, 0, 2, 2, 2, 0, 0, 0, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "        0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "        0, 0, 0, 0, 0, 0, 0, 0])\n"
     ]
    }
   ],
   "source": [
    "# Inspect a single batch: tensor shapes plus one example label sequence.\n",
    "sample_batch = next(iter(train_loader))\n",
    "print(\"\\nBatch的形状:\")\n",
    "print(f\"输入 IDs: {sample_batch['input_ids'].shape}\")\n",
    "print(f\"注意力掩码: {sample_batch['attention_mask'].shape}\")\n",
    "print(f\"标签: {sample_batch['labels'].shape}\")\n",
    "print(sample_batch['labels'][1])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6d0b0dbc-b577-4bc8-8e39-c444e1221623",
   "metadata": {},
   "source": [
    "# 定义模型-自定义实现BERT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1c3444a0-b46e-4361-bdac-7a01d8e1d4d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from bert_model.bert_pytorch.model.bert import BERT\n",
    "from torchcrf import CRF\n",
    "\n",
    "class BertNERWithCRF(nn.Module):\n",
    "    \"\"\"Linear + CRF tagging head on top of the from-scratch BERT encoder.\"\"\"\n",
    "\n",
    "    def __init__(self, bert, num_labels, dropout=0.1):\n",
    "        super().__init__()\n",
    "        self.bert = bert\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        # `bert.hidden` is this encoder's hidden-size attribute.\n",
    "        self.classifier = nn.Linear(self.bert.hidden, num_labels)\n",
    "        self.crf = CRF(num_labels, batch_first=True)\n",
    "\n",
    "    def forward(self, input_ids, attention_mask=None, labels=None):\n",
    "        \"\"\"With `labels`: return the CRF negative log-likelihood; otherwise decoded tags.\"\"\"\n",
    "        sequence_output = self.bert(input_ids)\n",
    "        sequence_output = self.dropout(sequence_output)\n",
    "        emissions = self.classifier(sequence_output)\n",
    "\n",
    "        # Fix: honor the caller-supplied attention_mask instead of silently\n",
    "        # ignoring it; fall back to treating token id 0 as padding when absent.\n",
    "        if attention_mask is not None:\n",
    "            mask = attention_mask.bool()\n",
    "        else:\n",
    "            mask = (input_ids > 0).bool()\n",
    "\n",
    "        if labels is not None:\n",
    "            # Training: the CRF returns a log-likelihood, so negate it for a loss.\n",
    "            return -self.crf(emissions, labels, mask=mask)\n",
    "        # Inference: Viterbi-decode the best tag sequence per example.\n",
    "        return self.crf.decode(emissions, mask=mask)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "84e84772-fe22-4fa2-b881-d1e182bd7cc8",
   "metadata": {},
   "source": [
    "# 定义模型-预训练BERT"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7f65d8ab-73b5-41e1-9c18-8f82941126d2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import BertModel\n",
    "from torchcrf import CRF\n",
    "\n",
    "\n",
    "class PreTrainedBertNERWithCRF(nn.Module):\n",
    "    \"\"\"Linear + CRF tagging head over a pretrained HuggingFace BERT model.\"\"\"\n",
    "\n",
    "    def __init__(self, num_labels, pretrained_model, dropout=0.5):\n",
    "        super().__init__()\n",
    "        self.bert = pretrained_model\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.classifier = nn.Linear(self.bert.config.hidden_size, num_labels)\n",
    "        self.crf = CRF(num_labels, batch_first=True)\n",
    "\n",
    "    def forward(self, input_ids, attention_mask, labels=None):\n",
    "        \"\"\"With `labels`: return the CRF loss; without: return decoded tag lists.\"\"\"\n",
    "        # First element of the BERT output tuple is the last hidden state.\n",
    "        hidden_states = self.bert(input_ids, attention_mask=attention_mask)[0]\n",
    "        emissions = self.classifier(self.dropout(hidden_states))\n",
    "        mask = attention_mask.bool()\n",
    "        if labels is None:\n",
    "            return self.crf.decode(emissions, mask=mask)\n",
    "        return -self.crf(emissions, labels, mask=mask)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5d1844ea-4248-46e7-9e00-d923ca1af3a3",
   "metadata": {},
   "source": [
    "# 训练函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "9b151e92-ae61-4855-8f8e-7770ca4af533",
   "metadata": {},
   "outputs": [],
   "source": [
    "class NERTrainer:\n",
    "    \"\"\"Minimal train/eval loop for the CRF-based NER models above.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 model,\n",
    "                 train_dataloader,\n",
    "                 val_dataloader=None,\n",
    "                 lr=1e-4,\n",
    "                 betas=(0.9, 0.999),\n",
    "                 weight_decay=0.01,\n",
    "                 device='cuda'):\n",
    "        self.model = model\n",
    "        self.device = device\n",
    "        self.model.to(device)\n",
    "\n",
    "        self.train_dataloader = train_dataloader\n",
    "        self.val_dataloader = val_dataloader\n",
    "\n",
    "        # Fix: AdamW (decoupled weight decay) instead of Adam with a coupled\n",
    "        # L2 penalty -- AdamW is already imported at the top and is the\n",
    "        # standard choice for fine-tuning transformer models.\n",
    "        self.optimizer = AdamW(model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)\n",
    "\n",
    "    def train_epoch(self, epoch):\n",
    "        \"\"\"Run one training epoch and return the mean batch loss.\"\"\"\n",
    "        self.model.train()\n",
    "        total_loss = 0\n",
    "        progress_bar = tqdm(self.train_dataloader, desc=f'Training Epoch {epoch}')\n",
    "\n",
    "        for batch in progress_bar:\n",
    "            input_ids = batch['input_ids'].to(self.device)\n",
    "            attention_mask = batch['attention_mask'].to(self.device)\n",
    "            labels = batch['labels'].to(self.device)\n",
    "\n",
    "            self.optimizer.zero_grad()\n",
    "\n",
    "            # The model's forward already returns the CRF loss when labels are given.\n",
    "            loss = self.model(input_ids, attention_mask, labels)\n",
    "\n",
    "            loss.backward()\n",
    "            self.optimizer.step()\n",
    "\n",
    "            total_loss += loss.item()\n",
    "            progress_bar.set_postfix({'loss': f'{loss.item():.4f}'})\n",
    "\n",
    "        return total_loss / len(self.train_dataloader)\n",
    "\n",
    "    def evaluate(self):\n",
    "        \"\"\"Return (mean validation loss, token accuracy over unpadded positions).\"\"\"\n",
    "        self.model.eval()\n",
    "        total_loss = 0\n",
    "        all_predictions = []\n",
    "        all_labels = []\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for batch in tqdm(self.val_dataloader, desc='Evaluating'):\n",
    "                input_ids = batch['input_ids'].to(self.device)\n",
    "                attention_mask = batch['attention_mask'].to(self.device)\n",
    "                labels = batch['labels'].to(self.device)\n",
    "\n",
    "                # Forward with labels -> validation loss.\n",
    "                loss = self.model(input_ids, attention_mask, labels)\n",
    "                total_loss += loss.item()\n",
    "\n",
    "                # Forward without labels -> decoded tag sequences.\n",
    "                predictions = self.model(input_ids, attention_mask)\n",
    "\n",
    "                mask = attention_mask.bool()\n",
    "                for pred, label, m in zip(predictions, labels, mask):\n",
    "                    # Keep only the non-padding positions of each sequence.\n",
    "                    valid_len = m.sum().item()\n",
    "                    all_predictions.extend(pred[:valid_len])\n",
    "                    all_labels.extend(label[:valid_len].cpu().tolist())\n",
    "\n",
    "        # Fix: guard against an empty validation set (ZeroDivisionError).\n",
    "        # NOTE(review): this accuracy counts 'O' positions too, so it\n",
    "        # overstates entity-level quality.\n",
    "        if all_labels:\n",
    "            accuracy = sum(p == l for p, l in zip(all_predictions, all_labels)) / len(all_labels)\n",
    "        else:\n",
    "            accuracy = 0.0\n",
    "\n",
    "        return total_loss / len(self.val_dataloader), accuracy\n",
    "\n",
    "    def train(self, epochs, save_path='best_model.pt'):\n",
    "        \"\"\"Train for `epochs` epochs, checkpointing the lowest validation loss.\"\"\"\n",
    "        best_loss = float('inf')\n",
    "\n",
    "        for epoch in range(epochs):\n",
    "            print(f\"\\nEpoch {epoch + 1}/{epochs}\")\n",
    "\n",
    "            train_loss = self.train_epoch(epoch)\n",
    "            print(f\"Average training loss: {train_loss:.4f}\")\n",
    "\n",
    "            if self.val_dataloader is not None:\n",
    "                val_loss, val_accuracy = self.evaluate()\n",
    "                print(f\"Validation loss: {val_loss:.4f}\")\n",
    "                print(f\"Validation accuracy: {val_accuracy:.4f}\")\n",
    "\n",
    "                # Checkpoint only when validation loss improves.\n",
    "                if val_loss < best_loss:\n",
    "                    best_loss = val_loss\n",
    "                    torch.save({\n",
    "                        'epoch': epoch,\n",
    "                        'model_state_dict': self.model.state_dict(),\n",
    "                        'optimizer_state_dict': self.optimizer.state_dict(),\n",
    "                        'loss': best_loss,\n",
    "                    }, save_path)\n",
    "                    print(f\"Saved best model to {save_path}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5065ef20-a195-4bc5-a024-81979233341e",
   "metadata": {},
   "source": [
    "# 开始训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "c8ff3ac8-bc52-4d49-b05f-ceaffb5077ef",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Epoch 1/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 0: 100%|██████████| 5/5 [00:02<00:00,  2.29it/s, loss=1379.8621]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 5748.2828\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.24s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 1354.2699\n",
      "Validation accuracy: 0.7558\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 2/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 1: 100%|██████████| 5/5 [00:02<00:00,  2.30it/s, loss=812.5721] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 4082.3700\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:00<00:00,  1.00it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 700.6700\n",
      "Validation accuracy: 0.9149\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 3/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 2: 100%|██████████| 5/5 [00:02<00:00,  2.40it/s, loss=813.7090] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 3064.1814\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.14s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 584.4481\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 4/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 3: 100%|██████████| 5/5 [00:02<00:00,  2.35it/s, loss=896.4575] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2541.0574\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.24s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 628.9829\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 5/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 4: 100%|██████████| 5/5 [00:02<00:00,  2.31it/s, loss=602.1784] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2324.9538\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.24s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 693.7373\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 6/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 5: 100%|██████████| 5/5 [00:02<00:00,  2.42it/s, loss=883.5955] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2293.0870\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.11s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 740.6146\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 7/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 6: 100%|██████████| 5/5 [00:02<00:00,  2.33it/s, loss=273.2648] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2294.8080\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.30s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 761.6165\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 8/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 7: 100%|██████████| 5/5 [00:02<00:00,  2.29it/s, loss=413.1489] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2207.2199\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.08s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 763.0457\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 9/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 8: 100%|██████████| 5/5 [00:02<00:00,  2.37it/s, loss=790.0963] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2284.1219\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.20s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 750.9404\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 10/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 9: 100%|██████████| 5/5 [00:01<00:00,  2.53it/s, loss=585.0558] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2254.5815\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.07s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 730.9719\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 11/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 10: 100%|██████████| 5/5 [00:02<00:00,  2.34it/s, loss=606.0186] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2229.8768\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.17s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 709.2023\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 12/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 11: 100%|██████████| 5/5 [00:02<00:00,  2.16it/s, loss=767.9026] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2229.9081\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.27s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 689.4585\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 13/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 12: 100%|██████████| 5/5 [00:02<00:00,  2.38it/s, loss=513.7581] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2189.8876\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.08s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 672.2036\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 14/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 13: 100%|██████████| 5/5 [00:02<00:00,  2.33it/s, loss=1215.4663]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2184.6785\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.13s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 664.4696\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 15/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 14: 100%|██████████| 5/5 [00:01<00:00,  2.70it/s, loss=348.7605] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2160.2042\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:00<00:00,  1.00it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 656.8145\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 16/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 15: 100%|██████████| 5/5 [00:02<00:00,  2.43it/s, loss=557.7819] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2140.6504\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.16s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 656.1982\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 17/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 16: 100%|██████████| 5/5 [00:02<00:00,  2.45it/s, loss=545.0206] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2147.4331\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.15s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 655.2080\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 18/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 17: 100%|██████████| 5/5 [00:02<00:00,  2.29it/s, loss=509.6168] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2101.2420\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.07s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 654.4722\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 19/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 18: 100%|██████████| 5/5 [00:02<00:00,  2.17it/s, loss=990.6812] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2081.2208\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.09s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 652.2855\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 20/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 19: 100%|██████████| 5/5 [00:02<00:00,  2.29it/s, loss=496.7193] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2056.4174\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.06s/it]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 649.3572\n",
      "Validation accuracy: 0.9175\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Build the from-scratch BERT encoder (randomly initialized, not pretrained).\n",
    "# NOTE(review): vocab_size=30522 matches bert-base-uncased; confirm it matches\n",
    "# the vocabulary of the tokenizer loaded above.\n",
    "bert = BERT(vocab_size=30522, hidden=256, n_layers=8, attn_heads=8)\n",
    "\n",
    "# NER model; derive the label count from the data instead of hardcoding 4.\n",
    "model_1 = BertNERWithCRF(bert=bert, num_labels=len(label2id), dropout=0.5)\n",
    "\n",
    "# Trainer for the randomly initialized encoder.\n",
    "trainer = NERTrainer(\n",
    "    model=model_1,\n",
    "    train_dataloader=train_loader,\n",
    "    val_dataloader=val_loader,\n",
    "    lr=1e-5\n",
    ")\n",
    "\n",
    "# Run training.\n",
    "trainer.train(epochs=20)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "31dc55e7-f70c-4be9-82d3-c415cb316153",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Epoch 1/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 0: 100%|██████████| 5/5 [00:02<00:00,  1.69it/s, loss=431.3395] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 2892.2717\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.16s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 645.2015\n",
      "Validation accuracy: 0.9168\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 2/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 1: 100%|██████████| 5/5 [00:03<00:00,  1.63it/s, loss=200.5926] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1582.0465\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.41s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 560.8063\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 3/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 2: 100%|██████████| 5/5 [00:02<00:00,  1.82it/s, loss=249.1988] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1542.4882\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.36s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 561.0821\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 4/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 3: 100%|██████████| 5/5 [00:02<00:00,  1.93it/s, loss=567.4198] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1482.2125\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.01s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 564.0854\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 5/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 4: 100%|██████████| 5/5 [00:02<00:00,  1.98it/s, loss=305.8518] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1457.0943\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.03s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 527.8645\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 6/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 5: 100%|██████████| 5/5 [00:02<00:00,  2.04it/s, loss=357.5063] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1436.6700\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:00<00:00,  1.01it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 520.9875\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 7/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 6: 100%|██████████| 5/5 [00:02<00:00,  2.03it/s, loss=506.8571] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1413.5251\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:00<00:00,  1.18it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 523.8201\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 8/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 7: 100%|██████████| 5/5 [00:02<00:00,  2.03it/s, loss=311.8184] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1396.3410\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.13s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 510.8887\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 9/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 8: 100%|██████████| 5/5 [00:02<00:00,  2.13it/s, loss=406.2660] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1389.4237\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.52s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 502.3806\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 10/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 9: 100%|██████████| 5/5 [00:02<00:00,  1.98it/s, loss=208.8669] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1367.1987\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.11s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 496.1777\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 11/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 10: 100%|██████████| 5/5 [00:02<00:00,  1.98it/s, loss=156.1365] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1352.3776\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.12s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 509.5500\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 12/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 11: 100%|██████████| 5/5 [00:02<00:00,  1.93it/s, loss=401.6626] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1343.5731\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.22s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 473.0179\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 13/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 12: 100%|██████████| 5/5 [00:02<00:00,  2.02it/s, loss=460.6181] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1311.3882\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.31s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 497.9666\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 14/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 13: 100%|██████████| 5/5 [00:02<00:00,  1.89it/s, loss=281.5071] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1294.9367\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.12s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 451.3347\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 15/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 14: 100%|██████████| 5/5 [00:02<00:00,  1.90it/s, loss=268.5806] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1273.2113\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.11s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 519.9881\n",
      "Validation accuracy: 0.9175\n",
      "\n",
      "Epoch 16/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 15: 100%|██████████| 5/5 [00:02<00:00,  1.71it/s, loss=282.8185] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1267.4730\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.15s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 443.4946\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 17/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 16: 100%|██████████| 5/5 [00:02<00:00,  1.88it/s, loss=135.6642] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1244.3266\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.12s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 523.4210\n",
      "Validation accuracy: 0.9182\n",
      "\n",
      "Epoch 18/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 17: 100%|██████████| 5/5 [00:02<00:00,  1.71it/s, loss=298.6429] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1226.5189\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.13s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 442.5626\n",
      "Validation accuracy: 0.9175\n",
      "Saved best model to best_model.pt\n",
      "\n",
      "Epoch 19/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 18: 100%|██████████| 5/5 [00:02<00:00,  1.82it/s, loss=274.9262] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1234.0518\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.32s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 468.9293\n",
      "Validation accuracy: 0.9201\n",
      "\n",
      "Epoch 20/20\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Training Epoch 19: 100%|██████████| 5/5 [00:02<00:00,  1.92it/s, loss=482.2331] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Average training loss: 1207.9189\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Evaluating: 100%|██████████| 1/1 [00:01<00:00,  1.21s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Validation loss: 427.9413\n",
      "Validation accuracy: 0.9201\n",
      "Saved best model to best_model.pt\n"
     ]
    }
   ],
   "source": [
    "pretrained_bert = BertModel.from_pretrained(\"/root/autodl-tmp/model/bert\")\n",
    "    \n",
    "model_2 = PreTrainedBertNERWithCRF(pretrained_model=pretrained_bert, num_labels=4)\n",
    "\n",
    "# 创建训练器\n",
    "trainer = NERTrainer(\n",
    "    model=model_2,\n",
    "    train_dataloader=train_loader,\n",
    "    val_dataloader=val_loader,\n",
    "    lr=1e-5\n",
    ")\n",
    "\n",
    "# 开始训练\n",
    "trainer.train(epochs=20)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18b1e8a7-e02d-4c72-ba54-1e76ac64e993",
   "metadata": {},
   "source": [
    "# 测验一下效果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "195f0ede-debf-4cdf-b841-1018bb1d827b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "原文: 北京举办奥运会\n",
      "\n",
      "识别结果:\n",
      "类型: 地理位置, 文本: 京\n"
     ]
    }
   ],
   "source": [
    "def predict_entities(text, model, tokenizer, label2id, id2label, device='cuda', max_length=512):\n",
    "\n",
    "    model.eval()\n",
    "    \n",
    "    encoding = tokenizer(\n",
    "        text,\n",
    "        max_length=max_length,\n",
    "        padding='max_length',\n",
    "        truncation=True,\n",
    "        return_tensors='pt'\n",
    "    )\n",
    "    \n",
    "    input_ids = encoding['input_ids'].to(device)\n",
    "    attention_mask = encoding['attention_mask'].to(device)\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        predictions = model(input_ids, attention_mask)\n",
    "    \n",
    "    # 获取原始文本的token\n",
    "    tokens = tokenizer.convert_ids_to_tokens(input_ids[0])\n",
    "    \n",
    "    # 解析预测结果\n",
    "    entities = []\n",
    "    current_entity = None\n",
    "    \n",
    "    # CRF模型返回的是列表，需要取第一个样本的预测结果\n",
    "    pred_labels = predictions[0]\n",
    "    \n",
    "    valid_len = attention_mask[0].sum().item()\n",
    "    for idx, label_id in enumerate(pred_labels[:valid_len]):\n",
    "        token = tokens[idx]\n",
    "        label = id2label[label_id]\n",
    "        \n",
    "        if label != 'O':  # 如果是实体标签\n",
    "            if current_entity is None:\n",
    "                current_entity = {\n",
    "                    'type': label,\n",
    "                    'text': token.replace('##', '')\n",
    "                }\n",
    "            else:\n",
    "                # 如果是同一个实体，继续添加文本\n",
    "                current_entity['text'] += token.replace('##', '')\n",
    "        else:  # 如果是O标签\n",
    "            if current_entity is not None:\n",
    "                entities.append(current_entity)\n",
    "                current_entity = None\n",
    "    \n",
    "    # 处理最后一个实体\n",
    "    if current_entity is not None:\n",
    "        entities.append(current_entity)\n",
    "    \n",
    "    return entities\n",
    "\n",
    "# 使用示例\n",
    "def format_result(text, entities):\n",
    "    print(f\"原文: {text}\\n\")\n",
    "    print(\"识别结果:\")\n",
    "    for entity in entities:\n",
    "        print(f\"类型: {entity['type']}, 文本: {entity['text']}\")\n",
    "\n",
    "\n",
    "# 实际使用示例\n",
    "if __name__ == \"__main__\":\n",
    "\n",
    "    text = \"北京举办奥运会\"\n",
    "    \n",
    "    # 预测实体\n",
    "    entities = predict_entities(text, model_2, tokenizer, label2id, id2label)\n",
    "    \n",
    "    # 输出结果\n",
    "    format_result(text, entities)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6b71071-31c4-451b-b98b-16ba65fe38fa",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
