{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 基于Transformers的命名实体识别"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step1 导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:00.965969Z",
     "start_time": "2025-09-09T00:39:52.522128Z"
    }
   },
   "source": [
    "import torch\n",
    "import evaluate\n",
    "from datasets import load_dataset\n",
    "from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer,\\\n",
    "\tDataCollatorForTokenClassification\n",
    "torch.cuda.empty_cache()\n",
    "torch.cuda.set_device(0)"
   ],
   "outputs": [],
   "execution_count": 2
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step2 加载数据集"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:12.158589Z",
     "start_time": "2025-09-09T00:40:03.772233Z"
    }
   },
   "source": [
    "# 如果可以联网，直接使用load_dataset进行加载\n",
    "ner_datasets = load_dataset(\"lansinuote/peoples-daily-ner\", cache_dir=\"./ner_data\")\n",
    "ner_datasets"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['id', 'tokens', 'ner_tags'],\n",
       "        num_rows: 20865\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['id', 'tokens', 'ner_tags'],\n",
       "        num_rows: 2319\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['id', 'tokens', 'ner_tags'],\n",
       "        num_rows: 4637\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:31.791256Z",
     "start_time": "2025-09-09T00:40:31.785757Z"
    }
   },
   "source": [
    "ner_datasets[\"train\"][0]"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'id': '0',\n",
       " 'tokens': ['海',\n",
       "  '钓',\n",
       "  '比',\n",
       "  '赛',\n",
       "  '地',\n",
       "  '点',\n",
       "  '在',\n",
       "  '厦',\n",
       "  '门',\n",
       "  '与',\n",
       "  '金',\n",
       "  '门',\n",
       "  '之',\n",
       "  '间',\n",
       "  '的',\n",
       "  '海',\n",
       "  '域',\n",
       "  '。'],\n",
       " 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0]}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 8
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:31.812563Z",
     "start_time": "2025-09-09T00:40:31.808263Z"
    }
   },
   "source": [
    "ner_datasets[\"train\"].features"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'id': Value(dtype='string', id=None),\n",
       " 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),\n",
       " 'ner_tags': Sequence(feature=ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'], id=None), length=-1, id=None)}"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 9
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:34.513173Z",
     "start_time": "2025-09-09T00:40:34.508173Z"
    }
   },
   "source": [
    "label_list = ner_datasets[\"train\"].features[\"ner_tags\"].feature.names\n",
    "label_list"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 10
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step3 数据集预处理"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:37.556734Z",
     "start_time": "2025-09-09T00:40:37.160066Z"
    }
   },
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(\"hfl/chinese-macbert-base\")\n",
    "print(tokenizer)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "BertTokenizerFast(name_or_path='hfl/chinese-macbert-base', vocab_size=21128, model_max_length=1000000000000000019884624838656, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'unk_token': '[UNK]', 'sep_token': '[SEP]', 'pad_token': '[PAD]', 'cls_token': '[CLS]', 'mask_token': '[MASK]'}, clean_up_tokenization_spaces=True),  added_tokens_decoder={\n",
      "\t0: AddedToken(\"[PAD]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t100: AddedToken(\"[UNK]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t101: AddedToken(\"[CLS]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t102: AddedToken(\"[SEP]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "\t103: AddedToken(\"[MASK]\", rstrip=False, lstrip=False, single_word=False, normalized=False, special=True),\n",
      "}\n"
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:40.018827Z",
     "start_time": "2025-09-09T00:40:40.009951Z"
    }
   },
   "source": [
    "tokenizer(ner_datasets[\"train\"][0][\"tokens\"],\n",
    "\tis_split_into_words = True)  # 对于已经做好tokenize的数据，要指定is_split_into_words参数为True"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [101, 3862, 7157, 3683, 6612, 1765, 4157, 1762, 1336, 7305, 680, 7032, 7305, 722, 7313, 4638, 3862, 1818, 511, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 12
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:43.314063Z",
     "start_time": "2025-09-09T00:40:43.310245Z"
    }
   },
   "source": [
    "res = tokenizer(\"interesting word\")\n",
    "res"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [101, 10673, 12865, 12921, 8181, 8681, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1]}"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 13
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:52.902824Z",
     "start_time": "2025-09-09T00:40:52.897873Z"
    }
   },
   "source": [
    "res.word_ids()"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[None, 0, 0, 0, 0, 1, None]"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 16
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:54.854862Z",
     "start_time": "2025-09-09T00:40:54.850862Z"
    }
   },
   "source": [
    "# 借助word_ids 实现标签映射\n",
    "def process_function(examples):\n",
    "\ttokenized_exmaples = tokenizer(examples[\"tokens\"], max_length = 32, truncation = True, is_split_into_words = True)\n",
    "\tlabels = []\n",
    "\tfor i, label in enumerate(examples[\"ner_tags\"]):\n",
    "\t\tword_ids = tokenized_exmaples.word_ids(batch_index = i)\n",
    "\t\tlabel_ids = []\n",
    "\t\tfor word_id in word_ids:\n",
    "\t\t\tif word_id is None:\n",
    "\t\t\t\tlabel_ids.append(-100)\n",
    "\t\t\telse:\n",
    "\t\t\t\tlabel_ids.append(label[word_id])\n",
    "\t\tlabels.append(label_ids)\n",
    "\ttokenized_exmaples[\"labels\"] = labels\n",
    "\treturn tokenized_exmaples"
   ],
   "outputs": [],
   "execution_count": 17
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:40:57.953229Z",
     "start_time": "2025-09-09T00:40:57.916678Z"
    }
   },
   "source": [
    "tokenized_datasets = ner_datasets.map(process_function, batched = True)\n",
    "tokenized_datasets"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['id', 'tokens', 'ner_tags', 'input_ids', 'token_type_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 20865\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['id', 'tokens', 'ner_tags', 'input_ids', 'token_type_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 2319\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['id', 'tokens', 'ner_tags', 'input_ids', 'token_type_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 4637\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 18
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:07.020261Z",
     "start_time": "2025-09-09T00:41:07.015224Z"
    }
   },
   "source": [
    "print(tokenized_datasets[\"train\"][0])"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'id': '0', 'tokens': ['海', '钓', '比', '赛', '地', '点', '在', '厦', '门', '与', '金', '门', '之', '间', '的', '海', '域', '。'], 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0], 'input_ids': [101, 3862, 7157, 3683, 6612, 1765, 4157, 1762, 1336, 7305, 680, 7032, 7305, 722, 7313, 4638, 3862, 1818, 511, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': [-100, 0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0, -100]}\n"
     ]
    }
   ],
   "execution_count": 19
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step4 创建模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:10.513702Z",
     "start_time": "2025-09-09T00:41:09.464857Z"
    }
   },
   "source": [
     "# 对于所有的非二分类任务，切记要指定num_labels，否则分类头默认只有2个label，训练时标签越界会报错\n",
    "model = AutoModelForTokenClassification.from_pretrained(\"hfl/chinese-macbert-base\", num_labels = len(label_list))\n",
    "\n",
    "def ensure_weights_contiguous(model):\n",
    "\t# model.named_parameters(): 获取模型中所有命名参数\n",
    "\tfor name, param in model.named_parameters():\n",
    "\t\t# param.is_contiguous(): 检查参数在内存中是否连续存储\n",
    "\t\tif not param.is_contiguous():\n",
    "\t\t\tprint(f\"Making {name} contiguous.\")\n",
    "\t\t\t# param.data.contiguous(): 创建连续存储版本并替换原数据\n",
    "\t\t\tparam.data = param.data.contiguous()\n",
    "ensure_weights_contiguous(model)"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForTokenClassification were not initialized from the model checkpoint at hfl/chinese-macbert-base and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Making bert.encoder.layer.0.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.0.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.0.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.0.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.0.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.0.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.1.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.1.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.1.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.2.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.2.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.2.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.3.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.3.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.3.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.4.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.4.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.4.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.5.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.5.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.5.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.6.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.6.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.6.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.7.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.7.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.7.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.8.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.8.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.8.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.9.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.9.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.9.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.10.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.10.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.10.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.self.query.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.self.key.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.self.value.weight contiguous.\n",
      "Making bert.encoder.layer.11.attention.output.dense.weight contiguous.\n",
      "Making bert.encoder.layer.11.intermediate.dense.weight contiguous.\n",
      "Making bert.encoder.layer.11.output.dense.weight contiguous.\n"
     ]
    }
   ],
   "execution_count": 20
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:15.260109Z",
     "start_time": "2025-09-09T00:41:15.255827Z"
    }
   },
   "source": "model.config.num_labels",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "7"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 21
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step5 创建评估函数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:17.473391Z",
     "start_time": "2025-09-09T00:41:17.430699Z"
    }
   },
   "source": [
    "# 这里方便大家加载，替换成了本地的加载方式，无需额外下载\n",
    "seqeval = evaluate.load(\"seqeval_metric.py\")\n",
    "seqeval"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "EvaluationModule(name: \"seqeval\", module_type: \"metric\", features: {'predictions': Sequence(feature=Value(dtype='string', id='label'), length=-1, id='sequence'), 'references': Sequence(feature=Value(dtype='string', id='label'), length=-1, id='sequence')}, usage: \"\"\"\n",
       "Produces labelling scores along with its sufficient statistics\n",
       "from a source against one or more references.\n",
       "\n",
       "Args:\n",
       "    predictions: List of List of predicted labels (Estimated targets as returned by a tagger)\n",
       "    references: List of List of reference labels (Ground truth (correct) target values)\n",
       "    suffix: True if the IOB prefix is after type, False otherwise. default: False\n",
       "    scheme: Specify target tagging scheme. Should be one of [\"IOB1\", \"IOB2\", \"IOE1\", \"IOE2\", \"IOBES\", \"BILOU\"].\n",
       "        default: None\n",
       "    mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.\n",
       "        If you want to only count exact matches, pass mode=\"strict\". default: None.\n",
       "    sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None\n",
       "    zero_division: Which value to substitute as a metric value when encountering zero division. Should be on of 0, 1,\n",
       "        \"warn\". \"warn\" acts as 0, but the warning is raised.\n",
       "\n",
       "Returns:\n",
       "    'scores': dict. Summary of the scores for overall and per type\n",
       "        Overall:\n",
       "            'accuracy': accuracy,\n",
       "            'precision': precision,\n",
       "            'recall': recall,\n",
       "            'f1': F1 score, also known as balanced F-score or F-measure,\n",
       "        Per type:\n",
       "            'precision': precision,\n",
       "            'recall': recall,\n",
       "            'f1': F1 score, also known as balanced F-score or F-measure\n",
       "Examples:\n",
       "\n",
       "    >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n",
       "    >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n",
       "    >>> seqeval = evaluate.load(\"seqeval\")\n",
       "    >>> results = seqeval.compute(predictions=predictions, references=references)\n",
       "    >>> print(list(results.keys()))\n",
       "    ['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']\n",
       "    >>> print(results[\"overall_f1\"])\n",
       "    0.5\n",
       "    >>> print(results[\"PER\"][\"f1\"])\n",
       "    1.0\n",
       "\"\"\", stored examples: 0)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 22
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:19.684846Z",
     "start_time": "2025-09-09T00:41:19.679609Z"
    }
   },
   "source": [
    "import numpy as np\n",
    "\n",
    "\n",
    "def eval_metric(pred):\n",
    "\tpredictions, labels = pred\n",
    "\tpredictions = np.argmax(predictions, axis = -1)\n",
    "\n",
    "\t# 将id转换为原始的字符串类型的标签\n",
    "\ttrue_predictions = [\n",
    "\t\t[label_list[p] for p, l in zip(prediction, label) if l != -100]\n",
    "\t\tfor prediction, label in zip(predictions, labels)\n",
    "\t]\n",
    "\n",
    "\ttrue_labels = [\n",
    "\t\t[label_list[l] for p, l in zip(prediction, label) if l != -100]\n",
    "\t\tfor prediction, label in zip(predictions, labels)\n",
    "\t]\n",
    "\n",
    "\tresult = seqeval.compute(predictions = true_predictions, references = true_labels, mode = \"strict\", scheme = \"IOB2\")\n",
    "\n",
    "\treturn {\n",
    "\t\t\"f1\": result[\"overall_f1\"]\n",
    "\t}\n"
   ],
   "outputs": [],
   "execution_count": 23
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step6 配置训练参数"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:27.690823Z",
     "start_time": "2025-09-09T00:41:27.606250Z"
    }
   },
   "source": [
    "args = TrainingArguments(\n",
    "\toutput_dir = \"models_for_ner\",\n",
    "\tper_device_train_batch_size = 64,  # 每个设备上的训练批量大小\n",
    "\tper_device_eval_batch_size = 128,  # 每个设备上的评估批量大小\n",
    "\tgradient_accumulation_steps = 4,  # 梯度累积步数\n",
    "\tgradient_checkpointing = True,  # 启用梯度检查点以节省内存\n",
    "\tgradient_checkpointing_kwargs = {\"use_reentrant\": False},  # 推荐设置\n",
    "\toptim = \"adafactor\",  # 使用Adafactor优化器\n",
    "\tsave_steps = 1000,  # 每1000步保存一次模型\n",
    "\tfp16 = True,  # 使用16位浮点数训练\n",
    "\tlogging_steps = 100,  # 每100步记录一次日志\n",
     "\tnum_train_epochs = 1,  # 训练1个epoch\n",
    "\teval_strategy = \"epoch\",  # 每个epoch结束后进行评估\n",
    "\tsave_strategy = \"epoch\",  # 每个epoch结束后保存模型\n",
    "\tsave_total_limit = 3,  # 最多保存3个检查点\n",
    "\tlearning_rate = 2e-5,  # 学习率\n",
    "\tweight_decay = 0.01,  # 权重衰减系数\n",
    "\tmetric_for_best_model = \"f1\",  # 以F1分数作为最佳模型选择标准\n",
    "\tload_best_model_at_end = True,  # 训练结束后加载最佳模型\n",
    ")"
   ],
   "outputs": [],
   "execution_count": 25
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step7 创建训练器"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:41:32.686385Z",
     "start_time": "2025-09-09T00:41:32.293918Z"
    }
   },
   "source": [
    "trainer = Trainer(\n",
    "\tmodel = model,\n",
    "\targs = args,\n",
    "\ttokenizer = tokenizer,\n",
    "\ttrain_dataset = tokenized_datasets[\"train\"],\n",
    "\teval_dataset = tokenized_datasets[\"validation\"],\n",
    "\tcompute_metrics = eval_metric,\n",
    "\tdata_collator = DataCollatorForTokenClassification(tokenizer = tokenizer)\n",
    ")"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\86134\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\accelerate\\accelerator.py:488: FutureWarning: `torch.cuda.amp.GradScaler(args...)` is deprecated. Please use `torch.amp.GradScaler('cuda', args...)` instead.\n",
      "  self.scaler = torch.cuda.amp.GradScaler(**kwargs)\n"
     ]
    }
   ],
   "execution_count": 26
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step8 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:42:58.497252Z",
     "start_time": "2025-09-09T00:41:36.191019Z"
    }
   },
   "source": [
    "trainer.train()"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ],
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='81' max='81' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [81/81 01:21, Epoch 0/1]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Epoch</th>\n",
       "      <th>Training Loss</th>\n",
       "      <th>Validation Loss</th>\n",
       "      <th>F1</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>0</td>\n",
       "      <td>No log</td>\n",
       "      <td>0.066002</td>\n",
       "      <td>0.824491</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ]
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": "dfdcc178da4042ada241530d18996ca4"
     }
    },
    {
     "data": {
      "text/plain": [
       "TrainOutput(global_step=81, training_loss=0.262517081366645, metrics={'train_runtime': 82.1524, 'train_samples_per_second': 253.979, 'train_steps_per_second': 0.986, 'total_flos': 338655904874496.0, 'train_loss': 0.262517081366645, 'epoch': 0.9908256880733946})"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 27
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:43:06.657994Z",
     "start_time": "2025-09-09T00:43:01.693950Z"
    }
   },
   "source": "trainer.evaluate(eval_dataset = tokenized_datasets[\"test\"])",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ],
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='37' max='37' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [37/37 00:03]\n",
       "    </div>\n",
       "    "
      ]
     },
     "metadata": {},
     "output_type": "display_data",
     "jetTransient": {
      "display_id": "e751a4afdbeb371cf085a821427a25ad"
     }
    },
    {
     "data": {
      "text/plain": [
       "{'eval_loss': 0.06874030083417892,\n",
       " 'eval_f1': 0.8280423280423279,\n",
       " 'eval_runtime': 4.949,\n",
       " 'eval_samples_per_second': 936.962,\n",
       " 'eval_steps_per_second': 7.476,\n",
       " 'epoch': 0.9908256880733946}"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 28
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Step9 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:43:20.998329Z",
     "start_time": "2025-09-09T00:43:20.994357Z"
    }
   },
   "source": [
    "from transformers import pipeline"
   ],
   "outputs": [],
   "execution_count": 29
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:43:23.006486Z",
     "start_time": "2025-09-09T00:43:23.002107Z"
    }
   },
   "source": [
    "# 使用pipeline进行推理，要指定id2label\n",
    "model.config.id2label = {idx: label for idx, label in enumerate(label_list)}\n",
    "model.config"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BertConfig {\n",
       "  \"_name_or_path\": \"hfl/chinese-macbert-base\",\n",
       "  \"architectures\": [\n",
       "    \"BertForTokenClassification\"\n",
       "  ],\n",
       "  \"attention_probs_dropout_prob\": 0.1,\n",
       "  \"classifier_dropout\": null,\n",
       "  \"directionality\": \"bidi\",\n",
       "  \"gradient_checkpointing\": false,\n",
       "  \"hidden_act\": \"gelu\",\n",
       "  \"hidden_dropout_prob\": 0.1,\n",
       "  \"hidden_size\": 768,\n",
       "  \"id2label\": {\n",
       "    \"0\": \"O\",\n",
       "    \"1\": \"B-PER\",\n",
       "    \"2\": \"I-PER\",\n",
       "    \"3\": \"B-ORG\",\n",
       "    \"4\": \"I-ORG\",\n",
       "    \"5\": \"B-LOC\",\n",
       "    \"6\": \"I-LOC\"\n",
       "  },\n",
       "  \"initializer_range\": 0.02,\n",
       "  \"intermediate_size\": 3072,\n",
       "  \"label2id\": {\n",
       "    \"LABEL_0\": 0,\n",
       "    \"LABEL_1\": 1,\n",
       "    \"LABEL_2\": 2,\n",
       "    \"LABEL_3\": 3,\n",
       "    \"LABEL_4\": 4,\n",
       "    \"LABEL_5\": 5,\n",
       "    \"LABEL_6\": 6\n",
       "  },\n",
       "  \"layer_norm_eps\": 1e-12,\n",
       "  \"max_position_embeddings\": 512,\n",
       "  \"model_type\": \"bert\",\n",
       "  \"num_attention_heads\": 12,\n",
       "  \"num_hidden_layers\": 12,\n",
       "  \"pad_token_id\": 0,\n",
       "  \"pooler_fc_size\": 768,\n",
       "  \"pooler_num_attention_heads\": 12,\n",
       "  \"pooler_num_fc_layers\": 3,\n",
       "  \"pooler_size_per_head\": 128,\n",
       "  \"pooler_type\": \"first_token_transform\",\n",
       "  \"position_embedding_type\": \"absolute\",\n",
       "  \"torch_dtype\": \"float32\",\n",
       "  \"transformers_version\": \"4.43.4\",\n",
       "  \"type_vocab_size\": 2,\n",
       "  \"use_cache\": true,\n",
       "  \"vocab_size\": 21128\n",
       "}"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 30
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:43:39.032217Z",
     "start_time": "2025-09-09T00:43:39.028307Z"
    }
   },
   "source": [
    "# 如果模型是基于GPU训练的，那么推理时要指定device\n",
    "# 对于NER任务，可以指定aggregation_strategy为simple，得到具体的实体的结果，而不是token的结果\n",
    "ner_pipe = pipeline(\"token-classification\", model = model, tokenizer = tokenizer, device = 0,\n",
    "\taggregation_strategy = \"simple\")"
   ],
   "outputs": [],
   "execution_count": 31
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:43:40.654286Z",
     "start_time": "2025-09-09T00:43:40.559058Z"
    }
   },
   "source": [
    "res = ner_pipe(\"郑浩希望在北京上班\")\n",
    "res"
   ],
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[{'entity_group': 'PER',\n",
       "  'score': 0.9541801,\n",
       "  'word': '郑 浩',\n",
       "  'start': 0,\n",
       "  'end': 2},\n",
       " {'entity_group': 'LOC',\n",
       "  'score': 0.9380979,\n",
       "  'word': '北 京',\n",
       "  'start': 5,\n",
       "  'end': 7}]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 32
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-09T00:43:45.530903Z",
     "start_time": "2025-09-09T00:43:45.525847Z"
    }
   },
   "source": [
    "# 根据start和end取实际的结果\n",
    "ner_result = {}\n",
    "x = \"郑浩希望在北京上班\"\n",
    "for r in res:\n",
    "\tif r[\"entity_group\"] not in ner_result:\n",
    "\t\tner_result[r[\"entity_group\"]] = []\n",
    "\tner_result[r[\"entity_group\"]].append(x[r[\"start\"]: r[\"end\"]])\n",
    "\n",
    "ner_result"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'PER': ['郑浩'], 'LOC': ['北京']}"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 33
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "transformers",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
