{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "9a19713e",
   "metadata": {},
   "source": [
    "# 基于Transformers的命名实体识别"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e62c66e6",
   "metadata": {},
   "source": [
    "### step 1导入包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c75a7f41",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\31811\\.conda\\envs\\transformers\\lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import evaluate\n",
    "from datasets import load_dataset,load_from_disk\n",
    "from transformers import AutoTokenizer,AutoModelForTokenClassification,TrainingArguments,Trainer,DataCollatorForTokenClassification\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "39fba2a8",
   "metadata": {},
   "source": [
    "### step 2导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "2147e273",
   "metadata": {},
   "outputs": [],
   "source": [
    "ner_datasets=load_from_disk(\"ner_data\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ee12749d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'id': '0',\n",
       " 'tokens': ['海',\n",
       "  '钓',\n",
       "  '比',\n",
       "  '赛',\n",
       "  '地',\n",
       "  '点',\n",
       "  '在',\n",
       "  '厦',\n",
       "  '门',\n",
       "  '与',\n",
       "  '金',\n",
       "  '门',\n",
       "  '之',\n",
       "  '间',\n",
       "  '的',\n",
       "  '海',\n",
       "  '域',\n",
       "  '。'],\n",
       " 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0]}"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ner_datasets[\"train\"][0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "96e6ec43",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'id': Value(dtype='string', id=None),\n",
       " 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),\n",
       " 'ner_tags': Sequence(feature=ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'], id=None), length=-1, id=None)}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ner_datasets[\"train\"].features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "2724e30f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\"\"\"查看标签\"\"\"\n",
    "# Read the label names from the ClassLabel feature instead of hardcoding them,\n",
    "# so the list always matches the dataset on disk.\n",
    "# label_list=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']\n",
    "label_list=ner_datasets[\"train\"].features[\"ner_tags\"].feature.names\n",
    "label_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "f2541265",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bind the three splits to short names.\n",
    "# BUG FIX: data_test previously pointed at the *train* split, so any evaluation\n",
    "# on it would have leaked training data. Use the real test split.\n",
    "data_train=ner_datasets[\"train\"]\n",
    "data_test=ner_datasets[\"test\"]\n",
    "data_validation=ner_datasets[\"validation\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "87406e5d",
   "metadata": {},
   "source": [
    "### 数据集预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "13b8abe4",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer=AutoTokenizer.from_pretrained(r\"D:\\HuggFace_model\\chinese-macbert-base\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "af450d44",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['海',\n",
       " '钓',\n",
       " '比',\n",
       " '赛',\n",
       " '地',\n",
       " '点',\n",
       " '在',\n",
       " '厦',\n",
       " '门',\n",
       " '与',\n",
       " '金',\n",
       " '门',\n",
       " '之',\n",
       " '间',\n",
       " '的',\n",
       " '海',\n",
       " '域',\n",
       " '。']"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ner_datasets[\"train\"][0][\"tokens\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "c50595fb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [[101, 3862, 102], [101, 7157, 102], [101, 3683, 102], [101, 6612, 102], [101, 1765, 102], [101, 4157, 102], [101, 1762, 102], [101, 1336, 102], [101, 7305, 102], [101, 680, 102], [101, 7032, 102], [101, 7305, 102], [101, 722, 102], [101, 7313, 102], [101, 4638, 102], [101, 3862, 102], [101, 1818, 102], [101, 511, 102]], 'token_type_ids': [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], 'attention_mask': [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]}"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer(ner_datasets[\"train\"][0][\"tokens\"])#发现把每个字都当成了一个句子"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "83284f93",
   "metadata": {},
   "source": [
    "对于处理好的tokenize的数据，要指定is_split_into_words参数为True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6c26fd85",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [101, 3862, 7157, 3683, 6612, 1765, 4157, 1762, 1336, 7305, 680, 7032, 7305, 722, 7313, 4638, 3862, 1818, 511, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenizer(ner_datasets[\"train\"][0][\"tokens\"],is_split_into_words=True)#当成一整句话处理"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4d65f70d",
   "metadata": {},
   "source": [
    "### 对标签的思考"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "38f435e5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [101, 10673, 12865, 12921, 8181, 9158, 9602, 10447, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1]}"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res=tokenizer(\"interesting smell boy\")\n",
    "res#分不清谁对应的哪个词（不同词的范围）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "5732200f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[None, 0, 0, 0, 0, 1, 1, 2, None]"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\"\"\"使用word_ids()获取词的范围\"\"\"\n",
    "res.word_ids()#0000->interesting;11->smell;2->boy；cls与sep为None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "c24aa130",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [101, 2769, 3221, 6443, 102], 'token_type_ids': [0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1]}"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res1=tokenizer(\"我是谁\")\n",
    "res1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "ef407e12",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[None, 0, 1, 2, None]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res1.word_ids()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a974fbb5",
   "metadata": {},
   "source": [
    "### 定义处理函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "fa78a30c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_function(examples):\n",
    "    \"\"\"Tokenize pre-split token lists and align word-level NER tags to sub-word tokens.\n",
    "\n",
    "    Special tokens ([CLS]/[SEP], word_id None) receive label -100 so that\n",
    "    cross entropy ignores them. Returns the tokenized batch with a 'labels' key added.\n",
    "    \"\"\"\n",
    "    tokenized_examples=tokenizer(examples['tokens'],max_length=128,truncation=True,is_split_into_words=True)\n",
    "    labels=[]\n",
    "    for i,label in enumerate(examples[\"ner_tags\"]):\n",
    "        word_ids=tokenized_examples.word_ids(batch_index=i)# word ids of the i-th example in the batch\n",
    "        label_ids=[]\n",
    "        for word_id in word_ids:\n",
    "            if word_id is None:\n",
    "                label_ids.append(-100)# [CLS]/[SEP] get -100; cross entropy skips them\n",
    "            else:\n",
    "                label_ids.append(label[word_id])# copy the word-level tag onto this sub-token\n",
    "        labels.append(label_ids)# collect the aligned labels for this example\n",
    "    tokenized_examples[\"labels\"]=labels\n",
    "    return tokenized_examples\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "bf8e036b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['id', 'tokens', 'ner_tags', 'input_ids', 'token_type_ids', 'attention_mask', 'labels'],\n",
       "    num_rows: 20865\n",
       "})"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tokenized_data_train=data_train.map(process_function,batched=True)\n",
    "tokenized_data_train#处理后多了一个特征labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "4754c373",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"把测试集和验证集也处理了\"\"\"\n",
    "tokenized_data_test=data_test.map(process_function,batched=True)\n",
    "tokenized_data_validation=data_validation.map(process_function,batched=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "fffdf6c5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'id': '0', 'tokens': ['海', '钓', '比', '赛', '地', '点', '在', '厦', '门', '与', '金', '门', '之', '间', '的', '海', '域', '。'], 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0], 'input_ids': [101, 3862, 7157, 3683, 6612, 1765, 4157, 1762, 1336, 7305, 680, 7032, 7305, 722, 7313, 4638, 3862, 1818, 511, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': [-100, 0, 0, 0, 0, 0, 0, 0, 5, 6, 0, 5, 6, 0, 0, 0, 0, 0, 0, -100]}\n"
     ]
    }
   ],
   "source": [
    "print(tokenized_data_train[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "edf4bb8d",
   "metadata": {},
   "source": [
    "## step4 创建模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "975002de",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForTokenClassification were not initialized from the model checkpoint at D:\\HuggFace_model\\chinese-macbert-base and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    }
   ],
   "source": [
    "# For any task with more than two labels, set num_labels explicitly; otherwise the\n",
    "# classification head defaults to 2 outputs and will not match the 7 NER tags.\n",
    "# NOTE(review): the original note claimed this causes a device error; the typical\n",
    "# failure is a label/logit size mismatch — TODO confirm.\n",
    "model=AutoModelForTokenClassification.from_pretrained(r\"D:\\HuggFace_model\\chinese-macbert-base\",num_labels=len(label_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "aa7f5e6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "seqeval=evaluate.load(\"seqeval\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "83e60902",
   "metadata": {},
   "source": [
    "seqeval地址：https://huggingface.co/spaces/evaluate-metric/seqeval/tree/main"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "ca033c68",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "EvaluationModule(name: \"seqeval\", module_type: \"metric\", features: {'predictions': Sequence(feature=Value(dtype='string', id='label'), length=-1, id='sequence'), 'references': Sequence(feature=Value(dtype='string', id='label'), length=-1, id='sequence')}, usage: \"\"\"\n",
       "Produces labelling scores along with its sufficient statistics\n",
       "from a source against one or more references.\n",
       "\n",
       "Args:\n",
       "    predictions: List of List of predicted labels (Estimated targets as returned by a tagger)\n",
       "    references: List of List of reference labels (Ground truth (correct) target values)\n",
       "    suffix: True if the IOB prefix is after type, False otherwise. default: False\n",
       "    scheme: Specify target tagging scheme. Should be one of [\"IOB1\", \"IOB2\", \"IOE1\", \"IOE2\", \"IOBES\", \"BILOU\"].\n",
       "        default: None\n",
       "    mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.\n",
       "        If you want to only count exact matches, pass mode=\"strict\". default: None.\n",
       "    sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None\n",
       "    zero_division: Which value to substitute as a metric value when encountering zero division. Should be on of 0, 1,\n",
       "        \"warn\". \"warn\" acts as 0, but the warning is raised.\n",
       "\n",
       "Returns:\n",
       "    'scores': dict. Summary of the scores for overall and per type\n",
       "        Overall:\n",
       "            'accuracy': accuracy,\n",
       "            'precision': precision,\n",
       "            'recall': recall,\n",
       "            'f1': F1 score, also known as balanced F-score or F-measure,\n",
       "        Per type:\n",
       "            'precision': precision,\n",
       "            'recall': recall,\n",
       "            'f1': F1 score, also known as balanced F-score or F-measure\n",
       "Examples:\n",
       "\n",
       "    >>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n",
       "    >>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]\n",
       "    >>> seqeval = evaluate.load(\"seqeval\")\n",
       "    >>> results = seqeval.compute(predictions=predictions, references=references)\n",
       "    >>> print(list(results.keys()))\n",
       "    ['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']\n",
       "    >>> print(results[\"overall_f1\"])\n",
       "    0.5\n",
       "    >>> print(results[\"PER\"][\"f1\"])\n",
       "    1.0\n",
       "\"\"\", stored examples: 0)"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "seqeval#IOB1比IOB2更严格"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "8a12a2fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"评估\"\"\"\n",
    "# Evaluation: map predicted ids back to label strings and score with seqeval.\n",
    "import numpy as np\n",
    "def eval_metric(pred):\n",
    "    \"\"\"Compute entity-level F1 from a (logits, labels) pair produced by the Trainer.\"\"\"\n",
    "    predictions,labels=pred\n",
    "    predictions=np.argmax(predictions,axis=-1)# argmax over the last (label) axis -> predicted ids\n",
    "    # Predicted label strings, dropping positions labelled -100 (special tokens).\n",
    "    true_predictions=[\n",
    "        [label_list[p] for p,l in zip(prediction,label) if l!= -100 ]\n",
    "        for prediction,label in zip(predictions,labels)\n",
    "    ]\n",
    "    # Gold label strings, filtered the same way so both lists stay aligned.\n",
    "    true_labels=[\n",
    "        [label_list[l] for p,l in zip(prediction,label) if l !=-100]\n",
    "        for prediction,label in zip(predictions,labels) \n",
    "    ]\n",
    "    result=seqeval.compute(predictions=true_predictions,references=true_labels,mode=\"strict\",scheme=\"IOB2\")# strict IOB2 entity matching\n",
    "    return{\n",
    "        \"f1\":result[\"overall_f1\"]\n",
    "    }"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "104dfe09",
   "metadata": {},
   "source": [
    "## Step6:配置训练参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "f90568c7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "c:\\Users\\31811\\.conda\\envs\\transformers\\lib\\site-packages\\transformers\\training_args.py:1494: FutureWarning: `evaluation_strategy` is deprecated and will be removed in version 4.46 of 🤗 Transformers. Use `eval_strategy` instead\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# Training configuration. `evaluation_strategy` is deprecated (the FutureWarning\n",
    "# above says it will be removed in transformers 4.46); use `eval_strategy` instead.\n",
    "args=TrainingArguments(\n",
    "    output_dir=\"moudle_for_ner_test\",\n",
    "    per_device_train_batch_size=64,\n",
    "    per_device_eval_batch_size=128,\n",
    "    eval_strategy=\"epoch\",# evaluate once per epoch\n",
    "    save_strategy=\"epoch\",# checkpoint once per epoch (must match eval for load_best_model_at_end)\n",
    "    metric_for_best_model=\"f1\",\n",
    "    load_best_model_at_end=True,\n",
    "    logging_steps=50# log training metrics every 50 steps\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9aeafc6",
   "metadata": {},
   "source": [
    "## Step7:创建训练器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "5b4a18b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "trainer=Trainer(\n",
    "    model=model,# the model to fine-tune\n",
    "    args=args,# training arguments configured above\n",
    "    tokenizer=tokenizer,\n",
    "    train_dataset=tokenized_data_train,\n",
    "    eval_dataset=tokenized_data_validation,\n",
    "    compute_metrics=eval_metric,# metric function run at each evaluation\n",
    "    data_collator=DataCollatorForTokenClassification(tokenizer=tokenizer)# batches and pads inputs AND label sequences\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "88c6251d",
   "metadata": {},
   "source": [
    "## STEP8:模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "703e0831",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  5%|▌         | 50/981 [00:27<08:14,  1.88it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.2294, 'grad_norm': 1.9038166999816895, 'learning_rate': 4.745158002038736e-05, 'epoch': 0.15}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 10%|█         | 100/981 [00:53<07:56,  1.85it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0425, 'grad_norm': 1.3818609714508057, 'learning_rate': 4.490316004077472e-05, 'epoch': 0.31}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 15%|█▌        | 150/981 [01:19<06:52,  2.01it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0385, 'grad_norm': 0.4910205900669098, 'learning_rate': 4.235474006116208e-05, 'epoch': 0.46}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 20%|██        | 200/981 [01:46<06:58,  1.87it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0342, 'grad_norm': 0.5817380547523499, 'learning_rate': 3.980632008154944e-05, 'epoch': 0.61}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 25%|██▌       | 250/981 [02:13<06:38,  1.83it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0348, 'grad_norm': 0.5567057132720947, 'learning_rate': 3.72579001019368e-05, 'epoch': 0.76}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 31%|███       | 300/981 [02:40<06:13,  1.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0269, 'grad_norm': 0.5076452493667603, 'learning_rate': 3.4709480122324164e-05, 'epoch': 0.92}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 33%|███▎      | 326/981 [02:54<05:55,  1.84it/s]\n",
      " 33%|███▎      | 327/981 [03:07<05:54,  1.84it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 0.02034037746489048, 'eval_f1': 0.9474694829241531, 'eval_runtime': 12.5129, 'eval_samples_per_second': 185.329, 'eval_steps_per_second': 1.518, 'epoch': 1.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 36%|███▌      | 350/981 [03:20<05:47,  1.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0272, 'grad_norm': 0.4799582064151764, 'learning_rate': 3.2161060142711516e-05, 'epoch': 1.07}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 41%|████      | 400/981 [03:47<05:07,  1.89it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0152, 'grad_norm': 0.32433900237083435, 'learning_rate': 2.9612640163098882e-05, 'epoch': 1.22}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 46%|████▌     | 450/981 [04:14<04:52,  1.82it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0155, 'grad_norm': 0.6532738208770752, 'learning_rate': 2.7064220183486238e-05, 'epoch': 1.38}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 51%|█████     | 500/981 [04:41<04:31,  1.77it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.016, 'grad_norm': 0.1853652000427246, 'learning_rate': 2.45158002038736e-05, 'epoch': 1.53}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 56%|█████▌    | 550/981 [05:08<03:58,  1.81it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0144, 'grad_norm': 0.266680508852005, 'learning_rate': 2.196738022426096e-05, 'epoch': 1.68}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 61%|██████    | 600/981 [05:36<03:34,  1.78it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.014, 'grad_norm': 0.9040787220001221, 'learning_rate': 1.9418960244648318e-05, 'epoch': 1.83}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 66%|██████▋   | 650/981 [06:03<02:53,  1.91it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0122, 'grad_norm': 0.5228301882743835, 'learning_rate': 1.6870540265035677e-05, 'epoch': 1.99}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                 \n",
      " 67%|██████▋   | 654/981 [06:17<02:48,  1.94it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 0.017276452854275703, 'eval_f1': 0.9540433656075276, 'eval_runtime': 12.5766, 'eval_samples_per_second': 184.39, 'eval_steps_per_second': 1.511, 'epoch': 2.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 71%|███████▏  | 700/981 [06:43<02:35,  1.81it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0083, 'grad_norm': 0.5266672372817993, 'learning_rate': 1.4322120285423038e-05, 'epoch': 2.14}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 76%|███████▋  | 750/981 [07:11<02:08,  1.79it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0061, 'grad_norm': 0.3886092007160187, 'learning_rate': 1.1773700305810397e-05, 'epoch': 2.29}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 82%|████████▏ | 800/981 [07:39<01:44,  1.73it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0063, 'grad_norm': 0.24387085437774658, 'learning_rate': 9.225280326197758e-06, 'epoch': 2.45}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 87%|████████▋ | 850/981 [08:07<01:14,  1.76it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0063, 'grad_norm': 0.41789230704307556, 'learning_rate': 6.676860346585118e-06, 'epoch': 2.6}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 92%|█████████▏| 900/981 [08:35<00:46,  1.74it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0064, 'grad_norm': 0.21563246846199036, 'learning_rate': 4.128440366972477e-06, 'epoch': 2.75}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      " 97%|█████████▋| 950/981 [09:03<00:18,  1.69it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 0.0067, 'grad_norm': 0.621703028678894, 'learning_rate': 1.580020387359837e-06, 'epoch': 2.91}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "                                                 \n",
      "100%|██████████| 981/981 [09:34<00:00,  1.72it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'eval_loss': 0.017186462879180908, 'eval_f1': 0.9565573770491804, 'eval_runtime': 12.914, 'eval_samples_per_second': 179.572, 'eval_steps_per_second': 1.471, 'epoch': 3.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 981/981 [09:36<00:00,  1.70it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'train_runtime': 576.1965, 'train_samples_per_second': 108.635, 'train_steps_per_second': 1.703, 'train_loss': 0.028784018165596156, 'epoch': 3.0}\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "TrainOutput(global_step=981, training_loss=0.028784018165596156, metrics={'train_runtime': 576.1965, 'train_samples_per_second': 108.635, 'train_steps_per_second': 1.703, 'total_flos': 3940951205762142.0, 'train_loss': 0.028784018165596156, 'epoch': 3.0})"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "trainer.train()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c68ec385",
   "metadata": {},
   "source": [
    "加载模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "5fae739b",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"加载训练好的模型\"\"\"\n",
    "from transformers import AutoModelForTokenClassification\n",
    "model2=AutoModelForTokenClassification.from_pretrained(r\"moudle_for_ner_test\\checkpoint-327\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "a68d2e0c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "BertConfig {\n",
       "  \"_name_or_path\": \"moudle_for_ner_test\\\\checkpoint-327\",\n",
       "  \"architectures\": [\n",
       "    \"BertForTokenClassification\"\n",
       "  ],\n",
       "  \"attention_probs_dropout_prob\": 0.1,\n",
       "  \"classifier_dropout\": null,\n",
       "  \"directionality\": \"bidi\",\n",
       "  \"gradient_checkpointing\": false,\n",
       "  \"hidden_act\": \"gelu\",\n",
       "  \"hidden_dropout_prob\": 0.1,\n",
       "  \"hidden_size\": 768,\n",
       "  \"id2label\": {\n",
       "    \"0\": \"O\",\n",
       "    \"1\": \"B-PER\",\n",
       "    \"2\": \"I-PER\",\n",
       "    \"3\": \"B-ORG\",\n",
       "    \"4\": \"I-ORG\",\n",
       "    \"5\": \"B-LOC\",\n",
       "    \"6\": \"I-LOC\"\n",
       "  },\n",
       "  \"initializer_range\": 0.02,\n",
       "  \"intermediate_size\": 3072,\n",
       "  \"label2id\": {\n",
       "    \"LABEL_0\": 0,\n",
       "    \"LABEL_1\": 1,\n",
       "    \"LABEL_2\": 2,\n",
       "    \"LABEL_3\": 3,\n",
       "    \"LABEL_4\": 4,\n",
       "    \"LABEL_5\": 5,\n",
       "    \"LABEL_6\": 6\n",
       "  },\n",
       "  \"layer_norm_eps\": 1e-12,\n",
       "  \"max_position_embeddings\": 512,\n",
       "  \"model_type\": \"bert\",\n",
       "  \"num_attention_heads\": 12,\n",
       "  \"num_hidden_layers\": 12,\n",
       "  \"pad_token_id\": 0,\n",
       "  \"pooler_fc_size\": 768,\n",
       "  \"pooler_num_attention_heads\": 12,\n",
       "  \"pooler_num_fc_layers\": 3,\n",
       "  \"pooler_size_per_head\": 128,\n",
       "  \"pooler_type\": \"first_token_transform\",\n",
       "  \"position_embedding_type\": \"absolute\",\n",
       "  \"torch_dtype\": \"float32\",\n",
       "  \"transformers_version\": \"4.42.4\",\n",
       "  \"type_vocab_size\": 2,\n",
       "  \"use_cache\": true,\n",
       "  \"vocab_size\": 21128\n",
       "}"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Attach human-readable label names to the loaded checkpoint's config.\n",
    "# Keep id2label AND label2id consistent — the saved config still carried the\n",
    "# generic LABEL_0..LABEL_6 mapping (visible in the output below), and a stale\n",
    "# label2id would mislead anything that maps names back to ids.\n",
    "model2.config.id2label = {idx: label for idx, label in enumerate(label_list)}\n",
    "model2.config.label2id = {label: idx for idx, label in enumerate(label_list)}\n",
    "model2.config"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "793e6ed3",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Hardware accelerator e.g. GPU is available in the environment, but no `device` argument is passed to the `Pipeline` object. Model will be on CPU.\n"
     ]
    }
   ],
   "source": [
    "from transformers import pipeline\n",
    "ner_pip=pipeline(\"token-classification\",model=model2,tokenizer=tokenizer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "7b5989f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"定义方法\"\"\"\n",
    "def ner_realize(text):\n",
    "    \"\"\"Run the NER pipeline on `text` and merge B-/I- tagged tokens into entities.\n",
    "\n",
    "    Returns a dict mapping entity type ('PER'/'LOC'/'ORG') to a list of entity strings.\n",
    "    \"\"\"\n",
    "    # The pipeline already returns a list of per-token dicts ('entity', 'word', ...);\n",
    "    # no need for the enumerate/copy dance the original used.\n",
    "    data_list = ner_pip(text)\n",
    "\n",
    "    entities = {\n",
    "        'PER': [],\n",
    "        'LOC': [],\n",
    "        'ORG': []\n",
    "    }\n",
    "\n",
    "    current_entity = None   # type of the entity currently being assembled\n",
    "    current_words = []      # its word pieces so far\n",
    "\n",
    "    for item in data_list:\n",
    "        entity_type = item['entity'][2:]  # strip the 'B-'/'I-' prefix\n",
    "        word = item['word']\n",
    "\n",
    "        if item['entity'].startswith('B-'):  # start of a new entity\n",
    "            if current_entity is not None:   # flush the entity being built\n",
    "                entities[current_entity].append(''.join(current_words))\n",
    "            # Guard against tag types outside PER/LOC/ORG (would raise KeyError on flush).\n",
    "            current_entity = entity_type if entity_type in entities else None\n",
    "            current_words = [word] if current_entity is not None else []\n",
    "        elif item['entity'].startswith('I-') and current_entity is not None:\n",
    "            current_words.append(word)  # continuation of the open entity\n",
    "\n",
    "    # Flush the last open entity, if any.\n",
    "    if current_entity is not None:\n",
    "        entities[current_entity].append(''.join(current_words))\n",
    "\n",
    "    # Print each category once (the original printed everything twice).\n",
    "    print(\"人名：\", entities['PER'])\n",
    "    print(\"地名：\", entities['LOC'])\n",
    "    print(\"机构名：\", entities['ORG'])\n",
    "\n",
    "    return entities"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "91194d2c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "人名： ['张作霖']\n",
      "地名： ['沈阳', '大连']\n",
      "机构名： ['奉天银行', '民族大学']\n",
      "PER ['张作霖']\n",
      "LOC ['沈阳', '大连']\n",
      "ORG ['奉天银行', '民族大学']\n"
     ]
    }
   ],
   "source": [
    "res=ner_realize(\"张作霖去沈阳的奉天银行办公了，又去了大连的民族大学\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "f607d69b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'PER': ['张作霖'], 'LOC': ['沈阳', '大连'], 'ORG': ['奉天银行', '民族大学']}"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "5cb7ec1e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<function list.index(value, start=0, stop=9223372036854775807, /)>"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ner_pip(\"杰克去墨西哥的肯德基了\").index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "adcc58db",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "transformers",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.23"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
