{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 导入相关包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "from transformers import (\n",
    "    AutoModelForTokenClassification,\n",
    "    AutoTokenizer,\n",
    "    DataCollatorForTokenClassification,\n",
    "    Trainer,\n",
    "    TrainingArguments,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载数据集合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import DatasetDict\n",
    "\n",
    "# Load the NER dataset from a local on-disk copy (presumably a saved copy of\n",
    "# people_daily_ner, per the alternative below -- confirm against the data dir).\n",
    "# Alternative: download from the Hub instead of loading from disk:\n",
    "# ner_datasets = load_dataset(\"people_daily_ner\",cache_dir='/data/datasets')\n",
    "ner_datasets = DatasetDict.load_from_disk(\"/data/datasets/ner_data\")\n",
    "ner_datasets"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 获取实体标注类型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# String names of the NER tag classes (the ClassLabel feature of ner_tags).\n",
    "# Used later for num_labels and to decode predicted tag ids back to names.\n",
    "label_list = ner_datasets['train'].features[\"ner_tags\"].feature.names"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer = AutoTokenizer.from_pretrained(\"/data/models/merged_model_2_02\")\n",
    "# is_split_into_words controls whether input is treated as pre-split words or raw text.\n",
    "res = tokenizer([\"10.10.112.59\"]) # author's note: 7 sub-words, 7 tags, 7 word_ids -- verify against the printed output\n",
    "print(res.input_ids)\n",
    "print(res.word_ids())\n",
    "\n",
    "res = tokenizer([\"interesting word\"]) # author's note: 2 words / 2 tags, but word_ids() has 5 entries (sub-tokens + special tokens)\n",
    "print(\"输入id序列\",res.input_ids)\n",
    "print(\"词的id\",res.word_ids())\n",
    "\n",
    "# Batch of pre-split token lists, padded/truncated to 64 -- the shape used in training.\n",
    "res = tokenizer(ner_datasets['train'][0:2][\"tokens\"],is_split_into_words=True,padding='max_length',max_length=64,truncation=True,return_offsets_mapping=True,return_tensors='pt')\n",
    "ner_datasets['train'][0:1][\"ner_tags\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def process_function(example):\n",
    "    \"\"\"Tokenize pre-split word sequences and align word-level NER tags to sub-tokens.\n",
    "\n",
    "    example: a batch dict with \"tokens\" (list of word lists) and \"ner_tags\"\n",
    "    (list of per-word tag-id lists).  Returns the tokenizer output with an\n",
    "    added \"labels\" field; special-token positions are labeled -100 so the\n",
    "    loss function ignores them.\n",
    "    \"\"\"\n",
    "    # Alignment example for one sentence ['interesting', 'word'] with ner_tags [5, 6]:\n",
    "    #   input_ids = [101, 10673, 12865, 12921, 8181, 8681, 102]\n",
    "    #   word_ids  = [None, 0,    0,     0,     0,    1,    None]\n",
    "    #   labels    = [-100, 5,    5,     5,     5,    6,    -100]\n",
    "    # A word may split into several sub-tokens; its tag is repeated on each\n",
    "    # sub-token, and None positions (CLS/SEP) are masked with -100.\n",
    "    tokenizer_example =  tokenizer(example[\"tokens\"],max_length=128, truncation=True, is_split_into_words=True)\n",
    "    labels =  []\n",
    "    # One label sequence per sentence in the batch.\n",
    "    for i ,label in enumerate(example[\"ner_tags\"]):\n",
    "        # word_ids maps each sub-token position back to its source word index.\n",
    "        word_ids = tokenizer_example.word_ids(batch_index=i)\n",
    "        label_ids = []\n",
    "        for word_id in word_ids:\n",
    "            if word_id is None:\n",
    "                label_ids.append(-100)\n",
    "            else:\n",
    "                label_ids.append(label[word_id])\n",
    "        labels.append(label_ids)\n",
    "    tokenizer_example[\"labels\"] = labels\n",
    "    return tokenizer_example"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# batched=True passes dict-of-lists batches to process_function (faster than per-example).\n",
    "tokenized_datasets = ner_datasets.map(process_function,batched=True)\n",
    "# tokenized_datasets = ner_datasets.map(process_function)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 加载模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Register the class-name mapping in the model config so saved checkpoints\n",
    "# and inference pipelines report real tag names instead of LABEL_0 / LABEL_1...\n",
    "# num_labels is implied by the mappings and kept for clarity.\n",
    "model = AutoModelForTokenClassification.from_pretrained(\n",
    "    \"/data/models/merged_model_2_02\",\n",
    "    num_labels=len(label_list),\n",
    "    id2label=dict(enumerate(label_list)),\n",
    "    label2id={label: i for i, label in enumerate(label_list)},\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 创建评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "from seqeval.metrics import f1_score\n",
    "\n",
    "def evaluate_metrics(pred):\n",
    "    \"\"\"Compute entity-level F1 with seqeval for the Trainer.\n",
    "\n",
    "    pred: an EvalPrediction tuple of (logits, label_ids).  Positions whose\n",
    "    label is -100 (special tokens) are excluded before scoring.\n",
    "    \"\"\"\n",
    "    predictions, labels = pred\n",
    "    predictions = np.argmax(predictions, axis=-1)\n",
    "\n",
    "    # Decode ids back to tag-name sequences, dropping the -100 positions.\n",
    "    true_predictions = [\n",
    "        [label_list[p] for p, l in zip(prediction, label) if l != -100]\n",
    "        for prediction, label in zip(predictions, labels)\n",
    "    ]\n",
    "    true_labels = [\n",
    "        [label_list[l] for p, l in zip(prediction, label) if l != -100]\n",
    "        for prediction, label in zip(predictions, labels)\n",
    "    ]\n",
    "    # seqeval's signature is f1_score(y_true, y_pred); the original call passed\n",
    "    # predictions first.  F1 happens to be symmetric in its arguments, but the\n",
    "    # correct order matters as soon as precision/recall are added, so fix it.\n",
    "    f1 = f1_score(true_labels, true_predictions)\n",
    "    return {\"f1\": f1}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 配置训练参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "training_args = TrainingArguments(\n",
    "    output_dir=\"/data/logs/2_2/results\",   # checkpoints and logs\n",
    "    num_train_epochs=3,\n",
    "    per_device_train_batch_size=64,\n",
    "    per_device_eval_batch_size=128,\n",
    "    warmup_steps=500,                      # LR scheduler warmup steps\n",
    "    # Evaluate and save once per epoch; the two strategies must match for\n",
    "    # load_best_model_at_end to work.  (eval_steps only applies under the\n",
    "    # \"steps\" strategy, so the old eval_steps=100 was dead config and dropped.)\n",
    "    evaluation_strategy=\"epoch\",\n",
    "    save_strategy=\"epoch\",\n",
    "    learning_rate=2e-5,\n",
    "    weight_decay=0.01,\n",
    "    metric_for_best_model=\"f1\",            # key returned by evaluate_metrics\n",
    "    load_best_model_at_end=True,\n",
    "    logging_steps=100,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 创建训练器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# DataCollatorForTokenClassification dynamically pads input_ids AND the\n",
    "# \"labels\" field per batch (labels padded with -100 so the loss ignores them).\n",
    "trainer =  Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=tokenized_datasets['train'],\n",
    "    eval_dataset=tokenized_datasets['validation'],\n",
    "    compute_metrics=evaluate_metrics,\n",
    "    data_collator=DataCollatorForTokenClassification(tokenizer),\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 模型评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Final evaluation on the held-out test split (metrics come from evaluate_metrics).\n",
    "trainer.evaluate(eval_dataset=tokenized_datasets[\"test\"])"
   ]
  },
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
