{
 "metadata": {
  "kernelspec": {
   "language": "python",
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.7.12",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  }
 },
 "nbformat_minor": 4,
 "nbformat": 4,
 "cells": [
  {
   "cell_type": "code",
   "source": [
    "#!git clone https://github.com/nlp-with-transformers/notebooks.git\n",
    "#%cd notebooks\n",
    "#from install import *\n",
    "#install_requirements()\n"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T14:02:17.804248Z",
     "iopub.execute_input": "2022-08-13T14:02:17.805073Z",
     "iopub.status.idle": "2022-08-13T14:02:33.401117Z",
     "shell.execute_reply.started": "2022-08-13T14:02:17.805035Z",
     "shell.execute_reply": "2022-08-13T14:02:33.399429Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 1,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "#hide\n",
    "#from utils import *\n",
    "#setup_chapter()\n",
    "#!pip install seqeval"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T14:02:33.404812Z",
     "iopub.execute_input": "2022-08-13T14:02:33.405513Z",
     "iopub.status.idle": "2022-08-13T14:02:43.408066Z",
     "shell.execute_reply.started": "2022-08-13T14:02:33.405469Z",
     "shell.execute_reply": "2022-08-13T14:02:43.406901Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 2,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
     "# BIO tag schema for the land-auction NER task.\n",
     "# Label strings are runtime data and intentionally stay in Chinese.\n",
     "trigger_label = '触发词'\n",
     "schemas = ['O', 'B-地块编码','I-地块编码', 'B-地块位置', 'I-地块位置', 'B-出让面积', 'I-出让面积', 'B-土地用途', 'I-土地用途', 'B-容积率', 'I-容积率', \n",
     "           'B-起始价', 'I-起始价', 'B-成交价', 'I-成交价', 'B-溢价率', 'I-溢价率', 'B-成交时间', 'I-成交时间', 'B-受让人', 'I-受让人', \n",
     "           'B-城市', 'I-城市', 'B-触发词', 'I-触发词']\n",
     "# Bidirectional lookups between tag string and integer class id\n",
     "id2tag = {idx: tag for idx, tag in enumerate(schemas)}\n",
     "tag2id = {tag: idx for idx, tag in enumerate(schemas)}\n",
     "\n",
     "id2tag,tag2id"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T14:02:43.410521Z",
     "iopub.execute_input": "2022-08-13T14:02:43.411326Z",
     "iopub.status.idle": "2022-08-13T14:02:43.424599Z",
     "shell.execute_reply.started": "2022-08-13T14:02:43.411279Z",
     "shell.execute_reply": "2022-08-13T14:02:43.423394Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 3,
   "outputs": [
    {
     "data": {
      "text/plain": "({0: 'O',\n  1: 'B-地块编码',\n  2: 'I-地块编码',\n  3: 'B-地块位置',\n  4: 'I-地块位置',\n  5: 'B-出让面积',\n  6: 'I-出让面积',\n  7: 'B-土地用途',\n  8: 'I-土地用途',\n  9: 'B-容积率',\n  10: 'I-容积率',\n  11: 'B-起始价',\n  12: 'I-起始价',\n  13: 'B-成交价',\n  14: 'I-成交价',\n  15: 'B-溢价率',\n  16: 'I-溢价率',\n  17: 'B-成交时间',\n  18: 'I-成交时间',\n  19: 'B-受让人',\n  20: 'I-受让人',\n  21: 'B-城市',\n  22: 'I-城市',\n  23: 'B-触发词',\n  24: 'I-触发词'},\n {'O': 0,\n  'B-地块编码': 1,\n  'I-地块编码': 2,\n  'B-地块位置': 3,\n  'I-地块位置': 4,\n  'B-出让面积': 5,\n  'I-出让面积': 6,\n  'B-土地用途': 7,\n  'I-土地用途': 8,\n  'B-容积率': 9,\n  'I-容积率': 10,\n  'B-起始价': 11,\n  'I-起始价': 12,\n  'B-成交价': 13,\n  'I-成交价': 14,\n  'B-溢价率': 15,\n  'I-溢价率': 16,\n  'B-成交时间': 17,\n  'I-成交时间': 18,\n  'B-受让人': 19,\n  'I-受让人': 20,\n  'B-城市': 21,\n  'I-城市': 22,\n  'B-触发词': 23,\n  'I-触发词': 24})"
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "# 读取原始训练据并进行标准化处理\n",
    "import json\n",
    "import numpy, pandas as pd\n",
    "from datasets import Dataset\n",
    "\n",
    "def processLabelInALine(line):\n",
    "    # 初始化空数组\n",
    "    line = line.strip()\n",
    "    if len(line) == 0:\n",
    "        return\n",
    "    linejson = json.loads(line)\n",
    "    nertag = numpy.zeros(len(linejson[\"data\"]))\n",
    "    tokens = [i for i in linejson[\"data\"]]\n",
    "    for (start, stop, tagstr) in linejson[\"label\"]:\n",
    "        if tagstr == \"NN\":\n",
    "            nertag[start:stop] = 0\n",
    "        else:\n",
    "            starttagstr = \"B-\" + tagstr\n",
    "            intenelstr = \"I-\" + tagstr\n",
    "            nertag[start] = tag2id[starttagstr]\n",
    "            nertag[start + 1:stop] = tag2id[intenelstr]\n",
    "    return {\"id\": linejson[\"id\"], \"tokens\": tokens, \"ner_tags\": nertag}\n",
    "\n",
    "def processFile(filename):\n",
    "    f=open(filename,'r',encoding='utf8')\n",
    "    lines = f.readlines()\n",
    "    processed = list(map(processLabelInALine, lines))\n",
    "    processNone = list(filter(None, processed))\n",
    "    df = pd.DataFrame(processNone)\n",
    "    landDataset =  Dataset.from_pandas(df, split=\"train\")\n",
    "    return landDataset"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T14:02:43.428104Z",
     "iopub.execute_input": "2022-08-13T14:02:43.428933Z",
     "iopub.status.idle": "2022-08-13T14:02:43.443065Z",
     "shell.execute_reply.started": "2022-08-13T14:02:43.428825Z",
     "shell.execute_reply": "2022-08-13T14:02:43.441723Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 4,
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# Preprocess the data: convert labels into the normalized Dataset format.\n",
     "from datasets import DatasetDict\n",
     "land_train,land_valid = processFile(\"train.json\"), processFile(\"valid.json\")\n",
     "dataset = DatasetDict()\n",
     "dataset['train'] = land_train\n",
     "dataset['valid'] = land_valid\n",
     "dataset"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# hide_output\n",
    "from transformers import AutoTokenizer\n",
    "#加载模型对应的分词器\n",
    "bert_model_name = \"bert-base-chinese\"\n",
    "bert_tokenizer = AutoTokenizer.from_pretrained(bert_model_name)\n",
    "def tokenize_and_align_labels(examples):\n",
    "    #输入字的list，输出用tokenizer的词典中序号表示的字，并对特殊字符和子词进行特殊处理。\n",
    "    tokenized_inputs = bert_tokenizer(examples[\"tokens\"], truncation=True, \n",
    "                                      is_split_into_words=True)\n",
    "    #定义空的标签表示\n",
    "    labels = []\n",
    "    #迭代输入的ner_tags标签\n",
    "    for idx, label in enumerate(examples[\"ner_tags\"]):\n",
    "        #tokenized_input包含word_ids函数，实现对子词与整词的识别。\n",
    "        #这里我们可以看到word_ids将每个子单词映射到单词序列中对应的索引\n",
    "        word_ids = tokenized_inputs.word_ids(batch_index=idx)\n",
    "        previous_word_idx = None\n",
    "        label_ids = []\n",
    "        for word_idx in word_ids:\n",
    "            # 将特殊符号的标签设置为-100，以便在计算损失函数时自动忽略\n",
    "            if word_idx is None:\n",
    "                label_ids.append(-100)\n",
    "            # 把标签设置到每个词的第一个token上\n",
    "            elif word_idx != previous_word_idx:\n",
    "                label_ids.append(label[word_idx])\n",
    "            # 对于每个词的其他token也设置为当前标签\n",
    "            else:\n",
    "                label_ids.append(label[word_idx])\n",
    "            previous_word_idx = word_idx\n",
    "            \n",
    "        labels.append(label_ids)\n",
    "    #把处理后的labels数组，设置为tokenized_inputs[\"labels\"]\n",
    "    tokenized_inputs[\"labels\"] = labels\n",
    "    return tokenized_inputs"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# Tokenize both splits and align labels; cache disabled so code edits take effect\n",
     "tokenized_datasets = dataset.map(tokenize_and_align_labels, batched=True, load_from_cache_file=False)\n",
     "pd.DataFrame([tokenized_datasets[\"train\"][\"tokens\"][0],tokenized_datasets[\"train\"][\"ner_tags\"][0]])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# Get the label list and load the pretrained token-classification model\n",
     "label_list = schemas\n",
     "\n",
     "import torch\n",
     "from transformers import AutoConfig\n",
     "from transformers import BertForTokenClassification, TrainingArguments, Trainer, DataCollatorForTokenClassification\n",
     "\n",
     "\n",
     "# Train on GPU when available\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "#bert_config = AutoConfig.from_pretrained(bert_model_name, \n",
     "##                                         num_labels=25,\n",
     " #                                        id2label=id2tag, label2id=tag2id)\n",
     "model = BertForTokenClassification.from_pretrained('bert-base-chinese',  num_labels=len(schemas)).to(device)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "from datasets import load_dataset, load_metric\n",
     "from transformers import TrainingArguments, Trainer, DataCollatorForTokenClassification\n",
     "import numpy as np\n",
     "# Build the data collator and use seqeval for entity-level evaluation\n",
     "data_collator = DataCollatorForTokenClassification(bert_tokenizer)\n",
     "metric = load_metric(\"seqeval\")\n",
     "\n",
     "# Evaluation metrics for the Trainer\n",
     "def compute_metrics(p):\n",
     "    # p is a (predictions, labels) pair; predictions has shape\n",
     "    # [batch, seq_len, num_labels], labels is the padded int label matrix.\n",
     "    predictions, labels = p\n",
     "    predictions = np.argmax(predictions, axis=2)\n",
     "\n",
     "    # Drop positions marked -100 (special tokens ignored by the loss)\n",
     "    true_predictions = [\n",
     "        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]\n",
     "        for prediction, label in zip(predictions, labels)\n",
     "    ]\n",
     "    true_labels = [\n",
     "        [label_list[l] for (p, l) in zip(prediction, label) if l != -100]\n",
     "        for prediction, label in zip(predictions, labels)\n",
     "    ]\n",
     "\n",
     "    results = metric.compute(predictions=true_predictions, references=true_labels)\n",
     "    return {\n",
     "        \"precision\": results[\"overall_precision\"],\n",
     "        \"recall\": results[\"overall_recall\"],\n",
     "        \"f1\": results[\"overall_f1\"],\n",
     "        \"accuracy\": results[\"overall_accuracy\"],\n",
     "    }"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "\n",
     "# Define TrainingArguments and the Trainer\n",
     "args = TrainingArguments(\n",
     "    \"landata\",                     # output dir for checkpoints and other files\n",
     "    evaluation_strategy=\"epoch\",        # evaluate at the end of every epoch\n",
     "    learning_rate=2e-5,                 # initial learning rate\n",
     "    per_device_train_batch_size=8,     # training batch size per device\n",
     "    per_device_eval_batch_size=8,      # eval batch size per device\n",
     "    num_train_epochs=3,                 # number of training epochs\n",
     ")\n",
     "\n",
     "trainer = Trainer(\n",
     "    model,\n",
     "    args,\n",
     "    train_dataset=tokenized_datasets[\"train\"],\n",
     "    eval_dataset=tokenized_datasets[\"valid\"],\n",
     "    data_collator=data_collator,\n",
     "    tokenizer=bert_tokenizer,\n",
     "    compute_metrics=compute_metrics\n",
     ")\n",
     "\n",
     "# Start training (takes a few minutes on a mainstream GPU)\n",
     "trainer.train()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from datasets import ClassLabel\n",
    "tags = ClassLabel(num_classes=len(schemas), names=schemas)\n",
    "\n",
    "def tag_text(text, tags, model, tokenizer):\n",
    "    # Get tokens with special characters\n",
    "    tokens = tokenizer(text).tokens()\n",
    "    # Encode the sequence into IDs\n",
    "    input_ids = bert_tokenizer(text, return_tensors=\"pt\").input_ids.to(device)\n",
    "    # Get predictions as distribution over 7 possible classes\n",
    "    outputs = model(input_ids)[0]\n",
    "    # Take argmax to get most likely class per token\n",
    "    predictions = torch.argmax(outputs, dim=2)\n",
    "    # Convert to DataFrame\n",
    "    preds = [tags.names[p] for p in predictions[0].cpu().numpy()]\n",
    "    return pd.DataFrame([tokens, preds], index=[\"Tokens\", \"Tags\"])\n",
    "    "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "text = \"\"\"\n",
    "中国网地产讯 8月4日，深圳开启第二轮集中供地，16宗涉宅地块总土地面积约33.29万平，总建筑面积为179.85万平。\n",
    "\n",
    "最终深圳招商房地产有限公司以31.02亿元竞得A301-0586地块。\n",
    "\n",
    "地块编号A301-0586，地块位于宝安区沙井街道，土地用途为居住用地，土地面积为3.78万平，建筑面积为15.14万平，起始价26.98亿元。\n",
    "\n",
    "据土地出让文件显示，A301-0586宗地竞得人应按照地质灾害危险性评估报告的结论采取相应措施。\n",
    "\n",
    "A301-0586宗地与12号线轨道安全保护区范围重叠，范围内设有轨道站点及其附属工程，竞得人应在本宗地设计、施工等环节充分考虑站点及其附属工程的统筹布局并无偿无条件提供建设条件，项目建设方案需事先征得地铁（铁路）建设运营单位书面同意意见，方可办理本宗地的《建设工程规划许可证》。本宗地范围与18号线空港新城段比选方案1规划控制预警区范围重叠，宗地围护结构锚索禁止侵入18号线空港新城段比选方案1规划控制区。\n",
    "\"\"\"\n",
     "pd.set_option('display.max_columns', None) # show all columns\n",
     "\n",
     "tag_text(text, tags, trainer.model, bert_tokenizer)\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "from torch.nn.functional import cross_entropy\n",
    "\n",
    "def forward_pass_with_label(batch):\n",
    "    # Convert dict of lists to list of dicts suitable for data collator\n",
    "    features = [dict(zip(batch, t)) for t in zip(*batch.values())]\n",
    "    # Pad inputs and labels and put all tensors on device\n",
    "    batch = data_collator(features)\n",
    "    input_ids = batch[\"input_ids\"].to(device)\n",
    "    attention_mask = batch[\"attention_mask\"].to(device)\n",
    "    labels = batch[\"labels\"].to(device)\n",
    "    with torch.no_grad():\n",
    "        # Pass data through model\n",
    "        output = trainer.model(input_ids, attention_mask)\n",
    "        # Logit.size: [batch_size, sequence_length, classes]\n",
    "        # Predict class with largest logit value on classes axis\n",
    "        predicted_label = torch.argmax(output.logits, axis=-1).cpu().numpy()\n",
    "    # Calculate loss per token after flattening batch dimension with view\n",
    "    loss = cross_entropy(output.logits.view(-1, 7),\n",
    "                         labels.view(-1), reduction=\"none\")\n",
    "    # Unflatten batch dimension and convert to numpy array\n",
    "    loss = loss.view(len(input_ids), -1).cpu().numpy()\n",
    "\n",
    "    return {\"loss\":loss, \"predicted_label\": predicted_label}"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
     "# hide_output\n",
     "# Run the error-analysis forward pass over the validation split\n",
     "valid_set = tokenized_datasets[\"valid\"]\n",
     "valid_set = valid_set.map(forward_pass_with_label, batched=True, batch_size=8)\n",
     "df = valid_set.to_pandas()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "outputs": [],
   "source": [
    "from torch.nn.functional import cross_entropy\n",
    "\n",
    "def forward_pass_with_label(batch):\n",
    "    # Convert dict of lists to list of dicts suitable for data collator\n",
    "    features = [dict(zip(batch, t)) for t in zip(*batch.values())]\n",
    "    # Pad inputs and labels and put all tensors on device\n",
    "    batch = data_collator(features)\n",
    "    input_ids = batch[\"input_ids\"].to(device)\n",
    "    attention_mask = batch[\"attention_mask\"].to(device)\n",
    "    labels = batch[\"labels\"].to(device)\n",
    "    with torch.no_grad():\n",
    "        # Pass data through model\n",
    "        output = trainer.model(input_ids, attention_mask)\n",
    "        # Logit.size: [batch_size, sequence_length, classes]\n",
    "        # Predict class with largest logit value on classes axis\n",
    "        predicted_label = torch.argmax(output.logits, axis=-1).cpu().numpy()\n",
    "    # Calculate loss per token after flattening batch dimension with view\n",
    "    loss = cross_entropy(output.logits.view(-1, 7),\n",
    "                         labels.view(-1), reduction=\"none\")\n",
    "    # Unflatten batch dimension and convert to numpy array\n",
    "    loss = loss.view(len(input_ids), -1).cpu().numpy()\n",
    "\n",
    "    return {\"loss\":loss, \"predicted_label\": predicted_label}"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
    "execution_count": null,
    "outputs": [],
   "source": [
    "# hide_output\n",
    "valid_set = tokenized_datasets[\"valid\"]\n",
    "valid_set = valid_set.map(forward_pass_with_label, batched=True, batch_size=16)\n",
    "df = valid_set.to_pandas()"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ]
}