{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.5 64-bit (virtualenv)",
   "metadata": {
    "interpreter": {
     "hash": "7b4b4feff2f24a0f0a34464dbe537a36fda679851528fb8735cb41fa49dffb2d"
    }
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[['O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O B-V E-V O O O B-N E-N O O O O', '从 这 些 反 应 中 ， 我 们 清 楚 地 看 出 农 民 们 为 了 急 于 提 高 自 己 的 文 化 水 平 ， 对 “ 速 成 识 字 法 ” 寄 予 了 很 大 希 望 ， 深 望 它'], ['O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O B-V E-V O O B-N E-N', '上 ， 这 些 农 具 受 到 广 大 农 民 的 热 爱 ， 农 民 代 表 们 并 给 刘 启 宇 提 了 很 多 宝 贵 意 见 ， 对 农 村 生 产 力 的 提 高 寄 予 很 大 希 望']]\n"
     ]
    }
   ],
   "source": [
     "# Parse a CoNLL-style NER file: one \"token label\" pair per line,\n",
     "# blank lines separate sentences. Builds `lines`, a list of\n",
     "# [label_sequence, token_sequence] space-joined string pairs.\n",
     "# NOTE(review): hardcoded absolute Windows path — runs only on the author's machine.\n",
     " with open(r\"D:\\project\\zhangyi\\NER\\ylx\\dataset\\test.txt\", \"r\", encoding=\"utf-8\") as f:\n",
     "        lines = []\n",
     "        words = []\n",
     "        labels = []\n",
     "        \n",
     "        for line in f.readlines():   \n",
     "            contends = line.strip()\n",
     "            tokens = line.strip().split()\n",
     "\n",
     "            # A data line has exactly two whitespace-separated fields: token, label.\n",
     "            if len(tokens) == 2:\n",
     "                words.append(tokens[0])\n",
     "                labels.append(tokens[1])\n",
     "            else:\n",
     "                # Blank line => sentence boundary: flush the accumulated pair lists.\n",
     "                if len(contends) == 0 and len(words) > 0:\n",
     "                    label = []\n",
     "                    word = []\n",
     "                    for l, w in zip(labels, words):\n",
     "                        if len(l) > 0 and len(w) > 0:\n",
     "                            label.append(l)\n",
     "                            word.append(w)\n",
     "                    lines.append([' '.join(label), ' '.join(word)])\n",
     "                    words = []\n",
     "                    labels = []\n",
     "        \n",
     "        print(lines[:2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "{'E-V', 'B-N', 'O', 'E-N', 'I-N', 'B-V'}\n{'B': 0, 'I': 1, 'O': 2}\n{0: 'B', 1: 'I', 2: 'O'}\n"
     ]
    }
   ],
   "source": [
    "#D:\\project\\zhangyi\\NER\\ylx\n",
    "import pickle\n",
    "data={'B-V': 0, 'E-V': 1, 'O': 2, \"B-N\":3, \"E-N\":4}\n",
    "\n",
    "#读取pickle数据文件，还原数据结果    \n",
    "with open(r\"D:\\project\\zhangyi\\NER\\ylx\\label_list.pkl\",'rb') as file:\n",
    "    dada=pickle.load(file)\n",
    "    print(dada)\n",
    "label_list= {\"O\", \"B\", \"I\"}\n",
    "label2id = {value:id for id,value in enumerate(label_list)}\n",
    "id2label = {value:key for key,value in label2id.items()} \n",
    "print(label2id)\n",
    "print(id2label)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "label_map:  {'B-V': 0, 'E-N': 1, 'E-V': 2, 'B-N': 3, 'O': 4}\n",
      "['实']\n",
      "['际']\n",
      "['上']\n",
      "['，']\n",
      "['不']\n",
      "['仅']\n",
      "['法']\n",
      "['国']\n",
      "['殖']\n",
      "['民']\n",
      "['主']\n",
      "['义']\n",
      "['者']\n",
      "['，']\n",
      "['而']\n",
      "['且']\n",
      "['美']\n",
      "['、']\n",
      "['英']\n",
      "['殖']\n",
      "['民']\n",
      "['主']\n",
      "['义']\n",
      "['者']\n",
      "['都']\n",
      "['对']\n",
      "['这']\n",
      "['个']\n",
      "['[UNK]']\n",
      "['根']\n",
      "['本']\n",
      "['法']\n",
      "['[UNK]']\n",
      "['草']\n",
      "['案']\n",
      "['寄']\n",
      "['予']\n",
      "['很']\n",
      "['大']\n",
      "['希']\n",
      "['望']\n",
      "['。']\n",
      "labels: ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-V', 'E-V', 'O', 'O', 'B-N', 'E-N', 'O'] 42\n",
      "tokens:  ['实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。'] 42\n",
      "ori_tokens:  ['实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '“', '根', '本', '法', '”', '草', '案', '寄', '予', '很', '大', '希', '望', '。'] 42\n",
      "ori_tokens:  ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '“', '根', '本', '法', '”', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]'] 44\n",
      "label_ids:  [4]\n",
      "ntokens: ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]'] 44\n",
      "input_ids:  [101, 2141, 7354, 677, 8024, 679, 788, 3791, 1744, 3658, 3696, 712, 721, 5442, 8024, 5445, 684, 5401, 510, 5739, 3658, 3696, 712, 721, 5442, 6963, 2190, 6821, 702, 100, 3418, 3315, 3791, 100, 5770, 3428, 2164, 750, 2523, 1920, 2361, 3307, 511, 102, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n",
      "segment_ids:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_ids:  [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 2, 4, 4, 3, 1, 4, 4, 0, 0, 0, 0, 0, 0]\n",
      "ntokens:  ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]', '**NULL**', '**NULL**', '**NULL**', '**NULL**', '**NULL**', '**NULL**']\n"
     ]
    }
   ],
   "source": [
     "# Manually build BERT input features for one BIOES-labelled sentence:\n",
     "# tokenize word by word, align labels to subword pieces, add the\n",
     "# [CLS]/[SEP] markers, then pad every sequence to a fixed length of 50.\n",
     "textlist=['实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '“', '根', '本', '法', '”', '草', '案', '寄', '予', '很', '大', '希', '望', '。']\n",
     "labellist=['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-V', 'E-V', 'O', 'O', 'B-N', 'E-N', 'O']\n",
     "label_map={'B-V': 0, 'E-N': 1, 'E-V': 2, 'B-N': 3, 'O': 4}\n",
     "print(\"label_map: \",label_map)\n",
     "from transformers import BertTokenizer  \n",
     "tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\", do_lower_case=True)\n",
     "tokens = []\n",
     "labels = []\n",
     "ori_tokens = []\n",
     "# Tokenize one word at a time so each subword piece can inherit that word's label.\n",
     "for i, word in enumerate(textlist):\n",
     "    token = tokenizer.tokenize(word)\n",
     "    print(token)\n",
     "    tokens.extend(token)\n",
     "    label_1 = labellist[i]\n",
     "    ori_tokens.append(word)\n",
     "    for m in range(len(token)):\n",
     "        if m == 0:\n",
     "            labels.append(label_1)\n",
     "        else:\n",
     "            if label_1 == \"O\":\n",
     "                labels.append(\"O\")\n",
     "            else:# original label is B or I (non-O)\n",
     "                labels.append(\"I\")# continuation subword pieces get label I\n",
     "# NOTE(review): label_map has no \"I\" entry, so a word that splits into\n",
     "# several pieces with a non-O label would raise KeyError in the\n",
     "# label_map[labels[i]] lookup below — confirm inputs stay single characters.\n",
     "print(\"labels:\",labels,len(labels))\n",
     "print(\"tokens: \",tokens,len(tokens))\n",
     "print(\"ori_tokens: \",ori_tokens,len(ori_tokens))\n",
     "if len(tokens) >= 50 - 1:\n",
     "        tokens = tokens[0:(50 - 2)]  # -2 leaves room for the [CLS] and [SEP] markers\n",
     "        labels = labels[0:(50 - 2)]\n",
     "        ori_tokens = ori_tokens[0:(50 - 2)]\n",
     "\n",
     "ori_tokens = [\"[CLS]\"] + ori_tokens + [\"[SEP]\"]\n",
     "print(\"ori_tokens: \",ori_tokens,len(ori_tokens))\n",
     "ntokens = []\n",
     "segment_ids = []\n",
     "label_ids = []\n",
     "ntokens.append(\"[CLS]\")\n",
     "segment_ids.append(0)\n",
     "label_ids.append(label_map[\"O\"])# the leading [CLS] token is labelled O\n",
     "print(\"label_ids: \",label_ids)\n",
     "for i, token in enumerate(tokens):\n",
     "    ntokens.append(token)\n",
     "    segment_ids.append(0)\n",
     "    label_ids.append(label_map[labels[i]])\n",
     "\n",
     "ntokens.append(\"[SEP]\")\n",
     "print(\"ntokens:\",ntokens,len(ntokens))\n",
     "segment_ids.append(0)\n",
     "label_ids.append(label_map[\"O\"])# the trailing [SEP] token is labelled O\n",
     "input_ids = tokenizer.convert_tokens_to_ids(ntokens)   \n",
     "\n",
     "input_mask = [1] * len(input_ids)\n",
     "# Pad every sequence out to the fixed length of 50.\n",
     "while len(input_ids) < 50:\n",
     "    input_ids.append(0)\n",
     "    input_mask.append(0)\n",
     "    segment_ids.append(0)\n",
     "    # label value at padding positions is never used\n",
     "    label_ids.append(0)# padding is filled with zeros\n",
     "    ntokens.append(\"**NULL**\")\n",
     "print(\"input_ids: \",input_ids)\n",
     "print(\"input_mask: \",input_mask)\n",
     "print(\"segment_ids: \",segment_ids)\n",
     "print(\"label_ids: \",label_ids)\n",
     "print(\"ntokens: \",ntokens)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "label_map:  {'B-V': 0, 'E-N': 1, 'E-V': 2, 'B-N': 3, 'O': 4}\n",
      "tokenized_input:  {'input_ids': [101, 2141, 7354, 677, 8024, 679, 788, 3791, 1744, 3658, 3696, 712, 721, 5442, 8024, 5445, 684, 5401, 510, 5739, 3658, 3696, 712, 721, 5442, 6963, 2190, 6821, 702, 100, 3418, 3315, 3791, 100, 5770, 3428, 2164, 750, 2523, 1920, 2361, 3307, 511, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}\n",
      "ntokens:  ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]']\n",
      "word_ids:  [None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, None]\n",
      "[4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 2, 4, 4, 3, 1, 4, 4]\n",
      "input_ids:  [101, 2141, 7354, 677, 8024, 679, 788, 3791, 1744, 3658, 3696, 712, 721, 5442, 8024, 5445, 684, 5401, 510, 5739, 3658, 3696, 712, 721, 5442, 6963, 2190, 6821, 702, 100, 3418, 3315, 3791, 100, 5770, 3428, 2164, 750, 2523, 1920, 2361, 3307, 511, 102, 0, 0, 0, 0, 0, 0]\n",
      "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n",
      "segment_ids:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
      "label_ids:  [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 2, 4, 4, 3, 1, 4, 4, 0, 0, 0, 0, 0, 0]\n",
      "ntokens:  ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]', '**NULL**', '**NULL**', '**NULL**', '**NULL**', '**NULL**', '**NULL**']\n"
     ]
    }
   ],
   "source": [
     "# Same feature construction as the previous cell, but using the\n",
     "# tokenizer __call__ API with is_split_into_words=True plus the\n",
     "# word_ids() alignment instead of manual per-word tokenization.\n",
     "textlist=['实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '“', '根', '本', '法', '”', '草', '案', '寄', '予', '很', '大', '希', '望', '。']\n",
     "labellist=['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-V', 'E-V', 'O', 'O', 'B-N', 'E-N', 'O']\n",
     "label_map={'B-V': 0, 'E-N': 1, 'E-V': 2, 'B-N': 3, 'O': 4}\n",
     "print(\"label_map: \",label_map)\n",
     "\n",
     "from transformers import AutoTokenizer\n",
     "tokenizer = AutoTokenizer.from_pretrained(\"bert-base-chinese\")\n",
     "tokenized_input = tokenizer(textlist, is_split_into_words=True)\n",
     "\n",
     "print(\"tokenized_input: \",tokenized_input)\n",
     "ntokens = tokenizer.convert_ids_to_tokens(tokenized_input[\"input_ids\"])\n",
     "print(\"ntokens: \",ntokens)\n",
     "word_ids=tokenized_input.word_ids()\n",
     "print(\"word_ids: \",word_ids)\n",
     "ll=[]\n",
     "for item in word_ids:# source-word index of each token; None for [CLS]/[SEP]\n",
     "    if item is None:\n",
     "        ll.append(label_map[\"O\"])\n",
     "    else:\n",
     "        ll.append(label_map[labellist[item]])\n",
     "print(ll)\n",
     "# Pad every sequence out to the fixed length of 50.\n",
     "while len(tokenized_input[\"input_ids\"]) < 50:\n",
     "    tokenized_input[\"input_ids\"].append(0)\n",
     "    tokenized_input[\"attention_mask\"].append(0)\n",
     "    tokenized_input[\"token_type_ids\"].append(0)\n",
     "    # label value at padding positions is never used\n",
     "    ll.append(0)# padding is filled with zeros\n",
     "    ntokens.append(\"**NULL**\")\n",
     "print(\"input_ids: \",tokenized_input[\"input_ids\"])\n",
     "print(\"input_mask: \",tokenized_input[\"attention_mask\"])\n",
     "print(\"segment_ids: \",tokenized_input[\"token_type_ids\"])\n",
     "print(\"label_ids: \",ll)\n",
     "print(\"ntokens: \",ntokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "label_map:  {'B-V': 0, 'E-N': 1, 'E-V': 2, 'B-N': 3, 'O': 4}\n",
      "tokenized_input:  [101, 2141, 7354, 677, 8024, 679, 788, 3791, 1744, 3658, 3696, 712, 721, 5442, 8024, 5445, 684, 5401, 510, 5739, 3658, 3696, 712, 721, 5442, 6963, 2190, 6821, 702, 100, 3418, 3315, 3791, 100, 5770, 3428, 2164, 750, 2523, 1920, 2361, 3307, 511, 102, 0, 0, 0, 0, 0, 0]\n"
     ]
    },
    {
     "output_type": "error",
     "ename": "TypeError",
     "evalue": "list indices must be integers or slices, not str",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-83-2ede14afc201>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      9\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     10\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"tokenized_input: \"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mtokenized_input\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 11\u001b[1;33m \u001b[0mntokens\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtokenizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconvert_ids_to_tokens\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtokenized_input\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m\"input_ids\"\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     12\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"ntokens: \"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mntokens\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     13\u001b[0m \u001b[0mlabel_ids\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mTypeError\u001b[0m: list indices must be integers or slices, not str"
     ]
    }
   ],
   "source": [
    "textlist=['实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '“', '根', '本', '法', '”', '草', '案', '寄', '予', '很', '大', '希', '望', '。']\n",
    "labellist=['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-V', 'E-V', 'O', 'O', 'B-N', 'E-N', 'O']\n",
    "label_map={'B-V': 0, 'E-N': 1, 'E-V': 2, 'B-N': 3, 'O': 4}\n",
    "print(\"label_map: \",label_map)\n",
    "\n",
    "from transformers import BertTokenizer\n",
    "tokenizer = BertTokenizer.from_pretrained(\"bert-base-chinese\")\n",
    "tokenized_input = tokenizer.encode(textlist, is_split_into_words=True,add_special_tokens = True,padding = \"max_length\",max_length=50)\n",
    "\n",
    "print(\"tokenized_input: \",tokenized_input)\n",
    "ntokens = tokenizer.convert_ids_to_tokens(tokenized_input[\"input_ids\"])\n",
    "print(\"ntokens: \",ntokens)\n",
    "label_ids=[]\n",
    "label_ids.append(label_map[\"O\"])#开始加的是CLS='O'\n",
    "for item in labellist:\n",
    "    label_ids.append(label_map[item])\n",
    "label_ids.append(label_map[\"O\"])#最后加的是CLS='O'\n",
    "while len(label_ids)<50:\n",
    "    label_ids.append(0)\n",
    "    ntokens.append(\"**NULL**\")\n",
    "\n",
    "\n",
    "print(\"input_ids: \",tokenized_input[\"input_ids\"])\n",
    "print(\"input_mask: \",tokenized_input[\"attention_mask\"])\n",
    "print(\"segment_ids: \",tokenized_input[\"token_type_ids\"])\n",
    "print(\"label_ids: \",label_ids)\n",
    "print(\"ntokens: \",ntokens)\n",
    "ori_tokens=[\"[CLS]\"]+textlist+[\"[CLS]\"]\n",
    "print(ori_tokens)"
   ]
  },
  {
    "cell_type": "markdown",
    "metadata": {},
   "source": [
    "ntokens: ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]'] 44\n",
    "input_ids:  [101, 2141, 7354, 677, 8024, 679, 788, 3791, 1744, 3658, 3696, 712, 721, 5442, 8024, 5445, 684, 5401, 510, 5739, 3658, 3696, 712, 721, 5442, 6963, 2190, 6821, 702, 100, 3418, 3315, 3791, 100, 5770, 3428, 2164, 750, 2523, 1920, 2361, 3307, 511, 102, 0, 0, 0, 0, 0, 0]\n",
    "input_mask:  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]\n",
    "segment_ids:  [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
    "label_ids:  [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0, 2, 4, 4, 3, 1, 4, 4, 0, 0, 0, 0, 0, 0]\n",
    "ntokens:  ['[CLS]', '实', '际', '上', '，', '不', '仅', '法', '国', '殖', '民', '主', '义', '者', '，', '而', '且', '美', '、', '英', '殖', '民', '主', '义', '者', '都', '对', '这', '个', '[UNK]', '根', '本', '法', '[UNK]', '草', '案', '寄', '予', '很', '大', '希', '望', '。', '[SEP]', '**NULL**', '**NULL**', '**NULL**', '**NULL**', '**NULL**', '**NULL**']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[CLS] O B-V\n寄 B-N B-N\n予 E-N E-N\n很 O O\n[SEP] O O\n['寄 B-N B-N\\n', '予 E-N E-N\\n', '很 O O\\n', '\\n']\n['寄', 'B-N', 'B-N']\n['予', 'E-N', 'E-N']\n['很', 'O', 'O']\n[]\n"
     ]
    }
   ],
   "source": [
    "all_ori_tokens=[['[CLS]', '寄', '予', '很','[SEP]']]\n",
    "ori_labels=[['O', 'B-N', 'E-N', 'O','O','O','O']]\n",
    "pred_labels=[['B-V', 'B-N', 'E-N', 'O','O']]\n",
    "eval_list=[]\n",
    "for ori_tokens, oril, prel in zip(all_ori_tokens, ori_labels, pred_labels):\n",
    "        for ot, ol, pl in zip(ori_tokens, oril, prel):\n",
    "            print(ot, ol, pl)\n",
    "            if ot in [\"[CLS]\", \"[SEP]\"]:\n",
    "                continue\n",
    "            eval_list.append(f\"{ot} {ol} {pl}\\n\")\n",
    "        eval_list.append(\"\\n\")\n",
    "print(eval_list)\n",
    "i=0\n",
    "for line in eval_list:\n",
    "    line=line.rstrip('\\r\\n')\n",
    "    feature=line.split()\n",
    "    print(feature)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "('B', 'N')\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "m = re.match(r'^([^-]*)-(.*)$', \"B-N\")#B-N 标签是BIO,中间有个横杠\n",
    "if m:\n",
    "    print(m.groups() )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "\nnew:  zy\n"
     ]
    }
   ],
   "source": [
    "import copy\n",
    "line='zy'\n",
    "new=line\n",
    "line = ''\n",
    "print(line)\n",
    "print(\"new: \",new)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}