{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\t\t\t\t\t\t#导入所需要的库与模块\n",
    "import sklearn_crfsuite\n",
    "from sklearn_crfsuite import metrics\n",
    "import joblib\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Pre_Corpus(object):\t\t\t\t\t#corpus preprocessing for the 1998 People's Daily NER data\n",
    "\tdef __init__(self):\t\t\t\t\t\t#constructor: set file paths and the tag mapping\n",
    "\t\tself.train_data = \"./data/1998_rmrb.txt\" #raw training data file\n",
    "\t\t#output file for the preprocessed data\n",
    "\t\tself.pre_data = \"./data/pre_result.txt\"\n",
    "\t\tself.mark = {u't': u'T', u'nr': u'PER', u'ns': u'LOC', u'nt': u'ORG'}\t\t#POS -> entity tag mapping (time, person, location, organization)\n",
    "\n",
    "\tdef read_data(self, file_path):\t\t#read and return all lines of a text file\n",
    "\t\t#context manager guarantees the handle is closed even if reading fails\n",
    "\t\twith open(file_path, 'r', encoding = 'utf-8') as f:\n",
    "\t\t\tlines = f.readlines()\t\t\t#read every line of the file\n",
    "\t\treturn lines\n",
    "\t#write (bytes) data to a file\n",
    "\tdef write_data(self, data, file_path):\n",
    "\t\twith open(file_path, 'wb') as f:\n",
    "\t\t\tf.write(data)\n",
    "\n",
    "\tdef pre_process(self):\t\t\t\t\t#preprocess the raw corpus line by line\n",
    "\t\tlines = self.read_data(self.train_data)\n",
    "\t\tnew_lines = []\n",
    "\t\tfor line in lines:\t\t\t\t\t#iterate over each line read from the file\n",
    "\t\t\t#strip surrounding whitespace, convert full-width characters to half-width, then split on double spaces\n",
    "\t\t\twords = self.dbc_to_sbc(line.strip()).split(u'  ')\n",
    "\t\t\t#merge consecutive time tokens\n",
    "\t\t\tpre_words = self.merge_time(words)\n",
    "\t\t\t#merge separately tagged surname and given name\n",
    "\t\t\tpre_words = self.merge_name(pre_words)\n",
    "\t\t\t#merge coarse-grained words annotated inside brackets\n",
    "\t\t\tpre_words = self.merge_organ(pre_words)\n",
    "\t\t\t#rejoin with double spaces, dropping the first token (presumably the line's date/id stamp -- verify against the corpus)\n",
    "\t\t\tnew_lines.append('  '.join(pre_words[1:]))\n",
    "\t\t#join all preprocessed lines into one string and write it to the result file\n",
    "\t\tself.write_data(data = '\\n'.join(new_lines).encode('utf-8'), file_path = self.pre_data)\n",
    "\t\treturn new_lines\n",
    "\t#convert full-width (DBC) characters to half-width (SBC)\n",
    "\tdef dbc_to_sbc(self, dbc_str):\n",
    "\t\tsbc_str = \"\"\n",
    "\t\tfor k in dbc_str:\n",
    "\t\t\tunicode = ord(k)\t\t\t\t#code point of the current character\n",
    "\t\t\t#12288 is the code point of the full-width space\n",
    "\t\t\tif unicode == 12288:\n",
    "\t\t\t\tunicode = 32\n",
    "\t\t\t#code points in the full-width printable range\n",
    "\t\t\telif 65374 >= unicode >= 65281:\n",
    "\t\t\t\tunicode -= 65248\n",
    "\t\t\t#append the converted half-width character\n",
    "\t\t\tsbc_str += chr(unicode)\n",
    "\t\treturn sbc_str\n",
    "\t#merge coarse-grained segments annotated with brackets, e.g. [w1/p w2/p]/tag\n",
    "\tdef merge_organ(self, words):\n",
    "\t\tpre_words = []\n",
    "\t\tindex = 0\n",
    "\t\ttemp = u''\n",
    "\t\twhile True:\t\t\t\t#loop until the token list is exhausted\n",
    "\t\t\t#current token, or an empty string once the index runs past the end\n",
    "\t\t\tword = words[index] if index < len(words) else u''\n",
    "\t\t\t#token opens a bracketed segment\n",
    "\t\t\tif u'[' in word:\n",
    "\t\t\t\t#drop the slash+POS suffix and the opening bracket\n",
    "\t\t\t\ttemp += re.sub(pattern = u'/[a-zA-Z]*', repl = u'', string = word.replace(u'[', u''))\n",
    "\t\t\t#token closes the bracketed segment\n",
    "\t\t\telif u']' in word:\n",
    "\t\t\t\tw = word.split(u']')\t\t#split into the word part and the bracket-level tag\n",
    "\t\t\t\ttemp += re.sub(pattern = u'/[a-zA-Z]*', repl = u'', string = w[0])\n",
    "\t\t\t\t#emit the merged segment carrying the bracket-level tag\n",
    "\t\t\t\tpre_words.append(temp + u'/' + w[1])\n",
    "\t\t\t\ttemp = u''\n",
    "\t\t\telif temp:\n",
    "\t\t\t\ttemp += re.sub(pattern = u'/[a-zA-Z]*', repl = u'', string = word)\n",
    "\t\t\telif word:\n",
    "\t\t\t\tpre_words.append(word)\n",
    "\t\t\telse:\n",
    "\t\t\t\tbreak\n",
    "\t\t\tindex += 1\n",
    "\t\treturn pre_words\n",
    "\tdef merge_name(self, words):\t#merge a surname/given-name pair tagged as two /nr tokens\n",
    "\t\tpre_words = []\n",
    "\t\tindex = 0\n",
    "\t\twhile True:\n",
    "\t\t\tword = words[index] if index < len(words) else u''\n",
    "\t\t\t#current token carries the person tag '/nr'\n",
    "\t\t\tif u'/nr' in word:\n",
    "\t\t\t\tnext_index = index + 1\n",
    "\t\t\t\t#next token also carries '/nr'\n",
    "\t\t\t\tif next_index < len(words) and u'/nr' in words[next_index]:\n",
    "\t\t\t\t\t#strip '/nr' from the current token and merge it with the next one\n",
    "\t\t\t\t\tpre_words.append(word.replace(u'/nr', u'') + words[next_index])\n",
    "\t\t\t\t\tindex = next_index\n",
    "\t\t\t\telse:\n",
    "\t\t\t\t\tpre_words.append(word)\n",
    "\t\t\telif word:\n",
    "\t\t\t\tpre_words.append(word)\n",
    "\t\t\telse:\n",
    "\t\t\t\tbreak\n",
    "\t\t\tindex += 1\n",
    "\t\treturn pre_words\n",
    "\tdef merge_time(self, words):\t#merge consecutive tokens tagged '/t' into one time expression\n",
    "\t\tpre_words = []\n",
    "\t\tindex = 0\n",
    "\t\ttemp = u''\n",
    "\t\twhile True:\n",
    "\t\t\tword = words[index] if index < len(words) else u''\n",
    "\t\t\t#current token carries the time tag '/t'\n",
    "\t\t\tif u'/t' in word:\n",
    "\t\t\t\ttemp = temp.replace(u'/t', u'') + word\n",
    "\t\t\telif temp:\n",
    "\t\t\t\tpre_words.append(temp)\n",
    "\t\t\t\tpre_words.append(word)\n",
    "\t\t\t\ttemp = u''\n",
    "\t\t\telif word:\n",
    "\t\t\t\tpre_words.append(word)\n",
    "\t\t\telse:\n",
    "\t\t\t\tbreak\n",
    "\t\t\tindex += 1\n",
    "\t\treturn pre_words\n",
    "\n",
    "\tdef initialize(self):\t\t\t\t\t\t  #read the preprocessed corpus and build the sequences\n",
    "\t\tlines = self.read_data(self.pre_data) #read the preprocessed corpus\n",
    "\t\t#strip each line and split it on double spaces, skipping blank lines\n",
    "\t\twords_list = [line.strip().split('  ') for line in lines if line.strip()]\n",
    "\t\tdel lines\t\t\t\t\t\t\t\t\t\t#free memory\n",
    "\t\tself.init_seq(words_list)\n",
    "\n",
    "\t#map a POS tag to its entity tag\n",
    "\tdef get_tag(self, p):\n",
    "\t\t#look the POS up in the mark mapping\n",
    "\t\tt = self.mark.get(p, None)\n",
    "\t\treturn t if t else u'O'\n",
    "\n",
    "\t#convert an entity tag to BIO format\n",
    "\tdef tag_BIO(self, tag, index):\n",
    "\t\tif index == 0 and tag != u'O':\n",
    "\t\t\treturn u'B_{}'.format(tag)\t  #first character of an entity is B_*\n",
    "\t\telif tag != u'O':\n",
    "\t\t\treturn u'I_{}'.format(tag)\t  #remaining characters of an entity are I_*\n",
    "\t\telse:\n",
    "\t\t\treturn tag\t\t\t\t\t  #non-entity characters are uniformly O\n",
    "\n",
    "\t#remove the prior knowledge carried by entity POS tags\n",
    "\tdef remove_pos(self, pos):\n",
    "\t\t#nr/ns/nt are collapsed to the generic noun tag 'n'; 't' is kept as-is\n",
    "\t\tif pos in self.mark.keys() and pos != u't':\n",
    "\t\t\treturn u'n'\n",
    "\t\telse:\n",
    "\t\t\treturn pos\n",
    "\n",
    "\t#build the character, POS and tag sequences\n",
    "\tdef init_seq(self, words_list):\n",
    "\t\twords_seq = [[word.split(u'/')[0] for word in words] for words in words_list]\t\t#split each token on '/' and take part 1 (the word)\n",
    "\t\tpos_seq = [[word.split(u'/')[1] for word in words] for words in words_list]\t\t#split each token on '/' and take part 2 (the POS)\n",
    "\t\t#map every POS to its entity tag\n",
    "\t\ttag_seq = [[self.get_tag(p) for p in pos] for pos in pos_seq]\n",
    "\t\t#repeat each POS once per character so the POS sequence aligns with the character sequence\n",
    "\t\tself.pos_seq = [[[pos_seq[index][i] for _ in range(len(words_seq[index][i]))] for i in range(len(pos_seq[index]))] for index in range(len(pos_seq))]\n",
    "\t\tself.tag_seq = [[[self.tag_BIO(tag_seq[index][i], w) for w in range(len(words_seq[index][i]))] for i in range(len(tag_seq[index]))] for index in range(len(tag_seq))]\t\t#expand tag_seq to per-character BIO codes\n",
    "\t\t#flatten and clean the per-character POS lists (the comprehension variable pos_seq deliberately shadows the outer name)\n",
    "\t\tself.pos_seq = [[u'un'] + [self.remove_pos(p) for pos in pos_seq for p in pos] + [u'un'] for pos_seq in self.pos_seq]\n",
    "\t\t#flatten the nested per-word tag lists into one list per sentence\n",
    "\t\tself.tag_seq = [[t for tag in tag_seq for t in tag] for tag_seq in self.tag_seq]\n",
    "\t\t#add the <BOS> and <EOS> placeholders around each character sequence\n",
    "\t\tself.word_seq = [[u'<BOS>'] + [w for word in word_seq for w in word] + [u'<EOS>'] for word_seq in words_seq]\n",
    "\n",
    "\tdef deal_data(self):\t\t\t\t\t\t\t#build the CRF features and return them with the tag sequences\n",
    "\t\tword_grams = [self.split_window(word_list) for word_list in self.word_seq]\n",
    "\t\tcharacter = self.get_character(word_grams)\n",
    "\t\treturn character, self.tag_seq\n",
    "\t#slide a fixed-size window over a list\n",
    "\tdef split_window(self, words_list = None, window = 3):\n",
    "\t\twords = []\n",
    "\t\tbegin, end = 0, window\n",
    "\t\t#one window per element after the first\n",
    "\t\tfor _ in range(1, len(words_list)):\n",
    "\t\t\t#stop once the window passes the end of the list\n",
    "\t\t\tif end > len(words_list): break\n",
    "\t\t\t#append the sublist from index begin to end-1\n",
    "\t\t\twords.append(words_list[begin:end])\n",
    "\t\t\tbegin = begin + 1\t\t\t\t\t#advance the window start\n",
    "\t\t\tend = end + 1\t\t\t\t\t\t#advance the window end\n",
    "\t\treturn words\n",
    "\t#extract CRF features from the windowed grams\n",
    "\tdef get_character(self, word_grams):\n",
    "\t\tcharacter, char_list = [], []\t\t\t    #feature list and per-sentence buffer\n",
    "\t\tfor index in range(len(word_grams)):\t    #iterate over each sentence\n",
    "\t\t\tfor i in range(len(word_grams[index])):\n",
    "\t\t\t\tword_gram = word_grams[index][i]  #current 3-gram\n",
    "\t\t\t\t#feature dict: 'w-1' previous char, 'w' current char, 'w+1' next char, plus the two bigrams and a bias\n",
    "\t\t\t\tchar = {u'w-1': word_gram[0], u'w': word_gram[1], u'w+1': word_gram[2], u'w-1:w': word_gram[0] + word_gram[1], u'w:w+1': word_gram[1] + word_gram[2], u'bias': 1.0}\n",
    "\t\t\t\tchar_list.append(char)\n",
    "\t\t\tcharacter.append(char_list)\n",
    "\t\t\tchar_list = []\t\t\t\t\t\t\t  #reset the buffer for the next sentence\n",
    "\t\treturn character"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "预处理后的部分数据：\n",
      " ['迈向/v  充满/v  希望/n  的/u  新/a  世纪/n  ——/w  一九九八年新年/t  讲话/n  (/w  附/v  图片/n  1/m  张/q  )/w']\n",
      "提取的字序列：\n",
      " [['<BOS>', '迈', '向', '充', '满', '希', '望', '的', '新', '世', '纪', '—', '—', '一', '九', '九', '八', '年', '新', '年', '讲', '话', '(', '附', '图', '片', '1', '张', ')', '<EOS>']]\n",
      "提取的词性序列：\n",
      " [['un', 'v', 'v', 'v', 'v', 'n', 'n', 'u', 'a', 'n', 'n', 'w', 'w', 't', 't', 't', 't', 't', 't', 't', 'n', 'n', 'w', 'v', 'n', 'n', 'm', 'q', 'w', 'un']]\n",
      "提取的标注序列：\n",
      " [['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']]\n"
     ]
    }
   ],
   "source": [
    "corpus = Pre_Corpus()\t\t\t\t\t\t\t#instantiate the corpus preprocessor\n",
    "new_lines = corpus.pre_process()\t\t\t\t#preprocess the raw corpus and keep the result\n",
    "print(\"预处理后的部分数据：\\n\", new_lines[:1])\n",
    "corpus.initialize()\t\t\t\t\t\t\t\t#build the character/POS/tag sequences\n",
    "print(\"提取的字序列：\\n\", corpus.word_seq[:1])\n",
    "print(\"提取的词性序列：\\n\", corpus.pos_seq[:1])\n",
    "print(\"提取的标注序列：\\n\", corpus.tag_seq[:1])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CRF(object):\t\t\t\t\t\t\t#CRF wrapper: trains, evaluates and applies a sklearn-crfsuite model\n",
    "\t#constructor: set hyperparameters and prepare the corpus\n",
    "\tdef __init__(self):\n",
    "\t\t#optimization algorithm used to fit the model parameters\n",
    "\t\tself.algorithm = \"lbfgs\"\n",
    "\t\tself.c1 = \"0.1\"\t\t\t\t\t\t#L1/L2 regularization coefficients (converted to float in init_model)\n",
    "\t\tself.c2 = \"0.1\"\n",
    "\t\tself.max_iter = 100\t\t\t\t\t#maximum number of training iterations\n",
    "\t\t#path where the trained model is saved\n",
    "\t\tself.model_path = \"./data/CRF-model.pkl\"\n",
    "\t\tself.corpus = Pre_Corpus()\t\t\t#instantiate the corpus preprocessor\n",
    "\t\tself.corpus.pre_process()\t\t\t#preprocess the corpus\n",
    "\t\tself.corpus.initialize()\t\t\t#build the sequences\n",
    "\t\tself.model = None\n",
    "\n",
    "\tdef init_model(self):\t\t\t\t\t#build the CRF model object\n",
    "\t\talgorithm = self.algorithm\t\t\t#chosen optimization algorithm\n",
    "\t\t#convert the regularization parameters to float\n",
    "\t\tc1 = float(self.c1)\n",
    "\t\tc2 = float(self.c2)\n",
    "\t\tmax_iter = int(self.max_iter)\t\t#convert the iteration count to int\n",
    "\t\t#instantiate the sklearn-crfsuite CRF model\n",
    "\t\tself.model = sklearn_crfsuite.CRF(algorithm = algorithm, c1 = c1, c2 = c2, max_iterations = max_iter, all_possible_transitions = True)\n",
    "\n",
    "\tdef train_model(self):\t\t\t\t\t\t#train the model and return its weighted F1 on the test split\n",
    "\t\tself.init_model()\t\t\t\t\t\t#initialize the model\n",
    "\t\tx, y = self.corpus.deal_data()\t\t#get the features and tag sequences\n",
    "\t\t#the first 500 sentences form the test set, the rest the training set\n",
    "\t\tx_train, y_train = x[500:], y[500:]\n",
    "\t\tx_test, y_test = x[:500], y[:500]\n",
    "\t\tself.model.fit(x_train, y_train)\t\t#fit the model\n",
    "\t\tlabels = list(self.model.classes_)\t#labels learned by the model\n",
    "\t\tlabels.remove('O')\t\t\t\t#exclude the non-entity label 'O' from evaluation\n",
    "\t\t#predict on the test set\n",
    "\t\ty_predict = self.model.predict(x_test)\n",
    "\t\t#weighted F1 score over the entity labels only\n",
    "\t\tf1 = metrics.flat_f1_score(y_test, y_predict, average = 'weighted', labels = labels)\n",
    "\t\tjoblib.dump(self.model, self.model_path)\t\t#persist the trained model\n",
    "\t\treturn f1\n",
    "\n",
    "\tdef predict(self, sentence):\t\t\t\t#run NER on a raw sentence; returns space-separated entities\n",
    "\t\t#convert full-width characters in the sentence to half-width\n",
    "\t\tu_sent = self.corpus.dbc_to_sbc(sentence)\n",
    "\t\t#split the sentence into characters and add the <BOS>/<EOS> placeholders\n",
    "\t\tword_lists = [[u'<BOS>'] + [c for c in u_sent] + [u'<EOS>']]\n",
    "\t\t#slide a 3-character window over the sequence\n",
    "\t\tword_grams = [self.corpus.split_window(word_list) for word_list in word_lists]\n",
    "\t\t#extract the CRF features from the windowed grams\n",
    "\t\tcharacter = self.corpus.get_character(word_grams)\n",
    "\t\tself.model = joblib.load(self.model_path)\t\t#load the saved model\n",
    "\t\t#predict the tag sequence with the trained model\n",
    "\t\ty_predict = self.model.predict(character)\n",
    "\t\tprint(\"句子标注：\\n\", y_predict[0])\n",
    "\t\tentity = u''\n",
    "\t\t#walk over the predicted tag of every character\n",
    "\t\tfor index in range(len(y_predict[0])):\n",
    "\t\t\t#the character belongs to an entity\n",
    "\t\t\tif y_predict[0][index] != u'O':\n",
    "\t\t\t\t#entity type changed relative to the previous character\n",
    "\t\t\t\tif index > 0 and y_predict[0][index][-1] != y_predict[0][index - 1][-1]:\n",
    "\t\t\t\t\tentity += u' '\t\t#separate adjacent entities with a space\n",
    "\t\t\t\t#append the current character to the entity string\n",
    "\t\t\t\tentity += u_sent[index]\n",
    "\t\t\telif entity and entity[-1] != u' ':\t#non-empty guard fixes an IndexError when the sentence starts with 'O'-tagged characters\n",
    "\t\t\t\tentity += u' '\n",
    "\t\treturn entity\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CRF模型的F1值： 0.9275273211022879\n"
     ]
    }
   ],
   "source": [
    "ner = CRF()\t\t\t\t\t#constructor also runs corpus preprocessing\n",
    "f1 = ner.train_model()\t\t#train the CRF and evaluate on the held-out 500 sentences\n",
    "print(\"CRF模型的F1值：\", f1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "句子标注：\n",
      " ['B_LOC', 'I_LOC', 'O', 'B_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'O', 'B_T', 'I_T', 'I_T', 'O', 'O', 'B_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'I_LOC', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'I_T', 'O', 'O', 'O']\n",
      "命名实体识别的结果： 北京  中华人民共和国  2022年2月4日  星期五  北京国家体育场  2022年2月20日 \n"
     ]
    }
   ],
   "source": [
    "def main():\t\t\t\t\t#train a model, then run NER on a sample sentence\n",
    "\tner = CRF()\n",
    "\tner.train_model()\n",
    "\tprint(\"命名实体识别的结果：\", ner.predict(u'北京是中华人民共和国的首都，第24届冬奥会于2022年2月4日(星期五)在北京国家体育场开幕举办，并于2022年2月20日闭幕。'))\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "\tmain()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
