{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "text_state:: 100%|███████████████████████████████████████████████████████| 1112889/1112889 [00:06<00:00, 159671.89it/s]\n",
      "100%|█████████████████████████████████████████████████████████████████████| 1112889/1112889 [00:25<00:00, 43767.76it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最优路径： ['S', 'B', 'M', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E']\n",
      "使用HMM进行分词的结果： 某/研究生/的/课题/是/研究/生命/的/意义/\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import pickle\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "def get_file(file: str):\n",
    "\t\"\"\"Read a UTF-8 text file and return all of its lines as a list.\"\"\"\n",
    "\twith open(file, encoding = \"utf-8\") as f:\n",
    "\t\tlines = f.readlines()\t\t\t\t#newlines are kept, as with readlines()\n",
    "\treturn lines\n",
    "#Convert the space-segmented training corpus into a per-line BMES state file\n",
    "def text_state(state_file = \"data/train_state.txt\", file = \"data/train.txt\"):\n",
    "\ttrain_data = get_file(file)\t\t\t\t#full corpus, one sentence per line\n",
    "\twith open(state_file, \"w\", encoding = \"utf-8\") as f:\n",
    "\t\tfor i, data in tqdm(enumerate(train_data), total = len(train_data), desc = \"text_state:\"):\n",
    "\t\t\tif not data:\t\t\t\t\t\t\t#defensive: skip empty entries\n",
    "\t\t\t\tcontinue\n",
    "\t\t\tstate = \"\"\n",
    "\t\t\t#FIX: split() drops the trailing newline and empty tokens, so the\n",
    "\t\t\t#last word of a line is no longer tagged with its \"\\n\" attached\n",
    "\t\t\tfor word in data.split():\n",
    "\t\t\t\tstate = state + set_state(word) + \" \"\n",
    "\t\t\tstate = state.strip()\t\t\t\t\t#FIX: also drop the trailing space on the last line\n",
    "\t\t\tif i != len(train_data) - 1:\t\t\t#newline between lines, none after the last\n",
    "\t\t\t\tstate = state + \"\\n\"\n",
    "\t\t\tf.write(state)\n",
    "def set_state(word: str):\t\t\t\t#BMES-tag a single word\n",
    "\t\"\"\"Return the BMES tag string for one word: 'S' for a single\n",
    "\tcharacter, otherwise 'B' + 'M' * (len - 2) + 'E'.\"\"\"\n",
    "\tif len(word) == 1:\n",
    "\t\treturn \"S\"\n",
    "\treturn \"B\" + \"M\" * (len(word) - 2) + \"E\"\n",
    "class HMM:\t\t\t\t\t\t\t\t#hidden Markov model for BMES segmentation\n",
    "\tdef __init__(self, train_file = \"data/train.txt\", state_file = \"data/train_state.txt\"):\n",
    "\t\tself.train_data = get_file(train_file)\t\t\t#corpus sentences\n",
    "\t\tself.state_data = get_file(state_file)\t\t\t#aligned per-character BMES tags\n",
    "\t\tself.state_to_index = {\"B\": 0, \"M\": 1, \"S\": 2, \"E\": 3}\n",
    "\t\tself.index_to_state = [\"B\", \"M\", \"S\", \"E\"]\n",
    "\t\tself.len_states = len(self.state_to_index)\n",
    "\t\t#initial state probability vector (raw counts until standar_matrix())\n",
    "\t\tself.init_matrix = np.zeros(self.len_states)\n",
    "\t\t#state transition probability matrix\n",
    "\t\tself.transfer_matrix = np.zeros((self.len_states, self.len_states))\n",
    "\t\t#emission table: per state, character -> count plus a running \"total\"\n",
    "\t\tself.emit_matrix = {\"B\": {\"total\": 0}, \"M\": {\"total\": 0}, \"S\": {\"total\": 0}, \"E\": {\"total\": 0}}\n",
    "\tdef get_initMatrix(self, state):\n",
    "\t\t#count the state of the first character of a sentence\n",
    "\t\tself.init_matrix[self.state_to_index[state[0]]] += 1\n",
    "\tdef get_transMatrix(self, states):\n",
    "\t\t#count every adjacent state pair of the sentence's tag sequence\n",
    "\t\tstates = \"\".join(states)\n",
    "\t\tfor start, end in zip(states[:-1], states[1:]):\n",
    "\t\t\tself.transfer_matrix[self.state_to_index[start], self.state_to_index[end]] += 1\n",
    "\tdef get_emitMatrix(self, words, states):\n",
    "\t\t#count character emissions per state, plus the per-state total\n",
    "\t\tfor word, state in zip(\"\".join(words), \"\".join(states)):\n",
    "\t\t\tself.emit_matrix[state][word] = self.emit_matrix[state].get(word, 0) + 1\n",
    "\t\t\tself.emit_matrix[state][\"total\"] += 1\n",
    "\tdef standar_matrix(self):\t\t\t\t#normalize the count tables (name kept for compatibility)\n",
    "\t\tself.init_matrix = self.init_matrix / np.sum(self.init_matrix)\n",
    "\t\tself.transfer_matrix = self.transfer_matrix / np.sum(self.transfer_matrix, axis=1, keepdims = True)\n",
    "\t\t#relative character frequency per state; the * 100 scaling mirrors the\n",
    "\t\t#original code (presumably to limit float underflow in viterbi -- verify)\n",
    "\t\tself.emit_matrix = {state: {word: t / word_times[\"total\"] * 100 for word, t in word_times.items() if word != \"total\"} for state, word_times in self.emit_matrix.items()}\n",
    "\tdef train_model(self):\t\t\t\t\t#count, normalize and cache the matrices\n",
    "\t\tif os.path.exists(\"data/train_matrix.pkl\"):\n",
    "\t\t\t#FIX: context manager closes the cache file (the original leaked the handle)\n",
    "\t\t\twith open(\"data/train_matrix.pkl\", \"rb\") as f:\n",
    "\t\t\t\tself.init_matrix, self.transfer_matrix, self.emit_matrix = pickle.load(f)\n",
    "\t\t\treturn\n",
    "\t\tfor words, states in tqdm(zip(self.train_data, self.state_data), total = len(self.train_data)):\n",
    "\t\t\twords = words.strip().split(\" \")\n",
    "\t\t\tstates = states.strip().split(\" \")\n",
    "\t\t\tself.get_initMatrix(states[0])\n",
    "\t\t\tself.get_transMatrix(states)\n",
    "\t\t\tself.get_emitMatrix(words, states)\n",
    "\t\tself.standar_matrix()\n",
    "\t\t#FIX: close the cache file after dumping instead of leaking the handle\n",
    "\t\twith open(\"data/train_matrix.pkl\", \"wb\") as f:\n",
    "\t\t\tpickle.dump([self.init_matrix, self.transfer_matrix, self.emit_matrix], f)\n",
    "#Viterbi decoding: most likely BMES path, then cut the text at S/E tags\n",
    "def viterbi(text: str, hmm: HMM):\n",
    "\tif not text:\t\t\t\t\t\t#FIX: empty input would crash on text[0]\n",
    "\t\treturn \"\"\n",
    "\tstates = hmm.index_to_state\n",
    "\temit_m = hmm.emit_matrix\n",
    "\ttrans_m = hmm.transfer_matrix\n",
    "\tinit_m = hmm.init_matrix\n",
    "\tv = [{}]\t\t\t\t\t\t\t#v[t][s]: best path probability ending in state s at position t\n",
    "\tpath = {}\t\t\t\t\t\t\t#best path ending in each state\n",
    "\t#FIX: treat an unseen first character like later unseen characters\n",
    "\t#(emission 1.0) instead of zeroing every initial probability\n",
    "\tnever_seen = all(text[0] not in emit_m[s] for s in states)\n",
    "\tfor s in states:\n",
    "\t\temit_p = 1.0 if never_seen else emit_m[s].get(text[0], 0)\n",
    "\t\tv[0][s] = init_m[hmm.state_to_index[s]] * emit_p\n",
    "\t\tpath[s] = [s]\n",
    "\tfor t in range(1, len(text)):\n",
    "\t\tv.append({})\n",
    "\t\tnew_path = {}\n",
    "\t\t#character absent from every state's emission table?\n",
    "\t\tnever_seen = all(text[t] not in emit_m[s] for s in states)\n",
    "\t\tfor s in states:\n",
    "\t\t\t#unseen characters get emission probability 1 so the transition\n",
    "\t\t\t#probabilities alone decide the tag\n",
    "\t\t\temit_p = 1.0 if never_seen else emit_m[s].get(text[t], 0)\n",
    "\t\t\t#best predecessor state for position t, state s\n",
    "\t\t\t(prob, state) = max([(v[t - 1][y] * trans_m[hmm.state_to_index[y], hmm.state_to_index[s]] * emit_p, y) for y in states])\n",
    "\t\t\tv[t][s] = prob\n",
    "\t\t\tnew_path[s] = path[state] + [s]\n",
    "\t\tpath = new_path\n",
    "\t#the best final state selects the returned path\n",
    "\t(prob, state) = max([(v[len(text) - 1][s], s) for s in states])\n",
    "\tprint(\"最优路径：\", path[state])\n",
    "\tresult = \"\"\n",
    "\tfor t, s in zip(text, path[state]):\t#append a '/' after every word-final tag\n",
    "\t\tresult += t\n",
    "\t\tif s == \"S\" or s == \"E\":\n",
    "\t\t\tresult += \"/\"\n",
    "\treturn result.strip()\n",
    "if __name__ == '__main__':\n",
    "\ttext_state()\t\t\t\t\t#build the BMES state file from the corpus\n",
    "\thmm = HMM()\t\t\t\t\t\t#train (or load) the model matrices\n",
    "\thmm.train_model()\n",
    "\ttext = \"某研究生的课题是研究生命的意义\"\n",
    "\tresult = viterbi(text, hmm)\n",
    "\tprint(\"使用HMM进行分词的结果：\", result)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "text_state:: 100%|███████████████████████████████████████████████████████| 1112889/1112889 [00:07<00:00, 152090.12it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "最优路径： ['S', 'B', 'M', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E']\n",
      "使用HMM进行分词的结果： 某/研究生/的/课题/是/研究/生命/的/意义/\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import pickle\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "def get_file(file: str):\n",
    "\t\"\"\"Read a UTF-8 text file and return all of its lines as a list.\"\"\"\n",
    "\twith open(file, encoding = \"utf-8\") as f:\n",
    "\t\tlines = f.readlines()\t\t\t\t#newlines are kept, as with readlines()\n",
    "\treturn lines\n",
    "#Convert the space-segmented training corpus into a per-line BMES state file\n",
    "def text_state(state_file = \"data/train_state.txt\", file = \"data/train.txt\"):\n",
    "\ttrain_data = get_file(file)\t\t\t\t#full corpus, one sentence per line\n",
    "\twith open(state_file, \"w\", encoding = \"utf-8\") as f:\n",
    "\t\tfor i, data in tqdm(enumerate(train_data), total = len(train_data), desc = \"text_state:\"):\n",
    "\t\t\tif not data:\t\t\t\t\t\t\t#defensive: skip empty entries\n",
    "\t\t\t\tcontinue\n",
    "\t\t\tstate = \"\"\n",
    "\t\t\t#FIX: split() drops the trailing newline and empty tokens, so the\n",
    "\t\t\t#last word of a line is no longer tagged with its \"\\n\" attached\n",
    "\t\t\tfor word in data.split():\n",
    "\t\t\t\tstate = state + set_state(word) + \" \"\n",
    "\t\t\tstate = state.strip()\t\t\t\t\t#FIX: also drop the trailing space on the last line\n",
    "\t\t\tif i != len(train_data) - 1:\t\t\t#newline between lines, none after the last\n",
    "\t\t\t\tstate = state + \"\\n\"\n",
    "\t\t\tf.write(state)\n",
    "def set_state(word: str):\t\t\t\t#BMES-tag a single word\n",
    "\t\"\"\"Return the BMES tag string for one word: 'S' for a single\n",
    "\tcharacter, otherwise 'B' + 'M' * (len - 2) + 'E'.\"\"\"\n",
    "\tif len(word) == 1:\n",
    "\t\treturn \"S\"\n",
    "\treturn \"B\" + \"M\" * (len(word) - 2) + \"E\"\n",
    "class HMM:\t\t\t\t\t\t\t\t#hidden Markov model for BMES segmentation\n",
    "\tdef __init__(self, train_file = \"data/train.txt\", state_file = \"data/train_state.txt\"):\n",
    "\t\tself.train_data = get_file(train_file)\t\t\t#corpus sentences\n",
    "\t\tself.state_data = get_file(state_file)\t\t\t#aligned per-character BMES tags\n",
    "\t\tself.state_to_index = {\"B\": 0, \"M\": 1, \"S\": 2, \"E\": 3}\n",
    "\t\tself.index_to_state = [\"B\", \"M\", \"S\", \"E\"]\n",
    "\t\tself.len_states = len(self.state_to_index)\n",
    "\t\t#initial state probability vector (raw counts until standar_matrix())\n",
    "\t\tself.init_matrix = np.zeros(self.len_states)\n",
    "\t\t#state transition probability matrix\n",
    "\t\tself.transfer_matrix = np.zeros((self.len_states, self.len_states))\n",
    "\t\t#emission table: per state, character -> count plus a running \"total\"\n",
    "\t\tself.emit_matrix = {\"B\": {\"total\": 0}, \"M\": {\"total\": 0}, \"S\": {\"total\": 0}, \"E\": {\"total\": 0}}\n",
    "\tdef get_initMatrix(self, state):\n",
    "\t\t#count the state of the first character of a sentence\n",
    "\t\tself.init_matrix[self.state_to_index[state[0]]] += 1\n",
    "\tdef get_transMatrix(self, states):\n",
    "\t\t#count every adjacent state pair of the sentence's tag sequence\n",
    "\t\tstates = \"\".join(states)\n",
    "\t\tfor start, end in zip(states[:-1], states[1:]):\n",
    "\t\t\tself.transfer_matrix[self.state_to_index[start], self.state_to_index[end]] += 1\n",
    "\tdef get_emitMatrix(self, words, states):\n",
    "\t\t#count character emissions per state, plus the per-state total\n",
    "\t\tfor word, state in zip(\"\".join(words), \"\".join(states)):\n",
    "\t\t\tself.emit_matrix[state][word] = self.emit_matrix[state].get(word, 0) + 1\n",
    "\t\t\tself.emit_matrix[state][\"total\"] += 1\n",
    "\tdef standar_matrix(self):\t\t\t\t#normalize the count tables (name kept for compatibility)\n",
    "\t\tself.init_matrix = self.init_matrix / np.sum(self.init_matrix)\n",
    "\t\tself.transfer_matrix = self.transfer_matrix / np.sum(self.transfer_matrix, axis=1, keepdims = True)\n",
    "\t\t#relative character frequency per state; the * 100 scaling mirrors the\n",
    "\t\t#original code (presumably to limit float underflow in viterbi -- verify)\n",
    "\t\tself.emit_matrix = {state: {word: t / word_times[\"total\"] * 100 for word, t in word_times.items() if word != \"total\"} for state, word_times in self.emit_matrix.items()}\n",
    "\tdef train_model(self):\t\t\t\t\t#count, normalize and cache the matrices\n",
    "\t\tif os.path.exists(\"data/train_matrix.pkl\"):\n",
    "\t\t\t#FIX: context manager closes the cache file (the original leaked the handle)\n",
    "\t\t\twith open(\"data/train_matrix.pkl\", \"rb\") as f:\n",
    "\t\t\t\tself.init_matrix, self.transfer_matrix, self.emit_matrix = pickle.load(f)\n",
    "\t\t\treturn\n",
    "\t\tfor words, states in tqdm(zip(self.train_data, self.state_data), total = len(self.train_data)):\n",
    "\t\t\twords = words.strip().split(\" \")\n",
    "\t\t\tstates = states.strip().split(\" \")\n",
    "\t\t\tself.get_initMatrix(states[0])\n",
    "\t\t\tself.get_transMatrix(states)\n",
    "\t\t\tself.get_emitMatrix(words, states)\n",
    "\t\tself.standar_matrix()\n",
    "\t\t#FIX: close the cache file after dumping instead of leaking the handle\n",
    "\t\twith open(\"data/train_matrix.pkl\", \"wb\") as f:\n",
    "\t\t\tpickle.dump([self.init_matrix, self.transfer_matrix, self.emit_matrix], f)\n",
    "#Viterbi decoding: most likely BMES path, then cut the text at S/E tags\n",
    "def viterbi(text: str, hmm: HMM):\n",
    "\tif not text:\t\t\t\t\t\t#FIX: empty input would crash on text[0]\n",
    "\t\treturn \"\"\n",
    "\tstates = hmm.index_to_state\n",
    "\temit_m = hmm.emit_matrix\n",
    "\ttrans_m = hmm.transfer_matrix\n",
    "\tinit_m = hmm.init_matrix\n",
    "\tv = [{}]\t\t\t\t\t\t\t#v[t][s]: best path probability ending in state s at position t\n",
    "\tpath = {}\t\t\t\t\t\t\t#best path ending in each state\n",
    "\t#FIX: treat an unseen first character like later unseen characters\n",
    "\t#(emission 1.0) instead of zeroing every initial probability\n",
    "\tnever_seen = all(text[0] not in emit_m[s] for s in states)\n",
    "\tfor s in states:\n",
    "\t\temit_p = 1.0 if never_seen else emit_m[s].get(text[0], 0)\n",
    "\t\tv[0][s] = init_m[hmm.state_to_index[s]] * emit_p\n",
    "\t\tpath[s] = [s]\n",
    "\tfor t in range(1, len(text)):\n",
    "\t\tv.append({})\n",
    "\t\tnew_path = {}\n",
    "\t\t#character absent from every state's emission table?\n",
    "\t\tnever_seen = all(text[t] not in emit_m[s] for s in states)\n",
    "\t\tfor s in states:\n",
    "\t\t\t#unseen characters get emission probability 1 so the transition\n",
    "\t\t\t#probabilities alone decide the tag\n",
    "\t\t\temit_p = 1.0 if never_seen else emit_m[s].get(text[t], 0)\n",
    "\t\t\t#best predecessor state for position t, state s\n",
    "\t\t\t(prob, state) = max([(v[t - 1][y] * trans_m[hmm.state_to_index[y], hmm.state_to_index[s]] * emit_p, y) for y in states])\n",
    "\t\t\tv[t][s] = prob\n",
    "\t\t\tnew_path[s] = path[state] + [s]\n",
    "\t\tpath = new_path\n",
    "\t#the best final state selects the returned path\n",
    "\t(prob, state) = max([(v[len(text) - 1][s], s) for s in states])\n",
    "\tprint(\"最优路径：\", path[state])\n",
    "\tresult = \"\"\n",
    "\tfor t, s in zip(text, path[state]):\t#append a '/' after every word-final tag\n",
    "\t\tresult += t\n",
    "\t\tif s == \"S\" or s == \"E\":\n",
    "\t\t\tresult += \"/\"\n",
    "\treturn result.strip()\n",
    "if __name__ == '__main__':\n",
    "\ttext_state()\t\t\t\t\t#build the BMES state file from the corpus\n",
    "\thmm = HMM()\t\t\t\t\t\t#train (or load) the model matrices\n",
    "\thmm.train_model()\n",
    "\ttext = \"某研究生的课题是研究生命的意义\"\n",
    "\tresult = viterbi(text, hmm)\n",
    "\tprint(\"使用HMM进行分词的结果：\", result)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
