import os
import pickle

import numpy as np
from tqdm import tqdm


def read_file(file: str):
    """
    Read a UTF-8 text file.
    :param file: path of the file to read
    :return: list of all lines (each keeps its trailing newline)
    """
    with open(file, "r", encoding="utf-8") as handle:
        return list(handle)


def generate_state(state_file="data/train_state.txt", file="data/train.txt"):
    """
    Generate the BMES state (label) file for a pre-segmented corpus.

    Each line of ``file`` holds one sentence with words separated by spaces;
    the matching line of ``state_file`` receives one BMES tag string per word.
    :param state_file: output path of the state file
    :param file: path of the segmented training corpus
    :return: None (does nothing if ``state_file`` already exists)
    """
    if os.path.exists(state_file):
        print("文件已经存在，不再重新生成！")
        return

    # Read the whole training corpus
    train_data = read_file(file)

    with open(state_file, "w", encoding="utf-8") as f:
        for i, data in tqdm(enumerate(train_data), total=len(train_data), desc="generate state:"):
            # Bug fix: strip the line BEFORE splitting. The original split the
            # raw line, so the last word of every sentence kept its trailing
            # "\n" and was tagged one character too long (e.g. "BE" became
            # "BME"), misaligning states against words during training.
            data = data.strip()
            # Tag every non-empty word; a blank line yields an empty state line
            # (also drops the stray trailing space the original left on the
            # final line — consumers strip() each line anyway).
            state = " ".join(make_label(word) for word in data.split(" ") if word)
            # One state line per corpus line; no newline after the last line
            if i != len(train_data) - 1:
                state += "\n"
            f.write(state)


def make_label(word: str):
    """
    Build the BMES tag string for one word: a single character is tagged "S";
    a longer word is tagged "B" (begin), "M" for each middle character, and
    "E" (end).
    :param word: a word or single character
    :return: tag string of the same length as ``word``
    """
    n = len(word)
    return "S" if n == 1 else "".join(["B", "M" * (n - 2), "E"])


class HMM:
    """First-order hidden Markov model for Chinese word segmentation.

    States are the BMES tags (Begin / Middle / Single / End); observations are
    the characters of the corpus. Training counts initial, transition and
    emission frequencies from a pre-segmented corpus and its BMES state file,
    then normalizes them into probabilities.
    """

    def __init__(self, train_file="data/train.txt", state_file="data/train_state.txt"):
        """
        :param train_file: segmented corpus (words separated by spaces)
        :param state_file: matching BMES labels, one line per corpus line
        """
        self.train_data = read_file(train_file)
        self.train_state = read_file(state_file)
        self.state_to_index = {"B": 0, "M": 1, "S": 2, "E": 3}
        self.index_to_state = ["B", "M", "S", "E"]
        self.len_states = len(self.state_to_index)
        # Initial-state counts (turned into probabilities by normalize())
        self.init_matrix = np.zeros(self.len_states)
        # Transition counts between consecutive tags
        self.transfer_matrix = np.zeros((self.len_states, self.len_states))
        # Emission counts per tag; "total" accumulates the per-tag sum used
        # for normalization
        self.emit_matrix = {"B": {"total": 0}, "M": {"total": 0}, "S": {"total": 0}, "E": {"total": 0}}

    def generate_init_matrix(self, state):
        """Count the first tag of a sentence (``state`` is its first tag string)."""
        self.init_matrix[self.state_to_index[state[0]]] += 1

    def generate_transfer_matrix(self, states):
        """Count transitions between every pair of consecutive tags in a sentence."""
        states = "".join(states)
        state_start = states[:-1]
        state_end = states[1:]
        for start, end in zip(state_start, state_end):
            self.transfer_matrix[self.state_to_index[start], self.state_to_index[end]] += 1

    def generate_emit_matrix(self, words, states):
        """Count how often each character is emitted by each tag."""
        for word, state in zip("".join(words), "".join(states)):
            self.emit_matrix[state][word] = self.emit_matrix[state].get(word, 0) + 1
            self.emit_matrix[state]["total"] += 1

    def normalize(self):
        """Turn all counts into probabilities (emissions scaled by 100)."""
        self.init_matrix = self.init_matrix / np.sum(self.init_matrix)
        # Bug fix: a tag that never occurred has a zero row sum, and the
        # original division produced NaNs; leave such rows as all-zero instead.
        row_totals = np.sum(self.transfer_matrix, axis=1, keepdims=True)
        self.transfer_matrix = self.transfer_matrix / np.where(row_totals == 0, 1, row_totals)
        # The x100 scaling matches the original behavior; relative order of
        # probabilities (all Viterbi needs) is unchanged by it.
        self.emit_matrix = {state: {word: t / word_times["total"] * 100 for word, t in word_times.items()
                                    if word != "total"} for state, word_times in self.emit_matrix.items()}

    def train(self):
        """Count the matrices from the corpus, normalize, and cache them on disk."""
        model_path = "model/train_matrix.pkl"
        if os.path.exists(model_path):
            print("加载已经训练好的矩阵数据...")
            # Bug fix: use a context manager instead of leaking the file handle
            with open(model_path, "rb") as f:
                self.init_matrix, self.transfer_matrix, self.emit_matrix = pickle.load(f)
            return
        for words, states in tqdm(zip(self.train_data, self.train_state), total=len(self.train_data)):
            words = words.strip().split(" ")
            states = states.strip().split(" ")
            # Robustness: a blank corpus or state line splits to [""] and would
            # crash generate_init_matrix on states[0][0] — skip it.
            if not words[0] or not states[0]:
                continue
            self.generate_init_matrix(states[0])
            self.generate_transfer_matrix(states)
            self.generate_emit_matrix(words, states)

        self.normalize()
        # Bug fix: the dump failed with FileNotFoundError when the "model"
        # directory did not exist, and the original also leaked the handle.
        os.makedirs(os.path.dirname(model_path), exist_ok=True)
        with open(model_path, "wb") as f:
            pickle.dump([self.init_matrix, self.transfer_matrix, self.emit_matrix], f)


def viterbi(text: str, hmm: HMM):
    """
    Segment ``text`` with the Viterbi algorithm over a trained HMM.
    :param text: sentence to segment (must be non-empty)
    :param hmm: trained model providing init/transition/emission matrices
    :return: the sentence with spaces inserted after every S- or E-tagged char
    """
    states = hmm.index_to_state
    idx = hmm.state_to_index
    emit = hmm.emit_matrix
    trans = hmm.transfer_matrix
    init = hmm.init_matrix

    # Probability of the best path ending in each state, per position
    prob_table = [{s: init[idx[s]] * emit[s].get(text[0], 0) for s in states}]
    # Best full state sequence ending in each state
    best_paths = {s: [s] for s in states}

    for pos in range(1, len(text)):
        ch = text[pos]
        prev = prob_table[-1]
        layer = {}
        next_paths = {}
        # Character never emitted by any state during training?
        unseen = not any(ch in emit[s] for s in ("S", "M", "E", "B"))
        for s in states:
            # Unseen characters get emission probability 1 for every state,
            # so the transition model alone decides the tag.
            emit_p = 1.0 if unseen else emit[s].get(ch, 0)
            # Best predecessor state for reaching state s at this position
            prob, best_prev = max((prev[y] * trans[idx[y], idx[s]] * emit_p, y)
                                  for y in states)
            layer[s] = prob
            next_paths[s] = best_paths[best_prev] + [s]
        prob_table.append(layer)
        best_paths = next_paths

    # Pick the most probable final state and its path
    _, final_state = max((prob_table[-1][s], s) for s in states)
    pieces = []
    # A word ends on an S or E tag — insert a space there
    for ch, tag in zip(text, best_paths[final_state]):
        pieces.append(ch)
        if tag in ("S", "E"):
            pieces.append(" ")
    return "".join(pieces).strip()


if __name__ == '__main__':
    # Build the BMES label file, train (or load) the model, segment a sample.
    generate_state()
    model = HMM()
    model.train()
    sample = "两情若是久长时，又岂在朝朝暮暮"
    segmented = viterbi(sample, model)
    print(segmented)
