import torch


class HMM:
    """First-order hidden Markov model for sequence tagging.

    Hidden states are tag indices (0..num_states-1); observations are
    word indices (0..num_obs-1). Parameters are estimated by maximum
    likelihood counting with a small additive floor (1e-10) so no
    probability is exactly zero, and decoding runs in log space via the
    Viterbi algorithm.
    """

    def __init__(self, num_states, num_obs):
        # Number of hidden states (distinct tags).
        self.num_states = num_states
        # Number of distinct observation symbols (vocabulary size).
        self.num_obs = num_obs
        # Initial state distribution, shape (N,).
        self.initial_prob = torch.zeros(num_states)
        # Transition matrix, shape (N, N): row i is P(next state | state i).
        self.transition_prob = torch.zeros((num_states, num_states))
        # Emission matrix, shape (N, M): row i is P(word | state i).
        self.emission_prob = torch.zeros((num_states, num_obs))

    def train(self, word_lists, tag_lists, word_to_index, tag_to_index):
        """Estimate initial/transition/emission probabilities by counting.

        word_lists and tag_lists are parallel lists of sequences;
        word_to_index and tag_to_index map symbols to matrix indices.
        """
        assert len(word_lists) == len(tag_lists)

        # --- Initial probabilities: count which tag starts each sequence.
        for tag_list in tag_lists:
            self.initial_prob[tag_to_index[tag_list[0]]] += 1
        # Smoothing: replace zero counts with a tiny value so that the
        # later log() and divisions are safe and every state keeps a
        # non-zero probability; then normalize to sum to 1.
        self.initial_prob[self.initial_prob == 0.] = 1e-10
        self.initial_prob = self.initial_prob / self.initial_prob.sum()

        # --- Transition probabilities: count consecutive tag pairs.
        for tag_list in tag_lists:
            for current_tag, next_tag in zip(tag_list, tag_list[1:]):
                self.transition_prob[tag_to_index[current_tag]][tag_to_index[next_tag]] += 1
        # Smooth unseen transitions to avoid division by / log of zero.
        self.transition_prob[self.transition_prob == 0.] = 1e-10
        # Row-normalize (N x N): each row must sum to 1.
        self.transition_prob = self.transition_prob / \
            self.transition_prob.sum(dim=1, keepdim=True)

        # --- Emission probabilities: count (tag, word) co-occurrences.
        for tag_list, word_list in zip(tag_lists, word_lists):
            assert len(tag_list) == len(word_list)
            for tag, word in zip(tag_list, word_list):
                self.emission_prob[tag_to_index[tag]][word_to_index[word]] += 1
        self.emission_prob[self.emission_prob == 0.] = 1e-10
        self.emission_prob = self.emission_prob / \
            self.emission_prob.sum(dim=1, keepdim=True)

    def viterbi_decode(self, word_list, word_to_index, tag_to_index):
        """Return the most likely tag sequence for word_list (Viterbi).

        Works in log space so long products of small probabilities do
        not underflow; products become sums. Out-of-vocabulary words get
        a uniform emission distribution over all states.
        """
        length = len(word_list)
        if length == 0:
            # Nothing to decode; the original code crashed here.
            return []

        initial_prob = torch.log(self.initial_prob)
        transition_prob = torch.log(self.transition_prob)
        emission_prob = torch.log(self.emission_prob)

        # viterbi[i, t]: best log-probability of any state path ending
        # in state i at step t.
        # backpointer[i, t]: the state at step t-1 on that best path.
        viterbi = torch.zeros((self.num_states, length))
        backpointer = torch.zeros(self.num_states, length).long()

        # Transposed emissions (M x N): row w is the per-state log
        # probability of emitting word w.
        Bt = emission_prob.t()
        # Uniform log-distribution used for out-of-vocabulary words.
        oov_logprob = torch.log(torch.ones(self.num_states) / self.num_states)

        def log_emission(word):
            # Per-state log emission column for `word`; uniform if OOV.
            index = word_to_index.get(word)
            return oov_logprob if index is None else Bt[index]

        # Probabilities are already in log space, so "+" is "* then log".
        viterbi[:, 0] = initial_prob + log_emission(word_list[0])
        backpointer[:, 0] = -1  # sentinel; never followed during backtrace

        # Forward pass, vectorized over the target state:
        # scores[i, j] = viterbi[i, t-1] + log P(j | i); the max over i
        # (dim=0) gives the best predecessor for every state j at once.
        # torch.max breaks ties by the first maximal index, same as the
        # per-column scalar max.
        for step in range(1, length):
            scores = viterbi[:, step - 1].unsqueeze(1) + transition_prob
            max_prob, max_index = torch.max(scores, dim=0)
            viterbi[:, step] = max_prob + log_emission(word_list[step])
            backpointer[:, step] = max_index

        # Backtrace from the best final state.
        best_path_prob, best_last = torch.max(viterbi[:, length - 1], dim=0)
        best_path = [best_last.item()]
        for back_step in range(length - 1, 0, -1):
            best_path.append(backpointer[best_path[-1], back_step].item())

        assert len(best_path) == len(word_list)
        index_to_tag = {index_: tag for tag, index_ in tag_to_index.items()}
        return [index_to_tag[id_] for id_ in reversed(best_path)]


def read_data(file_path):
    """Read a "word tag" per-line corpus split into sentences by blank lines.

    Each non-blank line must contain at least "word tag" separated by
    spaces; a blank line ends the current sentence. Returns two parallel
    lists: sentences of words and sentences of tags.

    Fixes over the original: a sentence at end-of-file without a
    trailing blank line is no longer dropped, and leading/consecutive
    blank lines no longer produce empty sentences.
    """
    word_lists, tag_lists = [], []
    word_list, tag_list = [], []
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            parts = line.strip().split(" ", 2)
            if len(parts) >= 2:
                word_list.append(parts[0])
                tag_list.append(parts[1])
            elif word_list:
                # Blank separator: close the current sentence. Rebinding
                # (instead of clear()) means no copy is needed.
                word_lists.append(word_list)
                tag_lists.append(tag_list)
                word_list, tag_list = [], []
    # Flush the final sentence if the file did not end with a blank line.
    if word_list:
        word_lists.append(word_list)
        tag_lists.append(tag_list)
    return word_lists, tag_lists


def get_dict_from_train_data(file_path):
    """Build a word -> index vocabulary from a "word tag" training file.

    Only lines with at least two space-separated fields contribute; each
    new word is assigned the next consecutive index in first-seen order.
    """
    vocab = {}
    with open(file_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split(" ", 2)
            if len(fields) < 2:
                continue  # blank separator or malformed line
            token = fields[0]
            if token not in vocab:
                # Next free index == current vocabulary size.
                vocab[token] = len(vocab)
    return vocab


# Tag inventory using the BMES scheme: B-/M-/E- mark the beginning,
# middle and end of a multi-character entity, S- a single-character
# entity, and 'O' is the outside (non-entity) tag. Entity categories:
# NAME, CONT, EDU, TITLE, ORG, RACE, PRO, LOC.
tag_to_index = {
    'O': 0,
    'B-NAME': 1, 'M-NAME': 2, 'E-NAME': 3, 'S-NAME': 4,
    'B-CONT': 5, 'M-CONT': 6, 'E-CONT': 7, 'S-CONT': 8,
    'B-EDU': 9, 'M-EDU': 10, 'E-EDU': 11, 'S-EDU': 12,
    'B-TITLE': 13, 'M-TITLE': 14, 'E-TITLE': 15, 'S-TITLE': 16,
    'B-ORG': 17, 'M-ORG': 18, 'E-ORG': 19, 'S-ORG': 20,
    'B-RACE': 21, 'M-RACE': 22, 'E-RACE': 23, 'S-RACE': 24,
    'B-PRO': 25, 'M-PRO': 26, 'E-PRO': 27, 'S-PRO': 28,
    'B-LOC': 29, 'M-LOC': 30, 'E-LOC': 31, 'S-LOC': 32
}
# Windows-style relative paths: training corpus, validation corpus used
# as the test set, and the file the predicted tags are written to.
train_file = 'NER\\NER\\Chinese\\train.txt'
test_file = 'NER\\NER\\Chinese\\validation.txt'
result_file = 'NER\\NER\\example_data\\my.txt'

if __name__ == "__main__":

    # Vocabulary built from the training corpus (word -> column index).
    word_to_index = get_dict_from_train_data(train_file)

    # Model sized by the tag set (states) and vocabulary (observations).
    hmm_model = HMM(len(tag_to_index), len(word_to_index))

    # Fit the HMM on the training sentences.
    train_word_lists, train_tag_lists = read_data(train_file)
    hmm_model.train(train_word_lists, train_tag_lists,
                    word_to_index, tag_to_index)

    # Viterbi-decode every validation sentence.
    test_word_lists, test_tag_lists = read_data(test_file)
    results = [
        hmm_model.viterbi_decode(sentence, word_to_index, tag_to_index)
        for sentence in test_word_lists
    ]

    # Emit "word tag" lines, one blank line between sentences.
    with open(result_file, 'w', encoding='utf-8') as out:
        for sentence, predicted in zip(test_word_lists, results):
            for word, tag in zip(sentence, predicted):
                out.write(word + " " + tag + "\n")
            out.write("\n")