import numpy as np
from tqdm import tqdm   # 进度条可视化
from NER.check import check  # 导入自定义的check模块

# Tag sets for the two supported corpora.
# NOTE: list order matters — init() assigns tag ids by position in these lists.
# English: CoNLL-style BIO tags (B-/I- prefix + entity type).
sorted_labels_eng = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"]

# Chinese: BMES-style tags (Begin / Middle / End / Single + entity type).
sorted_labels_chn = [
    'O',
    'B-NAME', 'M-NAME', 'E-NAME', 'S-NAME',
    'B-CONT', 'M-CONT', 'E-CONT', 'S-CONT',
    'B-EDU', 'M-EDU', 'E-EDU', 'S-EDU',
    'B-TITLE', 'M-TITLE', 'E-TITLE', 'S-TITLE',
    'B-ORG', 'M-ORG', 'E-ORG', 'S-ORG',
    'B-RACE', 'M-RACE', 'E-RACE', 'S-RACE',
    'B-PRO', 'M-PRO', 'E-PRO', 'S-PRO',
    'B-LOC', 'M-LOC', 'E-LOC', 'S-LOC'
]


def init(language='English', mode='train'):
    """Load a whitespace-separated NER corpus and build vocab/tag mappings.

    Reads ``./NER/<language>/<mode>.txt`` where each non-empty line is
    ``"<token> <tag>"`` and blank lines separate sentences.

    Args:
        language: Corpus name. ``"English"`` selects the BIO tag set,
            anything else selects the Chinese BMES tag set.
        mode: File stem to read, e.g. ``"train"``.

    Returns:
        Tuple ``(sort_labels, word2id, sentences, tag_list)`` where
        ``sentences`` is a list of token lists and ``tag_list`` holds the
        parallel lists of integer tag ids.
    """
    sort_labels = sorted_labels_eng if language == "English" else sorted_labels_chn

    # Tag name -> integer id, in the fixed order of the label set.
    tag2id = {label: idx for idx, label in enumerate(sort_labels)}

    word2id = {}   # token -> integer id, in first-seen order
    sentences = []
    sentence = []
    tags = []
    tag_list = []

    # "with" guarantees the handle is closed even on error (the original
    # leaked it), and iterating the file replaces the 10M-line sentinel loop.
    with open('./NER/' + language + '/' + mode + '.txt', 'r', encoding='utf-8') as f:
        for raw in f:
            # rstrip('\n') is safer than raw[:-1]: it doesn't eat the last
            # character of a final line that has no trailing newline.
            s = raw.rstrip('\n')
            if s != '':
                word, tag = s.split(' ')
                if word not in word2id:
                    word2id[word] = len(word2id)
                sentence.append(word)
                if tag not in tag2id:
                    print(tag)  # surface an unexpected tag before the KeyError below
                tags.append(tag2id[tag])
            elif sentence:
                # Blank line ends the current sentence.
                sentences.append(sentence.copy())
                tag_list.append(tags.copy())
                sentence.clear()
                tags.clear()

    # Flush the final sentence if the file doesn't end with a blank line.
    if sentence:
        sentences.append(sentence.copy())
        tag_list.append(tags.copy())

    return sort_labels, word2id, sentences, tag_list


def build(sort_labels, word2id, sentences, tag_list):
    """Estimate HMM parameters (in log2 space) from tagged sentences.

    Args:
        sort_labels: Ordered tag names; defines the state index space.
        word2id: Token -> row index for the emission matrix.
        sentences: List of token lists.
        tag_list: Parallel list of tag-id lists (non-empty per sentence).

    Returns:
        Tuple ``(A, B, Pi)``: log2 transition matrix ``[tag, tag]``,
        log2 emission matrix ``[word, tag]``, and log2 initial-state
        vector ``[tag]``.
    """
    n_tags = len(sort_labels)
    n_words = len(word2id)

    A = np.zeros((n_tags, n_tags))    # transition counts
    B = np.zeros((n_words, n_tags))   # emission counts
    Pi = np.zeros(n_tags)             # initial-state counts

    # Count initial states and tag-to-tag transitions.
    for tags in tag_list:
        Pi[tags[0]] += 1
        for prev_tag, next_tag in zip(tags, tags[1:]):
            A[prev_tag][next_tag] += 1

    # Count word emissions per tag.
    for words, tags in zip(sentences, tag_list):
        for word, tag in zip(words, tags):
            B[word2id[word]][tag] += 1

    # Additive smoothing: give every unseen event a small pseudo-count.
    A[A == 0] += 5e-2
    B[B == 0] += 5e-2
    Pi[Pi == 0] += 5e-2

    # Normalize to probabilities: A row-wise P(next | prev),
    # B column-wise P(word | tag), Pi to a single distribution.
    A = A / A.sum(axis=1, keepdims=True)
    B = B / B.sum(axis=0, keepdims=True)
    Pi = Pi / Pi.sum()

    # Work in log2 space so Viterbi can add scores without underflow.
    return np.log2(A), np.log2(B), np.log2(Pi)


def viterbi(word2id, A, B, Pi, sentence):
    """Return the most likely tag-id sequence for ``sentence`` (Viterbi).

    Args:
        word2id: Token -> emission-matrix row index.
        A: log2 transition matrix, ``A[i, j] = log2 P(tag j | tag i)``.
        B: log2 emission matrix, ``B[w, t] = log2 P(word w | tag t)``.
        Pi: log2 initial-state probabilities, length = number of tags.
        sentence: Non-empty list of tokens to tag.

    Returns:
        List of int tag ids, one per token.
    """
    sentence_len = len(sentence)
    tag_num = len(Pi)
    dp = np.zeros((tag_num, sentence_len))               # best log2-score ending in tag t at position i
    pre = np.zeros((tag_num, sentence_len), dtype=int)   # backpointers (int dtype: no per-lookup cast needed)

    def _emission(word):
        """log2 emission scores for one token; uniform for OOV words."""
        wordid = word2id.get(word)
        if wordid is None:
            # np.log2 (not np.log) keeps the base consistent with A/B/Pi.
            # The value is constant across tags, so it never changes the path.
            return np.full(tag_num, np.log2(1.0 / tag_num))
        return B[wordid]

    dp[:, 0] = Pi + _emission(sentence[0])
    pre[:, 0] = -1  # no predecessor at the first position

    # Dynamic program, vectorized over all (prev, cur) tag pairs instead of
    # the original per-tag Python list comprehensions.
    for idx in range(1, sentence_len):
        # scores[i, j] = dp[i, idx-1] + A[i, j]
        scores = dp[:, idx - 1][:, None] + A
        pre[:, idx] = np.argmax(scores, axis=0)
        dp[:, idx] = np.max(scores, axis=0) + _emission(sentence[idx])

    # Backtrack from the best final state.
    p = int(np.argmax(dp[:, sentence_len - 1]))
    path = [p]
    for idx in range(sentence_len - 1, 0, -1):
        p = int(pre[p, idx])
        path.append(p)
    path.reverse()
    return path


# ---- Script entry: train the HMM, tag the evaluation split, score it. ----
language = 'English'
mode = 'train'

# Build vocab/tag mappings and load the training sentences.
sort_labels, word2id, train_sentences, train_tag_list = init(language, 'train')

# Estimate HMM parameters (log2 transition / emission / initial probs).
A, B, Pi = build(sort_labels, word2id, train_sentences, train_tag_list)

# Load the split to evaluate (here the training file itself, since mode='train').
_, _, validation_sentences, test_tag_list = init(language, mode)

# Tag every sentence with Viterbi and write "<token> <tag>" lines,
# sentences separated by blank lines (same format as the corpus files).
mypath = './NER/example_data/example_my_result.txt'
# "with" closes the output file even if tagging raises mid-loop
# (the original only closed it on the success path).
with open(mypath, 'w', encoding='utf-8') as f:
    for sentence in tqdm(validation_sentences):
        my_tag_list = viterbi(word2id, A, B, Pi, sentence)
        # Explicit raise instead of assert: assert is stripped under `python -O`.
        if len(my_tag_list) != len(sentence):
            raise ValueError('prediction/sentence length mismatch')
        for word, tag_id in zip(sentence, my_tag_list):
            f.write(word + ' ' + sort_labels[tag_id] + '\n')
        f.write('\n')

# Compare the predictions against the gold file.
check(language, 'NER/' + language + '/' + mode + '.txt', mypath)
