import json
import re

import numpy as np
import scipy
from scipy.sparse import csr_matrix
from sklearn_crfsuite import CRF

from evaluating import Metrics

"""
README
1.由于使用逻辑回归时，有些标签没有被预测到，因此老师的评测函数会报除以0的错误，因此我为老师的评测函数增加了一个极小的分母（1e-10），以防止除以0
2.由于在预测实体标签时，可能出现类似于 O, M-NAME, M-NAME, E-NAME的情况，因此我增加了对此种情况的修正函数check_is_valid
3.我自己实现的实体级evaluate函数中，也采用了与1.的相同的处理，并且由于需要测试该函数，而浮点相等的判断不能精确到太多位数，因此对p，r，f1都保留5位小数。
"""


def data_build(file_name: str, make_vocab=True):
    """
    Read a character-level NER file from the ./ner_char_data directory.

    Each line holds "<char> <tag>" separated by whitespace; sentences are
    separated by blank lines.

    :param file_name: file name inside ./ner_char_data/
    :param make_vocab: whether to also build token -> id vocabularies
    :return: (word_lists, tag_lists) and, when make_vocab is True, additionally
             (word2id, tag2id); word_lists/tag_lists are 2-D lists with one row
             per sentence and one column per character
    """
    word_lists = []
    tag_lists = []
    with open('./ner_char_data/' + file_name, 'r', encoding='utf-8') as file_read:
        word_list = []
        tag_list = []
        for line in file_read:
            if line != '\n':
                word, tag = line.strip('\n').split()
                word_list.append(word)
                tag_list.append(tag)
            else:
                word_lists.append(word_list)
                tag_lists.append(tag_list)
                word_list = []
                tag_list = []
        # BUG FIX: flush the final sentence when the file does not end with a
        # blank line -- it used to be silently dropped.
        if word_list:
            word_lists.append(word_list)
            tag_lists.append(tag_list)

    if make_vocab:  # idiomatic truth test instead of "== True"
        word2id = {}
        for word_list in word_lists:
            for word in word_list:
                if word not in word2id:
                    word2id[word] = len(word2id)
        tag2id = {}
        for tag_list in tag_lists:
            for tag in tag_list:
                if tag not in tag2id:
                    tag2id[tag] = len(tag2id)
        return word_lists, tag_lists, word2id, tag2id
    return word_lists, tag_lists


# TODO 需要提交的第[1]个代码。在格子in[3]中
# TODO 需要提交的第[1]个代码。在格子in[3]中
def data_build_clue(file_name: str, make_vocab=True):
    """
    Read a CLUE-style JSON-lines NER file from ./ner_clue_data and convert the
    span annotations to per-character BMES-O tags.

    Each input line is a JSON object with a 'text' string and a 'label' dict of
    the form {entity_type: {entity_text: [[start, end], ...]}} (end inclusive).

    :param file_name: file name inside ./ner_clue_data/
    :param make_vocab: whether to also build token -> id vocabularies
    :return: (word_lists, tag_lists) and, when make_vocab is True, additionally
             (word2id, tag2id); word_lists/tag_lists are 2-D lists with one row
             per sentence and one column per character
    """
    word_lists = []
    tag_lists = []
    with open('./ner_clue_data/' + file_name, 'r', encoding='utf-8') as file_read:
        for line in file_read:
            result = json.loads(line)
            text = result['text']
            labels = result['label']
            word_list = list(text)
            tag_list = ['O'] * len(text)  # default: outside any entity
            for label in labels:
                for term in labels[label]:
                    for index in labels[label][term]:
                        if index[0] == index[1]:
                            tag_list[index[0]] = 'S-{}'.format(label)  # single-character entity
                        else:
                            tag_list[index[0]] = 'B-{}'.format(label)
                            for i in range(index[0] + 1, index[1]):
                                tag_list[i] = 'M-{}'.format(label)
                            tag_list[index[1]] = 'E-{}'.format(label)
            word_lists.append(word_list)
            tag_lists.append(tag_list)
    assert len(word_lists) == len(tag_lists)
    if make_vocab:  # idiomatic truth test instead of "== True"
        word2id = {}
        for word_list in word_lists:
            for word in word_list:
                if word not in word2id:
                    word2id[word] = len(word2id)
        tag2id = {}
        for tag_list in tag_lists:
            for tag in tag_list:
                if tag not in tag2id:
                    tag2id[tag] = len(tag2id)
        return word_lists, tag_lists, word2id, tag2id
    return word_lists, tag_lists


def viterbi(word_list, word2id, tag2id):
    """
    Viterbi decoding for the HMM: find the most probable tag sequence for one
    sentence (a sequence of characters).

    Viterbi is dynamic programming over the HMM decoding problem: it finds the
    maximum-probability path, and a path corresponds to a tag sequence.

    Relies on the module-level globals A (transition matrix), B (emission
    matrix), Pi (initial distribution) and N (number of states), which the
    caller must have built beforehand.

    :param word_list: one sentence as a list of characters
    :param word2id: character -> observation-id mapping
    :param tag2id: tag -> state-id mapping
    :return: the decoded tag list, same length as word_list
    """
    # Problem: over a long chain, multiplying many tiny probabilities underflows.
    # Solution: work with log-probabilities, mapping tiny values to large
    # negative numbers; products then become simple sums.
    ALog = np.log(A)
    BLog = np.log(B)
    PiLog = np.log(Pi)

    # Initialize the Viterbi matrix with shape [n_states, seq_len].
    # viterbi[i, j] is the maximum probability over all partial tag sequences
    # (i_1, i_2, ..., i_j) whose j-th tag is i.
    seq_len = len(word_list)
    viterbi = np.zeros(shape=(N, seq_len), dtype=float)
    # backpointer has the same shape as viterbi.
    # backpointer[i, j] stores the id of tag j-1 on the best path whose j-th
    # tag is i; decoding walks backwards through it to recover the best path.
    # NOTE(review): dtype=float only works because entries are cast with int()
    # below; an integer dtype would be more natural.
    backpointer = np.zeros(shape=(N, seq_len), dtype=float)

    # Pi[i] is the probability that the first tag is i.
    # Bt[word_id] gives, for character word_id, the per-tag emission probabilities.
    # A.t()[tag_id] gives the probability of each state transitioning into tag_id.

    # First step:
    start_wordid = word2id.get(word_list[0], None)
    Bt = BLog.T
    if start_wordid is None:
        # Unknown character: assume a uniform distribution over states.
        bt = np.log(np.ones(shape=N, dtype=float) / N)
    else:
        bt = Bt[start_wordid]
    viterbi[:, 0] = PiLog + bt
    backpointer[:, 0] = -1

    # Recurrence: viterbi[tag_id, step] = max(viterbi[:, step-1] * A.t()[tag_id] * Bt[word])
    # where word is the character at this step; iterate over the remaining steps.
    for step in range(1, seq_len):
        wordid = word2id.get(word_list[step], None)
        # Handle characters missing from the vocabulary:
        # bt is the per-state emission distribution for this character.
        if wordid is None:
            # Unknown character: assume a uniform distribution over states.
            bt = np.log(np.ones(N) / N)
        else:
            bt = Bt[wordid]  # otherwise read it from the emission matrix
        for tag_id in range(len(tag2id)):
            max_prob = np.max(a=viterbi[:, step - 1] + ALog[:, tag_id], axis=0)
            max_id = np.argmax(a=viterbi[:, step - 1] + ALog[:, tag_id], axis=0)
            viterbi[tag_id, step] = max_prob + bt[tag_id]
            backpointer[tag_id, step] = max_id

    # Termination: the largest entry of viterbi[:, seq_len-1] is the
    # probability of the best full path.
    best_path_prob = np.max(a=viterbi[:, seq_len - 1], axis=0)
    best_path_pointer = np.argmax(a=viterbi[:, seq_len - 1], axis=0)

    # Backtrack to recover the best path.
    best_path_pointer = int(best_path_pointer)
    best_path = [best_path_pointer]

    for back_step in range(seq_len - 1, 0, -1):
        best_path_pointer = backpointer[best_path_pointer, back_step]
        best_path_pointer = int(best_path_pointer)
        best_path.append(best_path_pointer)

    # Convert the sequence of tag ids back to tags.
    assert len(best_path) == len(word_list)
    id2tag = dict((id_, tag) for tag, id_ in tag2id.items())
    tag_list = [id2tag[id_] for id_ in reversed(best_path)]

    return tag_list


def word2features(sent, i):
    """Build the CRF feature dict for the i-th character of a sentence.

    Features: the character itself, its two neighbours (with boundary markers
    <s> / </s>), their pairwise and triple concatenations, plus a bias term.
    """
    word = sent[i]
    prev_word = sent[i - 1] if i > 0 else "<s>"
    next_word = sent[i + 1] if i < len(sent) - 1 else "</s>"
    return {
        'w': word,
        'w-1': prev_word,
        'w+1': next_word,
        'w-1:w': prev_word + word,
        'w:w+1': word + next_word,
        'w-1:w:w+1': prev_word + word + next_word,
        'bias': 1,
    }


def sent2features(sent):
    """Extract the CRF feature dict for every position of a sentence."""
    return [word2features(sent, idx) for idx, _ in enumerate(sent)]


# ## 3.4  CRF模型的实现
class CRFModel(object):
    def __init__(self,
                 algorithm='lbfgs',
                 c1=0.1,
                 c2=0.1,
                 max_iterations=100,
                 all_possible_transitions=False
                 ):
        self.model = CRF(algorithm=algorithm,
                         c1=c1,
                         c2=c2,
                         max_iterations=max_iterations,
                         all_possible_transitions=all_possible_transitions
                         )

    def train(self, sentences, tag_lists):
        features = [sent2features(s) for s in sentences]
        self.model.fit(features, tag_lists)

    def test(self, sentences):
        features = [sent2features(s) for s in sentences]
        pred_tag_lists = self.model.predict(features)
        return pred_tag_lists


# TODO 需要提交的第[2]个代码。在格子in[3]中
# 4. 实现实体级别评价程序
# 请在下面的Cell中实现一个命名实体识别的评价程序，该程序可以在实体级别计算测试结果的每种实体以及总体的Precision、Recall和F1值。（3分）

def evaluate(golden_tags: [], predict_tags: [], check=False) -> {}:
    """
    Entity-level evaluation for B/M/E/O tag sequences (S tags included).

    :param golden_tags: gold tags; 2-D list, one row per sentence
    :param predict_tags: predicted tags; 2-D list with the same shape as
                         golden_tags (same row count and per-row lengths)
    :param check: whether to repair malformed predicted entity tag runs first
    :return: dict keyed by entity name (plus 'avg/total'), each value a dict
             with 'precision', 'recall' and 'f1' rounded to 5 decimal places
    """
    from evaluating import flatten_lists
    from collections import Counter
    if check:
        predict_tags = check_is_valid(predict_tags)
    golden_tags = flatten_lists(golden_tags)
    predict_tags = flatten_lists(predict_tags)
    if len(golden_tags) != len(predict_tags):
        # BUG FIX: raise an informative error instead of a bare Exception
        raise ValueError('golden_tags and predict_tags must have the same length')
    # Entity names present in the gold data ('O' carries no entity name).
    tag_set = set()
    for tag in golden_tags:
        if tag != get_full_tag(tag):
            tag_set.add(get_full_tag(tag))
    # Count entities whose every position was predicted correctly.
    correct_dict = {}
    is_stared = False
    present_tag = ''
    for i in range(len(golden_tags)):
        if golden_tags[i] == predict_tags[i]:
            same_tag = golden_tags[i]
            if same_tag.startswith('S'):  # single-character entity
                full_tag = get_full_tag(same_tag)
                correct_dict[full_tag] = correct_dict.get(full_tag, 0) + 1
            if same_tag.startswith('M'):
                if present_tag != get_full_tag(same_tag):
                    is_stared = False
                continue
            if same_tag.startswith('B'):
                present_tag = get_full_tag(same_tag)
                is_stared = True
            if same_tag.startswith('E'):
                full_tag = get_full_tag(same_tag)
                if is_stared is True and full_tag == present_tag:
                    correct_dict[full_tag] = correct_dict.get(full_tag, 0) + 1
                is_stared = False
            if same_tag == 'O':
                is_stared = False
        else:
            is_stared = False
    golden_full_tags = merge_tags_to_full_tag(golden_tags)
    predict_full_tags = merge_tags_to_full_tag(predict_tags)
    golden_tags_counter = Counter(golden_full_tags)
    predict_tags_counter = Counter(predict_full_tags)
    # Per-entity precision / recall / f1; the tiny epsilon (1e-10) keeps the
    # denominators non-zero for entities that were never predicted.
    result = {}
    save_num = 5  # keep 5 decimal places
    for tag in tag_set:
        result[tag] = {}
        result[tag]['precision'] = p = correct_dict.get(tag, 0) / (predict_tags_counter[tag] + 1e-10)
        # BUG FIX: removed a stray doubled '+' ("+ +1e-10") from the original
        result[tag]['recall'] = r = correct_dict.get(tag, 0) / (golden_tags_counter[tag] + 1e-10)
        result[tag]['f1'] = 2 * p * r / (p + r + 1e-10)  # epsilon prevents 0/0
    # Support-weighted average over all entity types.
    total_golden = len(golden_full_tags)
    result['avg/total'] = {'precision': 0., 'recall': 0., 'f1': 0.}
    for tag in tag_set:
        size = golden_tags_counter[tag]
        result['avg/total']['precision'] += result[tag]['precision'] * size
        result['avg/total']['recall'] += result[tag]['recall'] * size
        result['avg/total']['f1'] += result[tag]['f1'] * size
        result[tag]['precision'] = np.round(result[tag]['precision'], save_num)
        result[tag]['recall'] = np.round(result[tag]['recall'], save_num)
        result[tag]['f1'] = np.round(result[tag]['f1'], save_num)
    for metric in result['avg/total'].keys():
        result['avg/total'][metric] /= total_golden
        result['avg/total'][metric] = np.round(result['avg/total'][metric], save_num)
    return result


def check_is_valid(predict_tags, verbose=True):
    """
    Check and repair malformed BMEOS entity tag sequences (e.g. B-NAME E-POS).

    Repairs trust the first tag of each run:
    1. M/E tags that do not follow a B become O,
       e.g. M-NAME M-NAME E-NAME B-NAME E-NAME -> O O O B-NAME E-NAME
    2. inside a B... run, tags of a different entity are rewritten to the B's
       entity, e.g. B-NAME M-POS E-POS O -> B-NAME M-NAME E-NAME O
    3. a B immediately followed by O or another B becomes O,
       e.g. O B-NAME O -> O O O

    :param predict_tags: predicted tags; 2-D list, one row per sentence
    :param verbose: print every corrected sentence
    :return: a new 2-D list holding the repaired tags
    """
    new_predict_tags = []
    for sentence in predict_tags:
        new_predict_tag = []
        is_changed = False
        is_start = False   # currently inside an open B... run
        now_full_tag = ''  # entity name of the open run
        b_tag_index = 0    # position of the open run's B tag
        for i in range(len(sentence)):
            tag = sentence[i]
            if tag.startswith('B'):
                if is_start is True:
                    # The previous run was never closed by an E.
                    is_changed = True
                    if sentence[i - 1].startswith('B'):
                        new_predict_tag[i - 1] = 'O'  # rule 3: lone B
                    elif sentence[i - 1].startswith('M'):
                        new_predict_tag[i - 1] = 'E-' + now_full_tag  # close the run
                    else:
                        raise Exception
                else:
                    is_start = True
                now_full_tag = get_full_tag(tag)
                new_predict_tag.append(tag)
                # BUG FIX: remember this B's position directly; the original
                # sentence.index(tag) found the FIRST occurrence of an equal
                # tag string, which is wrong when the same tag repeats in the
                # sentence.
                b_tag_index = i
            elif tag.startswith('M'):
                if is_start is False:
                    is_changed = True
                    new_predict_tag.append('O')  # rule 1: M outside a run
                else:
                    if get_full_tag(tag) == now_full_tag:
                        new_predict_tag.append(tag)
                    else:
                        is_changed = True
                        new_predict_tag.append('M-' + now_full_tag)  # rule 2
            elif tag.startswith('E'):
                if is_start is False:
                    is_changed = True
                    new_predict_tag.append('O')  # rule 1: E outside a run
                else:
                    if get_full_tag(tag) == now_full_tag:
                        new_predict_tag.append(tag)
                    else:
                        is_changed = True
                        new_predict_tag.append('E-' + now_full_tag)  # rule 2
                    is_start = False
            else:  # 'O' (and any other prefix such as 'S')
                if is_start is True:
                    is_changed = True
                    if sentence[i - 1] != 'O' and not sentence[i - 1].startswith('B'):
                        new_predict_tag[i - 1] = 'E-' + now_full_tag  # rule 2: close with E
                    else:
                        new_predict_tag[b_tag_index] = 'O'  # rule 3: lone B
                is_start = False
                new_predict_tag.append(tag)
        # BUG FIX: a run left open at the end of a sentence used to stay
        # malformed (e.g. "... B-NAME M-NAME"); close it with the same rules.
        if is_start:
            is_changed = True
            if new_predict_tag[-1].startswith('B'):
                new_predict_tag[-1] = 'O'  # rule 3: lone B at sentence end
            else:
                new_predict_tag[-1] = 'E-' + now_full_tag  # rule 2: close with E
        if is_changed and verbose:
            print('origin->{}'.format(sentence))
            print('changed->{}'.format(new_predict_tag))
            print()
        new_predict_tags.append(new_predict_tag)
    return new_predict_tags


def get_full_tag(tag: str):
    """
    Strip the position prefix (B/M/E/S) from a tag.

    :param tag: a tag such as 'B-NAME' or 'O'
    :return: the bare entity name ('NAME'); tags without a '-' are returned unchanged
    """
    parts = tag.split('-')
    return parts[1] if len(parts) > 1 else tag


def merge_tags_to_full_tag(tags: []):
    """
    Collapse a flat BMEOS tag sequence into a list of completed entity names,
    e.g. B-NAME M-NAME E-NAME -> NAME.

    Only well-formed runs (B ... E with a consistent entity name) and single
    S tags produce an entry.

    :param tags: flat (1-D) list of BMEOS tags
    :return: list of entity names, one per completed entity
    """
    full_tags = []
    is_stared = False
    present_tag = ''
    for tag in tags:
        if tag.startswith('M'):
            if present_tag != get_full_tag(tag):
                is_stared = False
            continue
        if tag.startswith('S'):
            # BUG FIX: single-character entities (S-xxx) were skipped here, so
            # they never entered the Counter-based precision/recall
            # denominators even though the matching logic in evaluate() counts
            # S matches as correct entities.
            full_tags.append(get_full_tag(tag))
            continue
        if tag.startswith('B'):
            present_tag = get_full_tag(tag)
            is_stared = True
        if tag.startswith('E'):
            full_tag = get_full_tag(tag)
            if is_stared is True and full_tag == present_tag:
                full_tags.append(full_tag)
            is_stared = False
        if tag == 'O':
            is_stared = False
    return full_tags


def report_scores(result: {}):
    """Render an evaluation-result dict as an aligned table, e.g.:

                  precision    recall  f1-score
          B-LOC      0.775     0.757     0.766
          I-LOC      0.601     0.631     0.616

      avg/total      0.779     0.764     0.770

    :param result: dict mapping tag name -> {'precision', 'recall', 'f1'}
    :return: the formatted table as a single string
    """
    header_fmt = '{:>9s}  {:>9} {:>9} {:>9}'
    row_fmt = '{:>9s}  {:>9.4f} {:>9.4f} {:>9.4f}'
    lines = [header_fmt.format('', 'precision', 'recall', 'f1-score')]
    for tag, scores in result.items():
        if tag == 'avg/total':
            lines.append('')  # blank separator line before the summary row
        lines.append(row_fmt.format(tag,
                                    scores['precision'],
                                    scores['recall'],
                                    scores['f1']))
    return '\n'.join(lines) + '\n'


# 请在下面的Cell中实现一个基于最大熵模型的实体识别系统，并利用ner_char_data目录下的train.txt文件训练模型，利用test.txt文件测试模型效果。
# # 请在这里实现一个基于最大熵模型的实体识别系统
# TODO 需要提交的第[3]个代码。在格子in[13]中
# 导入sklearn中的LogisticRegression （LogisticRegression即为最大熵模型）
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction import DictVectorizer
from evaluating import Metrics
from evaluating import flatten_lists


def is_chinese_punctuation(word: str):
    """Return True if *word* consists entirely of Chinese punctuation characters."""
    return re.match('[。？！，、；：“”‘’「」『』（）\\[\\]〔〕【】—…\\·\\-～《》〈〉_/]+$', word) is not None


def is_english_punctuation(word: str):
    """Return True if *word* consists entirely of English punctuation characters."""
    matched = re.match("[\\.?!,:;\\-—\\(\\)\\[\\]{}']+$", word) or re.match('"+$', word)
    return matched is not None


def word2features_forLR(sent, target, i):
    """Build the feature dict of the i-th character for the LR (max-entropy) model.

    Besides the character window features this also feeds back the tags already
    predicted for the two preceding positions ('t-1', 't-2').
    """
    word = sent[i]
    last = len(sent) - 1
    prev_word = sent[i - 1] if i > 0 else "<s>"
    next_word = sent[i + 1] if i < last else "</s>"
    prev_word_2 = sent[i - 2] if i > 1 else "<d>"
    next_word_2 = sent[i + 2] if i < last - 1 else "</d>"
    prev_target = target[i - 1] if i > 0 else "<unk>"
    prev_target_2 = target[i - 2] if i > 1 else "<unk>"
    features = {
        'w': word,
        'w-1': prev_word,
        'w+1': next_word,
        'w-2': prev_word_2,
        'w+2': next_word_2,
        'w-2:w-1': prev_word_2 + prev_word,
        'w-1:w': prev_word + word,
        'w:w+1': word + next_word,
        'w+1:w+2': next_word + next_word_2,
        'idx': i,
        't-1': prev_target,
        't-2': prev_target_2,
    }
    # Character-class indicator features for the current ('') character and its
    # immediate neighbours ('_pre' / '_next').
    for ch, suffix in ((word, ''), (prev_word, '_pre'), (next_word, '_next')):
        features['is_alpha' + suffix] = ch.isalpha()
        features['is_ascii' + suffix] = ch.isascii()
        features['is_digit' + suffix] = ch.isdigit()
        features['is_title' + suffix] = ch.istitle()
        features['is_lower' + suffix] = ch.islower()
        features['is_chinese_punctuation' + suffix] = is_chinese_punctuation(ch)
        features['is_english_punctuation' + suffix] = is_english_punctuation(ch)
    return features


def sent2features_forLR(sent, target):
    """Extract the LR feature dict for every position of a sentence."""
    return [word2features_forLR(sent, target, idx) for idx, _ in enumerate(sent)]


class LRModel(object):
    """Max-entropy (multinomial logistic regression) sequence tagger.

    Tags are predicted column-by-column across all sentences so that the tags
    already predicted for the two preceding positions can be fed back in as
    features.
    """

    def __init__(self,
                 penalty='l2',
                 C=1.0,
                 solver='lbfgs',
                 max_iter=100,
                 multi_class='auto',
                 verbose=1,
                 tol=1e-4,
                 n_jobs=-1
                 ):
        self.model = LogisticRegression(penalty=penalty, C=C, solver=solver,
                                        max_iter=max_iter, multi_class=multi_class,
                                        verbose=verbose, tol=tol, n_jobs=n_jobs,
                                        random_state=1)
        # sparse=True keeps the one-hot feature matrix in sparse form
        self.dict_vec = DictVectorizer(sparse=True, dtype=np.int8)

    def train(self, sentences, tag_lists):
        """Fit the model on every character of every training sentence."""
        features = [feat
                    for sent, tags in zip(sentences, tag_lists)
                    for feat in sent2features_forLR(sent, tags)]
        features = self.dict_vec.fit_transform(features)
        print(features.shape)
        labels = np.array(flatten_lists(tag_lists), dtype=str)
        self.model.fit(features, labels)

    def test(self, sentences):
        """Predict tags position-by-position, feeding earlier predictions back in."""
        pred_tag_lists = [['O'] * len(sent) for sent in sentences]
        longest = max((len(sent) for sent in sentences), default=0)
        for pos in range(longest):
            batch = []
            owners = []  # sentence index each feature dict belongs to
            for idx, sent in enumerate(sentences):
                if pos < len(sent):
                    batch.append(word2features_forLR(sent, pred_tag_lists[idx], pos))
                    owners.append(idx)
            preds = self.model.predict(self.dict_vec.transform(batch))
            for owner, label in zip(owners, preds):
                pred_tag_lists[owner][pos] = label
        return pred_tag_lists


# TODO 提交的时候记得删除这一个函数
def local_evaluate(filepath: str):
    """
    Load predictions previously dumped to *filepath* (a JSON 2-D tag list) and
    compare the teacher's evaluation with the local entity-level one.

    :param filepath: path of the JSON prediction file
    :return: None (results are printed)
    """
    global test_tag_lists_clue
    with open(filepath, 'r', encoding='utf-8') as f:
        pred_tag_lists = json.load(f)
    print(filepath)
    print('老师的预测函数')
    Metrics(test_tag_lists_clue, pred_tag_lists, remove_O=False).report_scores()
    print('我的的预测函数')
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists)))
    print('修正了不符合规范的实体标签')
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists, check=True)))


# TODO 需要补交的函数
import json


def write_result(test_word_lists: [], pred_tag_lists: [], output_filepath):
    """
    Write predictions to a file using the same JSON-lines format as
    ner_clue_data/dev.txt ({'text': ..., 'label': {type: {text: [[s, e], ...]}}}).

    Tags must be well-formed BMEOS; malformed sequences raise an Exception
    whose message is "<sentence index>,<char index>,<tag list>".

    :param test_word_lists: 2-D character list, one row per sentence
    :param pred_tag_lists: predicted tags with the same shape as test_word_lists
    :param output_filepath: output path ending in .txt
    :return: None
    :raises FileExistsError: mode 'x' refuses to overwrite an existing file
    :raises Exception: on malformed tag sequences
    """
    with open(output_filepath, mode='x', encoding='utf-8') as f:
        for i in range(len(test_word_lists)):
            line = test_word_lists[i]
            result = {}
            text = ''
            labels = {}          # entity type -> {entity text -> [[start, end], ...]}
            is_start = False     # currently inside an open B... run
            start_index = 0      # index of the open run's B tag
            now_full_tag = ''    # entity name of the open run
            for j in range(len(line)):
                text += line[j]
                tag = pred_tag_lists[i][j]
                if tag.startswith('B'):
                    if is_start is True:
                        # previous run was never closed by an E
                        raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                    is_start = True
                    start_index = j
                    now_full_tag = get_full_tag(tag)
                elif tag.startswith('M'):
                    if is_start is False:
                        # M outside a run
                        raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                    else:
                        if now_full_tag != get_full_tag(tag):
                            # entity name changed mid-run
                            raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                elif tag.startswith('E'):
                    if is_start is False:
                        # E outside a run
                        raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                    else:
                        if now_full_tag != get_full_tag(tag):
                            # entity name changed mid-run
                            raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                        else:
                            # record the completed entity span (end index inclusive)
                            word = text[start_index:j + 1]
                            if now_full_tag in labels.keys():
                                if labels[now_full_tag].get(word) is None:
                                    labels[now_full_tag][word] = [[start_index, j]]
                                else:
                                    labels[now_full_tag][word].append([start_index, j])
                            else:
                                labels[get_full_tag(tag)] = {word: [[start_index, j]]}
                            is_start = False
                elif tag.startswith('O'):
                    if is_start is True:
                        # run interrupted by O before its E
                        raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                elif tag.startswith('S'):
                    if is_start is True:
                        # run interrupted by a single-character entity
                        raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
                    else:
                        # record the single-character entity span [j, j]
                        word = text[j:j + 1]
                        now_full_tag = get_full_tag(tag)
                        if now_full_tag in labels.keys():
                            if labels[now_full_tag].get(word) is None:
                                labels[now_full_tag][word] = [[j, j]]
                            else:
                                labels[now_full_tag][word].append([j, j])
                        else:
                            labels[get_full_tag(tag)] = {word: [[j, j]]}
                else:
                    # unknown tag prefix
                    raise Exception('{},{},{}'.format(i, j, pred_tag_lists[i]))
            result['text'] = text
            result['label'] = labels
            f.write(json.dumps(result, ensure_ascii=False))
            f.write('\n')
    print('结果输出成功！文件被保存在{}'.format(output_filepath))


if __name__ == '__main__':
    # # 读取ner_char_data
    # train_word_lists, train_tag_lists, word2id, tag2id = data_build(file_name="train.txt", make_vocab=True)
    # test_word_lists, test_tag_lists = data_build(file_name="test.txt", make_vocab=False)
    #
    # # 隐马参数构建
    # # N: 状态数，这里对应存在的标注的种类
    # # M: 观测数，这里对应有多少不同的字
    # N, M = len(tag2id), len(word2id)
    # # 状态转移概率矩阵 A[i][j]表示从i状态转移到j状态的概率
    # A = np.zeros(shape=(N, N), dtype=float)
    # # 观测概率矩阵, B[i][j]表示i状态下生成j观测的概率
    # B = np.zeros(shape=(N, M), dtype=float)
    # # 初始状态概率  Pi[i]表示初始时刻为状态i的概率
    # Pi = np.zeros(shape=N, dtype=float)
    #
    # """ 构建转移概率矩阵 """
    # for tag_list in train_tag_lists:
    #     seq_len = len(tag_list)
    #     for i in range(seq_len - 1):
    #         current_tagid = tag2id[tag_list[i]]
    #         next_tagid = tag2id[tag_list[i + 1]]
    #         A[current_tagid][next_tagid] += 1
    # A[A == 0.] = 1e-10  # 平滑处理
    # # 归一化
    # A = A / np.sum(a=A, axis=1, keepdims=True)
    #
    # """ 构建观测概率矩阵 """
    # for tag_list, word_list in zip(train_tag_lists, train_word_lists):
    #     assert len(tag_list) == len(word_list)
    #     for tag, word in zip(tag_list, word_list):
    #         tag_id = tag2id[tag]
    #         word_id = word2id[word]
    #         B[tag_id][word_id] += 1
    # B[B == 0.] = 1e-10  # 平滑处理
    # # 归一化
    # B = B / np.sum(a=B, axis=1, keepdims=True)
    #
    # """ 构建初始状态概率 """
    # for tag_list in train_tag_lists:
    #     init_tagid = tag2id[tag_list[0]]
    #     Pi[init_tagid] += 1
    # Pi[Pi == 0.] = 1e-10  # 平滑处理
    # Pi = Pi / np.sum(a=Pi)
    #
    # """ 利用HMM识别ner_char_data目录下test.txt中的数据"""
    # pred_tag_lists = []
    # for word_list in test_word_lists:
    #     pred_tag_list = viterbi(word_list, word2id, tag2id)
    #     pred_tag_lists.append(pred_tag_list)
    #
    # """ HMM 评测 """
    # from evaluating import Metrics
    #
    # metrics = Metrics(test_tag_lists, pred_tag_lists, remove_O=False)
    # metrics.report_scores()
    # # metrics.report_confusion_matrix()
    # print(report_scores(evaluate(test_tag_lists, pred_tag_lists)))
    # print('修正了不符合规范的实体标签')
    # print(report_scores(evaluate(test_tag_lists, pred_tag_lists, check=True)))
    #
    # # 读取ner_clue_data
    # # TODO 需要提交的第[1]个代码。在格子in[3]中
    #
    # # 3. 条件随机场(CRF)模型
    # ## 3.3  CRF模型训练、测试与评价
    # # 训练CRF模型
    # crf_model = CRFModel()
    # crf_model.train(train_word_lists, train_tag_lists)
    #
    # pred_tag_lists = crf_model.test(test_word_lists)
    #
    # metrics = Metrics(test_tag_lists, pred_tag_lists, remove_O=False)
    # metrics.report_scores()
    # metrics.report_confusion_matrix()
    #
    # # 实体级别评价程序
    # # TODO 需要提交的第[2]个代码。在格子in[22]中
    # print(report_scores(evaluate(test_tag_lists, pred_tag_lists)))
    # print('修正了不符合规范的实体标签')
    # print(report_scores(evaluate(test_tag_lists, pred_tag_lists, check=True)))
    #
    # # 基于最大熵模型的实体识别
    # # TODO 需要提交的第[3]个代码。在格子in[13]中
    # lr_model = LRModel(verbose=2, max_iter=400, multi_class='multinomial', solver='sag')
    # lr_model.train(train_word_lists, train_tag_lists)
    # pred_tag_lists = lr_model.test(test_word_lists)
    # metrics = Metrics(test_tag_lists, pred_tag_lists, remove_O=False)
    # # metrics.report_confusion_matrix()
    # metrics.report_scores()
    # print(report_scores(evaluate(test_tag_lists, pred_tag_lists)))
    # print('修正了不符合规范的实体标签')
    # print(report_scores(evaluate(test_tag_lists, pred_tag_lists, check=True)))
    #
    # # pred_tag_lists = lr_model.test(test_word_lists)
    # # # 写入文件，来看一看效果
    # # with open('pred_tag_lists_new5.json', 'w', encoding='utf-8') as f:
    # #     f.write(json.dumps(pred_tag_lists))
    # # # TODO 本地评测函数
    # # local_evaluate('pred_tag_lists.json')
    # # local_evaluate('pred_tag_lists_new.json')
    # # for i in range(1, 6):
    # #     local_evaluate('pred_tag_lists_new{}.json'.format(i))
    # #
    # # for i in range(5, 10):
    # #     for j in range(len(test_word_lists[i])):
    # #         print('{:<10}'.format(test_word_lists[i][j]), end='')
    # #     print()
    # #     for j in range(len(test_word_lists[i])):
    # #         print('{:<10}'.format(test_tag_lists[i][j]), end='')
    # #     print()
    # #     for j in range(len(test_word_lists[i])):
    # #         print('{:<10}'.format(pred_tag_lists[i][j]), end='')
    # #     print()
    # #     print()
    # # # 利用新数据重新训练和测试HMM、ME和CRF模型
    # # # # TODO 需要提交的第[4]个代码。在格子in[14]中
    # # 在这里实现利用新数据重新训练和测试HMM、ME和CRF模型
    train_word_lists_clue, train_tag_lists_clue, word2id_clue, tag2id_clue = data_build_clue(file_name="train.txt",
                                                                                             make_vocab=True)
    test_word_lists_clue, test_tag_lists_clue = data_build_clue(file_name="dev.txt", make_vocab=False)

    # HMM training
    # Build the HMM parameters:
    # N: number of states = number of distinct tags
    # M: number of observations = number of distinct characters
    N, M = len(tag2id_clue), len(word2id_clue)
    # Transition matrix: A[i][j] is the probability of moving from state i to state j
    A = np.zeros(shape=(N, N), dtype=float)
    # Emission matrix: B[i][j] is the probability of state i emitting observation j
    B = np.zeros(shape=(N, M), dtype=float)
    # Initial distribution: Pi[i] is the probability that the first state is i
    Pi = np.zeros(shape=N, dtype=float)

    """ 构建转移概率矩阵 """
    for tag_list in train_tag_lists_clue:
        seq_len = len(tag_list)
        for i in range(seq_len - 1):
            current_tagid = tag2id_clue[tag_list[i]]
            next_tagid = tag2id_clue[tag_list[i + 1]]
            A[current_tagid][next_tagid] += 1
    A[A == 0.] = 1e-10  # smoothing: avoids log(0) inside viterbi()
    # normalize rows into probabilities
    A = A / np.sum(a=A, axis=1, keepdims=True)

    """ 构建观测概率矩阵 """
    for tag_list, word_list in zip(train_tag_lists_clue, train_word_lists_clue):
        assert len(tag_list) == len(word_list)
        for tag, word in zip(tag_list, word_list):
            tag_id = tag2id_clue[tag]
            word_id = word2id_clue[word]
            B[tag_id][word_id] += 1
    B[B == 0.] = 1e-10  # smoothing
    # normalize rows into probabilities
    B = B / np.sum(a=B, axis=1, keepdims=True)

    """ 构建初始状态概率 """
    for tag_list in train_tag_lists_clue:
        init_tagid = tag2id_clue[tag_list[0]]
        Pi[init_tagid] += 1
    Pi[Pi == 0.] = 1e-10  # smoothing
    Pi = Pi / np.sum(a=Pi)

    """ 利用HMM识别ner_char_data目录下test.txt中的数据"""
    pred_tag_lists = []
    for word_list in test_word_lists_clue:
        pred_tag_list = viterbi(word_list, word2id_clue, tag2id_clue)
        pred_tag_lists.append(pred_tag_list)

    """ HMM 评测 """
    from evaluating import Metrics

    print('HMM')
    metrics = Metrics(test_tag_lists_clue, pred_tag_lists, remove_O=False)
    metrics.report_scores()
    # metrics.report_confusion_matrix()
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists)))
    print('修正了不符合规范的实体标签')
    pred_tag_lists = check_is_valid(pred_tag_lists, verbose=False)
    write_result(test_word_lists_clue, pred_tag_lists, output_filepath='./ner_clue_data/HMM.txt')
    # round-trip through the CLUE file format to canonicalize the predictions
    test_word_lists_clue, pred_tag_lists = data_build_clue(file_name="HMM.txt", make_vocab=False)
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists, check=True)))

    # ME (max-entropy / logistic regression) training
    lr_model = LRModel(verbose=2, max_iter=400, multi_class='multinomial', solver='sag')
    lr_model.train(train_word_lists_clue, train_tag_lists_clue)
    pred_tag_lists = lr_model.test(test_word_lists_clue)
    metrics = Metrics(test_tag_lists_clue, pred_tag_lists, remove_O=False)
    print('ME')
    metrics.report_scores()
    # metrics.report_confusion_matrix()
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists)))
    print('修正了不符合规范的实体标签')
    pred_tag_lists = check_is_valid(pred_tag_lists, verbose=False)
    write_result(test_word_lists_clue, pred_tag_lists, output_filepath='./ner_clue_data/ME.txt')
    # round-trip through the CLUE file format to canonicalize the predictions
    test_word_lists_clue, pred_tag_lists = data_build_clue(file_name="ME.txt", make_vocab=False)
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists, check=True)))

    # CRF training
    crf_model = CRFModel()
    crf_model.train(train_word_lists_clue, train_tag_lists_clue)
    pred_tag_lists = crf_model.test(test_word_lists_clue)
    print('CRF')
    metrics = Metrics(test_tag_lists_clue, pred_tag_lists, remove_O=False)
    metrics.report_scores()
    # metrics.report_confusion_matrix()
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists)))
    print('修正了不符合规范的实体标签')
    pred_tag_lists = check_is_valid(pred_tag_lists, verbose=False)
    write_result(test_word_lists_clue, pred_tag_lists, output_filepath='./ner_clue_data/CRF.txt')
    # round-trip through the CLUE file format to canonicalize the predictions
    test_word_lists_clue, pred_tag_lists = data_build_clue(file_name="CRF.txt", make_vocab=False)
    print(report_scores(evaluate(test_tag_lists_clue, pred_tag_lists, check=True)))
