#!/usr/bin/env python 
# -*- coding:utf-8 -*-
'''
@File    :   main.py    
@Modify Time      @Author    @Version    @Desciption
------------      -------    --------    -----------
2022/4/12 0012 11:11   st      1.0         None
'''
import time

import xlwt

import process.data_pretreatment_nlpcc as dp
from process.data_process_medical_1118 import get_medical_tag_datas
from process.data_process_nlpcc import get_to_text
# from similar.bert_process import temp_word_dict
from utils.constent import *
from process import similar_process as sp


# from process import data_export_process as dep


def get_result_dict_threshold(results, pmi=False, percent=0.7):
    """
    Derive a threshold as a fraction of the maximum value in the result dicts.

    :param results: list of dicts whose values are numbers, or
        (pmi, pmi_mini) tuples when ``pmi`` is True.
    :param pmi: when True each dict value is a tuple and only its first
        element (the pmi score) is considered.
    :param percent: fraction of the maximum used as the threshold.
    :return: max(values) * percent, or 0 when the dicts contain no values.
    """
    values = []
    for result in results:
        if pmi:
            values.extend(v[0] for v in result.values())
        else:
            values.extend(result.values())
    # Guard: max() on an empty sequence raises ValueError.
    if not values:
        return 0
    return max(values) * percent


def word_classify(values, thresholds):
    """
    Classify a candidate word by its four statistics.

    :param values: (freq, pmi, conf, H_min)
    :param thresholds: (threshold_freq, threshold_pmi, threshold_conf, threshold_H)
    :return: 'none' (meaningless), 'low' (low frequency) or 'high' (high frequency)
    """
    freq, pmi, conf, h_min = values
    t_freq, t_pmi, t_conf, t_h = thresholds

    pmi_weak = pmi <= t_pmi
    conf_weak = conf <= t_conf
    h_weak = h_min <= t_h

    # All three quality measures at or below their thresholds -> meaningless.
    if pmi_weak and conf_weak and h_weak:
        return 'none'
    # Frequency at or below threshold -> low-frequency range.
    if freq <= t_freq:
        return 'low'
    # High-frequency range, but weak pmi combined with a weak conf or a weak
    # adjacency entropy still demotes the candidate to low frequency.
    if pmi_weak and (conf_weak or h_weak):
        return 'low'
    return 'high'


def create_ngram_result_dict(root, percent=1):
    """
    Build a per-order statistics dictionary for every n-gram size.

    :param root: model object exposing search_ngram_by_num(num).
    :param percent: fraction forwarded to get_result_dict_threshold.
    :return: {order: {'pmi': (map, threshold), 'freq': (map, threshold),
              'conf': (map, threshold), 'H': (left_map, right_map, threshold)}}
    """
    result_dict = {}
    for order in range(NGRAM_MINI + 1, NGRAM_MAX + 1):
        pmi_map, freq_map, conf_map, left_map, right_map = root.search_ngram_by_num(order)
        # Each entry pairs the raw statistic map(s) with its base threshold.
        result_dict[order] = {
            'pmi': (pmi_map, get_result_dict_threshold([pmi_map], pmi=True, percent=percent)),
            'freq': (freq_map, get_result_dict_threshold([freq_map], percent=percent)),
            'conf': (conf_map, get_result_dict_threshold([conf_map], percent=percent)),
            'H': (left_map, right_map,
                  get_result_dict_threshold([left_map, right_map], percent=percent)),
        }
    return result_dict


def words_classify_sort(words_classify_dict):
    """
    Sort each word class ('high'/'low'/'none') by its weighted score K.

    Fix: the class name is now forwarded to sort_high_words_KN; previously
    every call fell back to the default wc='none', so the expensive
    similarity term was also computed for high-frequency words, contrary to
    the intent that only 'low'/'none' candidates get the similarity boost.

    :param words_classify_dict: dict with 'high', 'low' and 'none' lists.
    :return: the same dict with every list sorted by K (descending).
    """
    for wc in ('high', 'low', 'none'):
        words_classify_dict[wc] = sort_high_words_KN(words_classify_dict[wc], wc=wc)
    return words_classify_dict


def sort_high_words_KN(data_list, wc='none'):
    """
    Score candidate words with the weighted information measure K and sort
    them in descending order.

    :param data_list: list of (words, (freq, pmi, pmi_mini, conf, H_min)).
    :param wc: word class ('high', 'low' or 'none'); for 'low'/'none' an
        average embedding similarity is added to K.
    :return: list of (words, K, (freq, pmi, pmi_mini, conf, H_min),
        (sim_bert_aveg, sim_hownet_aveg, sim_handian_aveg)) sorted by K.
    """
    if not data_list:
        return data_list
    use_similarity = wc == 'low' or wc == 'none'
    # Build the vocabulary dict only when the similarity term is actually
    # used; previously it was computed for every class.
    # NOTE(review): the import of temp_word_dict (similar.bert_process) is
    # commented out at the top of the file -- confirm the name is provided
    # by `utils.constent import *` instead.
    voc_dict = temp_word_dict(data_list) if use_similarity else None
    new_data_list = []
    for words, (freq, pmi, pmi_mini, conf, H_min) in data_list:
        # Weighted score; the K_* weights come from utils.constent.
        K = freq * K_freq + pmi * K_pmi + conf * K_conf + H_min * K_H
        sim_bert_aveg, sim_hownet_aveg, sim_handian_aveg = 0, 0, 0
        if use_similarity:
            sim_aveg, sim_bert_aveg, sim_hownet_aveg, sim_handian_aveg = sp.get_words_sim(words, voc_dict)
            K += sim_aveg
        new_data_list.append(
            (words, K, (freq, pmi, pmi_mini, conf, H_min), (sim_bert_aveg, sim_hownet_aveg, sim_handian_aveg)))
    return sorted(new_data_list, key=lambda x: x[1], reverse=True)


def main(test=True):
    """
    Grid-search entry point: sweep threshold percentages for the six filter
    criteria (freq, pmi, pmi3, conf, H_min, vector similarity) and log the
    precision of every combination to log_medical.txt.

    :param test: read the nlpcc test corpus when True, the medical corpus
        otherwise.
    :return: None
    """
    # Load the corpus: raw texts plus the gold-standard word lists.
    if test:
        text_list, res_list = get_to_text()
    else:
        text_list, res_list = get_medical_tag_datas()
    # Segment the raw texts.
    datas = dp.matadata_cut_txt(text_list)
    # Model initialisation.
    print('模型初始化')
    root = dp.init_model(datas, is_new=True, single=False, model_new=True)
    print('ngram_候选词，四参数计算统计')

    ngram_dict = create_ngram_result_dict(root)

    print('根据四个参数进行待选词组高低频分类、并进行高频词K值计算、低频次相似度计算--排序')

    # threads_list[i] holds the percentage for criterion i, derived from the
    # digits of index_num.  It is deliberately NOT reset between iterations
    # (trailing slots keep their previous value when index_num has fewer
    # digits) -- preserved from the original sweep logic.
    threads_list = [0] * 6
    all_res = dict()
    t1 = time.time()
    # Context manager: the original leaked the log file handle.
    with open('log_medical.txt', 'w', encoding='utf-8') as f_log:
        # Bounded loop replaces the original `while True` + sentinel break;
        # it covers the same index values 1..999999.
        for index_num in range(1, 1000000):
            for ind, digit in enumerate(str(index_num)):
                threads_list[ind] = int(digit) / 10
            is_freq, is_pmi, is_pmi3, is_conf, is_H_mini, is_voc = [x > 0 for x in threads_list]
            # NOTE(review): frequency filtering is force-disabled in the
            # original code -- kept as-is.
            is_freq = False
            percent_freq, percent_pmi, percent_pmi3, percent_conf, percent_H_mini, percent_voc = threads_list
            controle_str = '\t'.join([str(x) for x in [is_freq, is_pmi, is_pmi3, is_conf, is_H_mini, is_voc]])
            percent_str = '\t'.join(
                [str(x) for x in [percent_freq, percent_pmi, percent_pmi3, percent_conf, percent_H_mini, percent_voc]])
            if controle_str not in all_res:
                all_res[controle_str] = dict()
                t2 = time.time()
                print(controle_str, '-----------------', t2 - t1)
                t1 = t2
            if percent_str not in all_res[controle_str]:
                all_res[controle_str][percent_str] = {'standard': 0, 'right': 0, 'error': 0}

            for index, words_list in enumerate(datas):
                words_list_compare = res_list[index]
                all_res[controle_str][percent_str]['standard'] += len(words_list_compare)

                for num in range(NGRAM_MINI + 1, NGRAM_MAX + 1):
                    result_freq, threshold_freq = ngram_dict[num]['freq']
                    result_pmi, threshold_pmi = ngram_dict[num]['pmi']
                    result_conf, threshold_conf = ngram_dict[num]['conf']
                    result_left, result_right, threshold_H = ngram_dict[num]['H']

                    # Scale the per-order base thresholds by this
                    # iteration's percentages.
                    threshold_freq = threshold_freq * percent_freq
                    threshold_pmi = threshold_pmi * percent_pmi
                    threshold_pmi3 = threshold_pmi ** 3 * percent_pmi
                    threshold_conf = threshold_conf * percent_conf
                    threshold_H_mini = threshold_H * percent_H_mini
                    threshold_voc = percent_voc
                    # All order-`num` candidate phrases of this document.
                    ngram_words = dp.create_ngram(words_list, num, num)
                    for words in ngram_words:
                        words_char = CHAR_CONNECTOR.join(words)
                        # Every enabled criterion rejects candidates below
                        # its threshold.
                        if is_freq:
                            freq = result_freq.get(words_char, 0)
                            if freq < threshold_freq:
                                continue
                        if is_pmi:
                            pmi, pmi_mini = result_pmi.get(words_char, (0, 0))
                            if pmi < threshold_pmi:
                                continue
                        if is_pmi3:
                            pmi, pmi_mini = result_pmi.get(words_char, (0, 0))
                            pmi3 = pmi ** 3
                            if pmi3 < threshold_pmi3:
                                continue
                        if is_conf:
                            conf = result_conf.get(words_char, 0)
                            if conf < threshold_conf:
                                continue
                        if is_H_mini:
                            H_min = min(result_left.get(words_char, 0), result_right.get(words_char, 0))
                            if H_min < threshold_H_mini:
                                continue
                        if is_voc:
                            sim_aveg, sim_hownet_aveg, sim_handian_aveg = sp.get_words_sim(words, None)
                            if sim_aveg < threshold_voc:
                                continue
                        # Surviving candidate: compare against the gold
                        # standard word list.
                        if words_char.replace(CHAR_CONNECTOR, '') in words_list_compare:
                            all_res[controle_str][percent_str]['right'] += 1
                        else:
                            all_res[controle_str][percent_str]['error'] += 1
            standard = all_res[controle_str][percent_str]['standard']
            rt = all_res[controle_str][percent_str]['right']
            er = all_res[controle_str][percent_str]['error']
            # Precision; guard against an empty result set.
            p = rt / (rt + er) if rt + er > 0 else 0
            f_log.write('\t'.join([str(x) for x in [controle_str, percent_str, standard, rt, er, p]]) + '\n')


def main_detail(test=False, cut_mod=0, standard_total=13466):
    """
    Evaluate one fixed criterion combination (conf + H_min + vector
    similarity) in detail and export every per-document hit to an Excel file.

    :param test: read the nlpcc test corpus when True, the medical corpus
        otherwise.
    :param cut_mod: segmentation mode (0=pkuseg, 1=pkuseg_dict, 2=jieba,
        anything else='None').
    :param standard_total: total number of gold-standard words used as the
        recall denominator (default 13466, previously hard-coded).
    :return: None
    """
    label_list = []
    # Load corpus (the medical loader also returns per-document label dicts).
    if test:
        text_list, res_list = get_to_text()
    else:
        text_list, res_list, label_list = get_medical_tag_datas()
    # Segment the raw texts with the requested segmenter.
    datas = dp.matadata_cut_txt(text_list, cut_mod=cut_mod)
    # Model initialisation.
    print('模型初始化')
    root = dp.init_model(datas, is_new=True, single=False, model_new=True)
    print('ngram_候选词，四参数计算统计')

    ngram_dict = create_ngram_result_dict(root)

    print('根据四个参数进行待选词组高低频分类、并进行高频词K值计算、低频次相似度计算--排序')

    # Fixed criterion switches and percentages (the combination under study).
    is_freq = False
    is_pmi = False
    is_pmi3 = False
    is_conf = True
    is_H_mini = True
    is_voc = True

    percent_freq = 0
    percent_pmi = 0
    percent_pmi3 = 0
    percent_conf = 0.4
    percent_H_mini = 0.1
    percent_voc = 0.3

    all_res = []
    num_right = 0
    num_error = 0
    for index, words_list in enumerate(datas):
        words_list_compare = res_list[index]
        text = text_list[index]
        # Robustness: the test corpus provides no labels, so label_list can
        # be empty -- previously this raised IndexError for test=True.
        label_dict = label_list[index] if index < len(label_list) else {}
        temp_res = []
        temp_words = []
        for num in range(NGRAM_MINI + 1, NGRAM_MAX + 1):
            result_freq, threshold_freq = ngram_dict[num]['freq']
            result_pmi, threshold_pmi = ngram_dict[num]['pmi']
            result_conf, threshold_conf = ngram_dict[num]['conf']
            result_left, result_right, threshold_H = ngram_dict[num]['H']

            # Scale the per-order base thresholds by the fixed percentages.
            threshold_freq = threshold_freq * percent_freq
            threshold_pmi = threshold_pmi * percent_pmi
            threshold_pmi3 = threshold_pmi ** 3 * percent_pmi
            threshold_conf = threshold_conf * percent_conf
            threshold_H_mini = threshold_H * percent_H_mini
            threshold_voc = percent_voc
            # All order-`num` candidate phrases of this document.
            ngram_words = dp.create_ngram(words_list, num, num)
            for words in ngram_words:
                words_char = CHAR_CONNECTOR.join(words)
                # Every enabled criterion rejects candidates below its threshold.
                if is_freq:
                    freq = result_freq.get(words_char, 0)
                    if freq < threshold_freq:
                        continue
                if is_pmi:
                    pmi, pmi_mini = result_pmi.get(words_char, (0, 0))
                    if pmi < threshold_pmi:
                        continue
                if is_pmi3:
                    pmi, pmi_mini = result_pmi.get(words_char, (0, 0))
                    pmi3 = pmi ** 3
                    if pmi3 < threshold_pmi3:
                        continue
                if is_conf:
                    conf = result_conf.get(words_char, 0)
                    if conf < threshold_conf:
                        continue
                if is_H_mini:
                    H_min = min(result_left.get(words_char, 0), result_right.get(words_char, 0))
                    if H_min < threshold_H_mini:
                        continue
                if is_voc:
                    sim_aveg, sim_hownet_aveg, sim_handian_aveg = sp.get_words_sim(words, None)
                    if sim_aveg < threshold_voc:
                        continue
                # Surviving candidate: compare against the gold standard.
                new_word = words_char.replace(CHAR_CONNECTOR, '')
                if new_word in words_list_compare:
                    # .get avoids a KeyError when a gold word has no label entry.
                    temp_res.append((num, new_word, label_dict.get(new_word)))
                    temp_words.append(new_word)
                    num_right += 1
                else:
                    num_error += 1
        all_res.append((text, temp_res))
    total = num_right + num_error
    print('正确：', num_right)
    print('错误：', num_error)
    # Guard the divisions: the filter combination may reject every candidate.
    print('准确率：', num_right / total if total > 0 else 0)
    print('召回率：', num_right / standard_total if standard_total > 0 else 0)

    # Export the per-document hits to an .xls sheet.
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Sheet1")
    header = sheet.row(0)
    header.write(0, '编号')
    header.write(1, 'N-gram')
    header.write(2, '新词')
    header.write(3, '新词所在位置')
    header.write(4, '原文')
    row_num = 0
    for text, temp_res in all_res:
        for n_gram, word, labels in temp_res:
            row_num += 1
            row = sheet.row(row_num)
            row.write(0, row_num)
            row.write(1, n_gram)
            row.write(2, word)
            row.write(3, str(labels))
            row.write(4, text)
    if cut_mod == 0:
        cut_mod_str = 'pkuseg'
    elif cut_mod == 1:
        cut_mod_str = 'pkuseg_dict'
    elif cut_mod == 2:
        cut_mod_str = 'jieba'
    else:
        cut_mod_str = 'None'
    result_dir = os.path.join(data_path, 'result', 'medical_20230109')
    # Make sure the output directory exists before saving.
    os.makedirs(result_dir, exist_ok=True)
    workbook.save(os.path.join(result_dir, '详情结果_' + cut_mod_str + '.xls'))


# Result sub-directory (under <data_path>/result) read and written by the
# statistics helpers below.
# res_dir_name = 'nlpcc_1221'
res_dir_name = 'medical_1230'


def result_statistics():
    """
    Aggregate the grid-search log: for every criterion combination keep the
    percentage setting with the best F1 and write it to new_res.txt.

    Log line layout (tab-separated): 6 boolean switches, 6 percentages,
    then standard / right / error / precision.

    :return: None
    """
    result_file_path = os.path.join(data_path, 'result', res_dir_name, 'log.txt')
    res_dict = dict()
    # Context manager: the original left the input handle open.
    with open(result_file_path, 'r', encoding='utf-8') as f_in:
        for index, line in enumerate(f_in):
            # Skip the header line.
            if index == 0:
                continue
            ls = line.strip().split('\t')
            threshold_name = '\t'.join(ls[:6])
            threshold_value = '\t'.join(ls[6:12])
            standard, rt, er, _ = [float(x) for x in ls[12:]]

            # Precision / recall / F1 in percent, guarded against zero denominators.
            p = round(100 * (rt / (rt + er)), 4) if rt + er > 0 else 0
            r = round(100 * (rt / standard), 4) if standard > 0 else 0
            f1 = round(2 * p * r / (p + r), 4) if p + r > 0 else 0
            # Keep only the best F1 per criterion combination.
            if threshold_name in res_dict and res_dict[threshold_name][-1] >= f1:
                continue
            res_dict[threshold_name] = (threshold_value, standard, rt, er, p, r, f1)
    new_res_path = os.path.join(data_path, 'result', res_dir_name, 'new_res.txt')
    with open(new_res_path, 'w', encoding='utf-8') as f:
        for key, value in res_dict.items():
            f.write(key + '\t' + '\t'.join([str(x) for x in value]) + '\n')


def result_statistics_top2():
    """
    Reformat new_res.txt into human-readable rows: translate the six boolean
    switches into criterion names and keep only the enabled ones, writing the
    result to new_res_1.txt.

    :return: None
    """
    threshold_name_list = ['词频', 'PMI(互信息)', 'PMI3(改进互信息)', 'CC(关联置信度)', 'IE（左右信息熵）', 'vector']
    result_file_path = os.path.join(data_path, 'result', res_dir_name, 'new_res.txt')
    out_path = os.path.join(data_path, 'result', res_dir_name, 'new_res_1.txt')
    # Context managers close both handles (the input file was leaked before).
    with open(out_path, 'w', encoding='utf-8') as f1, \
            open(result_file_path, 'r', encoding='utf-8') as f_in:
        for line in f_in:
            ls = line.strip().split('\t')
            threshold_names = ls[:6]
            threshold_values = ls[6:12]
            res = [str(float(x)) for x in ls[12:]]
            name_str = []
            value_str = []
            # Keep only criteria whose switch reads 'True'.
            for flag, label, value in zip(threshold_names, threshold_name_list, threshold_values):
                if flag == 'True':
                    name_str.append(label)
                    value_str.append(value)
            f1.write('N-gram+' + '+'.join(name_str) + '\t' + '\t'.join(res) + '\t' + '+'.join(value_str) + '\n')


if __name__ == '__main__':
    # main(test=False)
    # result_statistics()
    # result_statistics_top2()
    # Run the detailed evaluation once per segmentation mode (0..3).
    for cut_mode in range(4):
        main_detail(cut_mod=cut_mode)
