#!/usr/bin/env python 
# -*- coding:utf-8 -*-
'''
@File    :   main.py
@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/4/12 0012 11:11   st      1.0         None
'''
import time

import process.data_pretreatment as dp
from similar.bert_process import temp_word_dict
from utils.constent import *
from process import similar_process as sp
from process import data_export_process as dep


def get_result_dict_threshold(results, pmi=False, percent=0.7):
    """
    Derive a threshold as a fraction of the maximum value found across result dicts.

    :param results: list of dicts; values are numbers, or tuples when ``pmi`` is True
    :param pmi: when True each dict value is a tuple ``(pmi, pmi_mini)`` and only
                the first element is considered
    :param percent: fraction of the maximum used as the threshold
    :return: ``max(all values) * percent``; 0 when every dict is empty
    """
    values = []
    for result in results:
        if pmi:
            # PMI results store (pmi, pmi_mini) pairs; use the main PMI value.
            values.extend(v[0] for v in result.values())
        else:
            values.extend(result.values())
    # default=0 prevents ValueError when all result dicts are empty.
    return max(values, default=0) * percent


def word_classify(values, thresholds):
    """
    Classify a candidate word group by its four statistics.

    :param values: (freq, pmi, conf, H_min)
    :param thresholds: (threshold_freq, threshold_pmi, threshold_conf, threshold_H)
    :return: 'none' (meaningless), 'low' (low frequency) or 'high' (high frequency)
    """
    freq, pmi, conf, h_min = values
    t_freq, t_pmi, t_conf, t_h = thresholds

    weak_pmi = pmi <= t_pmi
    weak_conf = conf <= t_conf
    weak_h = h_min <= t_h

    # Meaningless: all three quality measures are at or below their thresholds.
    if weak_pmi and weak_conf and weak_h:
        return 'none'
    # At or below the frequency threshold it can only be low-frequency.
    if freq <= t_freq:
        return 'low'
    # High-frequency range: weak PMI combined with a weak conf or entropy demotes it.
    if weak_pmi and (weak_conf or weak_h):
        return 'low'
    return 'high'


def create_ngram_result_dict(root):
    """
    Build the per-order ngram result dictionary: for every n-gram order collect
    the raw statistic dicts together with their derived thresholds.

    :param root: model root exposing ``search_ngram_by_num(num)``
    :return: {num: {'pmi': (dict, thr), 'freq': (dict, thr),
                    'conf': (dict, thr), 'H': (left_dict, right_dict, thr)}}
    """
    result_dict = {}
    for num in range(NGRAM_MINI + 1, NGRAM_MAX + 1):
        pmi_d, freq_d, conf_d, left_d, right_d = root.search_ngram_by_num(num)
        result_dict[num] = {
            'pmi': (pmi_d, get_result_dict_threshold([pmi_d], pmi=True)),
            'freq': (freq_d, get_result_dict_threshold([freq_d])),
            'conf': (conf_d, get_result_dict_threshold([conf_d])),
            # one entropy threshold shared by left and right neighbour entropy
            'H': (left_d, right_d, get_result_dict_threshold([left_d, right_d])),
        }
    return result_dict


def words_classify_sort(words_classify_dict):
    """
    Score and sort every candidate-word category in place.

    Fix: the category name is now forwarded as ``wc`` so that the costly
    similarity computation in ``sort_high_words_KN`` only runs for the
    'low' and 'none' categories; previously every call fell back to the
    default ``wc='none'`` and ran the similarity step for 'high' words too.

    :param words_classify_dict: {'high': [...], 'low': [...], 'none': [...]}
    :return: the same dict with each list sorted by descending K
    """
    for category in ('high', 'low', 'none'):
        words_classify_dict[category] = sort_high_words_KN(
            words_classify_dict[category], wc=category)
    return words_classify_dict


def sort_high_words_KN(data_list, wc='none'):
    """
    Score candidate word groups with a weighted information value K and return
    them sorted in descending K order.

    :param data_list: list of (words, (freq, pmi, pmi_mini, conf, H_min))
    :param wc: category name; for 'low'/'none' an averaged similarity score is
               added to K and the per-source averages are kept in the result
    :return: list of (words, K, (freq, pmi, pmi_mini, conf, H_min),
             (sim_bert, sim_hownet, sim_handian)) sorted by K descending
    """
    if not data_list:
        return data_list
    need_sim = wc in ('low', 'none')
    voc_dict = temp_word_dict(data_list)
    scored = []
    for words, (freq, pmi, pmi_mini, conf, H_min) in data_list:
        # Weighted average information value.
        K = freq * K_freq + pmi * K_pmi + conf * K_conf + H_min * K_H
        sim_bert, sim_hownet, sim_handian = 0, 0, 0
        if need_sim:
            sim_aveg, sim_bert, sim_hownet, sim_handian = sp.get_words_sim(words, voc_dict)
            K += sim_aveg
        scored.append((words, K, (freq, pmi, pmi_mini, conf, H_min),
                       (sim_bert, sim_hownet, sim_handian)))
    scored.sort(key=lambda item: item[1], reverse=True)
    return scored


def main():
    """
    End-to-end pipeline: segment raw data, build the ngram model, classify
    candidate word groups by frequency, score/sort them and export the results.
    """
    # 1. Segment the raw data.
    dp.matadata_cut_txt()
    # 2. Word-frequency statistics.
    dp.matadata_word_frequency()
    # 3. Model initialisation.
    print('模型初始化')
    # root = dp.init_model(is_new=True, single=False, model_new=False)
    root = dp.init_model(is_new=True, single=False, model_new=True)
    print('ngram_候选词，四参数计算统计')

    ngram_dict = create_ngram_result_dict(root)
    # NOTE(review): leftover debug call — the result is unused; confirm whether
    # search_ngram_N() has required side effects before deleting it.
    test_result = root.search_ngram_N()

    all_datas = []
    print('根据四个参数进行待选词组高低频分类、并进行高频词K值计算、低频次相似度计算--排序')
    start_time = time.time()
    # Context manager ensures the file handle is closed; iterating the handle
    # lazily avoids loading the whole file into memory via readlines().
    with open(matadata_path_words, 'r', encoding='utf-8') as fin:
        for index, line in enumerate(fin):
            # NOTE(review): debug limit — only the first 21 lines are processed;
            # remove once testing is done.
            if index > 20:
                break
            fields = line.strip().split('\t')
            doc_id = fields[0]  # renamed from `id` to avoid shadowing the builtin
            words_list = fields[-1].split(' ')
            # Buckets for high-frequency, low-frequency and meaningless candidates.
            words_classify_dict = {'high': [], 'low': [], 'none': []}
            for num in range(NGRAM_MINI + 1, NGRAM_MAX + 1):
                result_freq, threshold_freq = ngram_dict[num]['freq']
                result_pmi, threshold_pmi = ngram_dict[num]['pmi']
                result_conf, threshold_conf = ngram_dict[num]['conf']
                result_left, result_right, threshold_H = ngram_dict[num]['H']
                # All word groups of order `num` for this document.
                ngram_words = dp.create_ngram(words_list, num, num)
                for words in ngram_words:
                    words_char = CHAR_CONNECTOR.join(words)
                    freq = result_freq.get(words_char, 0)
                    pmi, pmi_mini = result_pmi.get(words_char, (0, 0))
                    conf = result_conf.get(words_char, 0)
                    # Branch entropy: the weaker (smaller) side decides.
                    H_min = min(result_left.get(words_char, 0),
                                result_right.get(words_char, 0))
                    res = word_classify((freq, pmi, conf, H_min),
                                        (threshold_freq, threshold_pmi,
                                         threshold_conf, threshold_H))
                    words_classify_dict[res].append(
                        (words, (freq, pmi, pmi_mini, conf, H_min)))
            # Score and sort the candidate word groups per category.
            words_classify_dict = words_classify_sort(words_classify_dict)
            all_datas.append({'id': doc_id, 'words_list': words_list,
                              'words_classify': words_classify_dict})
            # Progress report every 50 documents.
            if (index + 1) % 50 == 0:
                end_time = time.time()
                print('数据处理中。。。。：', index + 1, '-耗时：', end_time - start_time)
                start_time = end_time

    print('结果持久化')
    dep.data_export_txt(all_datas)
    dep.data_export_excel(all_datas)
    dep.data_export_word_info(all_datas)
    print('------------done')


if __name__ == '__main__':
    main()
