#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@File    :   main.py
@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2022/4/12 0012 11:11   st      1.0         None
'''
import math
from utils.constent import *


class Node(object):
    """A single trie node used for word discovery statistics."""

    def __init__(self, char):
        # Character held by this node.
        self.char = char
        # Children of this node, keyed by character.
        self.next = {}
        # True when some inserted word ends exactly at this node.
        self.word_ended = False
        # Number of times a word ended here (word frequency).
        self.count = 0
        # Marks the end of a rotated word (b->c->a for "abc");
        # used when computing left entropy.
        self.postfix = False
        # Number of times a rotated word ended here.
        self.count_postfix = 0


class TrieNode(object):
    """
    建立前缀树，并且包含统计词频，计算左右熵，计算互信息的方法
    """

    def __init__(self, node, data=None, PMI_limit=20):
        """Build the trie root and optionally preload unigram frequencies.

        :param node: value stored on the root node
        :param data: optional mapping of word -> frequency; each entry is
            loaded as a completed 1-gram child of the root
        :param PMI_limit: PMI threshold kept on the instance for later use
        """
        self.root = Node(node)
        self.PMI_limit = PMI_limit
        if data:
            for key, value in data.items():
                child = Node(key)
                child.count = int(value)
                child.word_ended = True
                self.root.next[key] = child

    def add(self, word):
        """Insert ``word`` into the trie.

        Two paths are recorded:

        * the word itself, a -> b -> c, bumping ``count`` / ``word_ended``
          on the final node (word frequency, right entropy);
        * for words of length >= 2, the left-rotated word b -> c -> a,
          bumping ``count_postfix`` / ``postfix`` on the final node.
          This trick lets the left entropy of "bc" be read off the children
          of the b -> c path, because the original left neighbour "a" now
          sits to the right of it.

        :param word: sequence of characters/tokens to insert
        :return: None
        """
        last = len(word) - 1
        node = self.root
        for index, char in enumerate(word):
            # Single dict lookup instead of __contains__ + get.
            child = node.next.get(char)
            if child is None:
                # Character not on the path yet: extend a -> b -> c.
                child = Node(char)
                node.next[char] = child
            node = child
            if index == last:
                # Whole word consumed: bump its frequency.
                node.count += 1
                node.word_ended = True

        # Record the rotated form (first char moved to the end) so left
        # entropy can later be computed from this suffix path.
        length = len(word)
        if length >= 2:
            chars = list(word)
            rotated = chars[1:] + chars[:1]
            node = self.root
            for index, char in enumerate(rotated):
                child = node.next.get(char)
                if child is None:
                    child = Node(char)
                    node.next[char] = child
                node = child
                if index == length - 1:
                    # Rotated word consumed: bump the postfix counters.
                    node.count_postfix += 1
                    node.postfix = True

    def search_ngram_by_num(self, num=2):
        if num == 2:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_2()
            _, _, result_left, result_right, _, _ = self.search_ngram_3()
        elif num == 3:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_3()
            _, _, result_left, result_right, _, _ = self.search_ngram_4()
        elif num == 4:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_4()
            _, _, result_left, result_right, _, _ = self.search_ngram_5()
        elif num == 5:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_5()
            _, _, result_left, result_right, _, _ = self.search_ngram_6()
        elif num == 6:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_6()
            _, _, result_left, result_right, _, _ = self.search_ngram_7()
        elif num == 7:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_7()
            _, _, result_left, result_right, _, _ = self.search_ngram_8()
        elif num == 8:
            result_pmi, result_freq, _, _, result_conf, _ = self.search_ngram_8()
            _, _, result_left, result_right, _ = self.search_ngram_9()
        else:
            return None, None, None, None

        return result_pmi, result_freq, result_conf, result_left, result_right

    def search_ngram_1(self):
        """
        计算互信息: 寻找一阶共现，并返回词概率
        :return:
        """
        result = dict()
        node = self.root
        if not node.next:
            return False, 0

        # 计算 1 gram 总的出现次数
        total = 0
        for key_1 in node.next.keys():
            child = node.next.get(key_1)
            if child.word_ended:
                total += child.count

        # 计算 当前词 占整体的比例
        for key_1 in node.next.keys():
            child = node.next.get(key_1)
            if child.word_ended:
                result[child.char] = child.count / total
        return result

    def search_ngram_2(self):
        """Compute bigram statistics: PMI, frequency, entropy, confidence.

        :return: (pmi, freq, left_entropy, right_entropy, confidence, p)
            where pmi values are (PMI, PMI_MINI) tuples keyed by
            "a<CHAR_CONNECTOR>b", or (False, 0) when the trie is empty.
        """
        result_pmi = {}
        result_freq = {}
        result_p = {}
        result_cd = {}
        result_left = {}
        result_right = {}
        root = self.root
        if not root.next:
            return False, 0

        # Unigram probabilities P(x), used in the PMI denominator.
        ngram_dict_1 = self.search_ngram_1()

        # Total frequency of all completed bigrams -> denominator of P(ab).
        total = 0
        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                if child_2.word_ended:
                    total += child_2.count

        for child_1 in root.next.values():
            # Confidence denominator: frequency of the 1-gram prefix.
            count_cd = child_1.count if child_1.word_ended else 0
            # Entropy mass over the second-level children.
            total_left = 0
            total_right = 0
            p_left = 0.0
            p_right = 0.0
            for child_2 in child_1.next.values():
                if child_2.word_ended:
                    total_right += child_2.count
                if child_2.postfix:
                    total_left += child_2.count_postfix

            for child_2 in child_1.next.values():
                char_count = child_2.count
                if child_2.word_ended:
                    key = child_1.char + CHAR_CONNECTOR + child_2.char
                    # PMI = P(ab) / (P(a) * P(b)); for bigrams there is
                    # only one split, so PMI and PMI_MINI coincide.
                    PMI_MINI = (char_count / total) / (ngram_dict_1[child_1.char] *
                                                       ngram_dict_1[child_2.char])
                    PMI = PMI_MINI
                    result_pmi[key] = (PMI, PMI_MINI)
                    result_freq[key] = char_count
                    result_p[key] = char_count / total
                    # Confidence: how often the bigram follows given the
                    # prefix occurred on its own.
                    result_cd[key] = self.get_div(char_count, count_cd) if count_cd else 0
                    # Accumulate right entropy term p * log2(p).
                    ratio = self.get_div(char_count, total_right)
                    p_right += ratio * math.log(ratio, 2)
                if child_2.postfix:
                    # Accumulate left entropy term from the rotated counts.
                    ratio = self.get_div(child_2.count_postfix, total_left)
                    p_left += ratio * math.log(ratio, 2)

            result_left[child_1.char] = -p_left
            result_right[child_1.char] = -p_right

        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_3(self):
        """Compute trigram statistics: PMI, frequency, entropy, confidence.

        :return: (pmi, freq, left_entropy, right_entropy, confidence, p);
            entropy dicts are keyed by the 2-gram prefix "a<SEP>b".
            Returns (False, 0) when the trie is empty.
        """
        result_pmi = {}
        result_freq = {}
        result_p = {}
        result_cd = {}
        result_left = {}
        result_right = {}
        root = self.root
        if not root.next:
            return False, 0
        ngram_dict_1 = self.search_ngram_1()
        _, _, _, _, _, ngram_dict_2 = self.search_ngram_2()

        # Total frequency of all completed trigrams -> denominator of P(abc).
        total = 0
        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    if child_3.word_ended:
                        total += child_3.count

        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                # Entropy mass over the third-level children.
                total_left = 0
                total_right = 0
                p_left = 0.0
                p_right = 0.0
                for child in child_2.next.values():
                    if child.postfix:
                        total_left += child.count_postfix
                    if child.word_ended:
                        total_right += child.count
                # Confidence denominator: frequency of the 2-gram prefix.
                count_cd = child_2.count if child_2.word_ended else 0

                for child_3 in child_2.next.values():
                    if child_3.word_ended:
                        char_count = child_3.count
                        key = child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char
                        # min over split points: P(abc)/(P(a)P(bc)), P(abc)/(P(ab)P(c))
                        PMI_MINI = self.get_pmi(char_count / total,
                                                [child_1.char, child_2.char, child_3.char],
                                                [ngram_dict_1, ngram_dict_2])
                        # Pointwise: P(abc) / (P(a) * P(b) * P(c)).
                        PMI = (char_count / total) / (ngram_dict_1[child_1.char] *
                                                      ngram_dict_1[child_2.char] *
                                                      ngram_dict_1[child_3.char])
                        result_pmi[key] = (PMI, PMI_MINI)
                        result_freq[key] = char_count
                        result_p[key] = char_count / total
                        # Confidence: P(trigram | 2-gram prefix occurred).
                        result_cd[key] = char_count / count_cd if count_cd else 0
                        ratio = self.get_div(char_count, total_right)
                        p_right += ratio * math.log(ratio, 2)
                    if child_3.postfix:
                        ratio = self.get_div(child_3.count_postfix, total_left)
                        p_left += ratio * math.log(ratio, 2)

                prefix = child_1.char + CHAR_CONNECTOR + child_2.char
                result_left[prefix] = -p_left
                result_right[prefix] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_4(self):
        """Compute 4-gram statistics: PMI, frequency, entropy, confidence.

        :return: (pmi, freq, left_entropy, right_entropy, confidence, p);
            entropy dicts are keyed by the 3-gram prefix.
            Returns (False, 0) when the trie is empty.
        """
        result_pmi = {}
        result_freq = {}
        result_p = {}
        result_cd = {}
        result_left = {}
        result_right = {}
        root = self.root
        if not root.next:
            return False, 0
        ngram_dict_1 = self.search_ngram_1()
        _, _, _, _, _, ngram_dict_2 = self.search_ngram_2()
        _, _, _, _, _, ngram_dict_3 = self.search_ngram_3()

        # Total frequency of all completed 4-grams -> denominator of P(abcd).
        total = 0
        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        if child_4.word_ended:
                            total += child_4.count

        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    # Entropy mass over the fourth-level children.
                    total_left = 0
                    total_right = 0
                    p_left = 0.0
                    p_right = 0.0
                    # Confidence denominator: frequency of the 3-gram prefix.
                    count_cd = child_3.count if child_3.word_ended else 0
                    for child in child_3.next.values():
                        if child.postfix:
                            total_left += child.count_postfix
                        if child.word_ended:
                            total_right += child.count

                    for child_4 in child_3.next.values():
                        if child_4.word_ended:
                            char_count = child_4.count
                            key = (child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR
                                   + child_3.char + CHAR_CONNECTOR + child_4.char)
                            # min over split points: P(abcd)/(P(a)P(bcd)),
                            # P(abcd)/(P(ab)P(cd)), P(abcd)/(P(abc)P(d))
                            PMI_MINI = self.get_pmi(char_count / total,
                                                    [child_1.char, child_2.char, child_3.char, child_4.char],
                                                    [ngram_dict_1, ngram_dict_2, ngram_dict_3])
                            # Pointwise: P(abcd) / (P(a)P(b)P(c)P(d)).
                            PMI = (char_count / total) / (ngram_dict_1[child_1.char] *
                                                          ngram_dict_1[child_2.char] *
                                                          ngram_dict_1[child_3.char] *
                                                          ngram_dict_1[child_4.char])
                            result_pmi[key] = (PMI, PMI_MINI)
                            result_freq[key] = char_count
                            result_p[key] = char_count / total
                            # Confidence: P(4-gram | 3-gram prefix occurred).
                            result_cd[key] = char_count / count_cd if count_cd else 0
                            ratio = self.get_div(char_count, total_right)
                            p_right += ratio * math.log(ratio, 2)
                        if child_4.postfix:
                            ratio = self.get_div(child_4.count_postfix, total_left)
                            p_left += ratio * math.log(ratio, 2)

                    prefix = child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char
                    result_left[prefix] = -p_left
                    result_right[prefix] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_5(self):
        """Compute 5-gram statistics: PMI, frequency, entropy, confidence.

        :return: (pmi, freq, left_entropy, right_entropy, confidence, p);
            entropy dicts are keyed by the 4-gram prefix.
            Returns (False, 0) when the trie is empty.
        """
        result_pmi = {}
        result_freq = {}
        result_p = {}
        result_cd = {}
        result_left = {}
        result_right = {}
        root = self.root
        if not root.next:
            return False, 0
        ngram_dict_1 = self.search_ngram_1()
        _, _, _, _, _, ngram_dict_2 = self.search_ngram_2()
        _, _, _, _, _, ngram_dict_3 = self.search_ngram_3()
        _, _, _, _, _, ngram_dict_4 = self.search_ngram_4()

        # Total frequency of all completed 5-grams -> denominator of P(abcde).
        total = 0
        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        for child_5 in child_4.next.values():
                            if child_5.word_ended:
                                total += child_5.count

        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        # Entropy mass over the fifth-level children.
                        total_left = 0
                        total_right = 0
                        p_left = 0.0
                        p_right = 0.0
                        # Confidence denominator: frequency of the 4-gram prefix.
                        count_cd = child_4.count if child_4.word_ended else 0
                        for child in child_4.next.values():
                            if child.postfix:
                                total_left += child.count_postfix
                            if child.word_ended:
                                total_right += child.count

                        for child_5 in child_4.next.values():
                            if child_5.word_ended:
                                char_count = child_5.count
                                key = (child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR
                                       + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR
                                       + child_5.char)
                                # min over split points: P(abcde)/(P(a)P(bcde)),
                                # P(abcde)/(P(ab)P(cde)), ..., P(abcde)/(P(abcd)P(e))
                                PMI_MINI = self.get_pmi(char_count / total,
                                                        [child_1.char, child_2.char, child_3.char,
                                                         child_4.char, child_5.char],
                                                        [ngram_dict_1, ngram_dict_2, ngram_dict_3,
                                                         ngram_dict_4])
                                # Pointwise: P(abcde) / (P(a)P(b)P(c)P(d)P(e)).
                                PMI = (char_count / total) / (ngram_dict_1[child_1.char] *
                                                              ngram_dict_1[child_2.char] *
                                                              ngram_dict_1[child_3.char] *
                                                              ngram_dict_1[child_4.char] *
                                                              ngram_dict_1[child_5.char])
                                result_pmi[key] = (PMI, PMI_MINI)
                                result_freq[key] = char_count
                                result_p[key] = char_count / total
                                # Confidence: P(5-gram | 4-gram prefix occurred).
                                result_cd[key] = char_count / count_cd if count_cd else 0
                                ratio = self.get_div(char_count, total_right)
                                p_right += ratio * math.log(ratio, 2)
                            if child_5.postfix:
                                ratio = self.get_div(child_5.count_postfix, total_left)
                                p_left += ratio * math.log(ratio, 2)

                        prefix = (child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR
                                  + child_3.char + CHAR_CONNECTOR + child_4.char)
                        result_left[prefix] = -p_left
                        result_right[prefix] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_6(self):
        """Compute 6-gram statistics: PMI, frequency, entropy, confidence.

        :return: (pmi, freq, left_entropy, right_entropy, confidence, p);
            entropy dicts are keyed by the 5-gram prefix.
            Returns (False, 0) when the trie is empty.
        """
        result_pmi = {}
        result_freq = {}
        result_p = {}
        result_cd = {}
        result_left = {}
        result_right = {}
        root = self.root
        if not root.next:
            return False, 0
        ngram_dict_1 = self.search_ngram_1()
        _, _, _, _, _, ngram_dict_2 = self.search_ngram_2()
        _, _, _, _, _, ngram_dict_3 = self.search_ngram_3()
        _, _, _, _, _, ngram_dict_4 = self.search_ngram_4()
        _, _, _, _, _, ngram_dict_5 = self.search_ngram_5()

        # Total frequency of all completed 6-grams -> denominator of P(abcdef).
        total = 0
        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        for child_5 in child_4.next.values():
                            for child_6 in child_5.next.values():
                                if child_6.word_ended:
                                    total += child_6.count

        for child_1 in root.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        for child_5 in child_4.next.values():
                            # Entropy mass over the sixth-level children.
                            total_left = 0
                            total_right = 0
                            p_left = 0.0
                            p_right = 0.0
                            # Confidence denominator: frequency of the
                            # 5-gram prefix.
                            count_cd = child_5.count if child_5.word_ended else 0
                            for child in child_5.next.values():
                                if child.postfix:
                                    total_left += child.count_postfix
                                if child.word_ended:
                                    total_right += child.count

                            for child_6 in child_5.next.values():
                                if child_6.word_ended:
                                    char_count = child_6.count
                                    key = (child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR
                                           + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR
                                           + child_5.char + CHAR_CONNECTOR + child_6.char)
                                    # min over split points: P(abcdef)/(P(a)P(bcdef)),
                                    # ..., P(abcdef)/(P(abcde)P(f))
                                    PMI_MINI = self.get_pmi(char_count / total,
                                                            [child_1.char, child_2.char, child_3.char,
                                                             child_4.char, child_5.char, child_6.char],
                                                            [ngram_dict_1, ngram_dict_2, ngram_dict_3,
                                                             ngram_dict_4, ngram_dict_5])
                                    # Pointwise: P(abcdef) / product of unigram probs.
                                    PMI = (char_count / total) / (ngram_dict_1[child_1.char] *
                                                                  ngram_dict_1[child_2.char] *
                                                                  ngram_dict_1[child_3.char] *
                                                                  ngram_dict_1[child_4.char] *
                                                                  ngram_dict_1[child_5.char] *
                                                                  ngram_dict_1[child_6.char])
                                    result_pmi[key] = (PMI, PMI_MINI)
                                    result_freq[key] = char_count
                                    result_p[key] = char_count / total
                                    # Confidence: P(6-gram | 5-gram prefix occurred).
                                    result_cd[key] = char_count / count_cd if count_cd else 0
                                    ratio = self.get_div(char_count, total_right)
                                    p_right += ratio * math.log(ratio, 2)
                                if child_6.postfix:
                                    ratio = self.get_div(child_6.count_postfix, total_left)
                                    p_left += ratio * math.log(ratio, 2)

                            prefix = (child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR
                                      + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR
                                      + child_5.char)
                            result_left[prefix] = -p_left
                            result_right[prefix] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_7(self):
        """
        Compute 7-gram statistics over the trie.

        First pass sums the frequency of every completed 7-gram; the second
        pass computes, per completed 7-gram: PMI against the unigram product,
        minimum two-way-split PMI (via get_pmi), frequency, probability and
        confidence, plus left/right entropy keyed by the leading 6-gram.

        :return: (result_pmi, result_freq, result_left, result_right,
                  result_cd, result_p) keyed by CHAR_CONNECTOR-joined words,
                  or the sentinel (False, 0) when the trie is empty.
        """
        result_pmi = dict()
        result_freq = dict()
        result_p = dict()
        result_cd = dict()
        result_left = dict()
        result_right = dict()
        node = self.root
        # Empty trie: historical sentinel return shape.
        if not node.next:
            return False, 0
        # Lower-order probability tables used for the PMI denominators.
        # NOTE(review): search_bi() unpacks search_ngram_1() into (dict, total),
        # while here its raw return value is indexed like a dict — confirm the
        # actual return shape of search_ngram_1().
        ngram_dict_1 = self.search_ngram_1()
        _, _, _, _, _, ngram_dict_2 = self.search_ngram_2()
        _, _, _, _, _, ngram_dict_3 = self.search_ngram_3()
        _, _, _, _, _, ngram_dict_4 = self.search_ngram_4()
        _, _, _, _, _, ngram_dict_5 = self.search_ngram_5()
        _, _, _, _, _, ngram_dict_6 = self.search_ngram_6()
        # First pass: total frequency of all completed 7-grams.
        total = 0
        for key_1 in node.next.keys():
            # 1st word of the 7-gram
            child_1 = node.next.get(key_1)
            for key_2 in child_1.next.keys():
                # 2nd word of the 7-gram
                child_2 = child_1.next.get(key_2)
                for key_3 in child_2.next.keys():
                    # 3rd word of the 7-gram
                    child_3 = child_2.next.get(key_3)
                    for key_4 in child_3.next.keys():
                        # 4th word of the 7-gram
                        child_4 = child_3.next.get(key_4)
                        for key_5 in child_4.next.keys():
                            # 5th word of the 7-gram
                            child_5 = child_4.next.get(key_5)
                            for key_6 in child_5.next.keys():
                                # 6th word of the 7-gram
                                child_6 = child_5.next.get(key_6)
                                for key_7 in child_6.next.keys():
                                    # 7th word of the 7-gram
                                    child_7 = child_6.next.get(key_7)
                                    if child_7.word_ended:
                                        total += child_7.count
        # Second pass: per-7-gram statistics.
        for key_1 in node.next.keys():
            # 1st word of the 7-gram
            child_1 = node.next.get(key_1)
            for key_2 in child_1.next.keys():
                # 2nd word of the 7-gram
                child_2 = child_1.next.get(key_2)
                for key_3 in child_2.next.keys():
                    # 3rd word of the 7-gram
                    child_3 = child_2.next.get(key_3)
                    for key_4 in child_3.next.keys():
                        # 4th word of the 7-gram
                        child_4 = child_3.next.get(key_4)
                        for key_5 in child_4.next.keys():
                            # 5th word of the 7-gram
                            child_5 = child_4.next.get(key_5)
                            for key_6 in child_5.next.keys():
                                # 6th word of the 7-gram
                                child_6 = child_5.next.get(key_6)
                                # --------- entropy denominators for the leading 6-gram
                                # left entropy
                                total_left = 0
                                p_left = 0.0
                                # right entropy
                                total_right = 0
                                p_right = 0.0
                                # confidence denominator: count of the leading 6-gram itself
                                if child_6.word_ended:
                                    count_cd = child_6.count
                                else:
                                    count_cd = 0
                                for key in child_6.next.keys():
                                    child = child_6.next[key]
                                    if child.postfix:
                                        total_left += child.count_postfix
                                    if child.word_ended:
                                        total_right += child.count
                                # ---------------------------
                                for key_7 in child_6.next.keys():
                                    # 7th word of the 7-gram
                                    child_7 = child_6.next.get(key_7)
                                    if child_7.word_ended:
                                        char_count = child_7.count
                                        # minimum over all two-way splits of
                                        # P(w1..w7) / (P(prefix) * P(suffix))
                                        PMI_MINI = self.get_pmi(char_count / total,
                                                                [child_1.char, child_2.char, child_3.char,
                                                                 child_4.char, child_5.char, child_6.char,
                                                                 child_7.char],
                                                                [ngram_dict_1, ngram_dict_2, ngram_dict_3,
                                                                 ngram_dict_4, ngram_dict_5, ngram_dict_6])

                                        # plain PMI against the unigram product
                                        PMI = (char_count / total) / (
                                                ngram_dict_1[child_1.char] * ngram_dict_1[child_2.char] *
                                                ngram_dict_1[child_3.char] * ngram_dict_1[child_4.char] *
                                                ngram_dict_1[child_5.char] * ngram_dict_1[child_6.char] *
                                                ngram_dict_1[child_7.char])
                                        result_pmi[
                                            child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char] = (
                                        PMI, PMI_MINI)
                                        result_freq[
                                            child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char] = char_count
                                        result_p[
                                            child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char] = char_count / total
                                        # Confidence: how often the full 7-gram occurs
                                        # given that the leading 6-gram occurred.
                                        if count_cd:
                                            CD = char_count / count_cd
                                        else:
                                            CD = 0
                                        result_cd[
                                            child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char] = CD
                                        # right-entropy contribution (sign flipped below)
                                        p_right += self.get_div(char_count, total_right) * math.log(
                                            self.get_div(char_count, total_right), 2)
                                    if child_7.postfix:
                                        # left-entropy contribution (sign flipped below)
                                        p_left += self.get_div(child_7.count_postfix, total_left) * math.log(
                                            self.get_div(child_7.count_postfix, total_left), 2)
                                result_left[
                                    child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char] = -p_left
                                result_right[
                                    child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_8(self):
        """
        Compute 8-gram statistics over the trie.

        Mirrors search_ngram_7 one level deeper: the first pass sums the
        frequency of every completed 8-gram; the second pass computes, per
        completed 8-gram, PMI (plain and minimum-split), frequency,
        probability and confidence, plus left/right entropy keyed by the
        leading 7-gram.

        Bug fixed: the innermost loop of the second pass iterated
        ``child_6.next`` but looked the keys up in ``child_7.next``, so
        8-grams whose last word was not also a child of ``child_6`` were
        silently skipped (masked by an ``if not child_8: continue`` guard)
        and the entropy sums ran over the wrong key set.

        :return: (result_pmi, result_freq, result_left, result_right,
                  result_cd, result_p) keyed by CHAR_CONNECTOR-joined words,
                  or the sentinel (False, 0) when the trie is empty.
        """
        result_pmi = dict()
        result_freq = dict()
        result_p = dict()
        result_cd = dict()
        result_left = dict()
        result_right = dict()
        node = self.root
        # Empty trie: historical sentinel return shape.
        if not node.next:
            return False, 0
        # Lower-order probability tables for the PMI denominators.
        # NOTE(review): search_bi() unpacks search_ngram_1() into (dict, total);
        # here the raw return value is indexed like a dict — confirm its shape.
        ngram_dict_1 = self.search_ngram_1()
        _, _, _, _, _, ngram_dict_2 = self.search_ngram_2()
        _, _, _, _, _, ngram_dict_3 = self.search_ngram_3()
        _, _, _, _, _, ngram_dict_4 = self.search_ngram_4()
        _, _, _, _, _, ngram_dict_5 = self.search_ngram_5()
        _, _, _, _, _, ngram_dict_6 = self.search_ngram_6()
        _, _, _, _, _, ngram_dict_7 = self.search_ngram_7()
        # First pass: total frequency of all completed 8-grams.
        total = 0
        for child_1 in node.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        for child_5 in child_4.next.values():
                            for child_6 in child_5.next.values():
                                for child_7 in child_6.next.values():
                                    for child_8 in child_7.next.values():
                                        if child_8.word_ended:
                                            total += child_8.count
        # Second pass: per-8-gram statistics.
        for child_1 in node.next.values():
            for child_2 in child_1.next.values():
                for child_3 in child_2.next.values():
                    for child_4 in child_3.next.values():
                        for child_5 in child_4.next.values():
                            for child_6 in child_5.next.values():
                                for child_7 in child_6.next.values():
                                    # Key of the leading 7-gram; shared by the
                                    # entropy dicts and all 8-gram keys below.
                                    prefix = CHAR_CONNECTOR.join(
                                        [child_1.char, child_2.char, child_3.char, child_4.char,
                                         child_5.char, child_6.char, child_7.char])
                                    # --------- entropy denominators for the leading 7-gram
                                    # left entropy
                                    total_left = 0
                                    p_left = 0.0
                                    # right entropy
                                    total_right = 0
                                    p_right = 0.0
                                    # confidence denominator: count of the leading 7-gram itself
                                    count_cd = child_7.count if child_7.word_ended else 0
                                    for child in child_7.next.values():
                                        if child.postfix:
                                            total_left += child.count_postfix
                                        if child.word_ended:
                                            total_right += child.count
                                    # ---------------------------
                                    # FIX: iterate child_7's own children (was child_6's keys).
                                    for child_8 in child_7.next.values():
                                        key = prefix + CHAR_CONNECTOR + child_8.char
                                        if child_8.word_ended:
                                            char_count = child_8.count
                                            # minimum over all two-way splits of
                                            # P(w1..w8) / (P(prefix) * P(suffix))
                                            PMI_MINI = self.get_pmi(char_count / total,
                                                                    [child_1.char, child_2.char, child_3.char,
                                                                     child_4.char, child_5.char, child_6.char,
                                                                     child_7.char, child_8.char],
                                                                    [ngram_dict_1, ngram_dict_2, ngram_dict_3,
                                                                     ngram_dict_4, ngram_dict_5, ngram_dict_6,
                                                                     ngram_dict_7])
                                            # plain PMI against the unigram product
                                            PMI = (char_count / total) / (
                                                    ngram_dict_1[child_1.char] * ngram_dict_1[child_2.char] *
                                                    ngram_dict_1[child_3.char] * ngram_dict_1[child_4.char] *
                                                    ngram_dict_1[child_5.char] * ngram_dict_1[child_6.char] *
                                                    ngram_dict_1[child_7.char] * ngram_dict_1[child_8.char])
                                            result_pmi[key] = (PMI, PMI_MINI)
                                            result_freq[key] = char_count
                                            result_p[key] = char_count / total
                                            # Confidence: how often the full 8-gram occurs
                                            # given that the leading 7-gram occurred.
                                            CD = char_count / count_cd if count_cd else 0
                                            result_cd[key] = CD
                                            # right-entropy contribution (sign flipped below)
                                            p_right += self.get_div(char_count, total_right) * math.log(
                                                self.get_div(char_count, total_right), 2)
                                        if child_8.postfix:
                                            # left-entropy contribution (sign flipped below)
                                            p_left += self.get_div(child_8.count_postfix, total_left) * math.log(
                                                self.get_div(child_8.count_postfix, total_left), 2)
                                    result_left[prefix] = -p_left
                                    result_right[prefix] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd, result_p

    def search_ngram_9(self):
        """
        Compute 9-gram statistics over the trie.

        Unlike the lower-order siblings this variant only uses unigram
        probabilities, and it returns five dicts — no probability dict is
        produced.

        :return: (result_pmi, result_freq, result_left, result_right,
                  result_cd) keyed by CHAR_CONNECTOR-joined words, or the
                  sentinel (False, 0) when the trie is empty.
        """
        result_pmi = dict()
        result_freq = dict()
        result_cd = dict()
        result_left = dict()
        result_right = dict()
        node = self.root
        # Empty trie: historical sentinel return shape.
        if not node.next:
            return False, 0
        # NOTE(review): search_bi() unpacks search_ngram_1() into (dict, total),
        # while here its raw return value is indexed like a dict — confirm the
        # actual return shape of search_ngram_1().
        ngram_dict_1 = self.search_ngram_1()
        # First pass: total frequency of all completed 9-grams.
        total = 0
        for key_1 in node.next.keys():
            # 1st word of the 9-gram
            child_1 = node.next.get(key_1)
            for key_2 in child_1.next.keys():
                # 2nd word of the 9-gram
                child_2 = child_1.next.get(key_2)
                for key_3 in child_2.next.keys():
                    # 3rd word of the 9-gram
                    child_3 = child_2.next.get(key_3)
                    for key_4 in child_3.next.keys():
                        # 4th word of the 9-gram
                        child_4 = child_3.next.get(key_4)
                        for key_5 in child_4.next.keys():
                            # 5th word of the 9-gram
                            child_5 = child_4.next.get(key_5)
                            for key_6 in child_5.next.keys():
                                # 6th word of the 9-gram
                                child_6 = child_5.next.get(key_6)
                                for key_7 in child_6.next.keys():
                                    # 7th word of the 9-gram
                                    child_7 = child_6.next.get(key_7)
                                    for key_8 in child_7.next.keys():
                                        # 8th word of the 9-gram
                                        child_8 = child_7.next.get(key_8)
                                        for key_9 in child_8.next.keys():
                                            # 9th word of the 9-gram
                                            child_9 = child_8.next[key_9]
                                            if child_9.word_ended:
                                                total += child_9.count
        # Second pass: per-9-gram statistics.
        for key_1 in node.next.keys():
            # 1st word of the 9-gram
            child_1 = node.next.get(key_1)
            for key_2 in child_1.next.keys():
                # 2nd word of the 9-gram
                child_2 = child_1.next.get(key_2)
                for key_3 in child_2.next.keys():
                    # 3rd word of the 9-gram
                    child_3 = child_2.next.get(key_3)
                    for key_4 in child_3.next.keys():
                        # 4th word of the 9-gram
                        child_4 = child_3.next.get(key_4)
                        for key_5 in child_4.next.keys():
                            # 5th word of the 9-gram
                            child_5 = child_4.next.get(key_5)
                            for key_6 in child_5.next.keys():
                                # 6th word of the 9-gram
                                child_6 = child_5.next.get(key_6)
                                for key_7 in child_6.next.keys():
                                    # 7th word of the 9-gram
                                    child_7 = child_6.next.get(key_7)
                                    for key_8 in child_7.next.keys():
                                        # 8th word of the 9-gram
                                        child_8 = child_7.next.get(key_8)
                                        # --------- entropy denominators for the leading 8-gram
                                        # left entropy
                                        total_left = 0
                                        p_left = 0.0
                                        # right entropy
                                        total_right = 0
                                        p_right = 0.0
                                        # confidence denominator
                                        # NOTE(review): child_8 comes from .get() on an
                                        # existing key, so this guard can never fire.
                                        if not child_8:
                                            continue
                                        if child_8.word_ended:
                                            count_cd = child_8.count
                                        else:
                                            count_cd = 0
                                        for key in child_8.next.keys():
                                            child = child_8.next[key]
                                            if child.postfix:
                                                total_left += child.count_postfix
                                            if child.word_ended:
                                                total_right += child.count
                                        # ---------------------------
                                        for key_9 in child_8.next.keys():
                                            # 9th word of the 9-gram
                                            child_9 = child_8.next[key_9]
                                            if child_9.word_ended:
                                                char_count = child_9.count
                                                # NOTE(review): despite the name, this is the
                                                # plain PMI against the unigram product, not
                                                # the min-split PMI used by the other orders.
                                                PMI_MINI = (char_count / total) / (
                                                        ngram_dict_1[child_1.char] * ngram_dict_1[child_2.char] *
                                                        ngram_dict_1[child_3.char] * ngram_dict_1[child_4.char] *
                                                        ngram_dict_1[child_5.char] * ngram_dict_1[child_6.char] *
                                                        ngram_dict_1[child_7.char] * ngram_dict_1[child_8.char] *
                                                        ngram_dict_1[child_9.char])
                                                result_pmi[
                                                    child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char + CHAR_CONNECTOR + child_8.char + CHAR_CONNECTOR + child_9.char] = PMI_MINI
                                                result_freq[
                                                    child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char + CHAR_CONNECTOR + child_8.char + CHAR_CONNECTOR + child_9.char] = char_count
                                                # Confidence: how often the full 9-gram occurs
                                                # given that the leading 8-gram occurred.
                                                if count_cd:
                                                    CD = char_count / count_cd
                                                else:
                                                    CD = 0
                                                result_cd[
                                                    child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char + CHAR_CONNECTOR + child_8.char + CHAR_CONNECTOR + child_9.char] = CD
                                                # right-entropy contribution (sign flipped below)
                                                p_right += self.get_div(char_count, total_right) * math.log(
                                                    self.get_div(char_count, total_right), 2)
                                            if child_9.postfix:
                                                # left-entropy contribution (sign flipped below)
                                                p_left += self.get_div(child_9.count_postfix, total_left) * math.log(
                                                    self.get_div(child_9.count_postfix, total_left), 2)
                                        result_left[
                                            child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char + CHAR_CONNECTOR + child_8.char] = -p_left
                                        result_right[
                                            child_1.char + CHAR_CONNECTOR + child_2.char + CHAR_CONNECTOR + child_3.char + CHAR_CONNECTOR + child_4.char + CHAR_CONNECTOR + child_5.char + CHAR_CONNECTOR + child_6.char + CHAR_CONNECTOR + child_7.char + CHAR_CONNECTOR + child_8.char] = -p_right
        return result_pmi, result_freq, result_left, result_right, result_cd

    def get_pmi(self, p, word_list=None, dicts=None):
        """
        Minimum two-way-split PMI of an n-gram.

        For every split of ``word_list`` into a non-empty prefix and suffix,
        looks both parts up in the matching order-k probability dicts and
        computes ``p / (P(prefix) * P(suffix))``; returns the minimum over
        all splits for which both parts are known.

        Fixes: mutable default arguments replaced by None sentinels, and
        ``min()`` no longer raises ValueError when no split could be scored
        (0.0 is returned instead).

        :param p: joint probability of the whole word list.
        :param word_list: the n-gram as a list of words.
        :param dicts: dicts[k-1] maps CHAR_CONNECTOR-joined k-grams to their
                      probabilities.
        :return: the minimum split PMI, or 0.0 when no split was scorable.
        """
        word_list = [] if word_list is None else word_list
        dicts = [] if dicts is None else dicts
        pmi_list = []
        # Split after position i: prefix = word_list[:i + 1], suffix = the rest.
        for i in range(len(word_list) - 1):
            prefix_words = word_list[:i + 1]
            prefix_dict = dicts[len(prefix_words) - 1]
            prefix = CHAR_CONNECTOR.join(prefix_words)
            if prefix not in prefix_dict:
                continue
            suffix_words = word_list[i + 1:]
            suffix_dict = dicts[len(suffix_words) - 1]
            suffix = CHAR_CONNECTOR.join(suffix_words)
            if suffix not in suffix_dict:
                continue
            pmi_list.append(p / (prefix_dict[prefix] * suffix_dict[suffix]))
        # Previously min([]) raised ValueError when nothing matched.
        return min(pmi_list) if pmi_list else 0.0

    def get_div(self, a, b):
        """
        Safe division helper used by the entropy computations.

        :param a: numerator.
        :param b: denominator.
        :return: a / b as a float, or 0.0 when the denominator is zero
                 (never raises ZeroDivisionError).
        """
        return float(a) / b if b else 0.0

    def get_new_word(self, node, word_sequence):
        """
        Look up the occurrence count of ``word_sequence`` under ``node``.

        Walks the trie one child per word and returns the count stored at the
        final node when it terminates a word.

        Fixes two defects of the previous revision: the recursive call's
        result was dropped (sequences longer than one word always returned
        None), and a path missing from the trie raised KeyError instead of
        reporting 0. The recursion was replaced by an equivalent loop.

        :param node: trie node to start the walk from.
        :param word_sequence: non-empty sequence of words.
        :return: the count stored at the final node if it ends a word,
                 otherwise 0 (also 0 when the path does not exist).
        """
        current = node
        for word in word_sequence:
            current = current.next.get(word)
            if current is None:
                # Path absent from the trie.
                return 0
        return current.count if current.word_ended else 0

    def search_ngram_N(self):
        """
        Experimental generic N-gram statistics (orders N_mini..N_max) using
        two recursive trie walks instead of one hand-written method per order.

        NOTE(review): this method looks unfinished — several spots appear
        inconsistent (flagged inline) and debug prints are still present.
        Verify against the per-order search_ngram_* methods before relying
        on its output.

        :return: ngram_parames_dict mapping word length -> {joined words ->
                 {'freq', 'pmi', 'conf'/'cd', 'H': {'left', 'right'}}}, or the
                 sentinel (False, 0) when the trie is empty.
        """
        node = self.root
        if not node.next:
            return False, 0

        # NOTE(review): search_bi() unpacks search_ngram_1() into (dict, total);
        # here the raw return is used with .get() like a dict — confirm the
        # actual return shape of search_ngram_1().
        ngram_dict_1 = self.search_ngram_1()
        N_mini = 1
        N_max = 6
        # starting recursion depth (word length)
        words_len = 1
        words_list = []
        # per-order frequency totals
        total_dict = dict()
        # per-order left/right entropy denominators
        total_H_dict = dict()
        ngram_parames_dict = dict()

        def node_census_total(node, word_len, word_list):
            # First walk: collect per-order totals and entropy denominators.
            print('total===============word len:', word_len)
            # past the maximum order
            if word_len > N_max:
                return
            if not total_dict.__contains__(word_len):
                total_dict[word_len] = 0
            if not total_H_dict.__contains__(word_len):
                total_H_dict[word_len] = dict()
            for key in node.next.keys():
                node_next = node.next.get(key)
                char = node_next.char
                # NOTE(review): word_list is shared across siblings and never
                # popped on the way back up, so it accumulates every visited
                # char rather than only the current path — the joined
                # word_char below therefore looks wrong.
                word_list.append(char)
                # below the minimum order: skip the bookkeeping
                if word_len < N_mini:
                    continue
                if node_next.word_ended:
                    # order-N total
                    total_dict[word_len] += node_next.count
                    # order N+1 entropy denominators are collected at order N
                    word_char = CHAR_CONNECTOR.join(word_list)

                    if not total_H_dict[word_len].__contains__(word_char):
                        total_H_dict[word_len][word_char] = {'left': 0, 'right': 0}
                    if node_next.postfix:
                        total_H_dict[word_len][word_char]['left'] += node_next.count
                    else:
                        total_H_dict[word_len][word_char]['right'] += node_next.count

                node_census_total(node_next, word_len + 1, word_list)

        def node_census_parames(node, word_len, word_list, cd_count=0):
            # Second walk: per-n-gram freq / pmi / confidence / entropy.
            print('parames===============word len:', word_len)
            # NOTE(review): `words_len` is the enclosing constant 1, so with
            # N_mini >= 1 this guard returns immediately on every call —
            # probably meant to be `word_len`.
            if word_len > N_max or words_len <= N_mini:
                return
            if not ngram_parames_dict.__contains__(word_len):
                ngram_parames_dict[word_len] = dict()

            for key in node.next.keys():
                node_next = node.next.get(key)
                char_count = node_next.count
                char = node_next.char
                # NOTE(review): never popped — same accumulation issue as in
                # node_census_total above.
                word_list.append(char)

                if node_next.word_ended:
                    word_list_char = CHAR_CONNECTOR.join(word_list)
                    if not ngram_parames_dict.get(word_len).__contains__(word_list_char):
                        ngram_parames_dict[word_len][word_list_char] = {'freq': 0, 'pmi': 0, 'conf': 0,
                                                                        'H': {'left': 0, 'right': 0}}

                    # NOTE(review): [:-2] drops the last two words, yielding the
                    # (N-2)-gram; an (N-1)-gram prefix would be [:-1].
                    word_list_char_front = CHAR_CONNECTOR.join(word_list[:-2])
                    if not ngram_parames_dict.get(word_len - 1).__contains__(word_list_char_front):
                        ngram_parames_dict[word_len - 1][word_list_char_front] = {'freq': 0, 'pmi': 0, 'conf': 0,
                                                                                  'H': {'left': 0, 'right': 0}}

                    # frequency — NOTE(review): stored under the *prefix* key,
                    # not under the full n-gram's own key.
                    char_freq = char_count / total_dict[word_len]
                    ngram_parames_dict.get(word_len).get(word_list_char_front)['freq'] = char_freq
                    # ------ mutual information
                    # denominator: product of the unigram probabilities
                    pmi_deno = 1
                    for i, cr in enumerate(word_list):
                        if i == 0:
                            pmi_deno = ngram_dict_1.get(cr)
                            continue
                        pmi_deno *= ngram_dict_1.get(cr)
                    # PMI against the unigram product
                    char_pmi = (char_count / total_dict[word_len]) / pmi_deno
                    ngram_parames_dict.get(word_len).get(word_list_char_front)['pmi'] = char_pmi
                    # Confidence: share of the prefix occurrences that extend
                    # into this n-gram.
                    if cd_count:
                        char_cd = char_count / cd_count
                    else:
                        char_cd = 0
                    # NOTE(review): stored under 'cd' although the entry was
                    # initialised with a 'conf' key.
                    ngram_parames_dict.get(word_len).get(word_list_char_front)['cd'] = char_cd
                    # left/right entropy contributions
                    if node_next.postfix:
                        total_left = total_H_dict.get(word_len).get(word_list_char).get('left')
                        left = -(char_count / total_left) * math.log(char_count / total_left, 2)
                        ngram_parames_dict.get(word_len - 1).get(word_list_char_front).get('H')['left'] += left

                        total_right = total_H_dict.get(word_len).get(word_list_char).get('right')
                        right = -(char_count / total_right) * math.log(char_count / total_right, 2)
                        ngram_parames_dict.get(word_len - 1).get(word_list_char_front).get('H')['right'] += right
                # NOTE(review): recurses on `node`, not `node_next`, so this
                # walk never descends into the trie — looks like a typo.
                node_census_parames(node, word_len + 1, word_list, char_count)

        node_census_total(node, words_len, words_list)
        node_census_parames(node, words_len, words_list, 0)
        print(total_dict)
        return ngram_parames_dict

    def search_bi(self):
        """
        Compute pairwise mutual information for bigrams.

        For every completed bigram (a, b) computes
        log2(P(a, b) / (P(a) * P(b))) and keeps only pairs whose PMI exceeds
        ``self.PMI_limit``, together with the bigram probability.

        Bug fixed: the loops iterated ``node.next`` / ``child.next``
        directly, which yields the dicts' *keys* (strings), and then treated
        those strings as Node objects — every call on a non-empty trie raised
        AttributeError at the first ``.next`` access. The loops now iterate
        ``.values()``.

        :return: dict {"a<CHAR_CONNECTOR>b": (PMI, probability)}, or the
                 sentinel (False, 0) when the trie is empty.
        """
        result = dict()
        node = self.root
        if not node.next:
            return False, 0

        total = 0
        # Unigram probability table and unigram total.
        one_dict, total_one = self.search_ngram_1()
        # First pass: total count of all completed bigrams.
        for child in node.next.values():
            for ch in child.next.values():
                if ch.word_ended:
                    total += ch.count

        # Second pass: PMI per completed bigram.
        for child in node.next.values():
            for ch in child.next.values():
                if ch.word_ended:
                    # Larger PMI => stronger association between the two words.
                    PMI = (math.log(max(ch.count, 1), 2) - math.log(total, 2)
                           - math.log(one_dict[child.char], 2)
                           - math.log(one_dict[ch.char], 2))
                    # Apply the PMI threshold.
                    if PMI > self.PMI_limit:
                        # e.g. dict{ "a_b": (PMI, probability), ... }
                        result[child.char + CHAR_CONNECTOR + ch.char] = (PMI, ch.count / total)
        return result

    # def find_word(self, N):
    #     # 通过搜索得到互信息
    #     # 例如: dict{ "a_b": (PMI, 出现概率), .. }
    #     bi = self.search_bi()
    #     # 通过搜索得到左右熵
    #     # left = self.search_left()
    #     # right = self.search_right()
    #     result = dict()
    #     for key, values in bi.items():
    #         d = "".join(key.split(CHAR_CONNECTOR))
    #         # 计算公式 score = PMI + min(左熵， 右熵) => 熵越小，说明越有序，这词再一次可能性更大！
    #         result[key] = (values[0] + min(left[d], right[d])) * values[1]
    #
    #     # 按照 大到小倒序排列，value 值越大，说明是组合词的概率越大
    #     # result变成 => [('世界卫生_大会', 0.4380419441616299), ('蔡_英文', 0.28882968751888893) ..]
    #     result = sorted(result.items(), key=lambda x: x[1], reverse=True)
    #     print("result: ", result)
    #     dict_list = [result[0][0]]
    #     # print("dict_list: ", dict_list)
    #     add_word = dict()
    #     new_word = "".join(dict_list[0].split(CHAR_CONNECTOR))
    #     # 获得概率
    #     add_word[new_word] = result[0][1]
    #
    #     # 取前5个
    #     # [('蔡_英文', 0.28882968751888893), ('民进党_当局', 0.2247420989996931), ('陈时_中', 0.15996145099751344), ('九二_共识', 0.14723726297223602)]
    #     for d in result[1: N]:
    #         flag = True
    #         for tmp in dict_list:
    #             pre = tmp.split(CHAR_CONNECTOR)[0]
    #             # 新出现单词后缀，再老词的前缀中 or 如果发现新词，出现在列表中; 则跳出循环
    #             # 前面的逻辑是： 如果A和B组合，那么B和C就不能组合(这个逻辑有点问题)，例如：`蔡_英文` 出现，那么 `英文_也` 这个不是新词
    #             # 疑惑: **后面的逻辑，这个是完全可能出现，毕竟没有重复**
    #             if d[0].split(CHAR_CONNECTOR)[-1] == pre or "".join(tmp.split(CHAR_CONNECTOR)) in "".join(
    #                     d[0].split(CHAR_CONNECTOR)):
    #                 flag = False
    #                 break
    #         if flag:
    #             new_word = "".join(d[0].split(CHAR_CONNECTOR))
    #             add_word[new_word] = d[1]
    #             dict_list.append(d[0])
    #
    #     return result, add_word
