"""
本程序用于完成深度学习与自然语言处理第一次大作业——中文信息熵计算
学号：ZY2103802
姓名：胡庆隆
"""
import re
import math
import jieba


def get_text_content(file_path):
    """
    Read the gb18030-encoded novel at *file_path* and clean its text.

    Cleaning removes ASCII letters/digits and CJK punctuation via a regex,
    then strips newlines, spaces, ideographic spaces, and two known
    download-site advertisement sentences.

    Returns:
        (count, text): the number of remaining characters and the
        cleaned text itself.
    """
    noise_pattern = u'[a-zA-Z0-9’!"#$%&\'()（）*+,-./:：;<=>?@，。?★、…【】《》？“”‘’！[\\]^_`{|}~「」]+'
    with open(file_path, 'r', encoding='gb18030') as fp:
        raw_text = fp.read()
    cleaned = re.sub(noise_pattern, '', raw_text)
    # Order matters: newlines/spaces go first, exactly as the cleaning
    # pipeline has always done, before the ad sentences are removed.
    for junk in ("\n", " ", '\u3000',
                 "本书来自免费小说下载站",
                 "更多更新免费电子书请关注"):
        cleaned = cleaned.replace(junk, '')
    return len(cleaned), cleaned


def word_frequency(contents, n_gram):
    """
    Build a frequency dictionary of character n-grams over *contents*.

    Args:
        contents: the text (a string, or any sequence of characters).
        n_gram: 1, 2 or 3. Unigram keys are single characters;
            bigram/trigram keys are tuples of characters.

    Returns:
        dict mapping each observed n-gram to its occurrence count.

    Raises:
        ValueError: if n_gram is not 1, 2 or 3 (the original silently
            returned None, which crashed downstream instead).
    """
    if n_gram not in (1, 2, 3):
        raise ValueError('n_gram must be 1, 2 or 3, got %r' % (n_gram,))

    if n_gram == 1:
        counts = {}
        for ch in contents:
            counts[ch] = counts.get(ch, 0) + 1
        return counts

    # A text of length L has L - n + 1 windows of size n. The original
    # loops used range(L - n), an off-by-one that silently dropped the
    # final bigram/trigram of every text.
    counts = {}
    for i in range(len(contents) - n_gram + 1):
        key = tuple(contents[i:i + n_gram])
        counts[key] = counts.get(key, 0) + 1
    return counts

def phrase_frequency(contents):
    """
    Count the frequency of each token produced by jieba word segmentation.

    Args:
        contents: the cleaned text to segment (precise mode, cut_all=False).

    Returns:
        dict mapping each segmented word to its occurrence count.
    """
    fre_count = {}
    for token in jieba.lcut(contents, cut_all=False):
        if token in fre_count:
            fre_count[token] += 1
        else:
            fre_count[token] = 1
    return fre_count


def cal_entropy(n_gram, dict_one, dict_two=None, dict_three=None):
    """
    Compute the average information entropy (in bits) of a language model.

    Args:
        n_gram: 1 or 'jieba' for plain unigram entropy over dict_one;
            2 for bigram conditional entropy (needs dict_one + dict_two);
            3 for trigram conditional entropy (needs dict_two + dict_three).
        dict_one: unigram frequency dict.
        dict_two: bigram frequency dict (tuple keys), for n_gram >= 2.
        dict_three: trigram frequency dict (tuple keys), for n_gram == 3.

    Returns:
        The entropy H = -sum p(context, w) * log2 p(w | context),
        or None for an unrecognized n_gram value.
    """
    # jieba-segmented words and single characters are both unigram models.
    if n_gram == 1 or n_gram == 'jieba':
        total = sum(dict_one.values())
        return -sum((c / total) * math.log2(c / total)
                    for c in dict_one.values())

    if n_gram == 2:
        total = sum(dict_two.values())
        entropy = 0
        for (w1, w2), count in dict_two.items():
            joint_p = count / total
            cond_p = count / dict_one[w1]
            entropy -= joint_p * math.log2(cond_p)
        return entropy

    if n_gram == 3:
        total = sum(dict_three.values())
        entropy = 0
        for (w1, w2, w3), count in dict_three.items():
            joint_p = count / total
            cond_p = count / dict_two[w1, w2]
            entropy -= joint_p * math.log2(cond_p)
        return entropy


# Read inf.txt so the novel .txt files can be processed in its listed order.
inf_name = './datasets/inf.txt'
with open(inf_name, 'r', encoding='gb18030') as f:
    # Each line is a comma-separated list of novel names. strip() removes
    # the trailing newline; the original `i[:].split(',')` kept it, so the
    # last name on the line became 'name\n' and the built path
    # './datasets/name\n.txt' failed to open.
    novel_names = [line.strip().split(',') for line in f.readlines()]

# For each novel on the first line of inf.txt: load and clean the text,
# then compute the average Chinese information entropy under four models.
for novel_name in novel_names[0]:
    # Read and clean the text, and report the total character count.
    text_count, text_content = get_text_content('./datasets/' + novel_name + '.txt')
    print('小说《', novel_name, '》共有', text_count, '个字')
    one_dict = word_frequency(text_content, 1)
    two_dict = word_frequency(text_content, 2)
    three_dict = word_frequency(text_content, 3)
    jieba_dict = phrase_frequency(text_content)
    one_entropy = cal_entropy(1, one_dict)
    bi_entropy = cal_entropy(2, one_dict, two_dict)
    thri_entropy = cal_entropy(3, one_dict, two_dict, three_dict)
    jieba_entropy = cal_entropy('jieba', jieba_dict)
    print('其中文的平均信息熵(以字为单位)为', round(one_entropy, 4), 'bit/字')
    print('其中文的平均信息熵(以二元模型为单位)为', round(bi_entropy, 4), 'bit/词')
    print('其中文的平均信息熵(以三元模型为单位)为', round(thri_entropy, 4), 'bit/词')
    print('其中文的平均信息熵(以jieba分词为单位)为', round(jieba_entropy, 4), 'bit/词')
    print('--------------------------------------------------')