# -*- coding: utf-8 -*-
# @Author  : zhu_zhao_yang
# @Time    : 2021/10/24 17:24
# @Function: 实现计算信息熵的函数、实现计算信息增益的函数、实现计算信息增益率的函数

import pandas as pd
from pandas import DataFrame
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from math import log

feature = ['年龄', '收入', '学生', '信用等级']  # feature column names (age, income, student, credit rating)
# names: column labels for the CSV; sep: separator regex — a single space here ('\t' would mean tab)
lensesLabels = ['年龄', '收入', '学生', '信用等级', 'class']  # all columns, including the class label
# NOTE(review): 'buy.txt' is read at import time, so the file must exist in the
# current working directory before this module is imported.
lenses = pd.read_csv('buy.txt', names=lensesLabels, sep=' ')


# 计算熵
# 计算熵
def calcEntropy(dataSet):
    """Compute the Shannon entropy H = -Σ p_i * log2(p_i) of a label sequence.

    Args:
        dataSet: a sized iterable of hashable class labels
            (e.g. a list or a pandas Series).

    Returns:
        float: entropy in bits; 0.0 for an empty input
            (the original raised ZeroDivisionError on empty input).
    """
    numEntries = len(dataSet)
    if numEntries == 0:  # guard: probability is undefined for an empty set
        return 0.0
    # Frequency of each distinct label; dict.get avoids the
    # "if key not in d.keys()" double-lookup of the original.
    labelCounts = {}
    for currentLabel in dataSet:
        labelCounts[currentLabel] = labelCounts.get(currentLabel, 0) + 1
    entropy = 0.0
    for count in labelCounts.values():
        prob = count / numEntries  # P(X_i) for this label
        entropy -= prob * log(prob, 2)  # H = -Σ P(X_i) * log2 P(X_i)
    return entropy


# 计算增益
def gain(dataSet, label):
    entropy = calcEntropy(dataSet['class'])  # 分类标签熵
    # print("entropy:%f" % entropy)
    group = dict(list(dataSet.groupby(label)))  # 按label分组
    numEntries = len(dataSet)
    # print("numEntries:%f" % numEntries)
    group_entropy = 0.0  # 分类后的加权熵值
    for key in group:
        subSet = DataFrame(group[key])
        subEntries = len(subSet)
        prob = float(subEntries) / numEntries  # 各分类的概率
        sub_entropy = calcEntropy(subSet['class'])  # 各分类的熵值
        group_entropy += sub_entropy * prob
    return entropy - group_entropy


# 计算增益率
def gain_ratio(dataSet, label):
    """Gain ratio = information gain / intrinsic (split) entropy of *label*.

    Bug fixes vs. the original:
    * the intrinsic entropy was computed from the module-level ``lenses``
      DataFrame instead of the *dataSet* argument, so any other DataFrame
      passed in gave wrong results;
    * a single-valued *label* column made the denominator 0 and raised
      ZeroDivisionError — now returns 0.0 (such a split is useless anyway).

    Args:
        dataSet: pandas DataFrame containing a 'class' column and *label*.
        label: name of the feature column to split on.

    Returns:
        float: gain(dataSet, label) normalized by H(label).
    """
    label_gain = gain(dataSet, label)  # information gain of the split
    label_entropy = calcEntropy(dataSet[label])  # was: lenses[label] (global)
    if label_entropy == 0.0:  # single-valued column: no meaningful split
        return 0.0
    return label_gain / label_entropy


def main():
    """Train a depth-4 decision tree on the 'buy' dataset, report its
    accuracy, and print entropy / information-gain statistics per feature.
    """
    # Feature matrix and class labels. .copy() makes x_train an independent
    # frame, avoiding pandas' SettingWithCopyWarning when the encoded
    # columns are written back below.
    x_train = lenses.loc[:, feature].copy()
    y_train = lenses['class']
    le = LabelEncoder()  # encodes string categories as integer codes
    for col in x_train.columns:  # each column is encoded independently
        x_train[col] = le.fit_transform(x_train[col])
    x_train = x_train.values
    # random_state pins the split so results are reproducible, consistent
    # with random_state=0 on the classifier below.
    x_train, x_test, y_train, y_test = train_test_split(
        x_train, y_train, test_size=0.25, random_state=0)
    # Build and fit the decision tree.
    clf = DecisionTreeClassifier(random_state=0, max_depth=4)
    model = clf.fit(x_train, y_train)

    # Predict on the held-out quarter of the data.
    y_pred = model.predict(x_test)

    # Entropy of the class-label column over the whole dataset.
    entropy = calcEntropy(lenses['class'])

    # Report (user-facing strings kept verbatim).
    print("正确值：\n{0}".format(y_test))
    print("预测值：\n{0}".format(y_pred))
    print("准确率：%f%%" % (accuracy_score(y_test, y_pred) * 100))
    print("分类标签列的信息熵：%f" % entropy)

    # Information gain and gain ratio for every feature column.
    for label in feature:
        label_gain = gain(lenses, label)
        label_gain_ratio = gain_ratio(lenses, label)
        print(label + ' 信息熵增益：%f' % label_gain + ' 信息熵增益率：%f%%' % (label_gain_ratio * 100))


if __name__ == '__main__':  # script entry point: run only when executed directly
    main()
