import pandas as pd
from math import log


def load_dataset():
    """Read the loan data set from ./data/loan.xlsx into a DataFrame."""
    return pd.read_excel('./data/loan.xlsx')


def calc_shannon_ent(data_set, class_series):
    """Compute the Shannon entropy of the class labels.

    data_set: DataFrame whose row count is the sample total.
    class_series: per-label counts, e.g. from Series.value_counts().
    """
    total = data_set.shape[0]
    # H = -sum(p * log2(p)) over the probability of each class label.
    return -sum((count / total) * log(count / total, 2)
                for count in class_series)


def split_dataset(data_set, column, key):
    """Return the rows where *column* equals *key*, with *column* removed."""
    matching_rows = data_set[data_set[column] == key]
    return matching_rows.drop(columns=[column])


def choose_best_feature(data_set, class_series):
    """Return the feature (column name) with the largest information gain.

    Implements the ID3 criterion: gain = H(labels) - H(labels | feature).
    The last column of *data_set* is taken to be the label column.

    data_set: DataFrame of feature columns plus a trailing label column.
    class_series: label counts for the whole data_set (Series.value_counts()).
    """
    base_entropy = calc_shannon_ent(data_set, class_series)  # H(labels)
    best_info_gain = 0.0
    best_feature = data_set.columns[0]  # fallback when no feature gains anything

    for column in data_set.iloc[:, :-1].columns:  # all features, label excluded
        # Conditional entropy: weighted entropy of each value's subset.
        # value_counts() is used only for its distinct keys — the counts were
        # never read — so iterate the index directly (same iteration order).
        new_entropy = 0.0
        for key in data_set[column].value_counts().index:
            sub_dataset = split_dataset(data_set, column, key)
            prob = float(len(sub_dataset)) / len(data_set)  # P(feature == key)
            sub_class = sub_dataset.iloc[:, -1].value_counts()  # subset labels
            new_entropy += prob * calc_shannon_ent(sub_dataset, sub_class)

        info_gain = base_entropy - new_entropy
        print(f"特征:{column}, 基础熵:{base_entropy:.4f}, 新熵:{new_entropy:.4f}, 信息增益:{info_gain:.4f}")

        if info_gain > best_info_gain:
            best_info_gain = info_gain
            best_feature = column

    print(f"最佳特征为: {best_feature}\n{'*' * 50}")
    return best_feature


def create_tree(data_set):
    """Recursively build an ID3 decision tree as nested dicts.

    The last column of *data_set* holds the class labels. Leaves are label
    values; internal nodes are {feature_name: {feature_value: subtree}}.
    """
    label_counts = data_set.iloc[:, -1].value_counts()

    # Stop: node is pure (one label), or only the label column remains —
    # in the latter case return the majority label (value_counts is
    # count-descending, so index[0] is the most frequent).
    if len(label_counts) == 1 or data_set.shape[1] == 1:
        return label_counts.index[0]

    # Split on the highest-gain feature and recurse into each branch.
    best = choose_best_feature(data_set, label_counts)
    branches = {}
    for value in data_set[best].unique():
        branches[value] = create_tree(split_dataset(data_set, best, value))
    return {best: branches}


def classify(input_tree, feat_labels, test_vec):
    """Predict the class label for *test_vec* using the nested-dict tree.

    input_tree: {feature_name: {feature_value: subtree_or_label}}
    feat_labels: ordered feature names matching positions in *test_vec*
    test_vec: feature values of the sample to classify

    Returns the predicted label, or None when the sample's value for the
    current feature has no branch in the tree (the original fell through
    the loop and returned None implicitly; made explicit here).
    """
    first_str = next(iter(input_tree))         # root feature of this subtree
    second_dict = input_tree[first_str]        # branches keyed by feature value
    feat_index = feat_labels.index(first_str)  # position of that feature
    value = test_vec[feat_index]

    # Direct dict lookup replaces the original O(n) scan over the keys.
    if value not in second_dict:
        return None  # unseen feature value: no branch to follow

    subtree = second_dict[value]
    # isinstance replaces the fragile type(...).__name__ == 'dict' check.
    if isinstance(subtree, dict):
        return classify(subtree, feat_labels, test_vec)  # descend
    return subtree  # leaf node: the label itself


if __name__ == '__main__':
    # Load the loan data and grow a decision tree from it.
    data_set = load_dataset()
    feat_labels = data_set.columns[:-1].tolist()  # feature names (label column excluded)
    my_tree = create_tree(data_set)
    print("决策树结构:", my_tree)

    # Visualise the tree via the project-local plot_tree helper.
    from plot_tree import create_plot
    create_plot(my_tree)


    # Classify a new sample [age=0, has job=1, owns house=1, credit=2].
    test_vec = [0, 1, 1, 2]
    result = classify(my_tree, feat_labels, test_vec)
    print("预测结果:", result)  # expected output: 'yes'