from math import log


def create_dataset():
    """Return a small toy dataset and its feature names.

    Each row holds four integer-coded features followed by the class label
    ('yes'/'no') in the last position. Feature names suggest the columns are
    age, work status, home ownership, and loan credit — inferred from the
    label strings, not documented elsewhere.

    :return: (rows, feature_names) where rows is a list of 5-element lists
    """
    rows = [
        [0, 0, 0, 0, 'no'],
        [0, 0, 0, 1, 'no'],
        [0, 1, 0, 1, 'yes'],
        [0, 1, 1, 0, 'yes'],
        [0, 0, 0, 0, 'no'],
        [1, 0, 0, 0, 'no'],
        [1, 0, 0, 1, 'no'],
        [1, 1, 1, 1, 'yes'],
        [1, 0, 1, 2, 'yes'],
        [1, 0, 1, 2, 'yes'],
        [2, 0, 1, 2, 'yes'],
        [2, 0, 1, 1, 'yes'],
        [2, 1, 0, 1, 'yes'],
        [2, 1, 0, 2, 'yes'],
        [2, 0, 0, 0, 'no'],
    ]
    feature_names = ['F1-AGE', 'F2-WORK', 'F3-HOME', 'F4-LOAN']
    return rows, feature_names


def create_tree(dataset, labels, feat_labels):
    """
    Recursively build an ID3 decision tree.

    :param dataset: rows of feature values with the class label in the last column
    :param labels: feature names aligned with the dataset's feature columns
                   (no longer mutated by this function)
    :param feat_labels: out-parameter; records feature names in the order chosen
    :return: nested dict {feature_name: {feature_value: subtree_or_label}},
             or a bare class-label string at a leaf
    """
    class_list = [example[-1] for example in dataset]
    # Pure node: every sample has the same class — stop and return it.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]
    # Only the label column is left — no features remain, vote for the majority.
    if len(dataset[0]) == 1:
        return majority_cnt(class_list)
    best_feat = choose_best_feat_idx(dataset)
    # No feature yields positive information gain: fall back to a majority
    # vote.  (The original code fell through and mis-indexed labels[-1].)
    if best_feat == -1:
        return majority_cnt(class_list)
    best_feat_label = labels[best_feat]
    feat_labels.append(best_feat_label)
    mytree = {best_feat_label: {}}
    unique_vals = {example[best_feat] for example in dataset}
    for value in unique_vals:
        # Each branch gets its OWN copy of the remaining feature names.
        # The original deleted from one shared list and passed it to every
        # sibling branch, so after the first branch recursed the remaining
        # branches saw a corrupted index-to-name alignment.
        sub_labels = labels[:best_feat] + labels[best_feat + 1:]
        mytree[best_feat_label][value] = create_tree(
            split_dataset(dataset, best_feat, value), sub_labels, feat_labels
        )
    return mytree


def majority_cnt(class_list):
    """
    Return the most frequent class label in class_list.

    :param class_list: non-empty list of class labels
    :return: the label with the highest count (first-seen wins on ties)
    """
    class_count = {}
    for vote in class_list:
        class_count[vote] = class_count.get(vote, 0) + 1
    # Bug fix: the original sorted ascending (reverse=False) and took the
    # first entry, which returned the LEAST common label.  Sort descending
    # by count; sorted() is stable, so ties keep first-seen order.
    sorted_class_count = sorted(class_count.items(), key=lambda x: x[1], reverse=True)
    return sorted_class_count[0][0]


def choose_best_feat_idx(dataset):
    """
    Pick the feature column with the highest information gain.

    :param dataset: rows whose last element is the class label
    :return: index of the best feature, or -1 if no split yields positive gain
    """
    n_features = len(dataset[0]) - 1
    entropy_before = calc_shannon_entropy(dataset)
    best_gain, best_idx = 0, -1
    total = len(dataset)
    for idx in range(n_features):
        # Weighted entropy of the partitions induced by this feature.
        entropy_after = 0
        for val in {row[idx] for row in dataset}:
            subset = split_dataset(dataset, idx, val)
            entropy_after += len(subset) / total * calc_shannon_entropy(subset)
        gain = entropy_before - entropy_after
        # Strict '>' keeps the first feature among equal gains (same as original).
        if gain > best_gain:
            best_gain, best_idx = gain, idx
    return best_idx


def calc_shannon_entropy(dataset):
    """
    Compute the Shannon entropy (in bits) of the class-label column.

    :param dataset: rows whose final element is the class label
    :return: entropy value; 0 for a single-class dataset
    """
    total = len(dataset)
    # Tally each class label (last column of every row).
    counts = {}
    for row in dataset:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    # H = -sum(p * log2(p)) over the label distribution.
    entropy = 0
    for count in counts.values():
        p = count / total
        entropy -= p * log(p, 2)
    return entropy


def split_dataset(dataset, feat, value):
    """
    Select the rows whose column `feat` equals `value`, with that column removed.

    :param dataset: list of rows (lists)
    :param feat: index of the feature column to split on
    :param value: feature value a row must match to be kept
    :return: new list of reduced rows (input rows are not modified)
    """
    ret_dataset = []
    for feat_vec in dataset:
        if feat_vec[feat] == value:
            # Bug fix: the original built the reduced row but then appended
            # the FULL row, so the split column was never removed — which in
            # turn broke create_tree's len(dataset[0]) == 1 termination check.
            reduced = feat_vec[:feat] + feat_vec[feat + 1:]
            ret_dataset.append(reduced)
    return ret_dataset


if __name__ == '__main__':
    chosen_features = []
    samples, feature_names = create_dataset()
    built_tree = create_tree(samples, feature_names, chosen_features)
    print(built_tree)
    print(chosen_features)

    # Hand-rolling tree plotting is tedious — reuse sklearn's plotter instead.
    from sklearn import tree
    import matplotlib.pyplot as plt
    import numpy as np

    classifier = tree.DecisionTreeClassifier(criterion='entropy', random_state=31)
    matrix = np.array(samples)
    classifier = classifier.fit(matrix[:, :-1], matrix[:, -1])
    plt.figure(figsize=(12, 4))
    tree.plot_tree(classifier, rounded=True, filled=True)
    plt.show()
