"""
注：习题4.4代码与习题4.3大致相同
完整的注释详见习题4.3的代码
与习题4.3不同的部分将会在注释中说明
"""
import copy
import operator
from collections import Counter

import pandas as pd
from matplotlib import pyplot as plt
from numpy import shape

from tree_plotter import createPlot

def Gini(data):
    """Compute the Gini value of the data set ``data`` (Eq. 4.5).

    data : list of samples; the last element of each sample is its class label.
    Returns ``1 - sum_k p_k**2`` where ``p_k`` is the fraction of class ``k``.
    For an empty data set this returns 1.0 (no class term is subtracted),
    matching the original manual-counting implementation.
    """
    # Counter replaces the two hand-rolled counting loops of the original.
    class_count = Counter(sample[-1] for sample in data)
    total = float(len(data))
    return 1.0 - sum((count / total) ** 2 for count in class_count.values())


def split_discrete_data(data, attribute, value):
    """Return the samples whose discrete ``attribute`` equals ``value``.

    The matched attribute column is removed from each returned sample,
    so the subset has one fewer column than the input.
    """
    return [
        row[:attribute] + row[attribute + 1:]
        for row in data
        if row[attribute] == value
    ]


def split_continuous_data(data, attribute, threshold, direction):
    """Bi-partition ``data`` on a continuous ``attribute``.

    direction == 0 keeps samples with value <= ``threshold``; any other
    direction keeps samples with value > ``threshold``.  Unlike the
    discrete split, the attribute column is kept in the returned samples.
    """
    if direction == 0:
        return [sample for sample in data if sample[attribute] <= threshold]
    return [sample for sample in data if sample[attribute] > threshold]


def major_class(class_list):
    """Return the most frequent class label in ``class_list``.

    Ties are broken by first occurrence: ``Counter.most_common`` keeps
    insertion order for equal counts (Python 3.7+), matching the original
    stable ``sorted(..., reverse=True)`` implementation.
    """
    return Counter(class_list).most_common(1)[0][0]


def identify_sample(decision_tree, features_list, test_sample):
    """
    Classify a single sample with the decision tree.

    decision_tree : the tree — a nested dict; a leaf is a plain class label
    features_list : list of all attribute names, indexed like test_sample
    test_sample : attribute values of the sample to classify

    Returns the predicted class label.
    NOTE(review): if no child branch matches the sample's attribute value,
    ``identified_class`` is never assigned and an UnboundLocalError is
    raised — tree construction is expected to create a branch per value.
    """
    first_str = list(decision_tree.keys())[0]

    if '<=' in first_str:
        # This node tests a continuous attribute; its label is "name<=threshold".
        value = float(first_str.split('<=')[-1]) # split point of this node
        feature_key = first_str.split('<=')[0] # attribute name of this node
        second_dict = decision_tree[first_str]
        feature_index = features_list.index(feature_key) # column index of the attribute

        # result encodes the branch to follow: 1 means value <= threshold, 0 means >.
        if test_sample[feature_index] <= value:
            result = 1
        else:
            result = 0

        for key in second_dict.keys():
            # Examine the sample against each child branch.
            if result == int(key):
                # Follow the child selected by this node's test.
                if type(second_dict[key]).__name__ == 'dict':
                    # Internal node: recurse into the subtree.
                    identified_class = identify_sample(second_dict[key], features_list, test_sample)
                else:
                    # Leaf node: its value is the predicted class.
                    identified_class = second_dict[key]
    else:
        # This node tests a discrete attribute; children are keyed by value.
        second_dict = decision_tree[first_str]
        feature_index = features_list.index(first_str) # column index of the attribute

        for key in second_dict.keys():
            if test_sample[feature_index] == key:
                # The sample's attribute value matches this child branch.
                if type(second_dict[key]).__name__ == 'dict':
                    # Internal node: recurse into the subtree.
                    identified_class = identify_sample(second_dict[key], features_list, test_sample)
                else:
                    # Leaf node: its value is the predicted class.
                    identified_class = second_dict[key]

    return identified_class


def feature_test(feature, data, data_test, features_list):
    """Estimate the test-set error of splitting on ``feature`` (pre-pruning check).

    For each training value of the feature, the prediction is the majority
    training class among samples with that value; the function counts how
    many test samples carrying the same value disagree with that prediction.

    feature : attribute name to evaluate
    data / data_test : training and test samples (class label last)
    features_list : attribute names, used to locate the feature's column
    Returns the error count as a float.
    """
    class_list = [sample[-1] for sample in data]
    feature_index = features_list.index(feature) # column index of the feature
    train_values = [sample[feature_index] for sample in data] # training values of the feature
    test_pairs = [(sample[feature_index], sample[-1]) for sample in data_test] # (value, class) per test sample
    error = 0.0

    for value in set(train_values):
        # Classes of all training samples whose feature equals `value`.
        classes_on_value = [cls for val, cls in zip(train_values, class_list) if val == value]
        # Loop-invariant hoisted: the original recomputed the majority vote
        # for every test sample inside the inner loop.
        predicted = major_class(classes_on_value)
        for test_value, test_class in test_pairs:
            # Count test samples this branch would misclassify.
            if test_value == value and test_class != predicted:
                error += 1.0

    return error


def tree_test_before_pruning(tree, data_test, features_list):
    """Count how many samples in ``data_test`` the tree misclassifies.

    Each sample's last element is its true class label; returns the
    number of mismatches as a float.
    """
    return float(sum(
        1
        for sample in data_test
        if identify_sample(tree, features_list, sample) != sample[-1]
    ))


def tree_test_after_pruning(majorClass, data_test):
    """Count the test samples misclassified by a constant prediction.

    ``majorClass`` is the label a pruned (leaf) node would predict for
    every sample; returns the mismatch count as a float.
    """
    return float(sum(1 for sample in data_test if sample[-1] != majorClass))


def best_split_feature(data, features):
    """
    Choose the attribute with the smallest Gini index (CART, Eq. 4.6).

    data : list of samples; the last element of each sample is the class label
    features : attribute names, parallel to the columns of data

    Returns the column index of the best attribute.

    Side effects when the best attribute is continuous (float/int values):
    - features[best] is renamed in place to "name<=point";
    - every data[i][best] is replaced in place by 1 (value <= point) or 0,
      so callers can then treat the column as a binary discrete attribute.
    """
    num_features = len(data[0]) - 1
    min_Gini_index = float('inf') # smallest Gini index seen so far
    best_feature = -1
    best_dividing_point = 0.0 # best split point of the continuous attribute being scanned
    best_dividing_point_all = 0.0 # split point of the overall best continuous attribute


    for i in range(num_features):
        feature_value = [sample[i] for sample in data]
        
        if type(feature_value[0]).__name__ == 'float' or type(feature_value[0]).__name__ == 'int':
            # Continuous attribute: candidate split points are the midpoints
            # of consecutive sorted values (bi-partition).
            sorted_value = sorted(feature_value)
            dividing_point_list = []
            for j in range(len(sorted_value) - 1):
                dividing_point_list.append(
                    (sorted_value[j] + sorted_value[j+1]) / 2.0
                )
            
            Gini_index = float('inf')
            # Find the split point best_dividing_point minimising the Gini index.
            # NOTE(review): if all values are identical, dividing_point_list is
            # empty and Gini_index stays inf, so this feature cannot be chosen.
            for point in dividing_point_list:
                gini = 0.0
                subdata0 = split_continuous_data(data, i, point, 0)
                subdata1 = split_continuous_data(data, i, point, 1)
                # Eq. (4.6): weighted sum of the two partitions' Gini values.
                weight_negative = len(subdata0) / float(len(data))
                gini += weight_negative * Gini(subdata0)
                weight_positive = len(subdata1) / float(len(data))
                gini += weight_positive * Gini(subdata1)
                if gini < Gini_index:
                    Gini_index = gini
                    best_dividing_point = point
        else:
            # Discrete attribute: multi-way split over every distinct value.
            unique_value = set(feature_value)
            Gini_index = 0.0
            for value in unique_value:
                subdata = split_discrete_data(data, i, value)
                weight = len(subdata) / float(len(data))
                Gini_index += weight * Gini(subdata) # Eq. (4.6)

        if Gini_index < min_Gini_index:
            min_Gini_index = Gini_index
            best_feature = i
            best_dividing_point_all = best_dividing_point

    if type(data[0][best_feature]).__name__ == 'float' or type(data[0][best_feature]).__name__ == 'int':
        # Binarise the chosen continuous attribute in place (see docstring):
        # rename the feature and rewrite the column as 1 (<= point) / 0 (> point).
        features[best_feature] = features[best_feature] + '<=' + str(best_dividing_point_all)
        for i in range(shape(data)[0]):
            if data[i][best_feature] <= best_dividing_point_all:
                data[i][best_feature] = 1
            else:
                data[i][best_feature] = 0

    return best_feature


def TreeGenerate(data, features, data_full, features_full):
    """
    Recursively build an (unpruned) CART decision tree.

    data / features : current training subset and its attribute names;
        both may be mutated in place by best_split_feature when the chosen
        attribute is continuous (column binarised to 1/0, name renamed).
    data_full / features_full : complete training set and attribute list,
        used so discrete values absent from this subset still get a branch.

    Returns a class label (leaf) or a nested dict {attr: {value: subtree}}.
    """
    class_list = [sample[-1] for sample in data]

    # Case 1: all samples share one class -> pure leaf.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]

    # Case 2: only the class column remains -> majority-vote leaf.
    if len(data[0]) == 1:
        return major_class(class_list)

    best_feat_idx = best_split_feature(data, features)
    best_feat = features[best_feat_idx]
    my_tree = {best_feat:{}}
    best_feat_value = [sample[best_feat_idx] for sample in data]
    unique_bf_value = set(best_feat_value)

    if type(data[0][best_feat_idx]).__name__ == 'str':
        # Discrete attribute: collect every possible value from the full
        # data so values missing in this subset can be filled in below.
        # (A binarised continuous column holds ints, so it skips this.)
        bf_idx_full = features_full.index(features[best_feat_idx])
        bf_value_full = [sample[bf_idx_full] for sample in data_full]
        unique_bf_value_full = set(bf_value_full)

    del(features[best_feat_idx])

    for value in unique_bf_value:
        subfeatures = features[:]
        if type(data[0][best_feat_idx]).__name__ == 'str':
            # Track which possible values actually appear in this subset.
            unique_bf_value_full.remove(value)

        my_tree[best_feat][value] = TreeGenerate(
            split_discrete_data(data, best_feat_idx, value), subfeatures, data_full, features_full
        )

    if type(data[0][best_feat_idx]).__name__ == 'str':
        # Values never seen in this subset become majority-vote leaves.
        for value in unique_bf_value_full:
            my_tree[best_feat][value] = major_class(class_list)

    return my_tree


def prePruningTree(data, features, data_full, features_full, data_test):
    """
    Recursively build a pre-pruned CART decision tree.

    Identical to TreeGenerate except that before expanding a node the
    test-set error of splitting (feature_test) is compared against the
    error of a majority-vote leaf (tree_test_after_pruning); the split is
    kept only when it makes strictly fewer errors.

    data_test : the test subset routed to this node, split alongside data.
    Returns a class label (leaf) or a nested dict {attr: {value: subtree}}.
    """
    class_list = [sample[-1] for sample in data]

    # Pure node -> leaf.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]

    # No attributes left -> majority-vote leaf.
    if len(data[0]) == 1:
        return major_class(class_list)

    # best_split_feature may rename a continuous feature inside `features`,
    # so keep an unmodified copy for feature_test's name lookup.
    # NOTE(review): if the chosen feature WERE continuous, best_feat would be
    # the renamed "name<=point" and feature_test's index() on the old names
    # would fail — presumably the Table 4.2 data is all-discrete; confirm.
    features_copy = copy.deepcopy(features)
    best_feat_idx = best_split_feature(data, features)
    best_feat = features[best_feat_idx]

    if feature_test(best_feat, data, data_test, features_copy) < tree_test_after_pruning(major_class(class_list), data_test):
        # Splitting makes fewer test errors than a plain leaf: keep the node.
        my_pre_pruning_tree = {best_feat:{}} # do not prune
    else:
        return major_class(class_list) # prune: replace node with majority leaf

    best_feat_value = [sample[best_feat_idx] for sample in data]
    unique_bf_value = set(best_feat_value)

    if type(data[0][best_feat_idx]).__name__ == 'str':
        # Discrete attribute: gather all possible values from the full data.
        bf_idx_full = features_full.index(features[best_feat_idx])
        bf_value_full = [sample[bf_idx_full] for sample in data_full]
        unique_bf_value_full = set(bf_value_full)

    del(features[best_feat_idx])

    for value in unique_bf_value:
        subfeatures = features[:]
        if type(data[0][best_feat_idx]).__name__ == 'str':
            unique_bf_value_full.remove(value)

        # Recurse, routing both training and test samples down this branch.
        my_pre_pruning_tree[best_feat][value] = prePruningTree(
            split_discrete_data(data, best_feat_idx, value),
            subfeatures, data_full, features_full,
            split_discrete_data(data_test, best_feat_idx, value)
        )

    if type(data[0][best_feat_idx]).__name__ == 'str':
        # Values absent from this subset become majority-vote leaves.
        for value in unique_bf_value_full:
            my_pre_pruning_tree[best_feat][value] = major_class(class_list)

    return my_pre_pruning_tree


def postPruningTree(tree, data, data_test, features):
    """Post-prune decision tree ``tree`` bottom-up using ``data_test``.

    tree : the (already built) tree, modified in place while recursing
    data / data_test : training and test samples routed to this node
    features : attribute names for this node's columns; the matched entry
        is deleted in place as the recursion descends

    Returns the (possibly pruned) subtree, or a class label if this whole
    node is replaced by a majority-vote leaf.
    """
    first_str = list(tree.keys())[0]
    feature_key = copy.deepcopy(first_str)
    features_copy = copy.deepcopy(features)
    second_dict = tree[first_str]
    class_list = [sample[-1] for sample in data]

    if '<=' in first_str:
        # Continuous attribute: the node label has the form "name<=threshold".
        feature_key = first_str.split('<=')[0]
        # BUG FIX: the threshold must be numeric — the original kept the
        # string, and `float_value <= str` raises TypeError in Python 3.
        feature_value = float(first_str.split('<=')[-1])

    feature_index = features.index(feature_key) # column index of this node's attribute
    del(features[feature_index]) # remove it for the child calls
    for key in second_dict.keys():
        if type(second_dict[key]).__name__ == 'dict':
            # Child is an internal node: recurse to prune it first (bottom-up).
            if type(data[0][feature_index]).__name__ == 'str':
                # Discrete attribute: route samples by exact value.
                tree[first_str][key] = postPruningTree(
                    second_dict[key],
                    split_discrete_data(data, feature_index, key),
                    split_discrete_data(data_test, feature_index, key),
                    copy.deepcopy(features)
                )
            else:
                # Continuous attribute.
                # BUG FIX: child key 1 holds the "<= threshold" branch (see
                # identify_sample / best_split_feature), but
                # split_continuous_data uses direction 0 for "<=".  Passing
                # the key directly swapped the two branches' samples.
                direction = 0 if key == 1 else 1
                tree[first_str][key] = postPruningTree(
                    second_dict[key],
                    split_continuous_data(data, feature_index, feature_value, direction),
                    split_continuous_data(data_test, feature_index, feature_value, direction),
                    copy.deepcopy(features)
                )

    # With all children pruned, test whether this node itself should become
    # a majority-vote leaf: keep the subtree only if it does not make more
    # test errors than the leaf would.
    if tree_test_before_pruning(tree, data_test, features_copy) <= tree_test_after_pruning(major_class(class_list), data_test):
        return tree # do not prune
    return major_class(class_list) # prune


if __name__ == '__main__':
    # Load the watermelon data set (Table 4.2); the first column is the row ID.
    df = pd.read_csv('watermelon_4_2.csv')
    data = df.values[:11, 1:].tolist() # rows 0-10: training split of Table 4.2
    data_full = data[:]
    data_test = df.values[11:, 1:].tolist() # remaining rows: test split of Table 4.2
    features = df.columns.values[1:-1].tolist()
    features_full = features[:]

    # 1) Unpruned tree: build, report test error rate, plot.
    print('\n未剪枝决策树')
    my_tree = TreeGenerate(
        copy.deepcopy(data), copy.deepcopy(features), data_full, features_full
    )
    print(my_tree)
    print('错误率：', tree_test_before_pruning(
        my_tree, copy.deepcopy(data_test), copy.deepcopy(features)
    ) / float(len(data_test)))
    createPlot(my_tree, '未剪枝决策树', '未剪枝决策树.png')

    # 2) Pre-pruned tree.
    print('\n预剪枝决策树')
    my_pre_pruning_tree = prePruningTree(
        copy.deepcopy(data), copy.deepcopy(features),
        data_full, features_full, copy.deepcopy(data_test)
    )
    print(my_pre_pruning_tree)
    print('错误率：', tree_test_before_pruning(
        my_pre_pruning_tree, copy.deepcopy(data_test), copy.deepcopy(features)
    ) / float(len(data_test)))
    createPlot(my_pre_pruning_tree, '预剪枝决策树', '预剪枝决策树.png')

    # 3) Post-pruned tree.
    # NOTE(review): postPruningTree mutates my_tree in place and deletes
    # entries from features_full (it receives them directly, not copies).
    # Both are unused after this block, but passing deep copies would be
    # safer — confirm intent before reusing these names below this point.
    print('\n后剪枝决策树')
    my_post_pruning_tree = postPruningTree(
        my_tree, copy.deepcopy(data), copy.deepcopy(data_test), features_full
    )
    print(my_post_pruning_tree)
    print('错误率：', tree_test_before_pruning(
        my_post_pruning_tree, copy.deepcopy(data_test), copy.deepcopy(features)
    ) / float(len(data_test)))
    createPlot(my_post_pruning_tree, '后剪枝决策树', '后剪枝决策树.png')
