import numpy as np
import pandas as pd
from copy import deepcopy


class MyDecisionTree(object):
    """A CART-style binary decision tree built on pandas DataFrames.

    Supports classification (criterion 'entropy' or 'gini') and
    regression (criterion 'mse'). The learned tree is stored as nested
    dicts of the form ``{feature: {'threshold': v, str(v): subtree,
    'not_' + str(v): subtree}}``; leaves hold raw label values.
    """

    def __init__(self, max_depth=10, criterion="entropy"):
        """
        :param max_depth: maximum depth of the tree, default 10.
        :param criterion: one of {'mse', 'entropy', 'gini'}.
            'mse': mean squared error, for regression trees.
            'entropy': information gain, for classification.
            'gini': Gini impurity, for classification.
        """
        self.max_depth = max_depth
        self.criterion = criterion

        # Set by fit(): names of discrete (categorical) features.
        self.dispersed = None
        # Set by fit(): optional mapping from label values to ints,
        # needed so bincount-based impurities can digest string labels.
        self.label_map = None
        # Set by fit(): the learned tree (nested dicts, see class doc).
        self.tree = None
        # Small constant guarding against log(0).
        self.eps = np.finfo(np.float32).eps

    def _entropy(self, y):
        """
        Shannon entropy: H(y) = -sum_i p_i * log2(p_i).

        :param y: 1-D array of non-negative integer class labels.
        :return: entropy in bits.
        """
        hist = np.bincount(y)
        ps = hist / np.sum(hist)
        # eps keeps log2 finite for classes with zero probability;
        # (the previous implementation returned 1 - sum(p^2), which is
        # the Gini index, not the entropy documented here).
        return -np.sum(ps * np.log2(ps + self.eps))

    def _gini(self, y):
        """
        Gini impurity: G(p) = 1 - sum_i p_i^2.

        :param y: 1-D array of non-negative integer class labels.
        :return: Gini impurity in [0, 1).
        """
        hist = np.bincount(y)
        ps = hist / np.sum(hist)
        return 1 - np.sum(np.power(ps, 2))

    def _mse(self, y):
        """
        Mean squared error around the mean (i.e. the variance of y).

        :param y: 1-D array of numeric target values.
        :return: mean squared deviation from the mean.
        """
        return np.mean(np.power(y - np.mean(y), 2))

    def _labels(self, df_data):
        """Extract the label column, mapped to ints when a label_map
        was supplied (regression labels are used as-is)."""
        y = df_data['label'].values
        if self.label_map is not None:
            y = np.array([self.label_map[v] for v in y])
        return y

    def _get_loss(self, df_data, df_sub_data_1, df_sub_data_2):
        """
        Impurity decrease ("gain") of a candidate binary split.

        :param df_data: parent sample.
        :param df_sub_data_1: first partition of the parent.
        :param df_sub_data_2: second partition of the parent.
        :return: parent impurity minus the size-weighted child
            impurity; 0 when any of the three frames is empty.
        :raises ValueError: if the configured criterion is unknown.
        """
        if self.criterion == "entropy":
            loss = self._entropy
        elif self.criterion == "gini":
            loss = self._gini
        elif self.criterion == "mse":
            loss = self._mse
        else:
            raise ValueError('wrong criterion.')

        parent_len = len(df_data)
        sub_1_len = len(df_sub_data_1)
        sub_2_len = len(df_sub_data_2)

        # A degenerate split carries no information.
        if parent_len <= 0 or sub_1_len <= 0 or sub_2_len <= 0:
            return 0

        parent_loss = loss(self._labels(df_data))
        sub_1_loss = loss(self._labels(df_sub_data_1))
        sub_2_loss = loss(self._labels(df_sub_data_2))

        child_loss = (sub_1_len / parent_len) * sub_1_loss \
            + (sub_2_len / parent_len) * sub_2_loss
        return parent_loss - child_loss

    def _split(self, df_data, feature, value):
        """Partition rows on (feature, value): equality test for
        discrete features, <= threshold for continuous ones."""
        if feature in self.dispersed:
            mask = df_data[feature] == value
        else:
            mask = df_data[feature] <= value
        return df_data[mask], df_data[~mask]

    def _choose_best_feature(self, df_data):
        """
        Scan every feature and every candidate value, returning the
        (feature, value) pair with the largest impurity decrease.

        :param df_data: samples (including the 'label' column).
        :return: (best_feature, best_value); (None, None) when no
            candidate split exists.
        """
        columns = [c for c in df_data.columns if c != 'label']
        best_gain = -np.inf
        best_feature = None
        best_value = None
        for column in columns:
            values = df_data[column].unique()
            for value in values:
                # Splitting "<= max" would put every row on one side.
                if column not in self.dispersed and value == max(values):
                    continue
                df_sub_data_1, df_sub_data_2 = self._split(df_data, column, value)
                gain = self._get_loss(df_data, df_sub_data_1, df_sub_data_2)
                if best_gain < gain:
                    best_gain = gain
                    best_feature = column
                    best_value = value
        return best_feature, best_value

    def _leaf_value(self, y):
        """Terminal node value: mean of y for regression, otherwise
        the majority class (first-seen wins on ties, matching the
        original tie-breaking)."""
        if self.criterion == 'mse':
            return np.average(y)
        counts = {}
        for y_s in y:
            counts[y_s] = counts.get(y_s, 0) + 1
        return max(counts, key=counts.get)

    def _create_tree(self, df_data, depth=0):
        """
        Recursively grow the tree.

        :param df_data: samples (including the 'label' column).
        :param depth: current depth of the recursion.
        :return: a leaf value or a nested-dict subtree.
        """
        y = df_data['label'].values

        # Stop: every sample carries the same label.
        if len(set(y)) <= 1:
            return y[0]

        # Stop: only the label column remains, or depth limit reached.
        if (len(df_data.iloc[0]) <= 1) or (depth > self.max_depth):
            return self._leaf_value(y)

        # Pick the best split point.
        best_feature, best_value = self._choose_best_feature(df_data)

        # Stop: no usable split (e.g. all remaining features constant).
        if best_feature is None:
            return self._leaf_value(y)

        df_sub_data_1, df_sub_data_2 = self._split(df_data, best_feature, best_value)

        # Stop: the chosen split is degenerate; don't recurse on an
        # empty frame.
        if len(df_sub_data_1) == 0 or len(df_sub_data_2) == 0:
            return self._leaf_value(y)

        tree = {best_feature: {'threshold': best_value}}

        # A feature that became constant inside a branch can never
        # split it again, so drop it (drop() returns a copy and avoids
        # pandas chained-assignment pitfalls).
        if df_sub_data_1[best_feature].nunique() <= 1:
            df_sub_data_1 = df_sub_data_1.drop(columns=[best_feature])
        if df_sub_data_2[best_feature].nunique() <= 1:
            df_sub_data_2 = df_sub_data_2.drop(columns=[best_feature])

        tree[best_feature][str(best_value)] = self._create_tree(df_sub_data_1, depth + 1)
        tree[best_feature]['not_' + str(best_value)] = self._create_tree(df_sub_data_2, depth + 1)

        return tree

    def fit(self, data_set, columns, dispersed=None, label_map=None):
        """
        Train the tree.

        :param data_set: rows of features followed by a trailing label.
        :param columns: feature names (label column name excluded).
            The caller's list is left unmodified.
        :param dispersed: names of the discrete features; all other
            features are treated as continuous. Defaults to none.
        :param label_map: optional mapping from label values to ints
            (required for string labels with 'entropy'/'gini').
        :return: None; the result is stored in ``self.tree``.
        """
        self.dispersed = list(dispersed) if dispersed is not None else []
        self.label_map = label_map
        # Copy so the caller's list is never mutated.
        self.columns = list(columns)
        df_data = pd.DataFrame(data_set, columns=self.columns + ['label'])
        self.tree = self._create_tree(df_data)

    def predict(self, X, columns):
        """
        Predict a value for every row of X.

        :param X: input rows (features only, no label).
        :param columns: feature names matching the columns of X.
        :return: list of predicted leaf values.
        """
        df_data = pd.DataFrame(X, columns=columns)
        # Traversal never mutates the tree, so no defensive copy is
        # needed (the previous deepcopy per row was pure overhead).
        return [self._predict_one(df_data.iloc[i], self.tree)
                for i in range(len(df_data))]

    def _predict_one(self, df_data, tree_dict):
        """Walk one sample down the tree and return its leaf value."""
        # Leaves are raw label values (str/int/float); internal nodes
        # are single-key dicts.
        if not isinstance(tree_dict, dict):
            return tree_dict
        key = list(tree_dict.keys())[0]
        value = df_data[key]
        threshold = tree_dict[key]['threshold']

        if key in self.dispersed:
            matched = value == threshold
        else:
            matched = value <= threshold
        # Child keys are built from the THRESHOLD, not the sample's
        # value (using str(value) here caused KeyError whenever a
        # continuous value was below, but not equal to, the threshold).
        branch = str(threshold) if matched else 'not_' + str(threshold)
        return self._predict_one(df_data, tree_dict[key][branch])




def run_my_model():
    """Fit the tree on the classic loan-approval toy data and print
    predictions for the held-out tail of the table."""
    samples = [['youth', 'no', 'no', 1, 'refuse'],
               ['youth', 'no', 'no', 2, 'refuse'],
               ['youth', 'yes', 'no', 2, 'agree'],
               ['youth', 'yes', 'yes', 1, 'agree'],
               ['youth', 'no', 'no', 1, 'refuse'],
               ['mid', 'no', 'no', 1, 'refuse'],
               ['mid', 'no', 'no', 2, 'refuse'],
               ['mid', 'yes', 'yes', 2, 'agree'],
               ['mid', 'no', 'yes', 3, 'agree'],
               ['mid', 'no', 'yes', 3, 'agree'],
               ['elder', 'no', 'yes', 3, 'agree'],
               ['elder', 'no', 'yes', 2, 'agree'],
               ['elder', 'yes', 'no', 2, 'agree'],
               ['elder', 'yes', 'no', 3, 'agree'],
               ['elder', 'no', 'no', 1, 'refuse']
               ]
    feature_names = ['age', 'working', 'house', 'credit_situation']
    model = MyDecisionTree()
    # First 12 rows train the model; the last 3 are held out.
    train_rows = samples[:12]
    test_rows = samples[12:]

    model.fit(train_rows, feature_names,
              dispersed=['age', 'working', 'house'],
              label_map={'refuse': 0, 'agree': 1})
    print(model.tree)
    test_X = [row[:-1] for row in test_rows]
    test_Y = [row[-1] for row in test_rows]

    pred_Y = model.predict(test_X,
                           columns=['age', 'working', 'house', 'credit_situation'])
    print('result: ')
    print('test: ', test_Y)
    print('pred: ', pred_Y)


# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    run_my_model()
