# @Author :等风的云
# -*- coding: utf-8 -*-
# 实现CART算法 - 回归树
"""
在分类树中，我们用基尼指数来判定分类纯度，在回归树中，我们用MSE（也就是均方误差）来进行判断
"""
import numpy as np


class DecisionTree(object):
    """CART regression tree.

    While a CART classification tree ranks candidate splits by Gini
    impurity, the regression variant uses the mean squared error (MSE) of
    the two partitions produced by a split. Leaf predictions are the mean
    of the training targets that reach the leaf.
    """

    class Node(object):
        """One tree node; a leaf has ``left`` and ``right`` set to None."""

        def __init__(self):
            self.value = None          # prediction at this node: mean of y
            self.feature_index = None  # column split on (None for a leaf)
            self.feature_value = None  # threshold: go right when x[i] > value
            self.left = None           # subtree for x[i] <= feature_value
            self.right = None          # subtree for x[i] > feature_value

    def __init__(self, mse_threshold=1e-2, mse_dec_threshold=0.):
        """
        :param mse_threshold: stop splitting a node whose MSE is below this.
        :param mse_dec_threshold: minimum MSE decrease required to accept
            the best split; otherwise the node becomes a leaf.
        """
        self.mse_threshold = mse_threshold
        self.mse_dec_threshold = mse_dec_threshold
        self.tree = None

    @staticmethod
    def _mse(y):
        """
        Mean squared error of the targets ``y``.

        mse = sum((y_true - y_hat) ** 2) / n; with the sample mean used as
        the estimate y_hat, this equals the (population) variance of y.
        """
        return np.var(y)

    @staticmethod
    def _get_split_points(feature):
        """
        Candidate split points for one feature column: the midpoints
        between consecutive unique values (empty if the column is constant).

        :param feature: 1-D array of one feature's values.
        :return: list of candidate thresholds.
        """
        feature_value = np.unique(feature)
        return [(v1 + v2) / 2 for v1, v2 in zip(feature_value[:-1], feature_value[1:])]

    def _mse_split(self, y, feature, point):
        """
        Weighted MSE of the two partitions induced by splitting ``feature``
        at ``point``.

        :param y: targets.
        :param feature: 1-D array of one feature's values.
        :param point: split threshold (right partition is feature > point).
        :return: sample-weighted average of the two partition MSEs.
        """
        idx = feature > point
        y1 = y[idx]
        y2 = y[~idx]
        y1_mse = self._mse(y1)
        y2_mse = self._mse(y2)
        return (y1.size * y1_mse + y2.size * y2_mse) / y.size

    def _select_feature(self, x, y):
        """
        Exhaustively search all features and split points for the split
        with the lowest weighted MSE.

        :return: (best_point, best_feature_index, min_mse); the first two
            are None when no split reduces the MSE by at least
            ``mse_dec_threshold``.
        """
        best_feature_index = None
        best_point = None
        min_mse = np.inf
        _, n = x.shape
        for i in range(n):
            points = self._get_split_points(x[:, i])
            for point in points:
                mse = self._mse_split(y, x[:, i], point)
                if mse < min_mse:
                    min_mse = mse
                    best_point = point
                    best_feature_index = i
        # Reject the split when the MSE decrease is below the threshold
        # (also covers the "no candidate split at all" case, min_mse = inf).
        if self._mse(y) - min_mse < self.mse_dec_threshold:
            best_point = None
            best_feature_index = None
        return best_point, best_feature_index, min_mse

    def _create_tree(self, x, y):
        """
        Recursively grow the regression tree.

        :param x: 2-D feature matrix for the samples at this node.
        :param y: 1-D targets for the samples at this node.
        :return: root Node of the (sub)tree.
        """
        node = self.Node()
        node.value = np.mean(y)
        if self._mse(y) < self.mse_threshold:
            return node
        feature_value, feature_index, min_mse = self._select_feature(x, y)
        # BUG FIX: the original tested `if feature_index:`, which is False
        # for index 0, so the first feature could never be chosen as the
        # split. Compare against None explicitly.
        if feature_index is not None:
            node.feature_index = feature_index
            node.feature_value = feature_value
            idx = x[:, feature_index] > feature_value
            x1, y1 = x[~idx], y[~idx]
            x2, y2 = x[idx], y[idx]
            node.left = self._create_tree(x1, y1)
            node.right = self._create_tree(x2, y2)
        return node

    def train(self, x_train, y_train):
        """Fit the tree on training data."""
        self.tree = self._create_tree(x_train, y_train)

    def _predict_one(self, x):
        """
        Predict one sample by walking from the root to a leaf.

        :param x: 1-D feature vector.
        :return: predicted value (leaf mean).
        """
        node = self.tree
        while node.left:
            if x[node.feature_index] > node.feature_value:
                node = node.right
            else:
                node = node.left
        return node.value

    def predict(self, x):
        """
        Predict a batch of samples.

        :param x: 2-D feature matrix, one sample per row.
        :return: 1-D array of predictions.
        """
        y_predict = np.apply_along_axis(self._predict_one, 1, x)
        return y_predict


if __name__ == '__main__':
    from sklearn.metrics import mean_absolute_error, mean_squared_error
    from sklearn.metrics import r2_score
    from sklearn.model_selection import train_test_split

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (float64 parsing).
    data = np.genfromtxt('DecisionTree_Regression_housing.data', dtype=float)
    x_data = data[:, : -1]
    y_data = data[:, -1]
    X_train, X_test, Y_train, Y_test = train_test_split(x_data, y_data, test_size=0.3)
    cart = DecisionTree()
    cart.train(X_train, Y_train)
    Y_predict = cart.predict(X_test)
    print(r2_score(y_true=Y_test, y_pred=Y_predict))  # coefficient of determination R^2
    print(mean_absolute_error(Y_test, Y_predict))  # mean absolute error (MAE)
    print(mean_squared_error(Y_test, Y_predict))  # mean squared error (MSE)
