"""
GBDT
house price prediction
we recast this regression task as binary classification (P/N):
a prediction is labeled positive when it falls within 25% of the true value

"""
# accuracy: 0.79
# time: near-instant

import pandas as pd
import numpy as np


def loaddata(filename, rate=0.2, seed=666):
    """Load a CSV dataset and split it into train/test partitions.

    :param filename: path to a CSV file; the last column is the label
    :param rate: fraction of rows reserved for the test set (default 0.2)
    :param seed: RNG seed used when shuffling the rows
    :return: (train_data, test_data) as numpy arrays
    """
    data = pd.read_csv(filename).values

    # Shuffle rows so the split is not order-dependent. A numpy
    # permutation avoids importing sklearn just for shuffling.
    rng = np.random.default_rng(seed)
    data = data[rng.permutation(len(data))]

    # Manual split; train_test_split deliberately not used here.
    cut = int(len(data) * (1 - rate))
    return data[:cut], data[cut:]


def findBestFeatureAndPoint(node):
    '''
    Find the best split feature and split point under the MSE criterion.

    :param node: samples reaching this node — a 2-D array whose last
                 column holds the (residual) target values
    :return: (best_feature, best_point, best_left, best_right) where the
             left/right entries are the row subsets of ``node``
    '''
    m, n = node.shape
    # The last column is the label, so only n-1 feature columns exist.
    n = n - 1
    y = node[:, -1]

    # Best split found so far. np.inf (np.Inf was removed in NumPy 2.0).
    min_loss = np.inf
    best_feature = -1
    best_point = -1
    best_left = None
    best_right = None

    for feature in range(n):
        column = node[:, feature]
        category = sorted(set(column))

        # Few distinct values (<= 10): treat as discrete, try every value.
        # Otherwise treat as continuous and try only ~10 decile candidates.
        if len(category) <= 10:
            split_point = category
        else:
            idx = np.arange(0, len(category), len(category) // 10)
            split_point = [category[i] for i in idx]

        for point in split_point:
            mask = column <= point
            y_left = y[mask]
            y_right = y[~mask]

            # A candidate equal to the column max leaves the right side
            # empty; the original then took np.average of an empty slice
            # (RuntimeWarning, nan loss). Skip degenerate splits instead.
            if len(y_left) == 0 or len(y_right) == 0:
                continue

            # CART regression minimizes MSE, so the optimal constant on
            # each side is simply the mean of that side's targets.
            c_left = np.average(y_left)
            c_right = np.average(y_right)
            loss = np.sum(np.square(y_left - c_left)) + \
                np.sum(np.square(y_right - c_right))
            if loss < min_loss:
                min_loss = loss
                best_feature = feature
                best_point = point
                best_left = node[mask]
                best_right = node[~mask]
    return (best_feature, best_point, best_left, best_right)


def createCART(data, deep, max_deep=2):
    '''
    Build a regression tree using the MSE splitting criterion.

    :param data: training samples; the last column holds the current
                 residual targets
    :param deep: current depth of the tree
    :param max_deep: maximum depth (counting from 0); the default of 2
                     yields 4 leaf nodes
    :return: a nested dict such as
             {3: {'left': ..., 'right': ...}, 'point': 10.4}
             or a float leaf value at the bottom level
    '''
    if deep > max_deep:
        # Guard: normal recursion never reaches past max_deep.
        return None

    if deep == max_deep:
        # Bottom level: the leaf prediction is the mean residual.
        return np.average(data[:, -1])

    feature, point, left, right = findBestFeatureAndPoint(data)
    tree = {feature: {}, 'point': point}

    # Recurse only while a child has enough samples to split further;
    # otherwise it becomes a leaf holding the mean of its LABEL column.
    # (The original averaged the entire row matrix, features included —
    # np.average(left) — which produced a wrong leaf value.)
    if len(left) >= 2:
        tree[feature]['left'] = createCART(left, deep + 1, max_deep)
    else:
        tree[feature]['left'] = np.average(left[:, -1])
    if len(right) >= 2:
        tree[feature]['right'] = createCART(right, deep + 1, max_deep)
    else:
        tree[feature]['right'] = np.average(right[:, -1])
    return tree


def predict_for_rm(data, tree, alpha):  # used by gradientBoosting
    '''
    Walk one fitted tree to get its prediction for a single sample,
    so the caller can update the residuals for the next round.

    :param data: one sample (indexable by feature number)
    :param tree: a regression tree dict, or a bare leaf value
    :param alpha: regularization (shrinkage) coefficient
    :return: the tree's prediction scaled by alpha
    '''
    # Tree layout example:
    # {3: {'left': {...}, 'right': {...}}, 'point': 10.4}
    node = tree
    while isinstance(node, dict):
        threshold = node['point']
        # The split feature is the single integer key beside 'point'.
        split_feature = next(k for k in node if k != 'point')
        branch = 'left' if data[split_feature] <= threshold else 'right'
        node = node[split_feature][branch]
    # node is now a leaf value; shrink its contribution by alpha.
    return node * alpha


def gradientBoosting(round, data, alpha):
    '''
    Train a gradient-boosted ensemble of regression trees.

    :param round: number of boosting rounds (= number of trees); the
                  name shadows the builtin but is kept for compatibility
    :param data: training set; NOTE — the last column is overwritten in
                 place with the running residuals
    :param alpha: shrinkage coefficient applied to every tree
    :return: list of trees; element 0 is the constant base prediction
    '''
    tree_list = []

    # Step 1: initialize f0(x) with the constant minimizing squared
    # loss, i.e. the mean target. Key -1 marks "no split feature".
    fx0 = {-1: np.average(data[:, -1])}
    tree_list.append(fx0)

    # Each round fits a tree to the residuals r_i = y_i - f_m(x_i).
    for m in range(1, round):
        if m == 1:
            data[:, -1] = data[:, -1] - fx0[-1]
        else:
            # Subtract the newest tree's shrunken prediction from the
            # stored residuals. (The original reused loop variable `i`
            # here, shadowing the round counter.)
            for row in range(len(data)):
                data[row, -1] -= predict_for_rm(data[row], tree_list[-1], alpha)
        # Fit a regression tree to the updated residuals.
        fx = createCART(data, deep=0, max_deep=4)
        tree_list.append(fx)
    return tree_list


def predict(data, tree_list, alpha):
    '''
    Predict one sample by summing the contributions of every tree.

    :param data: the sample to predict (indexable by feature number)
    :param tree_list: all trees produced by gradientBoosting
    :param alpha: regularization (shrinkage) coefficient
    :return: the ensemble prediction f_M(x)
    '''
    fmx = 0
    for index, tree in enumerate(tree_list):
        if index == 0:
            # fx0 is a single constant stored under key -1 (depth 0).
            fmx += tree[-1]
            continue
        # Tree layout example:
        # {3: {'left': {...}, 'right': {...}}, 'point': 10.4}
        node = tree
        while isinstance(node, dict):
            threshold = node['point']
            keys = list(node.keys())
            feature = keys[0] if type(keys[0]).__name__ == 'int' else keys[1]
            if data[feature] <= threshold:
                node = node[feature]['left']
            else:
                node = node[feature]['right']
        # Leaf reached: add the shrunken contribution.
        fmx += alpha * node
    return fmx


def test(X_test, y_test, tree_list, alpha):
    '''
    Evaluate the ensemble on a test set, counting a prediction as
    correct when it falls within +/-25% of the true value.

    :param X_test: test features (one row per sample)
    :param y_test: true target values
    :param tree_list: trees from gradientBoosting
    :param alpha: shrinkage coefficient used in training
    :return: list of predictions, one per test sample
    '''
    acc_num = 0
    y_predict = []
    for i in range(len(X_test)):
        print('testing :', i)
        y_pred = predict(X_test[i], tree_list, alpha)
        y_predict.append(y_pred)
        # Relative-error criterion; guard against a zero true value
        # (the original divided by y_test[i] unconditionally, which
        # would raise / warn on a zero label).
        if y_test[i] != 0 and 0.75 < y_pred / y_test[i] < 1.25:
            acc_num += 1
        print(f'testing {i}th data :y_pred={y_pred},y={y_test[i]}')
        print('now_acc=', acc_num / (i + 1))
    return y_predict


if __name__ == '__main__':
    # Load and split the Boston housing data (last column = price).
    train_data, test_data = loaddata('boston_house_prices.csv')

    # Train 10 boosting rounds; 0.15 is the regularization (shrinkage) coefficient.
    tree_list = gradientBoosting(10, train_data, 0.15)

    # Evaluate on the held-out split with the same shrinkage coefficient.
    X_test, y_test = test_data[:, :-1], test_data[:, -1]
    y_pred = test(X_test, y_test, tree_list, 0.15)

    from sklearn.metrics import r2_score
    score = r2_score(y_test, y_pred)
    print(score)
