import numpy as np
from sklearn.tree import DecisionTreeClassifier


def load_data(file):
    """
    Load a tab-separated numeric data file.

    :param file: path to a text file whose lines are tab-separated floats
    :return: list of rows, each a list of floats
    """
    with open(file, 'r') as f:
        file_data = f.readlines()

    # rstrip('\n') instead of the original `info[:-1]` slice: slicing off
    # the last character silently corrupts the final value when the last
    # line of the file has no trailing newline.
    return [list(map(float, line.rstrip('\n').split('\t'))) for line in file_data]


class Tree(object):
    """
    回归树(CART：每个叶子节点使用各自的均值预测)
    模型树(每个叶子节点均构建一个线性模型)
    """
    @staticmethod
    def data_split(data, feature, value):
        """
        数据集切分
        :param data:
        :param feature:
        :param value:
        :return:
        """
        # np.nonzero 返回tuple
        greater_index = np.nonzero(np.array([num[feature] for num in data]) > value)[0]
        smaller_index = np.nonzero(np.array([num[feature] for num in data]) <= value)[0]

        return np.array(data)[greater_index], np.array(data)[smaller_index]

    @staticmethod
    def reg_leaf(data):
        """
        建立叶子点的函数：最后一列均值
        :param data:
        :return:
        """
        return np.mean([num[-1] for num in data])

    @staticmethod
    def reg_err(data):
        """
        误差计算函数（总方差）
        :param data:
        :return:
        """
        return np.var([num[-1] for num in data]) * np.shape(data)[0]

    def choose_best_split(self, data, leaf_type, err_type, ops=(1, 4)):
        """
        最佳方式切分数据集和生成相应叶子节点
        :param leaf_type: 建立叶子点的函数
        :param err_type: 误差计算函数（总方差）
        :param ops: 容许误差下降值，切分的最小样本数
                  决定了决策树划分停止的threshold（预剪枝）
        :return: 特征的index坐标和切分的最优值
        """
        m, n = np.shape(data)
        # 误差阈值和划分的最小size（预剪枝：缺点：对ops参数极度敏感）
        tol_s, tol_n = ops
        # 如果最后一列的所有值都相等，退出
        if len(set([num[-1] for num in data])) == 1:
            return None, leaf_type(data)

        # 无分类误差的总方差和
        s = err_type(data)
        best_s, best_index, best_value = np.inf, 0, 0
        for fea_index in range(n - 1):
            for split_val in set([num[fea_index] for num in data]):
                data_0, data_1 = self.data_split(data, feature=fea_index, value=split_val)
                # 判断切分数据是否小于划分的最小size
                if (np.shape(data_0)[0] < tol_n) or (np.shape(data_1)[0] < tol_n):
                    continue

                new_s = err_type(data_0) + err_type(data_1)
                if new_s < best_s:
                    best_s = new_s
                    best_index = fea_index
                    best_value = split_val
        if (s - best_s) < tol_s:
            return None, leaf_type(data)

        data_0, data_1 = self.data_split(data, feature=best_index, value=best_value)
        if (np.shape(data_0)[0] < tol_n) or (np.shape(data_1)[0] < tol_n):
            return None, leaf_type(data)

        return best_index, best_value

    def create_cart_tree(self, data, leaf_type=reg_leaf, err_type=reg_err, ops=(1, 4)):
        """
        构建cart树: 在每个叶子节点上使用各自的均值做预测
        :param ops:
        :return:
        """
        feat, val = self.choose_best_split(data, leaf_type, err_type, ops)
        if feat is None:
            return val

        tree = dict()
        tree['sp_ind'] = feat
        tree['sp_val'] = val

        l_set, r_set = self.data_split(data, feature=feat, value=val)
        tree['left'] = self.create_cart_tree(l_set, leaf_type, err_type, ops)
        tree['right'] = self.create_cart_tree(r_set, leaf_type, err_type, ops)
        return tree

    def get_mean(self, tree):
        """
        :param tree:
        :return:
        """
        if isinstance(tree['right'], dict):
            tree['right'] = self.get_mean(tree)
        if isinstance(tree['left'], dict):
            tree['left'] = self.get_mean(tree)

        return (np.array(tree['left']) + np.array(tree['right'])) / 2

    def tree_prune(self, tree, test_data):
        """
        后剪枝
        :param tree:
        :param test_data:
        :return:
        """
        # 如果测试数据为空，返回树的均值
        if np.shape(test_data)[0] == 0 and isinstance(tree, dict):
            return self.get_mean(tree)

        # 在左右枝任意存在的情况下，切分测试集
        if (isinstance(tree, dict) and isinstance(tree['right'], dict)) or \
                (isinstance(tree, dict) and isinstance(tree['left'], dict)):
            l_set, r_set = self.data_split(data=test_data, feature=tree['sp_ind'], value=tree['sp_val'])

            # 左剪枝
            if isinstance(tree, dict) and isinstance(tree['left'], dict):
                tree['left'] = self.tree_prune(tree=tree['left'], test_data=l_set)
            if isinstance(tree, dict) and isinstance(tree['right'], dict):
                tree['right'] = self.tree_prune(tree=tree['right'], test_data=r_set)

        # 若左右枝不存在（均为叶子节点），切分测试集
        if not (isinstance(tree, dict) and isinstance(tree['right'], dict)) and \
                not (isinstance(tree, dict) and isinstance(tree['left'], dict)):
            l_set, r_set = self.data_split(data=test_data, feature=tree['sp_ind'], value=tree['sp_val'])
            # 左右枝的最后一列与切分的数据集进行均方误求和
            errro_no_merge = 0
            if l_set.any():
                errro_no_merge += np.power(np.array([num[-1] for num in l_set]) - tree['left'], 2).sum()
            if r_set.any():
                errro_no_merge += np.power(np.array([num[-1] for num in r_set]) - tree['right'], 2).sum()

            tree_mean = (tree['left'] + tree['right']) / 2
            # 测试集与树均值的均方误（总的均方误）
            errro_merge = np.power(np.array([num[-1] for num in test_data]) - tree_mean, 2).sum()

            if errro_merge < errro_no_merge: # 总均方误 小于 左右枝均方误之和
                print('merging')
                return tree_mean
            return tree
        return tree

    @staticmethod
    def cart_tree_eval(node, test_data):
        """
        cart树叶子结点预测值
        :param node:
        :param test_data:
        :return:
        """
        return float(node)

    @staticmethod
    def model_tree_eval(node, tes_data):
        """
        模型树叶子节点预测值
        :param node:
        :param tes_data:
        :return:
        """
        x = [1] + list(tes_data)
        return np.dot(x, node)[0]

    def cart_tree_predict(self, tree, test_data, get_predict):
        """
        cart tree predict
        :param tree:
        :param test:
        :return:
        """
        if not isinstance(tree, dict):
            return get_predict(tree, test_data)

        if test_data[tree['sp_ind']] > tree['sp_val']:
            if isinstance(tree['left'], dict):
                return self.cart_tree_predict(tree['left'], test_data, get_predict)
            else:
                return get_predict(tree['left'], test_data)
        else:
            if isinstance(tree['right'], dict):
                return self.cart_tree_predict(tree['right'], test_data, get_predict)
            else:
                return get_predict(tree['right'], test_data)

    def cart_tree_error(self, tree, test_data, get_predict):
        """
        回归树  预测均方误，相关系数
        :param tree: cart树
        :param test_data:
        :return:
        """
        y = []
        pre_y = []
        for num in test_data:
            y.append(num[-1])
            x = num[: -1]
            predict = self.cart_tree_predict(tree, test_data=x, get_predict=get_predict)
            pre_y.append(predict)
        return np.power(pre_y - np.array(y), 2).sum(), np.corrcoef(pre_y, y)[0][1]

    @staticmethod
    def sklearn_decision_tree(train_data, test_data):
        """
        sklearn 决策树
        :param test_data:
        :return:
        """
        if len(train_data[0]) < 3:
            raise NameError('训练集只有一维特征，至少需要2个特征')
        train_data = np.mat(train_data)
        test_data = np.mat(test_data)
        train_x = train_data[:, :-1].A
        train_y = train_data[:, -1].A
        test_x = test_data[:, :-1].A
        test_y = test_data[:, -1].A

        clf = DecisionTreeClassifier()
        clf.fit(train_x, train_y)
        pre = clf.predict(test_x)

        print('test_y: {}'.format(test_y))
        print('pre: {}'.format(pre))

    @staticmethod
    def model_tree(data):
        """
        模型树：每个叶子节点均构建一个线性模型
        解释性和准确率均优于回归树
        :return:
        """
        x = [[1] + list(num[: -1]) for num in data]
        y = [[num[-1]] for num in data]

        x_t = np.array(x).T
        x_t_x = np.dot(x_t, x)

        if np.linalg.det(x_t_x) == 0:
            raise NameError('奇异矩阵，不能求逆，请调整参数lam值')

        w_t_y = np.dot(x_t, y)

        weight = np.dot(np.linalg.inv(x_t_x), w_t_y)
        return weight, x, y

    def model_leaf(self, data):
        """
        与reg_leaf类似，当数据不再需要切分时，负责生成叶子节点的模型（LR模型）
        :param data:
        :return:
        """
        weight, x, y = self.model_tree(data)
        return weight

    def model_tree_error(self, data):
        """
        模型树训练过程中最小化误差
        :param data:
        :return:
        """
        weight, x, y = self.model_tree(data)
        fit_y = np.dot(x, weight)
        return np.power(fit_y - np.array(y), 2).sum()


def run_simple():
    """Demonstrate data_split on a 4x4 identity matrix."""
    splitter = Tree()
    sample = np.eye(4)
    print('test_data: {}'.format(sample))
    upper, lower = splitter.data_split(sample, feature=1, value=0.5)
    print('data_0: {}'.format(upper))
    print('data_1: {}'.format(lower))


def run_cart_tree():
    """Build and print a regression tree from the chapter-9 sample data."""
    builder = Tree()

    file = './data/B/chp9/ex2.txt'
    test_file = './data/B/chp9/ex2test.txt'

    samples = load_data(file)
    cart_tree = builder.create_cart_tree(
        samples, leaf_type=builder.reg_leaf, err_type=builder.reg_err)
    print(cart_tree)
    # Post-pruning variant: rebuild with ops=(0, 1), load test_file via
    # load_data, then print builder.tree_prune(tree=cart_tree, test_data=...).


def run_model_tree():
    """Build and print a model tree from the chapter-9 sample data."""
    builder = Tree()

    model_tree_file = './data/B/chp9/exp2.txt'
    samples = load_data(model_tree_file)

    result = builder.create_cart_tree(samples, builder.model_leaf,
                                      builder.model_tree_error, ops=(1, 10))
    print(result)


def model_evalute():
    """
    Compare three regressors on the bike-speed-vs-IQ data set:
    1. regression tree
    2. model tree
    3. plain linear regression
    (plus sklearn's decision tree as a printed baseline)
    """
    train_file = './data/chp9/bikeSpeedVsIq_train.txt'
    test_file = './data/chp9/bikeSpeedVsIq_test.txt'
    train_data = load_data(train_file)
    test_data = load_data(test_file)

    builder = Tree()

    # Regression tree: leaves predict per-node means
    reg_model = builder.create_cart_tree(train_data, leaf_type=builder.reg_leaf,
                                         err_type=builder.reg_err, ops=(1, 20))
    cart_tree_var, cart_tree_var_cor = builder.cart_tree_error(
        tree=reg_model, test_data=test_data, get_predict=builder.cart_tree_eval)

    # Model tree: leaves hold linear models
    lin_model = builder.create_cart_tree(train_data, builder.model_leaf,
                                         builder.model_tree_error, ops=(1, 20))
    model_tree_var, model_tree_var_cor = builder.cart_tree_error(
        tree=lin_model, test_data=test_data, get_predict=builder.model_tree_eval)

    # Plain linear regression over the full training set
    weight, _, _ = builder.model_tree(train_data)
    test_x = [[1] + list(row[: -1]) for row in test_data]
    test_y = [row[-1] for row in test_data]
    pre_y = [p[0] for p in np.dot(test_x, weight)]
    lr_var = np.power(pre_y - np.array(test_y), 2).sum()
    lr_corr = np.corrcoef(pre_y, test_y)[0][1]

    # sklearn baseline (prints its own comparison)
    builder.sklearn_decision_tree(train_data, test_data)

    print('cart tree: var: {}, corr: {}'.format(cart_tree_var, cart_tree_var_cor))
    print('model tree: var: {}, corr: {}'.format(model_tree_var, model_tree_var_cor))
    print('lr: var: {}, corr: {}'.format(lr_var, lr_corr))


if __name__ == '__main__':
    # Other demos: run_simple(), run_cart_tree(), run_model_tree()
    model_evalute()
