"""
决策树的节点类，主要实现：
* 训练中节点分类出子节点 split(), get_best_split();
* 预测中把流到该节点的测试集数据分流到子节点 predict();
* 打印决策树中提取该节点的描述信息 description()

get_best_split() 方法都提供了快速版本 get_best_split_quick，通过利用 numpy 的数组
运算实现遍历特征和分割点这一过程的并行加速。
快速版本对分类任务效果明显，可以加速到 500多倍，但对于回归任务效果一般，
原因是快速版本中遍历特征和分割点时还是有不少重复计算
这两个方法的选择只需在 split() 中选相应的方法就可以了
    best_feature, threshhold, mask = self.get_best_split_quick(
        sorted_data, sorted_target, num_feature,
        num_samples, min_impurity_decrease)
"""
import numpy as np


class MyStopError(Exception):
    """Raised during the feature sweep to abort splitting when a stop
    condition is met (caught by NodeBase.split)."""


def get_entropy(y):
    """Return the Shannon entropy (base 2) of a label sequence.

    :param y: sample label sequence
    :type  y: numpy.ndarray
    :returns: entropy in bits; 0 for an empty sequence
    """
    if y.size == 0:
        return 0
    _, counts = np.unique(y, return_counts=True)
    probs = counts / y.size
    return -np.sum(probs * np.log2(probs))


def get_gini(y):
    """Return the Gini impurity of a label sequence.

    :param y: sample label sequence
    :type  y: numpy.ndarray
    :returns: Gini impurity in [0, 1); 0 for an empty sequence
    """
    if y.size == 0:
        return 0
    _, counts = np.unique(y, return_counts=True)
    probs = counts / y.size
    return 1 - np.dot(probs, probs)


def get_loss(y):
    """Return the regression loss: mean squared deviation from the mean.

    :param y: sample target sequence
    :type  y: numpy.ndarray
    :returns: MSE around the mean; 0 for an empty sequence
    """
    if y.size == 0:
        return 0
    return np.mean(np.square(y - y.mean()))


class NodeBase:
    """Base class for decision-tree nodes.

    Holds the tree structure (father/children/depth), the lazily-sliced
    training data belonging to the node, and the generic split/predict
    drivers.  Subclasses (NodeClassifer, NodeRegressor) supply the
    impurity measure, the node prediction and the best-split search.

    :param father: parent node, assigned during training
    :type  father: NodeClassifer or NodeRegressor
    :param depth: depth of this node within the tree
    :type  depth: int
    """
    # Class-level references to the full training set;
    # assigned at the start of tree.fit
    root_data = np.array([])
    root_target = np.array([])

    def __init__(self, father=None, depth=0):
        self.father = father
        self.children = {}
        self.depth = depth
        self.feature = None  # feature this node splits on, set during training
        self.threshhold = None  # split threshold for that feature
        self.tmp = {}  # scratch space for intermediate results

    @property
    def data(self):
        """Training samples belonging to this node, dtype=np.ndarray.

        Lazily sliced from the class-wide root_data using the sample
        indices stored in tmp['training_index'], then cached.
        """
        try:
            return self._data
        except AttributeError:
            self._data = self.root_data[self.tmp['training_index']]
            return self._data

    @data.setter
    def data(self, value: np.ndarray):
        self._data = value

    @property
    def target(self):
        """Training labels belonging to this node, dtype=np.ndarray.

        Lazily sliced from root_target via tmp['training_index'], then cached.
        """
        try:
            return self._target
        except AttributeError:
            self._target = self.root_target[self.tmp['training_index']]
            return self._target

    @target.setter
    def target(self, value: np.ndarray):
        self._target = value

    def is_pure(self):
        """Whether the node is pure enough to stop splitting (subclass hook)."""
        raise NotImplementedError

    def get_best_split(self, *args, **kwargs):
        """Plain-loop best-split search (subclass hook)."""
        raise NotImplementedError

    def get_best_split_quick(self, *args, **kwargs):
        """Vectorized best-split search (subclass hook)."""
        raise NotImplementedError

    def new(self, *args, **kwargs):
        """Create a child node of the same concrete class as self."""
        return self.__class__(*args, **kwargs)

    def cut(self):
        """Turn this node back into a leaf: drop children and split info."""
        self.children = {}
        self.feature = None
        self.threshhold = None

    def split(self, min_samples_split=2, max_depth=None,
              min_impurity_decrease=0.0, min_samples_leaf=1):
        """Split this node during training.

        First decide whether the node may split at all; the stop
        conditions are:
            * every leaf of the tree must keep at least min_samples_leaf samples
            * the node is already pure (e.g. zero entropy within the node)
            * the node must hold at least min_samples_split samples to split
            * the node depth must stay below max_depth
        If splitting is allowed, sweep all (feature, threshold)
        combinations and split on the best one.

        Three ways of preparing the children's sorted data are kept in
        the code below.  Methods 1 and 2 sort the training data along
        axis=0 only once; each child then extracts its own sorted slice
        from the parent's sorted data via the sample indices in
        tmp['training_index'].  The two differ only in which numpy
        functions they use.

        In method 3 every node re-sorts its own data along axis=0.
        Experiments on training sets of a few thousand samples show
        method 3 is faster: re-sorting the node's own data is cheaper
        than extracting from the parent's sorted arrays.

        Select a method by leaving the other two commented out.

        :param min_samples_split: node may split only above this sample count
        :type  param: int
        :param max_depth: maximum depth of the decision tree
        :type  max_depth: int
        :param min_impurity_decrease: split only if gain exceeds this value
        :type  min_impurity_decrease: float
        :param min_samples_leaf: minimum samples allowed in a node
        :type  min_samples_leaf: int
        """
        try:
            # Decide whether the node may split at all
            if (
                    self.is_pure() or
                    max_depth is not None and self.depth >= max_depth or
                    self.data.shape[0] < min_samples_split
            ):
                raise MyStopError

            num_samples, num_feature = self.data.shape
            sorted_data = self.tmp['sorted_data']
            sorted_target = self.tmp['sorted_target']

            # may raise MyStopError when the best gain is below
            # min_impurity_decrease
            best_feature, threshhold, mask = self.get_best_split_quick(
                sorted_data, sorted_target, num_feature,
                num_samples, min_impurity_decrease)

            for key, m in [("left", mask), ("right", ~mask)]:
                if np.sum(m) < min_samples_leaf:
                    raise MyStopError

                child = self.new(father=self, depth=self.depth+1)
                child_index = self.tmp['training_index'][m]
                child.tmp['training_index'] = child_index

                # ## Method 1
                #sorted_index = self.tmp['sorted_index']
                #m_ = np.isin(sorted_index.flatten(order='F'), child_index)
                #child.tmp['sorted_index'] = sorted_index.flatten(order='F')[m_].reshape(-1, num_feature, order='F')
                #child.tmp['sorted_data'] = sorted_data.flatten(order='F')[m_].reshape(-1, num_feature, order='F')
                #child.tmp['sorted_target'] = sorted_target.flatten(order='F')[m_].reshape(-1, num_feature, order='F')

                # ## Method 2
                #sorted_index = self.tmp['sorted_index']
                #m_ = np.isin(sorted_index.flatten(order='F'), child_index)
                #child.tmp['sorted_index']= sorted_index.flatten(order='F')[m_].reshape(-1, num_feature, order='F')
                #child.tmp['sorted_data'] = np.take_along_axis(self.root_data, child.tmp['sorted_index'], axis=0)
                #child.tmp['sorted_target'] = self.root_target[child.tmp['sorted_index']]

                # ## Method 3
                sorted_index = np.argsort(child.data, axis=0)
                child.tmp['sorted_data'] = np.take_along_axis(
                    child.data, sorted_index, axis=0)
                child.tmp['sorted_target'] = child.target[sorted_index]

                self.children[key] = child

            self.feature = best_feature
            self.threshhold = threshhold
        except MyStopError:  # a stop condition was hit; stay a leaf
            self.cut()

    def predict(self, data, index):
        """Route the test samples reaching this node to its children.

        :param data: test-set samples that reached this node
        :type  data: ndarray
        :param index: positions of `data` within the full test set
        :type  index: ndarray
        :returns: dict with the node's prediction (leaf) or the child
                  nodes plus the index subsets routed to each (internal)
        """
        if not self.children:  # leaf node
            # for regression `label` is the mean value (see NodeRegressor.label)
            return {"label": self.label,
                    "children": [],
                    "index": None}

        mask = data[:, self.feature] <= self.threshhold
        return {"label": None,
                "children": [self.children["left"], self.children["right"]],
                "index": [index[mask], index[~mask]]}


class NodeClassifer(NodeBase):
    """Decision-tree node for classification tasks (CART, Gini criterion).

    Inherits data handling and the split/predict drivers from NodeBase;
    implements the impurity measures, the majority-vote node label and
    the best-split search (plain and vectorized versions).
    """

    @property
    def gini(self) -> float:
        """Gini impurity of this node's targets, computed once and cached."""
        try:
            return self.__gini
        except AttributeError:
            self.__gini = get_gini(self.target)
        return self.__gini

    @property
    def entropy(self) -> float:
        """Shannon entropy of this node's targets, computed once and cached."""
        try:
            return self.__entropy
        except AttributeError:
            self.__entropy = get_entropy(self.target)
        return self.__entropy

    @property
    def label(self) -> int:
        """Predicted class of this node (majority label), cached."""
        try:
            return self._label
        except AttributeError:
            self._label = self._get_node_label()
            return self._label

    def _get_node_label(self):
        """Return the most frequent class label among this node's samples.

        Ties break in favour of the smallest label: np.unique returns
        sorted labels and np.argmax picks the first maximum, matching
        the behaviour of the previous max(..., key=...) implementation.
        Overridden by the regression subclass.

        :returns: node label (int)
        """
        labels, counts = np.unique(self.target, return_counts=True)
        return labels[np.argmax(counts)]

    def is_pure(self):
        """Return True when every sample shares one label (stop splitting)."""
        if (self.target == self.target[0]).all():
            return True
        return False

    def get_best_split_quick(self, sorted_data, sorted_target, num_feature,
                             num_samples, min_impurity_decrease=0.0):
        """Find the best split feature and threshold for this node.

        The loops over features and split points are replaced by numpy
        array operations.

        :param sorted_data: node data sorted along axis=0
        :type  sorted_data: np.ndarray
        :param sorted_target: node target (ndim=1) reordered once per
                              feature by that feature's sort order
        :type  sorted_target: np.ndarray
        :param num_feature: number of training-set features
        :type  num_feature: int
        :param num_samples: number of samples in this node
        :type  num_samples: int
        :param min_impurity_decrease: minimum gain required to split
        :type  min_impurity_decrease:  float
        :raises MyStopError: when the best gain is below min_impurity_decrease
        :returns: best feature, threshold, and the left-child mask
        """

        # assumes integer class labels in 0..max — TODO confirm upstream encoding
        classes = np.arange(self.target.max()+1)
        num_classes = classes.size

        # count[i, j, k] will record, for the split after sorted position i
        # on feature k, how many left-child samples carry label j
        count = np.zeros((num_samples, num_classes, num_feature), dtype=int)
        i = np.repeat(np.arange(num_samples), num_feature)
        j = sorted_target.flatten()
        k = np.tile(np.arange(num_feature), num_samples)
        # per-class sample counts for the whole node, shape (num_classes, 1)
        count_N = np.sum(self.target == classes.reshape(-1, 1),
                         axis=1, keepdims=True)
        # after sorting by feature k, position i contributes one sample of
        # class j to the left child
        count[i, j, k] = 1
        # prefix-sum over positions: per-class left-child counts per split
        count = np.cumsum(count, axis=0)
        # score every split; these are n_child * sum(p_i^2), larger is better
        gini_l = np.sum(np.square(count), axis=1) / \
            np.arange(1, num_samples+1).reshape(-1, 1)
        # the last position cannot be a split (the right child would be
        # empty and divide by zero), hence [:-1]
        gini_r = np.sum(np.square(
            count_N - count), axis=1)[:-1] / np.arange(num_samples-1, 0, -1).reshape(-1, 1)
        gini = gini_l[:-1] + gini_r

        # Rule out duplicated feature values:
        # mask marks every split point that falls between equal sorted values;
        # only after de-duplication do we pick the best (feature, threshhold)
        mask = sorted_data[:-1] == sorted_data[1:]
        gini[mask] = 0.0

        # gini here is :math:`num_samples * \sum{p_i^2}`, larger is better;
        # (1 - gini/n) is the true weighted Gini value
        i, best_feature = np.unravel_index(np.argmax(gini), gini.shape)

        # ######### enforce the min_impurity_decrease stopping criterion
        # recover the true Gini value
        gini_min = 1 - gini[i, best_feature] / num_samples
        gain = get_gini(self.target) - gini_min
        if gain < min_impurity_decrease:
            raise MyStopError
        #threshhold = sorted_data[i, best_feature]
        # midpoint between the two neighbouring sorted values
        threshhold = (sorted_data[i, best_feature] +
                      sorted_data[i+1, best_feature]) / 2.
        return best_feature, threshhold, self.data[:, best_feature] <= threshhold

    def get_best_split(self, sorted_data=None, sorted_target=None, num_feature=None,
                       num_samples=None, min_impurity_decrease=0.0):
        """Readable two-level-loop implementation of the best-split search.

        :raises MyStopError: when the best gain over ALL features is below
                             min_impurity_decrease
        :returns: best feature, threshold, and the left-child mask
        """
        num_samples, num_feature = self.data.shape
        gini_parent = get_gini(self.target)  # loop-invariant, hoisted
        gain_max = -np.inf
        best_feature = None
        threshhold = None
        best_mask = None
        for feature in range(num_feature):
            values = self.data[:, feature]
            for v in np.unique(values):
                mask = values <= v
                # left child
                gini_l = get_gini(self.target[mask])
                rate_l = mask.sum() / num_samples

                # right child
                gini_r = get_gini(self.target[~mask])
                rate_r = (~mask).sum() / num_samples

                gini_after_split = rate_l * gini_l + rate_r * gini_r
                gain = gini_parent - gini_after_split

                if gain > gain_max:
                    gain_max = gain
                    best_feature = feature
                    threshhold = v
                    best_mask = mask

        # BUG FIX: this check used to sit inside the feature loop, which
        # could abort the search after the first feature even though a
        # later feature might clear the threshold.  It now runs once after
        # all features, matching NodeRegressor.get_best_split.
        if gain_max < min_impurity_decrease:
            raise MyStopError
        return best_feature, threshhold, best_mask

    def description(self):
        """Extract this node's description for printing the tree.

        Node description template:
        -----------------------
        |   X[1] <= 2.3       |    : split condition, feature 1 <= threshold
        |   gini: 1.230       |
        |   num_samples: 23   |
        |   value = [2, 4, 0] |    : per-class sample counts in the node
        -----------------------

        :returns: node description string, edge description string
        """

        desc = []
        desc.append(f"gini: {get_gini(self.target):.3f}")
        desc.append(f"num_samples: {self.target.size}")
        value = [np.sum(self.target == label)
                 for label in np.unique(self.target)]
        desc.append(f"value: {value}")
        if len(self.children) > 0:  # internal node: prepend the split condition
            feature = self.feature
            threshhold = self.threshhold
            desc.insert(0, f"X[{feature}] <= {threshhold:.2f}")
        node_comment = '\n'.join(desc)

        # edge description: only children of the root get True/False labels
        edge_comment = ''
        if self.father is not None and self.father.father is None:
            feature = self.father.feature
            threshhold = self.father.threshhold
            value = self.data[0, feature]
            edge_comment = 'True' if value <= threshhold else 'False'
        return node_comment, edge_comment


class NodeRegressor(NodeBase):
    """Decision-tree node for regression tasks (CART, squared-error criterion)."""

    def __init__(self, *args, **kwargs):
        super(NodeRegressor, self).__init__(*args, **kwargs)

    @property
    def value(self) -> float:
        """Mean of this node's targets (the node's prediction), cached."""
        try:
            return self._value
        except AttributeError:
            self._value = self.target.mean()
            return self._value

    @property
    def label(self) -> float:
        # For regression the "label" used by NodeBase.predict is the mean value.
        return self.value

    def is_pure(self):
        """Return True when the node needs no further splitting.

        Implemented as zero total squared deviation from the node mean,
        i.e. all target values in the node are identical.
        """
        if np.sum(np.square(self.target - self.value)) <= 0.0:
            return True
        return False

    def get_best_split_quick(self, sorted_data, sorted_target, num_feature,
                             num_samples, min_impurity_decrease=0.0):
        """Find the best split feature and threshold for this node (vectorized).

        :param sorted_data: node data sorted along axis=0
        :type  sorted_data: np.ndarray
        :param sorted_target: node target (ndim=1) reordered once per
                              feature by that feature's sort order
        :type  sorted_target: np.ndarray
        :param num_feature: number of training-set features
        :type  num_feature: int
        :param num_samples: number of samples in this node
        :type  num_samples: int
        :param min_impurity_decrease: minimum gain required to split
        :type  min_impurity_decrease:  float
        :raises MyStopError: when the best gain is below min_impurity_decrease
        :returns: best feature, threshold, and the left-child mask
        """

        sum_ = np.sum(self.target)
        # per-feature prefix sums: sum_l[i, k] is the target sum of the
        # first i+1 samples when sorted by feature k
        sum_l = np.cumsum(sorted_target, axis=0)[:-1]
        # mean[i, :, k]: child means for the split after position i on
        # feature k — rows :i+1 hold the left-child mean, rows i+1: the
        # right-child mean.  NOTE(review): this allocates an
        # O(num_samples^2 * num_feature) array, which is likely why the
        # quick version gains little for regression (see module docstring)
        mean = np.zeros((num_samples-1, num_samples, num_feature))
        for i in range(num_samples-1):
            mean_l = sum_l[i] / (i+1)
            mean[i, :i+1] = mean_l
            mean_r = (sum_ - sum_l[i]) / (num_samples - i - 1)
            mean[i, i+1:] = mean_r

        # sse[i, k]: total squared error of both children for split i on k
        sse = np.sum(np.square(sorted_target - mean), axis=1)
        # Rule out duplicated feature values:
        # mask marks every split point that falls between equal sorted values;
        # only after de-duplication do we pick the best (feature, threshhold)
        mask = sorted_data[:-1] == sorted_data[1:]
        sse[mask] = np.inf

        i, best_feature = np.unravel_index(np.argmin(sse), sse.shape)
        sse_min = sse[i, best_feature] / num_samples
        # gain = parent variance - mean squared error after the split
        gain = np.mean(np.square(self.target - self.target.mean())) - sse_min
        if gain < min_impurity_decrease:
            raise MyStopError
        #threshhold = sorted_data[i, best_feature]
        # midpoint between the two neighbouring sorted values
        threshhold = (sorted_data[i, best_feature] +
                      sorted_data[i+1, best_feature]) / 2.
        best_mask = self.data[:, best_feature] <= threshhold
        return best_feature, threshhold, best_mask

    def get_best_split(self, sorted_data=None, sorted_target=None, num_feature=None,
                       num_samples=None, min_impurity_decrease=0.0):
        """Readable two-level-loop implementation of the best-split search.

        :param min_impurity_decrease: minimum gain required to split
        :type  min_impurity_decrease:  float
        :raises MyStopError: when the best gain is below min_impurity_decrease
        :returns: best feature, threshold, and the left-child mask
        """
        num_samples, num_feature = self.data.shape
        gain_max = -np.inf
        best_feature = None
        threshhold = None
        best_mask = None
        for feature in range(num_feature):
            values = self.data[:, feature]
            for v in np.unique(values):
                mask = values <= v
                # left child
                target_l = self.target[mask]
                loss_l = get_loss(target_l)
                rate_l = np.sum(mask) / num_samples

                # right child
                target_r = self.target[~mask]
                loss_r = get_loss(target_r)
                rate_r = np.sum(~mask) / num_samples

                loss_after = rate_l * loss_l + rate_r * loss_r

                gain = get_loss(self.target) - loss_after

                if gain > gain_max:
                    gain_max = gain
                    best_feature = feature
                    threshhold = v
                    best_mask = mask
        if gain_max < min_impurity_decrease:
            raise MyStopError
        return best_feature, threshhold, best_mask

    def description(self):
        """Extract this node's description for printing the tree.

        Node description template:
        -----------------------
        |   x[1] <= 2.3       |    : split condition, feature 1 <= threshold
        |   mse: 1.230        |
        |   num_samples: 23   |
        |   value = 3.4       |    : node prediction (mean target)
        -----------------------

        :returns: node description string, edge description string
        """

        desc = []
        desc.append(f"std: {(self.target.std().item()):.3f}")
        desc.append(f"num_samples: {self.target.size}")
        desc.append(f"value: {self.target.mean().item():.3f}")
        if len(self.children) > 0:  # internal node: prepend the split condition
            feature = self.feature
            threshhold = self.threshhold
            desc.insert(0, f"x[{feature}] <= {threshhold:.2f}")
        node_comment = '\n'.join(desc)

        # edge description: only children of the root get True/False labels
        edge_comment = ''
        if self.father is not None and self.father.father is None:
            feature = self.father.feature
            threshhold = self.father.threshhold
            value = self.data[0, feature]
            edge_comment = 'True' if value <= threshhold else 'False'
        return node_comment, edge_comment
