# @Author :等风的云
# -*- coding: utf-8 -*-
# 实现CART算法 - 分类决策树

import numpy as np


class DecisionTree(object):
    """CART classification tree: binary splits chosen by Gini-impurity reduction.

    Training greedily picks, at every node, the (feature, threshold) pair that
    minimizes the weighted Gini index of the two children, and stops splitting
    when the impurity reduction falls below ``threshold``.
    """

    class Node(object):
        """A node of the binary tree; a leaf keeps only ``value``."""

        def __init__(self):
            self.value = None          # majority class of the samples at this node
            self.feature_index = None  # split column; None means this is a leaf
            self.feature_value = None  # split threshold: x[feature_index] < value goes left
            self.left = None
            self.right = None

    def __init__(self, threshold=1e-2):
        """
        :param threshold: minimum Gini-impurity reduction required to split a node
        """
        self.threshold = threshold
        self.tree = None

    @staticmethod
    def _gini_index(y):
        """
        Compute the Gini index of a label array: gini(D) = 1 - sum(p_k ** 2).

        :param y: 1-D array of class labels
        :return: Gini impurity in [0, 1)
        """
        s = 0
        for v in np.unique(y):
            p = y[y == v].size / y.size
            s += p ** 2
        return 1 - s

    @staticmethod
    def _select_points(x):
        """
        Candidate split points for one feature: midpoints between consecutive
        unique values (zip(x, x[1:]) pairs each value with its successor).

        :param x: 1-D array of feature values
        :return: list of candidate thresholds (empty if x has one unique value)
        """
        x = np.unique(x)
        return [(v1 + v2) / 2 for v1, v2 in zip(x, x[1:])]

    def _gini_split(self, feature, point, y):
        """
        Weighted Gini index after splitting on ``feature < point``.

        :param feature: 1-D array of one feature's values
        :param point: split threshold
        :param y: labels aligned with ``feature``
        :return: size-weighted average Gini of the two partitions
        """
        idx = feature < point
        left = y[idx]
        right = y[~idx]
        left_gini = self._gini_index(left)
        right_gini = self._gini_index(right)
        return (left.size * left_gini + right.size * right_gini) / y.size

    def _select_feature(self, x, y):
        """
        Search all (feature, candidate point) pairs for the split with the
        lowest weighted Gini index.

        :param x: 2-D sample matrix
        :param y: label array
        :return: (best_feature_index, best_split_value, min_gini); the first two
                 are None when no split improves impurity by at least threshold
        """
        best_feature_index = None
        best_split_value = None
        min_gini = np.inf
        _, n = x.shape
        for feature_idx in range(n):
            points = self._select_points(x[:, feature_idx])
            for p in points:
                gini = self._gini_split(x[:, feature_idx], p, y)
                if gini < min_gini:
                    min_gini = gini
                    best_feature_index = feature_idx
                    best_split_value = p
        # If the impurity reduction is below the threshold, refuse to split.
        if self._gini_index(y) - min_gini < self.threshold:
            best_feature_index = None
            best_split_value = None
        return best_feature_index, best_split_value, min_gini

    def _build_tree(self, x, y):
        """
        Recursively build the binary decision tree.

        :param x: 2-D sample matrix
        :param y: non-negative integer labels (required by np.bincount)
        :return: root Node of the (sub)tree
        """
        node = DecisionTree.Node()
        node.value = np.argmax(np.bincount(y))  # majority class, used if leaf
        best_feature_index, best_split_value, min_gini = self._select_feature(x, y)
        # BUG FIX: the original tested `if best_feature_index:`, which is False
        # for feature index 0, so a best split on column 0 was silently dropped.
        if best_feature_index is not None:
            node.feature_index = best_feature_index
            node.feature_value = best_split_value
            feature = x[:, best_feature_index]
            idx = feature < best_split_value
            node.left = self._build_tree(x[idx], y[idx])
            node.right = self._build_tree(x[~idx], y[~idx])
        return node

    def train(self, x, y):
        """
        Fit the tree on training data and store it on the instance.

        :param x: 2-D sample matrix
        :param y: non-negative integer labels
        """
        self.tree = self._build_tree(x, y)

    def predict(self, x):
        """
        Predict a batch of samples, one row at a time.

        :param x: 2-D sample matrix
        :return: 1-D array of predicted labels
        """
        return np.apply_along_axis(self._single_predict, 1, x)

    def _single_predict(self, x):
        """
        Walk the tree for a single sample.

        :param x: 1-D feature vector
        :return: predicted class label
        """
        node = self.tree
        while node.left:
            # Training partitions with `feature < point` going left, so equality
            # belongs to the right branch; use >= here to stay consistent
            # (the original used >, sending boundary values left).
            if x[node.feature_index] >= node.feature_value:
                node = node.right
            else:
                node = node.left
        return node.value


if __name__ == '__main__':
    from sklearn.preprocessing import LabelEncoder
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import accuracy_score

    # NumPy removed the deprecated aliases np.float / np.str in 1.24;
    # the builtin types are the documented replacements.
    X = np.genfromtxt('CART_Classify.data', delimiter=',', usecols=range(4), dtype=float)
    y = np.genfromtxt('CART_Classify.data', delimiter=',', usecols=(4,), dtype=str)
    le = LabelEncoder()
    # Map string class names to integers 0..k-1 (np.bincount needs int labels).
    trans_y = le.fit_transform(y)
    cart = DecisionTree()
    x_train, x_test, y_train, y_test = train_test_split(X, trans_y, test_size=0.3)
    cart.train(x_train, y_train)
    y_predict = cart.predict(x_test)
    print(accuracy_score(y_test, y_predict))
