# @Author :等风的云
# -*- coding: utf-8 -*-
# Implementation of the ID3 decision-tree algorithm

import numpy as np


class DecisionTree(object):
    """ID3 decision-tree classifier for integer-encoded categorical features.

    Splits are chosen by maximum information gain; growth stops when a node
    is pure, no features remain, or the best gain falls below ``threshold``.
    """

    class Node(object):
        """A tree node: internal nodes split on a feature, leaves hold a label."""

        def __init__(self):
            # Majority class label of the samples that reached this node.
            # Used both at leaves and as a fallback for unseen feature values.
            self.value = None
            # Column index of the feature this internal node splits on.
            self.feature_index = None
            # Maps feature value -> child Node; empty dict means leaf.
            self.children = {}

        def __str__(self):
            if self.children:
                s = "内部节点<%s>:\n" % self.feature_index
                for fv, node in self.children.items():
                    ss = "[%s] -> %s" % (fv, node)
                    # Indent the child's multi-line rendering one level.
                    s += "\t" + ss.replace("\n", "\n\t") + "\n"
            else:
                s = "叶节点:(%s)" % self.value
            return s

    def __init__(self, threshold=1e-2):
        """
        :param threshold: minimum information gain required to split a node;
                          smaller gains turn the node into a leaf (pre-pruning).
        """
        self.tree = None
        self.threshold = threshold

    @staticmethod
    def _entropy(y):
        """Shannon entropy of the label vector y: -sum(p * log2(p))."""
        counts = np.bincount(y)
        # Keep only non-zero counts so log2 never receives p == 0.
        p = counts[np.nonzero(counts)] / y.size
        return -np.sum(p * np.log2(p))

    def _con_entropy(self, feature, y):
        """Conditional entropy H(y | feature) for a single feature column."""
        h = 0.
        # Partition y by each distinct feature value and weight each
        # subset's entropy by the subset's relative size.
        for value in np.unique(feature):
            y_sub = y[feature == value]
            h += (y_sub.size / y.size) * self._entropy(y_sub)
        return h

    def _info_gain(self, feature, y):
        """Information gain of splitting y on the given feature column."""
        return self._entropy(y) - self._con_entropy(feature, y)

    def _select_feature(self, x, y, feature_list):
        """Pick the best remaining feature to split on.

        :param x: 2-D sample array.
        :param y: 1-D label array.
        :param feature_list: column indices still available for splitting.
        :return: the POSITION within feature_list of the feature with the
                 largest information gain, or None when feature_list is empty
                 or no gain exceeds self.threshold. Note that position 0 is a
                 valid (and common) return value.
        """
        if feature_list:
            gain = np.apply_along_axis(self._info_gain, 0, x[:, feature_list], y)
            index = np.argmax(gain)
            if gain[index] > self.threshold:
                return index
        return None

    def _build_tree(self, x, y, feature_list):
        """Recursively build the tree over samples (x, y).

        feature_list is consumed (pop) at this level; children receive copies
        so sibling subtrees cannot reuse the feature chosen here.
        """
        node = DecisionTree.Node()
        labels_count = np.bincount(y)
        # Majority label: used directly for leaves, and as a fallback during
        # prediction when an unseen feature value is encountered.
        node.value = np.argmax(labels_count)
        # Only try to split impure nodes (more than one class present).
        if np.count_nonzero(labels_count) != 1:
            index = self._select_feature(x, y, feature_list)
            # BUG FIX: the original tested `if index:`, which is False for
            # position 0, silently refusing to split whenever the best
            # feature sat first in feature_list.
            if index is not None:
                node.feature_index = feature_list.pop(index)
                for value in np.unique(x[:, node.feature_index]):
                    mask = x[:, node.feature_index] == value
                    node.children[value] = self._build_tree(
                        x[mask], y[mask], feature_list.copy())
        return node

    def train(self, x_train, y_train):
        """Fit the tree; every column of x_train is a candidate feature.

        :param x_train: 2-D array of integer-encoded features.
        :param y_train: 1-D array of non-negative integer labels.
        """
        _, n = x_train.shape
        self.tree = self._build_tree(x_train, y_train, list(range(n)))

    def _predict_one(self, x):
        """Predict the label of a single sample (1-D array).

        Walks from the root, following the child matching the sample's value
        for each split feature; stops at a leaf or at the first unseen value
        (falling back to that node's majority label).
        """
        node = self.tree
        while node.children:
            child = node.children.get(x[node.feature_index])
            if child is None:
                # Feature value never seen during training.
                break
            node = child
        return node.value

    def predict(self, x):
        """Predict labels for a 2-D array of samples, one row at a time.

        :return: 1-D array of predicted labels.
        """
        return np.apply_along_axis(self._predict_one, 1, x)

    def __str__(self):
        # BUG FIX: the original checked hasattr(self, "tree"), which is
        # always True because __init__ assigns self.tree = None, so an
        # untrained model printed "None". Check for an actual tree instead.
        if self.tree is not None:
            return str(self.tree)
        else:
            return "没有树"


if __name__ == '__main__':
    from sklearn.preprocessing import LabelEncoder
    from sklearn.metrics import accuracy_score
    from sklearn.model_selection import train_test_split

    # Load the CSV as strings. BUG FIX: np.str / np.int were deprecated in
    # NumPy 1.20 and removed in 1.24 — use the builtins str / int instead.
    data = np.genfromtxt('ID3.data', dtype=str, delimiter=",")
    _, n = data.shape
    # Integer-encode every categorical column in place (values stay strings
    # of digits until the astype below).
    for i in range(n):
        le = LabelEncoder()
        data[:, i] = le.fit_transform(data[:, i])
    new_data = data.astype(int)
    print(new_data)
    X = new_data[:, :-1]  # all columns but the last are features
    Y = new_data[:, -1]   # last column is the class label
    print(new_data.shape)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
    ID3 = DecisionTree()
    ID3.train(X_train, Y_train)
    print(ID3)
    y_pred = ID3.predict(X_test)
    print(Y_test)
    print(y_pred)
    print(accuracy_score(y_true=Y_test, y_pred=y_pred))