import numpy as np
import graphviz

class Node:
    """One node of a decision tree.

    A node is either an internal node (``feature`` set, children attached)
    or a leaf (``is_leaf`` True and ``category`` holding the predicted class).
    """

    def __init__(self):
        self.child = []            # child nodes, one per value of ``feature``
        self.feature = None        # feature this node splits on (internal nodes)
        self.feature_value = None  # parent-feature value that leads to this node
        self.is_leaf = False       # becomes True via set_as_leaf()

    def add_child(self, node):
        """Attach *node* as a child of this node."""
        self.child.append(node)

    def set_feature(self, feature):
        """Record the feature used for splitting at this node."""
        self.feature = feature

    def set_feature_value(self, feature_value):
        """Record which value of the parent's feature this node represents."""
        self.feature_value = feature_value

    def set_as_leaf(self, category):
        """Turn this node into a leaf predicting *category*."""
        self.is_leaf = True
        self.category = category

#Decision tree classifier built with the ID3 (information-gain) splitting rule
class DecisionTree:
    """ID3 decision-tree classifier for categorical features.

    Feature names and their possible values are kept in two parallel
    sequences (``self.D`` / ``self.D_values``) addressed through a shared
    index array ``d_index``, so features can be "removed" during recursion
    by shrinking the index array instead of copying data.
    """

    def __init__(self):
        self.root = None  # filled in by fit()

    def fit(self, X, Y, D):
        """Build the tree recursively with the ID3 information-gain criterion.

        X : 2-D array of categorical feature values, one sample per row
        Y : 1-D array of non-negative integer class labels
        D : indexable sequence of feature names, one per column of X
        """
        self.D = D                                   # feature names
        self.D_values = self._get_feature_values(X)  # possible values per feature
        d_index = np.arange(len(D))                  # indices of usable features

        self.root = self._grow(X, Y, d_index)

    def _grow(self, X, Y, d_index):
        """Recursively grow and return the subtree for the sample set (X, Y)."""
        node = Node()

        #all samples share one class: leaf labelled with that class
        classes = np.unique(Y)
        if len(classes) == 1:
            node.set_as_leaf(classes[0])
            return node

        #no usable feature left, or every sample is identical on the
        #remaining features: leaf labelled with the majority class
        if len(d_index) == 0 or self._features_are_identical(X):
            node.set_as_leaf(np.argmax(np.bincount(Y)))
            return node

        #choose the feature with the highest information gain
        feature_best_index, d_index_hat = self._optimize(X, Y, d_index)
        node.set_feature(self.D[feature_best_index])
        col = X[:, feature_best_index]

        #one child per possible value of the chosen feature
        for feature_value in self.D_values[feature_best_index]:
            #flatnonzero always yields a 1-D index array, so single-sample
            #subsets keep X 2-D and Y 1-D (argwhere().squeeze() did not)
            sample_index = np.flatnonzero(col == feature_value)

            if sample_index.size == 0:
                #value absent in this node: leaf with the parent's majority class
                child = Node()
                child.set_feature_value(feature_value)
                child.set_as_leaf(np.argmax(np.bincount(Y)))
            else:
                child = self._grow(X[sample_index], Y[sample_index], d_index_hat)
                child.set_feature_value(feature_value)
            node.add_child(child)

        return node

    def _optimize(self, x, y, d_index):
        """Select the best split feature by information gain.

        x, y    : samples of the current node and their labels
        d_index : indices of the features still available

        Returns ``(best_feature_index, d_index_without_it)``.
        """
        ent = self._entropy(y)  #entropy before the split (loop-invariant)
        gain = []
        for i in d_index:
            col = x[:, i]

            #weighted entropy of the subsets induced by feature i
            ent_sub = 0.0
            for value in self.D_values[i]:
                subset = np.flatnonzero(col == value)
                if subset.size == 0:
                    #value absent here: contributes nothing.  (The previous
                    #code could NaN out on 0/0 here, and `break`-ed out of
                    #the loop on single-sample subsets, skipping the
                    #remaining values of the feature being scored.)
                    continue
                ratio = subset.size / x.shape[0]
                ent_sub += ratio * self._entropy(y[subset])

            gain.append(ent - ent_sub)

        max_i = np.argmax(gain)
        #best feature, and the feature-index set with it removed
        return d_index[max_i], np.delete(d_index, max_i)

    def view(self):
        """Render the fitted tree with graphviz (opens the default viewer)."""
        self.node_count = 0
        self.g = graphviz.Digraph()

        self.g.node(str(0), self.root.feature, fontname="Microsoft YaHei")
        self._sub_plot(0, self.root)
        self.g.view()

    def _sub_plot(self, parent_node_id, node: "Node"):
        """Recursively add *node*'s children to the graphviz graph."""
        for c in node.child:
            self.node_count += 1
            cid = self.node_count  #id of this child; recursion below advances the counter

            if c.is_leaf:
                #leaves are coloured by predicted class (1 = good, else bad)
                label = 'good' if c.category == 1 else 'bad'
                bg = '#2A9D8F' if c.category == 1 else '#e76f51'
                self.g.node(
                    str(cid),
                    label,
                    fontname="Microsoft YaHei",
                    color=bg,
                    fontcolor='white',
                    style="filled",
                )
                self.g.edge(str(parent_node_id), str(cid), c.feature_value, fontname="Microsoft YaHei")
                continue

            self.g.node(str(cid), c.feature, fontname="Microsoft YaHei")
            self.g.edge(str(parent_node_id), str(cid), c.feature_value, fontname="Microsoft YaHei")
            self._sub_plot(cid, c)

    def _features_are_identical(self, X):
        """True when every remaining sample has the same feature vector."""
        #cast to a fixed-width string dtype so np.unique can compare rows
        return np.unique(X.astype("<U22"), axis=0).shape[0] == 1

    def _get_feature_values(self, X):
        """Possible values of each feature (column) of X, in sorted order.

        np.unique (unlike the former set()) gives a deterministic child
        order that does not depend on string-hash randomization.
        """
        return [np.unique(X[:, i]) for i in range(X.shape[1])]

    @staticmethod
    def _entropy(y):
        """Shannon entropy (base 2) of an integer label sequence; 0.0 if empty."""
        hist = np.bincount(y)
        total = hist.sum()
        if total == 0:
            return 0.0
        ps = hist / total
        ps = ps[ps > 0]  #0*log(0) is defined as 0
        return -np.sum(ps * np.log2(ps))

        

def entropy(y):
    """Shannon entropy (base 2) of a label sequence.

    y : 1-D sequence of non-negative integer labels.
    Returns 0.0 for an empty sequence — the previous version divided by
    zero there, emitting a RuntimeWarning and returning NaN.
    """
    hist = np.bincount(y)  #count of each label value
    total = hist.sum()
    if total == 0:
        return 0.0
    ps = hist / total      #empirical probability of each label
    ps = ps[ps > 0]        #0 * log(0) is defined as 0
    return -np.sum(ps * np.log2(ps))
