import numpy as np
import copy

class Decision_tree():
    """
    ID3 decision tree classifier.

    The fitted tree is a nested dict of the form
    ``{feature_index: {feature_value: subtree_or_leaf}}``. Feature indices
    are relative to the progressively reduced feature matrix (used columns
    are deleted on the way down), so ``predict`` removes the consumed
    feature from the test sample as it descends.
    """

    def entropy(self, Xi):
        """
        Shannon entropy H(X) = -sum(p(xi) * log2(p(xi))).

        input:
            Xi, 1-D array of discrete values
        output:
            entropy of the input array, in bits (float)
        """
        h_x = 0
        for value in set(Xi):
            pxi = len(Xi[Xi == value]) / len(Xi)
            # -= because of the leading minus in the formula
            h_x -= pxi * np.log2(pxi)
        return h_x

    def conditional_entropy(self, Xi, y):
        """
        Conditional entropy H(Y|X) = sum(p(xi) * H(Y|X=xi)).

        input:
            Xi, 1-D array holding one feature column
            y, 1-D label array aligned with Xi
        output:
            conditional entropy, in bits (float)
        """
        h_y_x = 0
        # iterating only over values actually present avoids log(0)
        for value in set(Xi):
            pxi = len(Xi[Xi == value]) / len(Xi)
            # entropy of the labels restricted to X == value, weighted by p(xi)
            h_y_x += pxi * self.entropy(y[Xi == value])
        return h_y_x

    def get_best_feature(self, X, y):
        """
        Pick the feature with the maximum information gain.

        input:
            X, 2-D feature matrix (N samples x P features)
            y, 1-D label array
        output:
            (best feature index, its information gain);
            (-1, -1) if X has no columns
        """
        N, P = X.shape

        # H(Y) is independent of the candidate feature, so compute it once
        # outside the loop (it was recomputed for every feature before).
        h_y = self.entropy(y)

        max_gain = -1
        max_feature = -1
        for i in range(P):
            gain = h_y - self.conditional_entropy(X[:, i], y)
            if gain > max_gain:
                max_gain = gain
                max_feature = i

        return max_feature, max_gain

    def get_sub_dataset(self, X, y, feature, value):
        """
        Build the sub-dataset for one branch of a split.

        input:
            X, the feature matrix to reduce
            y, the labels to reduce
            feature, column index of the feature just used (dropped from X)
            value, keep only the rows where X[:, feature] == value
        output:
            (reduced X, reduced y) — always fresh arrays; the inputs are
            never modified (np.delete and boolean indexing both copy, so
            the explicit deepcopy the original code did was redundant).
        """
        index = X[:, feature] == value
        # delete the used feature column, then keep the matching rows
        ret_X = np.delete(X, feature, axis=1)[index]
        ret_y = y[index]

        return ret_X, ret_y

    def major_class(self, y):
        """
        Return the majority class of y.

        On a tie the largest label wins (stable sort over np.unique's
        sorted labels), matching the original behavior.
        """
        labels, counts = np.unique(y, return_counts=True)
        # sort (label, count) pairs by count; last entry is the majority
        pairs = sorted(zip(labels, counts), key=lambda p: p[1])
        return pairs[-1][0]

    def build_tree(self, X, y, threshold=.1):
        """
        Recursively build an ID3 tree (C4.5 would be similar, but would
        use the information gain ratio instead of the raw gain).

        input:
            X, feature matrix
            y, label array
            threshold, minimum information gain (epsilon) to keep splitting
        output:
            nested dict tree, or a single label for a leaf
        """
        # all samples share one label -> pure leaf
        classes = set(y)
        if len(classes) == 1:
            return y[0]
        # no rows left -> fall back to the majority label
        if len(X) == 0:
            return self.major_class(y)

        feature, gain = self.get_best_feature(X, y)
        # not enough information gain -> stop splitting, majority label
        if gain < threshold:
            return self.major_class(y)

        tree_dict = {feature: {}}
        for value in set(X[:, feature]):
            new_X, new_y = self.get_sub_dataset(X, y, feature, value)
            # BUGFIX: propagate threshold — the recursion previously
            # omitted it and silently fell back to the default 0.1
            tree_dict[feature][value] = self.build_tree(new_X, new_y, threshold)

        return tree_dict

    def predict(self, X_test, tree):
        """
        Predict the label of one sample by walking the tree.

        input:
            X_test, list of feature values; consumed features are removed
                    in place while descending (pass a copy to keep it)
            tree, decision tree produced by build_tree
        output:
            predicted label
        """
        while True:
            # key: feature index used for the split; value: child mapping or leaf
            (key, value), = tree.items()
            if isinstance(value, dict):
                data_value = X_test[key]
                # feature indices are relative to the reduced matrix,
                # so drop the consumed feature from the sample
                del X_test[key]
                tree = value[data_value]
                # leaves are plain labels (e.g. numpy str_), not dicts.
                # isinstance is more robust than the original comparison
                # against the type name 'str_', which only matched numpy
                # string leaves and would loop forever on a plain str.
                if not isinstance(tree, dict):
                    return tree
            else:
                return value
        
if __name__ == '__main__':

    # Data from Li Hang, "Statistical Learning Methods", p.71 (PDF p.90)
    datasets = [['青年', '否', '否', '一般', '否'],
                ['青年', '否', '否', '好', '否'],
                ['青年', '是', '否', '好', '是'],
                ['青年', '是', '是', '一般', '是'],
                ['青年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '一般', '否'],
                ['中年', '否', '否', '好', '否'],
                ['中年', '是', '是', '好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['中年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '非常好', '是'],
                ['老年', '否', '是', '好', '是'],
                ['老年', '是', '否', '好', '是'],
                ['老年', '是', '否', '非常好', '是'],
                ['老年', '否', '否', '一般', '否'],
                ]

    dataset = np.array(datasets)
    X, y = dataset[:, :-1], dataset[:, -1]

    # reuse a single model instance (the original built a fresh
    # Decision_tree for every prediction)
    model = Decision_tree()
    tree = model.build_tree(X, y)
    print('tree:', tree)

    # predict the training set; row[:-1] is a copy, so predict's in-place
    # feature removal never touches the original rows
    res = [model.predict(row[:-1], tree) for row in datasets]
    labels = [row[-1] for row in datasets]

    # the original computed res and discarded it — report the result
    accuracy = sum(p == t for p, t in zip(res, labels)) / len(datasets)
    print('predictions:', res)
    print('train accuracy:', accuracy)
    