import numpy as np
import time
import matplotlib.pyplot as plt
import math


class BaseTree(object):
    """Common building blocks for decision-tree learners.

    Provides Shannon-entropy computation, multiway dataset splitting and
    majority-vote label selection; concrete trees (ID3/CART) subclass it.
    """

    def __init__(self):
        pass

    def calcShannonEntropy(self, labels):
        """Return the Shannon entropy (in bits) of a 1-D label array.

        :param labels: 1-D array-like of class labels.
        :return: float entropy; 0.0 when all labels are identical.
        """
        labels_array = np.array(labels)
        assert len(labels_array.shape) == 1

        size = labels_array.shape[0]
        # np.unique counts every distinct label in one vectorized pass,
        # replacing the hand-rolled dict-based counter.
        _, counts = np.unique(labels_array, return_counts=True)
        probs = counts / float(size)
        return float(-np.sum(probs * np.log2(probs)))

    def splitDataSet(self, data_set, labels, axis):
        """Partition rows by each distinct value of one feature column.

        :param data_set: 2-D np.ndarray of samples.
        :param labels: 1-D np.ndarray aligned with data_set rows.
        :param axis: column index of the feature to split on.
        :return: (data_pieces, label_pieces) — parallel lists with one entry
                 per distinct feature value.
        """
        ret_data_split = []
        ret_labels_split = []
        for v in np.unique(data_set[:, axis]):
            mask = data_set[:, axis] == v  # build the row mask once per value
            ret_data_split.append(data_set[mask])
            ret_labels_split.append(labels[mask])
        return ret_data_split, ret_labels_split

    def chooseBestFeature(self, data_set, labels):
        """Pick the feature column with the highest information gain.

        :return: (best_feature_index, best_info_gain); gain is 0 when no
                 feature improves on the base entropy.
        """
        base_entropy = self.calcShannonEntropy(labels)
        total = float(data_set.shape[0])  # hoisted loop invariant
        best_info_gain = 0
        best_feature = 0
        for fea_id in range(data_set.shape[1]):
            data_split, labels_split = self.splitDataSet(data_set, labels, fea_id)
            # Weighted average entropy of the children: H(Y | feature).
            condition_entropy = sum(
                piece.shape[0] / total * self.calcShannonEntropy(piece_labels)
                for piece, piece_labels in zip(data_split, labels_split)
            )
            info_gain = base_entropy - condition_entropy
            if info_gain > best_info_gain:
                best_info_gain = info_gain
                best_feature = fea_id

        return best_feature, best_info_gain

    def majorLabel(self, labels):
        """Return the majority label of a label sequence.

        Ties are broken in favor of the label whose running count first
        reaches the winning value (online-update semantics, kept on purpose
        to preserve the original tie-break behavior). Returns None for an
        empty input.
        """
        most_label = None
        most_count = 0
        label_count = {}
        for lb in labels:
            label_count[lb] = label_count.get(lb, 0) + 1
            if label_count[lb] > most_count:
                most_count = label_count[lb]
                most_label = lb
        return most_label


class ID3(BaseTree):
    """ID3 decision tree: multiway splits chosen by information gain."""

    def __init__(self):
        self.DecisionTree = None  # set by train()

    def createTree(self, data_set, labels):
        """Recursively grow the tree.

        Each node is a dict:
          branch           -- list of child nodes, or None for a leaf
          branch_value_map -- feature value -> index into branch (None on leaf)
          key_fea          -- feature column this node splits on
          label            -- majority label at this node (prediction fallback)
        """
        best_fea, info_gain = self.chooseBestFeature(data_set, labels)
        major_label = self.majorLabel(labels)
        # No measurable gain left: stop recursing and emit a leaf.
        if info_gain < 1e-6:
            return {'branch': None, 'branch_value_map': None, 'key_fea': best_fea, 'label': major_label}
        data_split, label_split = self.splitDataSet(data_set, labels, best_fea)
        children = []
        children_value_map = {}
        for i in range(len(data_split)):
            children.append(self.createTree(data_split[i], label_split[i]))
            # Every row of this piece shares the split value; read it from row 0.
            split_value = data_split[i][0][best_fea]
            children_value_map[split_value] = i
        return {'branch': children, 'branch_value_map': children_value_map, 'key_fea': best_fea, 'label': major_label}

    def train(self, data_set, labels):
        """Validate inputs, fit the tree, and return its root node.

        :param data_set: 2-D array-like of float features.
        :param labels: 1-D array-like, one label per row of data_set.
        """
        data_array = np.array(data_set, np.float32)
        assert len(data_array.shape) == 2, 'data_set必须是维数为2的向量'
        labels_array = np.array(labels)
        assert len(labels_array.shape) == 1
        assert data_array.shape[0] == labels_array.shape[0], '样本和标注的长度必须相等'

        self.DecisionTree = self.createTree(data_array, labels_array)
        return self.DecisionTree

    def predict(self, data):
        """Classify one sample by walking the tree.

        If the sample carries a feature value never seen during training,
        the walk stops and the deepest reached node's majority label is
        returned (the original code raised KeyError here).
        """
        tree = self.DecisionTree
        predict_label = tree['label']
        while tree['branch'] is not None:
            branch_id = tree['branch_value_map'].get(data[tree['key_fea']])
            if branch_id is None:
                break  # unseen feature value: best-effort majority fallback
            subtree = tree['branch'][branch_id]
            predict_label = subtree['label']
            tree = subtree
        return predict_label


class CART(BaseTree):
    """CART regression tree: binary splits minimizing total squared error."""

    def __init__(self):
        self.tree = None                 # root node, set by train()
        self.mode = 'regress'            # only 'regress' is implemented
        self.min_regress_error = 1.0e-3  # minimum error reduction to keep splitting
        self.min_split_num = 2           # minimum samples on each side of a split
        self.pruned_num = 0              # merges performed by prune()

    def binarySplitDataSet(self, data_set, labels, split_axis, split_value):
        """Split into (left: feature > value, right: feature <= value).

        :return: (left_set, right_set), each a [data, labels] pair.
        """
        mask = data_set[:, split_axis] > split_value  # evaluate once, reuse
        left_set = [data_set[mask], labels[mask]]
        right_set = [data_set[~mask], labels[~mask]]
        return left_set, right_set

    def regressError(self, labels):
        """Total squared deviation from the mean (variance * n)."""
        if self.mode == 'regress':
            return np.var(labels) * labels.shape[0]
        # The original fell through and returned None, which crashed later
        # with an opaque TypeError; fail fast with a clear error instead.
        raise NotImplementedError('unsupported mode: %s' % self.mode)

    def chooseBestSplit_Reg(self, data_set: np.ndarray, labels: np.ndarray):
        """Find the (feature, value) split minimizing the children's error.

        :return: (feature_index, split_value), or (None, mean(labels)) when
                 this node should become a leaf (pure labels, no legal split,
                 or improvement below min_regress_error).
        """
        if len(set(labels.tolist())) == 1:
            return None, np.mean(labels)

        base_error = self.regressError(labels)
        best_error = float('inf')
        bestID, bestVAL = 0, 0

        for feaID in range(data_set.shape[1]):
            # Duplicate feature values produce identical splits; trying each
            # distinct value once avoids redundant work.
            for split_val in np.unique(data_set[:, feaID]):
                left_set, right_set = self.binarySplitDataSet(data_set, labels, feaID, split_val)
                if left_set[0].shape[0] < self.min_split_num or right_set[0].shape[0] < self.min_split_num:
                    continue
                new_error = self.regressError(left_set[1]) + self.regressError(right_set[1])
                if new_error < best_error:
                    bestID = feaID
                    bestVAL = split_val
                    best_error = new_error
        # Not enough improvement (or no legal split at all): make a leaf.
        if (base_error - best_error) < self.min_regress_error:
            return None, np.mean(labels)
        return bestID, bestVAL

    def createTree(self, data_set, labels):
        """Recursively grow the tree.

        Leaf nodes carry the mean label in 'value' with None children;
        internal nodes carry the split feature and threshold.
        """
        fea, val = self.chooseBestSplit_Reg(data_set, labels)
        ret = {'split_fea': fea, 'value': val, 'left': None, 'right': None}
        if fea is None:
            return ret
        leftSet, rightSet = self.binarySplitDataSet(data_set, labels, fea, val)
        ret['left'] = self.createTree(leftSet[0], leftSet[1])
        ret['right'] = self.createTree(rightSet[0], rightSet[1])
        return ret

    def train(self, data_set, labels):
        """Validate inputs and fit the regression tree."""
        data_array = np.array(data_set, np.float32)
        assert len(data_array.shape) == 2, 'data_set必须是维数为2的向量'
        labels_array = np.array(labels)
        assert len(labels_array.shape) == 1
        assert data_array.shape[0] == labels_array.shape[0], '样本和标注的长度必须相等'

        self.tree = self.createTree(data_array, labels_array)

    def isLeaf(self, tree):
        """True when the node has no children."""
        assert tree is not None, 'tree必须是实例'
        return (tree['left'] is None) and (tree['right'] is None)

    # TODO
    def prune(self, tree, test_data, test_labels):
        """Post-prune against a validation set: collapse sibling leaves into
        one leaf whenever the merged leaf has lower squared error on the
        test data.

        :return: the (possibly modified) subtree.
        """
        if self.isLeaf(tree):
            # Guard: pruning a leaf is a no-op (the original crashed on
            # isLeaf(None) when handed a leaf node).
            return tree
        if (not self.isLeaf(tree['left'])) or (not self.isLeaf(tree['right'])):
            l_set, r_set = self.binarySplitDataSet(test_data, test_labels, tree['split_fea'], tree['value'])
            if not self.isLeaf(tree['left']):
                tree['left'] = self.prune(tree['left'], l_set[0], l_set[1])
            if not self.isLeaf(tree['right']):
                tree['right'] = self.prune(tree['right'], r_set[0], r_set[1])
        if self.isLeaf(tree['left']) and self.isLeaf(tree['right']):
            l_set, r_set = self.binarySplitDataSet(test_data, test_labels, tree['split_fea'], tree['value'])
            # Validation error if we keep the split...
            error_split = np.sum((l_set[1] - tree['left']['value']) ** 2) + np.sum(
                (r_set[1] - tree['right']['value']) ** 2)
            # ...versus collapsing both children into their average.
            leaf_mean = (tree['left']['value'] + tree['right']['value']) / 2.0
            error_merge = np.sum((test_labels - leaf_mean) ** 2)
            if error_merge < error_split:
                self.pruned_num += 1
                print('merging', "pruned_num:%d" % self.pruned_num)
                tree['left'] = tree['right'] = None
                tree['value'] = leaf_mean
        return tree

    def predict(self, data):
        """Walk the fitted tree down to a leaf and return its value."""
        tree = self.tree
        while not self.isLeaf(tree):
            if data[tree['split_fea']] > tree['value']:
                tree = tree['left']
            else:
                tree = tree['right']
        return tree['value']


if __name__ == '__main__':
    # Placeholder entry point: no demo/driver code has been added yet.
    pass
