from math import log
from collections import defaultdict
import numpy as np
import operator

"""
References
 https://blog.csdn.net/wzmsltw/article/details/51039928
"""


def calcShannonEnt(dataSet):
    """
    Calculate the Shannon entropy of a dataset.

    :param dataSet: list of samples; the LAST element of each sample is
        its class label
    :return: entropy in bits, ``-sum(p * log2(p))`` over class frequencies;
        0.0 for an empty dataset
    """
    num_entries = len(dataSet)
    # An empty dataset carries no information; returning early also avoids
    # the division by zero (and the unbound result) the original code hit.
    if num_entries == 0:
        return 0.0

    label_counts = defaultdict(int)
    for featVec in dataSet:
        label_counts[featVec[-1]] += 1

    # Bug fix: the original recomputed the entropy inside the counting
    # loop (O(n*k), relying on the last iteration for the final value).
    # Count first, then compute the entropy exactly once.
    shannon_ent = 0.0
    for count in label_counts.values():
        prob = float(count) / num_entries
        shannon_ent -= prob * log(prob, 2)

    return shannon_ent


def createDataset():
    """Return a tiny toy dataset plus the names of its two features.

    Each sample is ``[feature0, feature1, class_label]``.
    """
    samples = [
        [1, 1, 'maybe'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    feature_names = ['no surfacing', 'flippers']
    return samples, feature_names


def splitDataset(dataset, axis, value):
    """
    Split on a discrete feature.

    Keep the samples whose feature at index ``axis`` equals ``value``,
    returning each kept sample with that feature column removed.

    :param dataset: list of samples (lists)
    :param axis: index of the feature to split on
    :param value: feature value to select
    :return: new list of reduced samples (originals are not mutated)
    """
    return [
        sample[:axis] + sample[axis + 1:]
        for sample in dataset
        if sample[axis] == value
    ]


def splitContinuousDataset(dataset, axis, value, direction):
    """
    Split on a continuous feature around the threshold ``value``.

    ``direction == 0`` keeps samples with feature strictly greater than
    ``value``; any other direction keeps samples with feature <= ``value``.
    The split feature column is removed from every returned sample.

    :param dataset: list of samples (lists)
    :param axis: index of the continuous feature
    :param value: split threshold
    :param direction: 0 for the "greater than" side, otherwise the
        "less than or equal" side
    :return: new list of reduced samples (originals are not mutated)
    """
    if direction == 0:
        keep = lambda v: v > value
    else:
        keep = lambda v: v <= value

    result = []
    for sample in dataset:
        if keep(sample[axis]):
            result.append(sample[:axis] + sample[axis + 1:])
    return result


def chooseBestFeatureToSplit(dataset, labels):
    """
    Pick the feature with the highest information gain.

    Numeric (int/float) features are treated as continuous: candidate
    thresholds are midpoints between consecutive sorted values, and the
    best threshold per feature is recorded. If the winning feature is
    continuous, its column is binarized in place (<= threshold -> 1,
    else 0) and its entry in ``labels`` is renamed to embed the threshold.

    :param dataset: list of samples; last element of each is the class
        label (MUTATED in place when the winner is continuous)
    :param labels: feature names (MUTATED in place when the winner is
        continuous)
    :return: index of the best feature, or -1 when no split improves on
        the base entropy
    """
    num_features = len(dataset[0]) - 1
    base_entropy = calcShannonEnt(dataset)
    best_gain = 0.0
    best_feature = -1
    best_split_dict = {}

    for i in range(num_features):
        feat_list = [example[i] for example in dataset]

        if isinstance(feat_list[0], (int, float)):
            # Continuous feature: thresholds are midpoints of consecutive
            # sorted values.
            sorted_feat_list = sorted(feat_list)
            split_list = [
                (sorted_feat_list[j] + sorted_feat_list[j + 1]) / 2.0
                for j in range(len(sorted_feat_list) - 1)
            ]
            if not split_list:
                # Single sample: nothing to split on (the original code
                # would have left `best_split` unbound here).
                continue

            best_split_entropy = np.inf
            best_split = 0
            for j, value in enumerate(split_list):
                sub_dataset_0 = splitContinuousDataset(dataset, i, value, 0)
                sub_dataset_1 = splitContinuousDataset(dataset, i, value, 1)

                new_entropy = 0.0
                # Skip empty partitions: their weighted entropy is zero,
                # and this avoids calling calcShannonEnt on an empty list.
                if sub_dataset_0:
                    prob0 = len(sub_dataset_0) / float(len(dataset))
                    new_entropy += prob0 * calcShannonEnt(sub_dataset_0)
                if sub_dataset_1:
                    prob1 = len(sub_dataset_1) / float(len(dataset))
                    new_entropy += prob1 * calcShannonEnt(sub_dataset_1)

                if new_entropy < best_split_entropy:
                    best_split_entropy = new_entropy
                    best_split = j

            best_split_dict[labels[i]] = split_list[best_split]
            gain = base_entropy - best_split_entropy

        else:
            # Discrete feature: weighted entropy over each distinct value.
            unique_vals = set(feat_list)
            new_entropy = 0.0
            for value in unique_vals:
                sub_dataset = splitDataset(dataset, i, value)
                prob = len(sub_dataset) / float(len(dataset))
                new_entropy += prob * calcShannonEnt(sub_dataset)
            gain = base_entropy - new_entropy

        if gain > best_gain:
            best_gain = gain
            best_feature = i

    # If the winning feature is continuous, binarize its column around the
    # recorded threshold and rename its label. Guard best_feature >= 0:
    # with no gain, dataset[0][-1] would otherwise be (mis)inspected.
    if best_feature >= 0 and isinstance(dataset[0][best_feature], (int, float)):
        best_split_value = best_split_dict[labels[best_feature]]
        labels[best_feature] = labels[best_feature] + '<=' + str(best_split_value)

        # Bug fix: the original iterated `range(dataset[0].shape)`, which
        # raises AttributeError on plain lists; iterate the sample count.
        for k in range(len(dataset)):
            if dataset[k][best_feature] <= best_split_value:
                dataset[k][best_feature] = 1
            else:
                dataset[k][best_feature] = 0

    return best_feature


def majority_cnt(class_list):
    """
    Majority vote: return the class label occurring most often.

    Used when the features are exhausted but the remaining samples still
    have mixed class labels.

    :param class_list: non-empty list of class labels
    :return: the most frequent label (first-seen wins on ties)
    """
    class_count = defaultdict(int)
    for vote in class_list:
        class_count[vote] += 1

    # Bug fix: `max(class_count)` returned the largest KEY (e.g. the
    # alphabetically-last label), not the most frequent class. Select the
    # key with the highest count instead.
    return max(class_count, key=class_count.get)


def create_tree(dataset, labels, full_data, full_labels):
    """
    Recursively build a decision tree as nested dicts.

    :param dataset: current subset of samples; the last element of each
        sample is its class label
    :param labels: feature names for the remaining columns of ``dataset``
        (MUTATED: the chosen feature's name is deleted at each level)
    :param full_data: the complete original dataset, used to enumerate all
        possible values of a discrete feature — including values absent
        from the current subset
    :param full_labels: feature names matching ``full_data``'s columns
    :return: a class label (leaf) or a nested dict of the form
        ``{feature_label: {feature_value: subtree_or_label}}``
    """
    class_list = [example[-1] for example in dataset]
    # All samples share one class: pure leaf, stop recursing.
    if class_list.count(class_list[0]) == len(class_list):
        return class_list[0]

    # Only the class column remains: fall back to a majority vote.
    if len(dataset[0]) == 1:
        return majority_cnt(class_list)

    # NOTE: this call may mutate both `dataset` (binarizing a continuous
    # column) and `labels` (renaming the winning feature's entry).
    best_feature = chooseBestFeatureToSplit(dataset, labels)
    best_feature_label = labels[best_feature]
    my_tree = {best_feature_label: {}}
    feature_values = [example[best_feature] for example in dataset]
    unique_vals = set(feature_values)

    if isinstance(dataset[0][best_feature], str):
        # Discrete feature: collect every value it takes in the FULL
        # dataset so branches missing from this subset can still be filled.
        current_label = full_labels.index(labels[best_feature])
        feature_full_values = [example[current_label] for example in full_data]
        unique_full_vals = set(feature_full_values)
    del labels[best_feature]

    for value in unique_vals:
        # Copy so the recursive call's deletions don't affect siblings.
        sub_labels = labels[:]
        if isinstance(dataset[0][best_feature], str):
            unique_full_vals.remove(value)

        my_tree[best_feature_label][value] = create_tree(splitDataset(dataset, best_feature, value)
                                                         , sub_labels, full_data, full_labels)

        # NOTE(review): this fill-in loop runs on EVERY iteration of the
        # outer loop (not once after it finishes) and rebinds the loop
        # variable ``value``; branches it writes may be overwritten by
        # later recursive calls. It looks like it was meant to run after
        # the outer loop completes — confirm intent before relying on it.
        if isinstance(dataset[0][best_feature], str):
            for value in unique_full_vals:
                my_tree[best_feature_label][value] = majority_cnt(class_list)

    return my_tree


if __name__ == '__main__':
    # Scratch/manual-check area kept from development.
    # dat, labels = createDataset()
    # ret = calcShannonEnt(dat)
    p = 12 / 17
    q = 5 / 17
    dummy = isinstance(p, float)
    # Two-class entropy for probabilities p and q.
    ret = -(p * log(p, 2) + q * log(q, 2))
    d = 1e88 > np.inf  # always False: nothing finite exceeds infinity

    print()