import numpy as np
from collections import Counter, defaultdict

def seperate(column):
    """Group the row indices of *column* by equal values.

    Returns a list of index lists, one per distinct value, in order of
    first appearance.
    """
    groups = defaultdict(list)
    for idx, value in enumerate(column):
        groups[value].append(idx)
    return list(groups.values())


def caclEntropy(column, indexes=None):
    """Compute the Shannon entropy (base 2) of *column*.

    If *indexes* is given, only the elements of ``column`` at those
    positions are considered; otherwise the whole column is used.

    Fix: the original compared ``indexes != None``, which is both
    un-idiomatic and broken if a NumPy array is ever passed (elementwise
    comparison yields an array, not a bool); use ``is not None``.
    """
    if indexes is not None:
        values = [column[i] for i in indexes]
    else:
        values = column
    length = len(values)
    counts = Counter(values)
    # H = -sum(p_i * log2(p_i)) over the distinct values
    entropy = 0.0
    for count in counts.values():
        pi = count / length
        entropy -= pi * np.log2(pi)
    return entropy


def caclGain(empEntropy, dataset):
    """Compute the information gain of every feature column.

    *empEntropy* is the empirical entropy of the class column; *dataset*
    is a 2-D array whose last column holds the class labels.  Returns a
    list with one gain per feature column.
    """
    n_rows, n_cols = dataset.shape
    tags = dataset[:, -1]
    gains = []
    for feat in range(n_cols - 1):
        # Conditional entropy: weighted entropy of the labels within
        # each block of rows sharing the same feature value.
        blocks = seperate(dataset[:, feat])
        condEntropy = sum(
            (len(block) / n_rows) * caclEntropy(tags, block)
            for block in blocks
        )
        gains.append(empEntropy - condEntropy)
    return gains


def splitDataset(dataset, max_gain, label):
    """Select the rows of *dataset* whose column *max_gain* equals
    *label*, and return them with that column removed.

    Fix: the original built a Python list of matching rows and passed it
    to ``np.delete`` — which raises when no row matches (the empty list
    has no axis 1).  A vectorized boolean mask keeps the 2-D shape and
    handles the empty case correctly.
    """
    matched = dataset[dataset[:, max_gain] == label]
    return np.delete(matched, max_gain, axis=1)


def cretateDecisionTree(dataset, features):
    """Recursively build an ID3 decision tree.

    *dataset* is a 2-D array whose last column is the class label;
    *features* names the remaining feature columns.  Returns either a
    leaf label or a nested dict ``{feature: {value: subtree, ...}}``.

    Fixes over the original:
    - ``subFeats = features`` aliased the list while ``del
      features[max_gain]`` mutated it, so sibling branches (and the
      caller) shared a shrinking list — wrong feature names and possible
      index errors.  Each branch now gets its own copy.
    - Added a majority-vote base case when all features are used up
      (the original raised ``ValueError`` on ``max([])``).
    - Removed the stray debug ``print``.
    """
    labels = [row[-1] for row in dataset]
    # All samples share one label: this node is a leaf.
    if labels.count(labels[0]) == len(labels):
        return labels[0]
    # No features left to split on: return the majority label.
    if not features:
        return Counter(labels).most_common(1)[0][0]

    tags = dataset[:, -1]
    empEntropy = caclEntropy(tags)          # empirical entropy
    gains = caclGain(empEntropy, dataset)   # information gain per feature
    max_gain = gains.index(max(gains))      # column with the largest gain
    bestFeat = features[max_gain]
    tree = {bestFeat: {}}  # the tree is stored as nested dicts

    # Grow one subtree per observed value of the chosen feature.
    for label in set(dataset[:, max_gain]):
        # Fresh copy without the chosen feature, so recursion cannot
        # mutate the caller's list or a sibling's view of it.
        subFeats = features[:max_gain] + features[max_gain + 1:]
        subDataset = splitDataset(dataset, max_gain, label)
        tree[bestFeat][label] = cretateDecisionTree(subDataset, subFeats)

    return tree


if __name__ == '__main__':
    # Training samples; the last column is the class label.
    dataset = np.array([
        [0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 1, 0, 1, 1],
        [0, 1, 1, 0, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 0, 0, 0],
        [1, 0, 0, 1, 0],
        [1, 1, 1, 1, 1],
        [1, 0, 1, 2, 1],
        [1, 0, 1, 2, 1],
        [2, 0, 1, 2, 1],
        [2, 0, 1, 1, 1],
        [2, 1, 0, 1, 1],
        [2, 1, 0, 2, 1],
        [2, 0, 0, 0, 0],
    ])
    # Names for the four feature columns.
    features = ['ages', 'jobs', 'property', 'credit']

    tree = cretateDecisionTree(dataset, features)
    print(tree)