from math import log

# Watermelon dataset. Columns: color | root | knock sound | texture | navel | touch | good melon (label)
data_set = [['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', True],
            ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', True],
            ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', True],
            ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', True],
            ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', True],
            ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', True],
            ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', True],
            ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', True],
            ['乌黑', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', False],
            ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', False],
            ['青绿', '硬挺', '清脆', '模糊', '平坦', '硬滑', False],
            ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', False],
            ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', False],
            ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', False],
            ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', False],
            ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', False],
            ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', False]]

# Tally how many rows carry each label (the last element of every row).
def unique_condition_count(rows):
    """Return a dict mapping each label in the last column to its count."""
    counts = {}
    for row in rows:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    return counts

# Base-2 Shannon entropy of the label distribution (labels in the last column).
def entropy_calcution(rows):
    """Return the entropy, in bits, of the labels found in rows."""
    tally = {}
    for row in rows:
        tally[row[-1]] = tally.get(row[-1], 0) + 1
    ent = 0.0
    for count in tally.values():
        p = count / len(rows)
        ent -= p * log(p) / log(2)
    return ent

# Split rows into (matching, non-matching) by whether row[column] equals value.
def divide_dataset(rows, column, value):
    """Return two lists: rows where row[column] == value, and the rest."""
    matching, rest = [], []
    for row in rows:
        if row[column] == value:
            matching.append(row)
        else:
            rest.append(row)
    return matching, rest

class DecisionNode:
    """One node of the binary decision tree."""

    def __init__(self, col = -1, value = None, results = None, tb = None, fb = None):
        # Column index of the attribute this node tests.
        self.col = col
        # Attribute value that sends an observation down the True branch.
        self.value = value
        # Leaf only: dict of label -> count; None on internal nodes.
        self.results = results
        # Child followed when the test row[col] == value succeeds.
        self.tb = tb
        # Child followed when the test fails.
        self.fb = fb

# Recursively build a binary decision tree, choosing at each node the
# (column, value) split with the highest information gain.
def build_tree(rows, score_func = entropy_calcution):
    """Build a binary decision tree from rows.

    rows: list of examples; the last element of each row is the label.
    score_func: impurity measure over a row set (defaults to entropy).
    Returns the root DecisionNode.
    """
    if not rows:
        return DecisionNode()

    current_score = score_func(rows)    # impurity before any split

    best_gain = 0.0
    best_criteria = None
    best_set = None

    for col in range(len(rows[0]) - 1):

        # Distinct values of this column, in first-occurrence order
        # (order matters: ties on gain keep the first candidate found).
        column_value = dict.fromkeys(row[col] for row in rows)

        # Try splitting the dataset on each value of this column.
        for value in column_value:
            (set1, set2) = divide_dataset(rows, col, value)

            # Information gain of splitting on (col, value).
            p = len(set1) / len(rows)
            gain = current_score - p * score_func(set1) - (1 - p) * score_func(set2)
            if gain > best_gain and len(set1) > 0 and len(set2) > 0:
                best_gain = gain
                best_criteria = (col, value)
                best_set = (set1, set2)

    if best_gain > 0:
        # Bug fix: propagate score_func into the recursion -- it was dropped
        # before, so a custom scoring function only applied at the root.
        true_branch = build_tree(best_set[0], score_func)
        false_branch = build_tree(best_set[1], score_func)
        return DecisionNode(col = best_criteria[0], value = best_criteria[1], tb = true_branch, fb = false_branch)
    else:
        # No split improves the score: make a leaf holding the label counts.
        return DecisionNode(results = unique_condition_count(rows))

# Render the binary decision tree to stdout, one extra space per depth level.
def print_decision_tree(tree, indent = ''):
    """Print the tree rooted at `tree`; leaves show their label counts."""
    if tree.results is None:
        # Internal node: show the split test, then both branches.
        print(f"{tree.col}:{tree.value}?")
        print(indent + "T->")
        print_decision_tree(tree.tb, indent + " ")
        print(indent + "F->")
        print_decision_tree(tree.fb, indent + " ")
    else:
        # Leaf node: print the stored result counts directly.
        print(str(tree.results))

# Classify an observation by walking the decision tree from the given node.
def classify_check(observation, tree):
    """Return the leaf's label-count dict for observation (a row without its label)."""
    if tree.results is not None:
        print('classify_check result:')
        return tree.results
    else:
        value = observation[tree.col]
        branch = None                    # branch chosen by the test on tree.col
        # Bug fix: compare by equality, not identity -- `is` only happens to
        # work for interned literals and fails for equal strings built at runtime.
        if value == tree.value:
            branch = tree.tb
        else:
            branch = tree.fb
        return classify_check(observation, branch)

# Bottom-up post-pruning: merge sibling leaves whose split is not worth mingain.
def prune_decision_tree(tree, mingain):
    """Prune `tree` in place, merging leaf pairs whose entropy gain < mingain.

    tree: root DecisionNode of a (sub)tree.
    mingain: minimum entropy reduction a split must achieve to survive.
    """
    # Robustness fix: a bare leaf has no branches to prune (previously this
    # raised AttributeError on tree.tb.results).
    if tree.results is not None:
        return

    # Recurse into non-leaf branches first so pruning proceeds bottom-up.
    if tree.tb.results is None:
        prune_decision_tree(tree.tb, mingain)
    if tree.fb.results is None:
        prune_decision_tree(tree.fb, mingain)

    # Both branches are leaves: decide whether to merge them back.
    if tree.tb.results is not None and tree.fb.results is not None:
        # Rebuild flat label lists from the leaf counts so entropy can be
        # computed over the combined and separate populations.
        tb, fb = [], []
        for v, c in tree.tb.results.items():
            tb += [[v]] * c
        for v, c in tree.fb.results.items():
            fb += [[v]] * c

        # Entropy reduction the split achieves versus the merged node.
        delta = entropy_calcution(tb + fb) - (entropy_calcution(tb) + entropy_calcution(fb)) / 2
        if delta < mingain:
            # Split not worth keeping: collapse this node into a leaf.
            tree.tb, tree.fb = None, None
            tree.results = unique_condition_count(tb + fb)


# Demo entry point: build a tree from the melon data, prune it, display it,
# and classify one unseen sample.
if __name__ == '__main__':
    tree = build_tree(data_set)
    prune_decision_tree(tree, 0.1)
    print_decision_tree(tree)
    sample = ['青绿', '稍蜷', '沉闷', '清晰', '稍凹', '软粘']
    print(classify_check(sample, tree))

# References: 1. Decision tree algorithm in Python: https://zhuanlan.zhihu.com/p/20794583
#             2. "Statistical Learning Methods" (Li Hang)
