import numpy as np


# Load the dataset
def load_data(train_size=10):
    """Load the comma-separated integer dataset and split it.

    Args:
        train_size: number of leading rows used for training; the
            remaining rows become the test set. Defaults to 10, the
            split this script was written for.

    Returns:
        (train_x, test_x): two 2-D int arrays. The last column is
        presumably the class label — TODO confirm against the data file.
    """
    # np.loadtxt parses the whole comma-separated file in one call and
    # infers the column count, so the width is no longer hard-coded.
    data = np.loadtxt('../data/决策树数据.txt', delimiter=',', dtype=int)
    return data[:train_size], data[train_size:]


# Module-level dataset: training rows in x, held-out rows in test_x,
# loaded once at import time.
x, test_x = load_data()


# print(x.shape, test_x.shape)

# Compute the entropy of a dataset
def get_entropy(_x):
    """Return the Shannon entropy (base 2) of the label column of `_x`.

    The label is taken from the last column and must hold small
    non-negative integers (a requirement of np.bincount).
    """
    # bincount[v] counts rows with label v, e.g. [5, 5] means label 0
    # and label 1 each appear five times.
    counts = np.bincount(_x[:, -1])

    # Convert the non-zero counts to probabilities; zero counts would
    # make log2 blow up and contribute nothing anyway.
    probabilities = [c / len(_x) for c in counts if c > 0]

    # entropy = -sum(p * log2(p))
    return -sum(p * np.log2(p) for p in probabilities)


# print(get_entropy(x))  # 1.0 — the data is maximally mixed

# Compute the information-gain ratio
def get_gain(_x, col):
    """Return the C4.5 gain ratio for splitting `_x` on column `col`.

    Plain information gain favours columns with many distinct values;
    dividing by the split information (the entropy of the partition
    itself) corrects for that bias, which is the C4.5 criterion.
    """
    total = len(_x)

    # Entropy remaining after the split, weighted by subset size.
    conditional_entropy = 0

    # Split information, seeded with a tiny epsilon so the final
    # division can never be by exactly zero.
    iv = 1e-20

    # Partition the rows by each distinct value found in the column.
    for value in set(_x[:, col]):
        subset = _x[_x[:, col] == value]
        weight = len(subset) / total
        conditional_entropy += weight * get_entropy(subset)
        iv -= weight * np.log2(weight)

    # Gain: how much splitting here lowers the entropy (bigger is
    # better); normalised by the split information.
    return (get_entropy(_x) - conditional_entropy) / iv


# print(get_gain(x, 0))

# Find the column with the largest gain ratio
def get_split_col(_x):
    """Return the index of the feature column with the highest gain ratio.

    Only columns 0 .. _x.shape[1]-2 are considered — the last column is
    the label. Returns -1 when no column achieves a positive gain.
    """
    gains = [get_gain(_x, col) for col in range(_x.shape[1] - 1)]

    best_col = -1
    best_gain = 0
    for col, gain in enumerate(gains):
        # Strict '>' keeps the earliest column on ties.
        if gain > best_gain:
            best_col, best_gain = col, gain

    return best_col


# print(get_split_col(x))

# Define the tree-node class
class Node():
    """An internal decision node that splits on feature column `col`.

    `children` maps each observed value of that column to the subtree
    (another Node, or a Leaf) handling the matching rows.
    """

    def __init__(self, col):
        self.children = {}
        self.col = col

    def __str__(self):
        return 'Node col=%d' % self.col


class Leaf():
    """A terminal node predicting the single class value `y`."""

    def __init__(self, y):
        self.y = y

    def __str__(self):
        return 'Leaf y=%d' % self.y


# print(Node(0))
# print(Leaf(1))

# Pretty-print the tree
def print_tree(node, prefix='', subfix=''):
    """Recursively print `node` and its subtree, one line per node.

    Every level of depth adds four dashes of indentation; each branch
    edge is annotated with the feature value that leads down it.
    """
    indent = prefix + '-' * 4
    print(indent, node, subfix)

    # A leaf has no children — stop here.
    if isinstance(node, Leaf):
        return

    for value, child in node.children.items():
        print_tree(child, indent, 'value=' + str(value))


# Root of the tree: split on the feature with the best gain ratio over
# the full training set.
root = Node(get_split_col(x))


# Recursively grow the tree below a node
def create_children(_x, parent_node):
    """Grow the subtree under `parent_node` from the rows in `_x`.

    For every distinct value of the parent's split column, the matching
    rows are selected. A pure subset (single label) becomes a Leaf; a
    mixed subset becomes a branch Node that is then grown recursively,
    so the tree is built to full depth rather than stopping after one
    level. When no column yields a positive gain ratio (get_split_col
    returns -1), a majority-class Leaf is attached instead — this avoids
    splitting on the label column and guarantees termination.

    Args:
        _x: 2-D int array of rows reaching `parent_node`; last column
            is the label.
        parent_node: Node whose `children` dict is filled in place.
    """
    # Iterate over the distinct values of the parent's split column.
    for split_value in np.unique(_x[:, parent_node.col]):

        # Rows whose value in the parent's column equals split_value.
        sub_x = _x[_x[:, parent_node.col] == split_value]

        # Distinct labels in this subset and how often each occurs.
        labels, counts = np.unique(sub_x[:, -1], return_counts=True)

        # All labels identical -> pure subset -> leaf.
        if len(labels) == 1:
            parent_node.children[split_value] = Leaf(labels[0])
            continue

        # Mixed subset: find the best column to split on next.
        split_col = get_split_col(sub_x)

        # No informative column left: fall back to the majority class.
        if split_col == -1:
            parent_node.children[split_value] = Leaf(labels[np.argmax(counts)])
            continue

        # Branch node; recurse so the subtree below it gets built too.
        child = Node(col=split_col)
        parent_node.children[split_value] = child
        create_children(sub_x, child)


# Grow the tree under the root from the training data, then dump the
# resulting tree to stdout.
create_children(x, root)

print_tree(root)
