# Build a decision tree using the information gain ratio (C4.5-style).
import numpy as np

# Load the training data; fall back to the repository-relative path when the
# script is run from the project root.  Only file-system errors are caught so
# unrelated problems (e.g. KeyboardInterrupt, typos) still surface.
try:
    data = np.genfromtxt('LearningData1.csv', delimiter=',', skip_header=1,
                         dtype=str, encoding='utf-8')
except OSError:
    data = np.genfromtxt('Works/第5章特征选择/LearningData1.csv', delimiter=',',
                         skip_header=1, dtype=str, encoding='utf-8')

epsilon = 1e-6  # minimum information gain ratio required to accept a split

def calc_info_gain(data, feature_index):
    """Return the information gain of splitting ``data`` on ``feature_index``.

    Parameters
    ----------
    data : np.ndarray
        2-D string array; the class label is assumed to be the last column.
    feature_index : int
        Column index of the candidate feature.

    Returns
    -------
    float
        H(labels) - H(labels | feature), in bits.
    """
    label_index = len(data[0]) - 1  # label is assumed to be the last column

    def calc_entropy(subset):
        # Shannon entropy (base 2) of the label column of ``subset``.
        # np.unique only reports values that occur, so every count is > 0
        # and log2 is safe.
        _, counts = np.unique(subset[:, label_index], return_counts=True)
        probabilities = counts / len(subset)
        return -np.sum(probabilities * np.log2(probabilities))

    def calc_conditional_entropy():
        # Entropy of the labels conditioned on the chosen feature:
        # sum over values v of P(feature == v) * H(labels | feature == v).
        # (The original version carried an unused ``label_index`` parameter
        # and a redundant count array; both removed.)
        conditional_entropy = 0.0
        for value in np.unique(data[:, feature_index]):
            subset = data[data[:, feature_index] == value]
            conditional_entropy += (len(subset) / len(data)) * calc_entropy(subset)
        return conditional_entropy

    return calc_entropy(data) - calc_conditional_entropy()

def calc_info_gain_ratio(data, feature_index):
    """Information gain ratio (C4.5) for splitting on ``feature_index``.

    Returns 0.0 when the feature takes (numerically) a single value, which
    also guards the division by the split information.
    """
    gain = calc_info_gain(data, feature_index)

    # Split information: entropy of the feature's own value distribution.
    _, value_counts = np.unique(data[:, feature_index], return_counts=True)
    value_probs = value_counts / len(data)
    split_info = -np.sum(value_probs * np.log2(value_probs))

    # Avoid dividing by zero (or a numerically tiny split information).
    return 0.0 if split_info <= 1e-12 else gain / split_info

def choose_best_feature(data, feature_names=None):
    """Return the column index with the highest information gain ratio.

    Parameters
    ----------
    data : np.ndarray
        2-D string array whose last column is the class label.
    feature_names : list[str] | None
        Column names aligned with ``data``; only used to detect an ID-like
        first column, which must never be chosen as a split feature.

    Returns
    -------
    int
        Best feature column index, or -1 when no feature's gain ratio
        exceeds the module-level ``epsilon`` threshold.
    """
    num_cols = data.shape[1]
    best_info_gain_ratio = float('-inf')
    best_feature_index = -1

    # Skip column 0 when its header looks like a row identifier.  ``first``
    # is lower-cased, so every candidate must be lower-case too — the old
    # 'No.' entry could never match and made that check dead.
    start_idx = 0
    if feature_names:
        first = feature_names[0].strip().lower()
        if first in ('id', 'day', '编号', 'no.', '序号'):
            start_idx = 1

    for feature_index in range(start_idx, num_cols - 1):  # last column is the label
        info_gain_ratio = calc_info_gain_ratio(data, feature_index)
        if info_gain_ratio > best_info_gain_ratio and info_gain_ratio > epsilon:
            best_info_gain_ratio = info_gain_ratio
            best_feature_index = feature_index

    return best_feature_index

def majority_label(data):
    """Return the most frequent label (last column) in ``data``."""
    values, freqs = np.unique(data[:, -1], return_counts=True)
    return values[int(np.argmax(freqs))]

def build_tree(data, feature_names, max_depth=None, min_samples=1, depth=0):
    """Recursively grow a decision tree over ``data``.

    ``feature_names`` must stay aligned with the columns of ``data`` (the
    last entry names the label column).  Returns a nested dict that is
    either ``{'type': 'leaf', 'label': ...}`` or
    ``{'type': 'node', 'feature_index': ..., 'feature_name': ...,
    'children': {value: subtree, ...}}``.
    """
    labels = data[:, -1]

    # Stop: every sample carries the same label.
    if np.all(labels == labels[0]):
        return {'type': 'leaf', 'label': labels[0]}

    # Stop: nothing left to split on (<= 2 columns, e.g. only an ID column
    # plus the label remain), depth limit reached, or too few samples.
    depth_exceeded = max_depth is not None and depth >= max_depth
    if data.shape[1] <= 2 or depth_exceeded or len(data) <= min_samples:
        return {'type': 'leaf', 'label': majority_label(data)}

    best = choose_best_feature(data, feature_names)
    if best == -1:  # no feature beats the information-gain-ratio threshold
        return {'type': 'leaf', 'label': majority_label(data)}

    node = {
        'type': 'node',
        'feature_index': best,
        'feature_name': feature_names[best],
        'children': {},
    }

    # One branch per observed value of the chosen feature; drop the used
    # column (and its name) before recursing.
    remaining_names = feature_names[:best] + feature_names[best + 1:]
    for value in np.unique(data[:, best]):
        branch_rows = np.delete(data[data[:, best] == value], best, axis=1)
        node['children'][value] = build_tree(
            branch_rows, remaining_names, max_depth, min_samples, depth + 1)

    return node

# Usage: initialise feature_names from the same file ``data`` was read from
# (one name per column, so names stay aligned with the array).
n_cols = data.shape[1]
# Read the real feature names from the CSV header; try the local path first,
# then the repo-relative one.  Only file-system errors are caught so other
# failures still surface (the original bare ``except:`` hid everything).
try:
    with open('LearningData1.csv', encoding='utf-8') as f:
        header = f.readline().strip().split(',')
except OSError:
    with open('Works/第5章特征选择/LearningData1.csv', encoding='utf-8') as f:
        header = f.readline().strip().split(',')
feature_names = header
# Build the decision tree (max_depth / min_samples may be passed here).
tree = build_tree(data, feature_names)

def print_tree(node, indent=0):
    """Pretty-print a tree from ``build_tree`` with two-space indentation."""
    pad = '  ' * indent
    if node['type'] == 'leaf':
        print(f"{pad}Leaf -> label: {node['label']}")
        return
    # Internal node: show the feature, then each value branch one level in.
    print(f"{pad}Node -> feature_name: {node['feature_name']}")
    for val, child in node['children'].items():
        print(f"{pad}  if value == {val}:")
        print_tree(child, indent + 2)

# Print the resulting decision tree to stdout.
print_tree(tree)