import numpy as np

class ID3DecisionTree:
    """ID3 decision tree classifier for the UCI Wine dataset.

    Continuous features are handled by binary discretization: for each
    feature the threshold with the largest information gain is chosen,
    and samples are mapped to 0 (<= threshold) or 1 (> threshold).
    The learned tree is a nested dict (see build_tree).
    """

    def __init__(self):
        self.tree = None  # learned decision tree (nested dict); assigned by the caller
        # Names of the 13 Wine features, in UCI column order (used for display only).
        self.feature_names = [
            "Alcohol", "Malic acid", "Ash", "Alcalinity of ash", "Magnesium",
            "Total phenols", "Flavanoids", "Nonflavanoid phenols", "Proanthocyanins",
            "Color intensity", "Hue", "OD280/OD315", "Proline"
        ]

    def load_wine(self, file_path="wine.data"):
        """Load the Wine dataset and split it 80/20 into train and test sets.

        Each non-empty line of the file is: class label (1/2/3) followed by
        13 comma-separated feature values. Labels are remapped to 0/1/2 so
        they can be used directly as array indices (e.g. with np.bincount).

        Args:
            file_path: path to the wine.data file.

        Returns:
            (X_train, X_test, y_train, y_test) as numpy arrays; y_* are int.
        """
        data = []
        with open(file_path, 'r') as f:
            for line in f:
                if line.strip():
                    parts = list(map(float, line.strip().split(',')))
                    label = int(parts[0]) - 1  # remap labels 1/2/3 -> 0/1/2
                    features = parts[1:]       # the 13 feature values
                    data.append(features + [label])

        # Shuffle with a fixed seed so the train/test split is reproducible.
        data = np.array(data)
        np.random.seed(42)
        np.random.shuffle(data)

        X = data[:, :-1]              # first 13 columns: features
        y = data[:, -1].astype(int)   # last column: class label

        # Hold out the first 20% (after shuffling) as the test set.
        test_size = 0.2
        test_num = int(len(X) * test_size)
        X_train, X_test = X[test_num:], X[:test_num]
        y_train, y_test = y[test_num:], y[:test_num]

        return X_train, X_test, y_train, y_test

    def calculate_entropy(self, y):
        """Return the Shannon entropy (base 2) of the integer labels in y."""
        if len(y) == 0:
            return 0.0
        counts = np.bincount(y)
        # Drop zero counts before taking log2 (log2(0) is undefined).
        probs = counts[counts > 0] / len(y)
        return float(-np.sum(probs * np.log2(probs)))

    def discrete_continuous_feature(self, X_col, y, feature_idx):
        """Binarize one continuous feature at its best information-gain threshold.

        Candidate thresholds are midpoints between consecutive distinct
        values; samples map to 0 (<= threshold) or 1 (> threshold).

        Args:
            X_col: 1D array with this feature's value for every sample.
            y: matching integer labels.
            feature_idx: index of the feature (kept for interface compatibility).

        Returns:
            (best information gain, best threshold, binarized column).
        """
        # np.unique already returns a sorted array; the original's extra
        # np.sort was redundant work.
        sorted_vals = np.unique(X_col)
        candidate_thresholds = [(sorted_vals[i] + sorted_vals[i + 1]) / 2
                                for i in range(len(sorted_vals) - 1)]
        if not candidate_thresholds:
            # Every sample has the same value: no split is possible.
            return 0.0, X_col[0], (X_col >= X_col[0]).astype(int)

        base_entropy = self.calculate_entropy(y)
        max_gain = -1.0
        best_threshold = candidate_thresholds[0]
        best_discrete_col = None

        for threshold in candidate_thresholds:
            discrete_col = (X_col > threshold).astype(int)
            # Conditional entropy: weighted entropy of the two split sides.
            conditional_entropy = 0.0
            for val in (0, 1):
                subset_y = y[discrete_col == val]
                if len(subset_y) == 0:
                    continue
                p = len(subset_y) / len(y)
                conditional_entropy += p * self.calculate_entropy(subset_y)
            gain = base_entropy - conditional_entropy
            if gain > max_gain:
                max_gain = gain
                best_threshold = threshold
                best_discrete_col = discrete_col

        return max_gain, best_threshold, best_discrete_col

    def select_best_feature(self, X, y, used_features):
        """Pick the unused feature with the largest information gain.

        Args:
            X: feature matrix, shape (n_samples, n_features).
            y: integer labels.
            used_features: set of feature indices already used on this path.

        Returns:
            (best feature index, its threshold, X with that column binarized),
            or (-1, None, None) when no split yields positive gain.
        """
        max_gain = -1.0
        best_feature_idx = -1
        best_threshold = None
        best_discrete_col = None

        for idx in range(X.shape[1]):
            if idx in used_features:
                continue
            gain, threshold, discrete_col = self.discrete_continuous_feature(X[:, idx], y, idx)
            if gain > max_gain:
                max_gain = gain
                best_feature_idx = idx
                best_threshold = threshold
                best_discrete_col = discrete_col

        # No feature improves on the current entropy: signal "no split".
        if max_gain <= 0:
            return -1, None, None

        # Build the discretized matrix once, only for the winning feature
        # (the original re-copied the whole matrix on every improvement).
        best_discrete_X = X.copy()
        best_discrete_X[:, best_feature_idx] = best_discrete_col
        return best_feature_idx, best_threshold, best_discrete_X

    def majority_vote(self, y):
        """Return the most frequent class label in y (used for leaf nodes)."""
        return np.argmax(np.bincount(y))

    def build_tree(self, X, y, used_features, depth=0, max_depth=10):
        """Recursively build the ID3 decision tree as a nested dict.

        Args:
            X, y: training samples reaching this node.
            used_features: feature indices already used on this path.
                Not mutated (the original leaked its add() back to the caller);
                each recursion receives its own fresh set.
            depth: current depth, recorded on each node.
            max_depth: depth cap to limit overfitting.

        Returns:
            A dict node: either {"leaf": True, "class", "depth"} or an
            internal node with "feature_idx", "feature_name", "threshold",
            "depth" and "children" (keys 0/1 -> subtrees).
        """
        # Stop condition 1: node is pure (single class).
        if len(np.unique(y)) == 1:
            return {"leaf": True, "class": y[0], "depth": depth}

        # Stop condition 2: no unused feature left, or depth cap reached.
        if len(used_features) == X.shape[1] or depth >= max_depth:
            return {"leaf": True, "class": self.majority_vote(y), "depth": depth}

        best_feature_idx, best_threshold, discrete_X = self.select_best_feature(X, y, used_features)
        if best_feature_idx == -1:  # no split with positive information gain
            return {"leaf": True, "class": self.majority_vote(y), "depth": depth}

        # Work on a new set instead of mutating the caller's argument.
        child_used = used_features | {best_feature_idx}

        tree_node = {
            "leaf": False,
            "feature_idx": best_feature_idx,
            "feature_name": self.feature_names[best_feature_idx],
            "threshold": best_threshold,
            "depth": depth,
            "children": {}  # key: binarized value (0/1), value: subtree
        }

        # Split on the chosen feature's binarized values and recurse.
        for val in (0, 1):
            subset_idx = discrete_X[:, best_feature_idx] == val
            subset_X, subset_y = X[subset_idx], y[subset_idx]
            if len(subset_y) == 0:
                # Empty branch: fall back to the parent's majority class.
                tree_node["children"][val] = {
                    "leaf": True,
                    "class": self.majority_vote(y),
                    "depth": depth + 1
                }
            else:
                tree_node["children"][val] = self.build_tree(
                    subset_X, subset_y, set(child_used), depth + 1, max_depth
                )

        return tree_node

    def predict_sample(self, x, tree):
        """Predict the class of one sample by walking the tree recursively."""
        if tree["leaf"]:
            return tree["class"]

        # Binarize this sample's value exactly as training did (> threshold -> 1).
        val = 1 if x[tree["feature_idx"]] > tree["threshold"] else 0

        child = tree["children"].get(val)
        if child is None:
            # Defensive: build_tree always creates both children, but the
            # original fallback indexed children[...]["class"] and crashed
            # when a child was an internal node. Descend any existing child.
            child = next(iter(tree["children"].values()))
        return self.predict_sample(x, child)

    def predict(self, X, tree):
        """Predict a class for every row of X; returns a numpy array."""
        return np.array([self.predict_sample(x, tree) for x in X])

    def evaluate(self, y_true, y_pred):
        """Return the accuracy: fraction of predictions matching y_true."""
        return np.sum(y_true == y_pred) / len(y_true)

    def print_tree(self, tree, indent=""):
        """Pretty-print the tree structure (user-facing output, in Chinese)."""
        if tree["leaf"]:
            print(f"{indent}→ 叶子节点：类别={tree['class']}（深度={tree['depth']}）")
            return
        # Internal node: show feature and the learned threshold.
        print(f"{indent}→ 特征：{tree['feature_name']}（索引={tree['feature_idx']}）")
        print(f"{indent}  阈值：{tree['threshold']:.4f}（>阈值→1，≤阈值→0）")
        for val, child in tree["children"].items():
            print(f"{indent}  子节点（离散值={val}）：")
            self.print_tree(child, indent + "    ")

# 测试ID3决策树（基于Wine数据集）
# Demo: train and evaluate the ID3 tree on the Wine dataset.
if __name__ == "__main__":
    # 1. Create the model.
    model = ID3DecisionTree()

    # 2. Load the data. wine.data must sit next to this script, or pass an
    #    explicit path, e.g. model.load_wine(file_path="D:/data/wine.data").
    X_train, X_test, y_train, y_test = model.load_wine(file_path="wine.data")
    print(f"数据加载完成：训练集{len(X_train)}条，测试集{len(X_test)}条\n")

    # 3. Build the tree (start with an empty used-feature set, depth cap 10).
    print("开始构建ID3决策树...")
    model.tree = model.build_tree(X_train, y_train, used_features=set(), max_depth=10)
    print("决策树构建完成\n")

    # 4. Show the learned structure.
    print("===== ID3决策树结构 =====")
    model.print_tree(model.tree)
    print()

    # 5./6. Predict on the held-out test set and measure accuracy.
    predictions = model.predict(X_test, model.tree)
    acc = model.evaluate(y_test, predictions)

    # 7. Report per-sample results, mapping indices 0/1/2 back to the
    #    original Wine labels 1/2/3.
    print("===== 测试集分类结果 =====")
    to_original = {0: 1, 1: 2, 2: 3}
    truth = [to_original[label] for label in y_test]
    guessed = [to_original[label] for label in predictions]

    # Only the first 10 samples are printed to keep the output short.
    shown = min(10, len(y_test))
    for i in range(shown):
        result = "正确" if truth[i] == guessed[i] else "错误"
        print(f"样本{i+1}：真实类别={truth[i]}，预测类别={guessed[i]}，结果={result}")
    if len(y_test) > 10:
        print(f"...（共{len(y_test)}个样本，省略剩余{len(y_test)-10}个）")

    print("\n===== 模型评估 =====")
    print(f"测试集准确率：{acc:.2f}（{np.sum(y_test==predictions)}/{len(y_test)}个样本预测正确）")