import pandas as pd
import numpy as np
import math
from collections import Counter
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report

class Node:
    """A node of the decision tree.

    A leaf stores its predicted class in ``value``; an internal node stores
    the split ``feature`` (plus ``threshold`` for continuous splits) and a
    ``children`` mapping from branch key to child node.
    """

    def __init__(self, feature=None, threshold=None, value=None, children=None):
        self.children = children    # {branch key: Node} for internal nodes
        self.value = value          # predicted class at a leaf, else None
        self.threshold = threshold  # cut point for a continuous split, else None
        self.feature = feature      # index of the feature tested at this node

class ID3DecisionTree:
    """ID3 decision-tree classifier with C4.5-style handling of continuous features.

    Splits are chosen by maximal information gain. A feature whose number of
    unique values exceeds ``continuous_unique_threshold`` is treated as
    continuous and gets a binary split at the best midpoint threshold;
    otherwise it is treated as discrete and gets one branch per value.
    """

    def __init__(self, max_depth=None, min_samples_split=2,
                 continuous_unique_threshold=10):
        # Maximum tree depth; None (or 0) means unlimited.
        self.max_depth = max_depth
        # Minimum number of samples a node needs before a split is attempted.
        self.min_samples_split = min_samples_split
        # Unique-value count above which a feature is considered continuous
        # (generalizes the previously hard-coded constant 10).
        self.continuous_unique_threshold = continuous_unique_threshold
        self.root = None
        self.feature_names = None

    def fit(self, X, y, feature_names=None):
        """Build the tree from samples ``X`` (n_samples, n_features) and integer labels ``y``."""
        self.feature_names = feature_names
        self.root = self._build_tree(X, y, depth=0)

    def _entropy(self, y):
        """Shannon entropy (in bits) of the non-negative integer label array ``y``."""
        if len(y) == 0:
            return 0
        probabilities = np.bincount(y) / len(y)
        # Vectorized -sum(p * log2(p)), skipping classes with zero probability.
        probabilities = probabilities[probabilities > 0]
        return -np.sum(probabilities * np.log2(probabilities))

    def _information_gain(self, X, y, feature_idx):
        """Return ``(gain, threshold)`` for splitting on ``feature_idx``.

        ``threshold`` is the midpoint cut for a continuous feature and None
        for a discrete one. (Fix: the original returned a bare scalar for
        discrete features and a tuple for continuous ones, forcing callers to
        re-detect the feature type.)
        """
        parent_entropy = self._entropy(y)
        if self._is_continuous(X[:, feature_idx]):
            return self._information_gain_continuous(X, y, feature_idx, parent_entropy)
        return self._information_gain_discrete(X, y, feature_idx, parent_entropy), None

    def _is_continuous(self, feature_values):
        """Heuristic: a feature is continuous if it has many distinct values."""
        return len(np.unique(feature_values)) > self.continuous_unique_threshold

    def _information_gain_discrete(self, X, y, feature_idx, parent_entropy):
        """Information gain of a multiway split on a discrete feature."""
        children_entropy = 0
        for value in np.unique(X[:, feature_idx]):
            mask = X[:, feature_idx] == value
            if np.sum(mask) > 0:
                weight = np.sum(mask) / len(y)
                children_entropy += weight * self._entropy(y[mask])
        return parent_entropy - children_entropy

    def _information_gain_continuous(self, X, y, feature_idx, parent_entropy):
        """Best information gain of a binary split on a continuous feature.

        Returns ``(best_gain, best_threshold)``; ``best_threshold`` is None
        when no split improves on the parent (``best_gain`` stays 0).
        """
        feature_values = X[:, feature_idx]
        sorted_values = np.sort(np.unique(feature_values))

        best_gain = 0
        best_threshold = None

        # Candidate thresholds are midpoints between consecutive unique values.
        for i in range(len(sorted_values) - 1):
            threshold = (sorted_values[i] + sorted_values[i + 1]) / 2

            left_mask = feature_values <= threshold
            right_mask = feature_values > threshold

            if np.sum(left_mask) == 0 or np.sum(right_mask) == 0:
                continue

            # Weighted average entropy of the two children.
            weighted_entropy = (
                np.sum(left_mask) * self._entropy(y[left_mask])
                + np.sum(right_mask) * self._entropy(y[right_mask])
            ) / len(y)

            gain = parent_entropy - weighted_entropy
            if gain > best_gain:
                best_gain = gain
                best_threshold = threshold

        return best_gain, best_threshold

    def _best_feature(self, X, y):
        """Return ``(feature_idx, threshold, gain)`` of the highest-gain split.

        Fix: the unified ``_information_gain`` return type removes the
        redundant second ``_is_continuous`` pass per feature.
        """
        best_gain = -1
        best_feature = None
        best_threshold = None

        for feature_idx in range(X.shape[1]):
            gain, threshold = self._information_gain(X, y, feature_idx)
            if gain > best_gain:
                best_gain = gain
                best_feature = feature_idx
                best_threshold = threshold

        return best_feature, best_threshold, best_gain

    def _build_tree(self, X, y, depth):
        """Recursively grow the tree; returns the root Node of the subtree."""
        # Stop when the node is pure, has too few samples, or is too deep.
        if (len(np.unique(y)) == 1
                or len(X) < self.min_samples_split
                or (self.max_depth and depth >= self.max_depth)):
            return Node(value=Counter(y).most_common(1)[0][0])

        best_feature, threshold, best_gain = self._best_feature(X, y)

        # No useful split found: emit a majority-class leaf.
        if best_feature is None or best_gain <= 0:
            return Node(value=Counter(y).most_common(1)[0][0])

        node = Node(feature=best_feature, threshold=threshold)
        node.children = {}

        if threshold is not None:  # continuous feature: binary split
            left_mask = X[:, best_feature] <= threshold
            right_mask = X[:, best_feature] > threshold

            node.children['left'] = self._build_tree(X[left_mask], y[left_mask], depth + 1)
            node.children['right'] = self._build_tree(X[right_mask], y[right_mask], depth + 1)
        else:  # discrete feature: one branch per observed value
            for value in np.unique(X[:, best_feature]):
                mask = X[:, best_feature] == value
                if np.sum(mask) > 0:
                    node.children[value] = self._build_tree(X[mask], y[mask], depth + 1)

        return node

    def predict(self, X):
        """Predict a class label for each row of ``X``; returns an ndarray."""
        return np.array([self._predict_single(x, self.root) for x in X])

    def _predict_single(self, x, node):
        """Route one sample down the tree and return the reached leaf's class."""
        if node.value is not None:  # leaf node
            return node.value

        feature_value = x[node.feature]

        if node.threshold is not None:  # continuous split
            branch = 'left' if feature_value <= node.threshold else 'right'
            return self._predict_single(x, node.children[branch])
        if feature_value in node.children:  # discrete split, known value
            return self._predict_single(x, node.children[feature_value])
        # Discrete value never seen in training: fall back to a class
        # from this subtree.
        return self._get_most_common_class(node)

    def _get_most_common_class(self, node):
        """Fallback class for a subtree.

        NOTE(review): despite the name, this returns the class of the FIRST
        reachable leaf (depth-first), not a true majority over the subtree —
        behavior kept for compatibility.
        """
        if node.value is not None:
            return node.value
        for child in node.children.values():
            return self._get_most_common_class(child)
        return 0

    def print_tree(self, node=None, depth=0, prefix=""):
        """Pretty-print the (sub)tree rooted at ``node`` (defaults to the root)."""
        if node is None:
            node = self.root

        indent = "  " * depth

        if node.value is not None:
            print(f"{indent}{prefix}预测类别: {node.value}")
            return

        feature_name = (self.feature_names[node.feature]
                        if self.feature_names else f"Feature_{node.feature}")

        if node.threshold is not None:  # continuous split
            print(f"{indent}{prefix}{feature_name} <= {node.threshold:.3f}")
            self.print_tree(node.children['left'], depth + 1, "是 → ")
            print(f"{indent}{prefix}{feature_name} > {node.threshold:.3f}")
            self.print_tree(node.children['right'], depth + 1, "否 → ")
        else:  # discrete split
            for value, child_node in node.children.items():
                # Fix: the unconditional "{value:.3f}" crashed on non-numeric
                # discrete values; keep numeric formatting, fall back to str.
                try:
                    value_repr = f"{value:.3f}"
                except (TypeError, ValueError):
                    value_repr = str(value)
                print(f"{indent}{prefix}{feature_name} = {value_repr}")
                self.print_tree(child_node, depth + 1, "→ ")

def main():
    """Demo: train and evaluate the ID3 tree on sklearn's Wine dataset."""
    # Load the Wine dataset.
    print("正在加载Wine数据集...")
    wine = load_wine()
    X, y = wine.data, wine.target
    feature_names = wine.feature_names
    target_names = wine.target_names

    print(f"数据集形状: X {X.shape}, y {y.shape}")
    print(f"特征名称: {feature_names}")
    print(f"类别名称: {target_names}")
    print(f"类别分布: {np.bincount(y)}")

    # Stratified split keeps the class balance in both subsets.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42, stratify=y
    )

    print(f"\n训练集大小: {X_train.shape[0]}")
    print(f"测试集大小: {X_test.shape[0]}")

    # Create and train the ID3 decision tree.
    print("\n训练ID3决策树...")
    dt = ID3DecisionTree(max_depth=5, min_samples_split=5)
    dt.fit(X_train, y_train, feature_names)

    # Predict on the held-out test set.
    print("\n进行预测...")
    y_pred = dt.predict(X_test)

    # Evaluate the model.
    accuracy = accuracy_score(y_test, y_pred)
    print(f"\n模型准确率: {accuracy:.4f}")

    print("\n分类报告:")
    print(classification_report(y_test, y_pred, target_names=target_names))

    # Print the tree structure.
    # Fix: the message previously claimed "前3层" (first 3 layers) while
    # print_tree() actually prints the whole tree (grown up to max_depth=5).
    print("\n决策树结构:")
    dt.print_tree()

    # Spot-check a single random test sample.
    print(f"\n随机测试一个样本:")
    sample_idx = np.random.randint(0, len(X_test))
    sample = X_test[sample_idx]
    true_label = y_test[sample_idx]
    pred_label = dt.predict([sample])[0]

    print(f"样本特征: {sample}")
    print(f"真实类别: {target_names[true_label]}")
    print(f"预测类别: {target_names[pred_label]}")
    print(f"预测{'正确' if true_label == pred_label else '错误'}")

# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()