import numpy as np
from collections import Counter
import math

class DecisionTreeNode:
    """A single node of a decision tree.

    Internal nodes store a (feature_idx, threshold) binary split and the
    left/right children; leaf nodes store the predicted class in ``value``.
    Every node also records how many training samples reached it and their
    class distribution, which is used for printing and confidence estimates.
    """

    def __init__(self):
        self.feature_idx = None   # index of the feature used to split (internal nodes only)
        self.threshold = None     # split threshold: go left when feature value <= threshold
        self.left = None          # subtree for samples with feature <= threshold
        self.right = None         # subtree for samples with feature > threshold
        self.value = None         # predicted class label (leaf nodes only)
        self.samples = 0          # number of training samples that reached this node
        self.class_counts = {}    # mapping: class label -> sample count at this node

    def __repr__(self):
        # Leaf vs. internal representation aids debugging of built trees.
        if self.value is not None:
            return (f"DecisionTreeNode(leaf, value={self.value!r}, "
                    f"samples={self.samples})")
        return (f"DecisionTreeNode(feature_idx={self.feature_idx}, "
                f"threshold={self.threshold}, samples={self.samples})")

class DecisionTreeClassifier:
    """A CART-style decision tree classifier using entropy / information gain.

    Splits are binary (``feature <= threshold``); candidate thresholds are
    midpoints between consecutive distinct feature values. Leaves predict
    the majority class of the training samples that reach them.
    """

    def __init__(self, max_depth=None, min_samples_split=2):
        """
        Args:
            max_depth: maximum tree depth (None = grow until pure/too small).
            min_samples_split: minimum samples required to split a node.
        """
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.root = None
        self.feature_names = None
        self.class_names = None

    def entropy(self, y):
        """Return the Shannon entropy (base 2) of the labels in ``y``."""
        total = len(y)
        # Counter only stores positive counts, so no zero-probability guard
        # is needed. An empty y yields an empty sum, i.e. entropy 0.
        return -sum((count / total) * math.log2(count / total)
                    for count in Counter(y).values())

    def information_gain(self, X, y, feature_idx, threshold):
        """Information gain of splitting on ``X[:, feature_idx] <= threshold``.

        Returns 0 when the split leaves either side empty (no information).
        """
        left_mask = X[:, feature_idx] <= threshold
        left_y = y[left_mask]
        right_y = y[~left_mask]

        if len(left_y) == 0 or len(right_y) == 0:
            return 0

        n = len(y)
        weighted_entropy = (len(left_y) / n * self.entropy(left_y)
                            + len(right_y) / n * self.entropy(right_y))
        return self.entropy(y) - weighted_entropy

    def find_best_split(self, X, y):
        """Search every feature and candidate threshold for the split with
        the highest information gain.

        Returns:
            (best_feature, best_threshold, best_gain). Feature and threshold
            are None when no split achieves positive gain.
        """
        best_gain = 0
        best_feature = None
        best_threshold = None

        for feature_idx in range(X.shape[1]):
            # Candidate thresholds: midpoints between consecutive distinct
            # values (empty when the feature is constant on this node).
            values = np.unique(X[:, feature_idx])
            for threshold in (values[:-1] + values[1:]) / 2:
                gain = self.information_gain(X, y, feature_idx, threshold)
                if gain > best_gain:
                    best_gain = gain
                    best_feature = feature_idx
                    best_threshold = threshold

        return best_feature, best_threshold, best_gain

    def build_tree(self, X, y, depth=0):
        """Recursively grow the tree; returns the subtree root for (X, y)."""
        node = DecisionTreeNode()
        node.samples = len(y)
        node.class_counts = Counter(y)

        # Stop when the depth limit is reached, the node is too small to
        # split, or the node is already pure (single class).
        if ((self.max_depth is not None and depth >= self.max_depth)
                or len(y) < self.min_samples_split
                or len(node.class_counts) == 1):
            node.value = node.class_counts.most_common(1)[0][0]
            return node

        best_feature, best_threshold, best_gain = self.find_best_split(X, y)

        # No split yields positive gain -> make this a majority-class leaf.
        # (Check the None sentinel rather than comparing the float gain.)
        if best_feature is None:
            node.value = node.class_counts.most_common(1)[0][0]
            return node

        node.feature_idx = best_feature
        node.threshold = best_threshold

        left_mask = X[:, best_feature] <= best_threshold
        right_mask = ~left_mask

        node.left = self.build_tree(X[left_mask], y[left_mask], depth + 1)
        node.right = self.build_tree(X[right_mask], y[right_mask], depth + 1)

        return node

    def fit(self, X, y, feature_names=None, class_names=None):
        """Train the tree.

        Args:
            X: 2-D array-like of shape (n_samples, n_features).
            y: 1-D array-like of class labels.
            feature_names: optional sequence used when printing/explaining.
            class_names: optional sequence mapping label -> display name.
        """
        # Coerce to arrays so plain Python lists are accepted as well.
        X = np.asarray(X)
        y = np.asarray(y)
        self.feature_names = feature_names
        self.class_names = class_names
        self.root = self.build_tree(X, y)

    def predict_sample(self, x, node=None):
        """Predict the class of a single sample.

        Walks the tree iteratively (avoids recursion-depth limits on very
        deep trees). ``node`` optionally starts the walk at a subtree.
        """
        if node is None:
            node = self.root

        # Leaves are the only nodes with a non-None value.
        while node.value is None:
            if x[node.feature_idx] <= node.threshold:
                node = node.left
            else:
                node = node.right

        return node.value

    def predict(self, X):
        """Predict classes for each row of ``X``; returns a list of labels."""
        return [self.predict_sample(sample) for sample in X]

    def print_tree(self, node=None, depth=0, prefix="Root: "):
        """Pretty-print the tree structure to stdout."""
        if node is None:
            node = self.root

        indent = "  " * depth

        if node.value is not None:
            # Leaf node: show the prediction and sample distribution.
            class_name = self.class_names[node.value] if self.class_names else node.value
            print(f"{indent}{prefix}Predict: {class_name} "
                  f"(samples: {node.samples}, counts: {node.class_counts})")
        else:
            # Internal node: show the split condition, then both subtrees.
            feature_name = (self.feature_names[node.feature_idx]
                            if self.feature_names else f"feature_{node.feature_idx}")
            print(f"{indent}{prefix}{feature_name} <= {node.threshold:.2f}")
            print(f"{indent}  (samples: {node.samples}, counts: {node.class_counts})")

            self.print_tree(node.left, depth + 1, "├── True: ")
            self.print_tree(node.right, depth + 1, "└── False: ")

    def explain_prediction(self, x):
        """Explain the prediction for one sample.

        Returns:
            dict with 'prediction' (display name if class_names given),
            'path' (list of human-readable decision steps), and
            'confidence' (majority-class fraction at the reached leaf).
        """
        path = []
        node = self.root

        while node.value is None:
            feature_name = (self.feature_names[node.feature_idx]
                            if self.feature_names else f"feature_{node.feature_idx}")

            if x[node.feature_idx] <= node.threshold:
                path.append(f"{feature_name} ({x[node.feature_idx]:.2f}) <= {node.threshold:.2f}")
                node = node.left
            else:
                path.append(f"{feature_name} ({x[node.feature_idx]:.2f}) > {node.threshold:.2f}")
                node = node.right

        class_name = self.class_names[node.value] if self.class_names else node.value

        return {
            'prediction': class_name,
            'path': path,
            'confidence': max(node.class_counts.values()) / node.samples
        }

# Demo: train and inspect the decision tree classifier on the iris data set
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    # Load the iris data set.
    iris = load_iris()
    X, y = iris.data, iris.target

    # Hold out 30% of the samples for testing.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42
    )

    # Fit a shallow tree so the printed structure stays readable.
    classifier = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
    classifier.fit(
        X_train,
        y_train,
        feature_names=iris.feature_names,
        class_names=iris.target_names,
    )

    # Show the learned structure.
    print("决策树结构：")
    classifier.print_tree()
    print("\n" + "=" * 60 + "\n")

    # Evaluate accuracy on the held-out split.
    y_pred = classifier.predict(X_test)
    accuracy = sum(y_pred == y_test) / len(y_test)
    print(f"测试准确率: {accuracy:.2f}")
    print("\n详细预测解释（前5个样本）：")

    # Walk through the decision path of the first few test samples.
    for idx, sample in enumerate(X_test[:5]):
        explanation = classifier.explain_prediction(sample)
        actual_class = iris.target_names[y_test[idx]]

        print(f"\n样本 {idx + 1}:")
        print(f"特征值: {sample}")
        print(f"预测: {explanation['prediction']}")
        print(f"实际: {actual_class}")
        print(f"置信度: {explanation['confidence']:.2f}")
        print("决策路径:")
        for step in explanation['path']:
            print(f"  → {step}")