import numpy as np
from collections import defaultdict


class DecisionTreeClassifier:
    """CART-style decision tree classifier using Gini impurity.

    Labels in ``y`` must be non-negative integers (class counting relies on
    ``np.bincount``).  The fitted tree is a nested dict: split nodes hold
    ``feature_idx``, ``threshold``, ``left``, ``right`` and ``importance``;
    leaf nodes hold ``class`` and ``prob``.
    """

    def __init__(self, max_depth=None, min_samples_split=2, random_state=None):
        self.max_depth = max_depth                  # None = grow until pure / too small
        self.min_samples_split = min_samples_split  # minimum samples to attempt a split
        self.random_state = random_state            # seeds the feature visiting order
        self.tree = None
        self.feature_importances_ = None            # normalized importances, set by fit()

    def fit(self, X, y):
        """Grow the tree on X (n_samples, n_features) / y, then derive importances."""
        if self.random_state is not None:
            np.random.seed(self.random_state)
        self.tree = self._build_tree(X, y, depth=0)
        self._compute_feature_importances(X.shape[1])

    def get_tree_depth(self):
        """Return the depth of the fitted tree (0 for a single-leaf tree)."""
        return self._get_node_depth(self.tree)

    def _get_node_depth(self, node):
        """Recursively compute the depth of the subtree rooted at *node*."""
        if 'class' in node:
            return 0
        return 1 + max(self._get_node_depth(node['left']),
                       self._get_node_depth(node['right']))

    def has_splits(self):
        """Return True if the tree contains at least one split node.

        The root is either a leaf or a split node, so a single membership
        test suffices (the previous recursive check short-circuited to the
        same result via ``True or ...``).
        """
        return 'feature_idx' in self.tree

    def is_trivial(self):
        """Return True for a trivial tree (a single leaf, no splits)."""
        return 'class' in self.tree

    def _compute_feature_importances(self, n_features):
        """Aggregate per-split Gini reductions into normalized importances."""
        self.feature_importances_ = np.zeros(n_features)
        self._accumulate_feature_importance(self.tree)

        # A tiny epsilon keeps the normalization well-defined even when the
        # tree is a single leaf (all raw importances would be zero).
        self.feature_importances_ += 1e-10
        self.feature_importances_ /= np.sum(self.feature_importances_)

    def _accumulate_feature_importance(self, node):
        """Recursively sum each split's stored importance into its feature slot."""
        if 'feature_idx' in node:
            # Importance = Gini reduction weighted by samples reaching the node.
            self.feature_importances_[node['feature_idx']] += node['importance']
            self._accumulate_feature_importance(node['left'])
            self._accumulate_feature_importance(node['right'])

    def predict(self, X):
        """Return the predicted class label for each row of X."""
        return np.array([self._predict(x, self.tree) for x in X])

    def predict_proba(self, X):
        """Return ``[P(class 0), P(class 1)]`` for each row of X."""
        return np.array([self._predict_proba(x, self.tree) for x in X])

    def _majority_leaf(self, y):
        """Build a leaf holding the majority class and its empirical probability."""
        majority_class = np.argmax(np.bincount(y))
        return {'class': majority_class, 'prob': np.mean(y == majority_class)}

    def _build_tree(self, X, y, depth):
        """Recursively grow the tree; return a node dict."""
        y = np.asarray(y)

        # Pure node: stop with a fully-confident leaf.
        if len(np.unique(y)) == 1:
            return {'class': y[0], 'prob': 1.0}

        # Stop on depth limit or insufficient samples.
        if (self.max_depth is not None and depth >= self.max_depth) \
                or len(X) < self.min_samples_split:
            return self._majority_leaf(y)

        best_split = self._find_best_split(X, y)
        if best_split is None:  # no valid split separates the data
            return self._majority_leaf(y)

        feature_idx, threshold, gini_reduction = best_split

        left_mask = X[:, feature_idx] <= threshold
        right_mask = ~left_mask

        return {
            'feature_idx': feature_idx,
            'threshold': threshold,
            'left': self._build_tree(X[left_mask], y[left_mask], depth + 1),
            'right': self._build_tree(X[right_mask], y[right_mask], depth + 1),
            # Weight the impurity decrease by the samples reaching this node.
            'importance': gini_reduction * len(y),
        }

    @staticmethod
    def _node_gini(y):
        """Gini impurity of one label array (assumes non-negative int labels)."""
        proportions = np.bincount(y) / len(y)
        return 1.0 - np.sum(proportions ** 2)

    def _find_best_split(self, X, y):
        """Exhaustively search (feature, threshold) pairs.

        Returns the split minimizing the weighted child Gini as a tuple
        ``(feature_idx, threshold, gini_reduction)``, or None when no
        valid split exists.
        """
        best_gini = float('inf')
        best_split = None
        n_samples, n_features = X.shape

        # BUG FIX: the parent impurity was previously computed as
        # 1 - (sum of proportions)**2, which is identically 0 because class
        # proportions sum to 1.  That made every stored gini_reduction
        # negative and corrupted feature_importances_.  Each proportion must
        # be squared before summing.
        parent_gini = self._node_gini(np.asarray(y))

        # Visit features in random order (only affects tie-breaking between
        # splits with equal Gini; all features are still examined).
        for feature_idx in np.random.permutation(n_features):
            # Every unique feature value is a candidate threshold.
            for threshold in np.unique(X[:, feature_idx]):
                left_mask = X[:, feature_idx] <= threshold
                right_mask = ~left_mask

                # Skip degenerate splits that leave one side empty.
                if left_mask.sum() == 0 or right_mask.sum() == 0:
                    continue

                gini = self._gini_index(y[left_mask], y[right_mask])
                if gini < best_gini:
                    best_gini = gini
                    best_split = (feature_idx, threshold, parent_gini - gini)

        return best_split

    def _gini_index(self, left_y, right_y):
        """Sample-weighted Gini impurity of a candidate split."""
        n_left, n_right = len(left_y), len(right_y)

        # An empty side means the split is invalid.
        if n_left == 0 or n_right == 0:
            return float('inf')

        total = n_left + n_right
        return (n_left / total) * self._node_gini(left_y) \
            + (n_right / total) * self._node_gini(right_y)

    def _predict(self, x, node):
        """Route sample x down the tree; return the reached leaf's class."""
        if 'class' in node:
            return node['class']
        branch = 'left' if x[node['feature_idx']] <= node['threshold'] else 'right'
        return self._predict(x, node[branch])

    def _predict_proba(self, x, node):
        """Route sample x down the tree; return ``[P(0), P(1)]`` from the leaf.

        NOTE(review): the leaf handling assumes binary labels {0, 1};
        multiclass input would silently produce misleading probabilities.
        """
        if 'class' in node:
            if node['class'] == 1:
                return [1 - node['prob'], node['prob']]
            return [node['prob'], 1 - node['prob']]
        branch = 'left' if x[node['feature_idx']] <= node['threshold'] else 'right'
        return self._predict_proba(x, node[branch])