import numpy as np
from sklearn.model_selection import train_test_split

###############################################################################
# 1. 树节点定义
###############################################################################
class Node:
    """
    A node in a CART decision tree.

    Attributes:
        feature_index: int, index of the feature this node splits on
            (None for leaf nodes).
        threshold: float, split threshold; samples with
            feature value <= threshold go to the left child.
        left, right: child Nodes (None for leaf nodes).
        label: predicted class label for leaf nodes (the majority class
            of the samples that reached the leaf); None for internal nodes.
    """
    def __init__(self, feature_index=None, threshold=None, left=None, right=None, label=None):
        self.feature_index = feature_index
        self.threshold = threshold
        self.left = left
        self.right = right
        self.label = label

    def __repr__(self):
        # Debugging aid: distinguish leaves from split nodes at a glance.
        if self.label is not None:
            return f"Node(label={self.label!r})"
        return f"Node(feature_index={self.feature_index}, threshold={self.threshold})"

###############################################################################
# 2. 基础函数：Gini、找最优分裂、建树等
###############################################################################
def gini_impurity(y):
    """
    Compute the Gini impurity of a label array.

    Gini = 1 - sum_k(p_k^2), where p_k is the fraction of samples
    belonging to class k.

    Parameters:
        y: array-like of class labels (multi-class supported).

    Returns:
        float in [0, 1); 0.0 for a pure (or empty) set of labels.
    """
    y = np.asarray(y)
    if y.size == 0:
        # Treat an empty node as pure; also avoids division by zero below.
        return 0.0
    # Only the counts matter; the label values themselves are irrelevant.
    _, counts = np.unique(y, return_counts=True)
    p = counts / y.size
    return 1.0 - np.sum(p ** 2)

def majority_label(y):
    """
    Return the most frequent label in ``y``.

    Ties are broken toward the smallest label, because ``np.unique``
    returns labels in sorted order and ``argmax`` picks the first maximum.
    """
    values, freq = np.unique(y, return_counts=True)
    return values[freq.argmax()]

def find_best_split(X, y, feature_indices):
    """
    Search the given features and candidate thresholds for the split
    minimizing the weighted Gini impurity of the two children.

    Parameters:
        X: ndarray, shape [n_samples, n_features].
        y: ndarray, shape [n_samples], class labels.
        feature_indices: iterable of feature column indices to consider.
            A random forest passes a random subset; a plain CART tree
            passes range(n_features).

    Returns:
        (best_feature, best_threshold), or (None, None) when no useful
        split exists (fewer than two samples, or a pure node).
    """
    n_samples = X.shape[0]
    if n_samples <= 1 or len(np.unique(y)) == 1:
        return None, None

    best_gini = float("inf")
    best_feature = None
    best_threshold = None

    for feature_index in feature_indices:
        feature_values = X[:, feature_index]
        unique_vals = np.unique(feature_values)
        # A constant feature cannot separate the samples.
        if len(unique_vals) < 2:
            continue

        # Skip the maximum value: "<= max" sends every sample left, so it
        # would always be rejected below — testing it is wasted work.
        for threshold in unique_vals[:-1]:
            left_mask = feature_values <= threshold
            y_left = y[left_mask]
            y_right = y[~left_mask]
            if len(y_left) == 0 or len(y_right) == 0:
                continue

            # Child impurities averaged by child size.
            w_left = len(y_left) / n_samples
            w_right = len(y_right) / n_samples
            weighted_gini = (w_left * gini_impurity(y_left)
                             + w_right * gini_impurity(y_right))

            # Strict "<" keeps the first-seen optimum, matching the
            # iteration order over features and thresholds.
            if weighted_gini < best_gini:
                best_gini = weighted_gini
                best_feature = feature_index
                best_threshold = threshold

    # best_feature/best_threshold are still None when no split was found.
    return best_feature, best_threshold

def build_tree(X, y, max_depth, min_samples_split, depth=0, n_features=None):
    """
    Recursively build a CART classification tree.

    Parameters:
        X: array-like, shape [n_samples, n_features].
        y: array-like, shape [n_samples], class labels.
        max_depth: maximum tree depth.
        min_samples_split: minimum number of samples required to split a node.
        depth: current recursion depth (internal parameter).
        n_features: number of features considered per split (random forest
            mode); None means use all features.

    Returns:
        The root Node of the constructed (sub)tree.
    """
    # np.asarray is a no-op for ndarrays, so converting unconditionally is
    # cheap on recursive calls and — unlike the depth==0 special case —
    # also keeps direct calls with list inputs at depth > 0 safe.
    X = np.asarray(X)
    y = np.asarray(y)

    n_samples, n_total_features = X.shape

    # Stopping conditions: depth limit reached, too few samples, or a
    # pure node — return a leaf predicting the majority class.
    if (depth >= max_depth
            or n_samples < min_samples_split
            or len(np.unique(y)) == 1):
        return Node(label=majority_label(y))

    # In random-forest mode, draw n_features distinct columns for this split.
    if n_features is None or n_features > n_total_features:
        feature_indices = range(n_total_features)
    else:
        feature_indices = np.random.choice(n_total_features, n_features, replace=False)

    best_feature, best_threshold = find_best_split(X, y, feature_indices)
    if best_feature is None:
        # No useful split found: fall back to a leaf.
        return Node(label=majority_label(y))

    # Partition the data according to the best split.
    left_mask = X[:, best_feature] <= best_threshold
    right_mask = ~left_mask

    # Recurse into both partitions and assemble the internal node.
    return Node(
        feature_index=best_feature,
        threshold=best_threshold,
        left=build_tree(X[left_mask], y[left_mask], max_depth,
                        min_samples_split, depth + 1, n_features),
        right=build_tree(X[right_mask], y[right_mask], max_depth,
                         min_samples_split, depth + 1, n_features),
    )

###############################################################################
# 3. MyCARTDecisionTree 类
###############################################################################
class MyCARTDecisionTree:
    """
    Hand-rolled CART decision tree classifier.

    Parameters:
        max_depth: maximum tree depth.
        min_samples_split: minimum samples required to split a node.
        n_features: number of features considered per split (useful when
            embedded in a random forest); None means all features.
    """
    def __init__(self, max_depth=5, min_samples_split=2, n_features=None):
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.n_features = n_features
        self.root = None  # populated by fit()

    def fit(self, X, y):
        """Grow the tree on training data (X, y)."""
        self.root = build_tree(
            X, y,
            max_depth=self.max_depth,
            min_samples_split=self.min_samples_split,
            depth=0,
            n_features=self.n_features,
        )

    def _predict_single(self, x, node):
        """Walk from ``node`` down to a leaf and return the leaf's label."""
        # Internal nodes carry label=None; leaves carry the predicted class.
        while node.label is None:
            if x[node.feature_index] <= node.threshold:
                node = node.left
            else:
                node = node.right
        return node.label

    def predict(self, X):
        """Predict a class label for each row of X; returns an ndarray."""
        X = np.asarray(X)
        return np.array([self._predict_single(row, self.root) for row in X])

###############################################################################
# 4. MyRandomForest 类
###############################################################################
class MyRandomForest:
    """
    Simplified random forest classifier built from MyCARTDecisionTree.

    Parameters:
        n_estimators: number of trees in the forest.
        max_depth, min_samples_split: passed through to every CART tree.
        max_features: number of features each split may consider
            (e.g. sqrt(n_features) is common for classification).
        bootstrap: whether each tree trains on a bootstrap resample.
    """
    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 max_features=None,
                 bootstrap=True):
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.max_features = max_features
        self.bootstrap = bootstrap

        self.trees = []  # fitted trees, populated by fit()

    def fit(self, X, y):
        """Train n_estimators trees, each on its own (possibly resampled) data."""
        X = np.asarray(X)
        y = np.asarray(y)
        n_samples = len(y)

        self.trees = []
        for _ in range(self.n_estimators):
            if self.bootstrap:
                # Draw n_samples rows with replacement (bootstrap sample).
                idx = np.random.choice(n_samples, size=n_samples, replace=True)
                X_fit, y_fit = X[idx], y[idx]
            else:
                # No bootstrapping: every tree sees the full training set.
                X_fit, y_fit = X, y

            tree = MyCARTDecisionTree(
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                n_features=self.max_features,
            )
            tree.fit(X_fit, y_fit)
            self.trees.append(tree)

    def predict(self, X):
        """Predict by majority vote over the per-tree predictions."""
        X = np.asarray(X)
        # votes has shape [n_samples, n_estimators]: one row of votes per sample.
        votes = np.array([tree.predict(X) for tree in self.trees]).T

        def _majority(row):
            # Most frequent vote; np.unique sorts, so ties favor the
            # smallest label (first argmax).
            vals, counts = np.unique(row, return_counts=True)
            return vals[np.argmax(counts)]

        return np.array([_majority(row) for row in votes])

###############################################################################
# 5. 测试示例 (以简单的二分类数据集为例)
###############################################################################
if __name__ == "__main__":
    # Build a toy 2-D binary-classification dataset: two Gaussian blobs.
    np.random.seed(42)
    n_samples = 200
    half = n_samples // 2

    X0 = np.random.normal(loc=[2, 2], scale=1, size=(half, 2))
    y0 = np.zeros(half, dtype=int)
    X1 = np.random.normal(loc=[6, 6], scale=1, size=(half, 2))
    y1 = np.ones(half, dtype=int)

    X_data = np.vstack([X0, X1])
    y_data = np.concatenate([y0, y1])

    # Shuffle the samples.
    shuffle_idx = np.random.permutation(n_samples)
    X_data, y_data = X_data[shuffle_idx], y_data[shuffle_idx]

    # Hold out 30% of the data for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(
        X_data, y_data, test_size=0.3, random_state=42
    )

    print("Train shape:", X_train.shape, "Test shape:", X_test.shape)

    # 1) A single CART tree (max_depth=3).
    single_tree = MyCARTDecisionTree(max_depth=3)
    single_tree.fit(X_train, y_train)
    acc_tree = np.mean(single_tree.predict(X_test) == y_test)
    print("Single CART accuracy:", acc_tree)

    # 2) A random forest (10 trees, 1 feature per split).
    rf_clf = MyRandomForest(
        n_estimators=10,
        max_depth=3,
        min_samples_split=2,
        max_features=1,  # each split considers a single random feature
        bootstrap=True,
    )
    rf_clf.fit(X_train, y_train)
    acc_rf = np.mean(rf_clf.predict(X_test) == y_test)
    print("RandomForest accuracy:", acc_rf)
