import numpy as np
from sklearn.model_selection import train_test_split

##################################################
# 1) 定义树节点结构
##################################################
class Node:
    """
    Node of a binary decision tree.

    Attributes:
        feature_index: index of the feature used for the split (int), or None.
        threshold: split threshold (float), or None.
        left, right: child Nodes, or None.
        label: class label (-1 or +1) when this node is a leaf, else None.
    """

    def __init__(self, feature_index=None, threshold=None,
                 left=None, right=None, label=None):
        # A leaf carries only `label`; an internal node carries the
        # split description plus its two children.
        self.label = label
        self.feature_index = feature_index
        self.threshold = threshold
        self.left = left
        self.right = right


##################################################
# 2) 帮助函数
##################################################

def majority_label(y):
    """
    Return the most frequent label in `y`.

    Works for {-1, +1} and generalizes to multiclass. Ties resolve to the
    smallest label because np.unique returns values in sorted order and
    argmax keeps the first maximum.
    """
    values, freqs = np.unique(y, return_counts=True)
    winner = values[freqs.argmax()]
    return winner

def gini_impurity(y):
    """
    Gini impurity: Gini(D) = 1 - sum_k(p_k^2).

    Works for any label set ({-1, +1} here, but also multiclass).

    Args:
        y: 1-D array of class labels.

    Returns:
        float impurity in [0, 1). An empty array is treated as a
        trivially pure node and returns 0.0 instead of dividing by zero.
    """
    n = len(y)
    if n == 0:
        # Guard: the original would divide by zero on an empty node.
        return 0.0
    _, counts = np.unique(y, return_counts=True)
    p = counts / n
    return 1.0 - np.sum(p ** 2)

def find_best_split(X, y):
    """
    Exhaustively search every feature and every observed feature value for
    the split `x[feature] <= threshold` with the lowest weighted Gini.

    No sample weighting is applied here; rewrite if weighted splits are
    needed.

    Returns:
        (best_feature, best_threshold), or (None, None) when no valid split
        exists (a single sample, a pure node, or every candidate split
        leaves one side empty).
    """
    n_samples, n_features = X.shape
    # Nothing to split: one sample or an already-pure node.
    if n_samples <= 1 or len(np.unique(y)) == 1:
        return None, None

    lowest = float("inf")
    best = (None, None)

    for j in range(n_features):
        column = X[:, j]
        for t in np.unique(column):
            left_labels = y[column <= t]
            right_labels = y[column > t]

            # A split that leaves either side empty is useless.
            if len(left_labels) == 0 or len(right_labels) == 0:
                continue

            w_l = len(left_labels) / n_samples
            w_r = len(right_labels) / n_samples
            score = w_l * gini_impurity(left_labels) + w_r * gini_impurity(right_labels)

            if score < lowest:
                lowest = score
                best = (j, t)

    return best

def build_tree(X, y, max_depth, min_samples_split, depth=0):
    """
    Recursively grow a binary classification tree for labels in {-1, +1}.

    A majority-vote leaf is produced when: max depth is reached, fewer than
    `min_samples_split` samples remain, the node is pure, or no valid split
    can be found.
    """
    must_stop = (depth >= max_depth
                 or len(y) < min_samples_split
                 or len(np.unique(y)) == 1)
    if must_stop:
        return Node(label=majority_label(y))

    feature_index, threshold = find_best_split(X, y)
    if feature_index is None:
        # No usable split: fall back to a leaf.
        return Node(label=majority_label(y))

    goes_left = X[:, feature_index] <= threshold
    goes_right = X[:, feature_index] > threshold

    return Node(
        feature_index=feature_index,
        threshold=threshold,
        left=build_tree(X[goes_left], y[goes_left],
                        max_depth, min_samples_split, depth + 1),
        right=build_tree(X[goes_right], y[goes_right],
                         max_depth, min_samples_split, depth + 1),
    )

##################################################
# 3) CARTDecisionTree 类
##################################################
class CARTDecisionTree:
    """
    Minimal CART classifier for binary labels in {-1, +1}.

    Thin wrapper around the module-level `build_tree` / `Node` machinery.
    """
    def __init__(self, max_depth=1, min_samples_split=2):
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.root = None  # filled in by fit()

    def fit(self, X, y):
        """
        Grow the tree.

        Args:
            X: array-like, shape [n_samples, n_features].
            y: array-like, shape [n_samples], values in {-1, +1}.

        `np.asarray` accepts ndarrays, DataFrames/Series, and plain Python
        sequences alike (the previous `.values` fallback crashed on lists).
        """
        X = np.asarray(X)
        y = np.asarray(y)
        self.root = build_tree(X, y, self.max_depth, self.min_samples_split)

    def _predict_single(self, x, node):
        """Walk the tree from `node` down to a leaf for one sample `x`."""
        if node.label is not None:  # leaf reached
            return node.label
        if x[node.feature_index] <= node.threshold:
            return self._predict_single(x, node.left)
        return self._predict_single(x, node.right)

    def predict(self, X):
        """Return an array of predicted labels, one per row of X."""
        X = np.asarray(X)
        return np.array([self._predict_single(row, self.root) for row in X])


##################################################
# 4) AdaBoost 类 (二分类, 标签 ∈ {-1,+1})
##################################################
class AdaBoost:
    """
    Simple AdaBoost for binary classification with labels in {-1, +1},
    combining weak learners by weighted vote.

    NOTE: the weak learners are trained WITHOUT sample weights (the base
    estimator's plain `fit(X, y)` is used), so this is illustrative only —
    with a deterministic base learner every round produces the same model.
    """
    def __init__(self, base_estimator, n_estimators=10, learning_rate=1.0):
        # base_estimator: zero-argument factory returning a fresh model
        # with fit(X, y) and predict(X) -> {-1, +1}.
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.learning_rate = learning_rate

        self.models_ = []   # fitted model of each round
        self.alphas_ = []   # vote weight alpha of each round

    def fit(self, X, y):
        """
        Run `n_estimators` boosting rounds.

        Args:
            X: array-like, shape [n_samples, n_features].
            y: array-like, shape [n_samples], values in {-1, +1}.
        """
        if not isinstance(X, np.ndarray):
            X = X.values
        if not isinstance(y, np.ndarray):
            y = y.values

        n_samples = len(y)

        # Uniform initial sample weights.
        w = np.full(n_samples, 1.0 / n_samples)

        self.models_ = []
        self.alphas_ = []

        for m in range(self.n_estimators):
            # 1) Train a weak classifier (sample weights intentionally
            #    not passed through — see class docstring).
            model = self.base_estimator()
            model.fit(X, y)

            # 2) Predict on the training set => y_pred in {-1, +1}.
            y_pred = model.predict(X)

            # 3) Weighted error err_m = sum(w_i * [y_pred_i != y_i]),
            #    clamped away from 0 and 1 to keep log/division finite.
            misclassified = (y_pred != y)
            err_m = np.sum(w[misclassified])
            err_m = max(1e-9, min(err_m, 1 - 1e-9))

            # 4) alpha_m = 0.5 * ln((1 - err_m) / err_m), scaled.
            alpha_m = 0.5 * np.log((1 - err_m) / err_m)
            alpha_m *= self.learning_rate

            # 5) Reweight: w_i *= exp(-alpha_m * y_i * y_pred_i)
            #    (correct => y_i*y_pred_i = +1, wrong => -1).
            w *= np.exp(-alpha_m * y * y_pred)

            # 6) Normalize to a distribution.
            w /= np.sum(w)

            self.models_.append(model)
            self.alphas_.append(alpha_m)

    def predict(self, X):
        """
        Weighted vote: F(x) = sign(sum_m alpha_m * h_m(x)).

        Returns an array in {-1, +1}. A tied vote (sum exactly 0) is
        mapped to +1 — the previous np.sign(agg) leaked 0 out of the
        documented label set.
        """
        if not isinstance(X, np.ndarray):
            X = X.values
        agg = np.zeros(X.shape[0])

        for alpha_m, model in zip(self.alphas_, self.models_):
            y_pred = model.predict(X)  # in {-1, +1}
            agg += alpha_m * y_pred

        return np.where(agg >= 0.0, 1.0, -1.0)


##################################################
# 5) 测试示例: 构造标签=-1/+1 的简单数据
##################################################
if __name__ == "__main__":
    # Toy data: two Gaussian blobs,
    #   class -1 centered at (2, 2), class +1 centered at (6, 6).
    np.random.seed(42)
    n_samples = 200
    half = n_samples // 2

    # vstack evaluates its arguments left-to-right, so the RNG call order
    # matches drawing the two blobs one after the other.
    X_data = np.vstack([
        np.random.normal(loc=[2, 2], scale=1.0, size=(half, 2)),
        np.random.normal(loc=[6, 6], scale=1.0, size=(half, 2)),
    ])
    y_data = np.concatenate([np.full(half, -1), np.full(half, +1)])

    # Shuffle, then split into train/test.
    order = np.random.permutation(n_samples)
    X_data, y_data = X_data[order], y_data[order]

    X_train, X_test, y_train, y_test = train_test_split(
        X_data, y_data, test_size=0.3, random_state=42)

    # Baseline: a single depth-2 CART.
    single_tree = CARTDecisionTree(max_depth=2)
    single_tree.fit(X_train, y_train)
    acc_tree = np.mean(single_tree.predict(X_test) == y_test)
    print("Single CART accuracy:", acc_tree)

    # AdaBoost over 10 depth-1 trees (decision stumps).
    def stump():
        return CARTDecisionTree(max_depth=1)

    ada = AdaBoost(base_estimator=stump, n_estimators=10, learning_rate=1.0)
    ada.fit(X_train, y_train)
    acc_ada = np.mean(ada.predict(X_test) == y_test)
    print("AdaBoost accuracy:", acc_ada)
