# AdaBoost Classifier
import numpy as np
import copy
import logging
from datetime import datetime

from src.models.supmodel import SupModel


class AdaBoost(SupModel):
    """
    @brief Multi-class AdaBoost (SAMME algorithm) implementation.

    @details
    Implements the multi-class AdaBoost algorithm (SAMME), which boosts the
    performance of a weak base classifier (e.g. a decision stump or a linear
    SVM) by iteratively reweighting the training samples.
    The base estimator must support a ``sample_weight`` parameter in ``fit``
    and expose a ``name`` attribute (used to build this model's name).
    """
    def __init__(self, base_estimator, n_estimators=20):
        """
        @brief Initialize the AdaBoost classifier.
        @param base_estimator Base classifier object; must implement
               fit(X, y, sample_weight=...) and predict(X).
        @param n_estimators Number of boosting rounds (weak learners), default 20.
        """
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.models = []       # fitted weak learners kept by fit(), in order
        self.alphas = []       # SAMME weight for each kept learner
        self.classes_ = None   # unique class labels, set by fit()
        self.name = f"Ada_{self.base_estimator.name}_{self.n_estimators}"
        self.loss = []         # weighted training error of each round

    def fit(self, X_train, y_train):
        """
        @brief Train the AdaBoost model (SAMME algorithm).
        @param X_train Training features, shape=(n_samples, n_features)
        @param y_train Training labels, shape=(n_samples,)
        @exception ValueError Raised when a round's base classifier error is
                   too large for SAMME to continue (err >= 1 - 1/K).
        """
        n = X_train.shape[0]
        self.classes_ = np.unique(y_train)
        K = len(self.classes_)
        w = np.ones(n) / n  # start with uniform sample weights
        self.models = []
        self.alphas = []
        self.loss = []
        logger = logging.getLogger(__name__)
        for t in range(self.n_estimators):
            model = self._clone_estimator()
            model.fit(X_train, y_train, sample_weight=w)
            y_pred = model.predict(X_train)
            # Weighted training error of this round's learner.
            err = np.sum(w * (y_pred != y_train)) / np.sum(w)
            self.loss.append(err)

            # Log progress every 5 rounds and on the final round.
            if (t + 1) % 5 == 0 or (t + 1) == self.n_estimators:
                now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                logger.info(f"[{now}] AdaBoost 第{t+1}轮基分类器训练结束，误差: {err:.4f}")

            # SAMME requires err < 1 - 1/K, i.e. better than random guessing.
            if err >= 1 - 1.0 / K:
                raise ValueError(f"第{t+1}轮基分类器误差过大（err={err:.4f}），无法继续提升。")
            if err == 0:
                # Perfect fit: keep this learner and stop boosting.
                # (Bug fix: the original broke out BEFORE appending, so a
                # perfect first-round learner left the ensemble empty and
                # predict() degenerated to always returning classes_[0].)
                # alpha=1.0 follows sklearn's SAMME early-termination
                # convention; with a single learner any positive weight
                # yields the same argmax vote.
                self.models.append(model)
                self.alphas.append(1.0)
                break
            # SAMME learner weight: standard AdaBoost term plus log(K-1)
            # multi-class correction.
            alpha = np.log((1 - err) / err) + np.log(K - 1)
            # Upweight the misclassified samples, then renormalize.
            w *= np.exp(alpha * (y_pred != y_train))
            w /= np.sum(w)
            self.models.append(model)
            self.alphas.append(alpha)

    def predict(self, X_test):
        """
        @brief Predict labels for the input data.
        @param X_test Test features, shape=(n_samples, n_features)
        @return Predicted labels, shape=(n_samples,)
        """
        # SAMME weighted vote: each learner adds its alpha to the score of
        # the class it predicts; the highest-scoring class wins.
        n_classes = len(self.classes_)
        pred = np.zeros((X_test.shape[0], n_classes))
        for alpha, model in zip(self.alphas, self.models):
            y_pred = model.predict(X_test)
            for idx, c in enumerate(self.classes_):
                pred[:, idx] += alpha * (y_pred == c)
        return self.classes_[np.argmax(pred, axis=1)]

    def _clone_estimator(self):
        """
        @brief Deep-copy the base estimator so each round trains a fresh model.
        @return A new, independent base estimator object.
        """
        return copy.deepcopy(self.base_estimator)