from sklearn.base import clone
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import gc
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from lightgbm import early_stopping  
from catboost import CatBoostClassifier, Pool


def cross_validate_score(
    model, 
    data: pd.DataFrame, 
    cv=None,
    test_data: pd.DataFrame = None, 
    label: str = 'Response'
) -> tuple[list, np.ndarray, np.ndarray]:
    """
    Run early-stopped cross-validation and generate OOF / test predictions.

    Parameters:
        model: a classifier supporting early stopping
            (XGBClassifier / LGBMClassifier / CatBoostClassifier).
        data: training DataFrame containing both features and the target column.
        cv: cross-validation splitter; defaults to shuffled stratified 5-fold
            (random_state=42).
        test_data: external test DataFrame (required; raises ValueError if None).
        label: name of the target column inside ``data``.

    Returns:
        val_scores: per-fold validation AUC scores.
        val_predictions: out-of-fold predicted probabilities, aligned with
            the row order of ``data``.
        test_predictions: test-set probabilities averaged across folds.

    Raises:
        ValueError: if ``test_data`` is None, or ``model`` is not one of the
            three supported classifier types.
    """
    # Default CV strategy: stratified to preserve class balance per fold.
    if cv is None:
        cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

    if test_data is None:
        raise ValueError("必须提供测试数据集 test_data")

    # Split features and target on a copy so the caller's frame is untouched.
    X = data.copy()
    y = X.pop(label)

    # OOF predictions are written by positional fold index, so they align
    # with X's row order regardless of the original index.
    val_predictions = np.zeros(len(X))
    test_predictions = np.zeros(len(test_data))
    train_scores, val_scores = [], []

    for fold, (train_idx, val_idx) in enumerate(cv.split(X, y)):
        # Per-fold train/validation split (indexes reset for the boosters).
        X_train = X.iloc[train_idx].reset_index(drop=True)
        y_train = y.iloc[train_idx].reset_index(drop=True)
        X_val = X.iloc[val_idx].reset_index(drop=True)
        y_val = y.iloc[val_idx].reset_index(drop=True)

        # Fresh unfitted clone so folds never share fitted state.
        model_clone = clone(model)

        # Library-specific fitting: each booster has its own early-stopping
        # API and its own way to address the best iteration at predict time.
        if isinstance(model_clone, XGBClassifier):
            # XGBoost: early stopping must be enabled for `best_iteration`
            # to exist; default to 50 rounds (consistent with the other
            # branches) unless the caller already configured it.
            if model_clone.get_params().get('early_stopping_rounds') is None:
                model_clone.set_params(early_stopping_rounds=50)
            model_clone.fit(
                X_train, y_train,
                eval_set=[(X_val, y_val)],
                verbose=False
            )
            # `iteration_range` is half-open, so the upper bound must be
            # best_iteration + 1 — otherwise the best tree is excluded.
            pred_args = {'iteration_range': (0, model_clone.best_iteration + 1)}

        elif isinstance(model_clone, LGBMClassifier):
            # LightGBM: early stopping is supplied as a fit-time callback.
            model_clone.fit(
                X_train, y_train,
                eval_set=[(X_val, y_val)],
                eval_metric='auc',
                callbacks=[early_stopping(50)]
            )
            pred_args = {'num_iteration': model_clone.best_iteration_}

        elif isinstance(model_clone, CatBoostClassifier):
            # CatBoost: Pool objects reduce fit-time memory overhead.
            # (No Pool is needed for test_data — predict_proba takes the
            # DataFrame directly.)
            train_pool = Pool(X_train, y_train)
            val_pool = Pool(X_val, y_val)

            model_clone.fit(
                train_pool,
                eval_set=val_pool,
                early_stopping_rounds=50,
                verbose=False
            )
            # CatBoost shrinks to / predicts with the best iteration
            # automatically, so no extra predict arguments are needed.
            pred_args = {}

            # Release Pool memory promptly.
            del train_pool, val_pool

        else:
            raise ValueError("仅支持XGBoost、LightGBM、CatBoost分类器")

        # Positive-class probabilities for train / validation / test.
        train_preds = model_clone.predict_proba(X_train, **pred_args)[:, 1]
        val_preds = model_clone.predict_proba(X_val, **pred_args)[:, 1]
        test_preds = model_clone.predict_proba(test_data, **pred_args)[:, 1]

        # Record scores; accumulate the fold-averaged test prediction.
        val_predictions[val_idx] = val_preds
        train_scores.append(roc_auc_score(y_train, train_preds))
        val_scores.append(roc_auc_score(y_val, val_preds))
        test_predictions += test_preds / cv.get_n_splits()

        # Free the fold's model and data before the next iteration.
        del model_clone, X_train, y_train, X_val, y_val
        gc.collect()

        print(f'Fold {fold}: Val AUC = {val_scores[-1]:.5f}')

    # Train-vs-validation gap is a quick overfitting indicator.
    print(f'\n验证集平均 AUC: {np.mean(val_scores):.5f} ± {np.std(val_scores):.5f}')
    print(f'训练集平均 AUC: {np.mean(train_scores):.5f} ± {np.std(train_scores):.5f}')
    
    return val_scores, val_predictions, test_predictions