import numpy as np
import matplotlib.pyplot as plt
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier

from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import (
    roc_curve, auc,
    accuracy_score, f1_score, precision_score, recall_score,
    roc_auc_score, confusion_matrix
)
import joblib  # 推荐 joblib 而不是 pickle

from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.utils import resample
from sklearn.naive_bayes import BernoulliNB ,GaussianNB
from xgboost import XGBClassifier
from config.config import *
from utils.data_split import *
from utils.ensemble import AverageEnsemble

# =======================================
# Step 1. Data loading and feature selection
# =======================================

# =======================================
# Step 2. Model definitions and random seeds
# =======================================
# Common FPR grid for interpolated ROC averaging.
# NOTE(review): appears unused in this file — confirm before removing.
mean_fpr = np.linspace(0, 1, 100)

# Per-model seed lists from earlier experiments; superseded by the shared
# seeds_list below and kept only for reference.
# seeds_dict = {
#     "Logistic": [1761, 1844, 517, 565, 651, 1843, 248, 1462, 767, 855, 1447, 1651, 1800, 358, 989, 1318, 1320, 1659,
#                  1879],
#     "SVM": [113, 461, 651, 667, 1079, 1208, 1460, 1807, 1832, 1958, 1963],
#     "GBM": [111, 222, 333, 444, 555, 666, 777, 888, 999, 1001],
#     "NeuralNetwork": [101, 202, 303, 404, 505, 606, 707, 808, 909, 1010],
#     "RandomForest": [101, 202, 303, 404, 505, 606, 707, 808, 909, 1010],
#     "XGBoost": [121, 242, 363, 484, 605, 726, 847, 968, 1089, 1210],
#     "KNN": [33, 56, 228, 338, 339, 341, 458, 496, 592, 722, 770, 880, 942, 1006, 1688, 1810, 1905],
#     "AdaBoost": [151, 302, 453, 604, 755, 906, 1057, 1208, 1359, 1510],
#     "LightGBM": [131, 262, 393, 524, 655, 786, 917, 1048, 1179, 1310],
#     "CatBoost": [141, 282, 423, 564, 705, 846, 987, 1128, 1269, 1410],
#     "BNB": [264, 386, 439, 469, 517, 585, 830, 969, 1200, 1930, 1963]
# }

# Random seeds shared by all models: each model is cross-validated once per seed.
seeds_list = [1761, 1844, 517, 565, 651, 1843, 248, 1462, 767, 855, 1447, 1651, 1800, 358, 989, 1318, 1320, 1659,
                 1879]
# Model zoo: each entry pairs a base estimator with a GridSearchCV parameter
# grid. Keys are prefixed "clf__" to address the classifier step of the
# Pipeline built in train_and_collect_preds. Commented-out entries are
# alternatives that were tried and disabled.
models = {
    "Logistic": {
        "estimator": LogisticRegression(solver="liblinear", max_iter=1000),
        "param_grid": [
            {'clf__penalty': ['l1', 'l2'], 'clf__C': [0.1, 1], 'clf__solver': ['liblinear'], 'clf__max_iter': [200]}
        ]
    },
    # "SVM": {
    #     "estimator": SVC(probability=True),
    #     "param_grid": {
    #         'clf__C': [0.1, 1],
    #         'clf__kernel': ['linear', 'rbf'],
    #         'clf__gamma': ['scale']
    #     }
    # },
    # "GBM": {
    #     "estimator": GradientBoostingClassifier(random_state=42),
    #     "param_grid": {
    #         'clf__n_estimators': [100, 200],
    #         'clf__learning_rate': [0.05, 0.1],
    #         'clf__max_depth': [3, 5]
    #     }
    # },
    "MLP": {
        "estimator": MLPClassifier(max_iter=1000, random_state=42),
        "param_grid": {
            'clf__hidden_layer_sizes': [(100,), (100, 50)],
            'clf__activation': ['relu'],
            'clf__solver': ['adam'],
            'clf__alpha': [1e-4],
            'clf__learning_rate_init': [0.001],
            'clf__max_iter': [200]
        }
    },
    # "RandomForest": {
    #     "estimator": RandomForestClassifier(random_state=42),
    #     "param_grid": {
    #         "clf__n_estimators": [100, 200],
    #         "clf__max_depth": [None, 10],
    #         "clf__min_samples_split": [2, 5]
    #     }
    # },
    # "XGBoost": {
    #     "estimator": XGBClassifier(use_label_encoder=False, eval_metric='logloss', random_state=42),
    #     "param_grid": {
    #         "clf__n_estimators": [100, 200],
    #         "clf__max_depth": [3, 6],
    #         "clf__learning_rate": [0.05, 0.1]
    #     }
    # },
    "KNN": {
        "estimator": KNeighborsClassifier(),
        "param_grid": {
            "clf__n_neighbors": [3, 5, 7],
            "clf__weights": ["uniform", "distance"]
        }
    },
    "AdaBoost": {
        "estimator": AdaBoostClassifier(random_state=42),
        "param_grid": {
            'clf__n_estimators': [50, 100],
            'clf__learning_rate': [0.1, 1.0],
            # NOTE(review): 'base_estimator' was renamed to 'estimator' in
            # scikit-learn 1.2 and removed in 1.4; 'SAMME.R' is likewise
            # deprecated — confirm the pinned sklearn version supports these.
            'clf__base_estimator': [DecisionTreeClassifier(max_depth=1)],
            'clf__algorithm': ['SAMME.R']
        }
    },
    # "LightGBM": {
    #     "estimator": LGBMClassifier(random_state=42),
    #     "param_grid": {
    #         "clf__num_leaves": [31, 50],
    #         "clf__learning_rate": [0.05, 0.1],
    #         "clf__n_estimators": [100, 200]
    #     }
    # },
    "CatBoost": {
        "estimator": CatBoostClassifier(verbose=0, random_state=42),
        "param_grid": {
            'clf__iterations': [100, 200],
            'clf__learning_rate': [0.05, 0.1],
            'clf__depth': [4, 6]
        }
    },
    # "BNB": {
    #     "estimator": BernoulliNB(),
    #     "param_grid": {
    #         'clf__alpha': [0.1, 1.0],
    #         'clf__binarize': [0.0, 0.5]
    #     }
    # },
    #   "GNB": {
    #     "estimator": GaussianNB(),
    #     "param_grid": {
    #         'clf__var_smoothing': [1e-9, 1e-8, 1e-7]  # tunable parameter
    #     }
    # }
}

from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, confusion_matrix

def find_best_threshold_by_youden(y_true, y_scores):
    """Return the decision threshold that maximizes Youden's J statistic (TPR - FPR)."""
    fpr, tpr, thresholds = roc_curve(y_true, y_scores)
    best_ix = int(np.argmax(tpr - fpr))
    return thresholds[best_ix]

def compute_metrics(y_true, y_scores, threshold):
    """Threshold the scores and compute binary-classification metrics.

    Parameters
    ----------
    y_true : array-like of 0/1 ground-truth labels.
    y_scores : array-like of predicted probabilities for the positive class.
    threshold : float; scores >= threshold are predicted positive.

    Returns
    -------
    tuple
        (accuracy, sensitivity, specificity, PPV, NPV, F1). Any ratio with a
        zero denominator is reported as 0, matching sklearn's zero_division
        behaviour — and, unlike the previous 2x2 `confusion_matrix(...).ravel()`
        unpack, this also works when only one class is present.
    """
    y_true = np.asarray(y_true).astype(int)
    y_pred = (np.asarray(y_scores) >= threshold).astype(int)
    # Confusion counts computed directly; avoids the ValueError the old
    # ravel()-unpack raised on a degenerate (1x1) confusion matrix.
    tp = int(np.sum((y_true == 1) & (y_pred == 1)))
    tn = int(np.sum((y_true == 0) & (y_pred == 0)))
    fp = int(np.sum((y_true == 0) & (y_pred == 1)))
    fn = int(np.sum((y_true == 1) & (y_pred == 0)))
    total = tp + tn + fp + fn
    acc  = (tp + tn) / total if total > 0 else 0
    sens = tp / (tp + fn) if (tp + fn) > 0 else 0  # recall / TPR
    spec = tn / (tn + fp) if (tn + fp) > 0 else 0
    ppv  = tp / (tp + fp) if (tp + fp) > 0 else 0
    npv  = tn / (tn + fn) if (tn + fn) > 0 else 0
    f1   = 2 * tp / (2 * tp + fp + fn) if (2 * tp + fp + fn) > 0 else 0
    return acc, sens, spec, ppv, npv, f1



def main():
    def bootstrap_metric_ci(y_true, y_scores, metric_fn, threshold=None, n_bootstraps=200, ci=0.95, random_state=42):
        """Bootstrap a (lower, upper) confidence interval for a binary-classification metric.

        Parameters
        ----------
        y_true : array-like of ground-truth labels (numpy-indexable).
        y_scores : array-like of predicted probabilities.
        metric_fn : callable returning a single score;
            called as metric_fn(y_true, y_scores, threshold) when a threshold
            is given, else metric_fn(y_true, y_scores).
        threshold : optional classification threshold forwarded to metric_fn.
        n_bootstraps : number of resamples with replacement.
        ci : confidence level for the percentile interval.
        random_state : seed of the resampling RNG.

        Resamples that contain only a single class are skipped.
        """
        rng = np.random.RandomState(random_state)
        n = len(y_true)
        scores = []
        for _ in range(n_bootstraps):
            idx = rng.choice(np.arange(n), size=n, replace=True)
            yb, sb = y_true[idx], y_scores[idx]
            if np.unique(yb).size < 2:
                continue  # metric undefined on a one-class resample
            args = (yb, sb) if threshold is None else (yb, sb, threshold)
            scores.append(metric_fn(*args))
        half_alpha = (1 - ci) / 2
        return (np.percentile(scores, 100 * half_alpha),
                np.percentile(scores, 100 * (1 - half_alpha)))
    # ========== Confidence-interval helper for AUC ==========
    def bootstrap_auc_ci(y_true, y_scores, n_bootstraps=200, ci=0.95, random_state=42):
        """Bootstrap a (lower, upper) percentile confidence interval for the ROC AUC.

        Resamples (y_true, y_scores) with replacement n_bootstraps times and
        skips degenerate resamples containing a single class, for which the
        AUC is undefined.
        """
        rng = np.random.RandomState(random_state)
        n = len(y_true)
        aucs = []
        for _ in range(n_bootstraps):
            idx = rng.choice(np.arange(n), size=n, replace=True)
            yb = y_true[idx]
            if np.unique(yb).size < 2:
                continue  # AUC undefined on a one-class resample
            fpr, tpr, _ = roc_curve(yb, y_scores[idx])
            aucs.append(auc(fpr, tpr))
        half_alpha = (1 - ci) / 2
        return (np.percentile(aucs, 100 * half_alpha),
                np.percentile(aucs, 100 * (1 - half_alpha)))

    # ========== Train models and collect out-of-fold validation predictions ==========
    def train_and_collect_preds(train_df, models, seeds_list, n_augments=1, noise_std=0.01):
        """Grid-search, cross-validate and fit every configured model.

        For each model and each seed: tune hyper-parameters on the full
        training set, then run 5-fold stratified CV, augmenting each training
        fold with Gaussian-jittered copies, and record every fitted pipeline
        plus the out-of-fold predicted probabilities.

        Returns
        -------
        model_pipelines : dict model name -> list of fitted Pipelines.
        train_preds_all_models : dict name -> seed -> fold -> proba array.
        train_trues_all_models : dict name -> seed -> fold -> label array.

        Side effects: persists the fitted imputer and scaler under
        weight_result_path/ensemble_model.
        """
        model_pipelines = {name: [] for name in models.keys()}
        train_preds_all_models = {}
        train_trues_all_models = {}
        y_train = train_df[target_column]
        X_train = train_df.drop(columns=exclude_columns)
        # Median imputation, fitted on the training set only and persisted so
        # the test set can later be transformed with the same statistics.
        imp = SimpleImputer(strategy="median")
        X_train_imputed = pd.DataFrame(
            imp.fit_transform(X_train),
            columns=X_train.columns,
            index=X_train.index
        )
        save_path = opj(weight_result_path, "ensemble_model")
        md(save_path)
        joblib.dump(imp, opj(save_path, 'imputer_median.pkl'))
        # Standardization handled separately: only models in need_standard_set
        # receive the scaled feature matrix.
        scaler = StandardScaler()
        X_train_scalered = pd.DataFrame(
            scaler.fit_transform(X_train_imputed),
            columns=X_train.columns,
            index=X_train.index
        )
        joblib.dump(scaler, opj(save_path, 'scaler_standard.pkl'))
        for name, cfg in models.items():
            X_train_process = X_train_imputed
            if name in need_standard_set:
                X_train_process = X_train_scalered
            train_preds_all_models[name] = {}
            train_trues_all_models[name] = {}

            print(f"\n>> Running {name} with {len(seeds_list)} seeds")

            for seed in seeds_list:
                train_preds_all_models[name][seed] = {}
                train_trues_all_models[name][seed] = {}

                # Grid-search the best hyper-parameters on the full training data.
                base_pipe = Pipeline([("scaler", StandardScaler()), ("clf", cfg["estimator"])])
                gs = GridSearchCV(base_pipe, cfg["param_grid"], cv=5, scoring="roc_auc", n_jobs=-1)
                gs.fit(X_train_process, y_train)
                best_params = gs.best_params_

                cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
                fold_idx = 0

                for tr_idx, va_idx in cv.split(X_train_process, y_train):
                    X_tr, X_va = X_train_process.iloc[tr_idx], X_train_process.iloc[va_idx]
                    y_tr, y_va = y_train.iloc[tr_idx], y_train.iloc[va_idx]

                    # Data augmentation: append Gaussian-jittered copies of the
                    # training fold. A dedicated RandomState draws fresh noise
                    # each round; the previous per-iteration np.random.seed(seed)
                    # produced identical copies whenever n_augments > 1 and
                    # clobbered the global RNG state as a side effect.
                    rng = np.random.RandomState(seed)
                    X_list = [X_tr.values]; y_list = [y_tr.values]
                    for _ in range(n_augments):
                        noise = rng.normal(0, noise_std, X_tr.shape)
                        X_list.append(X_tr.values + noise)
                        y_list.append(y_tr.values.copy())
                    Xtr2 = pd.DataFrame(np.vstack(X_list), columns=X_tr.columns)
                    ytr2 = np.hstack(y_list)

                    # Re-instantiate the estimator with the tuned parameters
                    # (strip the "clf__" pipeline prefix first).
                    param_dict = {k.split("__")[1]: v for k, v in best_params.items()}
                    base_params = cfg["estimator"].get_params()
                    updated_params = base_params.copy()
                    updated_params.update(param_dict)
                    est_class = cfg["estimator"].__class__
                    est = est_class(**updated_params)

                    model = Pipeline([("scaler", StandardScaler()), ("clf", est)])
                    model.fit(Xtr2, ytr2)

                    # Keep the fitted pipeline for later ensembling.
                    model_pipelines[name].append(model)

                    # Out-of-fold predicted probability of the positive class.
                    proba = model.predict_proba(X_va)[:, 1]
                    train_preds_all_models[name][seed][fold_idx] = proba
                    train_trues_all_models[name][seed][fold_idx] = y_va.values

                    fold_idx += 1

        return model_pipelines, train_preds_all_models, train_trues_all_models

    # ========== Training-set ensemble prediction (probability mean) ==========
    def get_ensemble_cv_train_preds(train_preds_all_models, train_trues_all_models):
        """Average the per-model CV predictions fold-by-fold and score the ensemble.

        For every (seed, fold) pair the predicted probabilities of all models
        are averaged; the averaged predictions and labels are concatenated in
        seed-then-fold order, the ROC AUC is computed with a bootstrap CI, and
        (preds, labels, fpr, tpr, auc, ci_low, ci_up) is returned.
        """
        model_names = list(train_preds_all_models)
        seeds = list(train_preds_all_models[model_names[0]])
        n_folds = len(train_preds_all_models[model_names[0]][seeds[0]])

        # Fixed (seed, fold) ordering drives both averaging and concatenation.
        fold_keys = [(seed, fold) for seed in seeds for fold in range(n_folds)]

        labels_per_fold = {
            (seed, fold): train_trues_all_models[model_names[0]][seed][fold]
            for seed, fold in fold_keys
        }
        preds_per_fold = {
            (seed, fold): [train_preds_all_models[name][seed][fold] for name in model_names]
            for seed, fold in fold_keys
        }

        avg_preds_per_fold = {
            key: np.vstack(model_preds).mean(axis=0)
            for key, model_preds in preds_per_fold.items()
        }

        all_avg_preds = np.concatenate([avg_preds_per_fold[key] for key in fold_keys])
        all_labels = np.concatenate([labels_per_fold[key] for key in fold_keys])

        fpr, tpr, _ = roc_curve(all_labels, all_avg_preds)
        roc_auc = auc(fpr, tpr)
        ci_low, ci_up = bootstrap_auc_ci(all_labels, all_avg_preds)

        print(f" Ensemble Training CV AUC: {roc_auc:.4f} (95% CI [{ci_low:.4f}, {ci_up:.4f}])")
        return all_avg_preds, all_labels, fpr, tpr, roc_auc, ci_low, ci_up

    # ========== Plot per-model and ensemble ROC curves ==========
    def plot_roc_curves(train_preds_all_models, train_trues_all_models, model_pipelines, test_df, seeds_list, save_prefix="roc"):
        """Draw train/test ROC curves, pick a Youden threshold, save figures and metrics.

        Side effects: writes three PDF figures and two metric CSVs under
        final_model_ensemble_result_path, and persists the AverageEnsemble
        model under ensemble_model_weight_path.
        """
        plt.figure(figsize=(10, 8))

        # Training-set ROC: one curve per model
        for name in model_pipelines.keys():
            # Pool this model's out-of-fold predictions across all seeds/folds
            all_preds = []
            all_labels = []
            for seed in seeds_list:
                for fold_preds in train_preds_all_models[name][seed].values():
                    all_preds.append(fold_preds)
                for fold_trues in train_trues_all_models[name][seed].values():
                    all_labels.append(fold_trues)
            all_preds = np.concatenate(all_preds)
            all_labels = np.concatenate(all_labels)

            fpr, tpr, _ = roc_curve(all_labels, all_preds)
            roc_auc = auc(fpr, tpr)
            ci_low, ci_up = bootstrap_auc_ci(all_labels, all_preds)

            plt.plot(fpr, tpr, label=f"{name} Train AUC={roc_auc:.3f} [{ci_low:.3f},{ci_up:.3f}]")

        # Training-set ensemble curve (probability mean across models)
        train_avg_preds, train_labels, train_fpr, train_tpr, train_auc, train_ci_low, train_ci_up = get_ensemble_cv_train_preds(
            train_preds_all_models, train_trues_all_models)

        # The decision threshold is chosen once on the training ensemble
        # predictions and reused unchanged on the test set below.
        best_thresh = find_best_threshold_by_youden(train_labels, train_avg_preds)
        print(f">> Best threshold from training set (Youden): {best_thresh:.3f}")
        plt.plot(train_fpr, train_tpr, linewidth=2, color="black",
                label=f"Ensemble Train AUC={train_auc:.3f} [{train_ci_low:.3f},{train_ci_up:.3f}]")

        plt.plot([0, 1], [0, 1], "--", color="red")
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate")
        plt.title("ROC Curve on Training Set")
        plt.legend(loc="lower right")
        plt.grid(True)
        plt.savefig(opj(final_model_ensemble_result_path,f"{save_prefix}_train.pdf"))
        plt.show()

        # Training-set metrics at the chosen threshold, with bootstrap CIs
        acc, sens, spec, ppv, npv, f1 = compute_metrics(train_labels, train_avg_preds, best_thresh)

        acc_ci = bootstrap_metric_ci(train_labels, train_avg_preds,
                                    lambda y, s, t: compute_metrics(y, s, t)[0],
                                    threshold=best_thresh)
        sens_ci = bootstrap_metric_ci(train_labels, train_avg_preds,
                                    lambda y, s, t: compute_metrics(y, s, t)[1],
                                    threshold=best_thresh)
        spec_ci = bootstrap_metric_ci(train_labels, train_avg_preds,
                                    lambda y, s, t: compute_metrics(y, s, t)[2],
                                    threshold=best_thresh)
        ppv_ci = bootstrap_metric_ci(train_labels, train_avg_preds,
                                    lambda y, s, t: compute_metrics(y, s, t)[3],
                                    threshold=best_thresh)
        npv_ci = bootstrap_metric_ci(train_labels, train_avg_preds,
                                    lambda y, s, t: compute_metrics(y, s, t)[4],
                                    threshold=best_thresh)
        f1_ci = bootstrap_metric_ci(train_labels, train_avg_preds,
                                    lambda y, s, t: compute_metrics(y, s, t)[5],
                                    threshold=best_thresh)

        acc_str = f"{acc:.3f} [{acc_ci[0]:.3f},{acc_ci[1]:.3f}]"
        sens_str = f"{sens:.3f} [{sens_ci[0]:.3f},{sens_ci[1]:.3f}]"
        spec_str = f"{spec:.3f} [{spec_ci[0]:.3f},{spec_ci[1]:.3f}]"
        ppv_str = f"{ppv:.3f} [{ppv_ci[0]:.3f},{ppv_ci[1]:.3f}]"
        npv_str = f"{npv:.3f} [{npv_ci[0]:.3f},{npv_ci[1]:.3f}]"
        f1_str = f"{f1:.3f} [{f1_ci[0]:.3f},{f1_ci[1]:.3f}]"
        auc_str = f"{train_auc:.3f} [{train_ci_low:.3f},{train_ci_up:.3f}]"

        train_metrics_dict = {
            'Threshold': [best_thresh],
            'Accuracy': [acc_str],
            'Sensitivity': [sens_str],
            'Specificity': [spec_str],
            'PPV': [ppv_str],
            'NPV': [npv_str],
            'F1': [f1_str],
            'AUC': [auc_str]
        }

        train_metrics_df = pd.DataFrame(train_metrics_dict)
        train_metrics_df.to_csv(opj(final_model_ensemble_result_path, "train_metrics.csv"), index=False)


        # Test-set ROC: transform features with the imputer/scaler persisted
        # during training so the test set uses training-set statistics.
        y_test = test_df[target_column]
        X_test = test_df.drop(columns=exclude_columns)
        path = opj(weight_result_path,"ensemble_model")    

        imp = joblib.load(opj(path,'imputer_median.pkl'))
        X_test_imputed = pd.DataFrame(
            imp.transform(X_test),
            columns=X_test.columns,
            index=X_test.index
        )
        # Standardization handled separately (only some models need it)
        scaler = joblib.load(opj(path,'scaler_standard.pkl'))
        X_test_scalered = pd.DataFrame(
            scaler.transform(X_test_imputed),
            columns=X_test.columns,
            index=X_test.index
        )

        plt.figure(figsize=(10, 8))
        for name in model_pipelines.keys():
            X_process = X_test_imputed
            if name in need_standard_set:
                X_process = X_test_scalered
            # Average the predictions of this model's fitted pipelines
            proba_list = []
            for mdl in model_pipelines[name]:
                proba_list.append(mdl.predict_proba(X_process)[:, 1])
            avg_proba = np.mean(proba_list, axis=0)

            fpr, tpr, _ = roc_curve(y_test, avg_proba)
            roc_auc = auc(fpr, tpr)
            ci_low, ci_up = bootstrap_auc_ci(y_test.values, avg_proba)

            plt.plot(fpr, tpr, label=f"{name} Test AUC={roc_auc:.3f} [{ci_low:.3f},{ci_up:.3f}]")

 
        # Test-set ensemble: persist the ensemble model, then score it.
        # NOTE(review): predict_proba_by_dif is fed the raw X_test — presumably
        # AverageEnsemble applies its own preprocessing internally; verify.
        ensemble_model = AverageEnsemble(model_pipelines)

        md(ensemble_model_weight_path)
        joblib.dump(ensemble_model, opj(ensemble_model_weight_path,'model.joblib'))
        test_avg_proba = ensemble_model.predict_proba_by_dif(X_test)[:, 1]
        test_fpr, test_tpr, _ = roc_curve(y_test, test_avg_proba)
        test_auc = auc(test_fpr, test_tpr)
        test_ci_low, test_ci_up = bootstrap_auc_ci(y_test.values, test_avg_proba)
        plt.plot(test_fpr, test_tpr,  color="black", linewidth=2,
                label=f"Ensemble Test AUC={test_auc:.3f} [{test_ci_low:.3f},{test_ci_up:.3f}]")

        plt.plot([0, 1], [0, 1], "--", color="red")
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate")
        plt.title("ROC Curve on Test Set")
        plt.legend(loc="lower right")
        plt.grid(True)
        plt.savefig(opj(final_model_ensemble_result_path,f"{save_prefix}_test.pdf"))

        # Test-set metrics at the training-derived threshold, with bootstrap CIs
        acc, sens, spec, ppv, npv, f1 = compute_metrics(y_test.values, test_avg_proba, best_thresh)

        acc_ci = bootstrap_metric_ci(y_test.values, test_avg_proba,
                                    lambda y, s, t: compute_metrics(y, s, t)[0],
                                    threshold=best_thresh)
        sens_ci = bootstrap_metric_ci(y_test.values, test_avg_proba,
                                    lambda y, s, t: compute_metrics(y, s, t)[1],
                                    threshold=best_thresh)
        spec_ci = bootstrap_metric_ci(y_test.values, test_avg_proba,
                                    lambda y, s, t: compute_metrics(y, s, t)[2],
                                    threshold=best_thresh)
        ppv_ci = bootstrap_metric_ci(y_test.values, test_avg_proba,
                                    lambda y, s, t: compute_metrics(y, s, t)[3],
                                    threshold=best_thresh)
        npv_ci = bootstrap_metric_ci(y_test.values, test_avg_proba,
                                    lambda y, s, t: compute_metrics(y, s, t)[4],
                                    threshold=best_thresh)
        f1_ci = bootstrap_metric_ci(y_test.values, test_avg_proba,
                                    lambda y, s, t: compute_metrics(y, s, t)[5],
                                    threshold=best_thresh)
        acc_str = f"{acc:.3f} [{acc_ci[0]:.3f},{acc_ci[1]:.3f}]"
        sens_str = f"{sens:.3f} [{sens_ci[0]:.3f},{sens_ci[1]:.3f}]"
        spec_str = f"{spec:.3f} [{spec_ci[0]:.3f},{spec_ci[1]:.3f}]"
        ppv_str = f"{ppv:.3f} [{ppv_ci[0]:.3f},{ppv_ci[1]:.3f}]"
        npv_str = f"{npv:.3f} [{npv_ci[0]:.3f},{npv_ci[1]:.3f}]"
        f1_str = f"{f1:.3f} [{f1_ci[0]:.3f},{f1_ci[1]:.3f}]"
        auc_str = f"{test_auc:.3f} [{test_ci_low:.3f},{test_ci_up:.3f}]"

        # Save as a DataFrame
        test_metrics_dict = {
            'Threshold': [best_thresh],
            'Accuracy': [acc_str],
            'Sensitivity': [sens_str],
            'Specificity': [spec_str],
            'PPV': [ppv_str],
            'NPV': [npv_str],
            'F1': [f1_str],
            'AUC': [auc_str]
        }

        metrics_df = pd.DataFrame(test_metrics_dict)
        metrics_df.to_csv(opj(final_model_ensemble_result_path, "test_metrics.csv"), index=False)


        # Test-set ROC again, with curves and diagonal only (no labels/axes)
        plt.figure(figsize=(10, 8))

        for name in model_pipelines.keys():
            # Average the predictions of this model's fitted pipelines
            # NOTE(review): this pass predicts on the raw X_test, unlike the
            # labelled figure above which used the imputed/scaled features —
            # confirm whether that difference is intentional.
            proba_list = []
            for mdl in model_pipelines[name]:
                proba_list.append(mdl.predict_proba(X_test)[:, 1])
            avg_proba = np.mean(proba_list, axis=0)

            fpr, tpr, _ = roc_curve(y_test, avg_proba)
            roc_auc = auc(fpr, tpr)
            ci_low, ci_up = bootstrap_auc_ci(y_test.values, avg_proba)

            plt.plot(fpr, tpr)

        # Test-set ensemble curve
        plt.plot(test_fpr, test_tpr,  color="black", linewidth=2)

        plt.plot([0, 1], [0, 1], "--", color="red")
        # Strip all visual chrome for the unlabeled figure
        plt.axis('off')                # hide the axes
        plt.grid(False)                # hide the grid
        plt.title("")                  # no title

        # Remove the legend (avoids an empty legend box)
        legend = plt.gca().get_legend()
        if legend:
            legend.remove()

        # Remove the frame spines
        for spine in plt.gca().spines.values():
            spine.set_visible(False)
        plt.savefig(opj(final_model_ensemble_result_path,f"{save_prefix}_test_without_label.pdf"))


    # ========== Main workflow ==========
    # train_df / test_df are module-level globals prepared in the __main__
    # guard below; tune n_augments and noise_std as needed.
    md(final_model_ensemble_result_path)
    model_pipelines, train_preds_all_models, train_trues_all_models = train_and_collect_preds(train_df,models, seeds_list, n_augments=1, noise_std=0.01)
    plot_roc_curves(train_preds_all_models, train_trues_all_models, model_pipelines, test_df, seeds_list)


if __name__ =='__main__':
    # Load the signature-score table, split it train/test with the first
    # configured random state, then run the training/evaluation workflow.
    # NOTE: main() reads train_df / test_df as module-level globals.
    df = pd.read_csv(signature_score_csv_path)
    dp = DataSplitUtil(split_random_state_list[0])
    train_df, test_df = dp.get_train_test_df(df)
    main()
