import numpy as np
import matplotlib.pyplot as plt
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier

from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import (
    roc_curve, auc,
    recall_score,
    roc_auc_score, confusion_matrix, brier_score_loss
)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.utils import resample
from sklearn.naive_bayes import BernoulliNB ,GaussianNB
from xgboost import XGBClassifier
from utils.draw_pic import plot_mutil_model_roc, bootstrap_auc_ci
from config.config import *
from utils.data_split import *

# =======================================
# Step 1. Data loading and feature selection
# =======================================

# =======================================
# Step 2. Model definitions and random seeds
# =======================================
# Common FPR grid for interpolating/averaging ROC curves across folds.
mean_fpr = np.linspace(0, 1, 100)

# Random seeds for the repeated stratified-CV runs applied to every model.
# (A dead per-model seeds_dict that was commented out here has been removed.)
seeds_list = [1761, 1844, 517, 565, 651, 1843, 248, 1462, 767, 855, 1447, 1651,
              1800, 358, 989, 1318, 1320, 1659, 1879]

# Model zoo: maps a display name to a base estimator plus its GridSearchCV
# parameter grid.  Grid keys are prefixed "clf__" because each estimator is
# wrapped in a Pipeline whose final step is named "clf"
# (see train_and_collect_preds).
models = {
    "Logistic": {
        "estimator": LogisticRegression(solver="liblinear", max_iter=1000),
        "param_grid": [
            {'clf__penalty': ['l1', 'l2'], 'clf__C': [0.1, 1], 'clf__solver': ['liblinear'], 'clf__max_iter': [200]}
        ]
    },
    "SVM": {
        # probability=True is required for predict_proba downstream.
        "estimator": SVC(probability=True),
        "param_grid": {
            'clf__C': [0.1, 1],
            'clf__kernel': ['linear', 'rbf'],
            'clf__gamma': ['scale']
        }
    },
    "GBM": {
        "estimator": GradientBoostingClassifier(random_state=42),
        "param_grid": {
            'clf__n_estimators': [100, 200],
            'clf__learning_rate': [0.05, 0.1],
            'clf__max_depth': [3, 5]
        }
    },
    "MLP": {
        "estimator": MLPClassifier(max_iter=1000, random_state=42),
        "param_grid": {
            'clf__hidden_layer_sizes': [(100,), (100, 50)],
            'clf__activation': ['relu'],
            'clf__solver': ['adam'],
            'clf__alpha': [1e-4],
            'clf__learning_rate_init': [0.001],
            'clf__max_iter': [200]
        }
    },
    "RandomForest": {
        "estimator": RandomForestClassifier(random_state=42),
        "param_grid": {
            "clf__n_estimators": [100, 200],
            "clf__max_depth": [None, 10],
            "clf__min_samples_split": [2, 5]
        }
    },
    "XGBoost": {
        # NOTE(review): use_label_encoder is deprecated in xgboost >= 1.7 and
        # rejected by >= 2.0 — confirm the pinned xgboost version accepts it.
        "estimator": XGBClassifier(use_label_encoder=False, eval_metric='logloss', random_state=42),
        "param_grid": {
            "clf__n_estimators": [100, 200],
            "clf__max_depth": [3, 6],
            "clf__learning_rate": [0.05, 0.1]
        }
    },
    "KNN": {
        "estimator": KNeighborsClassifier(),
        "param_grid": {
            "clf__n_neighbors": [3, 5, 7],
            "clf__weights": ["uniform", "distance"]
        }
    },
    "AdaBoost": {
        "estimator": AdaBoostClassifier(random_state=42),
        "param_grid": {
            'clf__n_estimators': [50, 100],
            'clf__learning_rate': [0.1, 1.0],
            # NOTE(review): 'base_estimator' was renamed 'estimator' in
            # scikit-learn 1.2 and removed in 1.4; 'SAMME.R' was deprecated in
            # 1.4 and removed in 1.6 — confirm the pinned sklearn version.
            'clf__base_estimator': [DecisionTreeClassifier(max_depth=1)],
            'clf__algorithm': ['SAMME.R']
        }
    },
    "LightGBM": {
        "estimator": LGBMClassifier(random_state=42),
        "param_grid": {
            "clf__num_leaves": [31, 50],
            "clf__learning_rate": [0.05, 0.1],
            "clf__n_estimators": [100, 200]
        }
    },
    "CatBoost": {
        # verbose=0 silences CatBoost's per-iteration logging.
        "estimator": CatBoostClassifier(verbose=0, random_state=42),
        "param_grid": {
            'clf__iterations': [100, 200],
            'clf__learning_rate': [0.05, 0.1],
            'clf__depth': [4, 6]
        }
    },
    "BNB": {
        "estimator": BernoulliNB(),
        "param_grid": {
            'clf__alpha': [0.1, 1.0],
            'clf__binarize': [0.0, 0.5]
        }
    },
      "GNB": {
        "estimator": GaussianNB(),
        "param_grid": {
            'clf__var_smoothing': [1e-9, 1e-8, 1e-7]  # tunable parameter
        }
    }
}


# ========== Bootstrap confidence-interval helper ==========
def bootstrap_ci(y_true, y_pred, metric_func, n_bootstrap=1000, alpha=0.95):
    """Generic bootstrap confidence interval for an arbitrary metric.

    Resamples ``(y_true, y_pred)`` pairs with replacement ``n_bootstrap``
    times, evaluates ``metric_func(y_true_bs, y_pred_bs)`` on each resample,
    and returns ``(mean, lower, upper)`` where the bounds are the central
    ``alpha`` percentile interval of the bootstrap distribution.

    Resamples on which the metric cannot be computed (e.g. roc_auc_score on
    a single-class resample) are skipped.  Returns ``(nan, nan, nan)`` when
    every resample failed, instead of crashing on an empty array.
    """
    # Convert once up front instead of per resample.
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    n = len(y_true)
    stats = []
    for _ in range(n_bootstrap):
        idx = np.random.choice(n, size=n, replace=True)
        try:
            stats.append(metric_func(y_true[idx], y_pred[idx]))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; metric failures are still skipped.
            continue
    if not stats:
        # Every resample failed; np.percentile would raise on empty input.
        return np.nan, np.nan, np.nan
    stats = np.array(stats)
    lower = np.percentile(stats, (1 - alpha) / 2 * 100)
    upper = np.percentile(stats, (1 + alpha) / 2 * 100)
    return np.mean(stats), lower, upper


# ========== Train models and collect validation-set predictions ==========
def train_and_collect_preds(train_df, models, seeds_list,save_path, n_augments=1, noise_std=0.01):
    """Grid-search, fit and cross-validate every model over multiple seeds.

    For each model: run a 5-fold GridSearchCV on the full training data to
    pick hyper-parameters, then for each seed run a seeded StratifiedKFold,
    fit a fresh pipeline per fold (appending Gaussian-noise-augmented copies
    of the training fold) and record the out-of-fold predicted probabilities.

    Returns two nested dicts keyed ``[model_name][seed][fold_idx]`` holding
    the positive-class probabilities and the matching true labels.

    NOTE(review): the ``save_path`` argument is overwritten below with
    ``opj(weight_result_path, "model_select")``, so the caller's value is
    ignored — confirm which output location is intended.
    """
    train_preds_all_models = {}
    train_trues_all_models = {}
    y_train = train_df[target_column]
    X_train = train_df.drop(columns=exclude_columns)
    # Median imputer fitted on the full training set and persisted for inference.
    imp = SimpleImputer(strategy="median")
    X_train_imputed =  pd.DataFrame(
        imp.fit_transform(X_train),
        columns=X_train.columns,
        index=X_train.index
    )
    save_path = opj(weight_result_path,"model_select")  # NOTE(review): clobbers the save_path parameter
    md(save_path)
    joblib.dump(imp, opj(save_path,'imputer_median.pkl'))
    # Standardization handled separately (only used for models in need_standard_set).
    scaler = StandardScaler()
    X_train_scalered =  pd.DataFrame(
        scaler.fit_transform(X_train_imputed),
        columns=X_train.columns,
        index=X_train.index
    )
    joblib.dump(scaler, opj(save_path,'scaler_standard.pkl'))
    for name, cfg in models.items():
        model_save_path = opj(save_path,'model_select',name)
        md(model_save_path)
        X_train_process = X_train_imputed
        if  name in need_standard_set:
            # NOTE(review): these inputs are standardized here AND again by the
            # pipeline's StandardScaler below — redundant, though harmless.
            X_train_process = X_train_scalered
        train_preds_all_models[name] = {}
        train_trues_all_models[name] = {}

        print(f"\n>> Running {name} with {len(seeds_list)} seeds")

        for seed in seeds_list:
            train_preds_all_models[name][seed] = {}
            train_trues_all_models[name][seed] = {}

            # Grid-search the best hyper-parameters on the full training data.
            # NOTE(review): nothing here depends on `seed` (cv=5 is an
            # unshuffled StratifiedKFold), so the identical search is repeated
            # for every seed — it could be hoisted out of this loop.
            base_pipe = Pipeline([("scaler", StandardScaler()), ("clf", cfg["estimator"])])
            gs = GridSearchCV(base_pipe, cfg["param_grid"], cv=5, scoring="roc_auc", n_jobs=-1)
            gs.fit(X_train_process, y_train)
            best_params = gs.best_params_

            cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
            fold_idx = 0

            for tr_idx, va_idx in cv.split(X_train_process, y_train):
                X_tr, X_va = X_train_process.iloc[tr_idx], X_train_process.iloc[va_idx]
                y_tr, y_va = y_train.iloc[tr_idx], y_train.iloc[va_idx]

                # Data augmentation: append noisy copies of the training fold.
                X_list = [X_tr.values]; y_list = [y_tr.values]
                for _ in range(n_augments):
                    # NOTE(review): re-seeding on every pass means all passes
                    # draw the IDENTICAL noise matrix when n_augments > 1 —
                    # confirm this is intended.
                    np.random.seed(seed)
                    noise = np.random.normal(0, noise_std, X_tr.shape)
                    X_list.append(X_tr.values + noise)
                    y_list.append(y_tr.values.copy())
                Xtr2 = pd.DataFrame(np.vstack(X_list), columns=X_tr.columns)
                ytr2 = np.hstack(y_list)

                # Re-instantiate the estimator with the grid-searched parameters
                # (strip the "clf__" pipeline prefix first).
                param_dict = {k.split("__")[1]: v for k, v in best_params.items()}
                base_params = cfg["estimator"].get_params()
                updated_params = base_params.copy()
                updated_params.update(param_dict)
                est_class = cfg["estimator"].__class__
                est = est_class(**updated_params)

                model = Pipeline([("scaler", StandardScaler()), ("clf", est)])
                model.fit(Xtr2, ytr2)

                # Persist the fitted pipeline for this seed/fold.
                joblib.dump(model,opj(model_save_path, f"{seed}_{fold_idx}.joblib"))

                # Out-of-fold predicted probability of the positive class.
                proba = model.predict_proba(X_va)[:, 1]
                train_preds_all_models[name][seed][fold_idx] = proba
                train_trues_all_models[name][seed][fold_idx] = y_va.values

                fold_idx += 1

    return  train_preds_all_models, train_trues_all_models


# Find the best threshold (maximum Youden's J)
def find_best_threshold_by_youden(y_true, y_score):
    """Pick the ROC operating point that maximizes Youden's J statistic.

    J = sensitivity + specificity - 1 = TPR - FPR.  Returns the threshold at
    the maximum J together with the TPR and FPR achieved there.
    """
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    best = int(np.argmax(tpr - fpr))
    return thresholds[best], tpr[best], fpr[best]

def net_benefit(y_true, y_prob, thresholds):
    """Net benefit of a classifier at each decision threshold (for DCA).

        NB(pt) = TP/n - FP/n * pt / (1 - pt)

    Parameters
    ----------
    y_true : array-like of 0/1 labels.
    y_prob : array-like of predicted positive-class probabilities.
    thresholds : iterable of threshold probabilities in (0, 1).

    Counts TP/FP directly with numpy instead of
    sklearn.metrics.confusion_matrix, whose matrix collapses to 1x1 (and
    breaks the 4-way ravel unpack) when y_true contains a single class.
    """
    y_true = np.asarray(y_true)
    y_prob = np.asarray(y_prob)
    n = len(y_true)
    nb_list = []
    for pt in thresholds:
        pred_pos = y_prob >= pt
        tp = np.count_nonzero(pred_pos & (y_true == 1))
        fp = np.count_nonzero(pred_pos & (y_true == 0))
        nb_list.append(tp / n - (fp / n) * (pt / (1 - pt)))
    return np.array(nb_list)
# ========== Plot per-model curves on the training set ==========
def plot_train_roc_curves(train_preds_all_models, train_trues_all_models, seeds_list,save_path, save_prefix="roc", n_bootstrap=1000):
    """Pool out-of-fold predictions per model and plot calibration, ROC and
    decision curves on the training set; write a per-model metrics CSV.

    The two dicts mirror train_and_collect_preds' return values:
    ``train_preds_all_models[name][seed][fold]`` -> predicted probabilities,
    ``train_trues_all_models[name][seed][fold]`` -> matching true labels.

    NOTE(review): reads the module-level globals ``models`` and ``prefix``;
    ``prefix`` is only set by the driver loop in ``__main__`` (currently
    commented out) — verify before relying on the 'All' correlation branch.
    """
    fig_cal, ax_cal = plt.subplots(figsize=(10, 8))  # calibration curves
    fig_roc, ax_roc = plt.subplots(figsize=(10, 8))  # ROC curves
    model_preds_concat = {}
    model_metrics_dict = {}
    for name in models.keys():
        # Pool this model's validation predictions across all seeds and folds.
        all_preds = []
        all_labels = []
        for seed in seeds_list:
            for fold_preds in train_preds_all_models[name][seed].values():
                all_preds.append(fold_preds)
            for fold_trues in train_trues_all_models[name][seed].values():
                all_labels.append(fold_trues)
        all_preds = np.concatenate(all_preds)
        all_labels = np.concatenate(all_labels)
        model_preds_concat[name] = all_preds

        best_thres, best_tpr, best_fpr = find_best_threshold_by_youden(all_labels, all_preds)

        # NOTE(review): this local `auc` shadows sklearn.metrics.auc imported
        # at the top of the file (harmless inside this function, but renaming
        # it would be cleaner).
        auc, auc_low, auc_up = bootstrap_ci(all_labels, all_preds, 
                                    lambda yt, yp: roc_auc_score(yt, yp),
                                    n_bootstrap=n_bootstrap)

        # --- Sensitivity & Specificity ---
        # confusion_matrix().ravel() order is (tn, fp, fn, tp); each lambda
        # below recomputes the matrix three times per bootstrap resample.
        # NOTE(review): y_pred_label is never used afterwards.
        y_pred_label = (all_preds >= best_thres).astype(int)
        sens, sens_low, sens_up = bootstrap_ci(all_labels, all_preds, 
                                            lambda yt, yp: confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[3] /
                                                            (confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[3] +
                                                            confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[2]),
                                            n_bootstrap=n_bootstrap)
        spe, spe_low, spe_up = bootstrap_ci(all_labels, all_preds, 
                                            lambda yt, yp: confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[0] /
                                                        (confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[0] +
                                                        confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[1]),
                                            n_bootstrap=n_bootstrap)

        # --- NPV ---
        npv, npv_low, npv_up = bootstrap_ci(all_labels, all_preds, 
                                            lambda yt, yp: confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[0] /
                                                        (confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[0] +
                                                        confusion_matrix(yt, (yp >= best_thres).astype(int)).ravel()[2]),
                                            n_bootstrap=n_bootstrap)

        # --- Calibration (Brier Score) ---
        brier, brier_low, brier_up = bootstrap_ci(all_labels, all_preds, 
                                                lambda yt, yp: brier_score_loss(yt, yp),
                                                n_bootstrap=n_bootstrap)
        # NOTE(review): import inside the loop — Python caches it after the
        # first iteration, but it could be hoisted to the top of the file.
        from sklearn.calibration import calibration_curve
        prob_true, prob_pred = calibration_curve(all_labels, all_preds, n_bins=10)
        ax_cal.plot(prob_pred, prob_true, marker="o", label=f"{name} (Brier={brier:.3f}[{brier_low:.3f}, {brier_up:.3f}])")
        # Store each metric as a pre-formatted "mean [low, high]" string.
        model_metrics_dict[name] = {
            'threshold': best_thres,
            'AUC': f"{auc:.3f} [{auc_low:.3f}, {auc_up:.3f}]",
            'Sensitivity': f"{sens:.3f} [{sens_low:.3f}, {sens_up:.3f}]",
            'Specificity': f"{spe:.3f} [{spe_low:.3f}, {spe_up:.3f}]",
            'NPV': f"{npv:.3f} [{npv_low:.3f}, {npv_up:.3f}]",
            'Calibration(Brier)': f"{brier:.3f} [{brier_low:.3f}, {brier_up:.3f}]"
        }
        fpr, tpr, _ = roc_curve(all_labels, all_preds)
        ax_roc.plot(fpr, tpr, label=f"{name} Train AUC={auc:.3f} [{auc_low:.3f},{auc_up:.3f}]")
    ax_cal.plot([0, 1], [0, 1], "k--", label="Perfectly calibrated")
    ax_cal.set_xlabel("Predicted probability")
    ax_cal.set_ylabel("Observed proportion")
    ax_cal.set_title("Calibration Curves with Brier Score")
    ax_cal.legend(loc='lower right')
    ax_cal.grid(True)
    fig_cal.tight_layout()
    fig_cal.savefig(opj(save_path,'calibration_curves.png'))
    df_metrics = pd.DataFrame.from_dict(model_metrics_dict, orient='index').reset_index()
    df_metrics = df_metrics.rename(columns={'index': 'Model'})
    df_metrics.to_csv(opj(save_path,'model_metrics_summary.csv'),index=False)

    ax_roc.plot([0, 1], [0, 1], "--", color="red")
    ax_roc.set_xlabel("False Positive Rate")
    ax_roc.set_ylabel("True Positive Rate")
    ax_roc.set_title("ROC Curve on Training Set")
    ax_roc.legend(loc="lower right")
    ax_roc.grid(True)
    fig_roc.tight_layout()
    fig_roc.savefig(opj(save_path,f"{save_prefix}_train.pdf"))


    # Decision Curve Analysis over all models.
    fig_dca, ax_dca = plt.subplots(figsize=(10, 8))
    thresholds = np.linspace(0.01, 0.99, 99)

    for name, preds in model_preds_concat.items():
        # NOTE(review): `all_labels` here is left over from the LAST model of
        # the loop above; this is only correct if every model pooled its folds
        # in the same order (they share seeds_list and fold splits, so
        # presumably yes) — verify.
        nb = net_benefit(all_labels, preds, thresholds)
        ax_dca.plot(thresholds, nb, label=f"{name}")

    # Treat-all and treat-none baseline curves.
    treat_all = [np.mean(all_labels) - (1 - np.mean(all_labels)) * (pt / (1 - pt)) for pt in thresholds]
    treat_none = np.zeros_like(thresholds)

    ax_dca.plot(thresholds, treat_all, "--", color="grey", label="Treat All")
    ax_dca.plot(thresholds, treat_none, "--", color="black", label="Treat None")

    ax_dca.set_xlabel("Threshold Probability")
    ax_dca.set_ylabel("Net Benefit")
    ax_dca.set_title("Decision Curve Analysis")
    ax_dca.legend(loc="best")
    ax_dca.set_ylim(-0.1, 0.4)
    ax_dca.grid(True)
    fig_dca.tight_layout()
    fig_dca.savefig(opj(save_path, "decision_curve_analysis.png"))
    plt.close('all') 
    if prefix == 'All':
        # Pairwise Pearson correlation of the models' pooled probabilities.
        df_preds = pd.DataFrame(model_preds_concat)  
        corr_matrix = df_preds.corr(method='pearson')
        plt.figure(figsize=(10, 8))
        import seaborn as sns
        sns.heatmap(corr_matrix, annot=True, fmt=".2f", cmap='coolwarm', vmin=0, vmax=1)
        plt.title("Correlation Matrix of Model Prediction Probabilities")
        plt.tight_layout()
        plt.savefig(opj(save_path, 'model_proba_corr_matrix.pdf'))

def select_type():
    """Aggregate per-feature-set metric summaries into one CSV and plot a
    grouped bar chart comparing AUC across models and feature sources.

    Reads each ``<source>/model_metrics_summary.csv`` under
    ``final_model_select_result_path`` and writes the combined CSV plus
    ``model_auc_comparison.pdf`` back to the same directory.
    """
    model_order = ['Logistic', 'SVM', 'GBM', 'MLP', 'RandomForest',
                   'XGBoost', 'KNN', 'AdaBoost', 'LightGBM', 'CatBoost', 'BNB', 'GNB']
    type_order = ['Collagen', 'Region', 'Nuclei', 'Clinic', 'TME', 'All']

    all_data = []
    for t in type_order:
        df = pd.read_csv(opj(final_model_select_result_path, t, 'model_metrics_summary.csv'))
        df['Source'] = t
        all_data.append(df)

    df_all = pd.concat(all_data, ignore_index=True)

    # Fix: rename BEFORE imposing the ordered Categorical.  The original code
    # replaced 'RandomForest' with 'RF' on an already-categorical column whose
    # categories did not include 'RF'; depending on the pandas version that
    # either raises or silently degrades the dtype to object, which breaks
    # the custom sort order below.
    df_all['Model'] = df_all['Model'].replace('RandomForest', 'RF')
    model_order = ['RF' if m == 'RandomForest' else m for m in model_order]
    df_all['Model'] = pd.Categorical(df_all['Model'], categories=model_order, ordered=True)
    df_all['Source'] = pd.Categorical(df_all['Source'], categories=type_order, ordered=True)
    df_all = df_all.sort_values(['Model', 'Source'])
    df_all.to_csv(opj(final_model_select_result_path, 'model_metrics_summary.csv'), index=False)

    # Grouped bar chart: one group per model, one bar per feature source.
    import seaborn as sns
    plt.figure(figsize=(14, 6))
    sns.set(style='white')
    palette = [
        "#1f77b4",  # blue
        "#ff7f0e",  # orange
        "#2ca02c",  # green
        "#d62728",  # red
        "#9467bd",  # purple
        "#8c564b"   # brown
    ]
    # 'AUC' is stored as "mean [low, high]"; keep only the point estimate.
    df_all['AUC'] = df_all['AUC'].str.split(' ').str[0].astype(float)

    ax = sns.barplot(data=df_all, x='Model', y='AUC', hue='Source', palette=palette)

    plt.title('AUC comparison across different models and sources', fontsize=14)
    plt.ylabel('AUC')
    plt.ylim(0.55, 0.85)

    # Drop the top/right spines for a cleaner look.
    sns.despine(top=True, right=True, left=False, bottom=False)

    # Legend inside the figure area, no frame.
    ax.legend(loc='upper right', bbox_to_anchor=(1.15, 1.0), frameon=False)

    plt.tight_layout()
    plt.savefig(opj(final_model_select_result_path, 'model_auc_comparison.pdf'))

def select_model():
    """Visualize the 'All' feature-set results: a polar ring-bar chart of
    AUC/Sensitivity/Specificity/NPV per model and a horizontal error-bar
    plot of Brier scores with 95% CIs."""
    df = pd.read_csv(opj(final_model_select_result_path,'All','model_metrics_summary.csv'))
    # NOTE(review): 'AUC' is still the string "0.xxx [lo, hi]" at this point;
    # the lexicographic sort matches the numeric order only because every
    # value shares the fixed 3-decimal "0.xxx" format — confirm.
    df = df.sort_values(by=['AUC'], ascending=False)
    df['Model'] = df['Model'].replace('RandomForest', 'RF')
    metrics = ['AUC', 'Sensitivity','Specificity','NPV']
    num_models = len(df)
    num_metrics = len(metrics)
    # Strip the confidence interval; keep only the leading point estimate.
    for col in metrics:
        df[col] = df[col].str.split(' ').str[0].astype(float)
    # Layout parameters
    inner_radius = 0.3  # radius of the hollow center circle

    # Total angular units (bars plus gaps):
    # 3 empty units between each metric group, no closing gap at the end.
    total_units = num_metrics * num_models + 12

    # Angle per unit
    unit_angle = 2 * np.pi / total_units

    # Angle of each bar
    angles = []
    for i in range(num_metrics):
        start_angle = i * (num_models + 3) * unit_angle
        for j in range(num_models):
            angle = start_angle + j * unit_angle
            angles.append(angle)

    # Concatenate the values metric by metric.
    values = []
    for metric in metrics:
        values.extend(df[metric].values)

    # Color map: one color per model, repeated for every metric group.
    cmap = plt.get_cmap('tab20')
    colors = [cmap(i % 20) for i in range(num_models)] * num_metrics

    # Scale bar heights so inner_radius + height never exceeds 1.
    max_possible_height = 1 - inner_radius
    values_scaled = np.array(values) / np.max(values) * max_possible_height

    # Polar figure
    fig, ax = plt.subplots(figsize=(12, 12), subplot_kw=dict(polar=True))

    # Draw the bars
    bars = ax.bar(angles, values_scaled, width=unit_angle*0.9, bottom=inner_radius, color=colors, edgecolor='none', alpha=0.85, zorder=100)

    # Radial range: bars start at inner_radius and end at 1.
    ax.set_ylim(0, 1)

    # Center each metric label over its group of bars.
    label_angles = []
    for i in range(num_metrics):
        start_angle = i * (num_models + 3) * unit_angle
        label_angle = start_angle + (num_models / 2) * unit_angle
        label_angles.append(label_angle)

    ax.set_xticks(label_angles)
    ax.set_xticklabels(metrics, fontsize=18, fontweight='bold')
    ax.tick_params(axis='x', pad=25)

    # Hide radial tick labels and all grids.
    ax.set_yticklabels([])
    ax.xaxis.grid(False)
    ax.yaxis.grid(False)
    ax.spines['polar'].set_visible(False)

    
    # Draw radial gridlines and tick values mapped back to the original scale.
    r_ticks = np.linspace(inner_radius, 1, 6)
    for r in r_ticks:
        ax.plot(np.linspace(0, 2*np.pi, 200), [r]*200, color='gray', lw=0.6, alpha=0.5)
        val = (r - inner_radius) / max_possible_height * np.max(values)
        ax.text(np.pi/180*77, r + 0.03, f"{val:.2f}",
            ha='center', va='center',
            fontsize=10, color='black',
            rotation=-8, rotation_mode='anchor')  # rotated to face outward

   
    from matplotlib.lines import Line2D
    # Legend without a frame.
    legend_elements = [Line2D([0], [0], color=cmap(i % 20), lw=6, label=df['Model'].iloc[i]) for i in range(num_models)]
    leg = ax.legend(handles=legend_elements, bbox_to_anchor=(1.15, 0), fontsize=12,loc='lower right')
    leg.get_frame().set_linewidth(0)

    # Save the figure
    plt.savefig(opj(final_model_select_result_path,'All','model_metrics_ringbar.pdf'), bbox_inches='tight')
    plt.show()

    import re
    def parse_brier(s):
        """Parse a "mean [low, high]" string into three floats (None triple on mismatch)."""
        match = re.match(r"([\d.]+)\s+\[([\d.]+),\s*([\d.]+)\]", s)
        if match:
            mean = float(match.group(1))
            low = float(match.group(2))
            high = float(match.group(3))
            return mean, low, high
        return None, None, None

    df[["Brier", "CI_low", "CI_high"]] = df["Calibration(Brier)"].apply(
        lambda x: pd.Series(parse_brier(x))
    )

    # ===== Horizontal error-bar plot of Brier scores (95% CI) =====
    plt.figure(figsize=(8, 6))
    plt.errorbar(df["Brier"], df["Model"],
                xerr=[df["Brier"] - df["CI_low"], df["CI_high"] - df["Brier"]],
                fmt='o', color='blue', ecolor='gray', elinewidth=2, capsize=4)

    plt.axvline(0, color="black", linewidth=0.8)
    plt.xlabel("Brier score (95% CI)")
    plt.title("Model Calibration (Brier score)")
    plt.gca().invert_yaxis()  # put the best (first-sorted) model at the top
    plt.tight_layout()
    plt.savefig(opj(final_model_select_result_path,'All', 'model_Calibration.pdf'))

def main():
    """Driver for one feature-set run: trains all models and plots the
    training-set curves under final_model_select_result_path/<prefix>.

    NOTE(review): relies on the module-level globals ``train_df`` and
    ``prefix`` being set by the caller (the loop in ``__main__``, which is
    currently commented out) — confirm before invoking directly.
    Tune ``n_augments`` and ``noise_std`` as needed.
    """
    save_path = opj(final_model_select_result_path,f"{prefix}")
    md(save_path)
    train_preds_all_models, train_trues_all_models = train_and_collect_preds(train_df,models, seeds_list,save_path, n_augments=1, noise_std=0.01)
    plot_train_roc_curves(train_preds_all_models, train_trues_all_models, seeds_list,save_path)


if __name__ =='__main__':
    # Compare the TME / Clinic / All feature combinations across models, and
    # the correlation between models under the All feature set.
    # NOTE(review): the per-feature-set training loop below is commented out,
    # so this entry point currently only aggregates previously generated CSVs
    # (select_type / select_model); `df`, `dp` and `type_dict` go unused.
    df = pd.read_csv(signature_score_csv_path)
    dp = DataSplitUtil(split_random_state_list[0])
    type_dict = {'Collagen':['Collagen'], 'Region':['Region'],'Nuclei':['Nuclei'],'Clinic':['Clinic'],'TME':['Collagen','Region','Nuclei'], 'All':['Collagen','Region','Nuclei','Clinic']}
    # for name,select_columns in type_dict.items():
    #     # if name != 'All':
    #     #     continue
    #     temp_df = df[select_columns + exclude_columns]
    #     train_df, test_df = dp.get_train_test_df(temp_df)
    #     prefix = name
    #     print(f"==================={prefix} start ==========================")
    #     main()
    #     print(f"==================={prefix} end ==========================")
    select_type()
    select_model()
