import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
    roc_curve, auc,
    accuracy_score, f1_score, precision_score, recall_score,
    roc_auc_score
)
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import resample
from xgboost import XGBClassifier

from config.config import *
from utils.data_split import *
from utils.draw_pic import plot_mutil_model_roc, bootstrap_auc_ci

# =======================================
# Step 1. Data loading and feature selection
# =======================================
# NOTE(review): `pd`, `signature_score_csv_path`, `split_random_state_list` and
# `DataSplitUtil` are not defined in this file — presumably supplied by the
# `from config.config import *` / `from utils.data_split import *` star imports;
# verify.
df = pd.read_csv(signature_score_csv_path)
dp = DataSplitUtil(split_random_state_list[0])
X, X_test, y, y_test, _, _ = dp.split_train_and_test(df,"",flag=True,model_name="ensemble_model")
# =======================================
# Step 2. Model definitions and random seeds
# =======================================
# Shared FPR grid: every ROC curve below is interpolated onto these 100 points
# so that curves from different folds/seeds can be averaged.
mean_fpr = np.linspace(0, 1, 100)
# Gaussian-noise augmentation settings: number of noisy copies per fold and
# the noise standard deviation.
n_augments, noise_std = 1, 0.01

# Per-model lists of random seeds. Each seed drives one StratifiedKFold split
# below, yielding one "best fold" model per seed; lists deliberately differ in
# length between models.
seeds_dict = {
    "Logistic": [1761, 1844, 517, 565, 651, 1843, 248, 1462, 767, 855, 1447, 1651, 1800, 358, 989, 1318, 1320, 1659,
                 1879],
    "SVM": [113, 461, 651, 667, 1079, 1208, 1460, 1807, 1832, 1958, 1963],
    "GBM": [111, 222, 333, 444, 555, 666, 777, 888, 999, 1001],
    "NeuralNetwork": [101, 202, 303, 404, 505, 606, 707, 808, 909, 1010],
    "RandomForest": [101, 202, 303, 404, 505, 606, 707, 808, 909, 1010],
    "XGBoost": [121, 242, 363, 484, 605, 726, 847, 968, 1089, 1210],
    "KNN": [33, 56, 228, 338, 339, 341, 458, 496, 592, 722, 770, 880, 942, 1006, 1688, 1810, 1905],
    "AdaBoost": [151, 302, 453, 604, 755, 906, 1057, 1208, 1359, 1510],
    "LightGBM": [131, 262, 393, 524, 655, 786, 917, 1048, 1179, 1310],
    "CatBoost": [141, 282, 423, 564, 705, 846, 987, 1128, 1269, 1410],
    "BNB": [264, 386, 439, 469, 517, 585, 830, 969, 1200, 1930, 1963]
}

# Candidate classifiers and their GridSearchCV parameter grids. Every grid key
# is prefixed with 'clf__' because each estimator is fitted behind a
# ("clf", ...) step of a Pipeline that also contains a StandardScaler.
models = {
    "Logistic": {
        "estimator": LogisticRegression(solver="liblinear", max_iter=1000),
        "param_grid": [
            {'clf__penalty': ['l1', 'l2'], 'clf__C': [0.1, 1], 'clf__solver': ['liblinear'], 'clf__max_iter': [200]}
        ]
    },
    "SVM": {
        # probability=True is required for predict_proba used downstream.
        "estimator": SVC(probability=True),
        "param_grid": {
            'clf__C': [0.1, 1],
            'clf__kernel': ['linear', 'rbf'],
            'clf__gamma': ['scale']
        }
    },
    "GBM": {
        "estimator": GradientBoostingClassifier(random_state=42),
        "param_grid": {
            'clf__n_estimators': [100, 200],
            'clf__learning_rate': [0.05, 0.1],
            'clf__max_depth': [3, 5]
        }
    },
    "NeuralNetwork": {
        "estimator": MLPClassifier(max_iter=1000, random_state=42),
        "param_grid": {
            'clf__hidden_layer_sizes': [(100,), (100, 50)],
            'clf__activation': ['relu'],
            'clf__solver': ['adam'],
            'clf__alpha': [1e-4],
            'clf__learning_rate_init': [0.001],
            'clf__max_iter': [200]
        }
    },
    "RandomForest": {
        "estimator": RandomForestClassifier(random_state=42),
        "param_grid": {
            "clf__n_estimators": [100, 200],
            "clf__max_depth": [None, 10],
            "clf__min_samples_split": [2, 5]
        }
    },
    "XGBoost": {
        # NOTE(review): `use_label_encoder` is deprecated (a no-op / removed
        # in recent XGBoost releases) — confirm the installed version still
        # accepts it without warning/error.
        "estimator": XGBClassifier(use_label_encoder=False, eval_metric='logloss', random_state=42),
        "param_grid": {
            "clf__n_estimators": [100, 200],
            "clf__max_depth": [3, 6],
            "clf__learning_rate": [0.05, 0.1]
        }
    },
    "KNN": {
        "estimator": KNeighborsClassifier(),
        "param_grid": {
            "clf__n_neighbors": [3, 5, 7],
            "clf__weights": ["uniform", "distance"]
        }
    },
    "AdaBoost": {
        "estimator": AdaBoostClassifier(random_state=42),
        "param_grid": {
            'clf__n_estimators': [50, 100],
            'clf__learning_rate': [0.1, 1.0],
            # NOTE(review): 'base_estimator' was renamed to 'estimator' in
            # scikit-learn 1.2 and removed in 1.4, and 'SAMME.R' was removed
            # in 1.6 — this grid only works on older scikit-learn versions;
            # confirm the pinned version.
            'clf__base_estimator': [DecisionTreeClassifier(max_depth=1)],
            'clf__algorithm': ['SAMME.R']
        }
    },
    "LightGBM": {
        "estimator": LGBMClassifier(random_state=42),
        "param_grid": {
            "clf__num_leaves": [31, 50],
            "clf__learning_rate": [0.05, 0.1],
            "clf__n_estimators": [100, 200]
        }
    },
    "CatBoost": {
        # verbose=0 silences CatBoost's per-iteration training output.
        "estimator": CatBoostClassifier(verbose=0, random_state=42),
        "param_grid": {
            'clf__iterations': [100, 200],
            'clf__learning_rate': [0.05, 0.1],
            'clf__depth': [4, 6]
        }
    },
    "BNB": {
        "estimator": BernoulliNB(),
        "param_grid": {
            'clf__alpha': [0.1, 1.0],
            'clf__binarize': [0.0, 0.5]
        }
    }
}

# Result containers, keyed by model name:
#   valid_results         - mean cross-validation ROC summary per model
#   test_optimal_results  - test-set ROC of the single best-seed model
#   test_ensemble_results - test-set ROC of the mean-probability ensemble
# (The redundant `import numpy as np` that used to sit here was removed:
#  numpy is already imported at the top of the file.)
valid_results = {}
test_optimal_results = {}
test_ensemble_results = {}


def bootstrap_ci_for_ensemble(y_true, y_scores, n_bootstraps=200, ci=0.95, random_state=None):
    """Percentile-bootstrap confidence interval for a ROC AUC.

    Resamples (y_true, y_scores) pairs with replacement `n_bootstraps` times,
    computes the AUC of each resample, and returns the central `ci` percentile
    interval of those AUCs.

    Parameters
    ----------
    y_true : array-like of binary (0/1) labels.
    y_scores : array-like of predicted probabilities, same length as y_true.
    n_bootstraps : int, number of bootstrap resamples.
    ci : float, central coverage (0.95 -> 2.5th/97.5th percentiles).
    random_state : seed for reproducibility.

    Returns
    -------
    (lower, upper) : tuple of floats. Returns (nan, nan) when no resample
    contained both classes (previously np.percentile raised on an empty list).
    """
    rng = np.random.RandomState(random_state)
    # Accept plain lists / pandas Series as well as ndarrays.
    y_true = np.asarray(y_true)
    y_scores = np.asarray(y_scores)
    n = len(y_true)
    bootstrapped_aucs = []

    for _ in range(n_bootstraps):
        # Bootstrap sample of indices.
        indices = rng.choice(n, size=n, replace=True)
        # AUC is undefined when a resample contains a single class — skip it.
        if len(np.unique(y_true[indices])) < 2:
            continue

        # Renamed from `auc` so the local no longer shadows
        # sklearn.metrics.auc imported at module level.
        score = roc_auc_score(y_true[indices], y_scores[indices])
        bootstrapped_aucs.append(score)

    # Guard: every resample may have been single-class (tiny or one-class
    # y_true); without this, np.percentile([]) raises.
    if not bootstrapped_aucs:
        return float("nan"), float("nan")

    alpha = (1 - ci) / 2
    lower = np.percentile(bootstrapped_aucs, 100 * alpha)
    upper = np.percentile(bootstrapped_aucs, 100 * (1 - alpha))
    return lower, upper


def bootstrap_ci_from_list(vals, n_bootstraps=200, ci=0.95, random_state=None):
    """Percentile-bootstrap confidence interval for the mean of `vals`.

    Draws `n_bootstraps` resamples of `vals` (with replacement, same length),
    records each resample's mean, and returns the central `ci` percentile
    interval of those means.

    Parameters
    ----------
    vals : sequence of numbers (here: per-seed best-fold AUC values).
    n_bootstraps : int, number of bootstrap resamples.
    ci : float, central coverage, e.g. 0.95 for a [2.5%, 97.5%] interval.
    random_state : seed for reproducibility.

    Returns
    -------
    (lower, upper) : tuple of floats.
    """
    rng = np.random.RandomState(random_state)
    sample_size = len(vals)
    resample_means = [
        np.mean(rng.choice(vals, size=sample_size, replace=True))
        for _ in range(n_bootstraps)
    ]
    tail = (1 - ci) / 2
    return (
        np.percentile(resample_means, 100 * tail),
        np.percentile(resample_means, 100 * (1 - tail)),
    )


# =======================================
# Step 3. Per model / per seed / per fold: run CV and keep the "best fold"
# =======================================

for name, cfg in models.items():
    seeds = seeds_dict[name]
    best_pipes, best_tprs = [], []
    seed_aucs = []  # AUC of the best fold for each seed
    print(f"\n>> Running {name} with {len(seeds)} seeds")

    for seed in seeds:
        # 3.1 Hyper-parameter search on the FULL training data.
        # NOTE(review): tuning on all of (X, y) and then scoring folds drawn
        # from the same data leaks information into the fold AUCs — confirm
        # this is intentional.
        # NOTE(review): StandardScaler is not imported explicitly in this file;
        # presumably it arrives via one of the star imports — verify.
        base_pipe = Pipeline([("scaler", StandardScaler()), ("clf", cfg["estimator"])])
        gs = GridSearchCV(base_pipe, cfg["param_grid"], cv=5, scoring="roc_auc", n_jobs=-1)
        gs.fit(X, y)
        best_params = gs.best_params_

        # 3.2 5-fold StratifiedKFold; keep the single best-scoring fold.
        cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
        best_fold_auc, best_fold_tpr, best_pipe = -np.inf, None, None

        for tr_idx, va_idx in cv.split(X, y):
            X_tr, X_va = X.iloc[tr_idx], X.iloc[va_idx]
            y_tr, y_va = y.iloc[tr_idx], y.iloc[va_idx]

            # Data augmentation: append Gaussian-noise copies of the fold's
            # training rows.
            # NOTE(review): calling np.random.seed(seed) inside this loop makes
            # every augmented copy identical whenever n_augments > 1 — confirm
            # that is intended.
            X_list = [X_tr.values]
            y_list = [y_tr.values]
            for _ in range(n_augments):
                np.random.seed(seed)
                noise = np.random.normal(0, noise_std, X_tr.shape)
                X_list.append(X_tr.values + noise)
                y_list.append(y_tr.values.copy())
            Xtr2 = pd.DataFrame(np.vstack(X_list), columns=X_tr.columns)
            ytr2 = np.hstack(y_list)

            # Rebuild a fresh estimator carrying the grid-searched parameters
            # (strip the 'clf__' pipeline prefix first).
            # est = cfg["estimator"].set_params(**{k.split("__")[1]:v for k,v in best_params.items()})
            # model = Pipeline([("scaler", StandardScaler()), ("clf", est)])
            param_dict = {k.split("__")[1]: v for k, v in best_params.items()}
            base_params = cfg["estimator"].get_params()
            updated_params = base_params.copy()
            updated_params.update(param_dict)

            # Instantiate a brand-new model of the same class with the merged
            # parameters, so folds never share fitted state.
            est_class = cfg["estimator"].__class__
            est = est_class(**updated_params)
            model = Pipeline([("scaler", StandardScaler()), ("clf", est)])
            model.fit(Xtr2, ytr2)

            # Validation-set ROC, interpolated onto the shared mean_fpr grid.
            proba = model.predict_proba(X_va)[:, 1]
            fpr, tpr, _ = roc_curve(y_va, proba)
            interp_tpr = np.interp(mean_fpr, fpr, tpr);
            interp_tpr[0] = 0.0
            this_auc = auc(mean_fpr, interp_tpr)

            if this_auc > best_fold_auc:
                best_fold_auc, best_fold_tpr, best_pipe = this_auc, interp_tpr, model

        best_pipes.append(best_pipe)
        best_tprs.append(best_fold_tpr)
        seed_aucs.append(best_fold_auc)

    # Cross-validation summary: mean TPR curve across the per-seed best folds,
    # with a bootstrap CI over the per-seed AUCs.
    best_tprs = np.array(best_tprs)
    mean_tpr = best_tprs.mean(axis=0)
    mean_tpr[-1] = 1.0
    auc_mean = auc(mean_fpr, mean_tpr)
    lower, upper = bootstrap_ci_from_list(seed_aucs, n_bootstraps=200, ci=0.95, random_state=42)
    print(f"{name} AUC = {auc_mean:.3f} (95% CI [{lower:.3f}, {upper:.3f}])")

    valid_results[name] = {
        "fpr": mean_fpr,
        "tpr": mean_tpr,
        "auc": auc_mean,
        "ci": (lower, upper)
    }

    # Held-out test set
    # 1) Predict with the single seed-model whose fold AUC was highest.
    best_index = np.argmax(seed_aucs)
    final_model = best_pipes[best_index]
    test_proba_best = final_model.predict_proba(X_test)[:, 1]
    test_pred_best = final_model.predict(X_test)
    fpr_best, tpr_best, _ = roc_curve(y_test, test_proba_best)
    auc_best = roc_auc_score(y_test, test_proba_best)
    ci_best = bootstrap_auc_ci(final_model, X_test, y_test, n_bootstraps=200, random_state=42)
    print(f"→ {name} 测试集（最优模型）AUC: {auc_best:.3f}")

    # 2) Ensemble prediction: average the predicted probabilities of all
    #    per-seed best models, threshold at 0.5 for hard labels.
    all_probas = np.array([pipe.predict_proba(X_test)[:, 1] for pipe in best_pipes])
    test_proba_ensemble = all_probas.mean(axis=0)
    test_pred_ensemble = (test_proba_ensemble >= 0.5).astype(int)
    fpr_ensemble, tpr_ensemble, _ = roc_curve(y_test, test_proba_ensemble)
    auc_ensemble = roc_auc_score(y_test, test_proba_ensemble)
    ci_ensemble = bootstrap_ci_for_ensemble(y_test.values, test_proba_ensemble, n_bootstraps=200, ci=0.95,
                                            random_state=42)
    print(f"→ {name} 测试集（集成模型）AUC: {auc_ensemble:.3f}")

    # Save both test-set evaluations for plotting/serialization below.
    test_optimal_results[name] = {
        "fpr": fpr_best,
        "tpr": tpr_best,
        "auc": auc_best,
        "ci": ci_best
    }
    test_ensemble_results[name] = {
        "fpr": fpr_ensemble,
        "tpr": tpr_ensemble,
        "auc": auc_ensemble,
        "ci": ci_ensemble
    }
# =======================================
# Step 4. Plot ROC curves and persist the results
# =======================================
results = {'cross_validation': valid_results, 'test_by_optimal': test_optimal_results,
           'test_by_ensemble': test_ensemble_results}
# Human-readable cohort titles for the figure headings.
cohort_map = {
    'cross_validation': "BCNB training cohort",
    'test_by_optimal': "BCNB testing cohort by optimal",
    'test_by_ensemble': "BCNB testing cohort by ensemble"
}
for name, res in results.items():
    auc_title = f"Mean ROC Curve\n{cohort_map.get(name, 'Unknown cohort')}"
    # NOTE(review): `opj` (os.path.join alias?), `jw` (JSON writer?) and
    # `final_model_select_result_path` are not defined in this file —
    # presumably supplied by the star imports; verify.
    save_path = final_model_select_result_path
    plot_mutil_model_roc(res, save_path=opj(save_path, 'auc'), filename=f'{name}.pdf', auc_title=auc_title)
    # Dump each cohort's per-model fpr/tpr/auc/ci to JSON for later reuse.
    jw(
        opj(save_path, 'fpr_tpr', f'{name}.json'),
        {
            model: {
                'fpr': info['fpr'].tolist(),
                'tpr': info['tpr'].tolist(),
                'auc': info['auc'],
                'ci': [float(info['ci'][0]), float(info['ci'][1])]
            }
            for model, info in res.items()
        }
    )

# =========================
# Step 6. External test-set evaluation (entire section below is commented out)
# =========================

# from utils.ensemble import AverageEnsemble
# ensemble_model = AverageEnsemble(ensemble_pipelines, threshold=0.41)
#
# # df_ext = pd.read_excel("./data/QL-TNBC-40x/QL-factor_scores_ml_pearson_one_all_cut_selected_ID_v2.xlsx")
# df_ext = pd.read_excel("./data/IMPRESS/IMPRESS_HER2-factor_scores_ml_pearson_one_v2.xlsx")
#
# # 如果有 "ID" 列，先取出来（不做预测，只做记录）
# ids = df_ext["ID"].values if "ID" in df_ext.columns else None
#
# # 如果有真值 "pCR"，先取出来，用于后续评估
# y_ext_true = df_ext["pCR"].values if "pCR" in df_ext.columns else None
#
# # =========================
# # 3. 只保留与训练时一致的 6 个特征列
# # =========================
# selected_features_list = [
#     "Factor 1 Score",
#     "Factor 2 Score",
#     "Factor 3 Score",
#     "Factor 4 Score",
#     "Factor 5 Score",
#     "Factor 6 Score"
# ]
#
# missing = [c for c in selected_features_list if c not in df_ext.columns]
# if missing:
#     raise ValueError(f"外部测试集缺少训练时的特征：{missing}")
#
# # X_ext = df_ext[selected_features_list].values  # 转为 NumPy，避免列名匹配错误
# X_ext = df_ext[selected_features_list]  # 保留列名的 DataFrame
#
#
# # =========================
# # 4. 用模型做预测
# # =========================
# y_ext_proba = ensemble_model.predict_proba(X_ext)[:, 1]
# y_ext_pred  = ensemble_model.predict(X_ext)
#
# # =========================
# # 7. 绘制 ROC 曲线
# # =========================
# if y_ext_true is not None:
#     fpr, tpr, thresholds = roc_curve(y_ext_true, y_ext_proba)
#     roc_auc_val = auc(fpr, tpr)
#
#     plt.figure(figsize=(6, 6))
#     plt.plot(fpr, tpr, lw=2, label=f"ROC curve (AUC = {roc_auc_val:.3f})")
#     plt.plot([0, 1], [0, 1], linestyle='--', color='gray', lw=1, label="Random guess")
#     plt.xlim([0.0, 1.0])
#     plt.ylim([0.0, 1.05])
#     plt.xlabel("False Positive Rate")
#     plt.ylabel("True Positive Rate")
#     plt.title("ROC Curve on External Test Set")
#     plt.legend(loc="lower right")
#     plt.grid(alpha=0.3)
#     plt.tight_layout()
#     plt.show()
#
# # =========================
# # 8. 保存 ID、True label、预测概率 到 Excel
# # =========================
# # 如果有 ID 列，先保留；否则可以自行生成行号作为 ID
# if ids is not None:
#     df_ext["ID"] = ids
# else:
#     df_ext["ID"] = range(1, len(df_ext) + 1)
#
# # 如果有真值 pCR 列，也保留；否则填充 NaN
# if y_ext_true is not None:
#     df_ext["True_pCR"] = y_ext_true
# else:
#     df_ext["True_pCR"] = np.nan
#
# # 添加预测概率和预测标签
# df_ext["Predicted_Proba"] = y_ext_proba
# df_ext["Predicted_Label"] = y_ext_pred
#
# # 只保留需要的列输出
# output_cols = ["ID", "True_pCR", "Predicted_Proba", "Predicted_Label"]
#
# # 输出到 Excel
# output_path = "./results/external_test_predictions_IMPRESS_HER2_Voting.xlsx"
# df_ext[output_cols].to_excel(output_path, index=False)
# print(f"已保存预测结果到 {output_path}")
#

# =======================================
# Step 7. Save the Voting Ensemble model (disabled)
# =======================================
# os.makedirs("./results/seed", exist_ok=True)
# save_path = "./results/seed/Ensemble_voting_no_split_selected.pkl"
# import joblib
# joblib.dump(ensemble, save_path)
# print(f"\nVoting Ensemble 模型已保存至: {save_path}")

# # =========================
# # 8. 单类置信度—样本排序图
# # =========================
# from matplotlib.patches import Patch

# # -------------------------------
# # 变量重命名以避免与上面重复
# # -------------------------------
# # 假设 ensemble, X, y 已经定义并训练完毕
# y_ext_proba = ensemble.predict_proba(X)[:, 1]
# y_ext_true = y.values  # 或 y.to_numpy()

# # -------------------------------
# # 配色
# # -------------------------------
# color_pos = '#1f78b4'       # 深湖蓝（pCR=1）
# color_neg = '#a6bddb'       # 柔和浅蓝（pCR=0）
# threshold_color = '#e31a1c' # 红色阈值线

# # -------------------------------
# # 排序：高分在前（可选）
# # -------------------------------
# sorted_indices = np.argsort(-y_ext_proba)
# # 如果你想按原序不排序，就用下面一行并注释掉上面一行
# # sorted_indices = np.arange(len(y_ext_proba))

# sorted_probs = y_ext_proba[sorted_indices]
# sorted_labels = y_ext_true[sorted_indices]

# # 样本在 x 轴的位置
# x_vals = np.arange(1, len(sorted_probs) + 1)

# # -------------------------------
# # 画图
# # -------------------------------
# plt.figure(figsize=(6, 4))
# bars = plt.bar(
#     x_vals,
#     sorted_probs,
#     color=[color_pos if lbl == 1 else color_neg for lbl in sorted_labels],
#     width=1.0
# )

# # ==== 新增：在每个柱子上方标注概率（小数点后三位） ====
# for x, p in zip(x_vals, sorted_probs):
#     plt.text(
#         x,
#         p + 0.02,                  # 在柱顶上方一点
#         f"{p:.3f}",               # 三位小数
#         ha='center',
#         va='bottom',
#         fontsize=6                # 字号根据样本数量可适当调小
#     )
# # ==== 新增结束 ====

# # # 阈值线及文字
# # plt.axhline(threshold, color=threshold_color, linestyle='--', linewidth=1.5)
# # plt.text(
# #     len(sorted_probs) * 0.5 + 1,
# #     threshold + 0.02,
# #     f'Threshold = {threshold:.3f}',
# #     fontsize=10,
# #     color=threshold_color
# # )

# # 坐标和范围
# plt.xlabel("Samples (sorted by predicted probability)", fontsize=12)
# plt.ylabel("Predicted probability of pCR", fontsize=12)
# plt.xlim(0.5, len(sorted_probs) + 0.5)
# # plt.ylim(-0.02, 1.02)

# # 边框美化
# ax = plt.gca()
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.grid(False)

# # 图例
# legend_elements = [
#     Patch(facecolor=color_pos, edgecolor='black', label='pCR = 1'),
#     Patch(facecolor=color_neg, edgecolor='black', label='pCR = 0')
# ]
# plt.legend(
#     handles=legend_elements,
#     loc='upper right',
#     bbox_to_anchor=(1, 1.005),
#     frameon=False,
#     fontsize=10
# )

# # 标题
# plt.title("Training Set - Single-Class Confidence", fontsize=13, weight='bold')

# plt.tight_layout()

# # 如需保存文件，请取消注释以下两行
# # os.makedirs("./results/seed", exist_ok=True)
# # plt.savefig("./results/seed/probability_plot_YC_test_jama.pdf", format='pdf', dpi=300)

# plt.show()

# =========================
# Step 9. Compute SHAP values and visualize (disabled)
# =========================
# import shap
#
# # 假设你已有：
# # X: 包含 6 个特征的 DataFrame，shape=(n_samples, 6)
# # ensemble: 训练好的 AverageEnsemble 对象
#
# # 1. 背景样本：这里直接用全部 X（6 个特征，样本量一般也不会太大）
# background = X.values
#
# # 2. 定义 ensemble 的预测函数
# def ensemble_predict_proba(data):
#     # data: numpy array 或者 DataFrame
#     df = pd.DataFrame(data, columns=X.columns) if not isinstance(data, pd.DataFrame) else data
#     return ensemble.predict_proba(df)[:, 1]
#
# # 3. 创建 KernelExplainer（对 6 维输入，用 logit link）
# explainer = shap.KernelExplainer(
#     model=ensemble_predict_proba,
#     data=background,
#     link="logit"
# )
#
# # 4. 计算 SHAP 值
# #    这里对全部样本计算，也仅仅是 6 维，通常几十秒内能跑完
# shap_values = explainer.shap_values(background)
#
# # 5. 全局特征重要性：Bar 图
# shap.summary_plot(
#     shap_values,
#     X,
#     plot_type="bar",
#     feature_names=X.columns,
#     show=False  # 不显示图像，允许我们先保存
# )
#
# # 保存为 PDF
# plt.savefig("./results/shap_summary_plot_abs.pdf",dpi=300, bbox_inches='tight')
#
# # 如果你还想显示图像，可以加上：
# plt.show()
#
# # 6. 特征分布图（Beeswarm），创建 SHAP summary plot
# # plt.figure(figsize=(8, 5))
# shap.summary_plot(
#     shap_values,
#     X,
#     feature_names=X.columns,
#     show=False  # 不显示图像，允许我们先保存
# )
#
# # 保存为 PDF
# plt.savefig("./results/shap_summary_plot.pdf",dpi=300, bbox_inches='tight')
#
# # 如果你还想显示图像，可以加上：
# plt.show()

# 7. （可选）单一样本解释：Waterfall 图
#    取第 0 个样本为例
# shap.plots.waterfall(
#     shap.Explanation(values=shap_values[0],
#                      base_values=explainer.expected_value,
#                      data=background[0],
#                      feature_names=X.columns)
# )

