import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from config.config import *
from utils.data_split import DataSplitUtil
from utils.utils import *
import joblib
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score,roc_curve, precision_recall_curve, f1_score
from sklearn.utils import resample
import warnings
warnings.filterwarnings("ignore", category=UserWarning, message=".*does not have valid feature names.*")
from sklearn.metrics import (
    roc_curve, auc,
    accuracy_score, f1_score, precision_score, recall_score,
    roc_auc_score, confusion_matrix
)
# Decision threshold used for binarizing predicted probabilities.
# Name suggests it maximizes Youden's J (sensitivity + specificity - 1);
# presumably precomputed on the training/validation ROC — TODO confirm provenance.
best_threshold_by_youden = 0.4291484596852248
# ========== Confidence-interval helpers ==========
def bootstrap_auc_ci(y_true, y_scores, n_bootstraps=200, ci=0.95, random_state=42):
    """Estimate a bootstrap percentile confidence interval for the ROC AUC.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Binary ground-truth labels.
    y_scores : array-like of shape (n_samples,)
        Predicted scores/probabilities for the positive class.
    n_bootstraps : int, optional
        Number of bootstrap resamples (default 200).
    ci : float, optional
        Confidence level, e.g. 0.95 for a 95% interval.
    random_state : int, optional
        Seed for the resampling RNG (reproducibility).

    Returns
    -------
    tuple of float
        ``(lower, upper)`` percentile bounds of the bootstrap AUC distribution.

    Raises
    ------
    ValueError
        If no bootstrap resample contained both classes, so no AUC could
        be computed (the original code crashed inside ``np.percentile``).
    """
    # Coerce to ndarrays so fancy indexing works for lists / pandas Series too
    # (one call site passes `.values`, another passes the raw score array).
    y_true = np.asarray(y_true)
    y_scores = np.asarray(y_scores)
    rng = np.random.RandomState(random_state)
    bootstrapped_scores = []
    n = len(y_true)
    for _ in range(n_bootstraps):
        indices = rng.choice(np.arange(n), size=n, replace=True)
        # AUC is undefined when a resample contains only one class.
        if len(np.unique(y_true[indices])) < 2:
            continue
        fpr, tpr, _ = roc_curve(y_true[indices], y_scores[indices])
        bootstrapped_scores.append(auc(fpr, tpr))
    if not bootstrapped_scores:
        raise ValueError(
            "No bootstrap resample contained both classes; cannot compute AUC CI."
        )
    alpha = (1 - ci) / 2
    lower = np.percentile(bootstrapped_scores, 100 * alpha)
    upper = np.percentile(bootstrapped_scores, 100 * (1 - alpha))
    return lower, upper

def plot_auc():
    """Plot the test-set ROC curve (with a bootstrap AUC CI) and save it as a PDF."""
    plt.figure(figsize=(10, 8))
    fpr, tpr, _ = roc_curve(y_test, test_avg_proba)
    roc_auc = auc(fpr, tpr)
    ci_low, ci_up = bootstrap_auc_ci(y_test.values, test_avg_proba)
    curve_label = f"Roc curve AUC={roc_auc:.3f} [{ci_low:.3f},{ci_up:.3f}]"
    plt.plot(fpr, tpr, color="#006699", linewidth=2, label=curve_label)
    # Chance-level diagonal for reference.
    plt.plot([0, 1], [0, 1], "--", color="black")
    plt.xlabel("False Positive Rate", fontsize=14)
    plt.ylabel("True Positive Rate", fontsize=14)
    if ds == 'BCNB':
        title = f'Internal Validation Cohort (n={len(y_test)})'
    else:
        title = f'External Validation Cohort (n={len(y_test)})'
    plt.title(title, fontsize=14)
    plt.legend(loc="lower right", fontsize=12)
    plt.grid(True)
    plt.savefig(opj(save_path, f"test_auc.pdf"))


def plot_model_prob_distribution():
    """Plot per-class empirical CDFs of the predicted probabilities and save a PDF.

    Draws one ECDF curve per outcome group (ALNM vs. Non-ALNM) plus a
    vertical dashed line at the Youden-optimal decision threshold.
    Removed: a fully commented-out violin-plot variant and its now-unused
    function-local seaborn import (seaborn is imported at module level anyway).
    """
    df_plot = pd.DataFrame({
        "Predicted Probability": test_avg_proba,
        "Status": ["Non-ALNM" if y == 0 else "ALNM" for y in y_test]
    })

    from statsmodels.distributions.empirical_distribution import ECDF
    plt.subplots(figsize=(6, 5))
    for status, group in df_plot.groupby("Status"):
        ecdf = ECDF(group["Predicted Probability"])
        plt.plot(ecdf.x, ecdf.y, label=status)
    plt.xlabel("Predicted Probability", fontsize=14)
    plt.ylabel("Cumulative Probability", fontsize=14)
    title = f'Internal Validation Cohort (n={len(y_test)})' if ds == 'BCNB' else f'External Validation Cohort (n={len(y_test)})'
    plt.title(title, fontsize=14)
    # Mark the operating point used elsewhere to binarize predictions.
    plt.axvline(best_threshold_by_youden, color="black", linestyle="--", label=f"Threshold={best_threshold_by_youden:.3f}")
    plt.legend()
    plt.grid(True, linestyle="--", alpha=0.7)
    plt.savefig(opj(save_path, f"cdf.pdf"))

def find_useful_range(df):
    """Return the threshold range where the model's net benefit beats both
    reference strategies ("Treat all" and "Treat none").

    Parameters
    ----------
    df : pd.DataFrame
        Must contain columns ``threshold``, ``net_benefit_model``,
        ``net_benefit_all`` and ``net_benefit_none``.

    Returns
    -------
    tuple or None
        ``(min_threshold, max_threshold)`` over the superior region,
        or ``None`` when the model is never superior.
    """
    beats_all = df["net_benefit_model"] > df["net_benefit_all"]
    beats_none = df["net_benefit_model"] > df["net_benefit_none"]
    useful = df["threshold"][beats_all & beats_none]

    if useful.empty:
        return None
    return useful.min(), useful.max()

def plot_dca():
    """Run decision-curve analysis on the test set, save the plot, and
    return the per-threshold net-benefit table.

    Returns
    -------
    pd.DataFrame
        Columns: ``threshold``, ``net_benefit_model``, ``net_benefit_all``,
        ``net_benefit_none`` — one row per evaluated threshold.
    """
    plt.figure(figsize=(7, 5))
    plot_thresholds = np.linspace(0.01, 0.99, 100)
    n = len(y_test)
    prevalence = sum(y_test) / n  # fraction of positives in the sample

    net_benefit_model = []
    net_benefit_all = []
    net_benefit_none = []

    for pt in plot_thresholds:
        y_pred = (test_avg_proba >= pt).astype(int)
        # labels=[0, 1] guarantees a 2x2 matrix even if, at extreme
        # thresholds, y_pred contains only one class.
        tn, fp, fn, tp = confusion_matrix(y_test, y_pred, labels=[0, 1]).ravel()
        net_benefit = (tp / n) - (fp / n) * (pt / (1 - pt))
        net_benefit_model.append(net_benefit)
        net_benefit_all.append(prevalence - (1 - prevalence) * (pt / (1 - pt)))
        net_benefit_none.append(0)

    plt.plot(plot_thresholds, net_benefit_model, label="Ensemble Model")
    # Reference strategies: treat everyone / treat no one.
    plt.plot(plot_thresholds, net_benefit_all, linestyle='--', color='red', label='Treat All')
    plt.plot(plot_thresholds, net_benefit_none, linestyle='--', color='black', label='Treat None')
    plt.ylim(-0.1, 0.4)
    plt.xlim(0, 1)
    plt.xlabel('Threshold Probability')
    plt.ylabel('Net Benefit')
    # Fix: title previously said "Internal Cohort" unconditionally; follow
    # the ds-dependent wording the other plots in this file use.
    cohort = 'Internal' if ds == 'BCNB' else 'External'
    plt.title(f'Decision Curve Analysis On {cohort} Cohort')
    plt.legend()
    plt.grid(True)

    dca_plot_path = opj(save_path, 'dca.pdf')
    plt.savefig(dca_plot_path, dpi=300, bbox_inches='tight')
    plt.close()
    return pd.DataFrame({
        "threshold": plot_thresholds,
        "net_benefit_model": net_benefit_model,
        "net_benefit_all": net_benefit_all,
        "net_benefit_none": net_benefit_none
    })


from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
def plot_confusion_matrix():
    """Plot the test-set confusion matrix at the Youden threshold and save it.

    NOTE(review): this definition is shadowed by a later function of the
    same name in this file, so only the later one is actually invoked —
    consider removing or renaming one of them.
    """
    y_pred = (test_avg_proba >= best_threshold_by_youden).astype(int)

    cm = confusion_matrix(y_test, y_pred)
    # Removed: specificity/sensitivity were computed here but never used.

    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=["Negative", "Positive"])
    fig, ax = plt.subplots(figsize=(5, 5))
    disp.plot(cmap=plt.cm.Blues, values_format="d", ax=ax, colorbar=False)
    plt.title(f"Confusion Matrix (Threshold={best_threshold_by_youden:.3f})")
    cm_plot_path = opj(save_path, 'confusion_matrix.pdf')
    plt.savefig(cm_plot_path, dpi=300, bbox_inches="tight")
    plt.close(fig)  # release the figure so repeated calls don't accumulate open figures
   

def plot_calibration_curve():
    """Plot the reliability (calibration) curve of the ensemble model and save it.

    Fixes: the model curve had no legend label (only the reference line
    appeared in the legend), and the title claimed "All Models" while a
    single curve is drawn.
    """
    from sklearn.calibration import calibration_curve

    fig_cal, ax_cal = plt.subplots(figsize=(7, 5))

    # Bin predictions uniformly and compare predicted vs. observed frequency.
    prob_true, prob_pred = calibration_curve(y_test, test_avg_proba, n_bins=10, strategy='uniform')

    # Model curve, labeled consistently with the DCA plot.
    ax_cal.plot(prob_pred, prob_true, marker='o', label='Ensemble Model')

    # Reference line: perfectly calibrated predictions.
    ax_cal.plot([0, 1], [0, 1], linestyle='--', color='gray', label='Perfect Calibration')

    ax_cal.set_xlabel("Predicted Probability")
    ax_cal.set_ylabel("True Fraction of Positives")
    ax_cal.set_title("Calibration Curve")
    ax_cal.legend(loc="lower right")
    ax_cal.grid(True)

    calibration_plot_path = opj(save_path, 'calibration_curve.pdf')
    fig_cal.savefig(calibration_plot_path, dpi=300, bbox_inches='tight')
    plt.close(fig_cal)


import seaborn as sns
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix():
    """Render the test-set confusion matrix at the Youden threshold as a heatmap PDF."""
    predictions = (test_avg_proba >= best_threshold_by_youden).astype(int)
    matrix = confusion_matrix(y_test, predictions)

    plt.figure(figsize=(6, 5))
    class_names = ["Negative", "Positive"]
    sns.heatmap(
        matrix,
        annot=True,
        fmt="d",
        cmap="Blues",
        xticklabels=class_names,
        yticklabels=class_names,
    )
    plt.xlabel("Predicted Label")
    plt.ylabel("True Label")
    plt.tight_layout()
    plt.savefig(opj(save_path, 'confusion_matrix.pdf'), dpi=300, bbox_inches='tight')
    plt.close()

if __name__ == '__main__':
    md(final_result_path)
    # Load signature scores and the trained ensemble model.
    data = pd.read_csv(signature_score_csv_path)
    ensemble_model = joblib.load(opj(ensemble_model_weight_path, 'model.joblib'))
    for ds in data_set:
        if ds == 'BCNB':
            # Internal cohort: reuse the original train/test split.
            dp = DataSplitUtil(split_random_state_list[0])
            _, test_df = dp.get_train_test_df(data)
            y_test = test_df[target_column]
            X_test = test_df.drop(columns=exclude_columns)

            # Positive-class probabilities; globals consumed by the plot functions.
            test_avg_proba = ensemble_model.predict_proba_by_dif(X_test)[:, 1]
            save_path = opj(final_result_path, ds)
            md(save_path)
            plot_auc()
            plot_model_prob_distribution()
            print(">> 模型优于 Treat All 和 Treat None 的区间：", find_useful_range(plot_dca()))
            plot_calibration_curve()
            # Fix: plot_confusion_matrix() was called twice back-to-back;
            # both calls resolved to the same (later) definition and wrote
            # the same file, so one call suffices.
            plot_confusion_matrix()
        else:
            print('QL')