
from matplotlib import pyplot as plt
import warnings
import seaborn as sns

from utils.data_split import *
warnings.filterwarnings("ignore")
import statsmodels.api as sm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
import numpy as np
# ======================
# 1. Load the training and test sets
# ======================
# Merge per-patient signature scores with the clinical table; keys are the
# patient ID and the (shared) ALN-status label column.
signature_data = pd.read_csv(signature_score_csv_path)
clinic_data = pd.read_csv(opj(base_path,'data/original_data/BCNB/Clinic/patient-clinical-data-process.csv'))
df = pd.merge(signature_data,clinic_data,on=['Patient_ID','ALN status'])
# Train/test split driven by the first configured random seed.
dp = DataSplitUtil(split_random_state=split_random_state_list[0])
df_train,df_test= dp.get_train_test_df(df)
# ======================
# 2. Subgroup analysis helper
# ======================
def run_subgroup_auc(df_train, df_test, score_cols, group_col, group_val, label_col='ALN status', standardize=False):
    """Fit a logistic model on one clinical subgroup and report AUC and odds ratios.

    Parameters
    ----------
    df_train, df_test : pandas.DataFrame
        Tables containing the score columns, ``group_col`` and ``label_col``.
    score_cols : str or list of str
        Feature column name(s) used as model inputs.
    group_col : str
        Clinical column that defines the subgroup.
    group_val : callable
        Receives the whole ``group_col`` Series and returns a boolean mask
        selecting the subgroup's rows.
    label_col : str
        Binary outcome column (1 = ALN metastasis).
    standardize : bool
        If True, z-score the features (fit on the training subgroup only).

    Returns
    -------
    dict with AUC / OR / CI / p metrics on success, or an explanatory string
    when a subgroup does not contain both outcome classes.
    """
    # Normalize score_cols into a list of feature names.
    features = [score_cols] if isinstance(score_cols, str) else list(score_cols)

    # --- subgroup selection; both classes must be present to fit/evaluate ---
    train_sub = df_train[group_val(df_train[group_col])]
    if train_sub[label_col].nunique() < 2:
        return f"🚫 训练集中的亚组 {group_col} 不含足够多类别，无法训练"

    test_sub = df_test[group_val(df_test[group_col])]
    if test_sub[label_col].nunique() < 2:
        return f"🚫 测试集中的亚组 {group_col} 不含足够多类别，无法评估"

    n_alnm = int((train_sub[label_col] == 1).sum())
    n_non_alnm = int((train_sub[label_col] == 0).sum())

    # --- features (optionally standardized) ---
    X_train = train_sub[features].copy()
    X_test = test_sub[features].copy()
    if standardize and features:
        # Wrap the scaled arrays back into DataFrames so statsmodels keeps
        # the feature names (a bare ndarray would break the OR mapping below).
        scaler = StandardScaler()
        X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=features, index=X_train.index)
        X_test = pd.DataFrame(scaler.transform(X_test), columns=features, index=X_test.index)

    # --- sklearn logistic regression: test-set AUC ---
    clf = LogisticRegression()
    clf.fit(X_train, train_sub[label_col])
    y_score = clf.predict_proba(X_test)[:, 1]
    auc = roc_auc_score(test_sub[label_col], y_score)

    # --- statsmodels logistic regression: OR, CI, p (on the training subgroup) ---
    sm_model = sm.Logit(train_sub[label_col], sm.add_constant(X_train)).fit(disp=0)
    coef = sm_model.params.iloc[1:]        # drop the intercept row
    conf = sm_model.conf_int().iloc[1:]
    p_values = sm_model.pvalues.iloc[1:]

    or_val = np.exp(coef)
    or_lower = np.exp(conf[0])
    or_upper = np.exp(conf[1])

    # Single feature -> scalar metrics; several features -> per-feature dicts.
    if len(features) == 1:
        or_out, lower_out = or_val.iloc[0], or_lower.iloc[0]
        upper_out, p_out = or_upper.iloc[0], p_values.iloc[0]
    else:
        or_out = dict(zip(features, or_val))
        lower_out = dict(zip(features, or_lower))
        upper_out = dict(zip(features, or_upper))
        p_out = dict(zip(features, p_values))

    return {
        'Signature': 'total' if len(features) > 1 else features[0],
        'N_train': len(train_sub),
        'N_test': len(test_sub),
        'AUC': auc,
        'OR': or_out,
        'p': p_out,
        'lower': lower_out,
        'upper': upper_out,
        'N_non_ALNM': n_non_alnm,
        'N_ALNM': n_alnm,
    }


# Overall (non-subgroup) analysis
def compute_overall_or(df_train, df_test, score_cols, label_col='ALN status', standardize=False):
    """Compute overall OR, CI, p-value and test-set AUC for a signature.

    Parameters
    ----------
    df_train, df_test : pandas.DataFrame
        Tables containing the score columns and ``label_col``.
    score_cols : str or list of str
        Feature column name(s) used as model inputs.
    label_col : str
        Binary outcome column (1 = ALN metastasis).
    standardize : bool
        If True, z-score the features (fit on the training set only).

    Returns
    -------
    dict — scalar OR/CI/p for a single feature, per-feature dicts otherwise
    (same contract as ``run_subgroup_auc``).
    """
    # Normalize score_cols into a list of feature names.
    features = [score_cols] if isinstance(score_cols, str) else list(score_cols)

    n_alnm = int((df_train[label_col] == 1).sum())
    n_non_alnm = int((df_train[label_col] == 0).sum())

    # --- features (optionally standardized, consistent with run_subgroup_auc) ---
    X_train = df_train[features].copy()
    X_test = df_test[features].copy()
    if standardize and features:
        # Keep DataFrame structure so statsmodels retains the feature names.
        scaler = StandardScaler()
        X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=features, index=X_train.index)
        X_test = pd.DataFrame(scaler.transform(X_test), columns=features, index=X_test.index)

    # --- sklearn logistic regression: test-set AUC ---
    clf = LogisticRegression()
    clf.fit(X_train, df_train[label_col])
    y_score = clf.predict_proba(X_test)[:, 1]
    auc = roc_auc_score(df_test[label_col], y_score)

    signature_name = 'total' if len(features) > 1 else features[0]

    # --- statsmodels logistic regression: OR, CI, p ---
    sm_model = sm.Logit(df_train[label_col], sm.add_constant(X_train)).fit(disp=0)
    coef = sm_model.params.iloc[1:]        # drop the intercept row
    conf = sm_model.conf_int().iloc[1:]
    p_values = sm_model.pvalues.iloc[1:]

    or_val = np.exp(coef)
    or_lower = np.exp(conf[0])
    or_upper = np.exp(conf[1])

    # Single feature -> scalar metrics; several features -> per-feature dicts.
    if len(features) == 1:
        or_out, lower_out = or_val.iloc[0], or_lower.iloc[0]
        upper_out, p_out = or_upper.iloc[0], p_values.iloc[0]
    else:
        or_out = dict(zip(features, or_val))
        lower_out = dict(zip(features, or_lower))
        upper_out = dict(zip(features, or_upper))
        p_out = dict(zip(features, p_values))

    return {
        'Signature': signature_name,
        'N_train': len(df_train),
        'N_test': len(df_test),
        'AUC': auc,
        'OR': or_out,
        'p': p_out,
        'lower': lower_out,
        'upper': upper_out,
        'N_non_ALNM': n_non_alnm,
        'N_ALNM': n_alnm,
    }

# Signatures to evaluate: four single-score models plus one combined model
# (the nested list) built from all four score columns.
signature_list = ['Collagen', 'Region', 'Nuclei', 'Clinic',['Collagen', 'Region', 'Nuclei', 'Clinic']]
# Subgroup definitions: {clinical column: {category label: mask function}}.
# Each lambda receives the whole column as a pandas Series and must return
# a boolean mask selecting that category's rows.
subgroup_conditions = {
    'Age(years)': {
        '<=40': lambda x: x <= 40,
        '>40': lambda x: x > 40
    },
    'Tumour Size(cm)': {
        '<=2cm': lambda x: x <= 2,
        '>2cm': lambda x: x > 2
    },
    'Tumour Type': {
        'IDC': lambda x: x == 'Invasive ductal carcinoma',
        'Other': lambda x: x.isin(['Invasive lobular carcinoma', 'Other type'])
    },
    'Molecular subtype': {
        'Luminal A': lambda x: x == 'Luminal A',
        'Luminal B': lambda x: x == 'Luminal B',
        'HER2+': lambda x: x == 'HER2(+)',
        'TNBC': lambda x: x == 'Triple negative'
    }
}

subgroup_results = []

# Evaluate every signature within every subgroup; run_subgroup_auc returns
# a string (not a dict) when a subgroup lacks both outcome classes, and
# such subgroups are silently skipped here.
for sig in signature_list:
    for group_var, group_dict in subgroup_conditions.items():
        for name, cond in group_dict.items():
            r = run_subgroup_auc(
                df_train, df_test,
                score_cols=sig,
                group_col=group_var,
                group_val=cond
            )
            if isinstance(r, dict):
                r['Group'] = f"{group_var}:{name}"
                subgroup_results.append(r)

# Whole-cohort (non-subgroup) metrics for each signature.
overall_results = []
for sig in signature_list:
    r = compute_overall_or(df_train,df_test,score_cols=sig)
    r['Group'] = 'Overall:Overall'
    overall_results.append(r)

# Merge all subgroup and overall results
final_results = pd.DataFrame(subgroup_results + overall_results)

# Convert to DataFrame (final_results is already a DataFrame; this makes a copy)
results_df = pd.DataFrame(final_results)


# Ensure the result directory exists, then persist the raw metric table.
md(subgroup_analysis_result_path)
results_df.to_csv(opj(subgroup_analysis_result_path,'signature_single_total_metric.csv'))

# 'Group' is "<variable>:<category>"; keep the variable part for grouping.
results_df['Subgroup_Var'] = results_df['Group'].apply(lambda x: x.split(':')[0])

# One AUC bar chart per subgroup variable: bars grouped by category,
# coloured by signature, saved as a PDF under barplot/.
subgroup_vars = results_df['Subgroup_Var'].unique()
for var in subgroup_vars:
    plt.figure(figsize=(10, 6))
    sub_df = results_df[results_df['Subgroup_Var'] == var].copy()
    # x-axis label: "<category> (n=<test-subgroup size>)"
    sub_df['Category'] = sub_df.apply(lambda row: f"{row['Group'].split(':')[1]} (n={int(row['N_test'])})", axis=1)

    ax = sns.barplot(data=sub_df, x='Category', y='AUC', hue='Signature')

    # Annotate each bar with its AUC value
    for container in ax.containers:
        ax.bar_label(container, fmt='%.2f', label_type='edge', fontsize=9, padding=2)

    plt.ylim(0.2, 1.2)
    plt.title(f"AUC of Signatures by Categories in Subgroup: {var}")
    plt.xticks(rotation=0)
    plt.tight_layout()

    # File name derived from the variable name with path-unfriendly chars removed
    safe_varname = var.replace(' ', '_').replace('(', '').replace(')', '')
    save_path = opj(opj(subgroup_analysis_result_path, 'barplot', f"{safe_varname}.pdf"))
    md(opd(save_path))
    plt.savefig(save_path, dpi=300)

# # 只取total模型的
# sub_df = results_df[results_df['Signature'] == 'total'].copy()


# # 1. 展开 OR 字符串字典
# def explode_or(row):
#     or_dict = row['OR']
#     upper_dict = row['upper']
#     lower_dict = row['lower']
#     p_dict = row['p']
#     rows = []
#     for sig, or_str in or_dict.items():
#         new_row = row.drop('OR').to_dict()
#         new_row['Signature'] = sig
#         new_row['OR'] = or_str
#         new_row['upper'] = upper_dict[sig]
#         new_row['lower'] = lower_dict[sig]
#         new_row['p'] = p_dict[sig]
#         rows.append(new_row)
#     return pd.DataFrame(rows)


# df_expanded = pd.concat(sub_df.apply(explode_or, axis=1).to_list(), ignore_index=True)

# # 2. 解析Group列，拆成两列：大类和子类，比如 'Age(years)' 和 '<=40'
# df_expanded[['Group_main', 'Group_sub']] = df_expanded['Group'].str.split(':', expand=True)


# # 3. 给每个Group_main创建一个空行（N_train 等字段全空）
# def insert_blank_rows_and_clear_main(df):
#     new_rows = []
#     overall_rows = None  # 用于暂存 Overall
#     for group_main, group_df in df.groupby('Group_main', sort=False):
#         if group_main != 'Overall':
#             # 插入空行标题（带 group_main 名字）
#             blank = pd.DataFrame([{col: '' for col in df.columns}])
#             blank['Group_sub'] = group_main
#             new_rows.append(blank)

#             # 清空 group_df 中的 Group_main 列值（仅数据部分）
#             group_df = group_df.copy()
#             group_df['Group_main'] = ''

#             new_rows.append(group_df)
#         else:
#             overall_rows = group_df

#     if overall_rows is not None:
#         new_rows.append(overall_rows)

#     return pd.concat(new_rows, ignore_index=True)

# # 4. 按照Group_main、Group_sub排序，Group_sub按常规顺序（你可以自定义顺序）
# order_dict = {
#     '<=40': 1,
#     '>40': 2,
#     '<=2cm': 1,
#     '>2cm': 2,
#     'IDC': 1,
#     'Other': 2,
#     'Luminal A': 1,
#     'Luminal B': 2,
#     'HER2': 3,
#     'TNBC': 4,
#     # 可根据你实际亚组补充顺序
# }


# def sort_key(x):
#     return order_dict.get(x, 99)

# df_expanded['Group_sub_order'] = df_expanded['Group_sub'].map(sort_key)

# df_expanded = df_expanded.sort_values(['Group_main', 'Group_sub_order'])
# # 5. 分组存 CSV，每个文件名用 Signature 命名
# for sig_name, sig_df in df_expanded.groupby('Signature'):
#     # 插入空行
#     sig_df = insert_blank_rows_and_clear_main(sig_df)
#     # 删除排序辅助列
#     sig_df = sig_df[[ 'Group_sub','N_non_ALNM','N_ALNM','OR','upper','lower','p']]

#     # 保存 CSV
#     save_path = opj(subgroup_analysis_result_path,'total_model',f'{sig_name}.csv')
#     r_save_path = opj(subgroup_analysis_result_path,f'{sig_name}.csv')
#     md(opd(save_path))
#     sig_df.to_csv(save_path, index=False, encoding='utf-8-sig')


# Insert a blank header row before each Group_main block (all data fields empty)
def insert_blank_rows_and_clear_main(df):
    """Insert a header row before each subgroup block; move 'Overall' last.

    For every Group_main other than 'Overall', a separator row is emitted
    first (every column set to '', with Group_sub carrying the group name),
    followed by that group's rows with their Group_main cleared. The
    'Overall' rows, when present, are appended after all other groups.
    """
    pieces = []
    deferred_overall = None  # hold the Overall block until the end
    for main_name, chunk in df.groupby('Group_main', sort=False):
        if main_name == 'Overall':
            deferred_overall = chunk
            continue
        # Header/separator row carrying the group name in Group_sub.
        header = pd.DataFrame([dict.fromkeys(df.columns, '')])
        header['Group_sub'] = main_name
        # Data rows keep everything except the now-redundant Group_main.
        pieces.append(header)
        pieces.append(chunk.assign(Group_main=''))

    if deferred_overall is not None:
        pieces.append(deferred_overall)

    return pd.concat(pieces, ignore_index=True)

# Display order of subgroup categories within each main group (lower = earlier);
# categories not listed here fall back to rank 99 and sort last.
order_dict = {
    '<=40': 1,
    '>40': 2,
    '<=2cm': 1,
    '>2cm': 2,
    'IDC': 1,
    'Other': 2,
    'Luminal A': 1,
    'Luminal B': 2,
    # Fix: the subgroup label generated upstream is 'HER2+' (see
    # subgroup_conditions), so the bare 'HER2' key never matched and HER2+
    # rows sorted last with rank 99. Keep the old key for compatibility.
    'HER2': 3,
    'HER2+': 3,
    'TNBC': 4,
    # Extend with additional subgroup labels as needed
}

def sort_key(x):
    """Return the display rank for a subgroup label (99 when unknown)."""
    return order_dict.get(x, 99)


# For each single-signature model, build a forest-plot-style table (header
# row per subgroup variable, categories beneath it, 'Overall' last) and
# save it under total_model/<signature>.csv.
for sig_name in ['Collagen', 'Region', 'Nuclei', 'Clinic']:
    sub_df = results_df[results_df['Signature'] == sig_name].copy()


# Split 'Group' ("main:sub") into two columns, e.g. 'Age(years)' and '<=40'
    sub_df[['Group_main', 'Group_sub']] = sub_df['Group'].str.split(':', expand=True)

    # Helper column giving the within-group display order of each category.
    sub_df['Group_sub_order'] = sub_df['Group_sub'].map(sort_key)

    sub_df = sub_df.sort_values(['Group_main', 'Group_sub_order'])

    sig_df = insert_blank_rows_and_clear_main(sub_df)
    # Drop the sorting helper column; keep only the table columns.
    sig_df = sig_df[[ 'Group_sub','N_non_ALNM','N_ALNM','OR','upper','lower','p']]

    # Save CSV (utf-8-sig so Excel renders non-ASCII correctly)
    save_path = opj(subgroup_analysis_result_path,'total_model',f'{sig_name}.csv')
    md(opd(save_path))
    sig_df.to_csv(save_path, index=False, encoding='utf-8-sig')