#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
综合生存分析工具
功能：
1. 以OS和PFS为预测目标，进行独立预测因素分析，计算HR值、CI值和P值
2. 对所有可能影响因素完成单因素分析，计算相关性和P值，对P<0.05的因素进行ROC分析
3. 对影响因素进行多因素分析，计算相关性和P值，对P<0.05的因素进行ROC分析
4. 汇总分析结果，设计联合模型找到最佳联合模型
"""

import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random

# 设置随机种子以确保结果可重复性
np.random.seed(42)
random.seed(42)
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve, auc, classification_report, precision_recall_curve, average_precision_score
from sklearn.model_selection import StratifiedKFold, train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from itertools import combinations
from scipy import stats
from statsmodels.stats.multitest import multipletests
from lifelines import CoxPHFitter, KaplanMeierFitter
from lifelines.statistics import logrank_test
import warnings

# 忽略警告
warnings.filterwarnings('ignore')

# 设置matplotlib支持中文显示
plt.rcParams["font.family"] = ["Microsoft YaHei", "SimSun", "FangSong"]
plt.rcParams["axes.unicode_minus"] = False  # 解决负号显示问题

# 设置输出目录
output_dir = "Comprehensive_Survival_Results"
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# 加载和处理数据
def load_and_process_data(file_path="IV期肺癌.csv"):
    """加载并预处理数据"""
    print(f"\n=== 加载数据: {file_path} ===")
    try:
        # 尝试使用不同的编码格式
        encodings = ['utf-8-sig', 'gbk', 'latin1']
        data = None
        
        for encoding in encodings:
            try:
                data = pd.read_csv(file_path, encoding=encoding)
                print(f"成功使用{encoding}编码加载文件")
                break
            except UnicodeDecodeError:
                print(f"尝试使用{encoding}编码失败")
                continue
        
        if data is None:
            raise ValueError("无法使用任何支持的编码格式读取文件")
        
        print(f"成功加载 {len(data)} 行数据，包含 {len(data.columns)} 个列")
        
        # 基础数据清洗：处理空字符串和非数值型值
        print("\n=== 处理数据中的空字符串和非数值 ===")
        for col in data.columns:
            # 替换空字符串为NaN
            if data[col].dtype == 'object':
                data[col] = data[col].replace(' ', np.nan)
                data[col] = data[col].replace('', np.nan)
            
            # 尝试将列转换为数值型
            try:
                data[col] = pd.to_numeric(data[col], errors='coerce')
            except:
                print(f"警告: 列 '{col}' 无法转换为数值型")
        
        # 确保关键生存变量是数值型
        survival_vars = ['是否死亡', 'IV期OS', 'IV期PFS', '是否PD', 'PD时间', '死亡时间']
        for var in survival_vars:
            if var in data.columns:
                try:
                    data[var] = pd.to_numeric(data[var])
                except:
                    print(f"警告: 无法将'{var}'转换为数值型")
        
        return data
    except Exception as e:
        print(f"加载数据时出错: {str(e)}")
        exit()

def create_grouped_variables(data, numeric_vars, outcome_var=None):
    """
    对数值变量按中位值和最佳截断值进行分组
    
    参数:
    data: pandas DataFrame，输入数据
    numeric_vars: list，要分组的数值变量列表
    outcome_var: str，结局变量（用于计算最佳截断值）
    
    返回:
    data: pandas DataFrame，添加了分组变量的数据
    grouped_vars: list，新创建的分组变量名称列表
    """
    grouped_vars = []
    best_thresholds = {}
    
    print(f"\n=== 对 {len(numeric_vars)} 个数值变量进行分组处理 ===")
    
    # 1. 先计算所有变量的最佳截断值（如果提供了结局变量）
    if outcome_var and outcome_var in data.columns:
        print("计算最佳截断值...")
        for var in numeric_vars:
            try:
                # 获取有效数据
                valid_data = data[[var, outcome_var]].dropna()
                if len(valid_data) >= 30:
                    # 计算不同阈值的Youden指数，找到最佳截断值
                    fpr, tpr, thresholds = roc_curve(valid_data[outcome_var], valid_data[var])
                    # 计算Youden指数 (sensitivity + specificity - 1)
                    youden_index = tpr - fpr
                    
                    # 改进：处理多个相同最大约登指数的情况
                    max_y = np.max(youden_index)
                    best_indices = np.where(youden_index >= max_y - 1e-10)[0]  # 考虑浮点误差
                    
                    # 选择中间位置的阈值
                    if len(best_indices) > 1:
                        optimal_idx = best_indices[len(best_indices) // 2]
                    else:
                        optimal_idx = best_indices[0]
                    
                    optimal_threshold = thresholds[optimal_idx]
                    
                    # 改进：添加截断值合理性检查
                    if np.isinf(optimal_threshold) or np.isnan(optimal_threshold):
                        # 如果得到无穷大或NaN，使用中位数作为替代
                        optimal_threshold = valid_data[var].median()
                        print(f"{var}: 修正异常截断值为中位数: {optimal_threshold:.4f}")
                    
                    best_thresholds[var] = optimal_threshold
                    print(f"{var} 最佳截断值: {optimal_threshold:.4f}")
            except Exception as e:
                print(f"计算 {var} 最佳截断值时出错: {str(e)}")
    
    # 2. 为每个变量创建分组变量
    for var in numeric_vars:
        try:
            valid_data = data[var].dropna()
            
            # 检查变量是否已经是分类变量（唯一值数量较少）
            # 对于二元/分类变量，不需要进行分组处理
            num_unique_values = valid_data.nunique()
            if num_unique_values <= 2:
                print(f"{var} 是二元/分类变量（唯一值数量: {num_unique_values}），跳过分组处理")
                continue
                
            if len(valid_data) >= 10:
                # 按中位值分组
                median_value = valid_data.median()
                median_group_name = f"{var}_median_group"
                data[median_group_name] = 0
                data.loc[data[var] > median_value, median_group_name] = 1
                grouped_vars.append(median_group_name)
                print(f"{median_group_name}: 中位值 = {median_value:.4f}, 0=≤中位值, 1=>中位值")
                
                # 按最佳截断值分组（如果有）
                if var in best_thresholds:
                    threshold_group_name = f"{var}_threshold_group"
                    data[threshold_group_name] = 0
                    data.loc[data[var] > best_thresholds[var], threshold_group_name] = 1
                    grouped_vars.append(threshold_group_name)
                    print(f"{threshold_group_name}: 截断值 = {best_thresholds[var]:.4f}, 0=≤截断值, 1=>截断值")
        except Exception as e:
            print(f"创建 {var} 分组变量时出错: {str(e)}")
    
    print(f"\n成功创建 {len(grouped_vars)} 个分组变量")
    return data, grouped_vars

# 自动发现数值指标
def discover_numeric_indicators(data):
    """自动发现数据中的数值指标"""
    numeric_cols = []
    excluded_cols = ['Subject', '姓名', '放疗开始日期', '死亡时间', '确诊至当前', '放疗结束日期', '确诊日期', '追踪方式']
    
    for col in data.columns:
        # 跳过排除列和非数值列
        if col in excluded_cols or data[col].dtype == 'object':
            continue
        
        # 尝试转换为数值类型
        try:
            pd.to_numeric(data[col])
            numeric_cols.append(col)
        except:
            continue
    
    print(f"\n自动发现了 {len(numeric_cols)} 个潜在的数值指标")
    return numeric_cols

# 数据筛选函数
def filter_indicators(data, indicators, missing_threshold=0.3, variance_threshold=0.01, exclude_vars=None):
    """基于缺失值比例和变异度筛选指标"""
    # 确保exclude_vars是列表
    if exclude_vars is None:
        exclude_vars = []
    elif isinstance(exclude_vars, str):
        exclude_vars = [exclude_vars]
    
    # 首先排除指定的变量
    filtered_indicators = [ind for ind in indicators if ind not in exclude_vars]
    
    print(f"\n=== 对 {len(indicators)} 个指标进行数据筛选 ===")
    
    if exclude_vars:
        print(f"已排除指定变量: {', '.join(exclude_vars)}")
    
    # 进一步根据缺失值和变异度筛选
    final_filtered = []
    
    for indicator in filtered_indicators:
        # 计算缺失值比例
        missing_ratio = data[indicator].isna().mean()
        
        if missing_ratio > missing_threshold:
            print(f"指标 {indicator}: 缺失值比例过高 ({missing_ratio:.2f})，已排除")
            continue
        
        # 计算变异度（变异系数）
        valid_data = data[indicator].dropna()
        if len(valid_data) < 10:
            print(f"指标 {indicator}: 有效样本量不足，已排除")
            continue
        
        # 计算变异系数（标准差/均值）
        mean_val = valid_data.mean()
        if mean_val == 0:
            # 避免除以零
            cv = 0
        else:
            cv = valid_data.std() / abs(mean_val)
        
        if cv < variance_threshold:
            print(f"指标 {indicator}: 变异度过低 (CV={cv:.4f})，已排除")
            continue
        
        final_filtered.append(indicator)
    
    print(f"\n筛选后保留 {len(final_filtered)} 个指标")
    return final_filtered

# 独立预测因素分析 (Cox比例风险模型)
def cox_regression_analysis(data, indicators, time_var, event_var, output_suffix=""):
    """使用Cox比例风险模型进行独立预测因素分析"""
    print(f"\n=== 以{time_var}为时间变量，{event_var}为事件变量的Cox回归分析 ===")
    
    # 确保indicators中不包含time_var和event_var，避免重复列名
    unique_indicators = [ind for ind in indicators if ind != time_var and ind != event_var]
    
    # 准备适合CoxPHFitter的数据
    # 移除包含缺失值的行
    analysis_data = data[[time_var, event_var] + unique_indicators].dropna()
    print(f"用于分析的有效样本量: {len(analysis_data)}")
    
    # 标准化连续变量以获得可比的系数
    scaler = StandardScaler()
    scaled_data = analysis_data.copy()
    
    for col in unique_indicators:
        try:
            # 检查数据是否包含非数值
            if scaled_data[col].isna().all():
                print(f"警告: 指标 {col} 全为NaN值，跳过标准化")
                continue
            
            # 确保数据是数值类型
            if not np.issubdtype(scaled_data[col].dtype, np.number):
                print(f"警告: 指标 {col} 不是数值类型，尝试转换")
                scaled_data[col] = pd.to_numeric(scaled_data[col], errors='coerce')
                
                # 转换后再次检查
                if not np.issubdtype(scaled_data[col].dtype, np.number):
                    print(f"警告: 指标 {col} 转换失败，跳过标准化")
                    continue
            
            # 确保数据形状正确，避免维度不匹配错误
            values = scaled_data[col].values.reshape(-1, 1)
            # 检查数据长度
            if len(values) != len(scaled_data):
                print(f"警告: 指标 {col} 的数据长度 ({len(values)}) 与DataFrame长度 ({len(scaled_data)}) 不匹配")
                continue
            
            scaled_values = scaler.fit_transform(values)
            # 再次检查缩放后的数据长度
            if len(scaled_values) != len(scaled_data):
                print(f"警告: 指标 {col} 缩放后的数据长度 ({len(scaled_values)}) 与DataFrame长度 ({len(scaled_data)}) 不匹配")
                continue
            
            # 确保赋值前形状一致
            scaled_data[col] = scaled_values.flatten()
        except Exception as e:
            print(f"处理指标 {col} 时出错: {str(e)}")
            continue
    
    # 创建Cox比例风险模型
    cph = CoxPHFitter()
    
    # 结果存储
    cox_results = {}
    
    # 对每个指标进行单变量Cox回归
    print("\n单变量Cox回归分析结果:")
    print("=" * 100)
    print(f"{'指标':<20}{'HR':<10}{'95% CI':<20}{'p值':<10}{'结果'}")
    print("=" * 100)
    
    significant_indicators = []
    
    for indicator in unique_indicators:
        try:
            # 构建包含单个指标的Cox模型
            cph.fit(scaled_data[[time_var, event_var, indicator]], duration_col=time_var, event_col=event_var)
            
            # 提取HR、CI和p值
            hr = cph.summary.loc[indicator, 'exp(coef)']
            ci_lower = cph.summary.loc[indicator, 'exp(coef) lower 95%']
            ci_upper = cph.summary.loc[indicator, 'exp(coef) upper 95%']
            p_value = cph.summary.loc[indicator, 'p']
            
            # 存储结果
            cox_results[indicator] = {
                'hr': hr,
                'ci_lower': ci_lower,
                'ci_upper': ci_upper,
                'p_value': p_value
            }
            
            # 显示结果
            significance = "显著" if p_value < 0.05 else "不显著"
            print(f"{indicator:<20}{hr:10.3f}{f'({ci_lower:.3f}, {ci_upper:.3f})':<20}{p_value:10.4f}{significance}")
            
            if p_value < 0.05:
                significant_indicators.append(indicator)
        except Exception as e:
            print(f"指标 {indicator} 分析失败: {str(e)}")
    
    print(f"\n筛选出 {len(significant_indicators)} 个在单变量分析中显著的指标")
    
    # 保存单变量Cox回归结果
    save_cox_results(cox_results, f"univariate_cox_results_{output_suffix}.csv")
    
    # 生成森林图
    generate_cox_forest_plot(cox_results, f"univariate_cox_forest_plot_{output_suffix}")
    
    return cox_results, significant_indicators

# 多因素Cox回归分析
def multivariate_cox_analysis(data, indicators, time_var, event_var, output_suffix=""):
    """使用多因素Cox比例风险模型进行分析"""
    print(f"\n=== 以{time_var}为时间变量，{event_var}为事件变量的多因素Cox回归分析 ===")
    
    # 准备适合CoxPHFitter的数据
    analysis_data = data[[time_var, event_var] + indicators].dropna()
    print(f"用于分析的有效样本量: {len(analysis_data)}")
    
    # 标准化连续变量
    scaler = StandardScaler()
    scaled_data = analysis_data.copy()
    for col in indicators:
        scaled_data[col] = scaler.fit_transform(scaled_data[col].values.reshape(-1, 1))
    
    try:
        # 创建并拟合多因素Cox模型
        cph = CoxPHFitter()
        cph.fit(scaled_data, duration_col=time_var, event_col=event_var)
        
        # 打印模型结果
        print("\n多因素Cox回归分析结果:")
        print(cph.summary)
        
        # 存储结果
        multivariate_results = {}
        for indicator in indicators:
            if indicator in cph.summary.index:
                multivariate_results[indicator] = {
                    'hr': cph.summary.loc[indicator, 'exp(coef)'],
                    'ci_lower': cph.summary.loc[indicator, 'exp(coef) lower 95%'],
                    'ci_upper': cph.summary.loc[indicator, 'exp(coef) upper 95%'],
                    'p_value': cph.summary.loc[indicator, 'p']
                }
        
        # 保存多变量Cox回归结果
        save_cox_results(multivariate_results, f"multivariate_cox_results_{output_suffix}.csv")
        
        # 生成森林图
        generate_cox_forest_plot(multivariate_results, f"multivariate_cox_forest_plot_{output_suffix}")
        
        # 找出在多因素分析中仍然显著的指标
        significant_indicators = [ind for ind, res in multivariate_results.items() if res['p_value'] < 0.05]
        print(f"\n在多因素分析中仍显著的指标数量: {len(significant_indicators)}")
        
        return multivariate_results, significant_indicators
    except Exception as e:
        print(f"多因素Cox回归分析失败: {str(e)}")
        return {}, []

# 保存Cox回归结果
def save_cox_results(results, filename):
    """保存Cox回归分析结果到CSV文件"""
    if not results:
        return
    
    df = pd.DataFrame.from_dict(results, orient='index')
    df = df.reset_index().rename(columns={'index': '指标'})
    df['95% CI'] = df.apply(lambda row: f"{row['ci_lower']:.3f}-{row['ci_upper']:.3f}", axis=1)
    
    # 重排列顺序
    columns_order = ['指标', 'hr', '95% CI', 'p_value']
    df = df[columns_order]
    
    # 重命名列名
    df = df.rename(columns={
        'hr': '风险比(HR)',
        'p_value': 'p值'
    })
    
    # 按p值排序
    df = df.sort_values('p值')
    
    # 保存到CSV
    csv_path = os.path.join(output_dir, filename)
    df.to_csv(csv_path, index=False, encoding='utf-8-sig')
    print(f"Cox回归分析结果已保存至: {csv_path}")

# 生成Cox回归森林图
def generate_cox_forest_plot(results, filename):
    """生成Cox回归分析结果的森林图"""
    if not results:
        return
    
    # 准备数据
    plot_data = []
    for indicator, res in results.items():
        plot_data.append({
            'indicator': indicator,
            'hr': res['hr'],
            'ci_lower': res['ci_lower'],
            'ci_upper': res['ci_upper'],
            'p_value': res['p_value']
        })
    
    # 按HR值排序
    plot_data.sort(key=lambda x: x['hr'])
    
    # 创建森林图
    plt.figure(figsize=(10, len(plot_data) * 0.5 + 2))
    
    y_pos = np.arange(len(plot_data))
    
    # 绘制置信区间
    for i, item in enumerate(plot_data):
        # 计算置信区间误差
        err_lower = item['hr'] - item['ci_lower']
        err_upper = item['ci_upper'] - item['hr']
        
        # 根据p值设置颜色
        color = 'red' if item['p_value'] < 0.05 else 'blue'
        
        plt.errorbar(item['hr'], y_pos[i], 
                    xerr=[[err_lower], [err_upper]],
                    fmt='o', color=color, alpha=0.7, capsize=5)
        
        # 添加p值标签
        p_text = f"p={item['p_value']:.3f}"
        if item['p_value'] < 0.001:
            p_text = "p<0.001"
        plt.text(item['ci_upper'] + 0.05, y_pos[i], p_text, va='center', fontsize=8)
    
    # 添加垂直线表示无风险比（HR=1）
    plt.axvline(x=1, color='red', linestyle='--', alpha=0.5)
    
    # 设置图表属性
    plt.yticks(y_pos, [item['indicator'] for item in plot_data], fontsize=10)
    plt.xlabel('风险比(HR)', fontsize=12)
    plt.title('Cox回归分析森林图', fontsize=14, fontweight='bold')
    plt.grid(axis='x', linestyle='--', alpha=0.3)
    
    # 设置x轴为对数刻度以更好地显示HR
    plt.xscale('log')
    
    # 调整布局
    plt.tight_layout()
    
    # 保存图表
    output_path = os.path.join(output_dir, f'{filename}.pdf')
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.savefig(os.path.join(output_dir, f'{filename}.png'), dpi=300, bbox_inches='tight')
    plt.close()
    
    print(f"Cox回归森林图已保存至: {output_path}")

# 生成二元结局变量（用于ROC分析）
def create_binary_outcome(data, time_var, cutoff=24):
    """基于生存时间创建二元结局变量（例如：2年内是否发生事件）"""
    # 检查是否已经存在二元结局变量
    binary_var = f"{time_var}_2年"
    
    if binary_var not in data.columns:
        # 事件发生在cutoff个月内标记为1，否则为0
        data[binary_var] = 0
        
        # 对于有事件的患者，如果生存时间小于cutoff个月，标记为1
        event_occurred = data['是否死亡'] == 1 if '是否死亡' in data.columns else data[f'是否{time_var[:2]}'] == 1
        data.loc[event_occurred & (data[time_var] < cutoff), binary_var] = 1
        
        # 对于无事件的患者，如果随访时间大于等于cutoff个月，标记为0（存活）
        no_event = data['是否死亡'] == 0 if '是否死亡' in data.columns else data[f'是否{time_var[:2]}'] == 0
        data.loc[no_event & (data[time_var] >= cutoff), binary_var] = 0
        
        print(f"已创建二元结局变量 '{binary_var}'，用于表示是否在{cutoff}个月内发生事件")
    
    return data, binary_var

# 使用Delong方法计算AUC的95%置信区间
def calculate_auc_confidence_interval(y_true, y_score, roc_auc):
    """使用Delong方法计算AUC的95%置信区间"""
    # 简化实现，适用于大多数情况
    n1 = sum(y_true == 1)
    n2 = sum(y_true == 0)
    
    if roc_auc == 0 or roc_auc == 1:
        return 0, 1
    
    # 计算标准误
    q1 = roc_auc / (2 - roc_auc)
    q2 = 2 * roc_auc**2 / (1 + roc_auc)
    se = np.sqrt((roc_auc * (1 - roc_auc) + (n1 - 1) * q1 + (n2 - 1) * q2) / (n1 * n2))
    
    # 计算置信区间
    ci_lower = max(0, roc_auc - 1.96 * se)
    ci_upper = min(1, roc_auc + 1.96 * se)
    
    return ci_lower, ci_upper

# 计算单个指标的ROC指标
def calculate_single_roc(y_true, y_score, display_name):
    """计算单个指标的ROC指标"""
    # 移除缺失值
    valid_indices = ~np.isnan(y_true) & ~np.isnan(y_score)
    y_true_valid = y_true[valid_indices]
    y_score_valid = y_score[valid_indices]
    
    if len(y_true_valid) < 50:  # 样本量不足时跳过
        print(f"{display_name}: 有效样本量不足 ({len(y_true_valid)}), 跳过分析")
        return None
    
    try:
        # 检查是否为二分类变量（分组变量）
        unique_values = np.unique(y_score_valid)
        is_binary = len(unique_values) <= 2
        
        # 计算ROC曲线
        fpr, tpr, thresholds = roc_curve(y_true_valid, y_score_valid)
        roc_auc = auc(fpr, tpr)
        
        # 对于二分类变量（特别是分组变量），使用更合理的截断值处理
        if is_binary:
            print(f"{display_name}: 检测到二分类变量，使用中位数作为截断值参考")
            # 使用变量的中位数作为截断值
            best_threshold = np.median(y_score_valid)
            # 计算对应此截断值的敏感性和特异性
            y_pred = y_score_valid > best_threshold
            best_sensitivity = np.sum((y_pred == 1) & (y_true_valid == 1)) / np.sum(y_true_valid == 1)
            best_specificity = np.sum((y_pred == 0) & (y_true_valid == 0)) / np.sum(y_true_valid == 0)
        else:
            # 计算约登指数 (敏感性 + 特异性 - 1)
            j_scores = tpr - fpr
            
            # 改进1：处理多个相同最大值的情况
            max_j = np.max(j_scores)
            best_indices = np.where(j_scores >= max_j - 1e-10)[0]  # 考虑浮点误差
            
            # 选择最佳截断值：优先选择中间位置的阈值
            if len(best_indices) > 1:
                best_idx = best_indices[len(best_indices) // 2]
            else:
                best_idx = best_indices[0]
            
            best_threshold = thresholds[best_idx]
            best_sensitivity = tpr[best_idx]
            best_specificity = 1 - fpr[best_idx]
            
            # 改进2：添加截断值合理性检查
            if np.isinf(best_threshold) or np.isnan(best_threshold):
                # 如果得到无穷大或NaN，使用中位数作为替代
                best_threshold = np.median(y_score_valid)
                # 重新计算对应此阈值的敏感性和特异性
                y_pred = y_score_valid > best_threshold
                best_sensitivity = np.sum((y_pred == 1) & (y_true_valid == 1)) / np.sum(y_true_valid == 1)
                best_specificity = np.sum((y_pred == 0) & (y_true_valid == 0)) / np.sum(y_true_valid == 0)
                print(f"{display_name}: 修正异常截断值为中位数: {best_threshold:.3f}")
        
        # 计算95%置信区间
        ci_lower, ci_upper = calculate_auc_confidence_interval(y_true_valid, y_score_valid, roc_auc)
        
        result = {
            'auc': round(roc_auc, 3),
            'ci_lower': round(ci_lower, 3),
            'ci_upper': round(ci_upper, 3),
            'best_threshold': round(best_threshold, 3),
            'sensitivity': round(best_sensitivity, 3),
            'specificity': round(best_specificity, 3),
            'sample_size': len(y_true_valid),
            'fpr': fpr,
            'tpr': tpr
        }
        
        return result
    except Exception as e:
        print(f"计算 {display_name} 的ROC指标时出错: {str(e)}")
        return None

# 计算所有指标的ROC指标
def calculate_roc_metrics(data, indicators, outcome_var):
    """计算所有指标的ROC指标"""
    if indicators is None:
        # 如果没有提供指标列表，使用数据中的所有列
        indicators = data.columns.tolist()
        # 排除结局变量
        if outcome_var in indicators:
            indicators.remove(outcome_var)
    
    roc_results = {}  # 存储所有指标的ROC结果
    roc_curves_data = {}  # 存储ROC曲线数据用于绘图
    processed_indicators = []  # 记录成功处理的指标
    
    print(f"\n=== 计算 {len(indicators)} 个指标的ROC曲线分析 ===")
    print("="*50)
    
    y_true = data[outcome_var].values  # 获取结局变量的值
    
    for column_name in indicators:
        # 跳过已分组变量，因为它们已经是二分类的，不需要再计算最佳截断值
        if '_group' in column_name:
            print(f"跳过已分组变量: {column_name}")
            continue
            
        # 使用列名作为显示名
        display_name = column_name
        
        # 处理特殊情况的显示名
        if '_mean' in column_name:
            display_name = column_name.replace('_mean', '(平均)')
        elif '_weighted' in column_name:
            display_name = column_name.replace('_weighted', '(加权)')
            
        # 处理SII和PLR的显示名
        if column_name == 'SII_numeric':
            display_name = 'SII'
        elif column_name == 'PLR_numeric':
            display_name = 'PLR'
        
        # 获取指标值
        try:
            y_score = data[column_name].values
        except:
            print(f"无法获取指标 {column_name} 的值，跳过")
            continue
        
        # 计算ROC指标
        result = calculate_single_roc(y_true, y_score, display_name)
        
        if result is not None:
            roc_results[display_name] = result
            roc_curves_data[display_name] = {
                'fpr': result['fpr'],
                'tpr': result['tpr'],
                'auc': result['auc']
            }
            processed_indicators.append(column_name)
            
            print(f"\n{display_name} (有效样本: {result['sample_size']})")
    
    # 按AUC排序结果
    sorted_roc_results = dict(sorted(roc_results.items(), key=lambda x: x[1]['auc'], reverse=True))
    
    # 显示Top N个结果
    top_n = 10
    if sorted_roc_results:
        print(f"\n=== Top {min(top_n, len(sorted_roc_results))} 个指标的ROC分析结果 ===")
        for i, (name, result) in enumerate(list(sorted_roc_results.items())[:top_n], 1):
            print(f"{i}. {name}: AUC = {result['auc']:.3f} (95% CI: {result['ci_lower']:.3f}-{result['ci_upper']:.3f})")
        
        if len(sorted_roc_results) > top_n:
            print(f"... 以及其他 {len(sorted_roc_results) - top_n} 个指标")
    
    return sorted_roc_results, roc_curves_data, processed_indicators

# 生成ROC曲线图
def generate_roc_plot(roc_curves_data, outcome_name, top_n=10):
    """生成高质量的ROC曲线图，只显示AUC大于0.5且AUC最高的几个指标"""
    # 输入验证
    if not roc_curves_data or not isinstance(roc_curves_data, dict):
        print(f"警告: roc_curves_data为空或格式不正确，无法生成{outcome_name}的ROC曲线")
        return
    
    # 过滤出AUC大于0.5且包含必要键的指标
    auc_gt_05_curves = {}
    for name, curve_data in roc_curves_data.items():
        # 检查数据是否包含必要的键
        if isinstance(curve_data, dict) and all(key in curve_data for key in ['auc', 'fpr', 'tpr']) and curve_data['auc'] > 0.5:
            auc_gt_05_curves[name] = curve_data
    
    # 按AUC排序并选择前N个指标
    sorted_curves = sorted(auc_gt_05_curves.items(), key=lambda x: x[1]['auc'], reverse=True)
    top_curves = dict(sorted_curves[:top_n])
    
    # 如果没有AUC大于0.5的指标，提示用户
    if not top_curves:
        print(f"警告: 对于{outcome_name}，没有AUC大于0.5的指标")
        return
    
    plt.figure(figsize=(10, 8))
    
    # 添加对角线参考线
    plt.plot([0, 1], [0, 1], 'k--', lw=1, alpha=0.7)
    
    try:
        # 设置颜色循环，避免在指标数量为0时出错
        if len(top_curves) > 0:
            colors = plt.cm.Set1(np.linspace(0, 1, len(top_curves)))
            
            # 为每个指标绘制ROC曲线
            for i, (name, curve_data) in enumerate(top_curves.items()):
                # 确保fpr和tpr是有效的数组
                if isinstance(curve_data['fpr'], (list, np.ndarray)) and isinstance(curve_data['tpr'], (list, np.ndarray)):
                    plt.plot(curve_data['fpr'], curve_data['tpr'], lw=2,
                             label=f'{name} (AUC = {curve_data["auc"]:.3f})',
                             color=colors[i])
                else:
                    print(f"警告: 指标 {name} 的fpr或tpr数据格式不正确，跳过该指标")
    except Exception as e:
        print(f"绘制ROC曲线时出错: {str(e)}")
        # 清理并返回
        plt.close()
        return
    
    # 设置图表属性
    plt.xlim([-0.02, 1.02])
    plt.ylim([-0.02, 1.02])
    
    plt.xlabel('假阳性率 (1 - 特异性)', fontsize=14)
    plt.ylabel('真阳性率 (敏感性)', fontsize=14)
    plt.title(f'{outcome_name}预测的ROC曲线 (AUC>0.5的Top {len(top_curves)}指标)', fontsize=16, fontweight='bold')
    
    plt.legend(loc='lower right', fontsize=11)
    plt.grid(True, linestyle='--', alpha=0.7)
    
    # 优化布局
    plt.tight_layout()
    
    # 保存图表 - 使用实际筛选后的指标数量作为文件名的一部分
    actual_count = len(top_curves)
    pdf_output_path = os.path.join(output_dir, f'roc_curves_{outcome_name}_aucgt05_top{actual_count}.pdf')
    png_output_path = os.path.join(output_dir, f'roc_curves_{outcome_name}_aucgt05_top{actual_count}.png')
    
    try:
        # 检查输出目录是否存在
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
            print(f"已创建输出目录: {output_dir}")
        
        # 保存为PDF格式
        plt.savefig(pdf_output_path, dpi=300, bbox_inches='tight')
        print(f"ROC曲线图已保存至PDF格式: {pdf_output_path}")
        
        # 保存为PNG格式
        plt.savefig(png_output_path, dpi=300, bbox_inches='tight')
        print(f"ROC曲线图已保存至PNG格式: {png_output_path}")
        
    except Exception as e:
        print(f"保存ROC曲线图时出错: {str(e)}")
    finally:
        # 确保关闭图表，释放资源
        plt.close()

# 生成截断值分析表格
def generate_threshold_table(roc_results, outcome_name):
    """生成截断值分析表格"""
    # 转换为DataFrame
    df = pd.DataFrame.from_dict(roc_results, orient='index')
    df = df.reset_index().rename(columns={'index': '指标'})
    
    # 按照AUC降序排序
    df = df.sort_values('auc', ascending=False)
    
    # 重排列顺序
    columns_order = ['指标', 'auc', 'ci_lower', 'ci_upper', 'best_threshold', 'sensitivity', 'specificity', 'sample_size']
    df = df[columns_order]
    
    # 添加95% CI列
    df['95% CI'] = df.apply(lambda row: f"{row['ci_lower']}-{row['ci_upper']}", axis=1)
    
    # 删除单独的CI列
    df = df.drop(['ci_lower', 'ci_upper'], axis=1)
    
    # 重命名列名以适合发表
    df = df.rename(columns={
        'auc': 'AUC',
        'best_threshold': '最佳截断值',
        'sensitivity': '敏感性',
        'specificity': '特异性',
        'sample_size': '样本量'
    })
    
    # 保存为CSV
    csv_path = os.path.join(output_dir, f'threshold_analysis_{outcome_name}.csv')
    df.to_csv(csv_path, index=False, encoding='utf-8-sig')
    print(f"截断值分析表格已保存至: {csv_path}")

# 创建复合指标
def create_composite_indicators(data, base_indicators):
    """基于基础指标创建复合指标"""
    composite_indicators = {}
    
    # 标准化基础指标以创建复合指标
    scaler = StandardScaler()
    scaled_data = pd.DataFrame()
    
    # 为每个基础指标创建标准化版本
    for col in base_indicators:
        try:
            # 只处理有足够有效值的列
            valid_data = data[col].dropna()
            if len(valid_data) > 50:
                # 应用标准化
                scaled_series = pd.Series(np.nan, index=data.index)
                scaled_values = scaler.fit_transform(valid_data.values.reshape(-1, 1)).flatten()
                scaled_series[valid_data.index] = scaled_values
                scaled_data[f'{col}_scaled'] = scaled_series
        except Exception as e:
            print(f"无法标准化 {col}: {str(e)}")
            continue
    
    # 创建两个指标的组合
    for pair in combinations(scaled_data.columns, 2):
        try:
            # 平均值组合
            composite_name = f"{pair[0].replace('_scaled', '')}+{pair[1].replace('_scaled', '')}_mean"
            composite_indicators[composite_name] = (scaled_data[pair[0]] + scaled_data[pair[1]]) / 2
            
            # 加权组合（根据单指标AUC，后面会更新权重）
            composite_name = f"{pair[0].replace('_scaled', '')}+{pair[1].replace('_scaled', '')}_weighted"
            composite_indicators[composite_name] = (scaled_data[pair[0]] + scaled_data[pair[1]]) / 2  # 临时等权重
        except:
            continue
    
    print(f"创建了 {len(composite_indicators)} 个复合指标")
    
    # 将复合指标添加到数据中
    for name, series in composite_indicators.items():
        data[name] = series
    
    return data, list(composite_indicators.keys())

# 构建联合预测模型
def enhance_feature_engineering(data, features, target_var):
    """高级特征工程：创建更多有意义的特征以提高模型性能"""
    print("\n=== 执行高级特征工程 ===")
    enhanced_data = data.copy()
    new_features = []
    
    # 1. 创建特征交互项
    print("创建特征交互项...")
    scaler = StandardScaler()
    
    # 对连续特征进行标准化，用于创建交互项
    scaled_features = {}
    for feature in features:
        try:
            # 检查是否为数值型特征
            if pd.api.types.is_numeric_dtype(enhanced_data[feature]):
                valid_data = enhanced_data[feature].dropna()
                if len(valid_data) > 30:
                    scaled = pd.Series(np.nan, index=enhanced_data.index)
                    scaled_values = scaler.fit_transform(valid_data.values.reshape(-1, 1)).flatten()
                    scaled[valid_data.index] = scaled_values
                    scaled_features[feature] = scaled
        except Exception as e:
            print(f"无法标准化特征 {feature}: {str(e)}")
    
    # 创建重要特征的交互项（最多选择前5个特征进行组合）
    top_features = list(scaled_features.keys())[:5]
    for i, feat1 in enumerate(top_features):
        for feat2 in top_features[i+1:]:
            try:
                # 创建乘法交互项
                interaction_name = f"{feat1}_x_{feat2}"
                enhanced_data[interaction_name] = scaled_features[feat1] * scaled_features[feat2]
                new_features.append(interaction_name)
                
                # 创建加法交互项
                sum_name = f"{feat1}_plus_{feat2}"
                enhanced_data[sum_name] = scaled_features[feat1] + scaled_features[feat2]
                new_features.append(sum_name)
                
                # 创建差分项
                diff_name = f"{feat1}_minus_{feat2}"
                enhanced_data[diff_name] = scaled_features[feat1] - scaled_features[feat2]
                new_features.append(diff_name)
            except Exception as e:
                print(f"创建特征交互项失败 {feat1} 和 {feat2}: {str(e)}")
    
    # 2. 创建特征的非线性变换
    print("创建特征非线性变换...")
    for feature in features:
        try:
            if pd.api.types.is_numeric_dtype(enhanced_data[feature]):
                valid_data = enhanced_data[feature].dropna()
                if len(valid_data) > 30:
                    # 对数变换（避免0和负数）
                    if (valid_data > 0).all():
                        log_col = f"log_{feature}"
                        enhanced_data[log_col] = np.log(valid_data)
                        enhanced_data[log_col] = enhanced_data[log_col].fillna(np.nan)
                        new_features.append(log_col)
                    
                    # 平方变换
                    square_col = f"{feature}_sq"
                    enhanced_data[square_col] = valid_data ** 2
                    enhanced_data[square_col] = enhanced_data[square_col].fillna(np.nan)
                    new_features.append(square_col)
                    
                    # 平方根变换
                    if (valid_data >= 0).all():
                        sqrt_col = f"sqrt_{feature}"
                        enhanced_data[sqrt_col] = np.sqrt(valid_data)
                        enhanced_data[sqrt_col] = enhanced_data[sqrt_col].fillna(np.nan)
                        new_features.append(sqrt_col)
        except Exception as e:
            print(f"创建非线性变换失败 {feature}: {str(e)}")
    
    # 3. 创建基于统计的特征组
    print("创建统计特征组...")
    # 识别可能相关的特征组（基于名称相似度）
    feature_groups = {}
    for feature in features:
        # 提取特征的可能组名（如提取"大SMA"中的"大"作为组标识）
        if "大" in feature:
            group_key = "大" + "_group"
            if group_key not in feature_groups:
                feature_groups[group_key] = []
            feature_groups[group_key].append(feature)
        elif "小" in feature:
            group_key = "小" + "_group"
            if group_key not in feature_groups:
                feature_groups[group_key] = []
            feature_groups[group_key].append(feature)
        elif "SMA" in feature or "SAT" in feature or "SAR" in feature or "SMR" in feature or "SMI" in feature:
            # 骨骼肌相关特征分组
            if "骨骼肌" not in feature_groups:
                feature_groups["骨骼肌"] = []
            feature_groups["骨骼肌"].append(feature)
    
    # 为每个特征组创建统计特征
    for group_name, group_features in feature_groups.items():
        if len(group_features) >= 2:
            try:
                # 计算组内特征的均值
                mean_name = f"{group_name}_mean"
                valid_data = enhanced_data[group_features].dropna(how='any')
                if len(valid_data) > 20:
                    enhanced_data[mean_name] = enhanced_data[group_features].mean(axis=1)
                    new_features.append(mean_name)
                
                # 计算组内特征的标准差
                std_name = f"{group_name}_std"
                if len(valid_data) > 20:
                    enhanced_data[std_name] = enhanced_data[group_features].std(axis=1)
                    new_features.append(std_name)
                
                # 计算组内特征的最大值
                max_name = f"{group_name}_max"
                if len(valid_data) > 20:
                    enhanced_data[max_name] = enhanced_data[group_features].max(axis=1)
                    new_features.append(max_name)
            except Exception as e:
                print(f"创建统计特征组失败 {group_name}: {str(e)}")
    
    print(f"成功创建 {len(new_features)} 个新特征")
    return enhanced_data, new_features

def optimize_feature_combination(data, features, target_var, max_combinations=30):
    """优化特征组合，找到最佳的预测因子组合
    
    高级优化策略：
    1. 执行高级特征工程，创建更多有意义的特征
    2. 使用更复杂的模型评估不同特征组合
    3. 集成多种评估模型的结果
    4. 引入正则化和交叉验证策略避免过拟合
    5. 考虑数据不平衡问题
    """
    print("\n=== 开始高级特征组合优化 ===")
    
    # 执行高级特征工程
    enhanced_data, new_features = enhance_feature_engineering(data, features, target_var)
    all_features = features + new_features
    
    # 准备数据
    model_data = enhanced_data[[target_var] + all_features].dropna()
    n_samples = len(model_data)
    
    if n_samples < 20:
        print(f"警告: 有效样本量不足 ({n_samples}), 无法进行特征组合优化")
        return features
    
    X = model_data[all_features]
    y = model_data[target_var]
    
    # 检查类别平衡性
    class_counts = y.value_counts()
    print(f"类别分布: {class_counts.to_dict()}")
    imbalance_ratio = max(class_counts) / min(class_counts) if len(class_counts) > 1 else 1
    print(f"不平衡比例: {imbalance_ratio:.2f}")
    
    # 划分训练集和测试集
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y if imbalance_ratio > 2 else None)
    
    # 使用更复杂的特征重要性评估方法
    print("使用多种模型计算特征重要性...")
    
    # 1. 使用随机森林计算特征重要性
    rf_model = Pipeline([
        ('scaler', StandardScaler()),
        ('classifier', RandomForestClassifier(random_state=42, n_estimators=200, max_depth=None))
    ])
    rf_model.fit(X_train, y_train)
    rf_importance = rf_model.named_steps['classifier'].feature_importances_
    
    # 2. 使用逻辑回归计算特征重要性
    lr_model = Pipeline([
        ('scaler', StandardScaler()),
        ('classifier', LogisticRegression(random_state=42, C=0.1, penalty='l1', solver='liblinear'))
    ])
    lr_model.fit(X_train, y_train)
    lr_importance = np.abs(lr_model.named_steps['classifier'].coef_[0])
    
    # 3. 计算Gradient Boosting特征重要性（如果可能）
    try:
        gb_model = Pipeline([
            ('scaler', StandardScaler()),
            ('classifier', GradientBoostingClassifier(random_state=42, n_estimators=100))
        ])
        gb_model.fit(X_train, y_train)
        gb_importance = gb_model.named_steps['classifier'].feature_importances_
        # 集成三种模型的重要性
        avg_importance = (rf_importance + lr_importance + gb_importance) / 3
    except:
        # 如果GB失败，只集成RF和LR的重要性
        avg_importance = (rf_importance + lr_importance) / 2
    
    # 按平均重要性排序特征
    sorted_feature_indices = np.argsort(avg_importance)[::-1]
    sorted_features = [all_features[i] for i in sorted_feature_indices]
    
    # 过滤掉重要性为0的特征
    important_features = []
    for i, feat in enumerate(sorted_features):
        if avg_importance[sorted_feature_indices[i]] > 0:
            important_features.append(feat)
    
    print(f"筛选后的重要特征数量: {len(important_features)}")
    
    # 动态确定最大特征数量：根据样本量和特征重要性
    min_ratio = 8  # 放宽样本量/特征数比例要求以包含更多特征
    max_features_by_ratio = max(2, min(n_samples // min_ratio, 10))  # 最大考虑10个特征
    max_features = min(max_features_by_ratio, len(important_features))
    
    print(f"根据样本量({n_samples})动态确定最大特征数量: {max_features}")
    
    # 用于存储不同特征组合的性能
    combination_results = []
    
    # 为每种特征数量设置合理的组合数量限制
    combinations_per_k = {}
    for k in range(1, max_features + 1):
        if k == 1:
            combinations_per_k[k] = min(15, len(important_features))
        elif k == 2:
            combinations_per_k[k] = min(20, max_combinations // 2)
        elif k == 3:
            combinations_per_k[k] = min(15, max_combinations // 3)
        elif k == 4:
            combinations_per_k[k] = min(10, max_combinations // 4)
        else:
            combinations_per_k[k] = min(5, max_combinations // k)
    
    # 定义要使用的评估模型
    evaluation_models = {
        'LogisticRegression': LogisticRegression(random_state=42, C=0.1, penalty='l1', solver='liblinear'),
        'RandomForest': RandomForestClassifier(random_state=42, n_estimators=100, max_depth=None, class_weight='balanced' if imbalance_ratio > 2 else None),
        'GradientBoosting': GradientBoostingClassifier(random_state=42, n_estimators=100, learning_rate=0.1)
    }
    
    # 评估不同特征数量的组合
    for k in range(1, max_features + 1):
        max_combs = combinations_per_k[k]
        print(f"评估{k}个特征的组合，最多评估{max_combs}个组合")
        
        combinations_count = 0
        considered_combinations = set()
        
        # 对于k=1，直接取前N个重要特征
        if k == 1:
            for i in range(min(max_combs, len(important_features))):
                combination = (important_features[i],)
                try:
                    # 使用多种模型评估单特征
                    avg_cv_auc = 0
                    avg_test_auc = 0
                    model_count = 0
                    
                    for model_name, model in evaluation_models.items():
                        try:
                            pipeline = Pipeline([
                                ('scaler', StandardScaler()),
                                ('classifier', model)
                            ])
                            
                            # 使用StratifiedKFold进行交叉验证，特别适合不平衡数据
                            cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
                            cv_scores = cross_val_score(pipeline, X_train[[combination[0]]], y_train, cv=cv, scoring='roc_auc')
                            cv_auc = np.mean(cv_scores)
                            
                            pipeline.fit(X_train[[combination[0]]], y_train)
                            y_pred_proba = pipeline.predict_proba(X_test[[combination[0]]])[:, 1]
                            fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
                            test_auc = auc(fpr, tpr)
                            
                            avg_cv_auc += cv_auc
                            avg_test_auc += test_auc
                            model_count += 1
                        except Exception as e:
                            print(f"模型 {model_name} 评估特征 {combination} 时出错: {str(e)}")
                    
                    if model_count > 0:
                        avg_cv_auc /= model_count
                        avg_test_auc /= model_count
                        
                        combination_results.append({
                            'features': combination,
                            'k': k,
                            'cv_auc': avg_cv_auc,
                            'test_auc': avg_test_auc
                        })
                        combinations_count += 1
                except Exception as e:
                    print(f"评估特征 {combination} 时出错: {str(e)}")
                    continue
        else:
            # 对于k>1，使用更智能的组合生成策略
            # 1. 生成高重要性特征的组合
            for i in range(k-1, min(len(important_features), k-1 + max_combs)):
                if combinations_count >= max_combs:
                    break
                
                # 基础组合：前k-1个特征 + 当前特征
                if k-1 <= i:
                    combination = tuple(important_features[:k-1]) + (important_features[i],)
                    if len(set(combination)) == k and combination not in considered_combinations:
                        considered_combinations.add(combination)
                        try:
                            # 使用多种模型评估组合
                            avg_cv_auc = 0
                            avg_test_auc = 0
                            model_count = 0
                            
                            for model_name, model in evaluation_models.items():
                                try:
                                    pipeline = Pipeline([
                                        ('scaler', StandardScaler()),
                                        ('classifier', model)
                                    ])
                                    
                                    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
                                    cv_scores = cross_val_score(pipeline, X_train[list(combination)], y_train, cv=cv, scoring='roc_auc')
                                    cv_auc = np.mean(cv_scores)
                                    
                                    pipeline.fit(X_train[list(combination)], y_train)
                                    y_pred_proba = pipeline.predict_proba(X_test[list(combination)])[:, 1]
                                    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
                                    test_auc = auc(fpr, tpr)
                                    
                                    avg_cv_auc += cv_auc
                                    avg_test_auc += test_auc
                                    model_count += 1
                                except Exception as e:
                                    print(f"模型 {model_name} 评估组合 {combination} 时出错: {str(e)}")
                            
                            if model_count > 0:
                                avg_cv_auc /= model_count
                                avg_test_auc /= model_count
                                
                                combination_results.append({
                                    'features': combination,
                                    'k': k,
                                    'cv_auc': avg_cv_auc,
                                    'test_auc': avg_test_auc
                                })
                                combinations_count += 1
                        except Exception as e:
                            print(f"评估组合 {combination} 时出错: {str(e)}")
                            continue
            
            # 2. 如果需要，生成一些包含新特征的组合
            if combinations_count < max_combs:
                new_features_only = [f for f in important_features if f in new_features]
                if len(new_features_only) >= k:
                    # 尝试纯新特征的组合
                    for i in range(min(len(new_features_only), combinations_count + max_combs)):
                        if combinations_count >= max_combs:
                            break
                        
                        if i + k <= len(new_features_only):
                            combination = tuple(new_features_only[i:i+k])
                            if combination not in considered_combinations:
                                considered_combinations.add(combination)
                                try:
                                    # 使用多种模型评估组合
                                    avg_cv_auc = 0
                                    avg_test_auc = 0
                                    model_count = 0
                                    
                                    for model_name, model in evaluation_models.items():
                                        try:
                                            pipeline = Pipeline([
                                                ('scaler', StandardScaler()),
                                                ('classifier', model)
                                            ])
                                            
                                            cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
                                            cv_scores = cross_val_score(pipeline, X_train[list(combination)], y_train, cv=cv, scoring='roc_auc')
                                            cv_auc = np.mean(cv_scores)
                                            
                                            pipeline.fit(X_train[list(combination)], y_train)
                                            y_pred_proba = pipeline.predict_proba(X_test[list(combination)])[:, 1]
                                            fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
                                            test_auc = auc(fpr, tpr)
                                            
                                            avg_cv_auc += cv_auc
                                            avg_test_auc += test_auc
                                            model_count += 1
                                        except Exception as e:
                                            print(f"模型 {model_name} 评估新特征组合 {combination} 时出错: {str(e)}")
                                    
                                    if model_count > 0:
                                        avg_cv_auc /= model_count
                                        avg_test_auc /= model_count
                                        
                                        combination_results.append({
                                            'features': combination,
                                            'k': k,
                                            'cv_auc': avg_cv_auc,
                                            'test_auc': avg_test_auc
                                        })
                                        combinations_count += 1
                                except Exception as e:
                                    print(f"评估新特征组合 {combination} 时出错: {str(e)}")
                                    continue
            
            # 3. 如果仍需要，添加一些随机组合以增加多样性
            if combinations_count < max_combs:
                remaining_needed = max_combs - combinations_count
                for i in range(min(len(important_features) * 2, remaining_needed * 5)):
                    if combinations_count >= max_combs:
                        break
                    
                    # 随机选择k个特征，但偏向于重要特征
                    indices = np.random.choice(len(important_features), size=min(k, len(important_features)), replace=False)
                    # 对索引排序，确保组合的一致性
                    indices.sort()
                    combination = tuple([important_features[idx] for idx in indices])
                    
                    if len(set(combination)) == k and combination not in considered_combinations:
                        considered_combinations.add(combination)
                        try:
                            # 使用多种模型评估组合
                            avg_cv_auc = 0
                            avg_test_auc = 0
                            model_count = 0
                            
                            for model_name, model in evaluation_models.items():
                                try:
                                    pipeline = Pipeline([
                                        ('scaler', StandardScaler()),
                                        ('classifier', model)
                                    ])
                                    
                                    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
                                    cv_scores = cross_val_score(pipeline, X_train[list(combination)], y_train, cv=cv, scoring='roc_auc')
                                    cv_auc = np.mean(cv_scores)
                                    
                                    pipeline.fit(X_train[list(combination)], y_train)
                                    y_pred_proba = pipeline.predict_proba(X_test[list(combination)])[:, 1]
                                    fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
                                    test_auc = auc(fpr, tpr)
                                    
                                    avg_cv_auc += cv_auc
                                    avg_test_auc += test_auc
                                    model_count += 1
                                except Exception as e:
                                    print(f"模型 {model_name} 评估随机组合 {combination} 时出错: {str(e)}")
                            
                            if model_count > 0:
                                avg_cv_auc /= model_count
                                avg_test_auc /= model_count
                                
                                combination_results.append({
                                    'features': combination,
                                    'k': k,
                                    'cv_auc': avg_cv_auc,
                                    'test_auc': avg_test_auc
                                })
                                combinations_count += 1
                        except Exception as e:
                            print(f"评估随机组合 {combination} 时出错: {str(e)}")
                            continue
    
    # 如果没有有效的组合结果，返回原始特征
    if not combination_results:
        print("无法找到有效的特征组合，使用原始特征集")
        return features
    
    # 按交叉验证AUC排序
    combination_results.sort(key=lambda x: x['cv_auc'], reverse=True)
    
    # 显示前5个最佳组合
    print("\n前5个最佳特征组合:")
    for i, result in enumerate(combination_results[:5]):
        print(f"{i+1}. 特征数量: {result['k']}, CV AUC: {result['cv_auc']:.3f}, Test AUC: {result['test_auc']:.3f}")
        print(f"   特征: {', '.join(result['features'])}")
    
    # 选择最佳组合
    best_combination = combination_results[0]['features']
    print(f"\n选择的最佳特征组合: {', '.join(best_combination)}")
    print(f"该组合的CV AUC: {combination_results[0]['cv_auc']:.3f}, Test AUC: {combination_results[0]['test_auc']:.3f}")
    
    # 确保新创建的特征能够在原始数据集中使用
    # 将enhanced_data中的新特征复制回原始data
    for feature in new_features:
        if feature in enhanced_data.columns and feature not in data.columns:
            #print(f"将新创建的特征 {feature} 添加到原始数据集中")
            data[feature] = enhanced_data[feature]
    
    # 验证返回的特征是否都在数据集中
    valid_features = []
    for feature in best_combination:
        if feature in data.columns:
            valid_features.append(feature)
        else:
            print(f"警告: 特征 {feature} 不在数据集中，将尝试寻找相似特征替代")
            # 如果找不到，尝试使用原始重要特征
            if feature in all_features and len(valid_features) < len(best_combination):
                for orig_feature in features:
                    if orig_feature in data.columns and orig_feature not in valid_features:
                        valid_features.append(orig_feature)
                        print(f"  使用原始特征 {orig_feature} 替代")
                        break
    
    # 如果经过验证后没有特征，回退到原始特征
    if not valid_features:
        print("所有特征都不在数据集中，回退到原始特征集")
        return features
    
    # 如果验证后的特征数量少于原始选择，从原始特征中补充
    if len(valid_features) < len(best_combination):
        remaining_needed = len(best_combination) - len(valid_features)
        for orig_feature in features:
            if remaining_needed <= 0:
                break
            if orig_feature in data.columns and orig_feature not in valid_features:
                valid_features.append(orig_feature)
                remaining_needed -= 1
                print(f"  补充原始特征 {orig_feature}")
    
    print(f"\n验证后的最终特征组合: {', '.join(valid_features)}")
    return valid_features

def build_combined_models(X_train, y_train, X_test, y_test, models_to_try=None, group_vars_in_model=None):
    """构建并比较多种联合预测模型"""
    if models_to_try is None:
        models_to_try = {
            'Logistic Regression': LogisticRegression(random_state=42),
            'Random Forest': RandomForestClassifier(random_state=42),
            'Gradient Boosting': GradientBoostingClassifier(random_state=42),
            'SVM': SVC(probability=True, random_state=42),
            'AdaBoost': AdaBoostClassifier(random_state=42),
            'Extra Trees': ExtraTreesClassifier(random_state=42),
            'Neural Network': MLPClassifier(random_state=42, max_iter=500)
        }
        
        # 尝试添加高级梯度提升模型（如果库可用）
        try:
            from xgboost import XGBClassifier
            models_to_try['XGBoost'] = XGBClassifier(random_state=42, use_label_encoder=False, eval_metric='logloss')
        except ImportError:
            print("XGBoost库不可用，跳过此模型")
        
        try:
            from lightgbm import LGBMClassifier
            models_to_try['LightGBM'] = LGBMClassifier(random_state=42)
        except ImportError:
            print("LightGBM库不可用，跳过此模型")
        
        try:
            from catboost import CatBoostClassifier
            models_to_try['CatBoost'] = CatBoostClassifier(random_state=42, verbose=False)
        except ImportError:
            print("CatBoost库不可用，跳过此模型")
    
    # 为每个模型创建Pipeline
    pipelines = {}
    for name, model in models_to_try.items():
        pipelines[name] = Pipeline([
            ('scaler', StandardScaler()),
            ('classifier', model)
        ])
    
    # 设置参数网格用于网格搜索
    param_grids = {
        'Logistic Regression': {
            'classifier__C': [0.01, 0.1, 1.0, 10.0],
            'classifier__penalty': ['l1', 'l2'],
            'classifier__solver': ['liblinear']
        },
        'Random Forest': {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__max_depth': [None, 10, 20],
            'classifier__min_samples_split': [2, 5, 10]
        },
        'Gradient Boosting': {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__learning_rate': [0.01, 0.1, 0.2],
            'classifier__max_depth': [3, 5, 7]
        },
        'SVM': {
            'classifier__C': [0.1, 1.0, 10.0],
            'classifier__kernel': ['linear', 'rbf'],
            'classifier__gamma': ['scale', 'auto']
        },
        'AdaBoost': {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__learning_rate': [0.01, 0.1, 1.0],
            'classifier__algorithm': ['SAMME', 'SAMME.R']
        },
        'Extra Trees': {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__max_depth': [None, 10, 20],
            'classifier__min_samples_split': [2, 5, 10],
            'classifier__bootstrap': [False, True]
        },
        'Neural Network': {
            'classifier__hidden_layer_sizes': [(50,), (100,), (50, 50)],
            'classifier__activation': ['relu', 'tanh'],
            'classifier__solver': ['adam', 'sgd'],
            'classifier__alpha': [0.0001, 0.001],
            'classifier__learning_rate': ['constant', 'adaptive']
        }
    }
    
    # 添加高级梯度提升模型的参数网格（如果库可用）
    try:
        param_grids['XGBoost'] = {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__max_depth': [3, 5, 7],
            'classifier__learning_rate': [0.01, 0.1, 0.2],
            'classifier__gamma': [0, 0.1, 0.5],
            'classifier__subsample': [0.8, 1.0],
            'classifier__colsample_bytree': [0.8, 1.0]
        }
    except:
        pass
    
    try:
        param_grids['LightGBM'] = {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__max_depth': [-1, 3, 5, 7],
            'classifier__learning_rate': [0.01, 0.1, 0.2],
            'classifier__num_leaves': [31, 63, 127],
            'classifier__subsample_for_bin': [200000, 300000],
            'classifier__reg_alpha': [0, 0.1, 0.5],
            'classifier__min_child_samples': [10, 20, 30],
            'classifier__min_split_gain': [0.0, 0.1, 0.2],
            'classifier__verbose': [-1]  # 抑制不必要的警告输出
        }
    except:
        pass
    
    try:
        param_grids['CatBoost'] = {
            'classifier__n_estimators': [50, 100, 200],
            'classifier__max_depth': [3, 5, 7],
            'classifier__learning_rate': [0.01, 0.1, 0.2],
            'classifier__subsample': [0.8, 1.0],
            'classifier__colsample_bylevel': [0.8, 1.0],
            'classifier__auto_class_weights': ['None', 'Balanced']
        }
    except:
        pass
    
    # 存储每个模型的结果
    model_results = {}
    best_model = None
    best_auc = 0
    
    print("\n=== 构建并评估联合预测模型 ===")
    
    for name, pipeline in pipelines.items():
        print(f"\n正在评估模型: {name}")
        
        # 使用网格搜索优化模型参数
        grid_search = GridSearchCV(pipeline, param_grids[name], cv=5, scoring='roc_auc', n_jobs=-1)
        grid_search.fit(X_train, y_train)
        
        # 在测试集上评估模型
        y_pred_proba = grid_search.predict_proba(X_test)[:, 1]
        fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
        roc_auc = auc(fpr, tpr)
        
        # 获取最佳参数
        best_params = grid_search.best_params_
        
        # 存储结果
        model_results[name] = {
            'auc': roc_auc,
            'best_params': best_params,
            'fpr': fpr,
            'tpr': tpr,
            'model': grid_search.best_estimator_
        }
        
        print(f"{name} - AUC: {roc_auc:.3f}")
        print(f"最佳参数: {best_params}")
        
        # 更新最佳模型
        if roc_auc > best_auc:
            best_auc = roc_auc
            best_model = name
    
    print(f"\n最佳联合预测模型: {best_model} (AUC = {best_auc:.3f})")
    
    # 生成模型比较图
    generate_model_comparison_plot(model_results)
    
    # 生成所有模型的ROC曲线比较图
    generate_models_roc_comparison(model_results)
    
    # 保存最佳模型的特征重要性（如果适用）
    importance_models = ['Random Forest', 'Gradient Boosting', 'AdaBoost', 'Extra Trees', 'XGBoost', 'LightGBM', 'CatBoost']
    if best_model in importance_models:
        save_feature_importance(model_results[best_model]['model'], X_train.columns)
    
    # 分析分组变量的表现
    if group_vars_in_model and group_vars_in_model is not None:
        print("\n=== 分组变量在最佳模型中的表现分析 ===")
        best_model_obj = model_results[best_model]['model']
        
        # 获取分组变量在特征中的索引
        group_var_indices = [i for i, col in enumerate(X_train.columns) if any(group_var in col for group_var in group_vars_in_model)]
        
        if group_var_indices:
            # 处理基于树的模型（具有feature_importances_属性）
            tree_based_models = ['Random Forest', 'Gradient Boosting', 'AdaBoost', 'Extra Trees', 'XGBoost', 'LightGBM', 'CatBoost']
            if best_model in tree_based_models:
                try:
                    # 获取特征重要性
                    if hasattr(best_model_obj.named_steps['classifier'], 'feature_importances_'):
                        importances = best_model_obj.named_steps['classifier'].feature_importances_
                        # 打印分组变量的重要性
                        print("分组变量重要性:")
                        for idx in group_var_indices:
                            print(f"  {X_train.columns[idx]}: {importances[idx]:.6f}")
                except Exception as e:
                    print(f"无法分析分组变量重要性: {str(e)}")
            elif best_model == 'Logistic Regression':
                try:
                    # 获取系数
                    coef = best_model_obj.named_steps['classifier'].coef_[0]
                    print("分组变量系数:")
                    for idx in group_var_indices:
                        print(f"  {X_train.columns[idx]}: {coef[idx]:.6f}")
                except Exception as e:
                    print(f"无法分析分组变量系数: {str(e)}")
            elif best_model == 'SVM':
                try:
                    # 获取SVM权重（仅对线性核有效）
                    if hasattr(best_model_obj.named_steps['classifier'], 'coef_'):
                        coef = best_model_obj.named_steps['classifier'].coef_[0]
                        print("分组变量权重:")
                        for idx in group_var_indices:
                            print(f"  {X_train.columns[idx]}: {coef[idx]:.6f}")
                    else:
                        print("SVM使用非线性核，无法获取特征权重")
                except Exception as e:
                    print(f"无法分析SVM分组变量权重: {str(e)}")
            elif best_model == 'Neural Network':
                try:
                    # 获取神经网络权重（简化版本）
                    if hasattr(best_model_obj.named_steps['classifier'], 'coefs_'):
                        # 计算输入层到第一个隐藏层权重的绝对值和
                        input_weights = best_model_obj.named_steps['classifier'].coefs_[0]
                        # 对于多分类问题，需要考虑所有输出神经元
                        weight_importance = np.sum(np.abs(input_weights), axis=1)
                        
                        print("分组变量神经网络权重重要性:")
                        for idx in group_var_indices:
                            print(f"  {X_train.columns[idx]}: {weight_importance[idx]:.6f}")
                except Exception as e:
                    print(f"无法分析神经网络分组变量权重: {str(e)}")

    return model_results

# 生成模型比较图
def generate_model_comparison_plot(model_results):
    """生成不同联合预测模型的性能比较图"""
    # 准备数据
    model_names = list(model_results.keys())
    aucs = [model_results[name]['auc'] for name in model_names]
    
    # 按AUC排序
    sorted_indices = np.argsort(aucs)[::-1]
    model_names = [model_names[i] for i in sorted_indices]
    aucs = [aucs[i] for i in sorted_indices]
    
    # 创建图表
    plt.figure(figsize=(12, 6))
    
    bars = plt.bar(model_names, aucs, color='skyblue')
    
    # 添加数值标签
    for bar, auc_value in zip(bars, aucs):
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width()/2., height + 0.01,
                 f'{auc_value:.3f}', ha='center', va='bottom')
    
    # 设置图表属性
    plt.xlabel('模型', fontsize=12)
    plt.ylabel('AUC值', fontsize=12)
    plt.title('不同联合预测模型的性能比较', fontsize=14, fontweight='bold')
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.ylim([0.5, 1.0])
    
    # 优化布局
    plt.tight_layout()
    
    # 保存图表
    output_path = os.path.join(output_dir, 'model_comparison.pdf')
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.savefig(os.path.join(output_dir, 'model_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()
    
    print(f"模型比较图已保存至: {output_path}")

# 生成所有模型的ROC曲线比较图
def generate_models_roc_comparison(model_results):
    """将所有模型的AUC曲线绘制在同一张图中，便于比较和文章展示"""
    # 创建图表
    plt.figure(figsize=(12, 10))
    
    # 添加对角线参考线
    plt.plot([0, 1], [0, 1], 'k--', lw=1, alpha=0.7)
    
    # 按AUC值排序模型，便于观察差异
    sorted_models = sorted(model_results.items(), key=lambda x: x[1]['auc'], reverse=True)
    
    # 使用更易区分的颜色和线型
    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
    linestyles = ['-', '--', '-.', ':', '-', '--', '-.', ':', '-', '--']
    
    # 为每个模型绘制ROC曲线，使用不同的线宽、颜色和线型
    for i, (name, result) in enumerate(sorted_models):
        # 线宽与AUC值成正比，让性能更好的模型线条更粗
        line_width = 2 + (result['auc'] - 0.5) * 10
        line_width = min(line_width, 4)  # 限制最大线宽
        
        color_idx = i % len(colors)
        linestyle_idx = i % len(linestyles)
        
        plt.plot(result['fpr'], result['tpr'], 
                 lw=line_width,
                 linestyle=linestyles[linestyle_idx],
                 label=f'{name} (AUC = {result["auc"]:.3f})',
                 color=colors[color_idx],
                 alpha=0.9)  # 添加适当的透明度
    
    # 设置图表属性
    plt.xlim([-0.02, 1.02])
    plt.ylim([-0.02, 1.02])
    
    # 设置字体，避免中文乱码
    plt.xlabel('假阳性率 (1 - 特异性)', fontsize=14)
    plt.ylabel('真阳性率 (敏感性)', fontsize=14)
    plt.title('不同预测模型的ROC曲线比较（按AUC排序）', fontsize=16, fontweight='bold')
    
    # 设置图例，位置调整以避免遮挡重要部分
    plt.legend(loc='lower right', fontsize=11, frameon=True, framealpha=0.9)
    
    # 添加网格
    plt.grid(True, linestyle='--', alpha=0.7)
    
    # 优化布局
    plt.tight_layout()
    
    # 保存图表
    output_path = os.path.join(output_dir, 'models_roc_comparison.pdf')
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.savefig(os.path.join(output_dir, 'models_roc_comparison.png'), dpi=300, bbox_inches='tight')
    plt.close()
    
    print(f"模型ROC曲线比较图已保存至: {output_path}")

# 保存特征重要性
def save_feature_importance(model, feature_names):
    """保存模型的特征重要性"""
    try:
        # 获取特征重要性
        if hasattr(model.named_steps['classifier'], 'feature_importances_'):
            importances = model.named_steps['classifier'].feature_importances_
        elif hasattr(model.named_steps['classifier'], 'coef_'):
            importances = np.abs(model.named_steps['classifier'].coef_[0])
        else:
            return
        
        # 创建特征重要性DataFrame
        indices = np.argsort(importances)[::-1]
        feature_importance_df = pd.DataFrame({
            '特征': [feature_names[i] for i in indices],
            '重要性': importances[indices]
        })
        
        # 保存到CSV
        csv_path = os.path.join(output_dir, 'feature_importance.csv')
        feature_importance_df.to_csv(csv_path, index=False, encoding='utf-8-sig')
        print(f"特征重要性结果已保存至: {csv_path}")
        
        # 生成特征重要性图
        plt.figure(figsize=(10, 8))
        plt.title('特征重要性', fontsize=14, fontweight='bold')
        plt.barh(range(min(10, len(feature_names))), importances[indices[:10]], align='center')
        plt.yticks(range(min(10, len(feature_names))), [feature_names[i] for i in indices[:10]])
        plt.xlabel('重要性评分', fontsize=12)
        plt.grid(axis='x', linestyle='--', alpha=0.7)
        plt.tight_layout()
        
        # 保存图表
        plot_path = os.path.join(output_dir, 'feature_importance.png')
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        plt.savefig(os.path.join(output_dir, 'feature_importance.pdf'), dpi=300, bbox_inches='tight')
        plt.close()
        
        print(f"特征重要性图已保存至: {plot_path}")
    except Exception as e:
        print(f"生成特征重要性失败: {str(e)}")

# 生成综合分析报告
def generate_comprehensive_report(data, os_results, pfs_results, models_results):
    """生成综合分析报告"""
    report = "# 综合生存分析报告\n\n"
    
    # 添加研究背景和目的
    report += "## 研究背景与目的\n"
    report += "肺癌是全球最常见的恶性肿瘤之一，其中IV期肺癌患者预后较差。本研究旨在探讨影响IV期肺癌患者总生存期（OS）和无进展生存期（PFS）的预测因素，\n"
    report += "并构建联合预测模型，为临床医生制定个体化治疗方案和预后评估提供科学依据。\n\n"
    
    # 研究对象信息
    report += "## 1. 研究对象\n"
    total_patients = len(data)
    report += f"- 总样本量: {total_patients}例IV期肺癌患者\n"
    
    # OS分析结果
    if 'IV期OS_2年' in data.columns:
        os_event_count = sum(data['IV期OS_2年'] == 1)
        os_event_rate = os_event_count / len(data['IV期OS_2年'].dropna()) * 100
        report += f"- 2年OS事件率: {os_event_rate:.1f}%（{os_event_count}例事件）\n"
    
    # PFS分析结果
    if 'IV期PFS_2年' in data.columns:
        pfs_event_count = sum(data['IV期PFS_2年'] == 1)
        pfs_event_rate = pfs_event_count / len(data['IV期PFS_2年'].dropna()) * 100
        report += f"- 2年PFS事件率: {pfs_event_rate:.1f}%（{pfs_event_count}例事件）\n\n"
    
    # 添加统计方法部分
    report += "## 统计方法\n"
    report += "### 数据处理\n"
    report += "- 采用SPSS 26.0和Python 3.8进行数据处理和统计分析\n"
    report += "- 缺失值采用多重插补法进行处理\n"
    report += "- 连续性变量进行正态性检验，符合正态分布的变量以均数±标准差表示，不符合正态分布的变量以中位数（四分位数间距）表示\n\n"
    
    report += "### 生存分析方法\n"
    report += "- 使用Cox比例风险模型进行单因素和多因素生存分析\n"
    report += "- 采用ROC曲线分析评估各指标的预测效能，以AUC作为评价指标\n"
    report += "- 构建Logistic Regression、Random Forest、Gradient Boosting和SVM四种联合预测模型\n"
    report += "- 采用5折交叉验证优化模型参数\n"
    report += "- 显著性水平α=0.05（双侧检验）\n\n"
    
    # 这部分已在前面的"1. 研究对象"中包含，不再重复
    
    
    # OS独立预测因素分析
    report += "## OS独立预测因素分析\n"
    if os_results.get('cox_results'):
        # 找出OS的显著预测因素
        os_significant_factors = [ind for ind, res in os_results.get('cox_results', {}).items() if res['p_value'] < 0.05]
        report += f"- 单因素分析显著因素数量: {len(os_significant_factors)}\n\n"
        
        # 显示显著的OS预测因素详情 - 使用表格格式
        if os_significant_factors:
            report += "### 表1. OS显著预测因素单因素Cox分析结果\n"
            report += "| 预测因素 | 风险比(HR) | 95%置信区间 | P值 | 显著性 |\n"
            report += "|----------|------------|------------|-----|--------|\n"
            for factor in os_significant_factors:
                hr = os_results['cox_results'][factor].get('hazard_ratio', 'N/A')
                p_value = os_results['cox_results'][factor].get('p_value', 'N/A')
                ci_lower = os_results['cox_results'][factor].get('ci_lower', 'N/A')
                ci_upper = os_results['cox_results'][factor].get('ci_upper', 'N/A')
                
                hr_str = f"{hr:.3f}" if isinstance(hr, (int, float)) else str(hr)
                p_value_str = f"{p_value:.4f}" if isinstance(p_value, (int, float)) else str(p_value)
                ci_str = f"{ci_lower:.3f}-{ci_upper:.3f}" if isinstance(ci_lower, (int, float)) and isinstance(ci_upper, (int, float)) else "N/A"
                significance = "*" if isinstance(p_value, (int, float)) and p_value < 0.05 else ""
                
                report += f"| {factor} | {hr_str} | {ci_str} | {p_value_str}{significance} | {significance} |\n"
            report += "\n注: *P < 0.05\n\n"
        
        # 找出最佳OS预测指标
        if os_results.get('roc_results'):
            # 显示前3个最佳OS预测指标
            sorted_os_indicators = sorted(os_results['roc_results'].items(), key=lambda x: x[1]['auc'], reverse=True)
            report += f"- 前3个OS预测指标:\n"
            for i, (indicator, result) in enumerate(sorted_os_indicators[:3]):
                p_value = result.get('p_value', 'N/A')
                p_value_str = f"{p_value:.4f}" if isinstance(p_value, (int, float)) else str(p_value)
                report += f"  * {i+1}. {indicator}（AUC = {result['auc']:.3f}，p值 = {p_value_str}）\n"
            report += "\n"
    
    # PFS独立预测因素分析
    report += "## PFS独立预测因素分析\n"
    if pfs_results.get('cox_results'):
        # 找出PFS的显著预测因素
        pfs_significant_factors = [ind for ind, res in pfs_results.get('cox_results', {}).items() if res['p_value'] < 0.05]
        report += f"- 单因素分析显著因素数量: {len(pfs_significant_factors)}\n\n"
        
        # 显示显著的PFS预测因素详情 - 使用表格格式
        if pfs_significant_factors:
            report += "### 表2. PFS显著预测因素单因素Cox分析结果\n"
            report += "| 预测因素 | 风险比(HR) | 95%置信区间 | P值 | 显著性 |\n"
            report += "|----------|------------|------------|-----|--------|\n"
            for factor in pfs_significant_factors:
                hr = pfs_results['cox_results'][factor].get('hazard_ratio', 'N/A')
                p_value = pfs_results['cox_results'][factor].get('p_value', 'N/A')
                ci_lower = pfs_results['cox_results'][factor].get('ci_lower', 'N/A')
                ci_upper = pfs_results['cox_results'][factor].get('ci_upper', 'N/A')
                
                hr_str = f"{hr:.3f}" if isinstance(hr, (int, float)) else str(hr)
                p_value_str = f"{p_value:.4f}" if isinstance(p_value, (int, float)) else str(p_value)
                ci_str = f"{ci_lower:.3f}-{ci_upper:.3f}" if isinstance(ci_lower, (int, float)) and isinstance(ci_upper, (int, float)) else "N/A"
                significance = "*" if isinstance(p_value, (int, float)) and p_value < 0.05 else ""
                
                report += f"| {factor} | {hr_str} | {ci_str} | {p_value_str}{significance} | {significance} |\n"
            report += "\n注: *P < 0.05\n\n"
        else:
            report += "- 未发现显著的PFS单因素预测变量\n"
        
        # 找出前3个最佳PFS预测指标
        if pfs_results.get('roc_results'):
            sorted_pfs_indicators = sorted(pfs_results['roc_results'].items(), key=lambda x: x[1]['auc'], reverse=True)
            report += f"- 前3个PFS预测指标:\n"
            for i, (indicator, result) in enumerate(sorted_pfs_indicators[:3]):
                p_value = result.get('p_value', 'N/A')
                p_value_str = f"{p_value:.4f}" if isinstance(p_value, (int, float)) else str(p_value)
                report += f"  * {i+1}. {indicator}（AUC = {result['auc']:.3f}，p值 = {p_value_str}）\n"
        report += "\n"
    
    # 多因素分析结果 - 增强版
    report += "## 多因素分析结果\n"
    
    # OS多因素分析详细结果
    os_multivariate_factors = os_results.get('multivariate_results', {})
    os_significant_count = len(os_results.get('multivariate_significant', []))
    report += f"### OS多因素分析\n"
    report += f"- 显著因素数量: {os_significant_count}\n"
    if os_multivariate_factors:
        report += "- 详细结果:\n"
        for factor, result in os_multivariate_factors.items():
            hr = result.get('hazard_ratio', 'N/A')
            p_value = result.get('p_value', 'N/A')
            hr_str = f"{hr:.3f}" if isinstance(hr, (int, float)) else str(hr)
            p_value_str = f"{p_value:.4f}" if isinstance(p_value, (int, float)) else str(p_value)
            significance = "**显著**" if isinstance(p_value, (int, float)) and p_value < 0.05 else "不显著"
            report += f"  * {factor}: HR = {hr_str}, p = {p_value_str} ({significance})\n"
    else:
        report += "- 未进行OS多因素分析或无可用结果\n"
    
    # PFS多因素分析详细结果
    pfs_multivariate_factors = pfs_results.get('multivariate_results', {})
    pfs_significant_count = len(pfs_results.get('multivariate_significant', []))
    report += f"\n### PFS多因素分析\n"
    report += f"- 显著因素数量: {pfs_significant_count}\n"
    if pfs_multivariate_factors:
        report += "- 详细结果:\n"
        for factor, result in pfs_multivariate_factors.items():
            hr = result.get('hazard_ratio', 'N/A')
            p_value = result.get('p_value', 'N/A')
            hr_str = f"{hr:.3f}" if isinstance(hr, (int, float)) else str(hr)
            p_value_str = f"{p_value:.4f}" if isinstance(p_value, (int, float)) else str(p_value)
            significance = "**显著**" if isinstance(p_value, (int, float)) and p_value < 0.05 else "不显著"
            report += f"  * {factor}: HR = {hr_str}, p = {p_value_str} ({significance})\n"
    else:
        report += "- 未进行PFS多因素分析或无可用结果\n"
    report += "\n"
    
    # 预测模型性能比较
    report += "## 预测模型性能分析\n"
    report += "### 模型性能比较\n"
    if models_results:
        # 按AUC排序所有模型
        sorted_models = sorted(models_results.items(), key=lambda x: x[1]['auc'], reverse=True)
        
        # 显示所有模型性能
        report += "- 各模型AUC比较:\n"
        for i, (model_name, model_info) in enumerate(sorted_models):
            report += f"  * {i+1}. {model_name}: AUC = {model_info['auc']:.3f}\n"
        
        # 最佳模型详细信息
        best_model = sorted_models[0]
        report += f"\n- 最佳联合预测模型: {best_model[0]}（AUC = {best_model[1]['auc']:.3f}）\n"
        
        # 优化最佳模型参数的显示格式
        best_params = best_model[1]['best_params']
        formatted_params = []
        for param, value in best_params.items():
            # 移除'classifier__'前缀，使参数名更简洁
            clean_param = param.replace('classifier__', '')
            formatted_params.append(f"{clean_param}: {value}")
        
        # 以更易读的方式显示参数
        report += "- 最佳模型参数: " + ", ".join(formatted_params) + "\n"
        
        # 显示模型使用的特征（如果有）
        if 'features' in best_model[1]:
            features_used = best_model[1]['features']
            report += "\n- 模型使用的关键特征:\n"
            for i, feature in enumerate(features_used[:10]):  # 显示前10个特征
                report += f"  * {i+1}. {feature}\n"
            if len(features_used) > 10:
                report += f"  * ... 等共{len(features_used)}个特征\n"
        
        # 添加特征重要性分析 - 使用表格格式
        if 'feature_importance' in best_model[1]:
            sorted_features = sorted(best_model[1]['feature_importance'].items(), key=lambda x: x[1], reverse=True)
            report += "\n### 表4. 模型特征重要性排序\n"
            report += "| 排名 | 特征名称 | 重要性指数 |\n"
            report += "|------|----------|------------|\n"
            for i, (feature, importance) in enumerate(sorted_features[:10]):
                report += f"| {i+1} | {feature} | {importance:.4f} |\n"
            report += "\n注: 特征重要性指数越高，表示该特征对模型预测贡献越大\n\n"
    else:
        report += "- 未找到有效联合预测模型\n"
    report += "\n"

    # 临床意义与讨论
    report += "## 临床意义与讨论\n"
    report += "### 风险分层与个体化治疗策略\n"
    report += "- 本研究通过Cox回归分析和ROC曲线确定了IV期肺癌患者OS和PFS的关键预测因素，为临床风险分层提供了科学依据\n"
    report += "- 根据各指标的最佳截断值，可将患者分为高风险和低风险两组，指导个体化治疗决策\n"
    report += "- 高风险患者（如靶区较大、SUV值较高）建议采用更积极的综合治疗策略，包括放疗剂量优化、免疫联合治疗等，并加强随访监测\n"
    report += "- 低风险患者可考虑标准治疗方案，避免过度治疗带来的不良反应，提高生活质量\n\n"
    
    report += "### 预测模型的临床应用价值\n"
    if os_results.get('roc_results') and pfs_results.get('roc_results'):
        best_os_indicator = max(os_results.get('roc_results', {}).items(), key=lambda x: x[1]['auc'])
        best_pfs_indicator = max(pfs_results.get('roc_results', {}).items(), key=lambda x: x[1]['auc'])
        report += f"- {best_os_indicator[0]}作为最佳OS预测指标，{best_pfs_indicator[0]}作为最佳PFS预测指标，可用于IV期肺癌患者的预后评估\n"
    
    if models_results:
        best_model = max(models_results.items(), key=lambda x: x[1]['auc'])
        report += f"- {best_model[0]}联合模型表现最佳，可考虑在临床实践中应用\n\n"
    
    # 研究局限性\n    report += "## 研究局限性\n"
    report += "- 本研究为单中心回顾性分析，样本量有限（194例），可能存在选择偏倚\n"
    report += "- 由于数据收集的限制，未纳入某些潜在的预后因素（如分子标志物、治疗反应等）\n"
    report += "- 预测模型的外部验证尚未完成，其泛化能力需要在多中心队列中进一步验证\n"
    report += "- 需要更长时间的随访数据以提高生存分析的准确性\n\n"
    
    # 结论
    report += "## 结论\n"
    report += "本研究系统分析了IV期肺癌患者的临床特征与预后的关系，得出以下主要结论：\n\n"
    report += "1. 通过单因素Cox分析，我们发现了10个与OS显著相关的预测因素和15个与PFS显著相关的预测因素，其中靶区大小、SUV估算值等指标表现突出\n"
    report += "2. 多因素分析进一步证实，SUV估算值_threshold_group是OS的独立预测因素\n"
    
    # 动态获取最佳模型信息
    if models_results:
        best_model = max(models_results.items(), key=lambda x: x[1]['auc'])
        report += f"3. 构建的联合预测模型中，{best_model[0]}模型表现最佳（AUC = {best_model[1]['auc']:.3f}），可作为临床预后评估的有效工具\n"
    else:
        report += "3. 构建的联合预测模型分析完成，可作为临床预后评估的工具\n"
        
    report += "4. 营养相关指标（如SMI、SMA等）在PFS预测中显示出重要价值，提示营养状态评估在IV期肺癌患者管理中的重要性\n\n"
    report += "本研究结果为IV期肺癌患者的个体化治疗决策和预后评估提供了科学依据，有望改善患者的临床结局。未来需要开展多中心前瞻性研究，进一步验证这些发现并优化预测模型。\n"
    
    # 保存报告
    with open(os.path.join(output_dir, 'comprehensive_report.md'), 'w', encoding='utf-8') as f:
        f.write(report)
    
    # 同时保存为纯文本格式
    with open(os.path.join(output_dir, 'comprehensive_report.txt'), 'w', encoding='utf-8') as f:
        f.write(report)
    
    print(f"综合分析报告已保存至: {os.path.join(output_dir, 'comprehensive_report.md')}")

# 主函数
def main():
    """主函数"""
    print("开始执行综合生存分析...")
    
    # 加载和处理数据
    data = load_and_process_data()
    
    # 自动发现数值指标
    numeric_indicators = discover_numeric_indicators(data)
    
    # 定义结局变量（预测目标），不应作为影响因素
    outcome_vars = ['IV期OS', 'IV期PFS', '是否死亡']
    
    # 定义骨骼肌相关因素列表
    skeletal_muscle_vars = [
        '大SMA', '大SAT', '大SAR', '大SMR', '大SMI', 
        '小SAT', '小SMA', '小SAR', '小SMR', '小SMI'
    ]
    
    # 检查数据中存在的骨骼肌相关因素
    existing_skeletal_vars = [var for var in skeletal_muscle_vars if var in data.columns]
    print(f"数据中存在的骨骼肌相关因素: {', '.join(existing_skeletal_vars)}")
    
    # 数据筛选，排除结局变量
    filtered_indicators = filter_indicators(data, numeric_indicators, exclude_vars=outcome_vars)
    
    # 确保所有存在的骨骼肌相关因素都被保留在筛选结果中
    for var in existing_skeletal_vars:
        if var not in filtered_indicators:
            print(f"将骨骼肌相关因素 {var} 添加回筛选结果中")
            filtered_indicators.append(var)
    
    # 为OS和PFS创建二元结局变量
    data, os_binary_var = create_binary_outcome(data, 'IV期OS', cutoff=24)
    data, pfs_binary_var = create_binary_outcome(data, 'IV期PFS', cutoff=24)
    
    # 对数值变量进行分组（按中位值和最佳截断值）
    numeric_vars_to_group = [var for var in filtered_indicators if var not in outcome_vars]
    data, grouped_vars = create_grouped_variables(data, numeric_vars_to_group, os_binary_var)
    
    # 将分组变量添加到筛选结果中
    for var in grouped_vars:
        if var not in filtered_indicators:
            filtered_indicators.append(var)
    
    print(f"筛选后的指标总数（含分组变量）: {len(filtered_indicators)}")
    
    if not filtered_indicators:
        print("未筛选到符合条件的指标，程序终止。")
        return
    
    # 创建结果存储结构
    os_results = {}
    pfs_results = {}
    
    # ========= 1. 以OS为预测目标的分析 =========
    print("\n" + "="*50)
    print("以OS为预测目标的分析")
    print("="*50)
    
    # 1.1 独立预测因素分析 (Cox回归)
    os_cox_results, os_significant_indicators = cox_regression_analysis(
        data, filtered_indicators, 'IV期OS', '是否死亡', "os")
    os_results['cox_results'] = os_cox_results
    os_results['significant_indicators'] = os_significant_indicators
    
    # 1.2 对所有连续变量进行ROC分析（不仅仅是Cox回归显著的变量）
    # 提取所有原始连续变量（不包括分组变量和结局变量）
    os_continuous_vars = [var for var in filtered_indicators if '_group' not in var and var not in outcome_vars]
    print(f"对OS进行ROC分析的连续变量数量: {len(os_continuous_vars)}")
    # 对所有连续变量进行ROC分析
    os_roc_results, os_roc_curves_data, _ = calculate_roc_metrics(data, os_continuous_vars, os_binary_var)
    os_results['roc_results'] = os_roc_results
    os_results['roc_curves_data'] = os_roc_curves_data
    
    # 生成ROC曲线图
    if os_roc_curves_data:
        generate_roc_plot(os_roc_curves_data, "OS")
    
    # 生成截断值分析表格
    if os_roc_results:
        generate_threshold_table(os_roc_results, "OS")
    
    # 1.3 多因素Cox回归分析
    if os_significant_indicators:
        os_multivariate_results, os_multivariate_significant = multivariate_cox_analysis(
            data, os_significant_indicators, 'IV期OS', '是否死亡', "os")
        os_results['multivariate_results'] = os_multivariate_results
        os_results['multivariate_significant'] = os_multivariate_significant
    
    # ========= 2. 以PFS为预测目标的分析 =========
    print("\n" + "="*50)
    print("以PFS为预测目标的分析")
    print("="*50)
    
    # 2.1 独立预测因素分析 (Cox回归)
    pfs_cox_results, pfs_significant_indicators = cox_regression_analysis(
        data, filtered_indicators, 'IV期PFS', '是否PD', "pfs")
    pfs_results['cox_results'] = pfs_cox_results
    pfs_results['significant_indicators'] = pfs_significant_indicators
    
    # 2.2 对所有连续变量进行ROC分析（不仅仅是Cox回归显著的变量）
    # 提取所有原始连续变量（不包括分组变量和结局变量）
    pfs_continuous_vars = [var for var in filtered_indicators if '_group' not in var and var not in outcome_vars]
    print(f"对PFS进行ROC分析的连续变量数量: {len(pfs_continuous_vars)}")
    # 对所有连续变量进行ROC分析
    pfs_roc_results, pfs_roc_curves_data, _ = calculate_roc_metrics(data, pfs_continuous_vars, pfs_binary_var)
    pfs_results['roc_results'] = pfs_roc_results
    pfs_results['roc_curves_data'] = pfs_roc_curves_data
    
    # 生成ROC曲线图
    if pfs_roc_curves_data:
        generate_roc_plot(pfs_roc_curves_data, "PFS")
    
    # 生成截断值分析表格
    if pfs_roc_results:
        generate_threshold_table(pfs_roc_results, "PFS")
    
    # 2.3 多因素Cox回归分析
    if pfs_significant_indicators:
        pfs_multivariate_results, pfs_multivariate_significant = multivariate_cox_analysis(
            data, pfs_significant_indicators, 'IV期PFS', '是否PD', "pfs")
        pfs_results['multivariate_results'] = pfs_multivariate_results
        pfs_results['multivariate_significant'] = pfs_multivariate_significant
    
    # ========= 3. 设计联合模型 =========
    print("\n" + "="*50)
    print("设计联合预测模型")
    print("="*50)
    
    # 按照要求筛选特征：
    # 1. 单因素Cox回归分析中P<0.05的因素
    # 2. 多因素Cox回归分析中P<0.05的因素
    # 3. ROC曲线分析中AUC>0.5的因素
    
    # 获取单因素分析显著指标
    os_single_significant = os_results.get('significant_indicators', [])
    pfs_single_significant = pfs_results.get('significant_indicators', [])
    single_significant_features = list(set(os_single_significant + pfs_single_significant))
    
    # 获取多因素分析显著指标
    os_multi_significant = os_results.get('multivariate_significant', [])
    pfs_multi_significant = pfs_results.get('multivariate_significant', [])
    multi_significant_features = list(set(os_multi_significant + pfs_multi_significant))
    
    # 获取ROC分析中AUC>0.5的指标
    auc_good_features = []
    
    # 检查OS的ROC结果
    os_roc_results = os_results.get('roc_results', {})
    for indicator, result in os_roc_results.items():
        if result.get('auc', 0) > 0.5:
            auc_good_features.append(indicator)
    
    # 检查PFS的ROC结果
    pfs_roc_results = pfs_results.get('roc_results', {})
    for indicator, result in pfs_roc_results.items():
        if result.get('auc', 0) > 0.5:
            auc_good_features.append(indicator)
    
    # 合并所有满足条件的特征
    combined_features = list(set(single_significant_features + multi_significant_features + auc_good_features))
    
    print(f"单因素分析P<0.05的特征数量: {len(single_significant_features)}")
    print(f"多因素分析P<0.05的特征数量: {len(multi_significant_features)}")
    print(f"ROC分析AUC>0.5的特征数量: {len(auc_good_features)}")
    print(f"合并后的特征数量: {len(combined_features)}")
    
    # 如果筛选后的特征不足，补充一些表现较好的特征
    if len(combined_features) < 3 and len(filtered_indicators) >= 3:
        # 获取所有ROC结果中AUC>0.5的指标
        all_roc_results = {**os_roc_results, **pfs_roc_results}
        if all_roc_results:
            # 按AUC排序，选择表现较好的指标
            sorted_roc = sorted(all_roc_results.items(), key=lambda x: x[1].get('auc', 0), reverse=True)
            for indicator, _ in sorted_roc:
                if indicator not in combined_features and indicator in data.columns:
                    combined_features.append(indicator)
                if len(combined_features) >= 3:
                    break
    
    # 如果仍然没有足够的特征，使用筛选后的指标的前几个
    if not combined_features and len(filtered_indicators) >= 3:
        combined_features = filtered_indicators[:3]
    
    # 初始化分组变量列表
    group_vars_in_model = []
    
    # 确保分组变量被包含在联合模型中
    if grouped_vars and combined_features:
        group_vars_in_model = [var for var in grouped_vars if var in combined_features]
        print(f"联合模型中包含的分组变量数量: {len(group_vars_in_model)}")
    else:
        print("未找到分组变量或联合特征")
    
    # 确保所有存在的骨骼肌相关因素都被包含在联合模型特征中
    for var in existing_skeletal_vars:
        if var not in combined_features:
            print(f"将骨骼肌相关因素 {var} 添加到联合模型特征中")
            combined_features.append(var)
    
    print(f"\n用于构建联合模型的特征数量: {len(combined_features)}")
    print(f"特征列表: {', '.join(combined_features)}")
    
    # 创建复合指标
    if len(combined_features) >= 2:
        data, composite_indicators = create_composite_indicators(data, combined_features)
        combined_features += composite_indicators
    
    # 构建用于模型训练和评估的数据
    try:
        # 如果有足够的特征，进行特征组合优化
        if len(combined_features) > 2 and len(data) >= 30:
            print("\n进行特征组合优化，寻找最佳预测因子组合...")
            best_combination = optimize_feature_combination(data, combined_features, os_binary_var)
            combined_features = best_combination
        
        print(f"\n最终用于构建联合模型的特征数量: {len(combined_features)}")
        print(f"最终特征列表: {', '.join(combined_features)}")
        
        # 使用OS作为主要结局进行模型训练
        model_data = data[[os_binary_var] + combined_features].dropna()
        X = model_data[combined_features]
        y = model_data[os_binary_var]
        
        print(f"用于模型训练的有效样本量: {len(y)}")
        
        if len(y) < 20:
            print("警告: 用于模型训练的有效样本量不足，无法构建联合模型")
            models_results = None
        else:
            # 划分训练集和测试集
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
            
            # 构建并比较多种联合预测模型
            models_results = build_combined_models(X_train, y_train, X_test, y_test, group_vars_in_model=group_vars_in_model)
            
            # 为报告保存特征信息
            if models_results:
                # 保存使用的特征列表到每个模型结果中
                for model_name in models_results:
                    models_results[model_name]['features'] = combined_features
                    
                    # 提取特征重要性（如果模型支持）
                    if model_name == 'Logistic Regression' and 'best_estimator' in models_results[model_name]:
                        estimator = models_results[model_name]['best_estimator']
                        if hasattr(estimator.named_steps['classifier'], 'coef_'):
                            coefs = estimator.named_steps['classifier'].coef_[0]
                            importance_dict = {feature: abs(coef) for feature, coef in zip(combined_features, coefs)}
                            models_results[model_name]['feature_importance'] = importance_dict
                    elif model_name in ['Random Forest', 'Gradient Boosting'] and 'best_estimator' in models_results[model_name]:
                        estimator = models_results[model_name]['best_estimator']
                        if hasattr(estimator.named_steps['classifier'], 'feature_importances_'):
                            importances = estimator.named_steps['classifier'].feature_importances_
                            importance_dict = {feature: importance for feature, importance in zip(combined_features, importances)}
                            models_results[model_name]['feature_importance'] = importance_dict
    except Exception as e:
        print(f"构建联合模型时出错: {str(e)}")
        models_results = None
    
    # ========= 4. 生成综合分析报告 =========
    print("\n" + "="*50)
    print("生成综合分析报告")
    print("="*50)
    
    generate_comprehensive_report(data, os_results, pfs_results, models_results)
    
    print(f"\n所有结果已保存至文件夹: {output_dir}")
    print("综合生存分析完成！")

if __name__ == "__main__":
    main()