import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader
from scipy.optimize import minimize
from lifelines.utils import concordance_index
from sklearn.preprocessing import StandardScaler
import warnings
import random

# Silence library warnings (e.g. sklearn/lifelines deprecation noise).
warnings.filterwarnings("ignore")
# Single global device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def set_seed(seed=42):
    """Seed every RNG source (Python, NumPy, PyTorch) for reproducibility.

    Args:
        seed: Integer seed applied to all generators.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Trade cuDNN kernel speed for fully deterministic behavior.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

# Fix all RNG sources once at import time so runs are reproducible.
set_seed(42)


# GSELO sparsity penalty: sum_i (1 - exp(-theta * param_i^2)).
def gselo_penalty(param, theta):
    """Smooth exponential sparsity penalty, saturating at 1 per element.

    Works on both torch tensors (differentiable) and numpy arrays.

    Args:
        param: torch.Tensor or numpy array of parameters.
        theta: positive sharpness; larger theta approximates the L0 norm.

    Returns:
        Scalar penalty (torch scalar tensor or numpy scalar).
    """
    if isinstance(param, torch.Tensor):
        # square(x) == square(abs(x)); the redundant abs() was removed.
        return torch.sum(1 - torch.exp(-theta * torch.square(param)))
    else:
        return np.sum(1 - np.exp(-theta * np.square(param)))


# DNN component of the partially linear Cox model (DNN-PLCM).
class DNN_PLCM(nn.Module):
    """Gated feed-forward network estimating the nonlinear log-hazard term g(Z).

    Each hidden stage appends exactly four modules, in order:
    Linear -> Linear (gate) -> ReLU -> Dropout. forward() consumes
    self.layers in strides of four, so this layout must not be reordered.
    """

    def __init__(self, input_dim_z, hidden_dims, dropout, theta, clamp_min, clamp_max):
        """Build the gated MLP.

        Args:
            input_dim_z: number of input features for the nonlinear part.
            hidden_dims: list of hidden-layer widths (one gated stage each).
            dropout: dropout probability applied after each stage.
            theta: GSELO sharpness used by get_sparse_penalty().
            clamp_min / clamp_max: output clamp range for numerical stability.
        """
        super(DNN_PLCM, self).__init__()
        self.theta = theta
        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.layers = nn.ModuleList()
        in_dim = input_dim_z

        for hidden_dim in hidden_dims:
            self.layers.append(nn.Linear(in_dim, hidden_dim))
            self.layers.append(nn.Linear(hidden_dim, hidden_dim))  # gate projection
            self.layers.append(nn.ReLU())
            self.layers.append(nn.Dropout(dropout))
            in_dim = hidden_dim

        self.output_layer = nn.Linear(in_dim, 1)

    def forward(self, z):
        """Map z to a clamped per-sample scalar g(z) (last dim squeezed)."""
        h = z
        for i in range(0, len(self.layers), 4):
            # First linear layer of the stage
            linear_out = self.layers[i](h)
            # Gating: a separate linear layer derives the gate signal from linear_out
            gate = torch.sigmoid(self.layers[i + 1](linear_out))
            # Apply the gate elementwise
            h = gate * linear_out
            # Activation
            h = self.layers[i + 2](h)
            # Dropout
            h = self.layers[i + 3](h)
        g_z = self.output_layer(h).squeeze(-1)
        return torch.clamp(g_z, min=self.clamp_min, max=self.clamp_max)

    def get_sparse_penalty(self):
        """Sum GSELO penalties over all linear layers; first hidden layer weighted 3x."""
        penalty = 0.0
        # Penalize the first hidden layer more strongly to sharpen variable selection
        if len(self.layers) > 0 and isinstance(self.layers[0], nn.Linear):
            # The first layer's penalty coefficient is tripled
            penalty += 3 * gselo_penalty(self.layers[0].weight, self.theta)
            penalty += 3 * gselo_penalty(self.layers[0].bias, self.theta)
            
            # Remaining layers receive the normal penalty
            for i in range(1, len(self.layers)):
                layer = self.layers[i]
                if isinstance(layer, nn.Linear):
                    penalty += gselo_penalty(layer.weight, self.theta)
                    penalty += gselo_penalty(layer.bias, self.theta)
        else:
            # No hidden layers: penalize every linear layer normally
            for layer in self.layers:
                if isinstance(layer, nn.Linear):
                    penalty += gselo_penalty(layer.weight, self.theta)
                    penalty += gselo_penalty(layer.bias, self.theta)
        
        # Output layer receives the normal penalty
        penalty += gselo_penalty(self.output_layer.weight, self.theta)
        penalty += gselo_penalty(self.output_layer.bias, self.theta)
        
        return penalty


# Negative log-likelihood for right- and interval-censored Cox models.
def cox_loss(time, status, log_hazard, censor_type, lambda0, max_clamp_exp):
    """Compute the censoring-aware Cox loss.

    Args:
        time: event/censoring times (interval: right endpoints R).
        status: right censoring -> event indicator (trailing dim squeezed);
            interval censoring -> 2D tensor with columns (L bound, event mask).
        log_hazard: per-sample log-hazard predictions.
        censor_type: "right" or "interval".
        lambda0: baseline hazard rate used only for interval censoring.
        max_clamp_exp: upper clamp on log-hazard before exponentiation.

    Returns:
        Scalar loss tensor.

    Raises:
        ValueError: for any unsupported censor_type.
    """
    if censor_type == "right":
        events = status.squeeze(-1)
        order = torch.argsort(time, descending=True)
        events_sorted = events[order]
        lh_sorted = log_hazard[order]

        # Risk-set denominator via cumulative sum over descending times.
        risk = torch.exp(torch.clamp(lh_sorted, max=max_clamp_exp))
        log_risk_set = torch.log(torch.cumsum(risk, dim=0) + 1e-10)

        return -torch.mean(events_sorted * (lh_sorted - log_risk_set))

    if censor_type == "interval":
        left = status[:, 0].squeeze()
        observed = status[:, 1].squeeze()
        right = time.squeeze()

        lh = torch.clamp(log_hazard, max=max_clamp_exp)
        # Exponential baseline survival at the interval endpoints.
        surv_left = torch.exp(-lambda0 * left * torch.exp(lh))
        surv_right = torch.exp(-lambda0 * right * torch.exp(lh))

        keep = observed == 1
        if torch.sum(keep) == 0:
            return torch.tensor(0.0, device=device)
        return -torch.mean(torch.log(surv_left[keep] - surv_right[keep] + 1e-10))

    raise ValueError("删失类型仅支持'right'和'interval'")


# Model trainer: alternates DNN optimization (Adam) with linear-part
# alpha optimization (scipy SLSQP) under a partially linear Cox model.
class PLCM_Trainer:
    """Trainer for the partially linear Cox model (PLCM).

    The log-hazard is modeled as X @ alpha + g(Z), where alpha is a sparse
    linear coefficient vector and g is a DNN_PLCM network. Each epoch
    alternates: (1) fix alpha, update the DNN by minibatch SGD on the Cox
    loss plus a GSELO penalty; (2) fix the DNN, re-fit alpha on the full
    training set with scipy's SLSQP.

    NOTE(review): evaluate() reads `self.test_data`, which is attached
    externally (run_plcm_model sets it) — confirm before reuse elsewhere.
    """

    def __init__(self, input_dim_x, input_dim_z, lambda_alpha, theta_dnn, theta_alpha, censor_type,
                 hidden_dims, dropout, lr, lr_scheduler_patience, lr_scheduler_factor, grad_clip_norm, 
                 alpha_optim_maxiter, lambda0, clamp_min, clamp_max, max_clamp_exp):
        # Linear coefficients; optimized by scipy (not autograd), hence requires_grad=False.
        self.alpha = torch.tensor(
            np.zeros(input_dim_x, dtype=np.float64),
            dtype=torch.float32,
            device=device,
            requires_grad=False
        )
        self.dnn = DNN_PLCM(
            input_dim_z=input_dim_z,
            hidden_dims=hidden_dims,
            dropout=dropout,
            theta=theta_dnn,
            clamp_min=clamp_min,
            clamp_max=clamp_max
        ).to(device)
        self.lambda_alpha = lambda_alpha
        self.theta_alpha = theta_alpha  # theta parameter of the GSELO penalty applied to alpha
        self.censor_type = censor_type
        self.optimizer_dnn = optim.Adam(self.dnn.parameters(), lr=lr, weight_decay=1e-5)  # weight decay added for regularization
        self.grad_clip_norm = grad_clip_norm
        self.alpha_optim_maxiter = alpha_optim_maxiter
        self.lambda0 = lambda0
        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.max_clamp_exp = max_clamp_exp
        # Learning-rate scheduler driven by the test C-index
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer_dnn,
            mode='max',  # larger C-index is better
            factor=lr_scheduler_factor,
            patience=lr_scheduler_patience
        )

    def prepare_data(self, X, Z, time, status, test_size, random_state):
        """Split into train/test, standardize, and build tensor dicts.

        Args:
            X, Z: covariate arrays (linear / nonlinear parts).
            time: survival or interval-endpoint times.
            status: event indicators (2D for interval censoring).
            test_size: test fraction for train_test_split.
            random_state: split seed.

        Returns:
            (train_data, test_data) dicts keyed "X", "Z", "time", "status".
            Train entries are torch tensors on `device`; test time/status
            stay as numpy for lifelines' concordance_index.

        Raises:
            ValueError: if any NaN is found in the inputs after scaling.
        """
        # Split indices first so the scalers are fit on training data only.
        train_idx, test_idx = train_test_split(
            np.arange(len(X)),
            test_size=test_size,
            random_state=random_state
        )
        
        # Standardize train and test using training-set statistics.
        self.scaler_X = StandardScaler()
        self.scaler_Z = StandardScaler()
        
        X_train = X[train_idx]
        Z_train = Z[train_idx]
        X_test = X[test_idx]
        Z_test = Z[test_idx]
        
        X_train_scaled = self.scaler_X.fit_transform(X_train)
        Z_train_scaled = self.scaler_Z.fit_transform(Z_train)
        X_test_scaled = self.scaler_X.transform(X_test)
        Z_test_scaled = self.scaler_Z.transform(Z_test)

        if self.censor_type == "interval":
            # Interval censoring keeps status 2D: (L bound, event mask).
            train_status = torch.tensor(status[train_idx], dtype=torch.float32, device=device)
            test_status = status[test_idx]
        else:
            # Right censoring: if status is 2D, keep only the event-indicator column.
            if len(status.shape) > 1:
                status = status[:, 1]
            train_status = torch.tensor(status[train_idx], dtype=torch.float32, device=device).unsqueeze(-1)
            test_status = status[test_idx]

        if np.isnan(X_train_scaled).any() or np.isnan(Z_train_scaled).any() or np.isnan(X_test_scaled).any() or np.isnan(Z_test_scaled).any() or np.isnan(time).any() or np.isnan(status).any():
            raise ValueError("输入数据中存在NaN，请预处理数据")

        train_data = {
            "X": torch.tensor(X_train_scaled, dtype=torch.float32, device=device),
            "Z": torch.tensor(Z_train_scaled, dtype=torch.float32, device=device),
            "time": torch.tensor(time[train_idx], dtype=torch.float32, device=device),
            "status": train_status
        }
        test_data = {
            "X": torch.tensor(X_test_scaled, dtype=torch.float32, device=device),
            "Z": torch.tensor(Z_test_scaled, dtype=torch.float32, device=device),
            "time": time[test_idx],
            "status": test_status
        }

        return train_data, test_data

    def train(self, train_data, batch_size, epochs, dnn_penalty_coef):
        """Run the alternating optimization loop; keep and restore the best model.

        Args:
            train_data: dict of training tensors from prepare_data().
            batch_size: minibatch size for the DNN step.
            epochs: number of alternating epochs.
            dnn_penalty_coef: weight of the DNN GSELO sparsity penalty.
        """
        self.dnn.train()
        train_dataset = DatasetFromDict(train_data)
        # If num_workers > 0 is ever used, add a worker_init_fn to pin seeds
        # (e.g. worker_init_fn=lambda x: set_seed(42 + x)).
        train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

        best_cindex = -float('inf')
        best_state_dict = None

        for epoch in range(epochs):
            total_loss = 0.0
            
            # Step 1: fix alpha, optimize the DNN parameters.
            self.dnn.train()
            for batch in train_loader:
                X_batch, Z_batch, time_batch, status_batch = batch

                self.optimizer_dnn.zero_grad()
                g_z = self.dnn(Z_batch)
                log_hazard = torch.clamp(torch.matmul(X_batch, self.alpha) + g_z,
                                        min=self.clamp_min, max=self.clamp_max)
                loss_dnn = cox_loss(
                    time_batch, status_batch, log_hazard,
                    self.censor_type, self.lambda0, self.max_clamp_exp
                ) + dnn_penalty_coef * self.dnn.get_sparse_penalty()

                # Skip the update (rather than crash) if the loss degenerates.
                if torch.isnan(loss_dnn):
                    print(f"警告：第{epoch}轮DNN优化Batch损失为NaN，跳过更新")
                    continue
                loss_dnn.backward()
                torch.nn.utils.clip_grad_norm_(self.dnn.parameters(), max_norm=self.grad_clip_norm)
                self.optimizer_dnn.step()
                total_loss += loss_dnn.item()
            
            # Step 2: fix the DNN, optimize the alpha parameters.
            self.dnn.eval()
            with torch.no_grad():
                # Use the full training set for the alpha optimization.
                X_all = train_data["X"]
                Z_all = train_data["Z"]
                time_all = train_data["time"]
                status_all = train_data["status"]
                
                def loss_alpha(alpha_np):
                    # Objective for scipy: Cox loss + GSELO penalty on alpha.
                    alpha = torch.tensor(alpha_np, dtype=torch.float32, device=device)
                    g_z = self.dnn(Z_all)
                    log_hazard_alpha = torch.clamp(torch.matmul(X_all, alpha) + g_z,
                                                  min=self.clamp_min, max=self.clamp_max)
                    loss = cox_loss(
                        time_all, status_all, log_hazard_alpha,
                        self.censor_type, self.lambda0, self.max_clamp_exp
                    ).item() + self.lambda_alpha * gselo_penalty(alpha, theta=self.theta_alpha).item()
                    return loss

                alpha_np = self.alpha.cpu().numpy().astype(np.float64)
                res = minimize(
                    loss_alpha, alpha_np, method="SLSQP",
                    options={"maxiter": self.alpha_optim_maxiter, "ftol": 1e-6}
                )
                self.alpha.data = torch.tensor(res.x, dtype=torch.float32, device=device)

            # Evaluate the model every 10 epochs.
            if (epoch + 1) % 10 == 0:
                test_cindex = self.evaluate()
                print(f"Epoch {epoch + 1}/{epochs} | 平均训练损失: {total_loss / len(train_loader):.4f} | "
                      f"测试集C-index: {test_cindex:.4f}")
                
                # Learning-rate scheduling on the test C-index.
                self.scheduler.step(test_cindex)
                
                # Keep a snapshot of the best-performing model.
                if test_cindex > best_cindex:
                    best_cindex = test_cindex
                    best_state_dict = {
                        'dnn': self.dnn.state_dict(),
                        'alpha': self.alpha.data.clone()
                    }
                    print(f"✓ 最佳模型更新：C-index = {best_cindex:.4f}")

        # Restore the best snapshot after training.
        if best_state_dict is not None:
            self.dnn.load_state_dict(best_state_dict['dnn'])
            self.alpha.data = best_state_dict['alpha']
            print(f"\n训练完成，恢复最佳模型（C-index = {best_cindex:.4f}）")
            
        # Expose the best C-index as an instance attribute.
        self.c_index = best_cindex

    def evaluate(self):
        """Compute the C-index on the held-out test set.

        NOTE(review): for interval censoring, test status here is 2D, which
        lifelines' concordance_index does not expect — confirm evaluate()
        is only used under right censoring.
        """
        self.dnn.eval()
        with torch.no_grad():
            X_test = self.test_data["X"]
            Z_test = self.test_data["Z"]
            g_z_test = self.dnn(Z_test)
            log_hazard_test = torch.clamp(torch.matmul(X_test, self.alpha) + g_z_test,
                                         min=self.clamp_min, max=self.clamp_max)

            # Guard against NaN predictions so concordance_index does not fail.
            if torch.isnan(log_hazard_test).any():
                print("警告：预测log_hazard中存在NaN，已替换为0")
                log_hazard_test = torch.nan_to_num(log_hazard_test, nan=0.0)

            predicted_scores = log_hazard_test.cpu().numpy()
            event_times = self.test_data["time"]
            event_observed = self.test_data["status"]

            cindex = concordance_index(
                event_times=event_times,
                predicted_scores=predicted_scores,
                event_observed=event_observed
            )
        return cindex

    def get_selected_vars(self, var_names_x, var_names_z, zero_tol):
        """Return the selected linear (alpha) and nonlinear (first-layer) variables.

        Args:
            var_names_x: names for the linear covariates (alpha entries).
            var_names_z: names for the DNN input covariates.
            zero_tol: magnitude threshold below which a weight counts as zero.

        Returns:
            Dict with keys "线性筛选变量" (linear) and "非线性筛选变量" (nonlinear).
        """
        # Linear variables: threshold on |alpha_i|.
        selected_x = [var_names_x[i] for i in range(len(self.alpha)) if abs(self.alpha[i].item()) > zero_tol]
        
        # Nonlinear variables: inspect the first hidden layer's input weights.
        if hasattr(self.dnn.layers[0], 'weight'):
            # Column-wise L1 norm of the first layer's weights (one value per input feature).
            first_layer_weights = torch.sum(torch.abs(self.dnn.layers[0].weight), dim=0).detach().cpu().numpy()
            
            # Keep variables whose aggregate weight exceeds the threshold.
            selected_indices = [i for i in range(len(first_layer_weights)) if first_layer_weights[i] > zero_tol]
            
            # Rank by weight, largest first.
            selected_indices.sort(key=lambda i: first_layer_weights[i], reverse=True)
            
            # Cap the number of selected variables to keep the report readable.
            max_selected_vars = 10
            selected_indices = selected_indices[:max_selected_vars]
            
            # Map indices back to variable names.
            selected_z = [var_names_z[i] for i in selected_indices]
        else:
            selected_z = []
            
        return {"线性筛选变量": selected_x, "非线性筛选变量": selected_z}


class DatasetFromDict(Dataset):
    """Thin Dataset over a dict of aligned arrays keyed X, Z, time, status."""

    _FIELDS = ("X", "Z", "time", "status")

    def __init__(self, data_dict):
        self.data = data_dict

    def __len__(self):
        # All fields are assumed to share the length of "X".
        return len(self.data["X"])

    def __getitem__(self, idx):
        return tuple(self.data[field][idx] for field in self._FIELDS)


# Model interface: all parameter defaults are defined here.
def run_plcm_model(
    # Core input data
    X, Z, time, status, var_names_x, var_names_z,
    # Censoring type
    censor_type="right",
    # Data-handling parameters
    test_size=0.2,
    random_state=42,
    batch_size=32,  # larger batches stabilize training
    # Model architecture
    hidden_dims=None,  # None -> [128, 64, 32]; avoids a shared mutable default
    dropout=0.5,  # dropout against overfitting
    # Penalty parameters
    lambda_alpha=1e1,
    theta_dnn=1.0,
    theta_alpha=3.0,
    dnn_penalty_coef=0.5,
    # Optimizer parameters
    lr=5e-4,
    lr_scheduler_patience=10,
    lr_scheduler_factor=0.5,
    grad_clip_norm=1.0,
    alpha_optim_maxiter=300,
    # Loss-function parameters
    lambda0=5e-4,
    max_clamp_exp=10.0,
    # Numerical-stability clamps
    clamp_min=-4.0,
    clamp_max=4.0,
    # Training length
    epochs=200,
    # Variable-selection threshold
    zero_tol=1e-3
):
    """Fit a partially linear Cox model and print the selected variables.

    Args:
        X: (n, p) covariates entering the model linearly.
        Z: (n, q) covariates entering through the DNN.
        time: (n,) survival/censoring times (interval: right endpoints).
        status: event indicators; 1D or 2D depending on censor_type.
        var_names_x / var_names_z: feature names matching X / Z columns.
        hidden_dims: DNN hidden-layer widths; None means [128, 64, 32].
        (remaining keyword arguments are forwarded to PLCM_Trainer / train)

    Returns:
        The fitted PLCM_Trainer (best snapshot restored; `c_index` set).
    """
    # Bug fix: the original used a mutable list literal as the default for
    # hidden_dims, which Python shares across all calls; use a None sentinel
    # and create a fresh list per call instead.
    if hidden_dims is None:
        hidden_dims = [128, 64, 32]

    input_dim_x = X.shape[1]
    input_dim_z = Z.shape[1]
    print(f"数据集信息：{len(X)}个样本，X维度{input_dim_x}，Z维度{input_dim_z}")

    trainer = PLCM_Trainer(
        input_dim_x=input_dim_x,
        input_dim_z=input_dim_z,
        lambda_alpha=lambda_alpha,
        theta_dnn=theta_dnn,
        theta_alpha=theta_alpha,
        censor_type=censor_type,
        hidden_dims=hidden_dims,
        dropout=dropout,
        lr=lr,
        lr_scheduler_patience=lr_scheduler_patience,
        lr_scheduler_factor=lr_scheduler_factor,
        grad_clip_norm=grad_clip_norm,
        alpha_optim_maxiter=alpha_optim_maxiter,
        lambda0=lambda0,
        clamp_min=clamp_min,
        clamp_max=clamp_max,
        max_clamp_exp=max_clamp_exp
    )

    train_data, test_data = trainer.prepare_data(
        X=X, Z=Z, time=time, status=status,
        test_size=test_size, random_state=random_state
    )
    # evaluate() reads trainer.test_data; attach it before training starts.
    trainer.test_data = test_data

    print(f"开始训练（{censor_type}删失）")
    trainer.train(
        train_data=train_data,
        batch_size=batch_size,
        epochs=epochs,
        dnn_penalty_coef=dnn_penalty_coef
    )

    selected = trainer.get_selected_vars(var_names_x, var_names_z, zero_tol=zero_tol)
    print("\n=== 变量选择结果 ===")
    print("线性筛选变量:", selected['线性筛选变量'])
    print("非线性筛选变量:", selected['非线性筛选变量'])
    return trainer


# Script entry point
if __name__ == "__main__":
    def load_local_wpbc_data(missing_value_handle="median", use_feature_selection=True):
        """Load the local WPBC dataset and split features into linear (X) and nonlinear (Z) parts.

        Args:
            missing_value_handle: "drop", "mean", or "median" imputation for '?' entries.
            use_feature_selection: if True, rank features by ANOVA F-score and keep the top 25.

        Returns:
            (X, Z, time, status, X_cols, Z_cols): numpy feature arrays,
            time/status arrays, and the corresponding column-name lists.
        """
        import os
        import pandas as pd
        from sklearn.feature_selection import SelectKBest, f_classif
        
        file_path = os.path.join(os.getcwd(), "breast+cancer+wisconsin+prognostic", "wpbc.data")
        columns = [
            'id', 'outcome', 'time',
            'mean_radius', 'mean_texture', 'mean_perimeter', 'mean_area',
            'mean_smoothness', 'mean_compactness', 'mean_concavity',
            'mean_concave_points', 'mean_symmetry', 'mean_fractal_dimension',
            'radius_se', 'texture_se', 'perimeter_se', 'area_se',
            'smoothness_se', 'compactness_se', 'concavity_se',
            'concave_points_se', 'symmetry_se', 'fractal_dimension_se',
            'worst_radius', 'worst_texture', 'worst_perimeter', 'worst_area',
            'worst_smoothness', 'worst_compactness', 'worst_concavity',
            'worst_concave_points', 'worst_symmetry', 'worst_fractal_dimension',
            'follow_up_1', 'follow_up_2'
        ]
        
        # Load the raw file ('?' marks missing values)
        df = pd.read_csv(file_path, header=None, names=columns)
        df = df.replace('?', np.nan)
        
        # Coerce numeric columns
        numeric_cols = [col for col in columns if col not in ['id', 'outcome']]
        for col in numeric_cols:
            df[col] = pd.to_numeric(df[col], errors='coerce')
        
        # Handle missing values
        # NOTE(review): chained inplace fillna on df[col] may not write back under
        # newer pandas copy-on-write — prefer df[col] = df[col].fillna(...).
        if missing_value_handle == "drop":
            df = df.dropna()
        elif missing_value_handle == "mean":
            for col in numeric_cols:
                df[col].fillna(df[col].mean(), inplace=True)
        elif missing_value_handle == "median":
            for col in numeric_cols:
                df[col].fillna(df[col].median(), inplace=True)
        
        # Target variable: recurrence ('R') encoded as event = 1
        df['status'] = (df['outcome'] == 'R').astype(int)
        
        # Feature engineering: log-transform of the follow-up time
        # NOTE(review): 'time_log' is derived from the survival time (the model's
        # outcome) and is later included among the Z predictors — this looks like
        # target leakage; confirm it is intentional.
        df['time_log'] = np.log1p(df['time'])
        
        # Feature engineering: area-to-perimeter ratios
        df['mean_area_per_perimeter'] = df['mean_area'] / (df['mean_perimeter'] + 1e-10)
        df['worst_area_per_perimeter'] = df['worst_area'] / (df['worst_perimeter'] + 1e-10)
        
        # Feature engineering: squared compactness terms
        df['mean_compactness_squared'] = df['mean_compactness'] ** 2
        df['worst_compactness_squared'] = df['worst_compactness'] ** 2
        
        # Extended feature list (originals plus engineered columns)
        extended_columns = columns.copy()
        extended_columns.extend(['time_log', 'mean_area_per_perimeter', 'worst_area_per_perimeter', 
                               'mean_compactness_squared', 'worst_compactness_squared'])
        
        # Feature selection (if enabled)
        all_features = df[extended_columns[3:]]  # all features (excluding id, outcome, time)
        target = df['status']
        
        if use_feature_selection:
            selector = SelectKBest(score_func=f_classif, k='all')
            selector.fit(all_features, target)
            feature_scores = pd.DataFrame({'feature': all_features.columns, 'score': selector.scores_})
            feature_scores = feature_scores.sort_values('score', ascending=False)
            
            # Keep the 25 highest-scoring features
            selected_features = feature_scores.head(25)['feature'].tolist()
            
            # Split the selected features into linear and nonlinear parts:
            # original mean_* features go into the linear part X, the rest into Z.
            X_cols = [col for col in selected_features if col.startswith('mean_')][:10]  # at most 10 linear features
            Z_cols = [col for col in selected_features if col not in X_cols]
            
            # If Z_cols is short, top it up from the remaining features
            if len(Z_cols) < 15:
                remaining_features = [col for col in extended_columns[3:] if col not in selected_features]
                Z_cols.extend(remaining_features[:15 - len(Z_cols)])
        else:
            # Without feature selection, keep the original split plus new features
            X_cols = columns[3:13]  # original mean_* features
            Z_cols = columns[13:35]  # remaining original features
            # Append engineered features to the Z part
            Z_cols.extend(['time_log', 'mean_area_per_perimeter', 'worst_area_per_perimeter', 
                          'mean_compactness_squared', 'worst_compactness_squared'])
        
        return (df[X_cols].values, df[Z_cols].values,
                df['time'].values, df['status'].values, X_cols, Z_cols)

    # Load the data
    X, Z, time, status, var_names_x, var_names_z = load_local_wpbc_data(missing_value_handle="median")
    print(f"数据集信息：{X.shape[0]}个样本，X维度{X.shape[1]}，Z维度{Z.shape[1]}")

    # Train the model
    model = run_plcm_model(
        X=X,
        Z=Z,
        time=time,
        status=status,
        var_names_x=var_names_x,
        var_names_z=var_names_z,
        epochs=200,
        batch_size=32,
        lr=5e-4,
        dropout=0.3,
        hidden_dims=[128, 64, 32],
        lambda_alpha=1e-3,
        theta_dnn=0.1,
        theta_alpha=0.5,
        dnn_penalty_coef=0.1,
        lr_scheduler_factor=0.1,
        lr_scheduler_patience=10
    )
    