"""
基于APPNP解耦编码器的VAE-UNet图池化模型

完整实现4部分改进方案:
    第1部分: 解耦编码器（MLP + APPNP两分支）
    第2部分: 概率池化（VAE编码器）
    第3部分: 解码器（反池化 + APPNP精炼）
    第4部分: 输出与损失（任务预测 + 特征重构 + VAE正则化）

核心特性:
    1. 双预测头机制:
       - 图级预测头: 基于Z的全局表示（mean+max pooling）
       - 节点级预测头: 基于X_refined的节点表示
    2. 自适应任务类型:
       - 图分类任务（如DD数据集）: 使用图级预测头
       - 节点分类任务: 使用节点级预测头
    3. 多尺度特征融合: U-Net结构融合粗粒度和细粒度信息

使用示例:
    # 图分类任务（DD数据集）
    model = VAEUNetModel(...)
    model.set_task_type('graph')  # 使用图级预测头
    
    # 节点分类任务
    model = VAEUNetModel(...)
    model.set_task_type('node')   # 使用节点级预测头

作者: 基于用户改进方案
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from appnp_layers import APPNPConv
from device_config import get_device


# ============================================================
# 第1部分：解耦编码器
# ============================================================

class MLP(nn.Module):
    """Multi-layer perceptron with optional BatchNorm and Dropout.

    Every hidden Linear layer is followed by (optional) BatchNorm1d, the
    chosen activation, and (optional) Dropout; the final Linear layer is
    left bare. Accepts 2-D ``[N, F]`` input or 3-D ``[B, N, F]`` input,
    which is flattened for the linear stack and reshaped back afterwards.
    """

    def __init__(self, in_dim, hidden_dims, out_dim, dropout=0.5,
                 batch_norm=True, activation='relu'):
        super(MLP, self).__init__()

        # Dispatch table instead of an if/elif chain; the single shared
        # activation instance is reused between layers (stateless modules).
        factories = {
            'relu': nn.ReLU,
            'elu': nn.ELU,
            'leaky_relu': lambda: nn.LeakyReLU(0.2),
        }
        if activation not in factories:
            raise ValueError(f"Unknown activation: {activation}")
        self.activation = factories[activation]()

        dims = [in_dim] + hidden_dims + [out_dim]
        last = len(dims) - 2  # index of the output Linear layer
        modules = []
        for idx, (d_in, d_out) in enumerate(zip(dims[:-1], dims[1:])):
            modules.append(nn.Linear(d_in, d_out))
            if idx == last:
                continue  # no norm/activation/dropout after the output layer
            if batch_norm:
                modules.append(nn.BatchNorm1d(d_out))
            modules.append(self.activation)
            if dropout > 0:
                modules.append(nn.Dropout(dropout))

        self.mlp = nn.Sequential(*modules)

    def forward(self, x):
        """Apply the stack; 3-D inputs are flattened to 2-D and restored."""
        if x.dim() == 3:
            b, n, f = x.shape
            flat_out = self.mlp(x.view(b * n, f))
            return flat_out.view(b, n, -1)
        return self.mlp(x)


class DecoupledEncoder(nn.Module):
    """
    Decoupled encoder (Part 1).

    Two independent MLP + APPNP branches read the raw node features:
    branch 1 produces node embeddings, branch 2 produces a soft
    cluster-assignment matrix (row-wise softmax over K clusters).

    Inputs: X [N, F_in], A [N, N]
    Outputs:
        - X_embed [N, F_embed]: node embeddings (branch 1)
        - S [N, K]: assignment matrix (branch 2), rows sum to 1
    """

    def __init__(self,
                 in_dim,
                 embed_dim,
                 num_clusters,
                 hidden_dims=None,
                 K=10,
                 alpha=0.1,
                 dropout=0.5,
                 batch_norm=True):
        super(DecoupledEncoder, self).__init__()

        # Avoid the shared-mutable-default pitfall; the effective default
        # architecture is unchanged ([128, 128]).
        if hidden_dims is None:
            hidden_dims = [128, 128]

        self.in_dim = in_dim
        self.embed_dim = embed_dim
        self.num_clusters = num_clusters

        # Branch 1: feature embedding
        self.mlp_embed = MLP(
            in_dim=in_dim,
            hidden_dims=hidden_dims,
            out_dim=embed_dim,
            dropout=dropout,
            batch_norm=batch_norm,
            activation='relu'
        )
        self.appnp_embed = APPNPConv(K=K, alpha=alpha, add_self_loops=True)

        # Branch 2: cluster assignment
        self.mlp_pool = MLP(
            in_dim=in_dim,
            hidden_dims=hidden_dims,
            out_dim=num_clusters,
            dropout=dropout,
            batch_norm=batch_norm,
            activation='relu'
        )
        self.appnp_pool = APPNPConv(K=K, alpha=alpha, add_self_loops=True)

    def forward(self, x, adj):
        # Branch 1: feature embedding (MLP transform, then APPNP smoothing)
        h_embed = self.mlp_embed(x)
        x_embed = self.appnp_embed(h_embed, adj)

        # Branch 2: cluster assignment; softmax over the cluster axis so
        # each node's memberships form a probability distribution
        h_pool = self.mlp_pool(x)
        s_logits = self.appnp_pool(h_pool, adj)
        S = F.softmax(s_logits, dim=1)

        return x_embed, S


# ============================================================
# 第2部分：概率池化
# ============================================================

class ProbabilisticPooling(nn.Module):
    """
    Probabilistic pooling (Part 2).

    Deterministically pools node embeddings into K super-nodes via the
    assignment matrix S, then encodes them as a diagonal Gaussian (VAE
    encoder) and samples with the reparameterization trick.

    Inputs: X_embed [N, F_embed], S [N, K]
    Outputs: Z [K, F_latent], mu_super [K, F_latent],
             log_sigma_super [K, F_latent] (interpreted as log-variance,
             since sampling uses std = exp(0.5 * log_sigma_super))
    """

    def __init__(self,
                 embed_dim,
                 latent_dim,
                 hidden_dims=None,
                 dropout=0.0):
        super(ProbabilisticPooling, self).__init__()

        # Avoid the shared-mutable-default pitfall; default is unchanged.
        if hidden_dims is None:
            hidden_dims = [128]

        self.embed_dim = embed_dim
        self.latent_dim = latent_dim

        # Two heads with identical architecture: one for the mean, one for
        # the log-variance (previously built with duplicated inline code).
        dims = [embed_dim] + hidden_dims + [latent_dim]
        self.mlp_mu = self._build_mlp(dims, dropout)
        self.mlp_sigma = self._build_mlp(dims, dropout)

    @staticmethod
    def _build_mlp(dims, dropout):
        """Linear stack with ReLU (+ optional Dropout) between hidden layers."""
        layers = []
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i+1]))
            if i < len(dims) - 2:
                layers.append(nn.ReLU())
                if dropout > 0:
                    layers.append(nn.Dropout(dropout))
        return nn.Sequential(*layers)

    def forward(self, X_embed, S):
        # 1. Deterministic pooling: S^T X -> [K, F_embed]
        X_pooled = torch.matmul(S.t(), X_embed)  # [K, F_embed]

        # 2. VAE encoder heads
        mu_super = self.mlp_mu(X_pooled)
        log_sigma_super = self.mlp_sigma(X_pooled)

        # 3. Reparameterized sample during training; posterior mean at eval.
        #    exp(0.5 * log_var) is the standard deviation.
        if self.training:
            epsilon = torch.randn_like(mu_super)
            Z = mu_super + epsilon * torch.exp(0.5 * log_sigma_super)
        else:
            Z = mu_super

        return Z, mu_super, log_sigma_super


# ============================================================
# 第3部分：解码器
# ============================================================

class Decoder(nn.Module):
    """
    Decoder (Part 3).

    Un-pools super-node features back to the node level via the
    assignment matrix, then refines them with an MLP followed by APPNP
    propagation over the original graph topology.

    Inputs: Z [K, F_latent], S [N, K], adj [N, N]
    Outputs: X_refined [N, F_refined], X_unpooled [N, F_latent]
    """

    def __init__(self,
                 latent_dim,
                 refined_dim,
                 hidden_dims=None,
                 appnp_K=10,
                 appnp_alpha=0.1,
                 dropout=0.0):
        super(Decoder, self).__init__()

        # Avoid the shared-mutable-default pitfall; default is unchanged.
        if hidden_dims is None:
            hidden_dims = [128]

        self.latent_dim = latent_dim
        self.refined_dim = refined_dim

        # MLP_3: per-node feature transform (ReLU + optional Dropout
        # between hidden layers, bare output layer)
        layers = []
        dims = [latent_dim] + hidden_dims + [refined_dim]
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i+1]))
            if i < len(dims) - 2:
                layers.append(nn.ReLU())
                if dropout > 0:
                    layers.append(nn.Dropout(dropout))
        self.mlp_decode = nn.Sequential(*layers)

        # APPNP: topology-aware refinement.
        # NOTE(review): unlike the encoder, add_self_loops is not passed
        # here, so APPNPConv's own default applies — confirm this
        # asymmetry is intentional.
        self.appnp_refine = APPNPConv(K=appnp_K, alpha=appnp_alpha)

    def forward(self, Z, S, adj):
        # 1. Un-pooling: distribute super-node features to nodes (S @ Z)
        X_unpooled = torch.matmul(S, Z)  # [N, F_latent]

        # 2. Feature refinement: MLP transform, then APPNP smoothing
        H_decode = self.mlp_decode(X_unpooled)
        X_refined = self.appnp_refine(H_decode, adj)

        return X_refined, X_unpooled


# ============================================================
# 第4部分：输出与损失
# ============================================================

class TaskPredictor(nn.Module):
    """Task prediction head (Output 1).

    Maps refined node features [N, refined_dim] to raw class logits
    [N, num_classes] (no softmax applied).
    """

    def __init__(self, refined_dim, num_classes, hidden_dims=None, dropout=0.0):
        super(TaskPredictor, self).__init__()

        # Avoid the shared-mutable-default pitfall; default is unchanged.
        if hidden_dims is None:
            hidden_dims = [128]

        layers = []
        dims = [refined_dim] + hidden_dims + [num_classes]
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i+1]))
            if i < len(dims) - 2:
                layers.append(nn.ReLU())
                if dropout > 0:
                    layers.append(nn.Dropout(dropout))
        self.mlp_task = nn.Sequential(*layers)

    def forward(self, X_refined):
        """Return per-node logits [N, num_classes]."""
        return self.mlp_task(X_refined)


class FeatureReconstructor(nn.Module):
    """Feature reconstruction head (Output 2).

    Maps refined node features [N, refined_dim] back to the original
    input feature space [N, input_dim] for the reconstruction loss.
    """

    def __init__(self, refined_dim, input_dim, hidden_dims=None, dropout=0.0):
        super(FeatureReconstructor, self).__init__()

        # Avoid the shared-mutable-default pitfall; default is unchanged.
        if hidden_dims is None:
            hidden_dims = [128]

        layers = []
        dims = [refined_dim] + hidden_dims + [input_dim]
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i+1]))
            if i < len(dims) - 2:
                layers.append(nn.ReLU())
                if dropout > 0:
                    layers.append(nn.Dropout(dropout))
        self.mlp_recon = nn.Sequential(*layers)

    def forward(self, X_refined):
        """Return reconstructed features [N, input_dim]."""
        return self.mlp_recon(X_refined)


def compute_vae_kl_loss(mu_super, log_sigma_super):
    """Compute the VAE KL divergence KL(N(mu, sigma^2) || N(0, I)) (Loss 3).

    ``log_sigma_super`` is interpreted as the log-VARIANCE, matching the
    reparameterization ``std = exp(0.5 * log_sigma_super)`` used in
    ProbabilisticPooling.forward. The previous formula used the log-std
    convention (``2 * log_sigma`` / ``exp(2 * log_sigma)``), which was
    inconsistent with the sampler; this is the standard log-variance form.

    Args:
        mu_super: Tensor [K, F_latent] - posterior means.
        log_sigma_super: Tensor [K, F_latent] - posterior log-variances.

    Returns:
        Scalar tensor: KL divergence averaged over all K * F_latent elements,
        so lambda_kl is independent of model size.
    """
    L_KL = -0.5 * torch.sum(
        1 + log_sigma_super - mu_super.pow(2) - torch.exp(log_sigma_super)
    )
    L_KL = L_KL / (mu_super.size(0) * mu_super.size(1))
    return L_KL


# ============================================================
# 完整的VAE-UNet模型
# ============================================================

class VAEUNetModel(nn.Module):
    """
    完整的VAE-UNet图池化模型
    
    整合所有4个部分:
        1. 解耦编码器（生成X_embed和S）
        2. 概率池化（VAE编码为Z）
        3. 解码器（反池化并精炼）
        4. 输出与损失（任务预测 + 特征重构）
    """
    
    def __init__(self,
                 # 为了兼容原train.py，添加这些参数
                 max_num_nodes=None,
                 
                 # 输入输出维度
                 input_dim=10,
                 num_classes=2,
                 
                 # 网络结构参数
                 embed_dim=64,
                 latent_dim=32,
                 refined_dim=64,
                 num_clusters=20,
                 hidden_dims=[128],
                 
                 # APPNP参数
                 appnp_K=10,
                 appnp_alpha=0.1,
                 
                 # 训练参数
                 dropout=0.3,
                 bn=True,
                 
                 # 损失权重
                 lambda_task=1.0,
                 lambda_recon=1.0,
                 lambda_kl=0.001,
                 
                 # 兼容参数（原train.py需要）
                 args=None,
                 **kwargs):
        super(VAEUNetModel, self).__init__()
        
        self.input_dim = input_dim
        self.num_classes = num_classes
        self.num_clusters = num_clusters
        
        # 损失权重
        self.lambda_task = lambda_task
        self.lambda_recon = lambda_recon
        self.lambda_kl = lambda_kl
        
        # 第1部分: 解耦编码器
        self.encoder = DecoupledEncoder(
            in_dim=input_dim,
            embed_dim=embed_dim,
            num_clusters=num_clusters,
            hidden_dims=hidden_dims,
            K=appnp_K,
            alpha=appnp_alpha,
            dropout=dropout
        )
        
        # 第2部分: 概率池化
        self.prob_pool = ProbabilisticPooling(
            embed_dim=embed_dim,
            latent_dim=latent_dim,
            hidden_dims=hidden_dims,
            dropout=dropout
        )
        
        # 第3部分: 解码器
        self.decoder = Decoder(
            latent_dim=latent_dim,
            refined_dim=refined_dim,
            hidden_dims=hidden_dims,
            appnp_K=appnp_K,
            appnp_alpha=appnp_alpha,
            dropout=dropout
        )
        
        # 第4部分: 输出
        # 4.1 节点级预测器（用于节点分类任务）
        self.node_predictor = TaskPredictor(
            refined_dim=refined_dim,
            num_classes=num_classes,
            hidden_dims=hidden_dims,
            dropout=dropout
        )
        
        # 4.2 图级预测器（用于图分类任务，基于X_refined）
        self.graph_predictor = nn.Sequential(
            nn.Linear(refined_dim * 2, hidden_dims[0] if hidden_dims else refined_dim),  # mean + max pooling
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dims[0] if hidden_dims else refined_dim, num_classes)
        )
        
        # 4.3 特征重构器
        self.feature_reconstructor = FeatureReconstructor(
            refined_dim=refined_dim,
            input_dim=input_dim,
            hidden_dims=hidden_dims,
            dropout=dropout
        )
        
        # 任务类型标志（默认为图级任务）
        self.task_type = 'graph'  # 'graph' 或 'node'
    
    def set_task_type(self, task_type):
        """
        设置任务类型
        
        Args:
            task_type: str - 'graph' 或 'node'
        """
        assert task_type in ['graph', 'node'], "task_type 必须是 'graph' 或 'node'"
        self.task_type = task_type
        print(f"任务类型已设置为: {task_type}")
    
    def forward(self, x, adj, batch_num_nodes=None, assign_x=None):
        """
        完整的前向传播（兼容原train.py接口）
        
        参数:
            x: Tensor [batch, N, input_dim] 或 [N, input_dim] - 原始节点特征
            adj: Tensor [batch, N, N] 或 [N, N] - 邻接矩阵
            batch_num_nodes: 批处理时的节点数（为了兼容，可选）
            assign_x: 分配特征（为了兼容，可选）
        
        返回:
            如果是批处理模式: y_logits_batch [batch, num_classes] - 图级预测
            如果是单图模式: outputs, intermediates
        """
        # 判断是否是批处理模式（原train.py的格式）
        is_batch = (len(x.shape) == 3)
        
        if is_batch:
            # 批处理模式：处理每个图，返回图级预测
            return self._forward_batch(x, adj, batch_num_nodes)
        else:
            # 单图模式：返回完整的输出和中间结果
            return self._forward_single(x, adj)
    
    def _forward_single(self, x, adj):
        """单图前向传播"""
        # 第1部分: 解耦编码器
        x_embed, S = self.encoder(x, adj)
        
        # 第2部分: 概率池化
        Z, mu_super, log_sigma_super = self.prob_pool(x_embed, S)
        
        # 第3部分: 解码器
        x_refined, x_unpooled = self.decoder(Z, S, adj)
        
        # 第4部分: 输出
        # 4.1 节点级预测（总是计算，用于重构损失）
        y_logits_node = self.node_predictor(x_refined)  # [N, num_classes]
        
        # 4.2 图级预测（基于X_refined的全局表示）
        # 使用 mean + max pooling 获得更robust的图表示
        x_refined_mean = x_refined.mean(dim=0)  # [refined_dim]
        x_refined_max = x_refined.max(dim=0)[0]  # [refined_dim]
        graph_embedding = torch.cat([x_refined_mean, x_refined_max], dim=0)  # [refined_dim * 2]
        y_logits_graph = self.graph_predictor(graph_embedding)  # [num_classes]
        
        # 4.3 特征重构
        x_recon = self.feature_reconstructor(x_refined)
        
        # 组织输出
        outputs = {
            'y_logits_node': y_logits_node,   # [N, num_classes] 节点级预测
            'y_logits_graph': y_logits_graph, # [num_classes] 图级预测
            'x_recon': x_recon,                # [N, input_dim]
        }
        
        intermediates = {
            'x_embed': x_embed,          # [N, embed_dim]
            'S': S,                      # [N, K]
            'Z': Z,                      # [K, latent_dim]
            'mu_super': mu_super,        # [K, latent_dim]
            'log_sigma_super': log_sigma_super,  # [K, latent_dim]
            'x_unpooled': x_unpooled,    # [N, latent_dim]
            'x_refined': x_refined,      # [N, refined_dim]
            'graph_embedding': graph_embedding  # [refined_dim * 2]
        }
        
        # 保存中间结果供loss函数使用
        self._intermediates = intermediates
        self._x_original = x
        
        return outputs, intermediates
    
    def _forward_batch(self, x, adj, batch_num_nodes=None):
        """
        批处理模式前向传播（兼容原train.py）
        
        根据task_type选择使用图级预测头或节点级预测
        """
        batch_size = x.size(0)
        graph_logits_list = []
        
        # 保存所有输出和中间结果用于损失计算
        self._batch_outputs = []
        self._batch_intermediates = []
        self._batch_x_original = []
        
        for i in range(batch_size):
            # 获取有效节点数
            if batch_num_nodes is not None:
                num_nodes = batch_num_nodes[i]
                x_i = x[i, :num_nodes, :]
                adj_i = adj[i, :num_nodes, :num_nodes]
            else:
                x_i = x[i]
                adj_i = adj[i]
            
            # 单图前向传播
            outputs, intermediates = self._forward_single(x_i, adj_i)
            
            # 根据任务类型选择预测方式
            if self.task_type == 'graph':
                # 图分类任务：直接使用图级预测头（基于Z）
                graph_logit = outputs['y_logits_graph'].unsqueeze(0)  # [1, num_classes]
            else:
                # 节点分类任务：对节点预测取平均
                y_logits_nodes = outputs['y_logits_node']  # [num_nodes, num_classes]
                graph_logit = y_logits_nodes.mean(dim=0, keepdim=True)  # [1, num_classes]
            
            graph_logits_list.append(graph_logit)
            
            # 保存完整的outputs和中间结果
            self._batch_outputs.append(outputs)
            self._batch_intermediates.append(intermediates)
            self._batch_x_original.append(x_i)
        
        # 拼接所有图的预测
        graph_logits = torch.cat(graph_logits_list, dim=0)  # [batch, num_classes]
        
        return graph_logits
    
    def loss(self, y_pred, y_true, adj=None, batch_num_nodes=None):
        """
        计算损失（兼容原train.py接口）
        
        参数:
            y_pred: Tensor [batch, num_classes] - 图级预测
            y_true: Tensor [batch] - 真实标签
            adj: 邻接矩阵（可选）
            batch_num_nodes: 节点数（可选）
        
        返回:
            total_loss: Tensor (标量) - 总损失
        """
        # 任务损失（图级分类）
        L_task = F.cross_entropy(y_pred, y_true)
        
        # 如果有保存的batch输出和中间结果，计算重构和KL损失
        if (hasattr(self, '_batch_outputs') and len(self._batch_outputs) > 0 and
            hasattr(self, '_batch_intermediates') and len(self._batch_intermediates) > 0):
            L_recon_total = 0.0
            L_KL_total = 0.0
            
            for i, (outputs, intermediates, x_orig) in enumerate(
                zip(self._batch_outputs, self._batch_intermediates, self._batch_x_original)):
                # 重构损失：使用已经计算好的x_recon
                x_recon = outputs['x_recon']
                L_recon_total += F.mse_loss(x_recon, x_orig)
                
                # KL损失
                L_KL_total += compute_vae_kl_loss(
                    intermediates['mu_super'],
                    intermediates['log_sigma_super']
                )
            
            # 平均
            L_recon = L_recon_total / len(self._batch_outputs)
            L_KL = L_KL_total / len(self._batch_intermediates)
            
            # 总损失
            total_loss = (self.lambda_task * L_task + 
                         self.lambda_recon * L_recon + 
                         self.lambda_kl * L_KL)
        else:
            # 如果没有中间结果，只用任务损失
            total_loss = L_task
        
        return total_loss
    
    def compute_loss(self, outputs, intermediates, x_original, y_true):
        """
        计算总损失（完整版，用于单图训练）
        
        参数:
            outputs: dict - forward()返回的输出
            intermediates: dict - forward()返回的中间结果
            x_original: Tensor [N, input_dim] - 原始特征
            y_true: Tensor [N] 或 标量 - 真实标签
        
        返回:
            total_loss: Tensor (标量) - 总损失
            loss_dict: dict - 各项损失的详细信息
        """
        # 损失1: 任务预测
        if y_true.dim() == 0 or self.task_type == 'graph':
            # 图级标签：使用图级预测头
            y_logits_graph = outputs['y_logits_graph']  # [num_classes]
            if y_true.dim() == 0:
                y_true_graph = y_true.unsqueeze(0)  # [1]
            else:
                y_true_graph = y_true[0].unsqueeze(0)  # 假设图内所有节点标签相同
            L_task = F.cross_entropy(y_logits_graph.unsqueeze(0), y_true_graph)
        else:
            # 节点级标签：使用节点级预测头
            L_task = F.cross_entropy(outputs['y_logits_node'], y_true)
        
        # 损失2: 特征重构
        L_recon = F.mse_loss(outputs['x_recon'], x_original)
        
        # 损失3: VAE正则化
        L_KL = compute_vae_kl_loss(
            intermediates['mu_super'], 
            intermediates['log_sigma_super']
        )
        
        # 总损失
        total_loss = (self.lambda_task * L_task + 
                     self.lambda_recon * L_recon + 
                     self.lambda_kl * L_KL)
        
        # 详细信息
        loss_dict = {
            'total': total_loss.item(),
            'task': L_task.item(),
            'recon': L_recon.item(),
            'kl': L_KL.item(),
            'weighted_task': (self.lambda_task * L_task).item(),
            'weighted_recon': (self.lambda_recon * L_recon).item(),
            'weighted_kl': (self.lambda_kl * L_KL).item()
        }
        
        return total_loss, loss_dict
    
    def predict(self, x, adj):
        """预测（评估模式）"""
        self.eval()
        with torch.no_grad():
            outputs, _ = self.forward(x, adj)
            if self.task_type == 'graph':
                # 图级预测：返回单个标签
                pred = torch.argmax(outputs['y_logits_graph'])
            else:
                # 节点级预测：返回每个节点的标签
                pred = torch.argmax(outputs['y_logits_node'], dim=1)
        return pred
    
    def get_cluster_assignments(self, x, adj):
        """获取节点的簇分配"""
        self.eval()
        with torch.no_grad():
            x_embed, S = self.encoder(x, adj)
            cluster_ids = torch.argmax(S, dim=1)
        return cluster_ids, S
    
    def __repr__(self):
        return (f'{self.__class__.__name__}(\n'
                f'  input_dim={self.input_dim},\n'
                f'  num_classes={self.num_classes},\n'
                f'  num_clusters={self.num_clusters},\n'
                f'  λ_task={self.lambda_task},\n'
                f'  λ_recon={self.lambda_recon},\n'
                f'  λ_KL={self.lambda_kl}\n'
                f')')


# ============================================================
# 测试函数
# ============================================================

def test_vae_unet_model():
    """Smoke-test the full model on a random graph: forward pass, loss
    computation, prediction, cluster assignment and backpropagation."""
    print("=" * 60)
    print("测试完整的VAE-UNet模型")
    print("=" * 60)
    
    # Test parameters
    N = 100          # number of nodes
    F_in = 10        # input feature dimension
    num_classes = 5  # number of classes
    K = 20           # number of super-nodes
    
    # Build random test data: symmetrized, thresholded adjacency (~10% density)
    x = torch.randn(N, F_in)
    adj = torch.rand(N, N)
    adj = (adj + adj.t()) / 2
    adj = (adj > 0.9).float()
    y_true = torch.randint(0, num_classes, (N,))
    
    print(f"\n输入数据:")
    print(f"  节点特征: {x.shape}")
    print(f"  邻接矩阵: {adj.shape}, 边数: {adj.sum().item()}")
    print(f"  真实标签: {y_true.shape}")
    print()
    
    # Build the model
    model = VAEUNetModel(
        input_dim=F_in,
        num_classes=num_classes,
        embed_dim=64,
        latent_dim=32,
        refined_dim=64,
        num_clusters=K,
        hidden_dims=[128],
        appnp_K=10,
        appnp_alpha=0.1,
        dropout=0.3,
        lambda_task=1.0,
        lambda_recon=1.0,
        lambda_kl=0.001
    )
    
    print(f"模型:")
    print(model)
    print()
    print(f"总参数数: {sum(p.numel() for p in model.parameters()):,}")
    print()
    
    # Forward pass (single-graph mode: returns outputs + intermediates)
    print("=" * 60)
    print("前向传播")
    print("=" * 60)
    
    model.train()
    outputs, intermediates = model(x, adj)
    
    print(f"\n输出:")
    for name, tensor in outputs.items():
        print(f"  {name}: {tensor.shape}")
    
    print(f"\n中间结果:")
    for name, tensor in intermediates.items():
        print(f"  {name}: {tensor.shape}")
    print()
    
    # Loss computation (full single-graph version)
    print("=" * 60)
    print("损失计算")
    print("=" * 60)
    
    total_loss, loss_dict = model.compute_loss(outputs, intermediates, x, y_true)
    
    print(f"\n总损失: {total_loss.item():.6f}")
    print(f"\n详细损失:")
    for name, value in loss_dict.items():
        print(f"  {name:20s}: {value:.6f}")
    print()
    
    # Prediction
    # NOTE(review): task_type defaults to 'graph', so predict() returns a
    # single scalar label that broadcasts against y_true [N]; the reported
    # "accuracy" is the fraction of nodes matching that one graph label.
    print("=" * 60)
    print("预测")
    print("=" * 60)
    
    pred = model.predict(x, adj)
    accuracy = (pred == y_true).float().mean()
    
    print(f"\n预测标签: {pred.shape}")
    print(f"准确率: {accuracy.item():.4f}")
    print()
    
    # Cluster assignments
    print("=" * 60)
    print("簇分配")
    print("=" * 60)
    
    cluster_ids, S = model.get_cluster_assignments(x, adj)
    cluster_sizes = torch.bincount(cluster_ids, minlength=K)
    
    print(f"\n簇分配: {cluster_ids.shape}")
    print(f"每个簇的节点数: {cluster_sizes.tolist()}")
    print()
    
    # Backpropagation: verify gradients flow to (most) parameters
    print("=" * 60)
    print("梯度反向传播")
    print("=" * 60)
    
    model.zero_grad()
    total_loss.backward()
    
    has_grad = sum(1 for p in model.parameters() if p.grad is not None and p.grad.abs().sum() > 0)
    total_params = sum(1 for p in model.parameters())
    
    print(f"\n有梯度的参数: {has_grad}/{total_params}")
    print()
    
    print("=" * 60)
    print("测试完成!")
    print("=" * 60)


# Script entry point: run the smoke test when executed directly.
if __name__ == "__main__":
    test_vae_unet_model()

