import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.layers.torch import Rearrange
from einops import rearrange
import math
import numpy as np

class GradientReversalLayer(torch.autograd.Function):
    """Identity in the forward pass; scales gradients by -alpha in backward.

    Used for domain-adversarial training (DANN-style): the upstream feature
    extractor is pushed to *maximize* the domain discriminator's loss.
    """

    @staticmethod
    def forward(ctx, x, alpha):
        # Stash the reversal strength for the backward pass.
        ctx.alpha = alpha
        # view_as acts as an identity while keeping autograd bookkeeping valid.
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reverse and scale the incoming gradient; alpha itself gets no grad.
        return -ctx.alpha * grad_output, None

class FocalLoss(nn.Module):
    """Binary focal loss over raw logits (Lin et al., 2017).

    The (1 - p_t)^gamma modulating factor down-weights well-classified
    examples, which mitigates class imbalance.
    """

    def __init__(self, alpha=0.25, gamma=2.0, reduction='mean'):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, inputs, targets):
        # Per-element BCE; p_t = exp(-bce) recovers the probability the
        # model assigned to the true class.
        bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
        p_t = torch.exp(-bce)
        focal = self.alpha * (1.0 - p_t) ** self.gamma * bce

        if self.reduction == 'sum':
            return torch.sum(focal)
        if self.reduction == 'mean':
            return torch.mean(focal)
        # Any other reduction value returns the unreduced per-element loss.
        return focal

class TemporalBlock(nn.Module):
    """
    Temporal convolution block for processing EEG signal time series
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, dropout=0.2):
        super(TemporalBlock, self).__init__()
        
        # 计算正确的padding以确保输入和输出长度一致
        # 使用same padding保证输出大小与输入相同
        padding = (kernel_size - 1) * dilation // 2
        
        # 使用更多filter增强特征提取能力
        inner_channels = out_channels * 2
        
        self.conv1 = nn.Conv1d(
            in_channels, inner_channels, kernel_size, 
            padding=padding, dilation=dilation
        )
        self.conv2 = nn.Conv1d(
            inner_channels, out_channels, kernel_size, 
            padding=padding, dilation=dilation
        )
        
        self.norm1 = nn.BatchNorm1d(inner_channels)
        self.norm2 = nn.BatchNorm1d(out_channels)
        
        self.relu = nn.GELU()  # Changed from ELU to GELU for better gradient flow
        self.dropout = nn.Dropout(dropout)
        
        # Residual connection if dimensions don't match
        self.residual = nn.Conv1d(in_channels, out_channels, 1) if in_channels != out_channels else nn.Identity()
        
        # Channel attention module - 增强通道注意力
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Conv1d(out_channels, out_channels // 4, 1),
            nn.GELU(),
            nn.Conv1d(out_channels // 4, out_channels, 1),
            nn.Sigmoid()
        )
        
        # 添加空间注意力模块 - 增强空间信息整合
        self.spatial_attention = nn.Sequential(
            nn.Conv1d(2, 1, kernel_size=7, padding=3),
            nn.Sigmoid()
        )
        
    def forward(self, x):
        # x shape: [batch, channels, time]
        residual = self.residual(x)
        
        # First conv block
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.dropout(x)
        
        # Second conv block
        x = self.conv2(x)
        x = self.norm2(x)
        
        # Apply channel attention
        att = self.channel_attention(x)
        x = x * att
        
        # 应用空间注意力
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        spatial = torch.cat([avg_out, max_out], dim=1)
        spatial = self.spatial_attention(spatial)
        x = x * spatial
        
        # 确保尺寸匹配，如果不匹配则进行调整
        if x.size(2) != residual.size(2):
            # 使用插值调整尺寸
            x = F.interpolate(x, size=residual.size(2), mode='linear', align_corners=False)
        
        # 残差连接
        x = self.relu(x + residual)
        x = self.dropout(x)
        
        return x


class SpatialAttention(nn.Module):
    """
    Multi-head self-attention over the time axis of an EEG feature map.

    Q/K/V are grouped 1x1 convolutions; a zero-initialized learnable gate
    (`gamma`) makes the module an identity mapping at initialization.
    """
    def __init__(self, channels, reduction=8, heads=4):
        super(SpatialAttention, self).__init__()
        self.heads = heads
        self.query = nn.Conv1d(channels, channels, 1, groups=heads)
        self.key = nn.Conv1d(channels, channels, 1, groups=heads)
        self.value = nn.Conv1d(channels, channels, 1, groups=heads)
        # Zero at init: the residual path dominates until gamma is learned.
        self.gamma = nn.Parameter(torch.zeros(1))
        # 1/sqrt(d_head) keeps softmax logits numerically well-scaled.
        self.scale = (channels // heads) ** -0.5

    def forward(self, x):
        """x: [batch, channels, time] -> same shape."""
        b, c, t = x.size()
        d = c // self.heads

        # Project, then split channels into heads.
        q = self.query(x).view(b, self.heads, d, t).permute(0, 1, 3, 2)  # [b, heads, t, d]
        k = self.key(x).view(b, self.heads, d, t)                         # [b, heads, d, t]
        v = self.value(x).view(b, self.heads, d, t).permute(0, 1, 3, 2)   # [b, heads, t, d]

        # Scaled dot-product attention over time positions.
        scores = F.softmax(torch.matmul(q, k) * self.scale, dim=-1)  # [b, heads, t, t]
        attended = torch.matmul(scores, v)                           # [b, heads, t, d]

        # Merge heads back into the channel axis.
        merged = attended.permute(0, 1, 3, 2).contiguous().view(b, c, t)

        # Gated residual connection.
        return self.gamma * merged + x


class GraphConvBlock(nn.Module):
    """
    Graph convolutional block for modeling electrode relationships.

    Flow: (optional) adaptive pooling of time-step nodes down to electrode
    nodes -> adjacency-masked self-attention -> gated mix of attended and
    original node features -> symmetrically normalized graph convolution
    D^(-1/2)(A+I)D^(-1/2) X W -> bias + GELU + dropout.
    """
    def __init__(self, in_features, out_features, dropout=0.2):
        super(GraphConvBlock, self).__init__()
        # GCN projection weight and bias (initialized in reset_parameters).
        self.weight = nn.Parameter(torch.Tensor(in_features, out_features))
        self.bias = nn.Parameter(torch.Tensor(out_features))
        self.dropout = nn.Dropout(dropout)
        self.reset_parameters()
        
        # Mapping intended to convert time-dimension features into
        # electrode-dimension features.
        # NOTE(review): never referenced in forward() — appears unused;
        # forward() pools with adaptive_avg_pool1d instead. Confirm before
        # removing (dropping it changes state_dict keys).
        self.time_to_electrode_proj = nn.Linear(in_features, out_features)
        
        # Self-attention projections that augment the graph convolution.
        self.q_linear = nn.Linear(in_features, in_features)
        self.k_linear = nn.Linear(in_features, in_features)
        self.v_linear = nn.Linear(in_features, in_features)
        
        # Gating mechanism controlling how much attended output versus
        # original features is propagated.
        self.gate = nn.Sequential(
            nn.Linear(in_features * 2, in_features),
            nn.Sigmoid()
        )
        
    def reset_parameters(self):
        # Kaiming for the GCN weight; bias uniform in +/- 1/sqrt(fan_in),
        # mirroring nn.Linear's default bias init (relies on a private
        # torch.nn.init helper).
        nn.init.kaiming_uniform_(self.weight)
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.bias, -bound, bound)
    
    def forward(self, x, adj):
        """
        x: node features [batch, nodes, features]
        adj: adjacency matrix [batch, num_electrodes, num_electrodes]

        Returns [batch, num_electrodes, out_features].
        """
        batch_size, num_nodes, in_features = x.size()
        num_electrodes = adj.size(1)
        
        # Decide whether the node axis carries time steps or electrodes.
        if num_nodes != num_electrodes:
            # Node count differs from electrode count: pool the time-step
            # nodes down to exactly one node per electrode.
            x_t = x.transpose(1, 2)  # [batch, features, time_nodes]
            pooled_features = F.adaptive_avg_pool1d(x_t, num_electrodes)
            x = pooled_features.transpose(1, 2)  # [batch, electrodes, features]
        
        # Self-attention over nodes.
        q = self.q_linear(x)  # [batch, nodes, features]
        k = self.k_linear(x)  # [batch, nodes, features]
        v = self.v_linear(x)  # [batch, nodes, features]
        
        # Scaled dot-product attention weights.
        attn = torch.bmm(q, k.transpose(1, 2)) / math.sqrt(in_features)  # [batch, nodes, nodes]
        attn = F.softmax(attn, dim=2)
        
        # Fuse graph structure with self-attention: mask the attention map
        # by the adjacency matrix, then re-normalize each row.
        attn = attn * adj  # element-wise mask
        attn = F.normalize(attn, p=1, dim=2)  # row-wise L1 normalization
        
        # Apply the (masked) attention weights.
        attn_output = torch.bmm(attn, v)  # [batch, nodes, features]
        
        # Gate: learn how to mix attended output with the original features.
        gate_input = torch.cat([x, attn_output], dim=2)
        gate_weight = self.gate(gate_input)
        gated_output = gate_weight * attn_output + (1 - gate_weight) * x
        
        # Project features with the learned GCN weight.
        support = torch.matmul(gated_output, self.weight)  # [batch, nodes, out_features]
        
        # Normalize the adjacency matrix: first add self-loops.
        identity = torch.eye(num_electrodes, dtype=torch.float, 
                            device=adj.device).unsqueeze(0).expand(batch_size, -1, -1)
        adj_with_self = adj + identity
        
        # Inverse square root of the degree matrix; zero-degree rows would
        # produce inf, which is zeroed out explicitly.
        rowsum = adj_with_self.sum(dim=-1, keepdim=True)
        d_inv_sqrt = torch.pow(rowsum, -0.5)
        d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.0
        d_mat_inv_sqrt = torch.diag_embed(d_inv_sqrt.squeeze(-1))
        
        # Symmetric normalization: D^(-1/2) * A * D^(-1/2).
        adj_normalized = torch.bmm(torch.bmm(d_mat_inv_sqrt, adj_with_self), d_mat_inv_sqrt)
        
        # Graph convolution step: aggregate neighbor features.
        output = torch.bmm(adj_normalized, support)  # [batch, nodes, out_features]
        
        # Bias, activation, dropout.
        output = output + self.bias
        output = F.gelu(output)
        output = self.dropout(output)
        
        return output


class ElectrodePositionalEncoding(nn.Module):
    """
    Learnable per-electrode positional embedding, added to electrode-wise
    feature vectors.
    """

    def __init__(self, num_electrodes, embedding_dim):
        super(ElectrodePositionalEncoding, self).__init__()
        # One embedding row per electrode; leading 1 broadcasts over batch.
        self.electrode_embedding = nn.Parameter(
            torch.randn(1, num_electrodes, embedding_dim))

    def forward(self, x):
        """x: [batch, electrodes, features] -> same shape with embedding added."""
        return self.electrode_embedding + x


class ClassBalancedLoss(nn.Module):
    """
    Class-balanced cross-entropy (Cui et al., CVPR 2019).

    Re-weights each class by the inverse "effective number" of samples,
    (1 - beta) / (1 - beta^n_c), normalized so the weights sum to the
    number of classes. With equal class counts this reduces to plain
    cross-entropy.
    """
    def __init__(self, beta=0.9999, samples_per_class=None):
        super(ClassBalancedLoss, self).__init__()
        self.beta = beta
        # Per-class sample counts; may be a tensor, list, tuple, or None.
        self.samples_per_class = samples_per_class

    def forward(self, logits, targets):
        if self.samples_per_class is None:
            # No class counts available: fall back to plain cross-entropy.
            return F.cross_entropy(logits, targets)

        # FIX: accept plain Python sequences as well as tensors (torch.pow
        # previously crashed on a list), and place the weights on the same
        # device as the logits so GPU training works.
        counts = torch.as_tensor(self.samples_per_class, dtype=torch.float,
                                 device=logits.device)

        # Effective-number weights, normalized to sum to num_classes.
        weights = (1 - self.beta) / (1 - torch.pow(self.beta, counts))
        weights = weights / weights.sum() * counts.numel()

        # Vectorized per-sample weight lookup (replaces the per-class loop).
        sample_weights = weights[targets]

        # Weighted cross-entropy, averaged over the batch.
        loss = F.cross_entropy(logits, targets, reduction='none')
        return (sample_weights * loss).mean()


class DomainDiscriminator(nn.Module):
    """
    Domain classifier head for adversarial domain adaptation.

    Produces a single domain logit; gradients flowing back through it are
    reversed (scaled by -alpha) so the upstream feature extractor learns
    domain-invariant representations.
    """

    def __init__(self, feature_dim, hidden_dim=64):
        super(DomainDiscriminator, self).__init__()
        self.layer1 = nn.Linear(feature_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, hidden_dim // 2)
        self.layer3 = nn.Linear(hidden_dim // 2, 1)

    def forward(self, x, alpha=1.0):
        # Reverse gradients before discriminating (adversarial signal).
        reversed_features = GradientReversalLayer.apply(x, alpha)
        hidden = F.relu(self.layer1(reversed_features))
        hidden = F.relu(self.layer2(hidden))
        return self.layer3(hidden)


class EEGCNT(nn.Module):
    """
    Graph-convolutional network with temporal modeling for EEG depression
    recognition, designed for 128-electrode, long-sequence EEG data.

    Pipeline: spatial conv over the electrode axis -> two-stage temporal
    convolution -> graph convolution (adjacency from electrode distances or
    learned) -> dilated temporal blocks with a residual path -> attention
    pooling over time -> MLP classifier.
    """
    def __init__(self,
                 input_channels=1,
                 num_electrodes=128,    # number of EEG electrodes
                 num_time_points=1000,  # window length in samples
                 output_dim=2,          # number of output classes
                 hidden_channels=128,
                 num_temporal_layers=2,
                 num_graph_layers=2,
                 dropout=0.4,
                 weight_decay=1e-4,
                 spectral_norm=True,
                 graph_type='distance',  # 'distance' (fixed) or 'learned'
                 use_domain_adaptation=False,  # off: may not suit resting-state data
                 consistency_weight=0.3,  # kept for training-code compatibility
                 device=None):
        super(EEGCNT, self).__init__()
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.num_electrodes = num_electrodes
        self.num_time_points = num_time_points
        self.num_classes = output_dim
        self.use_domain_adaptation = use_domain_adaptation
        self.weight_decay = weight_decay
        # Stored only for API compatibility with the training code; unused here.
        self.consistency_weight = consistency_weight

        # ----- Feature extraction -----
        initial_filters = 32

        # Spatial filtering: collapse the electrode axis into feature maps.
        self.spatial_filters = nn.Sequential(
            nn.Conv2d(input_channels, initial_filters, kernel_size=(num_electrodes, 1), stride=1, bias=False),
            nn.BatchNorm2d(initial_filters),
            nn.ELU(),
            nn.Dropout(dropout * 0.5)
        )

        # Temporal convolution: large kernels, two stages with pooling.
        self.temporal_conv = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=(1, 51), stride=(1, 2), padding=(0, 25), bias=False),
            nn.BatchNorm2d(16),
            nn.ELU(),
            nn.AvgPool2d((1, 4)),
            nn.Dropout(dropout * 0.5),

            nn.Conv2d(16, initial_filters, kernel_size=(1, 21), stride=1, padding=(0, 10), bias=False),
            nn.BatchNorm2d(initial_filters),
            nn.ELU(),
            nn.AvgPool2d((1, 2)),
            nn.Dropout(dropout)
        )

        # Flattened feature size after the temporal convolutions.
        self.feature_dim = self._calculate_feature_dim(num_time_points)

        # ----- Graph structure -----
        self.graph_type = graph_type
        if graph_type == 'distance':
            # Fixed adjacency built from (index-based) electrode distances.
            adj = self._create_distance_based_graph(num_electrodes)
            self.register_buffer('adjacency', adj)
        elif graph_type == 'learned':
            # Learnable adjacency, initialized with a locality prior.
            self.adjacency = nn.Parameter(torch.zeros(num_electrodes, num_electrodes))
            with torch.no_grad():
                for i in range(num_electrodes):
                    for j in range(num_electrodes):
                        if i == j:
                            self.adjacency[i, j] = 0.0  # self-loops added later
                        else:
                            # Nearby electrodes start with stronger links.
                            dist = abs(i - j) / num_electrodes
                            if dist < 0.2:
                                self.adjacency[i, j] = 1.0 - dist * 3
                            else:
                                self.adjacency[i, j] = 0.0

        # ----- Graph convolution stack -----
        # Double the hidden width to increase model capacity.
        gcn_hidden = hidden_channels * 2

        self.graph_layers = nn.ModuleList()
        # First layer maps initial_filters -> gcn_hidden; the rest keep width.
        self.graph_layers.append(GraphConvBlock(initial_filters, gcn_hidden, dropout=dropout))
        for _ in range(num_graph_layers - 1):
            self.graph_layers.append(
                GraphConvBlock(gcn_hidden, gcn_hidden, dropout=dropout)
            )

        # ----- Dilated temporal blocks -----
        self.temporal_blocks = nn.ModuleList()
        for i in range(num_temporal_layers):
            self.temporal_blocks.append(
                TemporalBlock(
                    gcn_hidden, gcn_hidden,
                    kernel_size=5,
                    dilation=2 ** i,  # exponentially growing receptive field
                    dropout=dropout
                )
            )

        # Residual path from the CNN features into the TCN output.
        self.temporal_residual = nn.Conv1d(initial_filters, gcn_hidden, 1)

        # FIX: this normalization used to be constructed inside forward() on
        # every call, so its affine parameters were never trained and its
        # running statistics were discarded each step. It is now a proper,
        # trainable submodule.
        self.feature_norm = nn.BatchNorm1d(gcn_hidden)

        # ----- Attention pooling over time -----
        self.attention_pooling = nn.Sequential(
            nn.Linear(gcn_hidden, gcn_hidden // 4),
            nn.Tanh(),  # smoother attention scores than ReLU
            nn.Linear(gcn_hidden // 4, 1),
            nn.Softmax(dim=1)
        )

        # ----- Classifier head -----
        self.classifier = nn.Sequential(
            nn.Linear(gcn_hidden, gcn_hidden),
            nn.LayerNorm(gcn_hidden),
            nn.ELU(),
            nn.Dropout(dropout),
            nn.Linear(gcn_hidden, gcn_hidden // 2),
            nn.LayerNorm(gcn_hidden // 2),
            nn.ELU(),
            nn.Dropout(dropout * 0.5),
            nn.Linear(gcn_hidden // 2, output_dim)
        )

        self.apply(self._init_weights)

    def _calculate_feature_dim(self, time_points):
        """Return the flattened feature size after `temporal_conv`."""
        # Stage 1: conv(kernel=51, stride=2, padding=25) then AvgPool(4).
        t1 = (time_points + 2 * 25 - 51) // 2 + 1
        t1 = t1 // 4
        # Stage 2: conv(kernel=21, padding=10, stride=1) then AvgPool(2).
        t2 = (t1 + 2 * 10 - 21) + 1
        t2 = t2 // 2
        return t2 * 32  # 32 output channels in the second conv stage

    def _create_distance_based_graph(self, num_electrodes):
        """
        Build a symmetric, degree-normalized adjacency matrix from
        electrode-index distances with region-based connection strengths.

        NOTE(review): uses index distance as a proxy for electrode geometry;
        real 3D electrode coordinates should be used in production.
        """
        adj = torch.zeros(num_electrodes, num_electrodes)

        # Cluster electrodes into regions (simplified 10-20-style grouping).
        # FIX: guard against num_electrodes < 16, which previously made
        # num_regions zero and crashed the division below.
        num_regions = max(1, min(8, num_electrodes // 16))
        region_size = num_electrodes // num_regions

        for i in range(num_electrodes):
            for j in range(num_electrodes):
                # Index distance between the two electrodes.
                dist = abs(i - j)

                # Region membership of each electrode.
                region_i = i // region_size
                region_j = j // region_size
                is_same_region = region_i == region_j
                is_neighbor_region = abs(region_i - region_j) == 1

                # Gaussian-kernel similarity, gated by region relationship.
                if i != j:  # no self-loops (handled during normalization)
                    if is_same_region:
                        # Same region: closer electrodes connect more strongly.
                        if dist <= 3:
                            adj[i, j] = math.exp(-(dist ** 2) / (2 * 2 ** 2))
                        elif dist <= 8:
                            adj[i, j] = 0.5 * math.exp(-(dist ** 2) / (2 * 5 ** 2))
                    elif is_neighbor_region:
                        # Neighboring regions: only nearby electrodes connect.
                        if dist <= 5:
                            adj[i, j] = 0.3 * math.exp(-(dist ** 2) / (2 * 4 ** 2))
                    else:
                        # Distant regions: sparse weak links mimicking
                        # long-range functional connectivity.
                        if dist <= 10 and (i % 4 == 0 or j % 4 == 0):
                            adj[i, j] = 0.1 * math.exp(-(dist ** 2) / (2 * 8 ** 2))

        # Symmetrize.
        adj = 0.5 * (adj + adj.t())

        # Symmetric normalization: D^(-1/2) * A * D^(-1/2).
        d_inv_sqrt = torch.pow(adj.sum(dim=1) + 1e-10, -0.5)
        d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
        adj_normalized = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)

        return adj_normalized

    def _init_weights(self, m):
        """Xavier for linear layers, Kaiming for convs, unit affine for norms."""
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.LayerNorm)):
            nn.init.ones_(m.weight)
            nn.init.zeros_(m.bias)

    def get_adjacency(self, batch_size):
        """Return the adjacency matrix expanded to [batch, N, N]."""
        if self.graph_type == 'learned':
            adj = torch.sigmoid(self.adjacency)

            # Sparsify: drop weak connections.
            threshold = 0.1
            adj = torch.where(adj < threshold, torch.zeros_like(adj), adj)

            # Symmetrize.
            adj = 0.5 * (adj + adj.transpose(0, 1))

            # Add self-loops, then symmetric degree normalization.
            adj = adj + torch.eye(self.num_electrodes, device=adj.device)
            d_inv_sqrt = torch.pow(adj.sum(dim=1) + 1e-10, -0.5)
            d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
            adj_normalized = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
            return adj_normalized.unsqueeze(0).expand(batch_size, -1, -1)
        # Pre-computed (buffered) adjacency.
        return self.adjacency.unsqueeze(0).expand(batch_size, -1, -1)

    def forward(self, x, domain_labels=None, return_features=False, apply_activation=True, training=True):
        """
        x: [batch, channels=1, electrodes, time].

        Returns pooled features if `return_features`, raw logits if
        `apply_activation` is False, otherwise class probabilities.
        `domain_labels` and `training` are accepted for API compatibility
        with the training code but are unused here.
        """
        batch_size = x.size(0)

        # 1. Spatial filtering: fold the electrode axis into features.
        spatial_features = self.spatial_filters(x)  # [batch, 32, 1, time]

        # 2. Temporal feature extraction.
        temporal_input = spatial_features.transpose(1, 2)  # [batch, 1, 32, time]
        temporal_features = self.temporal_conv(temporal_input)  # [batch, 32, H, time']

        # Flatten the trailing two axes into a single "time" axis so the
        # tensor is [batch, channels, time].
        if temporal_features.dim() == 4:
            b, c, hgt, w = temporal_features.size()
            temporal_features = temporal_features.view(b, c, hgt * w)

        # Residual path feeding the temporal blocks below.
        residual = self.temporal_residual(temporal_features)  # [batch, hidden, T]

        # 3. Graph representation.
        adj = self.get_adjacency(batch_size)
        # [batch, channels, T] -> [batch, T, channels] (nodes x features).
        graph_input = temporal_features.transpose(1, 2)

        # 4. Graph convolutions. The first layer widens features to
        # gcn_hidden; GraphConvBlock itself pools nodes to electrode count.
        graph_features = graph_input
        for layer in self.graph_layers:
            graph_features = layer(graph_features, adj)

        # 5. Dilated temporal blocks, back in [batch, channels, time] layout.
        tcn_output = graph_features.transpose(1, 2)  # [batch, hidden, N]
        for block in self.temporal_blocks:
            tcn_output = block(tcn_output)

        # Align the residual's time length if needed. Channel counts always
        # match by construction: both paths carry gcn_hidden channels.
        # (FIX: the old code built a fresh, randomly initialized Conv1d per
        # forward call for a channel mismatch that cannot occur.)
        if tcn_output.size(2) != residual.size(2):
            residual = F.interpolate(residual, size=tcn_output.size(2),
                                     mode='linear', align_corners=False)
        tcn_output = tcn_output + residual

        # 6. Attention pooling over the time axis.
        attention_input = tcn_output.transpose(1, 2)  # [batch, N, hidden]
        attention_weights = self.attention_pooling(attention_input)  # [batch, N, 1]
        features = torch.sum(attention_input * attention_weights, dim=1)  # [batch, hidden]

        # Normalize pooled features with the trained submodule (see __init__).
        features = self.feature_norm(features)

        if return_features:
            return features

        # 7. Classification.
        logits = self.classifier(features)

        if not apply_activation:
            return logits

        # Sigmoid for single-logit binary heads, softmax otherwise.
        if self.num_classes == 1 or (self.num_classes == 2 and logits.shape[1] == 1):
            probs = torch.sigmoid(logits)
        else:
            probs = F.softmax(logits, dim=1)

        return probs