import torch
from torch import nn
import torch.nn.functional as F
from typing import Literal, Tuple, Optional
import math

class CNNNet(nn.Module):
    """Plain CNN baseline: three conv/pool/BN stages followed by a 3-layer MLP head.

    Expects single-channel inputs whose spatial size yields a 14x14 map after
    the third pool (e.g. 128x128), and emits logits for 5 classes.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 16, 3)
        self.bn1 = nn.BatchNorm2d(16)        # normalization
        self.conv2 = nn.Conv2d(16, 32, 3)
        self.bn2 = nn.BatchNorm2d(32)        # normalization
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.bn3 = nn.BatchNorm2d(64)        # normalization
        self.pool = nn.MaxPool2d(2, 2)
        self.drop = nn.Dropout(0.5)          # combats overfitting
        self.liner_1 = nn.Linear(64 * 14 * 14, 1024)
        self.bn_l1 = nn.BatchNorm1d(1024)    # normalization
        self.liner_2 = nn.Linear(1024, 256)
        self.bn_l2 = nn.BatchNorm1d(256)     # normalization
        self.liner_3 = nn.Linear(256, 5)

    def forward(self, input):
        """Run the conv stages, flatten, then the fully-connected head; returns logits."""
        x = input
        # Each conv stage: ReLU(conv) -> max-pool -> batch-norm.
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = bn(self.pool(F.relu(conv(x))))
        # Flatten to (B, C*H*W).
        x = x.view(-1, x.size(1) * x.size(2) * x.size(3))
        # Each FC stage: ReLU(linear) -> batch-norm -> dropout.
        for linear, bn in ((self.liner_1, self.bn_l1),
                           (self.liner_2, self.bn_l2)):
            x = self.drop(bn(F.relu(linear(x))))
        return self.liner_3(x)


# 嵌入网络不变
class ProtoNetEmbedding(nn.Module):
    """Conv-style embedding network: three conv/ReLU stages, two max-pools,
    then global average pooling to a flat feature vector."""

    def __init__(self, in_channels=3, hidden_size=64):
        super().__init__()
        layers = [
            nn.Conv2d(in_channels, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),  # -> (B, hidden_size, 1, 1)
        ]
        self.encoder = nn.Sequential(*layers)

    def forward(self, x):
        """Encode x and flatten to (B, hidden_size)."""
        feats = self.encoder(x)
        return feats.flatten(1)


class ProtoNetClassifier(nn.Module):
    """Linear classification head stacked on ProtoNetEmbedding features."""

    def __init__(self, in_channels=3, hidden_size=64, num_classes=4):
        super().__init__()
        self.embedding = ProtoNetEmbedding(in_channels, hidden_size)
        self.classifier = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Return class logits of shape (B, num_classes)."""
        return self.classifier(self.embedding(x))


class DN4Net(nn.Module):
    """Four Conv-BN-ReLU-MaxPool blocks followed by a linear classifier.

    The classifier expects 4096 flattened features (64 channels on an 8x8
    map), which corresponds to 128x128 inputs after four halvings.
    """

    def __init__(self, in_channels=3, num_classes=4):
        super().__init__()
        stages = [self._block(in_channels, 64)]
        stages.extend(self._block(64, 64) for _ in range(3))
        self.encoder = nn.Sequential(*stages)
        self.classifier = nn.Linear(4096, num_classes)

    def _block(self, in_channels, out_channels):
        """One Conv-BN-ReLU-MaxPool stage; halves the spatial resolution."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )

    def forward(self, x):
        feats = self.encoder(x)
        return self.classifier(feats.flatten(1))


class GNNLayer(nn.Module):
    """Single message-passing layer over a dense adjacency matrix.

    For every node i, concatenate its features with those of each neighbour j
    (entries with adj[i, j] > 0), project each pair through a shared linear
    layer, average over neighbours, and apply ReLU.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        # Operates on the concatenation [x_i ; x_j], hence in_dim * 2.
        self.fc = nn.Linear(in_dim * 2, out_dim)

    def forward(self, x, adj):
        """Aggregate neighbour messages.

        Args:
            x: (B, in_dim) node features.
            adj: (B, B) adjacency weights; adj[i, j] > 0 marks j a neighbour of i.

        Returns:
            (B, out_dim) tensor of ReLU-activated mean messages.

        Vectorized replacement for the original per-node Python double loop:
        same math, but the O(B^2) pair projections run as one batched matmul.
        Assumes every node has at least one neighbour (true for the
        adjacency built by GNNImageClassifier, whose diagonal is 1), which
        the original loop also required.
        """
        n = x.size(0)
        # All ordered pairs [x_i ; x_j] -> (B, B, 2 * in_dim).
        pairs = torch.cat(
            (x.unsqueeze(1).expand(n, n, -1), x.unsqueeze(0).expand(n, n, -1)),
            dim=-1,
        )
        messages = self.fc(pairs)                        # (B, B, out_dim)
        # Masked mean over neighbours j with adj[i, j] > 0.
        mask = (adj > 0).to(messages.dtype).unsqueeze(-1)  # (B, B, 1)
        pooled = (messages * mask).sum(dim=1) / mask.sum(dim=1)
        return F.relu(pooled)


class GNNImageClassifier(nn.Module):
    """CNN encoder + one GNN layer over the batch graph + linear classifier."""

    def __init__(self, in_channels=1, num_classes=4):
        super().__init__()
        self.encoder = nn.Sequential(
            self._block(in_channels, 32),
            self._block(32, 64),
            self._block(64, 64),
            nn.AdaptiveAvgPool2d((1, 1)),  # reduce to [B, 64, 1, 1]
        )
        self.gnn_layer1 = GNNLayer(64, 64)
        self.classifier = nn.Linear(64, num_classes)

    def _block(self, in_channels, out_channels):
        """Conv-BN-ReLU-MaxPool stage; halves the spatial resolution."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )

    def forward(self, x):
        feats = self.embedding(x)                      # [B, 64]
        graph_feats = self.gnn_layer1(feats, self._build_adj(feats))
        return self.classifier(graph_feats)

    def _build_adj(self, x):
        """Fully connected batch graph: self-weight 1.0, cross-weight 0.1."""
        n = x.size(0)
        eye = torch.eye(n, device=x.device)
        return eye + (1 - eye) * 0.1

    def embedding(self, x):
        """Return flat CNN features [B, 64], skipping the GNN and classifier."""
        return self.encoder(x).view(x.size(0), -1)


class DepthwiseSeparableConv(nn.Module):
    """Depthwise separable convolution: per-channel (depthwise) conv, then a
    1x1 pointwise conv to mix channels; each stage is followed by BN + ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super().__init__()
        # Depthwise stage: groups == in_channels gives one filter per channel.
        self.depthwise = nn.Conv2d(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, groups=in_channels
        )
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Pointwise stage: 1x1 conv mixes information across channels.
        self.pointwise = nn.Conv2d(
            in_channels, out_channels, kernel_size=1,
            stride=1, padding=0
        )
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = F.relu(self.bn1(self.depthwise(x)))
        out = F.relu(self.bn2(self.pointwise(out)))
        return out

class ResidualBlock(nn.Module):
    """Improved residual block (paper section 1.2.1): two 3x3 convs and a
    depthwise-separable conv on the main path, plus a 1x1 projection shortcut
    whenever the stride or channel count changes."""

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        # Main path: two 3x3 convolutions.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Depthwise separable conv closes the main path.
        self.ds_conv = DepthwiseSeparableConv(out_channels, out_channels)
        # Shortcut path: projection only when the output shape differs.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.ds_conv(out)
        # Residual add, then the final (non-in-place) activation.
        return F.relu(out + residual, inplace=False)

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by
    spatial attention, each gating the feature map multiplicatively.

    Fix: the reduced channel count is clamped to at least 1, so the module no
    longer builds a zero-output-channel Conv2d when channels < reduction
    (the original crashed in that case); behaviour is unchanged whenever
    channels // reduction >= 1.
    """

    def __init__(self, channels, reduction=16, kernel_size=7):
        super(CBAM, self).__init__()
        # Global average/max pooling give channel descriptors of shape [B, C, 1, 1].
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        reduced = max(1, channels // reduction)  # guard: channels may be < reduction
        self.fc = nn.Sequential(
            nn.Conv2d(channels, reduced, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(reduced, channels, 1, bias=False)
        )
        self.sigmoid_channel = nn.Sigmoid()     # squash attention weights to [0, 1]

        # Spatial attention: 2 -> 1 conv over the [avg; max] channel-pooled maps.
        self.conv_spatial = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid_spatial = nn.Sigmoid()

    def forward(self, x):
        # Channel attention: shared MLP over avg- and max-pooled descriptors.
        avg_out = self.fc(self.avg_pool(x))
        max_out = self.fc(self.max_pool(x))
        channel_att = self.sigmoid_channel(avg_out + max_out)
        x = x * channel_att

        # Spatial attention: avg and max across the channel dimension.
        avg_map = torch.mean(x, dim=1, keepdim=True)
        max_map, _ = torch.max(x, dim=1, keepdim=True)
        spatial = torch.cat([avg_map, max_map], dim=1)
        spatial_att = self.sigmoid_spatial(self.conv_spatial(spatial))
        return x * spatial_att

class P2D_GE_Net(nn.Module):
    """Complete P2D_GE_Net model.

    Two single-channel image branches (GADF and CWT) with identical
    conv + residual stems; their features are fused by element-wise addition,
    refined by CBAM attention, and classified via global average pooling and
    two fully-connected layers.

    forward(gadf_input, cwt_input) expects two (B, 1, H, W) batches of the
    same spatial size and returns logits of shape (B, num_classes).
    """
    def __init__(self, num_classes=4):
        super().__init__()
        # ======== GADF branch ========
        # Initial convolution layers (3x3, bias folded into BN).
        self.gadf_conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1, bias=False)
        self.gadf_bn1 = nn.BatchNorm2d(16)

        self.gadf_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False)
        self.gadf_bn2 = nn.BatchNorm2d(16)

        # Pooling layer (stride-2 downsampling with a 5x5 window).
        self.gadf_pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)

        # Residual block (16 -> 32 channels).
        self.gadf_res1 = ResidualBlock(16, 32, stride=1)

        # Downsampling.
        self.gadf_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # ======== CWT branch ========
        # Initial convolution layers (mirrors the GADF branch).
        self.cwt_conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1, bias=False)
        self.cwt_bn1 = nn.BatchNorm2d(16)

        self.cwt_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False)
        self.cwt_bn2 = nn.BatchNorm2d(16)

        # Pooling layer.
        self.cwt_pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)

        # Residual block.
        self.cwt_res1 = ResidualBlock(16, 32, stride=1)

        # Downsampling.
        self.cwt_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # ======== Fusion and classification ========
        # Attention module (CBAM) on the fused 32-channel features.
        self.attention = CBAM(32)

        # Global average pooling + fully-connected head.
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(32, 16)
        self.fc2 = nn.Linear(16, num_classes)

    def forward(self, gadf_input, cwt_input):
        """Run both branches, fuse additively, attend, classify; returns logits."""
        # -------- GADF branch forward --------
        gadf = F.relu(self.gadf_bn1(self.gadf_conv1(gadf_input)))
        gadf = F.relu(self.gadf_bn2(self.gadf_conv2(gadf)))
        gadf = self.gadf_pool1(gadf)

        gadf = self.gadf_res1(gadf)
        gadf = self.gadf_pool2(gadf)

        # -------- CWT branch forward --------
        cwt = F.relu(self.cwt_bn1(self.cwt_conv1(cwt_input)))
        cwt = F.relu(self.cwt_bn2(self.cwt_conv2(cwt)))
        cwt = self.cwt_pool1(cwt)

        cwt = self.cwt_res1(cwt)
        cwt = self.cwt_pool2(cwt)

        # -------- Feature fusion (element-wise ADD; requires equal shapes) --------
        fused = gadf + cwt
        # Attention refinement.
        fused = self.attention(fused)

        # -------- Classification head --------
        out = self.global_pool(fused)
        out = torch.flatten(out, 1)
        out = F.relu(self.fc1(out))
        out = self.fc2(out)

        return out



class DSResNet_GADF(nn.Module):
    """Single-branch (GADF-only) P2D-DSResNet classifier."""

    def __init__(self, num_classes=4):
        super().__init__()
        # ======== GADF branch ========
        # Stem convolutions.
        self.gadf_conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1, bias=False)
        self.gadf_bn1 = nn.BatchNorm2d(16)

        self.gadf_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False)
        self.gadf_bn2 = nn.BatchNorm2d(16)

        # First stride-2 downsampling pool.
        self.gadf_pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)

        # Residual blocks.
        self.gadf_res1 = ResidualBlock(16, 32, stride=1)
        self.gadf_res2 = ResidualBlock(32, 64, stride=1)

        # Second stride-2 downsampling pool.
        self.gadf_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Head: 65536 = 64 channels * 32 * 32, i.e. 128x128 inputs after
        # the two stride-2 pools.
        self.fc1 = nn.Linear(65536, 64)
        self.fc2 = nn.Linear(64, num_classes)

    def forward(self, gadf_input):
        """GADF branch forward pass; returns logits of shape (B, num_classes)."""
        feats = F.relu(self.gadf_bn1(self.gadf_conv1(gadf_input)))
        feats = F.relu(self.gadf_bn2(self.gadf_conv2(feats)))
        feats = self.gadf_pool1(feats)

        feats = self.gadf_res2(self.gadf_res1(feats))
        feats = self.gadf_pool2(feats)

        hidden = F.relu(self.fc1(torch.flatten(feats, 1)))
        return self.fc2(hidden)


class P2D_DSResNet_CWT(nn.Module):
    """Single-branch (CWT-only) P2D-DSResNet classifier."""

    def __init__(self, num_classes=5):
        super().__init__()
        # ======== CWT branch ========
        # Stem convolutions.
        self.cwt_conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1, bias=False)
        self.cwt_bn1 = nn.BatchNorm2d(16)

        self.cwt_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False)
        self.cwt_bn2 = nn.BatchNorm2d(16)

        # First stride-2 downsampling pool.
        self.cwt_pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)

        # Residual blocks.
        self.cwt_res1 = ResidualBlock(16, 32, stride=1)
        self.cwt_res2 = ResidualBlock(32, 64, stride=1)

        # Second stride-2 downsampling pool.
        self.cwt_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # Head: 65536 = 64 channels * 32 * 32, i.e. 128x128 inputs after
        # the two stride-2 pools.
        self.fc1 = nn.Linear(65536, 64)
        self.fc2 = nn.Linear(64, num_classes)

    def forward(self, cwt_input):
        """CWT branch forward pass; returns logits of shape (B, num_classes)."""
        feats = F.relu(self.cwt_bn1(self.cwt_conv1(cwt_input)))
        feats = F.relu(self.cwt_bn2(self.cwt_conv2(feats)))
        feats = self.cwt_pool1(feats)

        feats = self.cwt_res2(self.cwt_res1(feats))
        feats = self.cwt_pool2(feats)

        # Classification head.
        hidden = F.relu(self.fc1(torch.flatten(feats, 1)))
        return self.fc2(hidden)


# NOTE: the triple-quoted string below is disabled legacy code (a DCAM
# coordinate-attention module, a conditional-GAN Generator/Discriminator pair,
# and a WGAN-GP gradient-penalty helper). It is never executed; consider
# removing it and relying on version control history instead.
'''

# 动态坐标注意力机制（DCAM）
class DCAM(nn.Module):
    def __init__(self, channels, reduction=16):
        super(DCAM, self).__init__()

        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))  # 纵向池化（压缩宽度）
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))  # 横向池化（压缩高度）

        mid_channels = max(8, channels // reduction)

        self.conv1 = nn.Conv2d(channels, mid_channels, kernel_size=1)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.act = nn.ReLU()

        self.conv_h = nn.Conv2d(mid_channels, channels, kernel_size=1)
        self.conv_w = nn.Conv2d(mid_channels, channels, kernel_size=1)

        self.dynamic_weight = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // 2, 1),
            nn.ReLU(),
            nn.Conv2d(channels // 2, 2, 1),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        identity = x
        n, c, h, w = x.size()                       # [16, 256, 16, 16]

        x_h = self.pool_h(x)                        # (N, C, H, 1) (16, 256, 16, 1)
        x_w = self.pool_w(x).permute(0, 1, 3, 2)    # (N, C, W, 1) (16, 256, 16, 1)

        y = torch.cat([x_h, x_w], dim=2)     # (N, C, H+W, 1) (16, 256, 32, 1)
        y = self.conv1(y)                           #
        y = self.bn1(y)
        y = self.act(y)

        x_h, x_w = torch.split(y, [h, w], dim=2)
        x_w = x_w.permute(0, 1, 3, 2)

        a_h = self.conv_h(x_h).sigmoid()
        a_w = self.conv_w(x_w).sigmoid()

        out = x * a_h * a_w

        weights = self.dynamic_weight(x)  # (N, 2, 1, 1)
        λ1 = weights[:, 0:1]
        λ2 = weights[:, 1:2]

        out = λ1 * x + λ2 * out + identity  # 残差连接
        return out


# 生成器
class Generator(nn.Module):
    def __init__(self, z_dim=100, y_dim=5, out_channels=1, feature_maps=64):
        super(Generator, self).__init__()
        self.input_dim = z_dim + y_dim  # 拼接 z 和 y

        self.net = nn.Sequential(
            # 全连接：压缩成 feature map 的基础形状，比如 (64, 8, 8)
            nn.Linear(self.input_dim, feature_maps * 8 * 8 * 8),
            nn.BatchNorm1d(feature_maps * 8 * 8 * 8),
            nn.LeakyReLU(0.2, inplace=True),

            # reshape → (batch, feature_maps*8, 8, 8)
            nn.Unflatten(1, (feature_maps * 8, 8, 8)),

            # 上采样 1：→ (batch, feature_maps*4, 16, 16)
            nn.ConvTranspose2d(feature_maps * 8, feature_maps * 4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(feature_maps * 4),
            nn.LeakyReLU(0.2, inplace=True),

            # 上采样 2：→ (batch, feature_maps*2, 32, 32)
            nn.ConvTranspose2d(feature_maps * 4, feature_maps * 2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(feature_maps * 2),
            nn.LeakyReLU(0.2, inplace=True),

            # 上采样 3：→ (batch, feature_maps, 64, 64)
            nn.ConvTranspose2d(feature_maps * 2, feature_maps, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(feature_maps),
            nn.LeakyReLU(0.2, inplace=True),

            # 上采样 4：→ (batch, out_channels, 128, 128)
            nn.ConvTranspose2d(feature_maps, out_channels, kernel_size=4, stride=2, padding=1),

            nn.Tanh()  # 输出范围变成 [-1, 1]
        )

        # 加上我们写的注意力模块
        self.dcam = DCAM(feature_maps * 4)

    def forward(self, z, y):
        # print("nGenerator")
        # 拼接噪声和标签
        input = torch.cat([z, y], dim=1)        # [16, 100] + [16,  5] = [16, 105]

        out = self.net[:7](input)  # 前面到 (64, 64, 64)
        # print(out.shape)
        out = self.dcam(out)  # DCAM 加强空间感知
        # print(out.shape)
        out = self.net[7:](out)  # 最后一步生成图像
        # print(out.shape)

        return out


#  判断器
class Discriminator(nn.Module):
    def __init__(self, num_classes=5):
        super().__init__()
        self.conv_blocks = nn.Sequential(
            # 卷积层1: 128x128 → 64x64
            nn.Conv2d(1, 64, 3, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            nn.MaxPool2d(2),

            # 动态坐标注意力模块（DCAM）
            DCAM(channels=64),

            # 卷积层2: 64x64 → 32x32
            nn.Conv2d(64, 128, 3, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2),

            # 全局平均池化
            nn.AdaptiveAvgPool2d(1)
        )

        # 条件标签处理
        self.label_embedding = nn.Embedding(num_classes, 128)
        self.fc = nn.Sequential(
            nn.Linear(256, 1),
            nn.Sigmoid()  # 输出真实概率
        )

    def forward(self, img, labels):
        # print("Discriminator")
        features = self.conv_blocks(img)
        # print(features.shape)
        features = features.view(features.size(0), -1)
        # print(features.shape)
        # 拼接特征和条件标签
        labels = self.label_embedding(labels)
        # print(labels.shape)

        combined = torch.cat([features, labels], dim=1)
        # print(combined.shape)

        validity = self.fc(combined)
        # print(validity.shape)
        return validity


# 梯度惩罚函数（WGAN‑GP 核心）
def gradient_penalty(D, real, fake, y_real, device, lambda_gp=10):
    batch_size = real.size(0)
    # 在 real 和 fake 之间插值
    alpha = torch.rand(batch_size, 1, 1, 1, device=device)
    interpolates = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    # 判别器输出
    d_interpolates = D(interpolates, y_real)
    # 对插值样本的梯度
    grads = torch.autograd.grad(
        outputs=d_interpolates,
        inputs=interpolates,
        grad_outputs=torch.ones_like(d_interpolates),
        create_graph=True, retain_graph=True
    )[0]
    # 计算梯度范数
    grad_norm = grads.view(batch_size, -1).norm(2, dim=1)
    # 梯度惩罚
    gp = lambda_gp * ((grad_norm - 1) ** 2).mean()
    return gp

'''

class DepthwiseSeparableConv_1(nn.Module):
    """Depthwise separable convolution (typed variant, bias-free convs):
    depthwise conv then 1x1 pointwise conv, each followed by BN + in-place ReLU."""

    def __init__(self, in_channels: int, out_channels: int,
                 kernel_size: int = 3, stride: int = 1, padding: int = 1):
        super().__init__()
        # Depthwise stage: groups == in_channels gives one filter per channel.
        self.depthwise = nn.Conv2d(
            in_channels, in_channels, kernel_size,
            stride=stride, padding=padding, groups=in_channels, bias=False
        )
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Pointwise stage: 1x1 conv mixes information across channels.
        self.pointwise = nn.Conv2d(
            in_channels, out_channels, kernel_size=1,
            stride=1, padding=0, bias=False
        )
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = F.relu(self.bn1(self.depthwise(x)), inplace=True)
        return F.relu(self.bn2(self.pointwise(out)), inplace=True)


class ResidualBlock_1(nn.Module):
    """Improved residual block (paper section 1.2.1), typed variant: two 3x3
    convs plus a depthwise-separable conv on the main path, with a 1x1
    projection shortcut whenever the stride or channel count changes."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        # Main path: two 3x3 convolutions.
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Depthwise separable conv closes the main path.
        self.ds_conv = DepthwiseSeparableConv_1(out_channels, out_channels)
        # Shortcut: projection only when the output shape differs, else a no-op.
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out = F.relu(self.bn2(self.conv2(out)), inplace=True)
        out = self.ds_conv(out)
        # Residual add, then the final activation (kept non-in-place).
        return F.relu(out + residual, inplace=False)


class CBAM_1(nn.Module):
    """CBAM attention (typed variant): channel attention then spatial
    attention, each gating the feature map multiplicatively.

    Requires channels to be divisible by reduction.
    """

    def __init__(self, channels: int, reduction: int = 16, kernel_size: int = 7):
        super().__init__()
        assert channels % reduction == 0, "channels 必须能被 reduction 整除"

        # Channel attention: shared MLP over global avg/max descriptors.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, 1, bias=False)
        )
        self.sigmoid_channel = nn.Sigmoid()

        # Spatial attention over the [avg; max] channel-pooled maps.
        self.conv_spatial = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.sigmoid_spatial = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Channel attention.
        channel_att = self.sigmoid_channel(
            self.fc(self.avg_pool(x)) + self.fc(self.max_pool(x))
        )
        x = x * channel_att

        # Spatial attention.
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        spatial_att = self.sigmoid_spatial(
            self.conv_spatial(torch.cat([mean_map, max_map], dim=1))
        )
        return x * spatial_att


class P2D_DSResNet_1(nn.Module):
    """
    Complete P2D-DSResNet model (dual branch: GADF / CWT).
    - fusion: 'add' or 'concat' (concat is followed by a 1x1 Conv that
      compresses channels back down)

    forward(gadf_input, cwt_input) takes two (B, 1, H, W) batches of equal
    spatial size and returns logits of shape (B, num_classes).
    """
    def __init__(self, num_classes: int = 5,
                 fusion: Literal['add', 'concat'] = 'add',
                 dropout_p: float = 0.0):
        super().__init__()
        assert fusion in ('add', 'concat')
        self.fusion = fusion
        self.dropout_p = dropout_p

        # ======== GADF branch ========
        self.gadf_conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1, bias=False)
        self.gadf_bn1 = nn.BatchNorm2d(16)

        self.gadf_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False)
        self.gadf_bn2 = nn.BatchNorm2d(16)

        self.gadf_pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)

        self.gadf_res1 = ResidualBlock_1(16, 32, stride=1)
        self.gadf_res2 = ResidualBlock_1(32, 64, stride=1)

        self.gadf_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # ======== CWT branch (mirrors the GADF branch) ========
        self.cwt_conv1 = nn.Conv2d(1, 16, kernel_size=3, padding=1, bias=False)
        self.cwt_bn1 = nn.BatchNorm2d(16)

        self.cwt_conv2 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False)
        self.cwt_bn2 = nn.BatchNorm2d(16)

        self.cwt_pool1 = nn.MaxPool2d(kernel_size=5, stride=2, padding=2)

        self.cwt_res1 = ResidualBlock_1(16, 32, stride=1)
        self.cwt_res2 = ResidualBlock_1(32, 64, stride=1)

        self.cwt_pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        # ======== Fusion and classification ========
        # Channel count produced by each branch before fusion.
        fused_channels = 64
        if self.fusion == 'add':
            # add: 64 + 64 -> 64 (element-wise addition keeps the channel count)
            self.fuse_proj = nn.Identity()
            out_channels_after_fuse = fused_channels
        else:
            # concat: 64 || 64 -> 128, then a 1x1 Conv compresses back to 64
            self.fuse_proj = nn.Sequential(
                nn.Conv2d(128, 64, kernel_size=1, bias=False),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True)
            )
            out_channels_after_fuse = 64

        self.attention = CBAM_1(out_channels_after_fuse)

        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(out_channels_after_fuse, 64)
        self.dropout = nn.Dropout(dropout_p) if dropout_p > 0 else nn.Identity()
        self.fc2 = nn.Linear(64, num_classes)

        # Weight initialization (optional).
        self._init_weights()

    def _init_weights(self):
        # Kaiming init for Conv2d/Linear weights; BN/GN weights -> 1, biases -> 0.
        # NOTE(review): kaiming 'fan_out'/'relu' is applied to Linear layers as
        # well as convs here -- confirm that is intentional.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if getattr(m, "bias", None) is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)

    def _branch_forward(self, x: torch.Tensor,
                        conv1: nn.Conv2d, bn1: nn.BatchNorm2d,
                        conv2: nn.Conv2d, bn2: nn.BatchNorm2d,
                        pool1: nn.Module, res1: nn.Module, res2: nn.Module, pool2: nn.Module
                        ) -> torch.Tensor:
        # Shared stem + residual pipeline used by both branches.
        x = F.relu(bn1(conv1(x)), inplace=True)
        x = F.relu(bn2(conv2(x)), inplace=True)
        x = pool1(x)
        x = res1(x)
        x = res2(x)
        x = pool2(x)
        return x

    def forward(self, gadf_input: torch.Tensor, cwt_input: torch.Tensor) -> torch.Tensor:
        # GADF branch
        gadf = self._branch_forward(
            gadf_input,
            self.gadf_conv1, self.gadf_bn1,
            self.gadf_conv2, self.gadf_bn2,
            self.gadf_pool1, self.gadf_res1, self.gadf_res2, self.gadf_pool2
        )
        # CWT branch
        cwt = self._branch_forward(
            cwt_input,
            self.cwt_conv1, self.cwt_bn1,
            self.cwt_conv2, self.cwt_bn2,
            self.cwt_pool1, self.cwt_res1, self.cwt_res2, self.cwt_pool2
        )

        # Feature fusion (add keeps 64 channels; concat doubles then projects back).
        if self.fusion == 'add':
            fused = gadf + cwt
        else:
            fused = torch.cat([gadf, cwt], dim=1)  # (B, 128, H, W)
        fused = self.fuse_proj(fused)

        # Attention refinement.
        fused = self.attention(fused)

        # Classification head.
        out = self.global_pool(fused)     # (B, C, 1, 1)
        out = torch.flatten(out, 1)       # (B, C)
        out = F.relu(self.fc1(out), inplace=True)
        out = self.dropout(out)
        out = self.fc2(out)               # (B, num_classes)
        return out


'''################ p2d_dsresnet_v2 ################'''
# ------------------ Utils ------------------ #
class DropPath(nn.Module):
    """Stochastic Depth per-sample (when applied in residual blocks).

    During training, zeroes a whole sample's branch output with probability
    ``drop_prob`` and rescales survivors by 1 / keep_prob so the expected
    value is preserved. Acts as the identity in eval mode or when
    drop_prob == 0.
    """

    def __init__(self, drop_prob: float = 0.0):
        super().__init__()
        self.drop_prob = float(drop_prob)

    def forward(self, x):
        if not self.training or self.drop_prob == 0.0:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli(keep_prob) draw per sample, broadcast over all other dims.
        mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
        mask = mask.add_(keep_prob).floor_()  # 1 with prob keep_prob, else 0
        return x / keep_prob * mask


def label_smoothing_ce(pred, target, smoothing: float = 0.1):
    """Cross-entropy with label smoothing.

    The true class receives probability 1 - smoothing; the remaining mass is
    spread uniformly over the other n_class - 1 classes. Returns the mean
    loss over the batch.
    """
    num_classes = pred.size(-1)
    log_prob = F.log_softmax(pred, dim=-1)
    with torch.no_grad():
        smoothed = torch.full_like(log_prob, smoothing / (num_classes - 1))
        smoothed.scatter_(1, target.unsqueeze(1), 1 - smoothing)
    return (-smoothed * log_prob).sum(dim=-1).mean()


def mixup_data(x1, x2, y, alpha: float = 0.4):
    """Apply one mixup draw consistently to both inputs (same lam and the
    same permutation for GADF and CWT).

    Returns (x1_mix, x2_mix, y_a, y_b, lam); when alpha <= 0 the inputs are
    returned untouched with lam == 1.0.
    """
    if alpha <= 0:
        return x1, x2, y, y, 1.0
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    perm = torch.randperm(x1.size(0), device=x1.device)
    mixed_1 = lam * x1 + (1 - lam) * x1[perm]
    mixed_2 = lam * x2 + (1 - lam) * x2[perm]
    return mixed_1, mixed_2, y, y[perm], lam


def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Mixup loss: convex combination of the loss against both label sets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b


# ------------------ 深度可分卷积 Blocks ------------------ #
class DepthwiseSeparableConv_2(nn.Module):
    """Depthwise separable conv with SiLU activations (v2 building block):
    depthwise conv -> BN -> SiLU, then 1x1 pointwise conv -> BN -> SiLU."""

    def __init__(self, in_channels, out_channels, k=3, s=1, p=1):
        super().__init__()
        # Depthwise: one filter per input channel (groups == in_channels).
        self.dw = nn.Conv2d(in_channels, in_channels, k, s, p, groups=in_channels, bias=False)
        self.bn1 = nn.BatchNorm2d(in_channels)
        # Pointwise: 1x1 channel mixer.
        self.pw = nn.Conv2d(in_channels, out_channels, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        out = F.silu(self.bn1(self.dw(x)), inplace=True)
        out = F.silu(self.bn2(self.pw(out)), inplace=True)
        return out


class ResidualBlock_2(nn.Module):
    '''Improved residual block (v2): SiLU activations and DropPath applied to
    the main path before the residual add.'''

    def __init__(self, in_c, out_c, stride=1, drop_path: float = 0.0):
        super().__init__()
        self.conv1 = nn.Conv2d(in_c, out_c, 3, stride, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_c)
        self.conv2 = nn.Conv2d(out_c, out_c, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_c)
        self.ds = DepthwiseSeparableConv_2(out_c, out_c)
        # Projection shortcut only when the output shape differs.
        if stride == 1 and in_c == out_c:
            self.short = nn.Identity()
        else:
            self.short = nn.Sequential(
                nn.Conv2d(in_c, out_c, 1, stride, bias=False),
                nn.BatchNorm2d(out_c),
            )
        self.drop_path = DropPath(drop_path)

    def forward(self, x):
        shortcut = self.short(x)
        out = F.silu(self.bn1(self.conv1(x)), inplace=True)
        out = F.silu(self.bn2(self.conv2(out)), inplace=True)
        out = self.drop_path(self.ds(out))
        return F.silu(out + shortcut)

class BranchAttentionFusion(nn.Module):
    """Cross-branch attention fusion of two same-shaped feature maps.

    Pools each branch globally, predicts a pair of softmax weights with a
    small MLP — ``alpha_gadf, alpha_cwt = softmax(MLP([GAP(gadf), GAP(cwt)]))``
    — blends the two maps with those weights, then refines the result with a
    1x1 Conv-BN-SiLU projection followed by CBAM.

    Returns the fused map and the (B, 2) weight matrix.
    """

    def __init__(self, channels: int, reduce: int = 4):
        super().__init__()
        hid = max(channels // reduce, 8)
        # Global average pool: (B, C, H, W) -> (B, C, 1, 1); flattened in forward.
        self.gap = nn.AdaptiveAvgPool2d(1)
        # Tiny MLP producing one logit per branch.
        self.mlp = nn.Sequential(
            nn.Linear(2 * channels, hid, bias=False),
            nn.SiLU(inplace=True),
            nn.Linear(hid, 2, bias=False),
        )
        # 1x1 conv + BN + SiLU applied to the blended map.
        self.proj = nn.Sequential(
            nn.Conv2d(channels, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.SiLU(inplace=True),
        )
        self.cbam = CBAM(channels)  # convolutional block attention module

    def forward(self, gadf, cwt):
        pooled = torch.cat([self.gap(gadf).flatten(1),
                            self.gap(cwt).flatten(1)], dim=1)
        alpha = torch.softmax(self.mlp(pooled), dim=1)   # (B, 2) branch weights
        w_gadf = alpha[:, 0].view(-1, 1, 1, 1)
        w_cwt = alpha[:, 1].view(-1, 1, 1, 1)
        fused = w_gadf * gadf + w_cwt * cwt              # (B, C, H, W)
        fused = self.cbam(self.proj(fused))
        return fused, alpha


# ------------------ Model ------------------ #
class P2D_DSResNet_V2(nn.Module):
    """Upgraded two-branch P2D-DSResNet classifier.

    Two parallel branches (GADF and CWT images, each single-channel) share the
    same layout: conv stem -> pool -> two residual stages -> pool. Their 64-ch
    outputs are fused with attention weighting + CBAM, then classified.

    Args:
        num_classes: size of the classifier output.
        n_blocks: residual blocks per stage (each branch has two stages).
        base_channels: channel width of the stem convolutions.
        drop_path_rate: maximum stochastic-depth rate; linearly ramped from 0
            across the 2 * n_blocks residual blocks of each branch.
        dropout_p: dropout probability before the final classifier
            (<= 0 disables dropout).
    """
    def __init__(self, num_classes: int = 5, n_blocks: int = 3,
                 base_channels: int = 16, drop_path_rate: float = 0.1,
                 dropout_p: float = 0.2):
        super().__init__()
        C = base_channels

        # GADF stem: two 3x3 convs + BN, then a 5x5 / stride-2 max pool.
        self.gadf_conv1 = nn.Conv2d(1, C, 3, padding=1, bias=False); self.gadf_bn1 = nn.BatchNorm2d(C)
        self.gadf_conv2 = nn.Conv2d(C, C, 3, padding=1, bias=False); self.gadf_bn2 = nn.BatchNorm2d(C)
        self.gadf_pool1 = nn.MaxPool2d(5, 2, 2)

        # CWT stem: identical layout to the GADF stem.
        self.cwt_conv1 = nn.Conv2d(1, C, 3, padding=1, bias=False); self.cwt_bn1 = nn.BatchNorm2d(C)
        self.cwt_conv2 = nn.Conv2d(C, C, 3, padding=1, bias=False); self.cwt_bn2 = nn.BatchNorm2d(C)
        self.cwt_pool1 = nn.MaxPool2d(5, 2, 2)

        # Per-block DropPath rates, linearly ramped over the 2 * n_blocks
        # residual blocks of each branch (shared schedule for both branches).
        dpr = torch.linspace(0, drop_path_rate, steps=n_blocks * 2)
        # Residual stages (same layout per branch): C -> 32 -> 64.
        # All blocks use stride 1: spatial downsampling is done by the pooling
        # layers, not inside the residual blocks. (The original code's
        # ``stride=1 if i else 1`` was a tautology — simplified to 1.)
        # stage1: C -> 32
        self.gadf_res1 = nn.ModuleList([
            ResidualBlock_2(C if i == 0 else 32, 32, stride=1,
                            drop_path=float(dpr[i]))
            for i in range(n_blocks)])
        self.cwt_res1 = nn.ModuleList([
            ResidualBlock_2(C if i == 0 else 32, 32, stride=1,
                            drop_path=float(dpr[i]))
            for i in range(n_blocks)])
        # stage2: 32 -> 64
        self.gadf_res2 = nn.ModuleList([
            ResidualBlock_2(32 if i == 0 else 64, 64, stride=1,
                            drop_path=float(dpr[n_blocks + i]))
            for i in range(n_blocks)])
        self.cwt_res2 = nn.ModuleList([
            ResidualBlock_2(32 if i == 0 else 64, 64, stride=1,
                            drop_path=float(dpr[n_blocks + i]))
            for i in range(n_blocks)])

        self.gadf_pool2 = nn.MaxPool2d(2, 2)
        self.cwt_pool2  = nn.MaxPool2d(2, 2)

        # Attention fusion on the 64-channel maps from both branches.
        self.fusion = BranchAttentionFusion(channels=64)

        # Classifier head: global average pool -> FC -> dropout -> FC.
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(64, 128)
        self.drop = nn.Dropout(dropout_p) if dropout_p > 0 else nn.Identity()
        self.fc2 = nn.Linear(128, num_classes)

        self._init_weights()

    def _init_weights(self):
        """Kaiming-normal init for conv/linear weights, unit/zero for norms.

        'relu' gain is used as the usual approximation for SiLU networks.
        """
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if getattr(m, "bias", None) is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight); nn.init.zeros_(m.bias)

    def _branch_forward(self, x, conv1, bn1, conv2, bn2, pool1, blocks1, blocks2, pool2):
        """Run one branch: stem convs -> pool -> stage1 -> stage2 -> pool."""
        x = F.silu(bn1(conv1(x)), inplace=True)
        x = F.silu(bn2(conv2(x)), inplace=True)
        x = pool1(x)
        for b in blocks1:
            x = b(x)
        for b in blocks2:
            x = b(x)
        x = pool2(x)
        return x

    def forward(self, gadf_input: torch.Tensor, cwt_input: torch.Tensor):
        """Classify a (GADF image, CWT image) pair; returns (B, num_classes) logits."""
        gadf = self._branch_forward(
            gadf_input, self.gadf_conv1, self.gadf_bn1, self.gadf_conv2, self.gadf_bn2,
            self.gadf_pool1, self.gadf_res1, self.gadf_res2, self.gadf_pool2
        )
        cwt = self._branch_forward(
            cwt_input, self.cwt_conv1, self.cwt_bn1, self.cwt_conv2, self.cwt_bn2,
            self.cwt_pool1, self.cwt_res1, self.cwt_res2, self.cwt_pool2
        )

        # alpha: (B, 2) per-sample branch weights; useful for inspection.
        fused, alpha = self.fusion(gadf, cwt)

        x = self.gap(fused).flatten(1)
        x = F.silu(self.fc1(x), inplace=True)
        x = self.drop(x)
        x = self.fc2(x)
        return x
