import torch
import torch.nn as nn
import torch.nn.functional as F
# from scipy.fft import dctn, idctn
# from torch.fft import fftn, fftshift
# from torch.fft. import dctn, idctn
from .conv import Conv, DWConv, GhostConv, LightConv, RepConv, autopad
# NOTE: "LightDCMAblock" removed from the public API — its definition below is
# entirely commented out, so listing it here made `from <module> import *`
# raise AttributeError.
__all__ = ("DCMA", "EfficientDCMA", "DCMAblock")

# 以下是关键子模块实现


class SimpleChannelMod(nn.Module):
    """Lightweight channel modulation producing per-channel sigmoid weights.

    Fuses a global-pooling descriptor with a (spatially centered)
    local-difference descriptor through a 1x1 conv + sigmoid head.

    Args:
        in_channels: input channel count C; assumed divisible by 4 for the
            grouped conv in the local branch -- TODO confirm at call sites.
        reduction: bottleneck ratio for the intermediate width.
    """

    def __init__(self, in_channels, reduction=8):
        super().__init__()
        mid_channels = max(in_channels // reduction, 16)

        # Global statistics branch: GAP -> 1x1 conv -> Hardswish.
        self.global_stats = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, mid_channels, 1),
            nn.Hardswish()
        )

        # Local difference branch: grouped 3x3 -> InstanceNorm -> SiLU.
        self.local_diff = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, 3, padding=1, groups=4),
            nn.InstanceNorm2d(mid_channels),
            nn.SiLU()
        )

        # Dual-descriptor fusion back to C channel-attention weights in (0, 1).
        self.fusion = nn.Sequential(
            nn.Conv2d(mid_channels * 2, in_channels, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Return channel-attention weights of shape [B, C, 1, 1]."""
        global_desc = self.global_stats(x)                    # [B, mid, 1, 1]

        local = self.local_diff(x)                            # [B, mid, H, W]
        local = local - local.mean(dim=[2, 3], keepdim=True)  # zero-center spatially

        # NOTE(review): the spatial mean of the just-centered features is
        # exactly zero, so this descriptor is a constant 0 tensor and the
        # local branch only contributes via the fusion conv's bias.  Likely
        # unintended -- consider e.g. local.abs().mean(...) instead.
        local_desc = local.mean(dim=[2, 3], keepdim=True)     # [B, mid, 1, 1]

        fused = torch.cat([global_desc, local_desc], dim=1)   # [B, mid*2, 1, 1]
        return self.fusion(fused)                             # [B, C, 1, 1]

class DynamicKernel(nn.Module):
    """Soft selection among fixed box-filter (mean) kernels.

    A tiny GAP-based selector produces a softmax over `kernel_sizes`; the
    output is the matching weighted sum of k x k mean-filter responses.
    Operates on single-channel maps (the filters have in_channels=1).
    """

    def __init__(self, kernel_sizes=[3, 5, 7]):
        super().__init__()
        self.kernel_sizes = kernel_sizes
        # GAP -> 1x1 convs emitting one logit per candidate kernel size.
        self.selector = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(1, 4, 1),
            nn.ReLU(),
            nn.Conv2d(4, len(kernel_sizes), 1)
        )

    def forward(self, x):
        bs, _, h, w = x.shape

        # Per-sample softmax weights over the candidate kernels: [B, K].
        logits = self.selector(x.mean(1, keepdim=True))  # [B, K, 1, 1]
        k_weights = F.softmax(logits, dim=1).squeeze(-1).squeeze(-1)

        out = torch.zeros_like(x)
        for idx, k in enumerate(self.kernel_sizes):
            # Clamp padding for tiny inputs, then crop back to (h, w).
            pad = min((k - 1) // 2, h - 1, w - 1)
            box = torch.ones(1, 1, k, k, device=x.device) / k ** 2
            smoothed = F.conv2d(x, weight=box, padding=pad)[:, :, :h, :w]

            # [B] -> [B, 1, 1, 1] so the weight broadcasts over the map.
            out = out + k_weights[:, idx][:, None, None, None] * smoothed
        return out


class ARFSelector(nn.Module):
    """Adaptive receptive-field selector.

    A Gumbel-softmax gate (stochastic: Gumbel noise is sampled on every
    forward pass, including eval mode) blends a fixed 5x5 depthwise "smooth"
    path with a dynamic multi-kernel "detail" path.
    """

    def __init__(self, channels):
        super().__init__()
        self.channels = channels
        # GAP -> 1x1 conv producing the two path logits.
        self.selector = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, 2, 1)
        )
        # Depthwise smoothing path and input-conditioned detail path.
        self.dw_conv = nn.Conv2d(channels, channels, 5, padding=2, groups=channels)
        self.dynamic_conv = DynamicConv(channels)

    def forward(self, x):
        # Gate weights [B, 2, 1, 1]; one scalar map per path.
        gate = F.gumbel_softmax(self.selector(x), dim=1)
        smooth_gate = gate[:, 0].unsqueeze(1)  # [B, 1, 1, 1]
        detail_gate = gate[:, 1].unsqueeze(1)  # [B, 1, 1, 1]

        # Each gate broadcasts over [B, C, H, W].
        return self.dw_conv(x) * smooth_gate + self.dynamic_conv(x) * detail_gate

class DynamicConv(nn.Module):
    """Input-conditioned mixture of depthwise convolutions.

    A GAP head predicts softmax weights over `kernel_sizes`; the output is
    the corresponding weighted sum of the depthwise conv responses, keeping
    the input shape.
    """

    def __init__(self, channels, kernel_sizes=[3, 5]):
        super().__init__()
        # One depthwise conv per candidate kernel size, padding preserves H/W.
        self.conv_layers = nn.ModuleList([
            nn.Conv2d(channels, channels, k,
                     padding=(k - 1) // 2, groups=channels)
            for k in kernel_sizes
        ])
        # GAP -> 1x1 conv producing one mixing logit per kernel.
        self.weight_gen = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, len(kernel_sizes), 1)
        )

    def forward(self, x):
        bs = x.size(0)
        mix = F.softmax(self.weight_gen(x), dim=1)  # [B, K, 1, 1]

        out = torch.zeros_like(x)
        for idx, conv in enumerate(self.conv_layers):
            # [B] -> [B, 1, 1, 1] so each weight broadcasts over its map.
            out = out + mix[:, idx].view(bs, 1, 1, 1) * conv(x)
        return out




class RecursiveRefinement(nn.Module):
    """Iterative refinement that re-injects the original input at each level.

    Every level concatenates the current features with the ORIGINAL input
    (2C channels) and projects back down to C via a 3x3 conv + ReLU.  Each
    level has its own parameters.
    """

    def __init__(self, in_channels, levels=3):
        super().__init__()
        self.levels = levels
        self.refine_blocks = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_channels * 2, in_channels, 3, padding=1),
                nn.ReLU(inplace=True)
            ) for _ in range(levels)
        ])

    def forward(self, x):
        anchor = x.clone()  # original input, concatenated at every level
        out = x
        for refine in self.refine_blocks:
            out = refine(torch.cat([out, anchor], dim=1))  # back to C channels
        return out


class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    def forward(self, x):
        return x.mul(torch.tanh(F.softplus(x)))

class Aria(nn.Module):
    """Leaky activation with a learnable slope: elementwise max(x, alpha*x).

    NOTE(review): despite the name this is a PReLU-style function, not the
    sigmoid-based ARiA activation from the literature.
    """

    def __init__(self, alpha=0.5):
        super().__init__()
        self.alpha = nn.Parameter(torch.tensor(alpha))

    def forward(self, x):
        # torch.maximum == two-tensor torch.max (elementwise).
        return torch.maximum(x, self.alpha * x)

class DynamicSigmoid(nn.Module):
    """Sigmoid with a learnable per-channel slope (initialised to 1,
    i.e. a plain sigmoid at the start of training)."""

    def __init__(self, channels):
        super().__init__()
        self.slope = nn.Parameter(torch.ones(1, channels, 1, 1))

    def forward(self, x):
        return torch.sigmoid(self.slope * x)

#----------------------------------DCMAblock-------------------------------------

class DCMAblock(nn.Module):
    """Dual cross-modulation attention block.

    Pipeline: multi-scale dilated feature fusion -> parallel dynamic spatial
    modulation and channel modulation -> cross-gated attention fusion ->
    channel shuffle + grouped conv -> adaptive receptive-field selection ->
    gating of the input + residual -> recursive refinement -> learnable-slope
    sigmoid.  Output shape equals input shape [B, C, H, W].

    Args:
        in_channels: channel count C; must be divisible by `groups` for the
            channel shuffle (SimpleChannelMod additionally uses groups=4
            internally -- TODO confirm divisibility at call sites).
        reduction: bottleneck ratio forwarded to SimpleChannelMod.
        groups: group count for the shuffle and the 7x7 grouped conv.
    """
    def __init__(self, in_channels, reduction=16, groups=4):
        super().__init__()
        self.in_channels = in_channels
        self.groups = groups
        
        # Multi-scale extraction: three 3x3 convs with dilation 1 / 2 / 3.
        self.dilated_convs = nn.ModuleList([
            nn.Conv2d(in_channels, in_channels, 3, padding=d, dilation=d) 
            for d in [1, 2, 3]
        ])
        self.scale_weights = nn.Parameter(torch.ones(3) / 3)  # learnable per-scale weights
        
        # Dynamic spatial-modulation branch.
        self.spatial_conv = nn.Conv2d(3, 1, kernel_size=7, padding=3)  # conv over pooled stats
        self.dynamic_kernel = DynamicKernel(kernel_sizes=[3, 5])  # soft box-filter selection


        self.channel_mod = SimpleChannelMod(in_channels, reduction)


        
        # Cross-modulation fusion; gates start at 0, so sigmoid(...) = 0.5.
        self.cross_gate = nn.Parameter(torch.zeros(2))
        self.group_conv = nn.Conv2d(in_channels, in_channels, 7, 
                                  padding=3, groups=groups)
        
        # Adaptive receptive-field selector.
        self.arf_selector = ARFSelector(in_channels)
        
        # Recursive refinement module.
        self.recursive_refinement = RecursiveRefinement(in_channels, levels=3)
        
        # Mixed activation functions.
        # NOTE(review): self.channel_act (Aria) is constructed but never used
        # in forward(); confirm whether it was meant to gate channel_att.
        self.spatial_act = Mish()
        self.channel_act = Aria()
        self.dynamic_sigmoid = DynamicSigmoid(in_channels)

    def forward(self, x):
        """Return the attention-modulated output; shape is preserved."""
        residual = x
        # e.g. x.shape == torch.Size([1, 16, 64, 64])
        
        # Stage 1: multi-scale feature extraction with softmax scale weights.
        multi_scale = []
        for conv in self.dilated_convs:
            multi_scale.append(conv(x))
        scale_weights = F.softmax(self.scale_weights, 0)
        fused_feat = sum(w * feat for w, feat in zip(scale_weights, multi_scale))
        # fused_feat keeps the input shape, e.g. [1, 16, 64, 64]
        
        # Dynamic spatial modulation from mean / max / std channel pools.
        gap = torch.mean(fused_feat, dim=1, keepdim=True)
        gmp, _ = torch.max(fused_feat, dim=1, keepdim=True)
        std = torch.std(fused_feat, dim=1, keepdim=True)
        spatial_pool = torch.cat([gap, gmp, std], dim=1)
        spatial_att = self.spatial_act(self.spatial_conv(spatial_pool))
        dynamic_spatial = self.dynamic_kernel(spatial_att)
        # dynamic_spatial: [B, 1, H, W]

        # Channel modulation.
        channel_att = self.channel_mod(fused_feat)
        # channel_att: [B, C, 1, 1]


        # NOTE(review): permuting the two unit dims of a [B, C, 1, 1] tensor
        # is a no-op, so this permute pair reduces to the plain broadcast
        # product channel_att * dynamic_spatial -> [B, C, H, W].
        cross_spatial = (channel_att.permute(0,1,3,2)  # [B, C, 1, 1] (unchanged)
                     * dynamic_spatial.permute(0,1,3,2))  # [B, 1, H, W] -> [B, 1, W, H]
                                                        # broadcast multiplies across channels
        cross_spatial = cross_spatial.permute(0,1,3,2)  # restore spatial order [B, C, H, W]
    
        cross_channel = (dynamic_spatial  # [B, 1, H, W]
                     * channel_att    # [B, C, 1, 1], broadcast to [B, C, H, W]
                    ).mean(dim=1, keepdim=True)  # aggregate channels -> [B, 1, H, W]
        fused_att = torch.sigmoid(self.cross_gate[0]*cross_spatial + self.cross_gate[1]*cross_channel)
        
        # Channel shuffle + grouped 7x7 conv over the fused attention map.
        shuffled = self.channel_shuffle(fused_att)
        grouped_att = self.group_conv(shuffled)
        
        # Adaptive receptive-field selection.
        arf_feat = self.arf_selector(grouped_att)
        
        # Gate the input, add the residual, then refine recursively.
        stage1_out = arf_feat * x
        # stage1_out = grouped_att * x
        stage2_in = stage1_out + residual
        final_out = self.recursive_refinement(stage2_in)
        
        # Learnable-slope sigmoid output activation.
        return self.dynamic_sigmoid(final_out)

    def channel_shuffle(self, x):
        """ShuffleNet-style channel shuffle across `self.groups` groups;
        requires channels % self.groups == 0."""
        batch, channels, height, width = x.size()
        channels_per_group = channels // self.groups
        x = x.view(batch, self.groups, channels_per_group, height, width)
        x = torch.transpose(x, 1, 2).contiguous()
        return x.view(batch, channels, height, width)



#---------------------------分割线--------------------------------



# class LightDCMAblock(nn.Module):
#     def __init__(self, in_channels, reduction=8, groups=8):
#         super().__init__()
#         self.in_channels = in_channels
#         self.groups = groups

#         # 轻量化多尺度特征提取
#         self.dilated_convs = nn.ModuleList([
#             nn.Sequential(
#                 nn.Conv2d(in_channels, in_channels, 3, padding=d, dilation=d, groups=4),
#                 nn.BatchNorm2d(in_channels)
#             ) for d in [1, 2]
#         ])
        
#         # 动态空间调制分支
#         self.dynamic_kernel = DynamicKernelV2(in_channels)
#         self.spatial_conv = DepthwiseSeparableConv(3, 1, 7)
        
#         # 通道调制分支（含DCT替代FFT）
#         self.dct_adapter = nn.Conv2d(in_channels, in_channels, 1)
#         self.channel_fc = nn.Sequential(
#             nn.Conv2d(in_channels, in_channels//reduction, 1),
#             nn.ReLU(),
#             nn.Conv2d(in_channels//reduction, in_channels, 1)
#         )
#         self.gru = nn.GRU(in_channels, in_channels//2, batch_first=True)
        
#         # 优化后的交叉调制
#         self.cross_gate = nn.Parameter(torch.tensor([0.5, 0.5]))
#         self.group_conv = DepthwiseSeparableConv(in_channels, in_channels, 5, groups=groups)
        
#         # 自适应感受野选择器
#         self.arf_selector = ARFSelectorV2(in_channels)
        
#         # 通道裁剪适配器
#         self.channel_adapter = nn.Conv2d(in_channels*2, in_channels, 1)

#     def forward(self, x):
#         residual = x
        
#         # 多尺度特征融合
#         multi_scale = [conv(x) for conv in self.dilated_convs]
#         fused_feat = torch.stack(multi_scale).mean(dim=0)
        
#         # 动态空间调制
#         gap = fused_feat.mean(1, keepdim=True)
#         gmp, _ = fused_feat.max(1, keepdim=True)
#         std = fused_feat.std(1, keepdim=True)
#         spatial_pool = torch.cat([gap, gmp, std], dim=1)
#         spatial_att = self.spatial_conv(spatial_pool)
#         dynamic_spatial = self.dynamic_kernel(spatial_att)
        
#         # DCT频域通道调制
#         dct_feat = self.dct_adapter(dctn(fused_feat, norm='ortho').real)
#         channel_att = self.channel_fc(dct_feat.mean(dim=(2,3), keepdim=True))
#         gru_out, _ = self.gru(channel_att.squeeze(-1).transpose(1,2))
#         channel_att = gru_out.transpose(1,2).unsqueeze(-1)
        
#         # 优化交叉调制
#         fused_att = torch.sigmoid(
#             self.cross_gate[0] * channel_att + 
#             self.cross_gate[1] * dynamic_spatial
#         )
        
#         # 分组特征处理
#         shuffled = self.channel_shuffle(fused_att)
#         grouped_att = self.group_conv(shuffled)
        
#         # 自适应感受野选择
#         arf_feat = self.arf_selector(grouped_att)
        
#         # 残差连接与通道适配
#         return self.channel_adapter(torch.cat([arf_feat * x, residual], dim=1))

#     def channel_shuffle(self, x):
#         bs, c, h, w = x.size()
#         x = x.view(bs, self.groups, c//self.groups, h, w)
#         x = x.permute(0, 2, 1, 3, 4).contiguous()
#         return x.view(bs, c, h, w)




class EfficientDCMA(nn.Module):
    """Lightweight dual-stream cooperative attention module.

    Combines a grouped local-context stream with a global-pooling stream,
    derives spatial and channel attention from them, applies a softmax-
    weighted bank of depthwise convolutions, and fuses everything with a
    learnable residual scale.

    Args:
        c1: input channel count.
        c2: output channel count (after the 1x1 adapter conv; must be
            divisible by 4 for the grouped local conv).
        reduction: bottleneck ratio for the channel-attention MLP.
        kernel_sizes: depthwise kernel sizes for the dynamic bank.
    """

    def __init__(self, c1, c2, reduction=8, kernel_sizes=[3, 5]):
        super().__init__()
        self.cv = Conv(c1, c2, 1)  # channel adapter

        # Dual-stream feature extraction.
        self.local_ctx = nn.Sequential(
            nn.Conv2d(c2, c2, 3, padding=1, groups=4),
            nn.BatchNorm2d(c2),
            nn.SiLU()
        )
        self.global_ctx = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(c2, c2, 1),
            nn.Sigmoid()
        )

        # Cooperative attention heads.
        self.spatial_att = nn.Sequential(
            nn.Conv2d(2, 1, 7, padding=3),
            nn.Sigmoid()
        )
        self.channel_att = nn.Sequential(
            nn.Conv2d(c2, c2 // reduction, 1),
            nn.ReLU(),
            nn.Conv2d(c2 // reduction, c2, 1),
            nn.Sigmoid()
        )

        # Dynamic depthwise kernel bank.
        # BUG FIX: the original code built this ModuleList and then
        # immediately overwrote the attribute with a single DynamicConv
        # instance, so forward()'s `enumerate(self.dynamic_conv)` raised
        # TypeError (an nn.Module is not iterable).  The overwriting
        # assignment has been removed so the ModuleList + kernel_weights
        # path actually runs.
        self.dynamic_conv = nn.ModuleList([
            nn.Conv2d(c2, c2, k, padding=(k - 1) // 2, groups=c2)
            for k in kernel_sizes
        ])
        self.kernel_weights = nn.Parameter(torch.ones(len(kernel_sizes)))

        # Learnable residual scale (starts at 0 => pure attention output).
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        """Apply the attention module; returns a [B, c2, H, W] tensor."""
        x = self.cv(x)
        residual = x

        # Dual-stream features.
        local_feat = self.local_ctx(x)
        global_feat = self.global_ctx(x)

        # Lightweight spatial attention from mean / max channel pools.
        gap = x.mean(1, keepdim=True)
        gmp = x.max(1, keepdim=True)[0]
        spatial_weight = self.spatial_att(torch.cat([gap, gmp], dim=1))

        # Channel attention driven by the squared local/global discrepancy.
        channel_weight = self.channel_att((local_feat - global_feat).pow(2))

        # Softmax-weighted multi-scale depthwise fusion.
        soft_weights = F.softmax(self.kernel_weights, 0)
        conv_outs = [conv(x + local_feat) * soft_weights[i]
                     for i, conv in enumerate(self.dynamic_conv)]
        dynamic_feat = torch.stack(conv_outs).sum(dim=0)

        # Cooperative attention fusion with gamma-scaled residual.
        return (dynamic_feat * spatial_weight * channel_weight) + (residual * self.gamma)

#----------------------------------VortexBlock-------------------------------------

class VortexBlock(nn.Module):
    """Gated residual block fusing axial spatial attention with channel
    self-attention via a cross-attention gate.

    Args:
        c: channel count (the axial branch works at c // 2 internally).
        reduction: currently unused; kept for interface compatibility.
    """

    def __init__(self, c, reduction=4):
        super().__init__()
        mid_dim = c // 2  # reduced width for the axial branch

        # Local context stream: grouped 3x3 -> BN -> SiLU.
        self.local_ctx = nn.Sequential(
            nn.Conv2d(c, c, 3, padding=1, groups=4),
            nn.BatchNorm2d(c),
            nn.SiLU()
        )

        # Axial spatial attention, squeezed to mid_dim and expanded back.
        self.axial_attn = nn.Sequential(
            nn.Conv2d(c, mid_dim, 1),
            AxialAttentionBlock(mid_dim),
            nn.Conv2d(mid_dim, c, 1)
        )

        # Channel self-attention (1x1-conv projections, implicit broadcast).
        self.channel_attn = ChannelAttention(c, c)

        # Spatial/channel cross-attention fusion.
        self.cross_att = CrossAttention(c)

    def forward(self, x):
        shortcut = x
        local_feat = self.local_ctx(x)
        spatial_feat = self.axial_attn(local_feat)
        channel_feat = self.channel_attn(local_feat)
        gate = self.cross_att(spatial_feat, channel_feat)
        # Gated residual: x * gate + x.
        return shortcut * gate + shortcut


class MSADBlock(nn.Module):
    """Multi-scale attention block: local context, then axial spatial
    attention and channel self-attention merged by cross-attention.

    NOTE(review): forward() applies `local_ctx` TWICE in a row; confirm this
    is intentional rather than a copy-paste slip.
    """

    def __init__(self, c):
        super().__init__()
        # Local feature extraction: grouped 3x3 -> BN -> SiLU.
        self.local_ctx = nn.Sequential(
            nn.Conv2d(c, c, 3, padding=1, groups=4),
            nn.BatchNorm2d(c),
            nn.SiLU()
        )
        self.axial_attn = AxialAttentionBlock(c)    # axial spatial attention
        self.channel_attn = ChannelAttention(c, c)  # channel self-attention
        self.cross_att = CrossAttention(c)          # spatial/channel fusion

    def forward(self, x):
        # Two consecutive passes through the same local-context layers.
        local_feat = self.local_ctx(self.local_ctx(x))

        spatial_feat = self.axial_attn(local_feat)
        channel_feat = self.channel_attn(local_feat)

        return self.cross_att(spatial_feat, channel_feat)

class DepthwiseSeparableConv(nn.Module):
    """Depthwise k x k convolution followed by an (optionally grouped)
    pointwise 1x1 projection; spatial size is preserved."""

    def __init__(self, in_c, out_c, k, groups=1):
        super().__init__()
        self.depthwise = nn.Conv2d(in_c, in_c, k, padding=(k - 1) // 2, groups=in_c)
        self.pointwise = nn.Conv2d(in_c, out_c, 1, groups=groups)

    def forward(self, x):
        dw = self.depthwise(x)
        return self.pointwise(dw)


class AxialAttentionBlock(nn.Module):
    """Two depthwise strip convolutions -- 1x5 (horizontal axis) followed by
    5x1 (vertical axis) -- with padding that preserves spatial size."""

    def __init__(self, c):
        super().__init__()
        self.h_conv = nn.Conv2d(c, c, (1, 5), padding=(0, 2), groups=c)
        self.w_conv = nn.Conv2d(c, c, (5, 1), padding=(2, 0), groups=c)

    def forward(self, x):
        horizontal = self.h_conv(x)   # sweep along width
        return self.w_conv(horizontal)  # then along height


class ChannelAttention(nn.Module):
    """Channel-wise self-attention over flattened spatial positions.

    Q/K/V come from 1x1 convolutions; a C_out x C_out attention matrix is
    applied to V and added back through a zero-initialised learnable scale,
    so the module is the identity at initialisation.  The residual `+ x`
    requires out_dim == in_dim -- TODO confirm callers satisfy this.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

        # 1x1 projections for query / key / value.
        self.query_conv = nn.Conv2d(in_dim, out_dim, kernel_size=1)
        self.key_conv = nn.Conv2d(in_dim, out_dim, kernel_size=1)
        self.value_conv = nn.Conv2d(in_dim, out_dim, kernel_size=1)

        self.softmax = nn.Softmax(dim=-1)
        self.gamma = nn.Parameter(torch.zeros(1))  # learnable residual scale

    def forward(self, x):
        bs, _, h, w = x.size()

        # Project, then flatten spatial dims.
        q = self.query_conv(x).view(bs, self.out_dim, -1)                 # (B, C', HW)
        k = self.key_conv(x).view(bs, self.out_dim, -1).permute(0, 2, 1)  # (B, HW, C')
        v = self.value_conv(x).view(bs, self.out_dim, -1)                 # (B, C', HW)

        # Channel-to-channel attention map.
        attn = self.softmax(torch.bmm(q, k))  # (B, C', C')

        # Apply attention to values and restore spatial layout.
        out = torch.bmm(attn, v).view(bs, self.out_dim, h, w)

        # Scaled residual connection.
        return self.gamma * out + x
    
class CrossAttention(nn.Module):
    """Spatial/channel axial-information exchange.

    Both inputs are projected down to c // 4 channels and merged back up to
    c by a bidirectional gated fusion module.
    """

    def __init__(self, c):
        super().__init__()
        self.spatial_proj = nn.Sequential(
            nn.Conv2d(c, c // 4, 1),
            nn.ReLU()
        )
        self.channel_proj = nn.Sequential(
            nn.Conv2d(c, c // 4, 1),
            nn.ReLU()
        )
        self.fusion = LightBiGatedFusion(c // 4, c // 4, c, groups=c // 4)

    def forward(self, spatial_feat, channel_feat):
        spatial_low = self.spatial_proj(spatial_feat)  # spatial axial features
        channel_low = self.channel_proj(channel_feat)  # channel axial features
        return self.fusion(spatial_low, channel_low)



class LightBiGatedFusion(nn.Module):
    """Lightweight bidirectional gated fusion of two feature maps.

    Each input is aligned to `out_dim` channels (1x1 conv + BN only when the
    widths differ), a shared head predicts one sigmoid gate per input, and
    the gated sum is refined by a depthwise-separable conv with a residual.
    """

    def __init__(self, in_dim_q, in_dim_k, out_dim, groups=8):
        super().__init__()
        self.out_dim = out_dim

        # Independent channel alignment for each stream.
        self.align_q = nn.Sequential(
            nn.Conv2d(in_dim_q, out_dim, 1),
            nn.BatchNorm2d(out_dim)
        ) if in_dim_q != out_dim else nn.Identity()

        self.align_k = nn.Sequential(
            nn.Conv2d(in_dim_k, out_dim, 1),
            nn.BatchNorm2d(out_dim)
        ) if in_dim_k != out_dim else nn.Identity()

        # Gate head: bottleneck -> grouped 3x3 -> sigmoid, emitting both gates.
        self.gate_gen = nn.Sequential(
            nn.Conv2d(2 * out_dim, out_dim // 4, 1),
            nn.Hardswish(inplace=True),
            nn.Conv2d(out_dim // 4, 2 * out_dim, 3, padding=1, groups=groups),
            nn.Sigmoid()
        )

        # Depthwise-separable refinement.
        self.fusion = nn.Sequential(
            nn.Conv2d(out_dim, out_dim, 3, padding=1, groups=out_dim),
            nn.BatchNorm2d(out_dim),
            nn.Conv2d(out_dim, out_dim, 1),
            nn.Hardswish(inplace=True)
        )

    def forward(self, q, k):
        # Bring both streams to out_dim channels.
        q = self.align_q(q)
        k = self.align_k(k)

        # Predict one gate per stream from the concatenated features.
        gates = self.gate_gen(torch.cat([q, k], dim=1))
        gate_q, gate_k = gates.chunk(2, dim=1)
        blended = q * gate_q + k * gate_k

        # Residual refinement.
        return self.fusion(blended) + blended



#-----------------------DCMA模块-----------------

class DCMA(nn.Module):
    """R-ELAN-style aggregation layer whose inner branches are either stacked
    MSADBlock pairs (DCMA=True) or C3k modules (DCMA=False).

    Args:
        c1, c2: input / output channel counts.
        n: number of sequential inner branches.
        DCMA: select MSADBlock pairs (True) or C3k (False) as the branch type.
        residual: when True together with DCMA, add a gamma-scaled residual
            of the raw input to the output.
        e: hidden-channel expansion ratio.
        g, shortcut: forwarded to C3k when DCMA is False.
    """

    def __init__(self, c1, c2, n=1, DCMA=True, residual=False, e=0.5, g=1, shortcut=True):
        super().__init__()
        hidden = int(c2 * e)  # hidden channels

        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv((1 + n) * hidden, c2, 1)  # optional act=FReLU(c2)

        # Small init keeps the residual branch near-identity at the start.
        scale_init = 0.01
        self.gamma = (
            nn.Parameter(scale_init * torch.ones((c2)), requires_grad=True)
            if DCMA and residual else None
        )

        self.m = nn.ModuleList(
            nn.Sequential(*(MSADBlock(hidden) for _ in range(2)))
            if DCMA else C3k(hidden, hidden, 2, shortcut, g)
            for _ in range(n)
        )

    def forward(self, x):
        """Forward pass through the R-ELAN layer."""
        feats = [self.cv1(x)]
        for branch in self.m:
            feats.append(branch(feats[-1]))  # each branch consumes the previous output
        merged = self.cv2(torch.cat(feats, 1))
        if self.gamma is None:
            return merged
        return x + self.gamma.view(1, -1, 1, 1) * merged


#-----------------------C3模块-----------------
class C3(nn.Module):
    """CSP Bottleneck with 3 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
        """Build the two parallel 1x1 stems, the bottleneck chain, and the
        merging 1x1 convolution."""
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(c1, hidden, 1, 1)
        self.cv3 = Conv(2 * hidden, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.Sequential(*(
            Bottleneck(hidden, hidden, shortcut, g, k=((1, 1), (3, 3)), e=1.0)
            for _ in range(n)
        ))

    def forward(self, x):
        """Run the bottleneck chain on one stem, bypass through the other,
        then merge with the final 1x1 conv."""
        main = self.m(self.cv1(x))
        bypass = self.cv2(x)
        return self.cv3(torch.cat((main, bypass), 1))

class Bottleneck(nn.Module):
    """Standard bottleneck with an optional identity shortcut."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """Two stacked convs (kernels k[0] then k[1]); the shortcut is active
        only when requested and c1 == c2."""
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, k[0], 1)
        self.cv2 = Conv(hidden, c2, k[1], 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Apply conv-conv, optionally adding the input back."""
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out

class C3k(C3):
    """C3 variant whose bottleneck chain uses a configurable square kernel."""

    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, k=3):
        """Build the base C3, then replace its chain with k x k bottlenecks."""
        super().__init__(c1, c2, n, shortcut, g, e)
        hidden = int(c2 * e)  # hidden channels
        self.m = nn.Sequential(*(
            Bottleneck(hidden, hidden, shortcut, g, k=(k, k), e=1.0)
            for _ in range(n)
        ))



  