import torch
import torch.nn as nn
import torch.nn.functional as F
# from scipy.fft import dctn, idctn
# from torch.fft import fftn, fftshift
# from torch.fft. import dctn, idctn
from .conv import Conv, DWConv, GhostConv, LightConv, RepConv, autopad
__all__ = ("DDA_Bottleneck",)

#-------------------------------------------DDA_Bottleneck模块---------------------------------------------
class DDA_Bottleneck(nn.Module):
    """Dual-granularity attention bottleneck with a GhostConv shortcut.

    The input is projected to c2 channels and split in half; one half drives a
    coarse (global-context) attention branch, the other a detail (axial conv)
    branch. The two attention maps are blended by a learnable coefficient,
    applied to the first half, and the re-concatenated tensor is channel
    shuffled and added to the GhostConv shortcut.

    Fixes over the original: `Conv` has no `depthwise` keyword (depthwise is
    expressed via groups, so __init__ crashed); the attention branches emitted
    c2//8 channels that could not broadcast against the c2//4-channel halves;
    and the final add mixed c2//4-channel features with the c2-channel
    shortcut. Channel widths below are chosen so every op is shape-consistent.
    NOTE(review): c2 must be divisible by self.groups (8) for the shuffle.
    """

    def __init__(self, c1, c2):
        super().__init__()
        self.groups = 8
        # 1x1 projection followed by a depthwise 3x3 (groups == channels)
        self.cv1 = nn.Sequential(
            Conv(c1, c2, 1),
            Conv(c2, c2, 3, g=c2))

        # Dual-granularity attention over the two c2//2 halves
        self.coarse = nn.Sequential(            # global-context branch -> [B, c2//2, 1, 1]
            nn.AdaptiveAvgPool2d(1),
            Conv(c2 // 2, c2 // 2, 1, act=nn.ReLU()))
        self.detail = nn.Sequential(            # axial local-detail branch -> [B, c2//2, H, W]
            Conv(c2 // 2, c2 // 2, (1, 7)),
            Conv(c2 // 2, c2 // 2, (7, 1)))

        self.fusion = nn.Parameter(torch.tensor(0.5))  # learnable blend coefficient

        # Shortcut path
        self.ghost = GhostConv(c1, c2)

    def forward(self, x):
        identity = self.ghost(x)
        x1, x2 = self.cv1(x).chunk(2, dim=1)  # two c2//2 halves

        # Attention map generation
        w_coarse = self.coarse(x1).sigmoid()
        w_detail = self.detail(x2).sigmoid()

        # Learnable blend of the two granularities (coarse broadcasts over H, W)
        alpha = torch.sigmoid(self.fusion)
        attn = alpha * w_coarse + (1 - alpha) * w_detail

        # Modulate the first half, restore full width, shuffle, add shortcut
        out = torch.cat([x1 * attn, x2], dim=1)
        return identity + self.channel_shuffle(out)

    def channel_shuffle(self, x):
        """Interleave channels across self.groups groups (ShuffleNet-style)."""
        batch, channels, height, width = x.size()
        channels_per_group = channels // self.groups
        x = x.view(batch, self.groups, channels_per_group, height, width)
        x = torch.transpose(x, 1, 2).contiguous()
        return x.view(batch, channels, height, width)
    




#-------------------------------------------HCGA模块---------------------------------------------
#子模块
class HierarchicalSpatialAtt(nn.Module):
    """Hierarchical spatial attention.

    Three spatial attention maps are produced from channel-pooled statistics
    with 7x7 / 5x5 / 3x3 receptive fields, and a small gating head predicts
    per-sample softmax weights to blend them into one [B, 1, H, W] map.

    Note: `channels` is accepted for interface compatibility but unused — the
    branch inputs are always the 2-channel [avg, max] statistics.
    """

    def __init__(self, channels):
        super().__init__()
        # Three attention branches with decreasing receptive field
        self.conv_layers = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(2, 1, k, padding=(k - 1) // 2, bias=False),
                nn.BatchNorm2d(1),
                nn.Sigmoid()
            ) for k in [7, 5, 3]
        ])
        # Dynamic weight generator (input channels = gap + gmp = 2)
        self.weight_gen = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(2, 3, 1)
        )

    def forward(self, x):
        # Channel-pooled statistics: [B, 2, H, W]
        gap = x.mean(1, keepdim=True)
        gmp = x.max(1, keepdim=True)[0]
        feat = torch.cat([gap, gmp], dim=1)

        # Per-branch attention maps, each [B, 1, H, W]
        att_weights = [conv(feat) for conv in self.conv_layers]

        # Per-sample blending weights: [B, 3, 1, 1]
        fusion_weights = F.softmax(self.weight_gen(feat), dim=1)

        # Hierarchical fusion. Indexing with [:, i:i+1] keeps the singleton
        # channel dim; the original `unbind(1)` dropped it, so the [B, 1, 1]
        # weight broadcast against [B, 1, H, W] into a wrong [B, B, H, W]
        # result whenever B > 1.
        return sum(fusion_weights[:, i:i + 1] * att
                   for i, att in enumerate(att_weights))

class FFTEnhancedChannelAtt(nn.Module):
    """Channel attention mixing an FFT-derived frequency branch with a
    squeeze-excite spatial branch via two learnable coefficients.

    Returns a [B, C, 1, 1] attention tensor.
    """

    def __init__(self, channels, reduction=16):
        super().__init__()
        # Frequency branch: 2 input maps (channel-mean real / imag parts)
        self.freq_conv = nn.Sequential(
            nn.Conv2d(2, channels // 4, 1),
            nn.ReLU(),
            nn.Conv2d(channels // 4, channels, 1)
        )
        # Spatial branch: classic squeeze-excite bottleneck
        self.spatial_fc = nn.Sequential(
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(),
            nn.Conv2d(channels // reduction, channels, 1),
            nn.Sigmoid()
        )
        # Learnable mixing coefficients, pinned to float32
        self.alpha = nn.Parameter(torch.tensor(0.6, dtype=torch.float32))
        self.beta = nn.Parameter(torch.tensor(0.4, dtype=torch.float32))
        # Buffer kept as a float32 anchor for mixed-precision bookkeeping
        self.register_buffer('dummy', torch.zeros(1, dtype=torch.float32))

    def forward(self, x):
        # Stage 1: FFT runs in fp32 with autocast disabled for stability
        with torch.autocast(device_type='cuda', enabled=False):
            spectrum = torch.fft.fft2(x.to(torch.float32))
            freq_feat = torch.cat(
                [spectrum.real.mean(dim=1, keepdim=True),   # [B,1,H,W]
                 spectrum.imag.mean(dim=1, keepdim=True)],
                dim=1,
            ).to(x.dtype)  # back to the caller's precision

        # Stage 2: frequency attention at normal precision
        freq_att = torch.sigmoid(self.freq_conv(freq_feat).mean([2, 3], keepdim=True))

        # Stage 3: spatial (squeeze-excite) attention
        spatial_att = self.spatial_fc(x.mean([2, 3], keepdim=True))

        # Dynamic fusion; cast the coefficients to the activation dtype
        return self.alpha.to(x.dtype) * freq_att + self.beta.to(x.dtype) * spatial_att



class FastFreqAtt(nn.Module):
    """Minimal frequency-domain channel attention, safe under mixed precision."""

    def __init__(self, channels):
        super().__init__()
        self.fc = nn.Linear(channels, channels)
        # Register a float32 buffer as a type-safety anchor
        self.register_buffer('dummy', torch.tensor(0, dtype=torch.float32))

    def forward(self, x):
        batch, chans = x.shape[:2]
        # FFT must run in fp32, so autocast is disabled for this step
        with torch.autocast(device_type='cuda', enabled=False):
            spectrum = torch.fft.rfft2(x.to(torch.float32), norm='ortho')

        # Magnitude spectrum, averaged spatially, cast back to input dtype
        magnitude = torch.sqrt(spectrum.real ** 2 + spectrum.imag ** 2 + 1e-6)
        magnitude = magnitude.mean(dim=[2, 3]).to(x.dtype)  # [B, C]

        # Per-channel gate in [0, 1]
        return torch.sigmoid(self.fc(magnitude)).view(batch, chans, 1, 1)



class DeformableDynamicConv(nn.Module):
    """Simplified deformable dynamic convolution.

    A small network predicts a per-kernel (x, y) offset field; each depthwise
    conv branch's output is resampled with grid_sample at the offset grid,
    the branches are averaged, and a 1x1 conv refines the result with a
    residual connection.
    """

    def __init__(self, channels, kernel_sizes=(3, 5)):
        super().__init__()
        # Lightweight offset generator: one (x, y) offset pair per branch
        self.offset_gen = nn.Sequential(
            nn.Conv2d(channels, 8, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(8, 2 * len(kernel_sizes), 1)
        )

        # Depthwise conv branches, one per kernel size
        self.convs = nn.ModuleList([
            nn.Conv2d(channels, channels, k, padding=(k - 1) // 2, groups=channels)
            for k in kernel_sizes
        ])

        # Single-layer refinement (kept shallow on purpose)
        self.refine = nn.Conv2d(channels, channels, 1)

    def forward(self, x):
        offsets = self.offset_gen(x)  # [B, 2*K, H, W]
        B, _, H, W = x.shape
        K = len(self.convs)

        outputs = []
        for i in range(K):
            offset = offsets[:, 2 * i:2 * (i + 1)]  # this branch's (x, y) offsets
            conv_out = self.convs[i](x)
            # Resample the conv output at the offset sampling grid
            grid = self._get_grid(offset, H, W)
            outputs.append(F.grid_sample(conv_out, grid, align_corners=True))

        fused = torch.stack(outputs).mean(dim=0)
        return self.refine(fused + x)  # residual connection

    def _get_grid(self, offset, H, W):
        """Identity sampling grid in [-1, 1] plus the predicted offsets.

        Fixes over the original: meshgrid is called with explicit
        indexing='ij' (silences the deprecation warning and pins today's
        behavior), and the base grid is created directly with the offset's
        device and dtype instead of a CPU float32 tensor moved afterwards.
        """
        ys, xs = torch.meshgrid(
            torch.linspace(-1, 1, H, device=offset.device, dtype=offset.dtype),
            torch.linspace(-1, 1, W, device=offset.device, dtype=offset.dtype),
            indexing='ij',
        )
        base = torch.stack((xs, ys), 2).unsqueeze(0)  # [1, H, W, 2], (x, y) order
        return (base + offset.permute(0, 2, 3, 1)).clamp(-1, 1)



class HCGA(nn.Module):
    """Hybrid attention block: hierarchical spatial attention plus FFT channel
    attention feeding a deformable dynamic conv, with a learnable-scale
    residual connection."""

    def __init__(self, c1, c2):
        super().__init__()
        self.cv = Conv(c1, c2, 1)
        # 1x1 conv that aligns channels before the attention branches
        self.channel_align = Conv(c2, c2, 1)
        self.spatial_att = HierarchicalSpatialAtt(c2)
        self.channel_att = FFTEnhancedChannelAtt(c2)
        self.dynamic_conv = DeformableDynamicConv(c2)
        # Learnable per-channel scaling on the dynamic branch
        self.scale = nn.Parameter(torch.ones(1, c2, 1, 1))

    def forward(self, x):
        feat = self.cv(x)

        # Channel-aligned input for both attention branches
        aligned = self.channel_align(feat)

        # Combined (spatial + channel) attention mask, broadcast over channels
        spatial_mask = self.spatial_att(aligned).expand_as(feat)
        channel_mask = self.channel_att(aligned)
        modulated = feat * (spatial_mask + channel_mask)

        # Deformable refinement, then scaled residual back onto feat
        return self.dynamic_conv(modulated) * self.scale + feat
    

    

#-------------------------------------------MSAD模块-------------------------------
#结合 多尺度特征蒸馏 与轴向注意力，通过下采样-上采样结构捕捉跨尺度信息，替代原始单尺度计算。
#金字塔多尺度处理，增强特征多样性。
#蒸馏操作减少计算量（用1/4尺度计算注意力）。
#轴向注意力仅应用在低分辨率特征上，降低计算成本。
#封装进现有的轴向注意力融合中试试效果

class QKMSAD(nn.Module):
    """Fuse two feature maps (used as query and key) via multi-scale axial
    attention computed at 1/4 resolution.

    Fix over the original: the attention features were downsampled twice but
    upsampled only once, so the final torch.cat crashed on mismatched spatial
    sizes. Upsampling now runs twice, with an exact-size interpolation
    fallback for odd input resolutions.
    """

    def __init__(self, in_dim, out_dim):
        super().__init__()
        # Multi-scale downsampling (applied twice in forward -> 1/4 scale)
        self.downsample = nn.AvgPool2d(2)
        # Low-resolution axial attention: 1x1 reduce, then row / column convs
        self.axial_attn = nn.Sequential(
            nn.Conv2d(in_dim, in_dim // 2, 1),
            nn.Conv2d(in_dim // 2, in_dim // 2, (1, 3), padding=(0, 1)),  # row attention
            nn.Conv2d(in_dim // 2, in_dim // 2, (3, 1), padding=(1, 0)),  # column attention
        )
        # 2x upsampling, applied twice in forward to restore resolution
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear')
        # Final fusion of q with the attention features
        self.fusion = nn.Conv2d(in_dim + in_dim // 2, out_dim, 1)

    def forward(self, q, k):
        # Down to 1/4 scale
        low_res_q = self.downsample(self.downsample(q))
        low_res_k = self.downsample(self.downsample(k))
        # Axial attention on the cheap low-resolution sum
        attn_feat = self.axial_attn(low_res_q + low_res_k)
        # Back to full resolution (twice: 1/4 -> 1/2 -> 1/1)
        attn_feat = self.upsample(self.upsample(attn_feat))
        # Odd sizes round down through the pooling; resize back exactly
        if attn_feat.shape[2:] != q.shape[2:]:
            attn_feat = F.interpolate(attn_feat, size=q.shape[2:], mode='bilinear', align_corners=False)
        return self.fusion(torch.cat([q, attn_feat], dim=1))
    
class MSAD(nn.Module):
    """Multi-Scale Axial Distillation: axial attention computed at half
    resolution, upsampled, and fused with the full-resolution input."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        half = in_dim // 2
        self.downsample = nn.AvgPool2d(2)
        # 1x1 reduce, then separable row / column convolutions
        self.axial_attn = nn.Sequential(
            nn.Conv2d(in_dim, half, 1),
            nn.Conv2d(half, half, (1, 3), padding=(0, 1)),
            nn.Conv2d(half, half, (3, 1), padding=(1, 0)),
        )
        self.upsample = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
        )
        self.fusion = nn.Conv2d(in_dim + half, out_dim, 1)

    def forward(self, x):
        target = x.size()[2:]
        attn = self.upsample(self.axial_attn(self.downsample(x)))
        # Odd input sizes round down through the pooling; resize back exactly
        if attn.shape[2:] != target:
            attn = F.interpolate(attn, size=target, mode='bilinear', align_corners=False)
        return self.fusion(torch.cat([x, attn], dim=1))

class Bottleneck_MSAD(nn.Module):
    """Standard bottleneck whose output is refined by an MSAD block."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        self.msad = MSAD(c2, c2)
        # Residual only when input/output widths match
        self.add = shortcut and c1 == c2

    def forward(self, x):
        out = self.msad(self.cv2(self.cv1(x)))
        return x + out if self.add else out

class C2f_MSAD(nn.Module):
    """C2f variant whose inner bottlenecks are Bottleneck_MSAD blocks."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)
        self.m = nn.ModuleList(
            Bottleneck_MSAD(self.c, self.c, shortcut, g, e=1.0) for _ in range(n)
        )

    def forward(self, x):
        # Split, chain the bottlenecks on the last branch, concat, project
        branches = list(self.cv1(x).chunk(2, 1))
        for block in self.m:
            branches.append(block(branches[-1]))
        return self.cv2(torch.cat(branches, 1))
    


class simpleDepthwiseAxialConv(nn.Module):
    """Depthwise axial convolution: a horizontal then a vertical depthwise
    conv, plus an identity shortcut."""

    def __init__(self, dim, kernel_size):
        super().__init__()
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        assert len(kernel_size) == 2, "Kernel size must be an integer or a tuple of length 2"
        k_row, k_col = kernel_size
        # (1, k) sweeps along rows; (k, 1) sweeps along columns
        self.row_conv = nn.Conv2d(dim, dim, (1, k_row), padding=(0, k_row // 2), groups=dim)
        self.col_conv = nn.Conv2d(dim, dim, (k_col, 1), padding=(k_col // 2, 0), groups=dim)

    def forward(self, x):
        rows = self.row_conv(x)
        return x + self.col_conv(rows)
    


class DynamicAxialRouter(nn.Module):
    """Dynamically-routed axial attention.

    A small router head produces per-sample softmax weights over three
    candidate paths (identity, lightweight axial attention, multi-scale
    axial MSAD), and the forward pass returns their weighted sum.

    NOTE(review): the deploy/re-parameterization path is broken as written —
    `self.dim` is never assigned in __init__ and `_get_path_weight` is not
    defined anywhere in this file, so `_merge_paths` raises AttributeError.
    Merging an MSAD path into a single 3x3 conv is also not possible in
    general. Only use deploy=False until this is reworked.
    NOTE(review): the paths list is hard-coded to 3 entries; `num_paths`
    values other than 3 would desynchronize the router from the pool.
    """
    def __init__(self, dim, num_paths=3, deploy=False):
        super().__init__()
        self.deploy = deploy
        self.num_paths = num_paths
        
        # Candidate operation pool
        self.paths = nn.ModuleList([
            nn.Sequential(                  # path 0: identity mapping
                nn.Identity()
            ),
            nn.Sequential(                  # path 1: lightweight axial attention
                simpleDepthwiseAxialConv(dim, kernel_size=(1,5)),
                nn.Conv2d(dim, dim, 1)
            ),
            nn.Sequential(                  # path 2: multi-scale axial
                MSAD(dim, dim)
            )
        ])
        
        # Dynamic router: global average pool -> linear -> one logit per path
        self.router = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(dim, num_paths, bias=False)
        )
        
        # Deployment-time parameter fusion (see NOTE above: currently broken)
        if deploy:
            self.reparam = self._merge_paths()

    def forward(self, x):
        # Deployment: single fused conv
        if self.deploy:
            return self.reparam(x)
        
        # Per-sample softmax weights over the candidate paths
        logits = self.router(x)
        weights = F.softmax(logits, dim=-1)
        
        # Weighted fusion across all paths
        out = 0
        for i in range(self.num_paths):
            path_weight = weights[:, i].view(-1,1,1,1)
            out += path_weight * self.paths[i](x)
        return out

    def _merge_paths(self):
        """Structural re-parameterization (called at deployment).

        NOTE(review): relies on undefined `self.dim` and `_get_path_weight`,
        and the identity kernel below is not a valid 3x3 identity conv
        weight; this method is currently dead/broken code.
        """
        merged_conv = nn.Conv2d(self.dim, self.dim, 3, padding=1)
        
        # Merge the parameters of every path
        with torch.no_grad():
            identity_weight = torch.eye(self.dim).view(1,1,self.dim,self.dim)
            path1_weight = self._get_path_weight(self.paths[1])
            path2_weight = self._get_path_weight(self.paths[2])
            
            merged_weight = identity_weight + path1_weight + path2_weight
            merged_conv.weight.data = merged_weight
            
        return merged_conv

class UltraBottleneck(nn.Module):
    """Bottleneck pairing a 1x1 Conv with a DynamicAxialRouter; residual
    shortcut when the channel counts match."""

    def __init__(self, c1, c2, deploy=False):
        super().__init__()
        self.conv = nn.Sequential(
            Conv(c1, c2, 1),
            DynamicAxialRouter(c2, deploy=deploy),
        )
        self.shortcut = c1 == c2

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.shortcut else out

class C2f_DynamicAxialRouter(nn.Module):
    """C2f-style block whose inner bottlenecks are UltraBottleneck modules."""

    def __init__(self, c1, c2, n=3, deploy=False):
        super().__init__()
        self.c = c2 // 2
        self.cv1 = Conv(c1, 2 * self.c, 1)
        self.m = nn.ModuleList(UltraBottleneck(self.c, self.c, deploy) for _ in range(n))
        self.cv2 = Conv((2 + n) * self.c, c2, 1)

    def forward(self, x):
        # Split, chain the bottlenecks on the last branch, concat, project
        branches = list(self.cv1(x).chunk(2, 1))
        for block in self.m:
            branches.append(block(branches[-1]))
        return self.cv2(torch.cat(branches, 1))


#-------------------------------------------LightChannelAttention模块---------------------------------------------

class LightChannelAttention(nn.Module):
    """Multi-head attention with lightweight bottleneck QKV projections and a
    zero-initialized residual gate.

    NOTE(review): despite the name, the attention matrix is computed along the
    width axis (per head, per image row) — confirm this matches the intended
    "channel attention" design. The residual add also requires
    out_dim == in_dim.
    """

    def __init__(self, in_dim, out_dim, reduction_ratio=4, num_heads=8):
        super().__init__()
        assert out_dim % num_heads == 0, "out_dim必须能被num_heads整除"

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_heads = num_heads
        self.head_dim = out_dim // num_heads
        self.reduction_ratio = reduction_ratio

        # Bottleneck width for the projections
        self.mid_dim = max(in_dim // reduction_ratio, 8)

        # Query projection (bottleneck -> expand, depthwise-separable style)
        self.q_proj = nn.Sequential(
            nn.Conv2d(in_dim, self.mid_dim, 1),
            nn.BatchNorm2d(self.mid_dim),
            nn.GELU(),
            nn.Conv2d(self.mid_dim, out_dim, 1)
        )

        # Joint key/value projection, split later with chunk()
        self.kv_proj = nn.Sequential(
            nn.Conv2d(in_dim, 2 * self.mid_dim, 1),
            nn.GELU(),
            nn.Conv2d(2 * self.mid_dim, 2 * out_dim, 1)
        )

        # Output projection and zero-initialized residual scale
        self.out_conv = nn.Conv2d(out_dim, out_dim, 1)
        self.gamma = nn.Parameter(torch.zeros(1))

        self._init_weights()

    def _init_weights(self):
        """Kaiming-initialize every conv weight; zero the biases."""
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_normal_(mod.weight, mode='fan_out')
                if mod.bias is not None:
                    nn.init.constant_(mod.bias, 0)

    def forward(self, x):
        batch, _, height, width = x.shape

        # Project to queries, keys, values
        q = self.q_proj(x)                          # (B, C_out, H, W)
        k, v = self.kv_proj(x).chunk(2, dim=1)      # each (B, C_out, H, W)

        # Split channels into heads and move head-dim last
        head_shape = (batch, self.num_heads, self.head_dim, height, width)
        q = q.view(head_shape).permute(0, 1, 3, 4, 2)   # (B, h, H, W, D)
        k = k.view(head_shape).permute(0, 1, 3, 2, 4)   # (B, h, H, D, W)
        v = v.view(head_shape).permute(0, 1, 3, 4, 2)   # (B, h, H, W, D)

        # Scaled dot-product attention along the width axis: (B, h, H, W, W)
        scores = torch.matmul(q, k) / (self.head_dim ** 0.5)
        weights = F.softmax(scores, dim=-1)

        # Apply the attention to the values
        attended = torch.matmul(weights, v)             # (B, h, H, W, D)

        # Merge heads back to (B, C_out, H, W)
        merged = attended.permute(0, 1, 4, 2, 3).reshape(
            batch, self.out_dim, height, width)

        # Gated residual: gamma starts at zero, so output == input initially
        return self.gamma * self.out_conv(merged) + x



##############--------------------------------PMSFA模块---------------------------------------------


class C2f(nn.Module):
    """Faster Implementation of CSP Bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """Initialize with c1 input / c2 output channels and n Bottleneck blocks."""
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(
            Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0)
            for _ in range(n)
        )

    def forward(self, x):
        """Split, chain the bottlenecks on the last branch, concat, project."""
        branches = list(self.cv1(x).chunk(2, 1))
        for block in self.m:
            branches.append(block(branches[-1]))
        return self.cv2(torch.cat(branches, 1))

    def forward_split(self, x):
        """Same as forward() but uses split() instead of chunk()."""
        branches = list(self.cv1(x).split((self.c, self.c), 1))
        for block in self.m:
            branches.append(block(branches[-1]))
        return self.cv2(torch.cat(branches, 1))
    
class Bottleneck(nn.Module):
    """Standard bottleneck."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """Two-conv bottleneck with an optional residual shortcut."""
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, k[0], 1)
        self.cv2 = Conv(hidden, c2, k[1], 1, g=g)
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Apply both convolutions, adding the input when channels match."""
        out = self.cv2(self.cv1(x))
        return x + out if self.add else out
    
class PMSFA(nn.Module):
    """Progressive multi-scale feature aggregation: half the features pass
    through a 5x5 depthwise conv, a quarter through a 7x7 depthwise conv,
    then all pieces are re-concatenated, fused with a 1x1 conv, and added
    back to the input."""

    def __init__(self, inc) -> None:
        super().__init__()
        self.conv1 = Conv(inc, inc, k=3)
        self.conv2 = Conv(inc // 2, inc // 2, k=5, g=inc // 2)
        self.conv3 = Conv(inc // 4, inc // 4, k=7, g=inc // 4)
        self.conv4 = Conv(inc, inc, 1)

    def forward(self, x):
        stage1 = self.conv1(x)
        s1_head, s1_tail = stage1.chunk(2, dim=1)      # inc//2 each
        stage2 = self.conv2(s1_head)
        s2_head, s2_tail = stage2.chunk(2, dim=1)      # inc//4 each
        stage3 = self.conv3(s2_head)
        # Reassemble inc//4 + inc//4 + inc//2 = inc channels
        fused = self.conv4(torch.cat([stage3, s2_tail, s1_tail], dim=1))
        return fused + x

class CSP_PMSFA(C2f):
    """C2f whose inner bottlenecks are replaced by PMSFA blocks."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        self.m = nn.ModuleList(PMSFA(self.c) for _ in range(n))




# ---------------------------- 可重参数化改进 ----------------------------
# NOTE(review): this class shadows the RepConv imported from .conv at the top
# of the file; every later use of "RepConv" in this module resolves here.
class RepConv(nn.Module):
    """Re-parameterizable convolution (multi-branch training, single-branch
    inference).

    Training forward: act(alpha * BN(conv3x3(x)) + beta * BN(conv1x1(x))).
    After reparameterize(), inference uses a single fused 3x3 conv that
    reproduces the eval-mode training computation.
    """

    def __init__(self, c1, c2, k=3):
        super().__init__()
        # Training-time branches (Conv = conv + BN; activation applied after mix)
        self.conv3x3 = Conv(c1, c2, k=k, act=False)
        self.conv1x1 = Conv(c1, c2, k=1, act=False)
        self.act = nn.SiLU()

        # Learnable branch-mixing coefficients
        self.alpha = nn.Parameter(torch.ones(1))
        self.beta = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        if self.training:
            return self.act(
                self.alpha * self.conv3x3(x) +
                self.beta * self.conv1x1(x)
            )
        # Inference path requires reparameterize() to have been called
        return self.act(self.fused_conv(x))

    @staticmethod
    def _fuse_conv_bn(module):
        """Fold a Conv block's BatchNorm into its conv; return (weight, bias)."""
        w = module.conv.weight
        bn = module.bn
        std = (bn.running_var + bn.eps).sqrt()
        scale = bn.weight / std
        return w * scale.reshape(-1, 1, 1, 1), bn.bias - bn.running_mean * scale

    def reparameterize(self):
        """Merge both branches (BN folding plus alpha/beta scaling) into one conv.

        The original implementation summed the raw conv weights without
        folding BatchNorm and without applying alpha/beta to the kernels, so
        the fused conv did not match the training-time computation.
        """
        with torch.no_grad():
            w3, b3 = self._fuse_conv_bn(self.conv3x3)
            w1, b1 = self._fuse_conv_bn(self.conv1x1)
            # Assumes k == 3: the 1x1 kernel is zero-padded to 3x3 to align
            kernel = self.alpha * w3 + self.beta * self._pad_1x1_to_3x3(w1)
            bias = self.alpha * b3 + self.beta * b1
        self.fused_conv = nn.Conv2d(
            self.conv3x3.conv.in_channels,
            self.conv3x3.conv.out_channels,
            kernel_size=3,
            padding=1
        )
        self.fused_conv.weight.data = kernel
        self.fused_conv.bias.data = bias.reshape(-1)

    @staticmethod
    def _pad_1x1_to_3x3(weight):
        """Zero-pad a 1x1 conv kernel to 3x3."""
        return F.pad(weight, [1, 1, 1, 1])

class RepPMSFA(nn.Module):
    """Re-parameterizable PMSFA block.

    Mirrors PMSFA's progressive scheme with RepConv stages: the full tensor
    goes through conv7 (inc channels), half of that result through conv5
    (inc//2), a quarter of that through conv3 (inc//4); the untouched pieces
    are carried along and everything is fused by a 1x1 conv with a residual.

    The original forward fed conv5 an inc//4 tensor and conv7 an inc//2
    tensor (channel mismatches that crash) and concatenated 2.5*inc channels
    into a fuse conv expecting inc; this version restores consistent shapes.
    """

    def __init__(self, inc):
        super().__init__()
        # Progressive stages, named after the channel fraction they process
        self.conv3 = RepConv(inc // 4, inc // 4)
        self.conv5 = RepConv(inc // 2, inc // 2)
        self.conv7 = RepConv(inc, inc)
        self.fuse_conv = Conv(inc, inc, 1)

    def forward(self, x):
        # Stage 1: full-width processing, then split in half
        out7 = self.conv7(x)
        half_a, half_b = out7.chunk(2, dim=1)
        # Stage 2: process one half, split again
        out5 = self.conv5(half_a)
        quarter_a, quarter_b = out5.chunk(2, dim=1)
        # Stage 3: process one quarter
        out3 = self.conv3(quarter_a)
        # Reassemble inc//4 + inc//4 + inc//2 = inc channels, fuse, residual
        return self.fuse_conv(torch.cat([out3, quarter_b, half_b], dim=1)) + x

class RepCSP_PMSFA(C2f):
    """Re-parameterizable CSP-PMSFA (efficient inference variant)."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        self.m = nn.ModuleList(RepPMSFA(self.c) for _ in range(n))

    def reparameterize(self):
        """Fuse the training-time branches of every RepPMSFA block."""
        for block in self.m:
            if isinstance(block, RepPMSFA):
                block.conv3.reparameterize()
                block.conv5.reparameterize()
                block.conv7.reparameterize()

# ---------------------------- 动态通道分配 ----------------------------
class ChannelGate(nn.Module):
    """Learnable channel gate that splits features into an active part
    (x * mask) and a passive part (x * (1 - mask)); their sum reconstructs
    the input."""

    def __init__(self, inc):
        super().__init__()
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(inc, inc, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        weight = self.gate(x)  # per-channel gate in (0, 1)
        return x * weight, x * (1 - weight)

class DynamicPMSFA(nn.Module):
    """PMSFA with a learnable channel gate: the active portion runs through a
    shrinking RepConv cascade while the passive portion bypasses it."""

    def __init__(self, inc):
        super().__init__()
        self.gate = ChannelGate(inc)
        # Cascade that halves the channel count at each stage
        self.conv1 = RepConv(inc, inc // 2)
        self.conv2 = RepConv(inc // 2, inc // 4)
        self.conv3 = RepConv(inc // 4, inc // 8)
        # Fuses the deep features (inc//8) with the passive path (inc)
        self.fuse = Conv(inc // 8 + inc, inc, 1)

    def forward(self, x):
        active, passive = self.gate(x)
        deep = self.conv3(self.conv2(self.conv1(active)))
        return self.fuse(torch.cat([deep, passive], dim=1)) + x

######--------------------------PMSFA模块 end--------------------------------------