import torch
import torch.nn as nn
import torch.nn.functional as F
# from mmcv.cnn import build_norm_layer
import math
from .MSAD import AMHA
from .BiGatedFusion import LightBiGatedFusion
from .conv import GhostConv, Conv, DWConv
from einops.layers.torch import Rearrange
from .DCMA import EfficientDCMA
from .block import C3
from .MSAD import ChannelAttention


class BiGhostConv(nn.Module):
    """Ghost-style convolution block.

    A primary branch (project C3 block) produces ``out_ch // ratio``
    channels; a cheap grouped convolution expands the remainder, and the
    concatenation is trimmed to exactly ``out_ch`` channels.
    """

    def __init__(self, in_ch, out_ch, kernel_size=1, stride=1, ratio=2, dw_size=3, groups=1):
        super().__init__()
        # Clamp the group count so it always divides the input channels.
        self.groups = max(math.gcd(groups, in_ch), 1)
        self.ratio = ratio
        init_ch = out_ch // ratio

        # The primary branch channels must split evenly across the groups.
        assert init_ch % self.groups == 0, f"主分支通道数{init_ch}必须能被分组数{self.groups}整除"

        # Primary branch: project C3 block.
        self.primary_conv = C3(in_ch, init_ch, kernel_size, stride)

        # Cheap branch: grouped conv whose group count is tied to the
        # primary branch grouping.
        self.cheap_groups = max(self.groups // 2, 1)
        cheap_out = init_ch * (ratio - 1)
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(
                init_ch,
                cheap_out,
                dw_size,
                1,
                dw_size // 2,
                groups=self.cheap_groups * (init_ch // self.groups),
                bias=False,
            ),
            nn.BatchNorm2d(cheap_out),
            nn.ReLU(inplace=True),
        )
        self.oup = out_ch

    def forward(self, x):
        primary = self.primary_conv(x)
        cheap = self.cheap_operation(primary)
        merged = torch.cat([primary, cheap], dim=1)
        # Trim to the exact requested channel count.
        return merged[:, :self.oup, :, :]

    
# class SparseRouter(nn.Module):
#     """
#     动态稀疏路由模块（Dynamic Sparse Router with Shared Features, DS-Router）
#     核心改进：
#       1. 路由决策基于共享特征而非原始输入，提升空间感知能力
#       2. 使用两层MLP增强路由控制器决策能力
#       3. 采用Kaiming初始化保证训练稳定性
#       4. 专家聚合过程全量向量化计算，兼顾效率与表达力
#     参数：
#       - in_channels: 输入通道数
#       - out_channels: 输出通道数
#       - num_experts: 专家数量（推荐设置为4）
#       - top_k: 每个样本激活的专家数（推荐设置为2）
#     """
#     def __init__(self, in_channels, out_channels, num_experts=4, top_k=2):
#         super(SparseRouter, self).__init__()
#         self.in_channels = in_channels
#         self.out_channels = out_channels
#         self.num_experts = num_experts
#         self.top_k = top_k
        
#         # 共享特征提取层（保持空间信息）
#         self.shared_conv = nn.Sequential(
#             nn.Conv2d(in_channels, in_channels, kernel_size=3, 
#                       padding=1, groups=in_channels, bias=False),
#             nn.BatchNorm2d(in_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=False),  # 增强特征融合
#             nn.BatchNorm2d(in_channels),
#             nn.ReLU(inplace=True)
#         )
        
#         # 专家网络（并行结构）
#         self.experts = nn.ModuleList([
#             nn.Sequential(
#                 nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
#                 nn.BatchNorm2d(out_channels),
#                 nn.ReLU(inplace=True)
#             ) for _ in range(num_experts)
#         ])
        
#         # 路由控制器（两层MLP增强决策能力）
#         self.router = nn.Sequential(
#             nn.AdaptiveAvgPool2d(1),
#             nn.Flatten(),
#             nn.Linear(in_channels, in_channels // 2),
#             nn.ReLU(inplace=True),
#             nn.Linear(in_channels // 2, num_experts)
#         )
        
#         # 残差连接条件（仅当输入输出通道数相同使用）
#         self.use_residual = (in_channels == out_channels)
        
#         # 参数初始化
#         for m in self.modules():
#             if isinstance(m, nn.Conv2d):
#                 nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
#             elif isinstance(m, nn.BatchNorm2d):
#                 nn.init.constant_(m.weight, 1)
#                 nn.init.constant_(m.bias, 0)
#             elif isinstance(m, nn.Linear):
#                 nn.init.normal_(m.weight, 0, 0.01)
#                 nn.init.constant_(m.bias, 0)

#     def forward(self, x):
#         B, C, H, W = x.shape
#         assert C == self.in_channels, f"输入通道数不匹配: 预期{self.in_channels}, 实际{C}"
        
#         # 共享特征提取
#         shared_feat = self.shared_conv(x)
        
#         # 路由决策（基于共享特征计算专家得分）
#         routing_logits = self.router(shared_feat)  # 形状 [B, num_experts]
        
#         # Top-k 专家选择：选取每个样本中得分最高的 top_k 专家
#         topk_logits, topk_indices = routing_logits.topk(self.top_k, dim=1)
#         mask = torch.full_like(routing_logits, float('-inf'))
#         mask.scatter_(1, topk_indices, topk_logits)
#         expert_weights = F.softmax(mask, dim=1)  # 归一化后的权重，形状 [B, num_experts]
        
#         # 专家并行计算：计算每个专家的输出
#         expert_outputs = []
#         for expert in self.experts:
#             expert_outputs.append(expert(shared_feat))
#         expert_outputs = torch.stack(expert_outputs, dim=1)  # 形状 [B, num_experts, out_channels, H, W]
        
#         # 加权融合专家输出
#         expert_weights = expert_weights.view(B, self.num_experts, 1, 1, 1)
#         fused_output = (expert_outputs * expert_weights).sum(dim=1)
        
#         # 残差连接（仅当in_channels==out_channels时使用）
#         if self.use_residual:
#             fused_output += x
        
#         return fused_output

#     def flops(self, H, W):
#         """FLOPs计算（单样本，理论估算）"""
#         flops = 0
#         # 共享卷积
#         flops += H * W * self.in_channels * 3 * 3  # Depthwise convolution
#         flops += H * W * self.in_channels * self.in_channels * 1 * 1  # Pointwise convolution
        
#         # 路由网络
#         flops += self.in_channels  # AdaptiveAvgPool2d（计算量较低）
#         flops += self.in_channels * (self.in_channels // 2) + (self.in_channels // 2) * self.num_experts  # 两层全连接
        
#         # 专家网络
#         flops += self.num_experts * H * W * self.in_channels * self.out_channels
        
#         return flops
    

# class SparseRouter(nn.Module):
#     """动态计算路径选择器（支持通道维度调整），固定阈值调整"""
#     def __init__(self, in_dim, out_dim, num_experts=4, threshold=0.5):
#         super().__init__()
#         # 参数校验
#         assert in_dim > 0 and out_dim > 0, "输入输出维度必须大于0"
#         assert num_experts >= 1, "专家数量至少为1"
        
#         self.in_dim = in_dim
#         self.out_dim = out_dim
        
#         # 专家网络（支持通道维度变换）
#         self.experts = nn.ModuleList([
#             nn.Sequential(
#                 # 深度可分离卷积
#                 nn.Conv2d(in_dim, in_dim, 3, padding=1, groups=in_dim),
#                 # 通道调整卷积
#                 nn.Conv2d(in_dim, out_dim, 1)
#             ) for _ in range(num_experts)
#         ])
        
#         # 路由控制器
#         self.router = nn.Sequential(
#             nn.AdaptiveAvgPool2d(1),
#             nn.Flatten(),
#             nn.Linear(in_dim, num_experts*2),  # 输入维度修正为in_dim
#             nn.Tanh()
#         )
#         self.threshold = threshold

#     def forward(self, x):
#         B, C, H, W = x.shape
#         # 输入通道校验
#         assert C == self.in_dim, f"输入通道数{C}与初始化参数in_dim={self.in_dim}不匹配"
        
#         # 生成路由权重 [B, num_experts, 2]
#         routing_weights = self.router(x).view(B, -1, 2)
        
#         # 稀疏激活掩码
#         active_mask = (routing_weights[:, :, 0] > self.threshold).float()  # [B, num_experts]
#         expert_weights = torch.softmax(routing_weights[:, :, 1], dim=-1)    # [B, num_experts]
        
#         # 动态计算（内存优化版）
#         outputs = torch.zeros(B, self.out_dim, H, W, device=x.device)
#         total_active = 1e-6  # 防止除零
        
#         for i, expert in enumerate(self.experts):
#             # 计算当前专家权重 [B]
#             weight = expert_weights[:, i] * active_mask[:, i]
            
#             if weight.sum() > 0:
#                 # 专家计算并加权
#                 expert_out = expert(x)  # [B, out_dim, H, W]
#                 weighted_out = expert_out * weight.view(B, 1, 1, 1)
                
#                 outputs += weighted_out
#                 total_active += weight.sum()
        
#         return outputs / total_active



class SparseRouter(nn.Module):
    """Dynamic sparse Mixture-of-Experts router.

    A depthwise 3x3 + pointwise 1x1 stem compresses the input to C/4
    channels, a small MLP scores the experts per sample, and only the
    top-k experts contribute to the softmax-weighted fusion.

    Args:
        in_channels: input channel count (should be >= 4 so that the C/4
            compression keeps at least one channel).
        out_channels: output channel count.
        num_experts: number of parallel 1x1-conv experts (>= 1).
        top_k: experts activated per sample (clamped to num_experts).
    """
    def __init__(self, in_channels, out_channels, num_experts=4, top_k=2):
        super().__init__()
        # Parameter validation.
        if num_experts < 1:
            raise ValueError(f"num_experts({num_experts})必须≥1")

        # Clamp top_k so it never exceeds the expert count.
        self.num_experts = num_experts
        self.top_k = min(top_k, num_experts)

        self.in_channels = in_channels
        self.out_channels = out_channels

        # Shared stem: depthwise 3x3, then a 1x1 that compresses to C/4.
        self.shared_conv = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, 
                     padding=1, groups=in_channels, bias=False),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, in_channels//4, kernel_size=1, bias=False),  # channel compression
            nn.BatchNorm2d(in_channels//4),
            nn.ReLU(inplace=True)
        )
        
        # Lightweight experts: each a 1x1 conv on the compressed stem.
        self.experts = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_channels//4, out_channels, kernel_size=1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True)
            ) for _ in range(num_experts)
        ])
        
        # Router: GAP -> two-layer MLP -> one logit per expert.
        self.router = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(in_channels//4, max(2, num_experts//2)),  # dynamic hidden width
            nn.ReLU(inplace=True),
            nn.Linear(max(2, num_experts//2), num_experts)
        )
        
        # Residual only when the shapes allow it.
        self.use_residual = (in_channels == out_channels)
        
        self._init_weights()

    def _init_weights(self):
        """Kaiming init for convs, unit/zero for norms, small-normal for FCs."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Route x through the top-k experts and fuse their outputs.

        Returns a [B, out_channels, H, W] tensor (with an identity
        residual added when in_channels == out_channels).
        """
        B, C, H, W = x.shape
        
        shared_feat = self.shared_conv(x)
        
        # Per-sample expert logits: [B, num_experts].
        routing_logits = self.router(shared_feat)
        
        effective_k = min(self.top_k, self.num_experts)
        
        # Keep top-k logits, push the rest to -inf so softmax zeros them.
        topk_logits, topk_indices = routing_logits.topk(effective_k, dim=1)
        mask = torch.full_like(routing_logits, float('-inf'))
        mask.scatter_(1, topk_indices, topk_logits)
        expert_weights = F.softmax(mask, dim=1)  # [B, num_experts]
        
        # BUG FIX: run every expert on the full shared feature map. The
        # previous repeat/view + shared_feat[i*B:(i+1)*B] slicing paired
        # experts with the wrong samples whenever B != num_experts.
        expert_outputs = torch.stack(
            [expert(shared_feat) for expert in self.experts], dim=1
        )  # [B, num_experts, C_out, H, W]
        
        # Weighted fusion over the expert axis.
        expert_weights = expert_weights.view(B, self.num_experts, 1, 1, 1)
        fused_output = (expert_outputs * expert_weights).sum(dim=1)  # [B, C_out, H, W]
        
        if self.use_residual:
            fused_output = fused_output + x
        
        return fused_output

class OmniFusion(nn.Module):
    """Full-context fusion: a stacked local-conv path and a global
    attention path combined by a learned two-way gate.

    NOTE(review): the gated sum and the trailing ``+ x`` residual only
    type-check when out_dim == in_dim — confirm with callers.
    """
    def __init__(self, in_dim, out_dim, num_blocks=2, head_dim=16, expansion=4):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_blocks = num_blocks
        
        # Local branch: stacked BiGhostConv blocks.
        # BUG FIX: after the first block the feature carries out_dim
        # channels, so later blocks must accept out_dim as input. The
        # original passed in_dim to every block, which crashed for
        # num_blocks >= 2 whenever in_dim != out_dim.
        self.local_convs = nn.ModuleList()
        for i in range(num_blocks):
            block_in = in_dim if i == 0 else out_dim
            block = nn.Sequential(
                BiGhostConv(block_in, out_dim),
                nn.GELU()  # optional non-linearity
            )
            self.local_convs.append(block)
        
        # Global branch: 1x1 conv -> axial attention -> channel attention.
        self.global_att_conv = nn.Conv2d(in_dim, in_dim, 1)
        self.transformer = LinearAxialAttention(in_dim=in_dim, head_dim=head_dim)
        self.channel = ChannelAttention(in_dim=in_dim, out_dim=out_dim)

        # Dynamic gate producing two softmax weights (local vs global).
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_dim, in_dim//8, 1),
            nn.ReLU(),
            nn.Conv2d(in_dim//8, 2, 1),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        # Local path (block by block).
        local_feat = x
        for block in self.local_convs:
            local_feat = block(local_feat)  # [B, out_dim, H, W]
        
        # Global path.
        global_feat = self.global_att_conv(x)
        global_feat = self.transformer(global_feat)
        global_feat = self.channel(global_feat)

        # Gated fusion plus identity residual.
        gate_weights = self.gate(x)
        return gate_weights[:,0:1]*local_feat + gate_weights[:,1:2]*global_feat + x

    


    


###############--------------------------------------------------------------###############


class DynamicSparseAttention(nn.Module):
    """Lightweight attention over channel groups computed on a spatially
    down-sampled feature map, then upsampled back."""

    def __init__(self, dim, num_heads=4, sr_ratio=4):
        super().__init__()
        self.num_heads = num_heads
        self.sr_ratio = sr_ratio
        self.scale = (dim // num_heads) ** -0.5
        
        # Spatial reduction: strided conv when sr_ratio > 1, else identity.
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio) if sr_ratio >1 else nn.Identity()
        self.qkv = nn.Conv2d(dim, dim*3, kernel_size=1)
        self.proj = nn.Sequential(
            nn.Conv2d(dim, dim//4, 1),  # channel squeeze
            nn.ReLU(),
            nn.Conv2d(dim//4, dim, 1)
        )
        
        # Group normalization, one group per head.
        self.gn = nn.GroupNorm(num_heads, dim)

    def forward(self, x):
        B, C, H, W = x.shape
        reduced = self.sr(x)
        # BUG FIX: read the reduced spatial size from the tensor itself.
        # The old expression `H//self.sr_ratio * W//self.sr_ratio` parsed
        # as ((H//r)*W)//r and broke the reshape whenever W % r != 0.
        h2, w2 = reduced.shape[-2:]
        qkv = self.qkv(reduced).chunk(3, dim=1)  # QKV on the reduced map
        
        # Split channels into heads; tokens are the reduced pixels.
        q, k, v = map(lambda t: t.reshape(B, self.num_heads, -1, h2 * w2), qkv)
        
        # NOTE(review): with this layout the d x d attention map mixes the
        # per-head channel dimension, not the spatial tokens — looks
        # intentional, confirm with the authors.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        
        out = (attn @ v).reshape(B, C, h2, w2)
        out = F.interpolate(out, (H,W), mode='bilinear')  # restore resolution
        return self.gn(self.proj(out)) + out
    
    

    

class mergeblock(nn.Module):
    """Stack of residual fusion layers sharing one OmniFusion module.

    BUG FIX: the original referenced ``self.merge2`` and ``self.layers``
    while both assignments were commented out, so construction raised
    AttributeError. The commented-out design (a single shared OmniFusion
    wrapped into each layer) is restored here.
    """
    def __init__(self, in_dim, out_dim, num_blocks=2):
        super().__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.merge2 = OmniFusion(in_dim, out_dim)
        self.layers = nn.ModuleList()
        # Each layer wraps the SAME merge2 instance (weights shared).
        for i in range(num_blocks):
            layer = nn.Sequential(
                self.merge2,
            )
            self.layers.append(layer)

    def forward(self, x):
        # Residual per layer; requires layer output shape == input shape.
        for layer in self.layers:
            x = layer(x) + x
        return x



# class LinearAxialAttention(nn.Module):
#     def __init__(self, in_dim, head_dim, eps=1e-6):
#         super().__init__()
#         self.head_dim = head_dim
#         self.eps = eps
#         self.q_proj = nn.Conv2d(in_dim, 2 * head_dim, 1)
#         self.kv_proj = nn.Conv2d(in_dim, 2 * head_dim, 1)
#         self.scale = nn.Parameter(torch.ones(1))

#     def forward(self, x):
#         B, C, H, W = x.shape
#         q = F.gelu(self.q_proj(x))
#         q_row, q_col = q.chunk(2, dim=1)
#         kv = F.gelu(self.kv_proj(x))
#         k_row, v_row = kv.chunk(2, dim=1)
#         k_col, v_col = kv.chunk(2, dim=1)

#         # --- 行注意力计算 ---
#         q_row = q_row.permute(0, 2, 3, 1)  # (B, H, W, D)
#         k_row = k_row.permute(0, 2, 3, 1)
#         v_row = v_row.permute(0, 2, 3, 1)

#         # 分子：Q(K^T V) 聚合同一行所有位置的k和v
#         kv_row = torch.einsum('bhwd,bhwe->bhde', k_row, v_row)
#         numerator_row = torch.einsum('bhwd,bhde->bhwe', q_row, kv_row)

#         # 分母：Q(sum_K) 同一行所有位置k的和
#         sum_k_row = torch.sum(k_row, dim=2)  # (B, H, D)
#         denominator_row = torch.einsum('bhwd,bhd->bhw', q_row, sum_k_row)
#         denominator_row = denominator_row.unsqueeze(-1)  # (B, H, W, 1)

#         out_row = (numerator_row / (denominator_row + self.eps)) * self.scale
#         out_row = out_row.permute(0, 3, 1, 2)  # (B, D, H, W)

#         # --- 列注意力计算 ---
#         k_col = k_col.permute(0, 3, 1, 2)  # (B, W, D, H)
#         v_col = v_col.permute(0, 3, 2, 1)  # (B, W, H, D)
#         q_col = q_col.permute(0, 3, 2, 1)  # (B, W, H, D)

#         # 计算KV乘积（沿H轴求和）
#         kv_col = torch.einsum('bwdh,bwhd->bwd', k_col, v_col)  # (B, W, D)
#         numerator_col = torch.einsum('bwhd,bwd->bwhd', q_col, kv_col)  # (B, W, H, D)

#         # 分母计算
#         sum_k_col = torch.sum(k_col, dim=3)  # (B, W, D)
#         denominator_col = torch.einsum('bwhd,bwd->bwh', q_col, sum_k_col).unsqueeze(-1)

#         out_col = (numerator_col / (denominator_col + self.eps)) * self.scale
#         out_col = out_col.permute(0, 3, 2, 1)  # (B, D, H, W)

#         return out_row + out_col
    
class Bottleneck(nn.Module):
    """Standard bottleneck."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
        """Build a bottleneck from input/output channels, an optional
        residual shortcut, conv group count, kernel sizes and expansion.
        """
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, k[0], 1)
        self.cv2 = Conv(hidden, c2, k[1], 1, g=g)
        # Residual only when shapes match and the caller asked for it.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        """Apply the two convolutions, adding the residual when enabled."""
        out = self.cv2(self.cv1(x))
        if self.add:
            return x + out
        return out


# class FocusC2f(nn.Module):
#     """Faster Implementation of CSP Bottleneck with 2 convolutions."""

#     def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
#         """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
#         expansion.
#         """
#         super().__init__()
#         self.c = int(c2 * e)  # hidden channels
#         self.cv1 = Conv(c1, 2 * self.c, 1, 1)
#         self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
#         self.m = nn.ModuleList(SparseRouter(self.c, self.c, num_experts=n) for _ in range(n))
#         self.attn = AMHA(c2, c2)

#     def forward(self, x):
#         """Forward pass through C2f layer."""
#         y = list(self.cv1(x).chunk(2, 1))
#         y.extend(m(y[-1]) for m in self.m)
#         return self.attn(self.cv2(torch.cat(y, 1)))

#     def forward_split(self, x):
#         """Forward pass using split() instead of chunk()."""
#         y = list(self.cv1(x).split((self.c, self.c), 1))
#         y.extend(m(y[-1]) for m in self.m)
#         return self.cv2(torch.cat(y, 1))
    
# class Bottleneck(nn.Module):
#     """Standard bottleneck."""

#     def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):
#         """Initializes a bottleneck module with given input/output channels, shortcut option, group, kernels, and
#         expansion.
#         """
#         super().__init__()
#         c_ = int(c2 * e)  # hidden channels
#         self.cv1 = Conv(c1, c_, k[0], 1)
#         self.cv2 = Conv(c_, c2, k[1], 1, g=g)
#         self.add = shortcut and c1 == c2

#     def forward(self, x):
#         """'forward()' applies the YOLO FPN to input data."""
#         return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
    


class FocusC2f(nn.Module):
    """C2f variant whose inner branches pair a SparseRouter with a
    Bottleneck, followed by an AMHA attention head on the fused output.
    """

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__()
        self.c = int(c2 * e)

        safe_n = max(n, 1)  # guarantee at least one branch
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + safe_n) * self.c, c2, 1)

        # Router + bottleneck pairs; top_k never exceeds 2 or the branch count.
        self.m = nn.ModuleList()
        for _ in range(safe_n):
            branch = nn.Sequential(
                SparseRouter(self.c, self.c, num_experts=safe_n, top_k=min(2, safe_n)),
                Bottleneck(self.c, self.c, shortcut, g, e=1.0),
            )
            self.m.append(branch)
        self.attn = AMHA(c2, c2)

    def forward(self, x):
        parts = list(self.cv1(x).chunk(2, 1))
        # Each branch consumes the previous branch's output (chained).
        for branch in self.m:
            parts.append(branch(parts[-1]))
        return self.attn(self.cv2(torch.cat(parts, 1)))

# class Conv(nn.Module):
#     """标准卷积模块"""
#     def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
#         super().__init__()
#         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
#         self.bn = nn.BatchNorm2d(c2)
#         self.act = nn.ReLU(inplace=True)
    
#     def forward(self, x):
#         return self.act(self.bn(self.conv(x)))

# def autopad(k, p=None):
#     """自动填充计算"""
#     if p is None:
#         p = k // 2 if isinstance(k, int) else [x//2 for x in k]
#     return p



class LinearAxialAttention(nn.Module):
    """Axial attention with Performer-style random feature maps.

    Attention is computed independently along rows (W axis) and columns
    (H axis); the softmax kernel is approximated by a random feature map,
    giving linear complexity in the spatial size. Each branch is
    projected back to ``in_dim`` with a 1x1 conv and the two are summed,
    so the output has the same channel count as the input.
    """
    def __init__(self, in_dim, head_dim, nb_features=256, eps=1e-6):
        """
        Args:
            in_dim: input channel count (also the output channel count).
            head_dim: per-branch attention dimension.
            nb_features: random-feature dimension controlling the quality
                of the low-rank softmax approximation.
            eps: numerical-stability constant for the normalizer.
        """
        super().__init__()
        self.in_dim = in_dim
        self.head_dim = head_dim
        self.nb_features = nb_features
        self.eps = eps

        # Row-attention projections.
        self.q_row = nn.Conv2d(in_dim, head_dim, kernel_size=1)
        self.k_row = nn.Conv2d(in_dim, head_dim, kernel_size=1)
        self.v_row = nn.Conv2d(in_dim, head_dim, kernel_size=1)
        
        # Column-attention projections.
        self.q_col = nn.Conv2d(in_dim, head_dim, kernel_size=1)
        self.k_col = nn.Conv2d(in_dim, head_dim, kernel_size=1)
        self.v_col = nn.Conv2d(in_dim, head_dim, kernel_size=1)
        
        # Random projection matrices for the Performer feature maps.
        self.proj_matrix_row = nn.Parameter(torch.randn(head_dim, nb_features))
        self.proj_matrix_col = nn.Parameter(torch.randn(head_dim, nb_features))
        
        self.scale = nn.Parameter(torch.ones(1))

        # Project each branch back from head_dim to in_dim so that the
        # output channel count matches the input.
        self.out_proj_r = nn.Conv2d(head_dim, in_dim, kernel_size=1)
        self.out_proj_c = nn.Conv2d(head_dim, in_dim, kernel_size=1)
    
    def _phi(self, x, proj_matrix):
        """Random feature map: phi(x) = elu(x @ proj) + 1.

        The +1 keeps the features non-negative so they can be used as an
        (unnormalized) kernel for the attention normalizer.

        Args:
            x: tensor of shape (..., D).
            proj_matrix: random projection of shape (D, nb_features).
        Returns:
            Tensor of shape (..., nb_features).
        """
        x_proj = torch.matmul(x, proj_matrix)  # (..., nb_features)
        return F.elu(x_proj) + 1

    def forward(self, x):
        B, C, H, W = x.shape

        # ----- row attention (along the W axis) -----
        q_r = self.q_row(x).permute(0, 2, 3, 1)  # (B, H, W, head_dim)
        k_r = self.k_row(x).permute(0, 2, 3, 1)
        v_r = self.v_row(x).permute(0, 2, 3, 1)
        q_r_phi = self._phi(q_r, self.proj_matrix_row)  # (B, H, W, nb_features)
        k_r_phi = self._phi(k_r, self.proj_matrix_row)  # (B, H, W, nb_features)
        # Sum of k features over the row, part of the normalizer.
        k_r_sum = torch.sum(k_r_phi, dim=2, keepdim=True)  # (B, H, 1, nb_features)
        D_r = torch.sum(q_r_phi * k_r_sum, dim=-1, keepdim=True)  # (B, H, W, 1)
        # KV aggregate: (B, H, nb_features, head_dim).
        kv_r = torch.einsum('bhwd,bhwe->bhde', k_r_phi, v_r)
        # Attention output: (B, H, W, head_dim).
        out_r = torch.einsum('bhwd,bhde->bhwe', q_r_phi, kv_r)
        out_r = out_r / (D_r + self.eps)
        out_r = out_r.permute(0, 3, 1, 2)  # (B, head_dim, H, W)

        # ----- column attention (along the H axis) -----
        q_c = self.q_col(x).permute(0, 3, 2, 1)  # (B, W, H, head_dim)
        k_c = self.k_col(x).permute(0, 3, 2, 1)
        v_c = self.v_col(x).permute(0, 3, 2, 1)
        q_c_phi = self._phi(q_c, self.proj_matrix_col)  # (B, W, H, nb_features)
        k_c_phi = self._phi(k_c, self.proj_matrix_col)  # (B, W, H, nb_features)
        # Sum of k features over the column.
        k_c_sum = torch.sum(k_c_phi, dim=2, keepdim=True)  # (B, W, 1, nb_features)
        D_c = torch.sum(q_c_phi * k_c_sum, dim=-1, keepdim=True)  # (B, W, H, 1)
        kv_c = torch.einsum('bwhd,bwhe->bwde', k_c_phi, v_c)  # (B, W, nb_features, head_dim)
        # BUG FIX: the output subscript was 'bwhd' (nb_features channels),
        # which crashed the head_dim -> in_dim projection below whenever
        # nb_features != head_dim; 'bwhe' matches the row branch.
        out_c = torch.einsum('bwhd,bwde->bwhe', q_c_phi, kv_c)  # (B, W, H, head_dim)
        out_c = out_c / (D_c + self.eps)
        # Back to (B, head_dim, H, W).
        out_c = out_c.permute(0, 3, 2, 1)
        
        # Project both branches to in_dim and sum.
        return self.scale * (self.out_proj_r(out_r) + self.out_proj_c(out_c))


class AxialLinearAttention(nn.Module):
    """Linear attention applied along the horizontal, then vertical axis.

    NOTE(review): the learnable position embeddings cap each spatial side
    at 1024 pixels — confirm against expected feature-map sizes.
    """
    def __init__(self, dim, heads=4):
        super().__init__()
        self.heads = heads
        self.dim_head = dim // heads
        self.scale = self.dim_head ** -0.5
        
        # Per-axis QKV projections.
        self.h_qkv = nn.Conv2d(dim, dim*3, 1, bias=False)
        self.v_qkv = nn.Conv2d(dim, dim*3, 1, bias=False)
        
        self.proj = nn.Conv2d(dim, dim, 1)
        
        # Learnable position embeddings (max spatial extent 1024).
        self.pos_emb_h = nn.Parameter(torch.Tensor(1, heads, self.dim_head, 1, 1024))  # (H, dim_head, W)
        self.pos_emb_v = nn.Parameter(torch.Tensor(1, heads, self.dim_head, 1024, 1))  # (W, dim_head, H)
        nn.init.normal_(self.pos_emb_h, std=0.02)
        nn.init.normal_(self.pos_emb_v, std=0.02)
        
        # Per-head normalization.
        self.norm_h = nn.LayerNorm(self.dim_head)
        self.norm_v = nn.LayerNorm(self.dim_head)
        self.act = nn.SiLU(inplace=False)

    def horizontal_attention(self, x):
        B, C, H, W = x.shape
        qkv = self.h_qkv(x).chunk(3, dim=1)
        
        # Split heads: [B*heads, dim_head, H, W].
        q, k, v = map(lambda t: t.reshape(B*self.heads, self.dim_head, H, W), qkv)
        
        # Horizontal position embedding, cropped to the current width and
        # broadcast to [B*heads, H, W, dim_head].
        pos_h = self.pos_emb_h[:, :, :, :, :W]  # crop to width
        pos_h = pos_h.permute(0,1,4,3,2)        # [1, heads, W, 1, dim]
        pos_h = pos_h.expand(B, -1, -1, H, -1)  # [B, heads, W, H, dim]
        pos_h = pos_h.reshape(B*self.heads, W, H, self.dim_head).permute(0,2,1,3)  # [B*heads, H, W, dim]
        
        # Positions are added to q and k only.
        q = q.permute(0,2,3,1) + pos_h  # [B*heads, H, W, dim]
        k = k.permute(0,2,3,1) + pos_h
        v = v.permute(0,2,3,1)
        
        # One attention problem per image row: (B*heads*H, W, dim).
        q = q.reshape(-1, W, self.dim_head).contiguous()
        k = k.reshape(-1, W, self.dim_head).contiguous()
        v = v.reshape(-1, W, self.dim_head).contiguous()
        
        q = self.act(self.norm_h(q))
        k = self.act(self.norm_h(k))
        v = self.act(v)
        
        # Linear attention: K^T V once per row, normalized by q . sum(k).
        cumulative_kv = torch.einsum('b w c, b w d -> b c d', k, v)
        Z = 1 / (torch.einsum('b w c, b c -> b w', q, k.sum(dim=1)) + 1e-4)
        
        out = torch.einsum('b w c, b c d, b w -> b w d', q, cumulative_kv, Z)
        
        # Restore [B, C, H, W].
        out = out.reshape(B*self.heads, H, W, self.dim_head).permute(0,3,1,2)
        out = out.reshape(B, C, H, W)
        return out

    def vertical_attention(self, x):
        B, C, H, W = x.shape
        qkv = self.v_qkv(x).chunk(3, dim=1)
        
        # Split heads: [B*heads, dim_head, H, W].
        q, k, v = map(lambda t: t.reshape(B*self.heads, self.dim_head, H, W), qkv)
        
        # Vertical position embedding, cropped to the current height and
        # broadcast to [B*heads, H, W, dim_head].
        pos_v = self.pos_emb_v[:, :, :, :H, :]  # crop to height
        pos_v = pos_v.permute(0,1,3,4,2)        # [1, heads, H, 1, dim]
        pos_v = pos_v.expand(B, -1, -1, W, -1)  # [B, heads, H, W, dim]
        pos_v = pos_v.reshape(B*self.heads, H, W, self.dim_head)  # [B*heads, H, W, dim]
        
        # Positions are added to q and k only.
        q = q.permute(0,2,3,1) + pos_v  # [B*heads, H, W, dim]
        k = k.permute(0,2,3,1) + pos_v
        v = v.permute(0,2,3,1)
        
        # BUG FIX: transpose H and W before flattening so each row of the
        # (B*heads*W, H, dim) view really is one image column. The
        # original reshaped [B*heads, H, W, dim] directly to (-1, H, dim),
        # which scrambles the H and W axes together even though the
        # recovery below assumes column-major grouping.
        q = q.permute(0, 2, 1, 3).reshape(-1, H, self.dim_head).contiguous()
        k = k.permute(0, 2, 1, 3).reshape(-1, H, self.dim_head).contiguous()
        v = v.permute(0, 2, 1, 3).reshape(-1, H, self.dim_head).contiguous()
        
        q = self.act(self.norm_v(q))
        k = self.act(self.norm_v(k))
        v = self.act(v)
        
        # Linear attention along each column.
        cumulative_kv = torch.einsum('b h c, b h d -> b c d', k, v)
        Z = 1 / (torch.einsum('b h c, b c -> b h', q, k.sum(dim=1)) + 1e-4)
        
        out = torch.einsum('b h c, b c d, b h -> b h d', q, cumulative_kv, Z)
        
        # Restore [B, C, H, W].
        out = out.reshape(B*self.heads, W, H, self.dim_head).permute(0,3,2,1)
        out = out.reshape(B, C, H, W)
        return out

    def forward(self, x):
        identity = x
        x = self.horizontal_attention(x)
        x = self.vertical_attention(x)
        return self.proj(x) + identity  # residual connection
    

###########------------------------


class FeatureCalibrator(nn.Module):
    """Aligns a previous-stage feature map to the current one (channels,
    then spatial size) and fuses the two with spatial/channel attention.

    Args:
        c1: channel count of the previous-stage feature.
        c2: channel count of the current feature (and of the output).
        mode: spatial alignment strategy, 'adaptive_pool' or 'interpolate'.
    """
    def __init__(self, c1, c2, mode='adaptive_pool'):
        super().__init__()
        self.mode = mode
        self.align_conv = Conv(c1, c2, 1)
        
        # NOTE(review): these modules are constructed but never used in
        # forward (alignment there goes through the functional API); kept
        # for backward compatibility.
        if mode == 'adaptive_pool':
            self.spatial_adjust = nn.AdaptiveAvgPool2d((None, None))
        elif mode == 'interpolate':
            self.spatial_adjust = nn.Upsample(scale_factor=2, mode='bilinear')
        
        # Spatial attention: one sigmoid map over H x W.
        self.spatial_att = nn.Sequential(
            Conv(c2, c2//4, 3),
            nn.Conv2d(c2//4, 1, 3, padding=1),
            nn.Sigmoid()
        )
        # Channel attention: squeeze-excite style, no BN.
        self.channel_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),                # [B,C2,1,1]
            nn.Conv2d(c2, c2//4, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(c2//4, c2, 1, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x, prev_feat):
        # Step 1: channel alignment.
        # BUG FIX: removed a leftover debug print of the input shapes.
        aligned = self.align_conv(prev_feat)  # [B,C2,H_prev,W_prev]
        
        # Step 2: spatial alignment to x's resolution.
        if x.shape[-2:] != aligned.shape[-2:]:
            if self.mode == 'adaptive_pool':
                aligned = F.adaptive_avg_pool2d(aligned, x.shape[-2:])
            elif self.mode == 'interpolate':
                aligned = F.interpolate(aligned, size=x.shape[-2:], mode='bilinear')
            else:
                raise ValueError(f"Unsupported mode: {self.mode}")
        
        # Step 3: attention from the combined features.
        spatial_weight = self.spatial_att(x + aligned)  # same spatial dims
        channel_weight = self.channel_att(x * aligned)
        
        # Step 4: weighted fusion.
        return x * spatial_weight + aligned * channel_weight



class RecursiveBottleneck(nn.Module):
    """Stack of ``depth`` conv pairs with an inner residual add per pair and an
    outer residual around the whole stack.

    NOTE(review): the inner add ``x + self.convs[2*i+1](x)`` and the final
    ``x + identity`` only type-check when ``c1 == c2`` — confirm callers
    always pass equal channel counts (they do in this file).

    Args:
        c1: input channels.
        c2: intermediate channels (must equal ``c1`` in practice, see note).
        depth: number of conv pairs.
        shortcut: if True, project the identity with a 1x1 conv before adding.
        reduction_factor: stored but currently unused.
    """

    def __init__(self, c1, c2, depth=2, shortcut=False, reduction_factor=1.5):
        super().__init__()
        self.reduction_factor = reduction_factor  # NOTE(review): unused
        # Alternating c1->c2 / c2->c1 3x3 convs, two per depth step.
        self.convs = nn.ModuleList(
            Conv(c1, c2, 3) if i % 2 == 0 else Conv(c2, c1, 3)
            for i in range(depth * 2)
        )
        self.depth = depth
        self.shortcut = shortcut
        if self.shortcut:
            self.shortcut_conv = Conv(c1, c2, 1)

    def forward(self, x):
        identity = x
        for i in range(self.depth):
            x = self.convs[2*i](x)
            x = x + self.convs[2*i+1](x)
            # Fix: the original concatenated `identity` onto x and immediately
            # sliced back to identity.shape[1] channels between steps. With
            # equal channel counts (the only configuration in which this
            # module runs at all) that concat+slice returned x unchanged, so
            # the dead work has been removed.
        # Outer residual; project the identity first when requested.
        if self.shortcut:
            identity = self.shortcut_conv(identity)
        return x + identity


class MultiGranularitySplit(nn.Module):
    """Decompose a feature map into ``groups`` globally pooled descriptors.

    A 1x1 conv predicts a per-pixel soft assignment over the groups; each
    group's spatially weighted feature is summed over H and W, and the group
    descriptors are concatenated along the channel axis.
    """

    def __init__(self, c, groups=4):
        super().__init__()
        self.groups = groups
        self.split_conv = nn.Conv2d(c, groups, 1)  # per-pixel group logits

    def forward(self, x):
        # Soft spatial assignment over the groups: [B, G, H, W].
        assignment = self.split_conv(x).softmax(dim=1)
        # Per group: spatially weight x, then pool globally to [B, C, 1, 1].
        pooled = [
            (x * assignment[:, g:g + 1]).sum(dim=(2, 3), keepdim=True)
            for g in range(self.groups)
        ]
        return torch.cat(pooled, dim=1)  # [B, G*C, 1, 1]


class ImprovedC2f_AMHA(nn.Module):
    """C2f-style block with multi-granularity gating, dynamic path routing and
    a final AMHA attention stage.

    Mirrors ``ImprovedC2f`` below but applies ``AMHA`` to the fused output.
    """

    def __init__(self, c1, c2, n=3, shortcut=False, g=4, e=0.5):
        super().__init__()
        self.c = int(c2 * e)                 # hidden width
        self.cv1 = Conv(c1, 4 * self.c, 1)   # expand input to 4x hidden width
        self.n = n
        # Per-group global descriptors used to gate the bottleneck branches.
        # NOTE(review): with groups=4 the splitter emits 16*self.c channels,
        # of which only the first n*self.c are consumed in forward() — confirm
        # this is intended.
        self.splitter = MultiGranularitySplit(4 * self.c, groups=4)

        # Global router producing one scalar weight per path (n + 2 paths).
        self.router = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(4 * self.c, 16, 1),
            nn.ReLU(),
            nn.Conv2d(16, n + 2, 1),
            nn.Softmax(dim=1)
        )

        # 1x1 reduction so branch inputs match the bottleneck width.
        self.reduce_conv = Conv(4 * self.c, self.c, 1)

        self.bottlenecks = nn.ModuleList(
            RecursiveBottleneck(self.c, self.c, n, shortcut=shortcut) for _ in range(n)
        )
        self.cv2 = Conv((n + 2) * self.c, c2, 1)
        self.amha = AMHA(c2, c2)

    def forward(self, x):
        expanded = self.cv1(x)               # [B, 4c, H, W]
        gate = self.splitter(expanded)       # group descriptors, [B, 16c, 1, 1]
        routes = self.router(expanded)       # path weights, [B, n+2, 1, 1]
        reduced = self.reduce_conv(expanded) # [B, c, H, W]

        # Two routed identity-style paths, then n gated bottleneck paths.
        paths = [reduced * routes[:, 0:1], reduced * routes[:, 1:2]]
        for idx, bottleneck in enumerate(self.bottlenecks):
            mask = torch.sigmoid(gate[:, idx * self.c:(idx + 1) * self.c])
            paths.append(bottleneck(reduced * mask) * routes[:, idx + 2:idx + 3])

        return self.amha(self.cv2(torch.cat(paths, dim=1)))
    

class ImprovedC2f(nn.Module):
    """C2f-style block with multi-granularity gating and dynamic path routing.

    Same structure as ``ImprovedC2f_AMHA`` but without the trailing AMHA stage.
    """

    def __init__(self, c1, c2, n=3, shortcut=False, g=4, e=0.5):
        super().__init__()
        self.c = int(c2 * e)                 # hidden width
        self.cv1 = Conv(c1, 4 * self.c, 1)   # expand input to 4x hidden width
        self.n = n
        # Per-group global descriptors used to gate the bottleneck branches.
        # NOTE(review): with groups=4 the splitter emits 16*self.c channels,
        # of which only the first n*self.c are consumed in forward() — confirm
        # this is intended.
        self.splitter = MultiGranularitySplit(4 * self.c, groups=4)

        # Global router producing one scalar weight per path (n + 2 paths).
        self.router = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(4 * self.c, 16, 1),
            nn.ReLU(),
            nn.Conv2d(16, n + 2, 1),
            nn.Softmax(dim=1)
        )

        # 1x1 reduction so branch inputs match the bottleneck width.
        self.reduce_conv = Conv(4 * self.c, self.c, 1)

        self.bottlenecks = nn.ModuleList(
            RecursiveBottleneck(self.c, self.c, n, shortcut=shortcut) for _ in range(n)
        )
        self.cv2 = Conv((n + 2) * self.c, c2, 1)

    def forward(self, x):
        expanded = self.cv1(x)               # [B, 4c, H, W]
        gate = self.splitter(expanded)       # group descriptors, [B, 16c, 1, 1]
        routes = self.router(expanded)       # path weights, [B, n+2, 1, 1]
        reduced = self.reduce_conv(expanded) # [B, c, H, W]

        # Two routed identity-style paths, then n gated bottleneck paths.
        paths = [reduced * routes[:, 0:1], reduced * routes[:, 1:2]]
        for idx, bottleneck in enumerate(self.bottlenecks):
            mask = torch.sigmoid(gate[:, idx * self.c:(idx + 1) * self.c])
            paths.append(bottleneck(reduced * mask) * routes[:, idx + 2:idx + 3])

        return self.cv2(torch.cat(paths, dim=1))



class FocusC2f(nn.Module):
    """Faster CSP bottleneck with 2 convolutions, followed by AMHA attention.

    NOTE(review): ``Bottleneck`` is not imported in the visible file header —
    verify it is defined earlier in this file or add it to the ``.block``
    import alongside ``C3``.
    """

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """Initialize CSP bottleneck layer with two convolutions with arguments ch_in, ch_out, number, shortcut, groups,
        expansion.
        """
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
        self.att = AMHA(c2, c2)

    def forward(self, x):
        """Forward pass through C2f layer."""
        y = list(self.cv1(x).chunk(2, 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.att(self.cv2(torch.cat(y, 1)))

    def forward_split(self, x):
        """Forward pass using split() instead of chunk().

        Fix: now applies ``self.att`` like ``forward()``; the original omitted
        the attention stage here, so the two paths produced different outputs.
        """
        y = list(self.cv1(x).split((self.c, self.c), 1))
        y.extend(m(y[-1]) for m in self.m)
        return self.att(self.cv2(torch.cat(y, 1)))