import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair
from .conv import Conv, DWConv, GhostConv, LightConv, RepConv, autopad
import torchvision
__all__ = ("C2f_DynamicAxialRouterV2","MSADExtract", "AMHA")


###########################################################################
############################## MSADExtract module — begin #################
###########################################################################




# ---------------------- Deformable convolution module ----------------------
class DeformableConv2d(nn.Module):
    """Modulated deformable convolution (DCNv2-style) built on ``torchvision.ops.deform_conv2d``.

    Three parallel plain convolutions predict, per output location:
    the sampling offsets, the modulation mask, and the actual filter
    weights used by the deformable op.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super().__init__()
        n_offset = 2 * kernel_size ** 2  # (dy, dx) pair per kernel position
        n_mask = kernel_size ** 2        # one modulation scalar per kernel position
        conv_kwargs = dict(kernel_size=kernel_size, stride=stride, padding=padding)
        self.offset_conv = nn.Conv2d(in_channels, n_offset, **conv_kwargs)
        self.modulator_conv = nn.Conv2d(in_channels, n_mask, **conv_kwargs)
        self.regular_conv = nn.Conv2d(in_channels, out_channels, **conv_kwargs)

        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)

    def forward(self, x):
        offset = self.offset_conv(x)
        # Sigmoid output scaled from [0, 1] to [0, 2]; a mask of 1 behaves
        # like an ordinary (unmodulated) deformable convolution.
        modulator = 2. * torch.sigmoid(self.modulator_conv(x))
        return torchvision.ops.deform_conv2d(
            input=x,
            offset=offset,
            weight=self.regular_conv.weight,
            bias=self.regular_conv.bias,
            mask=modulator,
            padding=self.padding,
            stride=self.stride,
        )




#------------------------------------ ChannelAttention module --------------------------

class ChannelAttention(nn.Module):
    """Channel-wise self-attention over a single input feature map.

    Attention is computed between channels (a ``C_out x C_out`` affinity
    matrix) rather than between spatial positions, then applied to the value
    projection.  The result is blended back onto the input through a
    learnable, zero-initialised scale ``gamma``, so the module starts out as
    an identity mapping.

    Note: the residual ``gamma * out + x`` only broadcasts correctly when
    ``out_dim == in_dim``.

    Args:
        in_dim: number of input channels.
        out_dim: number of channels after the Q/K/V projections.
    """

    def __init__(self, in_dim: int, out_dim: int):
        super(ChannelAttention, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim

        # 1x1 convolutions act as per-pixel linear projections.
        self.query_conv = nn.Conv2d(in_dim, out_dim, kernel_size=1)
        self.key_conv = nn.Conv2d(in_dim, out_dim, kernel_size=1)
        self.value_conv = nn.Conv2d(in_dim, out_dim, kernel_size=1)

        self.softmax = nn.Softmax(dim=-1)
        self.gamma = nn.Parameter(torch.zeros(1))  # learnable residual scale, starts at 0
        # Last raw (pre-softmax) affinity matrix, kept as a NumPy array for inspection.
        self.attention_weights = None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        batch_size, _, height, width = x.size()

        # Feature projections
        Q = self.query_conv(x)  # (B, C_out, H, W)
        K = self.key_conv(x)    # (B, C_out, H, W)
        V = self.value_conv(x)  # (B, C_out, H, W)

        # Flatten spatial dims for batched matrix products.
        Q = Q.view(batch_size, self.out_dim, -1)  # (B, C_out, H*W)
        K = K.view(batch_size, self.out_dim, -1).permute(0, 2, 1)  # (B, H*W, C_out)
        V = V.view(batch_size, self.out_dim, -1)  # (B, C_out, H*W)

        # Channel-to-channel attention.
        channel_attn = torch.bmm(Q, K)  # (B, C_out, C_out)
        # Bug fix: cast to float32 before .numpy() — NumPy has no bfloat16
        # dtype, so a direct .numpy() raises under bf16 autocast.
        self.attention_weights = channel_attn.detach().float().cpu().numpy()
        channel_attn = self.softmax(channel_attn)

        # Apply attention to the value matrix and restore spatial layout.
        out = torch.bmm(channel_attn, V)  # (B, C_out, H*W)
        out = out.view(batch_size, self.out_dim, height, width)

        # Scaled residual connection (identity at initialisation).
        out = self.gamma * out + x

        return out


class MSADV2(nn.Module):
    """Axial multi-scale hybrid attention block (structurally identical to ``AMHA``)."""

    def __init__(self, in_dim, out_dim, expansion=0.5):
        super().__init__()
        hidden = int(in_dim * expansion)
        self.out_dim = out_dim

        # Downsample to half resolution with a deformable conv.
        self.down_conv = nn.Sequential(
            DeformableConv2d(in_dim, hidden, 3, stride=2),
            nn.GELU(),
        )

        # Depthwise 1x5 and 5x1 strip convolutions, then a pointwise mix.
        self.axial_attn = nn.Sequential(
            nn.Conv2d(hidden, hidden, (1, 5), padding=(0, 2), groups=hidden),
            nn.Conv2d(hidden, hidden, (5, 1), padding=(2, 0), groups=hidden),
            nn.Conv2d(hidden, hidden, 1),
        )

        self.channel_attn = ChannelAttention(hidden, hidden)

        # Upsample back to the input resolution.
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            DeformableConv2d(hidden, in_dim, 3),
        )

        self.fusion = nn.Conv2d(in_dim * 2, in_dim, 1)

    def forward(self, x):
        # Low-resolution attention branch: axial features gated by channel attention.
        low = self.down_conv(x)
        attended = self.axial_attn(low) * self.channel_attn(low)
        attended = self.up(attended)

        # Odd input sizes leave the upsampled map a pixel off; realign if needed.
        if attended.shape[-2:] != x.shape[-2:]:
            attended = F.interpolate(attended, size=x.shape[-2:], mode='bilinear')

        return self.fusion(torch.cat([x, attended], dim=1))


# ---------------------- Improved multi-scale attention distillation ----------------------
class AMHA(nn.Module):
    """Axial Multi-scale Hybrid Attention (AMHA).

    Downsamples the input with a deformable convolution, applies depthwise
    axial (strip) convolutions gated by channel self-attention at low
    resolution, upsamples back, and fuses the result with the identity branch
    through a 1x1 convolution.  Output channel count always equals ``in_dim``.

    Args:
        in_dim: number of input (and output) channels.
        out_dim: kept for interface compatibility; it is never used to size
            a layer.  Defaults to ``in_dim`` so single-argument call sites
            (e.g. ``AMHA(c_)`` in ``MSADExtract``) no longer raise TypeError.
        expansion: hidden-channel ratio for the low-resolution branch.
    """

    def __init__(self, in_dim, out_dim=None, expansion=0.5):
        super().__init__()
        mid_dim = int(in_dim * expansion)
        # Bug fix: out_dim was required but unused; default it to in_dim.
        self.out_dim = in_dim if out_dim is None else out_dim

        self.down_conv = nn.Sequential(
            DeformableConv2d(in_dim, mid_dim, 3, stride=2),
            nn.GELU()
        )

        # Depthwise 1x5 then 5x1 strip convolutions, followed by a pointwise mix.
        self.axial_attn = nn.Sequential(
            nn.Conv2d(mid_dim, mid_dim, (1, 5), padding=(0, 2), groups=mid_dim),
            nn.Conv2d(mid_dim, mid_dim, (5, 1), padding=(2, 0), groups=mid_dim),
            nn.Conv2d(mid_dim, mid_dim, 1)
        )

        self.channel_attn = ChannelAttention(mid_dim, mid_dim)

        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
            DeformableConv2d(mid_dim, in_dim, 3)
        )

        self.fusion = nn.Conv2d(in_dim * 2, in_dim, 1)

    def forward(self, x):
        identity = x

        # Multi-scale processing at half resolution.
        low_res = self.down_conv(x)
        attn = self.axial_attn(low_res) * self.channel_attn(low_res)
        attn = self.up(attn)

        # Adaptive size alignment for odd input resolutions.
        if attn.shape[-2:] != identity.shape[-2:]:
            attn = F.interpolate(attn, size=identity.shape[-2:], mode='bilinear')

        return self.fusion(torch.cat([identity, attn], dim=1))

#------------------------------------ MSAD extraction module --------------------------

class MSADExtract(nn.Module):
    """R-ELAN-style feature extractor stacking ``n`` AMHA blocks.

    Args:
        c1: input channels.
        c2: output channels.
        n: number of stacked AMHA blocks.
        residual: if True, add a learnable layer-scaled residual
            (requires ``c1 == c2`` for the addition to broadcast).
        e: hidden-channel expansion ratio.
        g, shortcut: unused; kept for interface compatibility.
    """

    def __init__(self, c1, c2, n=1, residual=False, e=0.5, g=1, shortcut=True):
        super().__init__()
        c_ = int(c2 * e)  # hidden channels

        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv((1 + n) * c_, c2, 1)  # optional act=FReLU(c2)

        # Small layer-scale init keeps the residual branch near-identity at start.
        init_values = 0.01
        self.gamma = nn.Parameter(init_values * torch.ones(c2), requires_grad=True) if residual else None

        # Bug fix: AMHA's out_dim argument was omitted (TypeError at build
        # time with the two-argument AMHA signature); pass it explicitly.
        self.m = nn.Sequential(*(AMHA(c_, c_) for _ in range(n)))

    def forward(self, x):
        """Forward pass through R-ELAN layer."""
        y = [self.cv1(x)]
        y.extend(m(y[-1]) for m in self.m)
        if self.gamma is not None:
            # Residual path assumes c1 == c2 so x aligns with the fused features.
            return x + self.gamma.view(1, -1, 1, 1) * self.cv2(torch.cat(y, 1))
        return self.cv2(torch.cat(y, 1))
    



###########################################################################
############################## MSADExtract module — end ###################
###########################################################################









# ---------------------- Enhanced axial convolution ----------------------
class DepthwiseAxialConv(nn.Module):
    """Depthwise axial (row + column) convolution with a learned two-way gate.

    Row (1xK) and column (Kx1) depthwise strip convolutions are blended by
    two per-sample scalar gates predicted from globally pooled features.

    Args:
        dim: number of channels (depthwise, so input == output channels).
        kernel_size: (row_kernel, col_kernel) strip lengths.
    """

    def __init__(self, dim, kernel_size=(3, 3)):
        super().__init__()
        self.row_conv = nn.Conv2d(dim, dim, (1, kernel_size[1]),
                                  padding=(0, kernel_size[1] // 2), groups=dim)
        self.col_conv = nn.Conv2d(dim, dim, (kernel_size[0], 1),
                                  padding=(kernel_size[0] // 2, 0), groups=dim)
        # Robustness fix: dim // 8 is 0 for dim < 8, which produced an invalid
        # zero-channel Conv2d; clamp the bottleneck width to at least 1.
        hidden = max(dim // 8, 1)
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, hidden, 1),
            nn.ReLU(),
            nn.Conv2d(hidden, 2, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        row_feat = self.row_conv(x)
        col_feat = self.col_conv(x)
        # Two per-sample scalar gates, each (B, 1, 1, 1), broadcast over channels.
        gate = self.gate(x).chunk(2, dim=1)
        return row_feat * gate[0] + col_feat * gate[1]

# ---------------------- Dynamic sparse routing mechanism ----------------------
class DynamicAxialRouterV2(nn.Module):
    """Sparse dynamic router over three candidate paths with Gumbel-softmax gating.

    A small router head predicts per-sample path logits from globally pooled
    features.  The candidate paths are: identity, a lightweight axial
    convolution, and an AMHA attention block.  During training only the top-2
    paths stay active (sparse routing) and an entropy statistic is recorded in
    ``entropy_loss`` for logging / regularisation by the training loop.

    Args:
        dim: channel count (all paths preserve it).
        num_paths: number of candidate paths (must match ``self.paths``).
        temp_init: initial Gumbel-softmax temperature.
        deploy: reserved for structural re-parameterisation (currently unused).
        div_weight: regularisation strength, consumed by the training loop.
        temp_decay: per-step multiplicative temperature decay factor.
    """

    def __init__(self, dim, num_paths=3, temp_init=0.5, deploy=False, div_weight=0.1, temp_decay=0.99):
        super().__init__()
        self.dim = dim
        self.deploy = deploy
        self.num_paths = num_paths
        self.temp = temp_init

        self.div_weight = div_weight  # regularisation strength coefficient
        self.temp_decay = temp_decay  # temperature decay coefficient
        # Buffer so the temperature follows .to()/state_dict but takes no gradient.
        self.register_buffer('_temperature', torch.tensor(temp_init))
        # Bug fix: entropy_loss used to be a read-only property (no setter,
        # backed by a never-initialised attribute), so this assignment — and
        # the ones in forward() — raised AttributeError at instantiation.
        # A plain attribute gives callers the same read interface.
        self.entropy_loss = None  # populated on every training forward pass

        # Candidate path pool.
        self.paths = nn.ModuleList([
            nn.Identity(),  # path 0: identity mapping
            nn.Sequential(  # path 1: lightweight axial convolution
                DepthwiseAxialConv(dim, kernel_size=(1, 5)),
                nn.Conv2d(dim, dim, 1)
            ),
            # Path 2: improved multi-scale attention.
            # Bug fix: out_dim was omitted (TypeError with the two-argument
            # AMHA signature); pass it explicitly.
            AMHA(dim, dim)
        ])

        # Dynamic router: pooled features -> path logits.
        self.router = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(dim, num_paths, bias=False)
        )

    def forward(self, x):
        # Path weights via Gumbel-softmax; hard (one-hot) selection at eval time.
        logits = self.router(x)
        weights = F.gumbel_softmax(logits, tau=self._temperature.item(), hard=not self.training)

        if self.training:
            # Entropy statistic for logging only — detached, no graph retained.
            with torch.no_grad():
                probs = F.softmax(logits, dim=-1)
                entropy = -torch.sum(probs * torch.log(probs + 1e-8), dim=-1)
                self.entropy_loss = (1 - entropy.mean()).item()  # plain float
        else:
            self.entropy_loss = 0.0

        # Sparse activation: keep only the top-2 paths while training.
        if self.training:
            topk_idx = torch.topk(weights, k=2, dim=-1).indices
            mask = torch.zeros_like(weights).scatter_(-1, topk_idx, 1.0)
            weights = weights * mask

        # Weighted fusion of the path outputs.
        out = torch.zeros_like(x)
        for i in range(self.num_paths):
            path_weight = weights[:, i].view(-1, 1, 1, 1)
            out += path_weight * self.paths[i](x)

        return out + x  # residual connection

    def __deepcopy__(self, memo):
        # Custom deepcopy: share the scalar entropy statistic instead of copying it.
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k == "entropy_loss":  # plain scalar/None — no deep copy needed
                setattr(result, k, v)
            else:
                setattr(result, k, copy.deepcopy(v, memo))
        return result

    def update_temperature(self):
        """Call after each training step to decay the Gumbel-softmax temperature."""
        if self.training:
            self._temperature *= self.temp_decay
    

    # def _merge_weights(self):
    #     """结构重参数化（部署时合并权重）"""
    #     merged_conv = nn.Conv2d(dim, dim, 3, padding=1)
        
    #     # 合并所有路径权重
    #     with torch.no_grad():
    #         # 基础权重（路径0）
    #         base_weight = torch.eye(dim).view(1,1,dim,dim)
    #         base_bias = torch.zeros(dim)
            
    #         # 路径1权重转换
    #         path1_axial = self.paths[1][0]
    #         path1_conv = self.paths[1][1]
    #         axial_row = path1_axial.row_conv.weight.data
    #         axial_col = path1_axial.col_conv.weight.data
    #         path1_weight = path1_conv.weight.data @ (axial_col @ axial_row)
            
    #         # 路径2权重近似
    #         path2_down = self.paths[2].down_conv[0]
    #         path2_attn = self.paths[2].axial_attn
    #         path2_up = self.paths[2].up
    #         path2_weight = path2_up.weight.data @ path2_attn.weight.data @ path2_down.weight.data
            
    #         # 合并权重
    #         merged_weight = base_weight + 0.5*path1_weight + 0.3*path2_weight
    #         merged_conv.weight.data = merged_weight
    #         merged_conv.bias.data = base_bias + path1_conv.bias.data
        
    #     return merged_conv

# ---------------------- Final C2f module implementation ----------------------
class UltraBottleneckV2(nn.Module):
    """Enhanced bottleneck for the C2f module (dynamic sparse routing currently disabled).

    The residual shortcut is taken only when input and output widths match.

    Args:
        c1: input channels.
        c2: output channels.
        deploy, div_weight, temp_decay: kept for interface compatibility with
            the (commented-out) DynamicAxialRouterV2 stage.
    """

    def __init__(self, c1, c2, deploy=False, div_weight=0.1, temp_decay=0.99):
        super().__init__()
        # Fix: self.shortcut was assigned twice with the same value; once is enough.
        self.shortcut = c1 == c2
        self.conv = nn.Sequential(
            Conv(c1, c2, 3),
            Conv(c2, c2, 3),
            # DynamicAxialRouterV2(c2, deploy=deploy, div_weight=div_weight, temp_decay=temp_decay)
        )

    def forward(self, x):
        return x + self.conv(x) if self.shortcut else self.conv(x)

class C2f_DynamicAxialRouterV2(nn.Module):
    """C2f-style block whose fused output is refined by a global AMHA attention stage."""

    def __init__(self, c1, c2, n=3, deploy=False, div_weight=0.1, temp_decay=0.99):
        super().__init__()
        self.dim = c2
        self.c = c2 // 2
        self.cv1 = Conv(c1, 2 * self.c, 1)
        self.m = nn.ModuleList(
            UltraBottleneckV2(self.c, self.c, deploy, div_weight=div_weight, temp_decay=temp_decay)
            for _ in range(n)
        )
        self.cv2 = Conv((2 + n) * self.c, c2, 1)
        self.attention = AMHA(c2, c2)  # global attention over the fused features

    def forward(self, x):
        # Split the stem output into two equal branches, then chain the bottlenecks.
        branches = list(self.cv1(x).split([self.c, self.c], 1))
        for block in self.m:
            branches.append(block(branches[-1]))
        fused = self.cv2(torch.cat(branches, 1))
        return self.attention(fused)  # final feature enhancement


class C2f_DynamicAxialRouterV2_v2(nn.Module):
    """Prune-friendly variant of ``C2f_DynamicAxialRouterV2``.

    The original stem ``cv1`` + split is replaced by two independent 1x1
    convolutions (``cv0`` and ``cv1``) so channel pruning is not constrained
    by a split op; the bottleneck chain and attention stage are unchanged.
    """

    def __init__(self, c1, c2, n=3, shortcut=False, g=1, e=0.5, deploy=False, div_weight=0.1, temp_decay=0.99):
        super().__init__()
        # NOTE: when converting from the original module, set
        # e = child.c / child.cv2.conv.out_channels.
        self.c = int(c2 * e)  # hidden channels
        self.cv0 = Conv(c1, self.c, 1, 1)
        self.cv1 = Conv(c1, self.c, 1, 1)
        self.m = nn.ModuleList(
            UltraBottleneckV2(self.c, self.c, deploy, div_weight=div_weight, temp_decay=temp_decay)
            for _ in range(n)
        )
        self.cv2 = Conv((2 + n) * self.c, c2, 1)
        self.attention = AMHA(c2, c2)

    def forward(self, x):
        # Two independent stems replace the former split; then chain the bottlenecks.
        feats = [self.cv0(x), self.cv1(x)]
        for block in self.m:
            feats.append(block(feats[-1]))
        return self.attention(self.cv2(torch.cat(feats, 1)))

