import torch
import torch.nn as nn
import torch.nn.functional as F
# from mmcv.cnn import build_norm_layer
import math
from .MSAD import AMHA
from .BiGatedFusion import LightBiGatedFusion

class DropPath(nn.Module):
    """Stochastic Depth regularization (adapted from the timm library).

    During training, each sample in the batch is dropped (zeroed) with
    probability ``drop_prob`` and the survivors are rescaled by
    ``1 / (1 - drop_prob)`` so the expected activation is unchanged.
    Acts as an identity in eval mode.
    """
    def __init__(self, drop_prob=0.):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Identity when dropping is disabled or we are not training.
        if not self.training or self.drop_prob == 0.:
            return x
        keep_prob = 1.0 - self.drop_prob
        # One Bernoulli draw per sample, broadcast over all remaining dims.
        mask_shape = [x.shape[0]] + [1] * (x.ndim - 1)
        mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device)
        mask.add_(keep_prob).floor_()  # binarize: 1 with prob keep_prob
        return x.div(keep_prob).mul(mask)


class EfficientChannelAttention(nn.Module):
    """Efficient Channel Attention (ECA) with an adaptive 1-D kernel size.

    The 1-D conv kernel size is derived from the channel count via the
    ECA-Net mapping ``t = |(log2(C) + b) / gamma|`` rounded up to the
    nearest odd integer, so wider layers get a larger channel
    interaction range.
    """
    def __init__(self, channels, gamma=2, b=1):
        super().__init__()
        t = int(abs((math.log(channels, 2) + b) / gamma))
        k_size = t | 1  # force odd: t if t is odd, else t + 1

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size,
                             padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Global average pool -> [B, C, 1, 1]; treat channels as a 1-D sequence.
        pooled = self.avg_pool(x)
        seq = pooled.squeeze(-1).transpose(-1, -2)              # [B, 1, C]
        gate = self.conv(seq).transpose(-1, -2).unsqueeze(-1)   # [B, C, 1, 1]
        return x * self.sigmoid(gate)


class BiGhostConv(nn.Module):
    """Ghost-style convolution: a grouped "primary" conv produces part of the
    output channels and a cheap (near-depthwise) conv synthesizes the rest;
    the two are concatenated and sliced to exactly ``out_ch`` channels.

    Fix vs. previous revision: ``init_ch`` is now ``ceil(out_ch / ratio)``
    (as in the original GhostNet GhostModule). The old ``out_ch // ratio``
    truncated, so when ``out_ch % ratio != 0`` the concatenation had fewer
    than ``out_ch`` channels and the final slice silently returned a tensor
    with the wrong channel count. Behavior is unchanged whenever ``ratio``
    divides ``out_ch``.
    """
    def __init__(self, in_ch, out_ch, kernel_size=1, stride=1, ratio=2, dw_size=3, groups=1):
        super().__init__()
        # Shrink the group count so it always divides the input channels.
        self.groups = max(math.gcd(groups, in_ch), 1)
        self.ratio = ratio
        # ceil guarantees init_ch * ratio >= out_ch, so the slice in forward()
        # can always return exactly out_ch channels.
        init_ch = math.ceil(out_ch / ratio)

        # Primary-branch parameter check.
        assert init_ch % self.groups == 0, f"主分支通道数{init_ch}必须能被分组数{self.groups}整除"

        # Primary branch: grouped conv producing init_ch channels.
        self.primary_conv = nn.Sequential(
            nn.Conv2d(in_ch, init_ch, kernel_size, stride,
                     kernel_size//2, bias=False, groups=self.groups),
            nn.BatchNorm2d(init_ch),
            # NOTE(review): activating only when ratio == 1 looks inverted vs.
            # the usual Ghost module (and ratio == 1 makes the cheap branch
            # 0-channel anyway) — kept as-is to preserve existing behavior.
            nn.ReLU(inplace=True) if ratio == 1 else nn.Identity()
        )

        # Cheap branch: group count derived from the primary branch so the
        # grouped conv stays valid for the init_ch input channels.
        self.cheap_groups = max(self.groups // 2, 1)
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_ch, init_ch*(ratio-1), dw_size, 1,
                     dw_size//2,
                     groups=self.cheap_groups * (init_ch // self.groups),
                     bias=False),
            nn.BatchNorm2d(init_ch*(ratio-1)),
            nn.ReLU(inplace=True)
        )
        self.oup = out_ch

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        # Concatenate both branches and trim to exactly out_ch channels.
        return torch.cat([x1, x2], dim=1)[:, :self.oup, :, :]


class LiteDeformConv(nn.Module):
    """Lightweight "deformable-style" conv adapted to the new BiGhostConv.

    Generates per-position, per-group 3x3 kernel weights, applies them to an
    unfolded (im2col) view of the input, then refines the result with a
    Ghost fusion conv and AMHA attention.

    NOTE(review): ``offset_conv`` produces 18*groups offset channels which
    are reshaped but NEVER used — the sampling below is a plain ``F.unfold``
    with no offset application (no ``grid_sample``). As written this is a
    dynamic per-position weighted conv, not a true deformable conv; the
    offset branch is dead computation. Left in place to avoid changing the
    module's parameter set / checkpoints — confirm intent with the author.

    Assumes ``channels`` is divisible by ``groups`` (required by the
    ``x.view(b * g, c // g, h, w)`` regrouping in forward).
    """
    def __init__(self, channels, groups):
        super().__init__()
        self.channels = channels
        self.groups = groups

        # Dynamic offset generator (see NOTE above: output is currently unused).
        self.offset_conv = nn.Sequential(
            BiGhostConv(in_ch=channels, out_ch=channels//2, kernel_size=3, groups=groups),
            nn.Conv2d(channels//2, 18 * groups, 1)  # 2 * 3 * 3 offsets per group
        )

        # Dynamic weight generator: 9 sigmoid-gated weights (3x3 kernel) per group.
        self.weight_conv = nn.Sequential(
            BiGhostConv(in_ch=channels, out_ch=channels//4, kernel_size=3, groups=groups),
            nn.Conv2d(channels//4, 9 * groups, 1),
            nn.Sigmoid()
        )

        # Final fusion layer; larger dw_size for a bigger receptive field.
        self.ghost_fusion = BiGhostConv(
            in_ch=channels,
            out_ch=channels,
            kernel_size=3,
            groups=groups,
            dw_size=5
        )
        # Attention refinement (project-local module; semantics not visible here).
        self.attn = AMHA(channels)

    def forward(self, x):
        b, c, h, w = x.size()
        g = self.groups

        # === 1. Generate dynamic parameters ===
        offsets = self.offset_conv(x)  # [b, 18*g, h, w] — computed but unused (see class NOTE)
        weights = self.weight_conv(x)  # [b, 9*g, h, w]

        # === 2. Reshape parameters ===
        # Offsets -> [b, g, 18, h, w]: 2*3*3 offsets per group (unused).
        offsets = offsets.view(b, g, 18, h, w)

        # Weights -> [b, g, 9, h, w]: one 3x3 kernel per spatial position.
        weights = weights.view(b, g, 9, h, w)

        # === 3. Grouped feature processing ===
        x_group = x.view(b * g, c // g, h, w)  # [b*g, c/g, h, w]

        # im2col: expand each 3x3 neighborhood (zero-padded, stride 1).
        x_unfold = F.unfold(x_group, kernel_size=3, padding=1)  # [b*g, (c/g)*9, h*w]
        x_unfold = x_unfold.view(b*g, -1, 9, h, w)  # [b*g, c/g, 9, h, w]

        # === 4. Dynamic convolution ===
        # Weights -> [b*g, 1, 9, h, w] for broadcasting over channels.
        weights = weights.view(b*g, 1, 9, h, w)

        # Per-position weighting (broadcast over the c/g channel dim) ...
        weighted_feat = x_unfold * weights  # [b*g, c/g, 9, h, w]
        # ... then sum over the 9 kernel taps.
        weighted_feat = weighted_feat.sum(2)  # [b*g, c/g, h, w]

        # === 5. Regroup features ===
        feat_out = weighted_feat.view(b, c, h, w)  # [b, c, h, w]

        # === 6. Fuse and refine ===
        return self.attn(self.ghost_fusion(feat_out))



class MultiRepBlock(nn.Module):
    """Two-branch block built on BiGhostConv, merged by a gated fusion.

    Branch 1: lightweight deformable conv, then a Ghost expand(x2) /
    GELU / contract stack. Branch 2: a pure Ghost stack with the same
    expand/contract shape. Outputs are combined by LightBiGatedFusion.
    """
    def __init__(self, channels, groups):
        super().__init__()
        self.channels = channels
        self.groups = groups

        def ghost(cin, cout, dw=3):
            # Shorthand for the 3x3 BiGhostConv configuration used throughout.
            return BiGhostConv(in_ch=cin, out_ch=cout, kernel_size=3,
                               groups=groups, dw_size=dw)

        # Branch 1: deform conv -> expand -> GELU -> contract.
        self.branch1 = nn.Sequential(
            LiteDeformConv(channels, groups),
            ghost(channels, channels * 2, dw=5),
            nn.GELU(),
            ghost(channels * 2, channels),
        )

        # Branch 2: plain Ghost stack, parameters aligned with branch 1.
        self.branch2 = nn.Sequential(
            ghost(channels, channels, dw=5),
            ghost(channels, channels * 2),
            nn.GELU(),
            ghost(channels * 2, channels),
        )

        # Gated fusion of the two branch outputs.
        self.fusion = LightBiGatedFusion(channels, channels, channels)

    def forward(self, x):
        return self.fusion(self.branch1(x), self.branch2(x))

    # @torch.no_grad()
    # def reparam_forward(self, x):
    #     identity = torch.eye(self.channels, device=x.device).view(self.channels, self.channels, 1, 1)
        
    #     # 分支1融合
    #     branch1_kernel = identity.clone()
    #     for module in self.branch1:
    #         if isinstance(module, BiGhostConv):
    #             branch1_kernel = self._fuse_ghost_conv(module, branch1_kernel)
        
    #     # 分支2融合（类似处理）
    #     branch2_kernel = identity.clone()
    #     for module in self.branch2:
    #         if isinstance(module, BiGhostConv):
    #             branch2_kernel = self._fuse_ghost_conv(module, branch2_kernel)
        
    #     # 合并并保持卷积核形状
    #     fused_kernel = branch1_kernel + branch2_kernel
    #     return F.conv2d(x, fused_kernel, padding=1)

    # def _fuse_ghost_conv(self, conv, prev_kernel):
    #     """修复维度匹配的Ghost卷积融合方法"""
    #     # 主分支处理
    #     primary = conv.primary_conv[0]
    #     W_primary = primary.weight.data
    #     O1, I_per_group, K1, _ = W_primary.shape
    #     groups = primary.groups
    #     total_input = I_per_group * groups

    #     # 重构主卷积核 [O1, total_input, K1, K1]
    #     W_primary_full = torch.zeros(O1, total_input, K1, K1, device=W_primary.device)
    #     for g in range(groups):
    #         slice_size = O1 // groups
    #         W_primary_full[g*slice_size:(g+1)*slice_size, g*I_per_group:(g+1)*I_per_group] = W_primary[g*slice_size:(g+1)*slice_size]

    #     # 轻量分支处理
    #     cheap = conv.cheap_operation[0]
    #     W_cheap = cheap.weight.data
    #     O2, I_cheap, K2, _ = W_cheap.shape
        
    #     # 动态调整分组确保维度对齐
    #     expand_groups = cheap.groups * (O1 // groups)
    #     assert I_cheap * expand_groups == O1, f"通道不匹配: {I_cheap}*{expand_groups} vs {O1}"

    #     # 生成等效卷积核（保持K1尺寸）
    #     W_cheap_expanded = W_cheap.repeat(expand_groups, 1, 1, 1)
    #     cheap_effect = F.conv2d(
    #         W_primary_full.permute(1,0,2,3),  # [total_input, O1, K1, K1]
    #         W_cheap_expanded,
    #         padding=K2//2,  # 动态计算padding保持尺寸
    #         groups=expand_groups
    #     ).permute(1,0,2,3)  # [O1, total_input, K1, K1]

    #     # 统一尺寸后合并
    #     W_combined = torch.cat([
    #         W_primary_full, 
    #         cheap_effect[:, :, :K1, :K1]  # 裁剪到相同尺寸
    #     ], dim=0)[:conv.oup]
        
    #     # 维度对齐的融合计算
    #     return torch.einsum('oihw,jkhw->ikohw', prev_kernel, W_combined).sum(dim=(1,3))


class LightInternBlock(nn.Module):
    """Depth-scalable lightweight InternBlock (supports a num_blocks parameter).

    Stacks ``num_blocks`` residual (MultiRepBlock + AMHA + DropPath) layers,
    optionally realigns channels, then applies a cross-layer attention gate
    with a global residual connection.

    Args:
        in_ch: input channel count.
        out_ch: output channel count (a 1x1 BiGhostConv aligns when different).
        num_blocks: number of stacked residual sub-modules.
        groups: group count forwarded to the inner modules.
        drop_path: stochastic-depth probability per sub-module.

    Fix vs. previous revision: when ``in_ch != out_ch`` the global shortcut
    is now projected through the same channel-alignment conv; previously the
    final ``+ shortcut`` raised a shape-mismatch error because the shortcut
    still carried ``in_ch`` channels. Behavior when ``in_ch == out_ch`` is
    unchanged (the alignment is an Identity).
    """
    def __init__(self, in_ch, out_ch, num_blocks, groups, drop_path=0.):
        super().__init__()
        self.layers = nn.ModuleList()

        # Depth scaling: stack num_blocks identical residual sub-modules.
        for _ in range(num_blocks):
            layer = nn.Sequential(
                MultiRepBlock(in_ch, groups),
                AMHA(in_ch),
                DropPath(drop_path)
            )
            self.layers.append(layer)

        # Channel adaptation (only when input/output widths differ).
        if in_ch != out_ch:
            self.channel_align = BiGhostConv(in_ch, out_ch, 1)
        else:
            self.channel_align = nn.Identity()

        # Cross-layer attention gate: bottleneck to out_ch//4, attend, expand.
        self.cross_attn = nn.Sequential(
            BiGhostConv(out_ch, out_ch//4, 3, groups=groups),
            nn.GELU(),
            AMHA(out_ch//4),
            nn.Conv2d(out_ch//4, out_ch, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        shortcut = x

        # Deep residual learning path: per-layer residual connections.
        for layer in self.layers:
            x = layer(x) + x

        # Channel alignment (Identity when in_ch == out_ch).
        x = self.channel_align(x)

        # Project the global shortcut through the same alignment module so
        # the final residual add is shape-consistent when in_ch != out_ch.
        shortcut = self.channel_align(shortcut)

        # Cross-layer attention gating plus global residual connection.
        attn = self.cross_attn(x)
        return x * (1 + attn) + shortcut


# class YOLOBackbone(nn.Module):
#     """适配YOLO的主干网络（输出3个尺度特征）"""
#     def __init__(self, 
#                  in_chans=3,
#                  base_dim=96,
#                  depths=[3, 3, 3],
#                  groups=[4, 8, 16],
#                  drop_path_rate=0.1):
#         super().__init__()
#         # STEM层
#         self.stem = nn.Sequential(
#             nn.Conv2d(in_chans, base_dim//2, 3, stride=2, padding=1),
#             nn.BatchNorm2d(base_dim//2),
#             nn.SiLU(inplace=True),
#             BiGhostConv(base_dim//2, base_dim, 3, stride=2)
#         )
        
#         # 多阶段构建
#         self.stages = nn.ModuleList()
#         dp_rates = torch.linspace(0, drop_path_rate, sum(depths))
#         curr = 0
        
#         # 三个主要阶段
#         for i in range(3):
#             stage = nn.Sequential(
#                 *[LightInternBlock(
#                     in_ch=base_dim * (2**i),
#                     out_ch=base_dim * (2**i),
#                     num_blocks=3,
#                     groups=groups[i],
#                     drop_path=dp_rates[curr + j]
#                 ) for j in range(depths[i])],
#             )
#             self.stages.append(stage)
#             curr += depths[i]
            
#             # 添加下采样层（最后一个阶段不添加）
#             if i < 2:
#                 self.stages.append(nn.Sequential(
#                     BiGhostConv(base_dim*(2**i), base_dim*(2**(i+1)), 3, stride=2),
#                     EfficientChannelAttention(base_dim*(2**(i+1)))
#                 ))
                
#         # YOLO需要的三个输出层
#         self.out_layers = nn.ModuleList([
#             nn.Identity(),  # 第1阶段输出
#             nn.Identity(),  # 第2阶段输出
#             nn.Identity()   # 第3阶段输出
#         ])

#     def forward(self, x):
#         """输出三个尺度的特征图：
#         - f1: [B, 96*1, H/8, W/8] 
#         - f2: [B, 96*2, H/16, W/16]
#         - f3: [B, 96*4, H/32, W/32]
#         """
#         x = self.stem(x)
#         outputs = []
#         stage_id = 0
        
#         for layer in self.stages:
#             x = layer(x)
#             if isinstance(layer, nn.Sequential) and len(layer) > 1:
#                 if stage_id < 3:  # 只记录三个主阶段输出
#                     outputs.append(self.out_layers[stage_id](x))
#                     stage_id += 1
                    
#         return outputs[-3:]  # 返回最后三个特征图


# import torch.optim as optim
# from torchsummary import summary
# from thop import profile, clever_format
# if __name__ == "__main__":
#     # 创建一个示例输入
#     x = torch.randn(1, 3, 256, 256)  # 假设输入大小为 [batch_size, channels, height, width]
    
#     # 实例化YOLOBackbone模型
#     model = YOLOBackbone(in_chans=3, base_dim=96, depths=[3, 3, 2], groups=[4, 8, 16], drop_path_rate=0.1)
    
#     # 测试模型输出
#     outputs = model(x)
#     for i, output in enumerate(outputs):
#         print(f"Output {i+1}: {output.shape}")

#     # 计算参数量和计算量
#     flops, params = profile(model, inputs=(x,))
#     flops, params = clever_format([flops, params], "%.3f")

#     print(f"Total FLOPs: {flops}")
#     print(f"Total Parameters: {params}")

#     # 使用summary查看模型的详细参数
#     print("\nModel Summary:")
#     summary(model, input_size=(3, 256, 256), batch_size=1, device="cpu")
