"""
LEE-YOLO 自定义模块
将这些模块添加到 ultralytics/nn/modules/block.py 或创建新文件 ultralytics/nn/modules/lee_modules.py
"""

import torch
import torch.nn as nn
import math

# Public API. BiFPNBlock is exported as well: it is a usable standalone
# module and EBiFPN is built directly on top of it.
__all__ = ['GhostConv', 'GhostBottleneck', 'GSConv', 'SGC2f', 'ECA', 'BiFPNBlock', 'EBiFPN', 'EMA']


# ==================== 1. GhostConv模块 ====================
class GhostConv(nn.Module):
    """Ghost Convolution (GhostNet: More Features from Cheap Operations).

    Produces ``c2`` output channels cheaply: a regular convolution computes a
    reduced set of "intrinsic" feature maps, and a depthwise convolution
    derives additional "ghost" maps from them.

    Args:
        c1: input channels.
        c2: output channels.
        k: kernel size of the primary convolution.
        s: stride of the primary convolution.
        g: groups of the primary convolution.
        act: apply SiLU activation when True, identity otherwise.
        ratio: fraction of channels produced by the primary conv (c2/ratio).
    """

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True, ratio=2):
        super().__init__()
        self.oup = c2
        intrinsic = math.ceil(c2 / ratio)     # channels from the primary conv
        ghost = intrinsic * (ratio - 1)       # channels from the cheap op

        # Primary convolution: intrinsic features.
        self.primary_conv = nn.Sequential(
            nn.Conv2d(c1, intrinsic, k, s, k // 2, groups=g, bias=False),
            nn.BatchNorm2d(intrinsic),
            nn.SiLU() if act else nn.Identity(),
        )

        # Cheap operation: depthwise 5x5 conv generating the ghost features.
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(intrinsic, ghost, 5, 1, 2, groups=intrinsic, bias=False),
            nn.BatchNorm2d(ghost),
            nn.SiLU() if act else nn.Identity(),
        )

    def forward(self, x):
        intrinsic = self.primary_conv(x)
        ghost = self.cheap_operation(intrinsic)
        # Concatenate, then trim in case ceil() overshot the requested width.
        return torch.cat((intrinsic, ghost), dim=1)[:, :self.oup, :, :]


# ==================== 2. GhostBottleneck模块 ====================
class GhostBottleneck(nn.Module):
    """Ghost bottleneck supporting stride 1 and stride 2.

    Two GhostConvs (expand, then project without activation) around an
    optional depthwise downsampling conv; a projection shortcut is used
    whenever the main path changes resolution or channel count.

    Args:
        c1: input channels.
        c2: output channels.
        k: depthwise kernel size.
        s: stride (1 or 2).
        e: hidden-channel expansion ratio.
    """

    def __init__(self, c1, c2, k=3, s=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # bottleneck width

        # Depthwise downsampling stage is only active for stride-2 blocks.
        if s == 2:
            mid = [
                nn.Conv2d(hidden, hidden, k, s, k // 2, groups=hidden, bias=False),
                nn.BatchNorm2d(hidden),
                nn.ReLU(inplace=True),
            ]
        else:
            mid = [nn.Identity(), nn.Identity(), nn.Identity()]

        self.conv = nn.Sequential(
            GhostConv(c1, hidden, 1, 1),             # expand
            *mid,
            GhostConv(hidden, c2, 1, 1, act=False),  # project, no activation
        )

        # Identity shortcut when shape is preserved, otherwise a
        # depthwise + pointwise projection matching the main path.
        if s == 2 or c1 != c2:
            self.shortcut = nn.Sequential(
                nn.Conv2d(c1, c1, k, s, k // 2, groups=c1, bias=False),
                nn.BatchNorm2d(c1),
                nn.Conv2d(c1, c2, 1, 1, bias=False),
                nn.BatchNorm2d(c2),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        return self.conv(x) + self.shortcut(x)


# ==================== 3. GSConv模块 ====================
class GSConv(nn.Module):
    """Group Shuffle Convolution (Slim-neck by GSConv).

    Splits the output width between a standard convolution and a
    depthwise-separable convolution, then channel-shuffles the concatenated
    result so information mixes between the two halves.

    Args:
        c1: input channels.
        c2: output channels (split evenly across the two branches).
        k: kernel size of the strided convolutions.
        s: stride.
        g: kept for interface compatibility (unused, as in the original).
    """

    def __init__(self, c1, c2, k=1, s=1, g=1):
        super().__init__()
        half = c2 // 2

        # Standard-convolution branch.
        self.cv1 = nn.Conv2d(c1, half, k, s, k // 2, bias=False)
        self.bn1 = nn.BatchNorm2d(half)

        # Depthwise-separable branch: depthwise conv ...
        self.cv2 = nn.Conv2d(c1, c1, k, s, k // 2, groups=c1, bias=False)
        self.bn2 = nn.BatchNorm2d(c1)
        # ... followed by a 1x1 pointwise conv to set the width.
        self.cv3 = nn.Conv2d(c1, half, 1, 1, 0, bias=False)
        self.bn3 = nn.BatchNorm2d(half)

        self.act = nn.SiLU()

    def forward(self, x):
        dense = self.act(self.bn1(self.cv1(x)))

        cheap = self.act(self.bn2(self.cv2(x)))          # depthwise
        cheap = self.act(self.bn3(self.cv3(cheap)))      # pointwise

        merged = torch.cat((dense, cheap), 1)

        # Channel shuffle: interleave the two halves channel-by-channel.
        b, c, h, w = merged.shape
        shuffled = (
            merged.view(b, 2, c // 2, h, w)
            .transpose(1, 2)
            .contiguous()
            .view(b, c, h, w)
        )
        return shuffled


# ==================== 4. SGC2f模块 ====================
class SGC2f(nn.Module):
    """Shuffle Ghost C2f — LEE-YOLO's core lightweight module.

    A C2f-style block using GSConv for the split/merge convolutions and
    GhostBottleneck for the repeated inner blocks. An input residual is
    added when the caller requests it and the widths match.

    Args:
        c1: input channels.
        c2: output channels.
        n: number of GhostBottleneck blocks.
        shortcut: add residual from input (only honored when c1 == c2).
        g: group count forwarded to GSConv.
        e: hidden-channel ratio.
    """

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__()
        self.c = int(c2 * e)  # hidden width of each branch
        # The residual is only valid when input and output widths agree.
        self.shortcut = shortcut and c1 == c2

        self.cv1 = GSConv(c1, 2 * self.c, 1, 1, g)        # split conv
        self.m = nn.ModuleList(
            GhostBottleneck(self.c, self.c, e=0.5) for _ in range(n)
        )
        self.cv2 = GSConv((2 + n) * self.c, c2, 1, 1, g)  # merge conv

    def forward(self, x):
        # Split into two branches, then chain bottlenecks off the second one,
        # collecting every intermediate output for the final concatenation.
        branches = list(self.cv1(x).split((self.c, self.c), 1))
        for block in self.m:
            branches.append(block(branches[-1]))

        fused = self.cv2(torch.cat(branches, 1))
        return fused + x if self.shortcut else fused


# ==================== 5. ECA注意力模块 ====================
class ECA(nn.Module):
    """Efficient Channel Attention (ECA-Net).

    Global average pooling followed by a single 1-D convolution across the
    channel dimension — local cross-channel interaction without the
    dimensionality reduction of SE-style attention.

    Args:
        channels: channel count used to derive the 1-D kernel size; when
            None, the paper's default kernel size of 3 is used.
        gamma, b: coefficients of the paper's kernel-size formula.
    """

    def __init__(self, channels=None, gamma=2, b=1):
        super().__init__()
        if channels is None:
            k_size = 3  # default kernel when the width is unknown
        else:
            # Adaptive kernel size from the paper:
            # nearest odd number to (log2(C) + b) / gamma.
            t = int(abs((math.log(channels, 2) + b) / gamma))
            k_size = t if t % 2 else t + 1

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Squeeze: [B, C, H, W] -> [B, C, 1, 1] -> [B, 1, C]
        squeezed = self.avg_pool(x).squeeze(-1).transpose(-1, -2)
        # Cross-channel interaction, then restore to [B, C, 1, 1].
        weights = self.conv(squeezed).transpose(-1, -2).unsqueeze(-1)
        # Gate the input with sigmoid-normalized channel weights.
        return x * self.sigmoid(weights)


# ==================== 6. BiFPN块 ====================
class BiFPNBlock(nn.Module):
    """One bidirectional feature pyramid (BiFPN) block.

    Runs a top-down pass (P5 -> P4 -> P3) followed by a bottom-up pass
    (P3 -> P4 -> P5). Each fusion node combines its inputs with learnable,
    non-negative, normalized weights (fast normalized fusion) and refines
    the result with a 3x3 conv + BN + SiLU.

    Args:
        channels: channel count shared by all pyramid levels.
        epsilon: small constant keeping the weight normalization finite.
    """

    def __init__(self, channels, epsilon=1e-4):
        super().__init__()
        self.epsilon = epsilon

        def fuse_conv():
            # 3x3 refinement applied after every weighted fusion.
            return nn.Sequential(
                nn.Conv2d(channels, channels, 3, 1, 1, bias=False),
                nn.BatchNorm2d(channels),
                nn.SiLU(),
            )

        # Top-down fusion convs.
        self.conv_p4_td = fuse_conv()
        self.conv_p3_td = fuse_conv()
        # Bottom-up fusion convs.
        self.conv_p4_bu = fuse_conv()
        self.conv_p5_bu = fuse_conv()

        # Learnable fusion weights, one entry per input of each node.
        self.w1_td = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)  # P4 + P5
        self.w2_td = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)  # P3 + P4_td
        self.w1_bu = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)  # P4 + P4_td + P3_out
        self.w2_bu = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)  # P5 + P4_out

    def _fusion_weights(self, raw):
        """Clamp weights to be non-negative, then normalize to sum ~1."""
        w = torch.relu(raw)
        return w / (torch.sum(w, dim=0) + self.epsilon)

    @staticmethod
    def _resize(src, like):
        """Nearest-neighbor resize of `src` to the spatial size of `like`."""
        return nn.functional.interpolate(src, size=like.shape[-2:], mode='nearest')

    def forward(self, inputs):
        """inputs: [P3, P4, P5] -> [P3_out, P4_out, P5_out]."""
        p3, p4, p5 = inputs

        # ----- Top-down pathway -----
        w = self._fusion_weights(self.w1_td)
        p4_td = self.conv_p4_td(w[0] * p4 + w[1] * self._resize(p5, p4))

        w = self._fusion_weights(self.w2_td)
        p3_out = self.conv_p3_td(w[0] * p3 + w[1] * self._resize(p4_td, p3))

        # ----- Bottom-up pathway -----
        w = self._fusion_weights(self.w1_bu)
        p4_out = self.conv_p4_bu(
            w[0] * p4 + w[1] * p4_td + w[2] * self._resize(p3_out, p4)
        )

        w = self._fusion_weights(self.w2_bu)
        p5_out = self.conv_p5_bu(w[0] * p5 + w[1] * self._resize(p4_out, p5))

        return [p3_out, p4_out, p5_out]


# ==================== 7. EBiFPN模块 ====================
class EBiFPN(nn.Module):
    """Efficient BiFPN: stacked BiFPN blocks with per-level ECA attention.

    After each BiFPN fusion, every pyramid level (P3/P4/P5) is refined by
    its own independent ECA module.

    Args:
        channels: channel count shared by all pyramid levels.
        num_layers: number of BiFPN+ECA stages to stack.
    """

    def __init__(self, channels, num_layers=2):
        super().__init__()
        self.num_layers = num_layers

        self.bifpn_blocks = nn.ModuleList()
        self.eca_blocks = nn.ModuleList()
        for _ in range(num_layers):
            self.bifpn_blocks.append(BiFPNBlock(channels))
            # One independent ECA per pyramid level (P3, P4, P5).
            self.eca_blocks.append(nn.ModuleList(ECA(channels) for _ in range(3)))

    def forward(self, features):
        """features: [P3, P4, P5] -> [P3_out, P4_out, P5_out]."""
        for bifpn, ecas in zip(self.bifpn_blocks, self.eca_blocks):
            fused = bifpn(features)
            features = [att(level) for att, level in zip(ecas, fused)]
        return features


# ==================== 8. EMA注意力模块 ====================
class EMA(nn.Module):
    """
    Efficient Multi-Scale Attention
    Paper: Efficient Multi-Scale Attention Module with Cross-Spatial Learning

    Gates the input with the product of a squeeze-excite style channel
    branch and a depthwise spatial branch. If ``channels`` is None, the
    layers are built lazily on the first forward pass from the input's
    channel count.

    NOTE: with lazy construction the parameters do not exist until the
    first forward pass, so create the optimizer (and call ``.to(device)``)
    after a dry run, or pass ``channels`` explicitly.

    Args:
        channels: input channel count; None enables lazy initialization.
        num_heads: kept for interface compatibility (only used to derive
            ``head_dim``, which the forward pass does not consume).
        reduction: bottleneck ratio of the channel-attention branch.
    """

    def __init__(self, channels=None, num_heads=8, reduction=4):
        super().__init__()
        self.channels = channels
        self.num_heads = num_heads
        self.reduction = reduction

        if channels is not None:
            self._build_layers(channels)

    def _build_layers(self, channels):
        """Create both attention branches for a known channel count."""
        self.head_dim = channels // self.num_heads

        # Clamp so the bottleneck never collapses to zero channels when
        # channels < reduction (Conv2d would reject out_channels=0).
        hidden = max(1, channels // self.reduction)

        # Channel-attention branch: global pool -> bottleneck MLP -> sigmoid.
        self.channel_conv = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, hidden, 1, bias=False),
            nn.SiLU(),
            nn.Conv2d(hidden, channels, 1, bias=False),
            nn.Sigmoid()
        )

        # Spatial-attention branch: depthwise 3x3 -> pointwise 1x1 -> sigmoid.
        self.spatial_conv = nn.Sequential(
            nn.Conv2d(channels, channels, 3, 1, 1, groups=channels, bias=False),
            nn.BatchNorm2d(channels),
            nn.Conv2d(channels, channels, 1, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, h, w = x.shape

        # Lazy initialization on first use. The fresh layers must be moved
        # onto the input's device/dtype explicitly: any earlier .to(device)
        # call on the model could not have moved layers that did not exist.
        if self.channels is None:
            self.channels = c
            self._build_layers(c)
            self.channel_conv.to(device=x.device, dtype=x.dtype)
            self.spatial_conv.to(device=x.device, dtype=x.dtype)

        channel_att = self.channel_conv(x)   # [B, C, 1, 1]
        spatial_att = self.spatial_conv(x)   # [B, C, H, W]

        # Broadcast-multiply the two attention maps, then gate the input.
        attention = channel_att * spatial_att
        return x * attention


# ==================== 9. 测试函数 ====================
def test_modules():
    """Run every LEE-YOLO module once on random data and print I/O shapes."""
    print("测试LEE-YOLO模块...")

    x = torch.randn(1, 64, 80, 80)

    # Modules that map a single tensor to a single tensor.
    single_input = [
        ("GhostConv", GhostConv(64, 128)),
        ("GhostBottleneck", GhostBottleneck(64, 128)),
        ("GSConv", GSConv(64, 128)),
        ("SGC2f", SGC2f(64, 128, n=3)),
        ("ECA", ECA(64)),
    ]
    for name, module in single_input:
        out = module(x)
        print(f"{name}: {x.shape} -> {out.shape}")

    # EBiFPN consumes a three-level feature pyramid.
    features = [
        torch.randn(1, 128, 80, 80),
        torch.randn(1, 128, 40, 40),
        torch.randn(1, 128, 20, 20),
    ]
    fused = EBiFPN(128)(features)
    print(f"EBiFPN: {[f.shape for f in features]} -> {[o.shape for o in fused]}")

    out = EMA(64)(x)
    print(f"EMA: {x.shape} -> {out.shape}")

    print("\n所有模块测试通过!")


# Run the module smoke tests when executed as a script.
if __name__ == "__main__":
    test_modules()