# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

"""
Enhanced Detection Modules for Underwater YOLOv8
集成DCNv3、WFPN、MASM等先进模块
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class DCNv3(nn.Module):
    """Modulated deformable convolution v3 (simplified, pure PyTorch).

    From Dynamic YOLO; improves small-object feature extraction by letting
    every kernel tap sample the input at a learned, data-dependent offset
    and scaling each sampled value by a learned modulation scalar.

    The sampling is implemented with ``F.grid_sample`` so no custom CUDA op
    (e.g. ``torchvision.ops.deform_conv2d``) is required.  Offsets and
    modulation logits are zero-initialised, so at initialisation the layer
    samples the plain convolution grid (each tap scaled by sigmoid(0) = 0.5)
    and training learns where to deform from there.

    NOTE(review): the offset normalisation assumes H > 1 and W > 1; 1-pixel
    inputs degenerate — confirm callers never pass those.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1, groups=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.groups = groups

        # Offset prediction: (dy, dx) for each of kernel_size^2 taps, per group.
        self.offset_conv = nn.Conv2d(
            in_channels,
            2 * kernel_size * kernel_size * groups,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=True
        )

        # Modulation scalar prediction: one weight per tap, per group.
        self.modulation_conv = nn.Conv2d(
            in_channels,
            kernel_size * kernel_size * groups,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            bias=True
        )

        # Regular convolution weights, applied to the deformed samples.
        self.regular_conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False
        )

        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.SiLU(inplace=True)

        self._init_weights()

    def _init_weights(self):
        # Zero init => no deformation and neutral modulation at start of training.
        nn.init.constant_(self.offset_conv.weight, 0.)
        nn.init.constant_(self.offset_conv.bias, 0.)
        nn.init.constant_(self.modulation_conv.weight, 0.)
        nn.init.constant_(self.modulation_conv.bias, 0.)

    def forward(self, x):
        """Apply modulated deformable convolution via bilinear grid sampling.

        Args:
            x: input tensor of shape (B, in_channels, H, W).

        Returns:
            Tensor of shape (B, out_channels, H_out, W_out) where the output
            size follows the usual Conv2d arithmetic for the layer's
            kernel/stride/padding.
        """
        B, C, H, W = x.shape
        k, g = self.kernel_size, self.groups
        n = k * k

        # Per-location sampling offsets and modulation scalars.
        # Channel layout (per group): n kernel taps x (dy, dx) / n scalars.
        offset = self.offset_conv(x)                          # (B, 2*n*g, Ho, Wo)
        modulation = torch.sigmoid(self.modulation_conv(x))   # (B, n*g, Ho, Wo)
        Ho, Wo = offset.shape[-2:]

        device, dtype = x.device, x.dtype
        # Top-left corner of every receptive field (may be negative due to padding).
        row0 = torch.arange(Ho, device=device, dtype=dtype) * self.stride - self.padding
        col0 = torch.arange(Wo, device=device, dtype=dtype) * self.stride - self.padding
        base_y, base_x = torch.meshgrid(row0, col0, indexing='ij')        # (Ho, Wo)
        # Row-major kernel tap coordinates, matching Conv2d's weight layout.
        tap_y, tap_x = torch.meshgrid(
            torch.arange(k, device=device, dtype=dtype),
            torch.arange(k, device=device, dtype=dtype),
            indexing='ij',
        )
        tap_y, tap_x = tap_y.reshape(-1), tap_x.reshape(-1)               # (n,)

        offset = offset.view(B, g, n, 2, Ho, Wo)
        modulation = modulation.view(B, g, n, Ho, Wo)

        x_groups = x.view(B, g, C // g, H, W)
        out_per_group = self.out_channels // g
        group_outputs = []
        for gi in range(g):
            taps = []
            for i in range(n):
                sample_y = base_y + tap_y[i] + offset[:, gi, i, 0]        # (B, Ho, Wo)
                sample_x = base_x + tap_x[i] + offset[:, gi, i, 1]
                # Normalise pixel coords to [-1, 1]; align_corners=True maps
                # integer coordinates exactly, so zero offset reproduces the
                # regular convolution sampling grid (zeros padding matches
                # the conv's zero padding for out-of-bounds taps).
                grid = torch.stack(
                    (2.0 * sample_x / max(W - 1, 1) - 1.0,
                     2.0 * sample_y / max(H - 1, 1) - 1.0),
                    dim=-1,
                )                                                          # (B, Ho, Wo, 2)
                sampled = F.grid_sample(
                    x_groups[:, gi], grid,
                    mode='bilinear', padding_mode='zeros', align_corners=True,
                )                                                          # (B, C//g, Ho, Wo)
                taps.append(sampled * modulation[:, gi, i].unsqueeze(1))
            # Arrange as im2col columns: channel-major, kernel-tap minor —
            # the same order Conv2d weights flatten to, so a 1x1 conv with
            # the flattened weight equals the deformed k x k convolution.
            cols = torch.stack(taps, dim=2).reshape(B, (C // g) * n, Ho, Wo)
            w = self.regular_conv.weight[gi * out_per_group:(gi + 1) * out_per_group]
            group_outputs.append(F.conv2d(cols, w.reshape(out_per_group, (C // g) * n, 1, 1)))
        out = group_outputs[0] if g == 1 else torch.cat(group_outputs, dim=1)

        out = self.bn(out)
        out = self.act(out)
        return out


class WFPN(nn.Module):
    """Weighted Feature Pyramid Network (from EPBC-YOLOv8).

    Projects a list of multi-level features to a common channel count,
    upsamples them to the finest resolution, fuses them with learned
    per-level softmax weights, then re-projects the fused map back to each
    original resolution — strengthening cross-scale feature correlation.
    """

    def __init__(self, in_channels_list, out_channels, num_levels=3):
        super().__init__()
        self.num_levels = num_levels

        # One learnable scalar per pyramid level (softmax-normalised in forward).
        self.level_weights = nn.Parameter(torch.ones(num_levels))

        # 1x1 projections that bring every level to `out_channels`.
        self.align_convs = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(ch, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.SiLU(inplace=True),
            )
            for ch in in_channels_list
        ])

        # 3x3 conv that mixes the concatenated weighted levels.
        self.fusion_conv = nn.Sequential(
            nn.Conv2d(out_channels * num_levels, out_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.SiLU(inplace=True),
        )

        # Per-level refinement applied after resizing back.
        self.output_convs = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.SiLU(inplace=True),
            )
            for _ in range(num_levels)
        ])

    def forward(self, features):
        """Fuse `features` (e.g. [P3, P4, P5]) and return one enhanced map per level.

        Args:
            features: list of `num_levels` tensors, finest resolution first.

        Returns:
            list of enhanced feature maps, one per input level, each with
            `out_channels` channels at that level's original spatial size.
        """
        assert len(features) == self.num_levels

        # Project every level and upsample to the finest (first) resolution.
        full_res = features[0].shape[2:]
        aligned = []
        for conv, feat in zip(self.align_convs, features):
            proj = conv(feat)
            if proj.shape[2:] != full_res:
                proj = F.interpolate(proj, size=full_res, mode='bilinear', align_corners=False)
            aligned.append(proj)

        # Softmax-weighted concatenation followed by the mixing conv.
        level_w = F.softmax(self.level_weights, dim=0)
        fused = self.fusion_conv(
            torch.cat([feat * level_w[i] for i, feat in enumerate(aligned)], dim=1)
        )

        # Resize the fused map back to each level's resolution and refine it.
        results = []
        for conv, feat in zip(self.output_convs, features):
            level_out = fused
            if level_out.shape[2:] != feat.shape[2:]:
                level_out = F.interpolate(level_out, size=feat.shape[2:], mode='bilinear', align_corners=False)
            results.append(conv(level_out))
        return results


class MASM(nn.Module):
    """Multi-scale Attention Synergy Module (from BSE-YOLO).

    Extracts depthwise features at several kernel sizes, re-weights the
    concatenated result with channel and spatial attention, projects it back
    to the input width and adds a residual connection — improving the head's
    ability to separate densely packed objects.
    """

    def __init__(self, channels, num_scales=3, reduction=8):
        super().__init__()
        self.num_scales = num_scales

        # Depthwise branches with growing receptive fields (3, 5, 7, ...),
        # padded so every branch keeps the spatial size.
        self.scale_convs = nn.ModuleList([
            nn.Conv2d(channels, channels, kernel_size=2 * i + 3, padding=i + 1,
                      groups=channels, bias=False)
            for i in range(num_scales)
        ])

        # Squeeze-and-excitation style channel attention over the concat.
        self.channel_attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels * num_scales, channels // reduction, 1, bias=False),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels * num_scales, 1, bias=False),
            nn.Sigmoid()
        )

        # Spatial attention computed from the per-scale channel means.
        self.spatial_attention = nn.Sequential(
            nn.Conv2d(num_scales, 1, 7, padding=3, bias=False),
            nn.Sigmoid()
        )

        # Projects the attended concat back to `channels`.
        self.fusion = nn.Sequential(
            nn.Conv2d(channels * num_scales, channels, 1, bias=False),
            nn.BatchNorm2d(channels),
            nn.SiLU(inplace=True)
        )

    def forward(self, x):
        """Return the attention-refined features plus the residual input."""
        # Multi-scale depthwise features.
        branch_outputs = [conv(x) for conv in self.scale_convs]
        stacked = torch.cat(branch_outputs, dim=1)

        # Channel attention on the concatenated branches.
        stacked = stacked * self.channel_attention(stacked)

        # Spatial attention from one mean map per branch.
        mean_maps = torch.cat(
            [branch.mean(dim=1, keepdim=True) for branch in branch_outputs], dim=1
        )
        attn = self.spatial_attention(mean_maps)
        # Broadcast the single attention plane across every channel.
        stacked = stacked * attn.repeat(1, stacked.shape[1], 1, 1)

        # Fuse back to the input width and add the residual connection.
        return self.fusion(stacked) + x


class AMSPVConv(nn.Module):
    """Vortex convolution (AMSP-VConv, from AMSP-UOD) for noise robustness.

    Two parallel conv branches ("standard" and "vortex" — the vortex branch
    is simplified here to an independently initialised conv) whose outputs
    are concatenated, batch-normalised and activated.

    Fix: both branches previously produced ``out_channels // 2`` features,
    so an odd ``out_channels`` yielded ``out_channels - 1`` concat channels
    and crashed ``BatchNorm2d(out_channels)``.  The vortex branch now takes
    the remainder, so any ``out_channels`` works.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super().__init__()
        half = out_channels // 2

        # Standard convolution branch.
        self.standard_conv = nn.Conv2d(in_channels, half, kernel_size, stride, padding, bias=False)

        # Vortex branch — takes the remaining channels so the concat always
        # matches out_channels, even when out_channels is odd.
        self.vortex_conv = nn.Conv2d(in_channels, out_channels - half, kernel_size, stride, padding, bias=False)

        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.SiLU(inplace=True)

    def forward(self, x):
        """Return BN+SiLU of the concatenated branch features.

        Args:
            x: input tensor of shape (B, in_channels, H, W).

        Returns:
            Tensor of shape (B, out_channels, H_out, W_out).
        """
        # Standard features.
        standard_feat = self.standard_conv(x)

        # Vortex features (simplified: a conv with independent initialisation).
        vortex_feat = self.vortex_conv(x)

        # Concatenate branches, then normalise and activate.
        out = torch.cat([standard_feat, vortex_feat], dim=1)
        out = self.bn(out)
        out = self.act(out)

        return out


class EdgeEnhancementModule(nn.Module):
    """Multi-scale edge information selection module (from FEB-YOLOv8).

    Runs a fixed Sobel filter over the channel-mean "grayscale" of the
    input, broadcasts the gradient magnitude back to every channel, and uses
    a learned per-channel gate to decide how much of the edge-enhanced
    signal is added back to the original features.
    """

    def __init__(self, channels):
        super().__init__()

        # Fixed Sobel kernels registered as buffers: they follow the module
        # across devices but are excluded from the optimiser.
        self.register_buffer('sobel_x', torch.tensor([
            [-1, 0, 1],
            [-2, 0, 2],
            [-1, 0, 1]
        ], dtype=torch.float32).view(1, 1, 3, 3))

        self.register_buffer('sobel_y', torch.tensor([
            [-1, -2, -1],
            [0, 0, 0],
            [1, 2, 1]
        ], dtype=torch.float32).view(1, 1, 3, 3))

        # Maps [features ‖ edges] to an edge-enhanced feature map.
        self.edge_conv = nn.Sequential(
            nn.Conv2d(channels * 2, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.SiLU(inplace=True)
        )

        # Per-channel gate deciding how strongly edges are mixed in.
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels * 2, channels, 1, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Return `x` plus the gated, edge-enhanced features (same shape as `x`)."""
        num_channels = x.shape[1]

        # Collapse channels into one "grayscale" plane for edge detection.
        gray = torch.mean(x, dim=1, keepdim=True)

        # Sobel gradient magnitude.
        grad_x = F.conv2d(gray, self.sobel_x, padding=1)
        grad_y = F.conv2d(gray, self.sobel_y, padding=1)
        magnitude = torch.sqrt(grad_x ** 2 + grad_y ** 2)

        # Broadcast the edge map to every channel and pair it with the input.
        paired = torch.cat([x, magnitude.repeat(1, num_channels, 1, 1)], dim=1)

        # Gated residual mix of the edge-enhanced features.
        return x + self.edge_conv(paired) * self.gate(paired)


class EnhancedDetectionHead(nn.Module):
    """Detection head augmented with MASM attention and edge enhancement.

    Refines the incoming feature map with `MASM` then
    `EdgeEnhancementModule`, and produces per-location class logits and
    (x, y, w, h) box regressions from two parallel branches.
    """

    def __init__(self, in_channels, num_classes=4):
        super().__init__()

        # Multi-scale attention over the incoming feature map.
        self.masm = MASM(in_channels, num_scales=3, reduction=8)

        # Edge-aware feature refinement.
        self.edge_enhance = EdgeEnhancementModule(in_channels)

        # Classification branch: 3x3 refinement then 1x1 class logits.
        self.cls_conv = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(in_channels),
            nn.SiLU(inplace=True),
            nn.Conv2d(in_channels, num_classes, 1)
        )

        # Regression branch: 3x3 refinement then 1x1 box parameters (x, y, w, h).
        self.reg_conv = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(in_channels),
            nn.SiLU(inplace=True),
            nn.Conv2d(in_channels, 4, 1)  # x, y, w, h
        )

    def forward(self, x):
        """Return (class logits, box regression) for feature map `x`."""
        # Attention refinement followed by edge enhancement.
        refined = self.edge_enhance(self.masm(x))

        # Parallel classification and regression predictions.
        return self.cls_conv(refined), self.reg_conv(refined)


def create_dcnv3_backbone(in_channels=3, base_channels=64):
    """Build a small DCNv3-based backbone: a stem plus two stages.

    Each stage refines at the current resolution and then downsamples,
    doubling the channel width (overall stride 16, final width 8x base).
    """
    c1 = base_channels
    c2 = base_channels * 2
    c3 = base_channels * 4
    c4 = base_channels * 8
    return nn.Sequential(
        # Stem: two stride-2 layers (4x downsampling).
        DCNv3(in_channels, c1, kernel_size=6, stride=2, padding=2),
        DCNv3(c1, c2, kernel_size=3, stride=2, padding=1),

        # Stage 1: refine, then downsample to stride 8.
        DCNv3(c2, c2, kernel_size=3, stride=1, padding=1),
        DCNv3(c2, c3, kernel_size=3, stride=2, padding=1),

        # Stage 2: refine, then downsample to stride 16.
        DCNv3(c3, c3, kernel_size=3, stride=1, padding=1),
        DCNv3(c3, c4, kernel_size=3, stride=2, padding=1),
    )


if __name__ == "__main__":
    print("=" * 70)
    print("测试增强检测模块")
    print("=" * 70)
    
    # 测试DCNv3
    print("\n1. 测试DCNv3:")
    dcn = DCNv3(64, 128, kernel_size=3)
    x = torch.randn(1, 64, 40, 40)  # 减小batch size和尺寸
    out = dcn(x)
    print(f"   输入: {x.shape} -> 输出: {out.shape}")
    
    # 测试WFPN
    print("\n2. 测试WFPN:")
    wfpn = WFPN([256, 512, 1024], 256, num_levels=3)
    features = [
        torch.randn(1, 256, 40, 40),
        torch.randn(1, 512, 20, 20),
        torch.randn(1, 1024, 10, 10)
    ]
    outputs = wfpn(features)
    print(f"   输入尺寸: {[f.shape for f in features]}")
    print(f"   输出尺寸: {[o.shape for o in outputs]}")
    
    # 测试MASM
    print("\n3. 测试MASM:")
    masm = MASM(256, num_scales=3)
    x = torch.randn(1, 256, 40, 40)
    out = masm(x)
    print(f"   输入: {x.shape} -> 输出: {out.shape}")
    
    # 测试边缘增强
    print("\n4. 测试边缘增强模块:")
    edge_module = EdgeEnhancementModule(256)
    x = torch.randn(1, 256, 40, 40)
    out = edge_module(x)
    print(f"   输入: {x.shape} -> 输出: {out.shape}")
    
    # 测试检测头
    print("\n5. 测试增强检测头:")
    head = EnhancedDetectionHead(256, num_classes=4)
    x = torch.randn(1, 256, 40, 40)
    cls_out, reg_out = head(x)
    print(f"   输入: {x.shape}")
    print(f"   分类输出: {cls_out.shape}")
    print(f"   回归输出: {reg_out.shape}")
    
    # 统计参数量
    total_params = sum(p.numel() for p in head.parameters())
    print(f"\n📊 检测头参数量: {total_params:,} ({total_params/1e6:.2f}M)")
    
    print("\n" + "=" * 70)
    print("✅ 增强检测模块测试完成")
    print("=" * 70)


