import torch
import torch.nn as nn
import math

def autopad(k, p=None, d=1):  # kernel, padding, dilation
    """Return 'same' padding for a kernel size, honoring dilation.

    If p is given it is returned unchanged; otherwise padding is half the
    effective (dilated) kernel size, element-wise for sequences.
    """
    if d > 1:
        # Effective kernel size under dilation: d * (k - 1) + 1
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (x - 1) + 1 for x in k]
    if p is not None:
        return p
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]

class Conv(nn.Module):
    """Standard convolution block: Conv2d -> BatchNorm2d -> activation.

    act=True uses the class default activation (SiLU); an nn.Module is used
    as-is; anything else disables activation (Identity).
    """
    default_act = nn.SiLU()  # default activation

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        if act is True:
            self.act = self.default_act
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        y = self.conv(x)
        y = self.bn(y)
        return self.act(y)

class SEBlock(nn.Module):
    """Squeeze-and-Excitation channel attention (Hu et al. style).

    Globally average-pools to a per-channel descriptor, passes it through a
    bottleneck MLP with sigmoid output, and rescales each input channel.
    """

    def __init__(self, c, r=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(c, c // r, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(c // r, c, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, chans = x.shape[:2]
        # Squeeze to (b, c), excite through the MLP, and gate in (0, 1)
        weights = self.fc(self.avg_pool(x).reshape(batch, chans))
        # Broadcast-multiply each channel by its learned weight
        return x * weights.reshape(batch, chans, 1, 1)

class GhostConv(nn.Module):
    """Ghost convolution (GhostNet): a dense conv produces half of the output
    channels and a cheap 3x3 depthwise conv generates the other half from
    them; the two halves are concatenated.

    Bug fixed: the original emitted c2 channels from EACH branch and
    concatenated them, so the module produced 2*c2 output channels and broke
    the c1 -> c2 contract implied by the constructor. Each branch now emits
    c2 // 2 channels (c2 must be even), matching the canonical GhostConv.
    """

    def __init__(self, c1, c2, k=1, s=1, p=None, d=1, act=True):
        super().__init__()
        c_ = c2 // 2  # channels per branch; concatenation restores c2
        self.primary_conv = nn.Sequential(
            nn.Conv2d(c1, c_, k, s, autopad(k, p, d), groups=1, bias=False),
            nn.BatchNorm2d(c_),
            nn.SiLU() if act else nn.Identity()
        )
        # Cheap operation: depthwise 3x3 over the primary features
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(c_, c_, 3, 1, 1, groups=c_, bias=False),
            nn.BatchNorm2d(c_),
            nn.SiLU() if act else nn.Identity()
        )

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        return torch.cat([x1, x2], dim=1)

class BiFPN(nn.Module):
    """Simplified bidirectional-FPN fusion over three pyramid levels.

    Expects p4 and p5 with c1 channels and p3 with c2 channels; performs a
    top-down fusion via nearest-neighbor upsampling and returns a c2-channel
    map at p3 resolution.
    """

    def __init__(self, c1, c2):
        super().__init__()
        self.conv1 = Conv(c1, c2, 1)
        self.conv2 = Conv(c1, c2, 1)
        self.conv3 = Conv(c2, c2, 3, 1, 1)

    def forward(self, p3, p4, p5):
        p4_td = self.conv1(p4)
        p5_td = self.conv2(p5)
        # Top-down pathway: upsample coarser level and add into the finer one
        p4_td = p4_td + nn.functional.interpolate(p5_td, scale_factor=2, mode='nearest')
        p3_td = p3 + nn.functional.interpolate(p4_td, scale_factor=2, mode='nearest')
        return self.conv3(p3_td)

class CoordAtt(nn.Module):
    """Coordinate attention: factorizes spatial attention into per-row and
    per-column descriptors and gates the input along both axes."""

    def __init__(self, inp, oup, reduction=32):
        super().__init__()
        # Pool along one spatial axis at a time
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))  # -> (n, c, h, 1)
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))  # -> (n, c, 1, w)

        mid = max(8, inp // reduction)

        self.conv1 = nn.Conv2d(inp, mid, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(mid)
        self.act = nn.SiLU()

        self.conv_h = nn.Conv2d(mid, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mid, oup, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        n, c, h, w = x.size()

        # Encode each axis; stack both descriptors along the "height" dim so
        # a single 1x1 conv processes them jointly.
        col_desc = self.pool_h(x)                      # (n, c, h, 1)
        row_desc = self.pool_w(x).permute(0, 1, 3, 2)  # (n, c, w, 1)
        joint = torch.cat((col_desc, row_desc), dim=2)
        joint = self.act(self.bn1(self.conv1(joint)))

        # Split back into the two axis descriptors and build sigmoid gates
        col_desc, row_desc = joint.split([h, w], dim=2)
        attn_h = self.conv_h(col_desc).sigmoid()
        attn_w = self.conv_w(row_desc.permute(0, 1, 3, 2)).sigmoid()

        return x * attn_h * attn_w

class RepConv(nn.Module):
    """RepVGG-style block: parallel 3x3 and 1x1 conv branches are summed, then
    passed through BatchNorm and activation. fuse() merges both branches into
    a single conv for inference.

    Fixes over the original fuse():
    - It referenced `F` (torch.nn.functional) which was never imported
      (NameError); nn.functional is used instead.
    - Both branch convs are bias-free (Conv builds them with bias=False), so
      `conv_3x3.conv.bias + conv_1x1.conv.bias` was `None + None` (TypeError).
    - Each branch's BatchNorm was ignored, so the fused conv did not reproduce
      the un-fused computation. Branch BN statistics are now folded into the
      fused kernel/bias (valid in eval mode), and forward() switches to the
      fused conv once it exists.
    Note: fuse() assumes the default k=3 and g=1 (as the original did).
    """

    def __init__(self, c1, c2, k=3, s=1, p=None, g=1, d=1, act=True):
        super().__init__()
        self.conv_3x3 = Conv(c1, c2, k, s, p, g, d, act=False)
        self.conv_1x1 = Conv(c1, c2, 1, s, 0, 1, 1, act=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.SiLU() if act else nn.Identity()
        self.conv = None  # populated by fuse()

    def forward(self, x):
        if self.conv is not None:  # fused inference path
            return self.act(self.bn(self.conv(x)))
        return self.act(self.bn(self.conv_3x3(x) + self.conv_1x1(x)))

    @staticmethod
    def _fold_bn(conv, bn):
        """Fold BatchNorm running stats into conv weights; returns (W, b)."""
        scale = bn.weight / (bn.running_var + bn.eps).sqrt()
        w = conv.weight * scale.reshape(-1, 1, 1, 1)
        b = bn.bias - bn.running_mean * scale
        if conv.bias is not None:
            b = b + conv.bias * scale
        return w, b

    def fuse(self):
        """Fuse the 3x3 and 1x1 branches (with their BNs) into one conv."""
        w3, b3 = self._fold_bn(self.conv_3x3.conv, self.conv_3x3.bn)
        w1, b1 = self._fold_bn(self.conv_1x1.conv, self.conv_1x1.bn)
        # Pad the 1x1 kernel to 3x3 so the two kernels can be summed
        kernel = w3 + nn.functional.pad(w1, [1, 1, 1, 1])
        bias = b3 + b1
        ref = self.conv_3x3.conv
        self.conv = nn.Conv2d(ref.in_channels,
                              ref.out_channels,
                              ref.kernel_size,
                              ref.stride,
                              ref.padding,
                              bias=True)
        self.conv.weight.data = kernel
        self.conv.bias.data = bias

class HGBlock(nn.Module):
    """HGNetV2-style block: 1x1 expand -> grouped 3x3 -> channel shuffle ->
    1x1 project -> CPCA attention, with an identity shortcut when stride == 1
    and c1 == c2.

    Fixes over the original:
    - `Conv` has no `groups` keyword (its parameter is named `g`), so
      construction raised TypeError.
    - `ChannelShuffle` was an undefined name; torch's nn.ChannelShuffle is
      used instead.
    - The block defined no forward(), so calling it raised
      NotImplementedError.
    """

    def __init__(self, c1, c2, stride=1, expansion=4):
        super().__init__()
        c_ = int(c2 * expansion)  # expanded hidden width
        self.shortcut = stride == 1 and c1 == c2

        # HGNetV2 flavor: grouped conv plus channel re-mixing
        self.conv1 = Conv(c1, c_, 1, 1)
        self.conv2 = Conv(c_, c_, 3, stride, g=c_ // 8)
        self.shuffle = nn.ChannelShuffle(8)  # mix channels across the 8 groups
        self.conv3 = Conv(c_, c2, 1, 1)
        self.attention = CPCA(c2)

    def forward(self, x):
        y = self.conv1(x)
        y = self.conv2(y)
        y = self.shuffle(y)
        y = self.conv3(y)
        y = self.attention(y)
        return x + y if self.shortcut else y

class MobileV4Block(nn.Module):
    """MobileNetV4-style inverted residual: 1x1 expand -> 3x3 depthwise ->
    CPCA attention -> 1x1 linear projection, with an identity shortcut when
    stride == 1 and c1 == c2.

    Fixes over the original:
    - `Conv` has no `groups` keyword (its parameter is named `g`), so
      construction raised TypeError.
    - The block defined no forward(), so calling it raised
      NotImplementedError.
    """

    def __init__(self, c1, c2, stride, expansion=4):
        super().__init__()
        c_ = int(c1 * expansion)  # expanded hidden width
        self.conv1 = Conv(c1, c_, 1)
        self.dwconv = Conv(c_, c_, 3, stride, g=c_)  # depthwise conv
        self.attention = CPCA(c_)
        self.conv2 = Conv(c_, c2, 1, act=False)  # linear bottleneck (no act)
        self.shortcut = stride == 1 and c1 == c2

    def forward(self, x):
        y = self.conv2(self.attention(self.dwconv(self.conv1(x))))
        return x + y if self.shortcut else y

class CPCA(nn.Module):
    """Channel-Point Cross Attention: gates the input by a single-channel
    spatial (point-wise) attention map and an SE-style channel gate."""

    def __init__(self, dim, reduction=8):
        super().__init__()
        # 1-channel spatial saliency map
        self.point_wise = nn.Conv2d(dim, 1, 1)
        # Squeeze-excite style channel gate
        self.channel_wise = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(dim, dim // reduction, 1),
            nn.SiLU(),
            nn.Conv2d(dim // reduction, dim, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        spatial_gate = torch.sigmoid(self.point_wise(x))  # (n, 1, h, w)
        channel_gate = self.channel_wise(x)               # (n, c, 1, 1)
        # Both gates lie in (0, 1) and broadcast over the input
        return x * spatial_gate * channel_gate

class HybridBlock(nn.Module):
    """Runs an HGNetV2 branch and a MobileNetV4 branch in parallel (each
    producing c2 // 2 channels), concatenates the results, and fuses them
    with a 1x1 conv back to c2 channels."""

    def __init__(self, c1, c2, stride=1):
        super().__init__()
        self.hg = HGBlock(c1, c2//2, stride)
        self.mv4 = MobileV4Block(c1, c2//2, stride)
        self.fusion = Conv(c2, c2, 1)

    def forward(self, x):
        branches = torch.cat((self.hg(x), self.mv4(x)), dim=1)
        return self.fusion(branches)

class YOLOv8Enhance(nn.Module):
    """YOLOv8Enhance object detection model.

    A YOLOv8-style detector: HybridBlock/C2f backbone with an SPPF tail, a
    BiFPN-based neck, and three 1x1 conv heads each predicting nc + 4 values
    per cell at strides 8, 16 and 32.

    NOTE(review): C2f and SPPF are not defined in this file — presumably
    provided by the surrounding YOLO codebase; confirm they are in scope.
    NOTE(review): the neck's BiFPN modules are invoked with a single tensor in
    forward() below, but BiFPN.forward(p3, p4, p5) takes three feature maps —
    as written this would raise a TypeError; the neck wiring needs review.
    """
    def __init__(self, nc=80, ch=3, model_size='n'):  # number of classes, input channels
        super().__init__()
        # Depth and width multipliers selected by model size
        depth_dict = {'n': 0.33, 's': 0.33, 'm': 0.67}
        width_dict = {'n': 0.25, 's': 0.50, 'm': 0.75}
        depth = depth_dict[model_size]
        # NOTE(review): width is computed but never used — the channel tuples
        # below already encode the per-size widths.
        width = width_dict[model_size]
        
        # Base channel counts per pyramid level (P1..P5)
        channels = {
            'n': (16, 32, 64, 128, 256),
            's': (32, 64, 128, 256, 512),
            'm': (48, 96, 192, 384, 768),
        }[model_size]

        # Backbone
        self.backbone = nn.ModuleList([
            # P1/2
            HybridBlock(ch, channels[0], stride=2),
            # P2/4
            HybridBlock(channels[0], channels[1], stride=2),
            C2f(channels[1], channels[1], round(3 * depth)),
            # P3/8
            HybridBlock(channels[1], channels[2], stride=2),
            C2f(channels[2], channels[2], round(6 * depth)),
            # P4/16
            HybridBlock(channels[2], channels[3], stride=2),
            C2f(channels[3], channels[3], round(6 * depth)),
            # P5/32
            HybridBlock(channels[3], channels[4], stride=2),
            C2f(channels[4], channels[4], round(3 * depth)),
            SPPF(channels[4], channels[4], k=5)
        ])

        # Neck
        self.neck = nn.ModuleList([
            # Top-down (upsampling) path
            Conv(channels[4], channels[3], 1, 1),
            nn.Upsample(None, 2, 'nearest'),
            BiFPN(channels[3] * 2, channels[3]),
            
            Conv(channels[3], channels[2], 1, 1),
            nn.Upsample(None, 2, 'nearest'),
            BiFPN(channels[2] * 2, channels[2]),
            
            # Bottom-up (downsampling) path
            Conv(channels[2], channels[2], 3, 2),
            BiFPN(channels[2] * 2, channels[3]),
            
            Conv(channels[3], channels[3], 3, 2),
            BiFPN(channels[3] * 2, channels[4])
        ])

        # Head: one 1x1 conv per scale, predicting nc class scores + 4 box values
        self.head = nn.ModuleList([
            nn.Conv2d(channels[2], nc + 4, 1),  # P3/8
            nn.Conv2d(channels[3], nc + 4, 1),  # P4/16
            nn.Conv2d(channels[4], nc + 4, 1)   # P5/32
        ])

        self.nc = nc
        # NOTE(review): reg_max (DFL channels) is declared but not used in the
        # code visible here — presumably consumed by an external loss/decoder.
        self.reg_max = 16  # DFL channels
        self.stride = torch.tensor([8., 16., 32.])
        
        # Initialize weights
        self.initialize_weights()

    def initialize_weights(self):
        """Initialize model weights: Kaiming-normal for convs, unit weight and
        zero bias for BatchNorm layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run backbone, neck and heads; returns a list of three raw
        prediction maps, one per scale."""
        # Backbone
        features = []
        for i, m in enumerate(self.backbone):
            if i in [2, 4, 6, 9]:  # stash pyramid features
                # NOTE(review): x is saved BEFORE module i runs, so these are
                # the *inputs* of stages 2/4/6/9, not their outputs — verify
                # this matches what the neck expects.
                features.append(x)
            x = m(x)
        features.append(x)
        
        # Neck
        fpn_features = []
        x = features[-1]
        for i, m in enumerate(self.neck):
            if i in [0, 3]:  # 1x1 conv before upsampling
                x = m(x)
            elif i in [1, 4]:  # upsample, then concat with a backbone feature
                x = m(x)
                # Index arithmetic: i=1 -> features[3], i=4 -> features[2]
                x = torch.cat([x, features[3-i//3]], 1)
            elif i in [6, 8]:  # downsample, then concat with an earlier FPN output
                x = m(x)
                # NOTE(review): i=6 and i=8 both resolve to fpn_features[0]
                # (6//3-2 == 8//3-2 == 0) — this looks like an indexing bug.
                x = torch.cat([x, fpn_features[i//3-2]], 1)
            else:  # BiFPN fusion stages (i in [2, 5, 7, 9])
                x = m(x)
                fpn_features.append(x)
        
        # Head
        # NOTE(review): fpn_features[0..2] come from neck stages 2/5/7, whose
        # channel counts (c3, c2, c3) do not match the head convs' expected
        # in-channels (c2, c3, c4) — verify the head indexing.
        outputs = []
        for i, m in enumerate(self.head):
            outputs.append(m(fpn_features[i]))
            
        return outputs

    def _make_grid(self, nx=20, ny=20):
        """Build an (nx*ny, 2) float tensor of (x, y) grid-cell coordinates."""
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)], indexing='ij')
        return torch.stack((xv, yv), 2).view(-1, 2).float()

def build_model(name='yolov8enhancen', nc=80):
    """Build a YOLOv8Enhance model from its registry name.

    Raises ValueError if the name is not recognized.
    """
    size_map = {
        'yolov8enhancen': 'n',
        'yolov8enhances': 's',
        'yolov8enhancem': 'm',
    }
    size = size_map.get(name)
    if size is None:
        raise ValueError(f"Unsupported model name: {name}")
    return YOLOv8Enhance(nc=nc, model_size=size)