import torch
import torch.nn as nn
import torch.nn.functional as F
from .conv import Conv                                    
import warnings
# NOTE(review): this silences *all* warnings process-wide for any program that
# merely imports this module — consider scoping via warnings.catch_warnings()
# or filtering specific categories instead.
warnings.filterwarnings("ignore")
# Public API of this module.
__all__ = ("LightDA","C2f_lightda",
)


def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: tensor of shape (B, C, H, W); H and W must be divisible by
            ``window_size``.
        window_size: side length of each window.

    Returns:
        Tensor of shape (B * num_windows, C, window_size, window_size),
        windows ordered batch-first, then row-major over the window grid.
    """
    batch, channels, height, width = x.shape
    grid_h = height // window_size
    grid_w = width // window_size
    # Expose the window grid as explicit axes, move it in front of channels,
    # then collapse (batch, grid_h, grid_w) into one leading dimension.
    tiles = x.view(batch, channels, grid_h, window_size, grid_w, window_size)
    tiles = tiles.permute(0, 2, 4, 1, 3, 5).contiguous()
    return tiles.view(-1, channels, window_size, window_size)

def window_reverse(windows, window_size, H, W):
    """Inverse of :func:`window_partition` — reassemble windows into a map.

    Args:
        windows: tensor of shape (B * num_windows, C, window_size, window_size)
            in the ordering produced by ``window_partition``.
        window_size: side length of each window.
        H, W: spatial size of the reassembled map (divisible by window_size).

    Returns:
        Tensor of shape (B, C, H, W).
    """
    grid_h = H // window_size
    grid_w = W // window_size
    batch = windows.shape[0] // (grid_h * grid_w)
    channels = windows.shape[1]
    # Re-expand the window grid, interleave it back with the intra-window
    # axes, and merge into full spatial dimensions.
    grid = windows.view(batch, grid_h, grid_w, channels, window_size, window_size)
    grid = grid.permute(0, 3, 1, 4, 2, 5).contiguous()
    return grid.view(batch, channels, H, W)

class DynamicWeightGenerator(nn.Module):
    """Produce Q/K features as an input-conditioned blend of conv branches.

    Several depthwise 3x3 branches each transform the input; a global-pooled
    gating head scores the branches with a softmax, and each branch output is
    pushed through a shared 1x1 projection that doubles the channel count
    (query + key). The per-branch results are fused by the softmax weights.

    Args:
        in_dim: number of input channels C.
        num_kernels: number of depthwise branches to blend.

    Forward returns a tensor of shape (B, 2*C, H, W).
    """

    def __init__(self, in_dim, num_kernels=4):
        super().__init__()
        # Depthwise 3x3 branches (module creation order kept stable so that
        # seeded parameter initialization is reproducible).
        self.convs = nn.ModuleList(
            nn.Conv2d(in_dim, in_dim, 3, padding=1, groups=in_dim)
            for _ in range(num_kernels)
        )
        # Gating head: global average pool -> 1x1 conv -> softmax over branches.
        self.weight_net = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_dim, num_kernels, 1),
            nn.Softmax(dim=1),
        )
        # Shared 1x1 projection producing query+key channels.
        self.qk_gen = nn.Conv2d(in_dim, in_dim * 2, 1)

    def forward(self, x):
        branch_scores = self.weight_net(x)  # (B, K, 1, 1), softmax over K
        # Each branch yields 2*C channels after the shared QK projection.
        per_branch = [self.qk_gen(branch(x)) for branch in self.convs]
        stacked = torch.stack(per_branch, dim=1)  # (B, K, 2C, H, W)
        # Weighted fusion across the branch axis.
        return (branch_scores.unsqueeze(2) * stacked).sum(dim=1)

class LightDA(nn.Module):
    """Lightweight dual attention.

    Combines (a) windowed spatial self-attention whose Q/K come from a
    :class:`DynamicWeightGenerator` and whose V is a LayerNorm'd copy of the
    input, with (b) a squeeze-excitation style channel gate applied to the
    raw input. Output = gated input + spatial-attention context.

    Args:
        in_dim: number of input/output channels C.
        window_size: side length of the attention windows.

    NOTE(review): LayerNorm/attention also see the zero-padded border pixels
    of edge windows before cropping — presumed acceptable; confirm if exact
    edge behavior matters.
    """

    def __init__(self, in_dim, window_size=7):
        super().__init__()
        self.window_size = window_size
        self.in_dim = in_dim
        # Dynamic Q/K generator (outputs 2*C channels).
        self.dynamic_attn = DynamicWeightGenerator(in_dim)

        # SE-style channel gate with a 4x reduction bottleneck.
        self.channel_attn = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_dim, in_dim // 4, 1),
            nn.ReLU(),
            nn.Conv2d(in_dim // 4, in_dim, 1),
            nn.Sigmoid(),
        )

        # Channel-wise LayerNorm for the value path.
        self.norm = nn.LayerNorm(in_dim)

    def pad_to_window_size(self, x):
        """Zero-pad bottom/right so H and W divide evenly by window_size.

        Returns:
            (padded tensor, padded height, padded width)
        """
        _, _, h, w = x.shape
        ws = self.window_size
        pad_h = (-h) % ws  # amount needed to reach the next multiple of ws
        pad_w = (-w) % ws
        return F.pad(x, (0, pad_w, 0, pad_h)), h + pad_h, w + pad_w

    def forward(self, x):
        B, C, H, W = x.shape
        ws = self.window_size

        # Pad so the window grid tiles the map exactly.
        x_pad, h_pad, w_pad = self.pad_to_window_size(x)

        # Dynamically generated query/key, C channels each.
        q, k = torch.chunk(self.dynamic_attn(x_pad), 2, dim=1)

        # Per-window token matrices: (B*num_windows, C, ws*ws).
        q_flat = window_partition(q, ws).flatten(2)
        k_flat = window_partition(k, ws).flatten(2)

        # Spatial affinity within each window, scaled softmax over keys.
        scores = torch.bmm(q_flat.transpose(1, 2), k_flat)  # (B*N, S, S)
        scores = F.softmax(scores / (self.in_dim ** 0.5), dim=-1)

        # Value path: LayerNorm over channels (applied in NHWC, restored).
        v = self.norm(x_pad.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        v_flat = window_partition(v, ws).flatten(2)  # (B*N, C, S)

        # Attention-weighted aggregation, back to (B*N, C, ws, ws).
        ctx = torch.bmm(scores, v_flat.transpose(1, 2))  # (B*N, S, C)
        ctx = ctx.view(-1, ws, ws, C).permute(0, 3, 1, 2).contiguous()

        # Stitch windows together, then crop away the padding.
        ctx = window_reverse(ctx, ws, h_pad, w_pad)
        ctx = ctx[:, :, :H, :W]

        # Channel-gated input plus spatial context.
        channel_gate = self.channel_attn(x)
        return x * channel_gate + ctx

class Bottleneck_lightda(nn.Module):
    """Bottleneck (1x1 -> 3x3 conv) followed by LightDA attention.

    Args:
        c1: input channels.
        c2: output channels.
        shortcut: add the residual when c1 == c2.
        g: groups for the 3x3 conv.
        e: hidden-channel expansion ratio.
    """

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        super().__init__()
        hidden = int(c2 * e)  # bottleneck width
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        self.lightda = LightDA(c2)
        # Residual only valid when shapes match.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.lightda(self.cv2(self.cv1(x)))
        return x + y if self.add else y

class C2f_lightda(nn.Module):
    """C2f block whose bottlenecks are augmented with LightDA attention.

    Splits the stem features in two, threads one half through ``n`` stacked
    LightDA bottlenecks, and fuses every intermediate with a final 1x1 conv.

    Args:
        c1: input channels.
        c2: output channels.
        n: number of stacked bottlenecks.
        shortcut: residual connections inside the bottlenecks.
        g: conv groups for the bottlenecks.
        e: hidden-channel ratio.
    """

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__()
        self.c = int(c2 * e)  # per-branch width
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        # Fuses the two stem halves plus every bottleneck output.
        self.cv2 = Conv((2 + n) * self.c, c2, 1)
        self.m = nn.ModuleList(
            Bottleneck_lightda(self.c, self.c, shortcut, g, e=1.0)
            for _ in range(n)
        )

    def forward(self, x):
        first, second = self.cv1(x).chunk(2, 1)
        feats = [first, second]
        # Each bottleneck consumes the most recent feature map.
        for block in self.m:
            feats.append(block(feats[-1]))
        return self.cv2(torch.cat(feats, 1))