import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch.nn import Softmax
from torch.nn import init
import einops
import warnings
warnings.filterwarnings("ignore")


# Public API; a list per PEP 8 convention (the original bare-comma form made a tuple).
__all__ = ["FA", "DA", "DySample"]

class EMSConv(nn.Module):
    """Efficient Multi-Scale Conv.

    Half of the channels are forwarded unchanged (the "cheap" branch); the
    other half is split into len(kernels) groups, each convolved with a
    different kernel size, and a final 1x1 conv fuses both halves.

    Args:
        channel: total input/output channels; must be divisible by
            2 * len(kernels).
        kernels: one kernel size per convolved group.
    """

    def __init__(self, channel=256, kernels=[3, 5]):
        super().__init__()
        self.groups = len(kernels)
        # Only channel // 2 channels are convolved, split evenly across the
        # groups.  The original hard-coded channel // 4, which is only correct
        # when len(kernels) == 2; this form generalizes while keeping the
        # default configuration identical.
        min_ch = (channel // 2) // self.groups
        assert min_ch >= 16, f'channel must be >= {32 * self.groups}, but got {channel}'

        self.convs = nn.ModuleList([])
        for ks in kernels:
            self.convs.append(Conv(c1=min_ch, c2=min_ch, k=ks))
        self.conv_1x1 = Conv(channel, channel, k=1)

    def forward(self, x):
        _, c, _, _ = x.size()
        # Cheap half passes through untouched; the other half goes multi-scale.
        x_cheap, x_group = torch.split(x, [c // 2, c // 2], dim=1)
        x_group = rearrange(x_group, 'bs (g ch) h w -> bs ch h w g', g=self.groups)
        x_group = torch.stack([self.convs[i](x_group[..., i]) for i in range(len(self.convs))])
        x_group = rearrange(x_group, 'g bs ch h w -> bs (g ch) h w')
        x = torch.cat([x_cheap, x_group], dim=1)
        return self.conv_1x1(x)

class EMSConvP(nn.Module):
    """Efficient Multi-Scale Conv Plus: split all channels into len(kernels)
    groups, convolve each group with a different kernel size, then fuse the
    groups with a 1x1 conv."""

    def __init__(self, channel=256, kernels=[1, 3, 5, 7]):
        super().__init__()
        self.groups = len(kernels)
        min_ch = channel // self.groups
        assert min_ch >= 16, f'channel must Greater than {16 * self.groups}, but {channel}'

        self.convs = nn.ModuleList(Conv(c1=min_ch, c2=min_ch, k=ks) for ks in kernels)
        self.conv_1x1 = Conv(channel, channel, k=1)

    def forward(self, x):
        # (bs, g*ch, h, w) -> (bs, ch, h, w, g): one channel slice per kernel.
        slices = rearrange(x, 'bs (g ch) h w -> bs ch h w g', g=self.groups)
        branches = [conv(slices[..., idx]) for idx, conv in enumerate(self.convs)]
        merged = rearrange(torch.stack(branches), 'g bs ch h w -> bs (g ch) h w')
        return self.conv_1x1(merged)

def normal_init(module, mean=0, std=1, bias=0):
    """Draw module.weight from N(mean, std) and set module.bias to a constant,
    when those attributes exist and are not None."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.normal_(weight, mean, std)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)


def constant_init(module, val, bias=0):
    """Fill module.weight with `val` and module.bias with `bias`, when those
    attributes exist and are not None."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)


class DySample(nn.Module):
    """Dynamic upsampling via learned sampling offsets (DySample).

    Produces a `scale`-times larger feature map by predicting per-position
    sampling offsets and resampling the input with ``F.grid_sample``.

    Args:
        in_channels: channels of the input feature map.
        scale: upsampling factor.
        style: 'lp' (predict offsets at the low resolution, then pixel-shuffle
            them up) or 'pl' (pixel-shuffle the input first, predict offsets
            at the high resolution).
        groups: number of offset groups; channels are split across groups.
        dyscope: if True, add a learned sigmoid gate ('scope') that modulates
            the offset magnitude.
    """

    def __init__(self, in_channels, scale=2, style='lp', groups=4, dyscope=False):
        super().__init__()
        self.scale = scale
        self.style = style
        self.groups = groups
        assert style in ['lp', 'pl']
        if style == 'pl':
            assert in_channels >= scale ** 2 and in_channels % scale ** 2 == 0
        assert in_channels >= groups and in_channels % groups == 0

        if style == 'pl':
            # After pixel-shuffle the channel count shrinks by scale**2.
            in_channels = in_channels // scale ** 2
            out_channels = 2 * groups
        else:
            out_channels = 2 * groups * scale ** 2

        # Predicts an (x, y) offset pair per group (and per sub-pixel in 'lp').
        self.offset = nn.Conv2d(in_channels, out_channels, 1)
        normal_init(self.offset, std=0.001)
        if dyscope:
            # Optional dynamic scope: sigmoid-gated scaling of the offsets.
            self.scope = nn.Conv2d(in_channels, out_channels, 1, bias=False)
            constant_init(self.scope, val=0.)

        self.register_buffer('init_pos', self._init_pos())

    def _init_pos(self):
        """Initial sub-pixel offset grid, reshaped to (1, -1, 1, 1)."""
        h = torch.arange((-self.scale + 1) / 2, (self.scale - 1) / 2 + 1) / self.scale
        # NOTE(review): torch.meshgrid is called without indexing=..., i.e. the
        # legacy 'ij' default — confirm against the torch version in use.
        return torch.stack(torch.meshgrid([h, h])).transpose(1, 2).repeat(1, self.groups, 1).reshape(1, -1, 1, 1)

    def sample(self, x, offset):
        """Resample x with grid_sample at positions shifted by `offset`.

        `offset` is (B, 2*offset_groups, H, W) in pixel units; positions are
        normalized to grid_sample's [-1, 1] range before sampling.
        """
        B, _, H, W = offset.shape
        offset = offset.view(B, 2, -1, H, W)
        # Pixel-center coordinates of the low-resolution grid.
        coords_h = torch.arange(H) + 0.5
        coords_w = torch.arange(W) + 0.5
        coords = torch.stack(torch.meshgrid([coords_w, coords_h])
                             ).transpose(1, 2).unsqueeze(1).unsqueeze(0).type(x.dtype).to(x.device)
        normalizer = torch.tensor([W, H], dtype=x.dtype, device=x.device).view(1, 2, 1, 1, 1)
        # Absolute sampling positions, normalized into [-1, 1].
        coords = 2 * (coords + offset) / normalizer - 1
        # Shuffle the coordinate field up to the target resolution, then fold
        # the offset groups into the batch dim for a single grid_sample call.
        coords = F.pixel_shuffle(coords.contiguous().view(B, -1, H, W), self.scale).contiguous().view(
            B, 2, -1, self.scale * H, self.scale * W).permute(0, 2, 3, 4, 1).contiguous().flatten(0, 1)
        return F.grid_sample(x.reshape(B * self.groups, -1, H, W), coords, mode='bilinear',
                             align_corners=False, padding_mode="border").view(B, -1, self.scale * H, self.scale * W)

    def forward_lp(self, x):
        """'lp' style: offsets predicted at the input (low) resolution."""
        if hasattr(self, 'scope'):
            offset = self.offset(x) * self.scope(x).sigmoid() * 0.5 + self.init_pos
        else:
            offset = self.offset(x) * 0.25 + self.init_pos
        return self.sample(x, offset)

    def forward_pl(self, x):
        """'pl' style: offsets predicted on the pixel-shuffled (high-res) map."""
        x_ = F.pixel_shuffle(x, self.scale)
        if hasattr(self, 'scope'):
            offset = F.pixel_unshuffle(self.offset(x_) * self.scope(x_).sigmoid(), self.scale) * 0.5 + self.init_pos
        else:
            offset = F.pixel_unshuffle(self.offset(x_), self.scale) * 0.25 + self.init_pos
        return self.sample(x, offset)

    def forward(self, x):
        """Dispatch to the configured upsampling style."""
        if self.style == 'pl':
            return self.forward_pl(x)
        return self.forward_lp(x)


def autopad(k, p=None, d=1):  # kernel, padding, dilation
    """Return padding that keeps the output spatial size equal to the input
    ('same' padding), accounting for dilation; an explicit p wins."""
    if d > 1:
        # Effective kernel size once dilation is applied.
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (v - 1) + 1 for v in k]
    if p is not None:
        return p
    return k // 2 if isinstance(k, int) else [v // 2 for v in k]
        
class Conv(nn.Module):
    """Conv2d + BatchNorm + activation
    (args: ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)."""
    default_act = nn.SiLU()  # used when act is True

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        if act is True:
            self.act = self.default_act
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Standard (unfused) path: conv -> BN -> activation."""
        return self.act(self.bn(self.conv(x)))

    def forward_fuse(self, x):
        """Path for when BN has been folded into the conv weights."""
        return self.act(self.conv(x))
class Bottleneck(nn.Module):
    """Standard bottleneck: 1x1 reduce -> 3x3 conv, with optional residual."""

    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        hidden = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, hidden, 1, 1)
        self.cv2 = Conv(hidden, c2, 3, 1, g=g)
        # Residual add is only valid when input/output channels match.
        self.add = shortcut and c1 == c2

    def forward(self, x):
        y = self.cv2(self.cv1(x))
        return x + y if self.add else y
class C2f(nn.Module):
    """Faster implementation of the CSP bottleneck with 2 convolutions."""

    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        """ch_in, ch_out, number of bottlenecks, shortcut, groups, expansion."""
        super().__init__()
        self.c = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, 2 * self.c, 1, 1)
        self.cv2 = Conv((2 + n) * self.c, c2, 1)  # optional act=FReLU(c2)
        self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, e=1.0) for _ in range(n))

    def forward(self, x):
        """Split cv1's output in two, run one half through the bottleneck
        chain, concatenate every intermediate, project with cv2."""
        parts = list(self.cv1(x).chunk(2, 1))
        for bottleneck in self.m:
            parts.append(bottleneck(parts[-1]))
        return self.cv2(torch.cat(parts, 1))

    def forward_split(self, x):
        """Same as forward() but using split() instead of chunk()."""
        parts = list(self.cv1(x).split((self.c, self.c), 1))
        for bottleneck in self.m:
            parts.append(bottleneck(parts[-1]))
        return self.cv2(torch.cat(parts, 1))
    


class ConvAttention(nn.Module):
    """Attention weights from (q, k) via a shared conv projection, an
    elementwise product, and a softmax over the last (width) dimension."""

    def __init__(self, in_dim, out_dim, kernel_size=1):
        super(ConvAttention, self).__init__()
        # NOTE: the same conv (shared weights) projects both q and k.
        self.conv = nn.Conv2d(in_dim, out_dim, kernel_size)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, q, k):
        # Compute attention weights by a shared conv projection of q and k.
        scores = self.conv(q) * self.conv(k)
        return self.softmax(scores)
    
    
class FA(nn.Module):
    """Cross-scale feature fusion with dot-product attention (upsampling path).

    Projects a shallow map x1 and a deep map x2 to output_dim channels,
    upsamples x2 with DySample to x1's resolution, attends between them, and
    refines the fused result with a stack of C2f blocks.

    Args:
        input_dim1: channels of the shallow input x1.
        input_dim2: channels of the deep input x2.
        output_dim: fused output channels (must be divisible by num_heads).
        num_c2f_layers: number of C2f refinement blocks.
        num_heads: attention head count.
    """

    def __init__(self, input_dim1, input_dim2, output_dim, num_c2f_layers, num_heads=1):
        super(FA, self).__init__()
        v_indim = output_dim * 2
        self.conv_v = Conv(v_indim, output_dim, k=1)  # currently unused in forward
        self.conv1_1 = Conv(input_dim1, output_dim, k=1)
        self.conv1_2 = Conv(input_dim2, output_dim, k=1)
        self.upsample = DySample(output_dim, scale=2, style='lp')
        self.c2f_layers = nn.ModuleList([C2f(output_dim, output_dim) for _ in range(num_c2f_layers)])
        self.Relu = nn.ReLU()
        self.num_heads = num_heads
        self.softmax = nn.Softmax(dim=-1)
        self.head_dim = output_dim // self.num_heads

    def forward(self, x1, x2):  # shallow x1 e.g. 256x40x40, deep x2 e.g. 512x20x20
        x1 = self.conv1_1(x1)
        x2 = self.conv1_2(x2)
        x2 = self.upsample(x2)  # bring x2 to x1's spatial size

        q, v1 = x1, x1
        k, v2 = x2, x2

        B, C1, H, W = q.shape
        _, C2, _, _ = k.shape

        q = q.view(B, C1, -1).transpose(1, 2)  # (B, H*W, C1)
        k = k.view(B, C2, -1).transpose(1, 2)  # (B, H*W, C2)
        trans_k = k.transpose(-1, -2)          # (B, C2, H*W)

        # BUG FIX: use true division for the scaled dot-product; the original
        # floor division (//) truncated the attention logits to integers.
        qk = torch.matmul(q, trans_k) / (self.head_dim ** 0.5)  # (B, H*W, H*W)
        qk = self.softmax(qk)

        v = torch.add(v1, v2)
        v = v.view(B, self.num_heads, self.head_dim, -1).transpose(2, 3)  # (B, heads, H*W, head_dim)

        qkv = torch.matmul(qk.unsqueeze(1), v)          # (B, heads, H*W, head_dim)
        qkv = qkv.transpose(2, 3).reshape(B, -1, H, W)  # (B, output_dim, H, W)

        out = qkv
        for c2f in self.c2f_layers:
            out = c2f(out)
        return out






# class FA(nn.Module):
#     #修改后的卷积FA
#     def __init__(self, input_dim1, input_dim2, output_dim, num_c2f_layers):
#         super(FA, self).__init__()

#         self.conv1_1 = nn.Conv2d(input_dim1, output_dim, kernel_size=1)
#         self.conv1_2 = nn.Conv2d(input_dim2, output_dim, kernel_size=1)
#         self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
#         self.c2f_layers = nn.ModuleList([C2f(output_dim, output_dim) for _ in range(num_c2f_layers)])
#         self.conv_attn = ConvAttention(output_dim, output_dim)
#         self.conv_final = nn.Conv2d(output_dim * 2, output_dim, kernel_size=1)
#         self.relu = nn.ReLU()

#     def forward(self, x1, x2):  # 浅层x1: 256*40*40, 深层x2: 512*20*20
#         x1 = self.conv1_1(x1)  # Q
#         x2 = self.conv1_2(x2)  # K
#         x2 = self.upsample(x2)  # 使深层特征图与浅层特征图尺寸一致

#         # 使用卷积计算注意力权重
#         attn_weights = self.conv_attn(x1, x2)
#         attn_applied = attn_weights * x2

#         # 拼接 Q 和 注意力应用后的 K 作为 V
#         v = torch.cat((x1, attn_applied), dim=1)
#         v = self.conv_final(v)  # 对拼接后的特征图进行卷积

#         out = v
#         for c2f in self.c2f_layers:
#             out = c2f(out)
#         out = self.relu(out)

#         return out




class DA(nn.Module):
    """Cross-scale feature fusion with dot-product attention (downsampling path).

    Projects x1 to output_dim channels, downsamples x2 via a stride-2 conv to
    x1's resolution, attends between them, and refines with C2f blocks.

    Args:
        input_dim1: channels of input x1.
        input_dim2: channels of input x2 (at 2x the spatial size of x1).
        output_dim: fused output channels (must be divisible by num_heads).
        num_c2f_layers: number of C2f refinement blocks.
        num_heads: attention head count.
    """

    def __init__(self, input_dim1, input_dim2, output_dim, num_c2f_layers, num_heads=1):
        super(DA, self).__init__()
        v_indim = output_dim * 2
        self.conv_v = Conv(v_indim, output_dim, k=1)  # currently unused in forward
        self.conv1_1 = Conv(input_dim1, output_dim, k=1)
        self.downsample = Conv(input_dim2, output_dim, k=3, s=2, p=1)
        self.c2f_layers = nn.ModuleList([C2f(output_dim, output_dim) for _ in range(num_c2f_layers)])
        self.Relu = nn.ReLU()
        self.num_heads = num_heads
        self.softmax = nn.Softmax(dim=-1)
        self.head_dim = output_dim // self.num_heads

    def forward(self, x1, x2):  # x1 e.g. 512x40x40, x2 e.g. 256x80x80
        x1 = self.conv1_1(x1)
        q, v1 = x1, x1

        x2 = self.downsample(x2)  # bring x2 to x1's spatial size
        k, v2 = x2, x2

        B, C1, H, W = q.shape
        _, C2, _, _ = k.shape

        q = q.view(B, C1, -1).transpose(1, 2)  # (B, H*W, C1)
        k = k.view(B, C2, -1).transpose(1, 2)  # (B, H*W, C2)
        trans_k = k.transpose(-1, -2)          # (B, C2, H*W)

        # BUG FIX: use true division for the scaled dot-product; the original
        # floor division (//) truncated the attention logits to integers.
        qk = torch.matmul(q, trans_k) / (self.head_dim ** 0.5)  # (B, H*W, H*W)
        qk = self.softmax(qk)

        v = torch.add(v1, v2)
        v = v.view(B, self.num_heads, self.head_dim, -1).transpose(2, 3)  # (B, heads, H*W, head_dim)

        qkv = torch.matmul(qk.unsqueeze(1), v)          # (B, heads, H*W, head_dim)
        qkv = qkv.transpose(2, 3).reshape(B, -1, H, W)  # (B, output_dim, H, W)

        out = qkv
        for c2f in self.c2f_layers:
            out = c2f(out)
        return out



# class DA(nn.Module):
#     # 修改后的DA
#     def __init__(self, input_dim1, input_dim2, output_dim, num_c2f_layers):
#         super(DA, self).__init__()

#         self.conv1_1 = nn.Conv2d(input_dim1, output_dim, kernel_size=1)
#         self.downsample = nn.Conv2d(input_dim2, output_dim, kernel_size=3, stride=2, padding=1)
#         self.c2f_layers = nn.ModuleList([C2f(output_dim, output_dim) for _ in range(num_c2f_layers)])
#         self.conv_attn = ConvAttention(output_dim, output_dim)
#         self.conv_final = nn.Conv2d(output_dim * 2, output_dim, kernel_size=1)
#         self.relu = nn.ReLU()

#     def forward(self, x1, x2):  # 浅层x1: 512*40*40, 深层x2: 256*80*80
#         x1 = self.conv1_1(x1)  # Q
#         x2 = self.downsample(x2)  # K

#         # 使用卷积计算注意力权重
#         attn_weights = self.conv_attn(x1, x2)
#         attn_applied = attn_weights * x2

#         # 拼接 Q 和 注意力应用后的 K 作为 V
#         v = torch.cat((x1, attn_applied), dim=1)
#         v = self.conv_final(v)  # 对拼接后的特征图进行卷积

#         out = v
#         for c2f in self.c2f_layers:
#             out = c2f(out)
#         out = self.relu(out)

#         return out


class EfficientAdditiveAttention(nn.Module):
    """Efficient additive attention (SwiftFormer-style).

    Queries are pooled into a single global descriptor that gates the keys;
    no quadratic token-by-token attention matrix is formed.

    Args:
        in_dims: input token dimension.
        token_dim: output token dimension.
        num_heads: multiplier for the internal projection width.
    """

    def __init__(self, in_dims=512, token_dim=256, num_heads=2):
        super().__init__()
        hidden = token_dim * num_heads
        self.to_query = nn.Linear(in_dims, hidden)
        self.to_key = nn.Linear(in_dims, hidden)
        self.scale_factor = token_dim ** -0.5
        self.Proj = nn.Linear(hidden, hidden)
        self.final = nn.Linear(hidden, token_dim)

    def forward(self, x):
        query = F.normalize(self.to_query(x), dim=-1)  # BxNxD
        key = F.normalize(self.to_key(x), dim=-1)      # BxNxD

        # Global descriptor: mean-pooled queries, scaled, then normalized
        # over the (singleton) token dim.
        A = F.normalize(torch.mean(query, dim=1, keepdim=True) * self.scale_factor, dim=1)  # Bx1xD
        G = torch.sum(A * query, dim=1)  # BxD

        # Broadcast the global descriptor over every token position.
        # IMPROVED: expand() is a zero-copy view, replacing einops.repeat
        # (which materialized a full copy) with a pure-torch equivalent.
        G = G.unsqueeze(1).expand(-1, key.shape[1], -1)  # BxNxD

        out = self.Proj(G * key) + query  # BxNxD
        return self.final(out)  # BxNx(token_dim)



class AxialAttention(nn.Module):
    """Self-attention over flattened spatial positions of a feature map.

    NOTE: this class is shadowed by a later AxialAttention definition in this
    file that has a different (q, k) interface; only the later one survives
    at import time.
    """

    def __init__(self, in_channels, reduction=16):
        super(AxialAttention, self).__init__()
        self.in_channels = in_channels
        self.reduction = reduction  # stored but not used in forward
        self.qkv_conv = nn.Conv1d(in_channels, in_channels * 3, kernel_size=1, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.output_conv = nn.Conv1d(in_channels, in_channels, kernel_size=1, bias=False)

    def forward(self, x):
        b, c, h, w = x.size()
        flat = x.view(b, c, -1)  # flatten the spatial dimensions
        q, k, v = torch.chunk(self.qkv_conv(flat), 3, dim=1)

        # Scaled dot-product attention over spatial positions.
        attn = self.softmax(torch.bmm(q.transpose(1, 2), k) / (self.in_channels ** 0.5))

        out = torch.bmm(v, attn.transpose(1, 2))  # apply weights to V
        out = self.output_conv(out)
        return out.view(b, c, h, w)  # restore the spatial layout


class HybridAttention(nn.Module):
    """Global spatial self-attention followed by channel attention and fusion.

    NOTE(review): several shape assumptions below only hold for particular
    (C, H, W) combinations — see the inline notes before relying on this
    module for arbitrary inputs.
    """

    def __init__(self, dim, num_heads, reduction_ratio=1):
        super(HybridAttention, self).__init__()
        self.num_heads = num_heads  # stored but not used in forward
        self.dim = dim
        self.reduction_ratio = reduction_ratio  # stored but not used in forward
        
        self.qkv_conv = nn.Conv2d(dim, dim * 3, kernel_size=1)
        self.attention_conv = nn.Conv2d(dim, dim, kernel_size=1)
        self.softmax = nn.Softmax(dim=-1)
        self.norm = nn.LayerNorm(dim)
        self.fc = nn.Linear(dim * 2, dim)

    def forward(self, x):
        B, C, H, W = x.size()

        # Step 1: Global Spatial Attention
        qkv = self.qkv_conv(x).view(B, 3 * C, -1).permute(0, 2, 1)
        q, k, v = torch.chunk(qkv, 3, dim=-1)
        attn_weights = torch.bmm(q, k.transpose(1, 2)) / (C ** 0.5)
        attn_weights = self.softmax(attn_weights)
        global_context = torch.bmm(attn_weights, v).permute(0, 2, 1).view(B, C, H, W)
        global_context = self.attention_conv(global_context)

        # Step 2: Channel Attention
        qkv_channel = self.qkv_conv(global_context).view(B, 3 * C, H * W).permute(0, 2, 1)
        q_channel, k_channel, v_channel = torch.chunk(qkv_channel, 3, dim=-1)
        # NOTE(review): operator precedence makes this divide by H * sqrt(W),
        # not sqrt(H * W) — confirm which scaling was intended.
        attn_weights_channel = torch.bmm(q_channel.transpose(1, 2), k_channel) / (H * W ** 0.5)
        attn_weights_channel = self.softmax(attn_weights_channel)
        channel_context = torch.bmm(attn_weights_channel, v_channel.transpose(1, 2)).view(B, C, H, W)

        # Step 3: Normalize and Concat
        global_context_norm = self.norm(global_context.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        channel_context_norm = self.norm(channel_context.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        combined_context = torch.cat((global_context_norm, channel_context_norm), dim=1)

        # Step 4: Fusion with Value
        # NOTE(review): the bmm below yields (B, 2C, C); the following
        # view(B, -1, H, W) only reshapes cleanly when 2*C*C is divisible by
        # H*W, and fc then requires exactly 2*dim resulting channels (e.g.
        # C == H*W) — verify intended shapes against a concrete caller.
        combined_context = combined_context.view(B, -1, H * W)
        fused_output = torch.bmm(combined_context, v.view(B, C, -1).permute(0, 2, 1)).view(B, -1, H, W)
        fused_output = self.fc(fused_output.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        return fused_output
    

class SEAttention(nn.Module):
    """Squeeze-and-Excitation channel attention: global average pool ->
    bottleneck MLP -> sigmoid gates, applied per channel."""

    def __init__(self, channel=512, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def init_weights(self):
        """Kaiming init for convs, unit/zero for BN, small-normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(b, c)
        gates = self.fc(squeezed).view(b, c, 1, 1)
        return x * gates.expand_as(x)



class RowAttention(nn.Module):
    """Row-wise attention: within each image row, positions attend across the
    width. Q comes from x1, K from x2, V from x3 (V must have out_dim channels)."""

    def __init__(self, in_dim_q, in_dim_k, out_dim):
        super(RowAttention, self).__init__()
        self.in_dim_q = in_dim_q
        self.in_dim_k = in_dim_k
        self.q_k_dim = in_dim_q  # assumes q and k share a dim; adjust if they differ

        self.in_dim_v = out_dim
        self.out_dim = out_dim

        self.query_conv = nn.Conv2d(in_channels=self.in_dim_q, out_channels=self.q_k_dim, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.in_dim_k, out_channels=self.q_k_dim, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.in_dim_v, out_channels=self.out_dim, kernel_size=1)
        self.softmax = Softmax(dim=2)
        self.gamma = nn.Parameter(torch.zeros(1))  # reserved for the disabled residual below

    def forward(self, x1, x2, x3):
        b, _, h, w = x1.size()

        q = self.query_conv(x1)  # (b, qk, h, w)
        k = self.key_conv(x2)    # (b, qk, h, w)
        v = self.value_conv(x3)  # (b, out, h, w)

        # Fold rows into the batch so each row attends over its w positions.
        q = q.permute(0, 2, 1, 3).contiguous().view(b * h, -1, w).permute(0, 2, 1)  # (b*h, w, qk)
        k = k.permute(0, 2, 1, 3).contiguous().view(b * h, -1, w)                   # (b*h, qk, w)
        v = v.permute(0, 2, 1, 3).contiguous().view(b * h, -1, w)                   # (b*h, out, w)

        attn = self.softmax(torch.bmm(q, k))       # (b*h, w, w)
        out = torch.bmm(v, attn.permute(0, 2, 1))  # (b*h, out, w)
        out = out.view(b, h, -1, w).permute(0, 2, 1, 3)

        # residual (disabled in original): out = self.gamma * out + x1
        return out


class ColAttention(nn.Module):
    """Column-wise attention: within each image column, positions attend
    across the height. Q from x1, K from x2, V from their channel concat."""

    def __init__(self, in_dim_q, in_dim_k, out_dim):
        super(ColAttention, self).__init__()
        self.in_dim_q = in_dim_q
        self.in_dim_k = in_dim_k
        self.in_dim_v = in_dim_q + in_dim_k  # V is built from cat(x1, x2)
        self.q_k_dim = in_dim_q  # assumes q and k share a dim; adjust if they differ

        self.out_dim = out_dim

        self.query_conv = nn.Conv2d(in_channels=self.in_dim_q, out_channels=self.q_k_dim, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.in_dim_k, out_channels=self.q_k_dim, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.in_dim_v, out_channels=self.out_dim, kernel_size=1)
        self.softmax = Softmax(dim=2)
        self.gamma = nn.Parameter(torch.zeros(1))  # reserved for the disabled residual below

    def forward(self, x1, x2):
        b, _, h, w = x1.size()

        q = self.query_conv(x1)
        k = self.key_conv(x2)
        v = self.value_conv(torch.cat((x1, x2), dim=1))

        # Fold columns into the batch so each column attends over its h positions.
        q = q.permute(0, 3, 1, 2).contiguous().view(b * w, -1, h).permute(0, 2, 1)  # (b*w, h, qk)
        k = k.permute(0, 3, 1, 2).contiguous().view(b * w, -1, h)                   # (b*w, qk, h)
        v = v.permute(0, 3, 1, 2).contiguous().view(b * w, -1, h)                   # (b*w, out, h)

        attn = self.softmax(torch.bmm(q, k))       # (b*w, h, h)
        out = torch.bmm(v, attn.permute(0, 2, 1))  # (b*w, out, h)
        out = out.view(b, w, -1, h).permute(0, 2, 3, 1)

        # residual (disabled in original): out = self.gamma * out + x1
        return out
    


class ChannelAttention(nn.Module):
    """Channel-wise cross attention between two feature maps.

    Q is projected from x1, K from x2, and V from their channel concat; the
    attention matrix is (channels x channels) rather than spatial."""

    def __init__(self, in_dim_q, in_dim_k, out_dim):
        super(ChannelAttention, self).__init__()
        self.in_dim_q = in_dim_q
        self.in_dim_k = in_dim_k
        self.in_dim_v = in_dim_q + in_dim_k  # V is built from cat(x1, x2)
        # q/k are projected to out_dim so the (out_dim x out_dim) attention
        # matrix can be applied to V (see the note in forward).
        self.q_k_dim = out_dim

        self.out_dim = out_dim

        self.query_conv = nn.Conv2d(in_channels=self.in_dim_q, out_channels=self.q_k_dim, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=self.in_dim_k, out_channels=self.q_k_dim, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=self.in_dim_v, out_channels=self.out_dim, kernel_size=1)
        self.softmax = Softmax(dim=-1)
        self.gamma = nn.Parameter(torch.zeros(1))  # reserved for the disabled residual below

    def forward(self, x1, x2):  # x1 provides the query, x2 the key
        b, _, h, w = x1.size()

        q = self.query_conv(x1)                              # (b, qk, h, w)
        k = self.key_conv(x2)                                # (b, qk, h, w)
        v = self.value_conv(torch.cat((x1, x2), dim=1))      # (b, out, h, w)

        q = q.permute(0, 3, 1, 2).contiguous().view(b, -1, h * w)                   # (b, qk, h*w)
        k = k.permute(0, 3, 1, 2).contiguous().view(b, -1, h * w).permute(0, 2, 1)  # (b, h*w, qk)
        v = v.permute(0, 3, 1, 2).contiguous().view(b, -1, h * w)                   # (b, out, h*w)

        chan_attn = self.softmax(torch.bmm(q, k))  # (b, qk, qk)
        # (b, h*w, out) @ (b, qk, qk): only lines up because q_k_dim was
        # forced to out_dim in __init__ (acknowledged in the original code);
        # revisit if q/k should keep their own dimension.
        out = torch.bmm(v.permute(0, 2, 1), chan_attn)
        out = out.view(b, w, -1, h).permute(0, 2, 3, 1)

        # residual (disabled in original): out = self.gamma * out + x1
        return out



class AxialAttention(nn.Module):
    """Column attention followed by row attention over a (q, k) pair.

    NOTE: this definition shadows the single-input AxialAttention declared
    earlier in this file; only this two-input version survives at import time.
    """

    def __init__(self, in_dim_q, in_dim_k, out_dim):
        super(AxialAttention, self).__init__()
        self.row_attention = RowAttention(in_dim_q, in_dim_k, out_dim)
        self.col_attention = ColAttention(in_dim_q, in_dim_k, out_dim)

    def forward(self, q, k):
        col_out = self.col_attention(q, k)
        return self.row_attention(q, k, col_out)



class h_sigmoid(nn.Module):
    """Hard sigmoid: relu6(x + 3) / 6, a piecewise-linear sigmoid surrogate."""

    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        shifted = self.relu(x + 3)
        return shifted / 6


class h_swish(nn.Module):
    """Hard swish: x * h_sigmoid(x)."""

    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        gate = self.sigmoid(x)
        return x * gate


class CoordAtt(nn.Module):
    """Coordinate attention: pool along H and W separately, mix the two
    directional descriptors through a shared bottleneck, and re-weight the
    input with per-direction sigmoid gates."""

    def __init__(self, inp, reduction=32):
        super(CoordAtt, self).__init__()
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))  # (n, c, h, 1)
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))  # (n, c, 1, w)

        mip = max(8, inp // reduction)  # bottleneck width, floor of 8

        self.conv1 = nn.Conv2d(inp, mip, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(mip)
        self.act = h_swish()

        self.conv_h = nn.Conv2d(mip, inp, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mip, inp, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        identity = x
        n, c, h, w = x.size()

        pooled_h = self.pool_h(x)                      # (n, c, h, 1)
        pooled_w = self.pool_w(x).permute(0, 1, 3, 2)  # (n, c, w, 1)

        # Process both directions through one shared bottleneck.
        mixed = torch.cat([pooled_h, pooled_w], dim=2)
        mixed = self.act(self.bn1(self.conv1(mixed)))

        split_h, split_w = torch.split(mixed, [h, w], dim=2)
        split_w = split_w.permute(0, 1, 3, 2)

        attn_h = self.conv_h(split_h).sigmoid()
        attn_w = self.conv_w(split_w).sigmoid()

        return identity * attn_w * attn_h


class CoorC_fusion(nn.Module):
    """Fuse two feature maps: CoordAtt on each input, ChannelAttention across
    the pair, then a final 1x1 projection."""

    def __init__(self, in_channels_q, in_channels_k, out_channels):
        super(CoorC_fusion, self).__init__()
        self.coor_q = CoordAtt(in_channels_q)
        self.coor_k = CoordAtt(in_channels_k)
        self.chanatt = ChannelAttention(in_channels_q, in_channels_k, out_channels)
        # NOTE(review): chanatt emits out_channels channels while conv_out
        # expects in_channels_q — confirm callers use matching values.
        self.conv_out = nn.Conv2d(in_channels_q , out_channels, kernel_size=1)

    def forward(self, feature_q, feature_k):
        attended_q = self.coor_q(feature_q)
        attended_k = self.coor_k(feature_k)
        fused = self.chanatt(attended_q, attended_k)
        return self.conv_out(fused)



class CAxial_fusion(nn.Module):
    """Fuse two feature maps: SE attention on each input, axial (column+row)
    attention across the pair, then a final 1x1 projection."""

    def __init__(self, in_channels_q, in_channels_k, out_channels):
        super(CAxial_fusion, self).__init__()
        self.se_q = SEAttention(in_channels_q)
        self.se_k = SEAttention(in_channels_k)
        self.axial_attention = AxialAttention(in_channels_q, in_channels_k, out_channels)
        # NOTE(review): axial_attention emits out_channels channels while
        # conv_out expects in_channels_q — confirm callers use matching values.
        self.conv_out = nn.Conv2d(in_channels_q , out_channels, kernel_size=1)

    def forward(self, feature_q, feature_k):
        attended_q = self.se_q(feature_q)
        attended_k = self.se_k(feature_k)
        fused = self.axial_attention(attended_q, attended_k)
        return self.conv_out(fused)




if __name__ == "__main__":
    # Smoke test for ChannelAttention with same-resolution inputs.
    input_dim1 = 128
    input_dim2 = 128
    output_dim = 256
    num_c2f_layers = 3
    x1 = torch.randn(1, input_dim1, 40, 40)  # shallow feature map
    x2 = torch.randn(1, input_dim2, 40, 40)  # deep feature map

    # fa_module = FA(input_dim1, input_dim2, output_dim, num_c2f_layers = 3)
    # output = fa_module(x1, x2)
    # print(output.shape)  # expected: (1, 512, 40, 40)
    chan_model = ChannelAttention(input_dim1, input_dim2, output_dim)
    output = chan_model(x1, x2)
    print(output.shape)



    # input_dim1 = 512
    # input_dim2 = 256
    # output_dim = 512
    # num_c2f_layers = 3
    # x1 = torch.randn(1, input_dim1, 40, 40)  # 浅层特征
    # x2 = torch.randn(1, input_dim2, 80, 80)  # 深层特征

    # da_module = DA(input_dim1, input_dim2, output_dim, num_c2f_layers = 3)
    # print(da_module.modules)
    # output = da_module(x1, x2)
    # print(output.shape)  # 输出应为 (1, 512, 40, 40)