from torch import nn
import torch
from torch.nn import functional as F

class CoTAttention(nn.Module):
    """Contextual-Transformer-style attention with a trailing 1x1 projection.

    A grouped k*k convolution extracts static context (keys); dynamic
    attention is derived from the concatenation [static context, input]
    and applied to a 1x1 value embedding.  ``output_conv`` then maps
    ``dim`` channels to ``out_dim`` so the block can change channel count.
    """

    def __init__(self, dim: int = 512, out_dim: int = 512, kernel_size: int = 3):
        """
        Args:
            dim: number of input channels (also the internal width).
            out_dim: number of channels returned by ``forward``.
            kernel_size: receptive field of the key embedding; the attention
                head predicts ``kernel_size**2`` logits per channel which are
                averaged in ``forward``.
        """
        super().__init__()
        self.dim = dim
        self.out_dim = out_dim
        self.kernel_size = kernel_size

        # Static context (keys): grouped k*k conv + BN + ReLU, spatial size preserved.
        self.key_embed = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size//2, groups=4, bias=False),
            nn.BatchNorm2d(dim),
            nn.ReLU()
        )
        # Value embedding: 1x1 conv + BN, no nonlinearity.
        self.value_embed = nn.Sequential(
            nn.Conv2d(dim, dim, 1, bias=False),
            nn.BatchNorm2d(dim)
        )

        # Attention head: channel bottleneck (reduction ``factor``) mapping the
        # concatenated [key, input] pair to k*k logits per channel.
        factor = 4
        self.attention_embed = nn.Sequential(
            nn.Conv2d(2*dim, 2*dim//factor, 1, bias=False),
            nn.BatchNorm2d(2*dim//factor),
            nn.ReLU(),
            nn.Conv2d(2*dim//factor, kernel_size*kernel_size*dim, 1)
        )

        self.output_conv = nn.Conv2d(dim, out_dim, 1)  # New layer to change output dimensions

    def forward(self, x):
        """Return attention-refined features.

        Args:
            x: feature map of shape (bs, dim, h, w).

        Returns:
            Tensor of shape (bs, out_dim, h, w).
        """
        bs, c, h, w = x.shape
        k1 = self.key_embed(x)  # static context: bs, c, h, w
        # print("k1",k1.size())
        v = self.value_embed(x).view(bs, c, -1)  # values flattened to bs, c, h*w
        # print("v",v.size())

        y = torch.cat([k1, x], dim=1)  # bs, 2c, h, w
        # print("y",y.size())
        att = self.attention_embed(y)  # bs, c*k*k, h, w
        # Average the k*k per-position logits, then softmax over the
        # flattened spatial positions (dim=-1 of a (bs, c, h*w) tensor).
        att = att.reshape(bs, c, self.kernel_size*self.kernel_size, h, w)
        att = att.mean(2, keepdim=False).view(bs, c, -1)  # bs, c, h*w
        k2 = F.softmax(att, dim=-1) * v
        k2 = k2.view(bs, c, h, w)

        # Fuse static (k1) and dynamic (k2) context, then project channels.
        out = k1 + k2
        out = self.output_conv(out)  # Apply the new layer to change output dimensions

        return out


class Partial_conv3(nn.Module):
    """FasterNet-style partial convolution followed by a channel-mixing Conv1d.

    Only the first ``dim // n_div`` channels pass through a 3x3 convolution;
    the remaining channels are left untouched.  A Conv1d over the flattened
    spatial axis then maps ``dim`` channels to ``o_dim``.
    """

    def __init__(self, dim, o_dim, n_div, forward):
        """
        Args:
            dim: number of input channels.
            o_dim: number of output channels after the Conv1d mix.
            n_div: divisor selecting how many channels are convolved
                (``dim // n_div``).
            forward: 'slicing' (in-place update, inference-friendly) or
                'split_cat' (split/concat, train- and inference-safe).

        Raises:
            NotImplementedError: for any other ``forward`` string.
        """
        super().__init__()
        self.dim_conv3 = dim // n_div
        self.dim_untouched = dim - self.dim_conv3
        self.partial_conv3 = nn.Conv2d(self.dim_conv3, self.dim_conv3, 3, 1, 1, bias=True)
        self.conv1d = nn.Conv1d(in_channels=dim, out_channels=o_dim, kernel_size=3, padding=1, bias=False)
        if forward == 'slicing':
            self.forward = self.forward_slicing
        elif forward == 'split_cat':
            self.forward = self.forward_split_cat
        else:
            raise NotImplementedError

    def _mix_channels(self, x):
        """Flatten (h, w), apply the dim -> o_dim Conv1d, restore spatial shape."""
        height, width = x.size(2), x.size(3)
        x = x.view(x.size(0), x.size(1), -1)
        x = self.conv1d(x)
        return x.view(x.size(0), x.size(1), height, width)

    def forward_slicing(self, x):
        # only for inference
        x = x.clone()  # !!! Keep the original input intact for the residual connection later
        x[:, :self.dim_conv3, :, :] = self.partial_conv3(x[:, :self.dim_conv3, :, :])
        # Bug fix: this path previously skipped the Conv1d channel mix, so it
        # returned ``dim`` channels while forward_split_cat returned ``o_dim``.
        return self._mix_channels(x)

    def forward_split_cat(self, x):
        # for training/inference
        x1, x2 = torch.split(x, [self.dim_conv3, self.dim_untouched], dim=1)
        x1 = self.partial_conv3(x1)
        x = torch.cat((x1, x2), 1)
        return self._mix_channels(x)


class LKA(nn.Module):
    """Large-Kernel Attention: a depthwise conv, a dilated depthwise conv and
    a shared pointwise projection, whose output gates a projected copy of the
    input via elementwise multiplication."""

    def __init__(self, dim, out_dim):
        super().__init__()
        # Depthwise 5x5 convolution (local context).
        self.conv0 = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        # Depthwise dilated 7x7 convolution (long-range context).
        self.conv_spatial = nn.Conv2d(
            dim, dim, 7, stride=1, padding=9, groups=dim, dilation=3)
        # Pointwise projection dim -> out_dim, shared by both branches.
        self.conv1 = nn.Conv2d(dim, out_dim, 1)

    def forward(self, x):
        # Identity branch: project a copy of the input to out_dim channels.
        identity = self.conv1(x.clone())
        # Attention branch: local -> dilated -> pointwise.
        gate = self.conv1(self.conv_spatial(self.conv0(x)))
        # Gate the projected input with the attention map.
        return identity * gate


# Run LKA and Partial_conv3 (plus CoTAttention) in parallel.
class PCLKA(nn.Module):
    """Parallel combination of Partial_conv3, LKA and CoTAttention: each
    branch sees the same input and their outputs are summed."""

    def __init__(self, dim, out_dim):
        super().__init__()
        self.lka = LKA(dim, out_dim)
        self.pc = Partial_conv3(dim, out_dim, 2, 'split_cat')
        self.co = CoTAttention(dim, out_dim)

    def forward(self, x):
        # Feed the same features to every branch and accumulate the results
        # (pc first, then lka, then co — matching the original addition order).
        out = self.pc(x)
        for branch in (self.lka, self.co):
            out = out + branch(x)
        return out


if __name__ == '__main__':
    # Smoke test: run PCLKA on a random feature map and print the shapes.
    # Fix: pick the device at runtime instead of unconditionally calling
    # .cuda(), which crashes on CPU-only machines.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    block = PCLKA(64, 128).to(device)
    x = torch.rand(1, 64, 256, 256, device=device)  # renamed from `input` to avoid shadowing the builtin
    output = block(x)
    print(x.size(), output.size())
