import math
import time
from torch import nn
import torch
from einops import rearrange
from torch.nn import functional as F


class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout.

    Operates on the last dimension, so it accepts any leading shape
    (e.g. [B, N, C] token sequences).
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Default hidden/output widths to the input width when not given.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drop)

    def forward(self, x):
        """Project last dim in_features -> hidden -> out_features."""
        hidden = self.drop1(self.act(self.fc1(x)))
        return self.drop2(self.fc2(hidden))

class Attention(nn.Module):
    """Multi-head self-attention over a token sequence of shape [B, N, C]."""

    def __init__(self,
                 dim,                    # token embedding dimension
                 num_heads=8,            # number of attention heads
                 qkv_bias=False,         # add bias terms to the qkv projection
                 qk_scale=None,          # optional override of the 1/sqrt(d_k) scale
                 attn_drop_ratio=0.,
                 proj_drop_ratio=0.):
        super(Attention, self).__init__()
        self.num_heads = num_heads
        # Each head works on an even split of the embedding dimension.
        head_dim = dim // num_heads
        # Dot products are scaled by 1/sqrt(d_k) unless a scale is supplied.
        self.scale = qk_scale or head_dim ** -0.5
        # Single fused linear produces q, k and v in one pass.
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop_ratio)
        # Output projection applied after the heads are re-concatenated.
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop_ratio)

    def forward(self, x):
        """Return self-attended tokens with the same [B, N, C] shape."""
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # [B, N, 3C] -> [3, B, heads, N, head_dim]
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        scores = torch.matmul(q, k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        # Merge heads back: [B, heads, N, head_dim] -> [B, N, C]
        out = torch.matmul(weights, v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))

class CrossAttention(nn.Module):
    """Bidirectional cross-attention between two token sequences.

    Each stream has its own fused qkv projection; stream 1's queries attend
    over stream 2's keys/values and vice versa.  Inputs and outputs are
    token-major [B, N, C] tensors.
    """

    def __init__(self,
                 dim,                    # token embedding dimension
                 num_heads=8,            # number of attention heads
                 qkv_bias=False,         # add bias terms to the qkv projections
                 qk_scale=None,          # optional override of the 1/sqrt(d_k) scale
                 attn_drop_ratio=0.,
                 proj_drop_ratio=0.):
        super(CrossAttention, self).__init__()
        self.num_heads = num_heads
        # Each head works on an even split of the embedding dimension.
        head_dim = dim // num_heads
        # Dot products are scaled by 1/sqrt(d_k) unless a scale is supplied.
        self.scale = qk_scale or head_dim ** -0.5
        # One fused linear produces q, k and v per stream.
        self.qkv1 = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.qkv2 = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop1 = nn.Dropout(attn_drop_ratio)
        self.attn_drop2 = nn.Dropout(attn_drop_ratio)
        # Output projections applied after the heads are re-concatenated.
        self.proj1 = nn.Linear(dim, dim)
        self.proj2 = nn.Linear(dim, dim)
        self.proj_drop1 = nn.Dropout(proj_drop_ratio)
        self.proj_drop2 = nn.Dropout(proj_drop_ratio)

    def forward(self, x1, x2):
        """Cross-attend two [B, N, C] sequences; returns a pair of [B, N, C] tensors."""
        B, N, C = x1.shape

        # BUG FIX: the original re-viewed the already token-major [B, N, C]
        # inputs as (B, C, -1) and permuted back, which scrambles tokens and
        # channels whenever N != C.  No reshaping is needed here.

        qkv1 = self.qkv1(x1).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q1, k1, v1 = qkv1[0], qkv1[1], qkv1[2]  # make torchscript happy (cannot use tensor as tuple)

        qkv2 = self.qkv2(x2).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q2, k2, v2 = qkv2[0], qkv2[1], qkv2[2]  # make torchscript happy (cannot use tensor as tuple)

        # Stream 1 output: queries from stream 1, keys/values from stream 2.
        attn1 = (q1 @ k2.transpose(-2, -1)) * self.scale
        attn1 = attn1.softmax(dim=-1)
        attn1 = self.attn_drop1(attn1)
        x1 = (attn1 @ v2).transpose(1, 2).reshape(B, N, C)
        x1 = self.proj1(x1)
        x1 = self.proj_drop1(x1)

        # Stream 2 output: queries from stream 2, keys/values from stream 1.
        attn2 = (q2 @ k1.transpose(-2, -1)) * self.scale
        attn2 = attn2.softmax(dim=-1)
        attn2 = self.attn_drop2(attn2)
        x2 = (attn2 @ v1).transpose(1, 2).reshape(B, N, C)
        x2 = self.proj2(x2)
        x2 = self.proj_drop2(x2)

        return x1, x2

class SelfCrossAttention(nn.Module):
    """Blended self- and cross-attention between two token streams.

    For each stream i, self-attention logits (q_i . k_i) and cross-attention
    logits (q_j . k_i, queries taken from the other stream) are mixed with
    learnable scalar rates before the softmax, and the resulting weights are
    applied to stream i's own values.  Inputs and outputs are [B, N, C].
    """

    def __init__(self,
                 dim,                    # token embedding dimension
                 num_heads=8,            # number of attention heads
                 qkv_bias=False,         # add bias terms to the qkv projections
                 qk_scale=None,          # optional override of the 1/sqrt(d_k) scale
                 attn_drop_ratio=0.,
                 proj_drop_ratio=0.):
        super(SelfCrossAttention, self).__init__()
        self.num_heads = num_heads
        # Each head works on an even split of the embedding dimension.
        head_dim = dim // num_heads
        # Dot products are scaled by 1/sqrt(d_k) unless a scale is supplied.
        self.scale = qk_scale or head_dim ** -0.5
        # One fused linear produces q, k and v per stream.
        self.qkv1 = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.qkv2 = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop1 = nn.Dropout(attn_drop_ratio)
        self.attn_drop2 = nn.Dropout(attn_drop_ratio)
        # Output projections applied after the heads are re-concatenated.
        self.proj1 = nn.Linear(dim, dim)
        self.proj2 = nn.Linear(dim, dim)
        self.proj_drop1 = nn.Dropout(proj_drop_ratio)
        self.proj_drop2 = nn.Dropout(proj_drop_ratio)

        # BUG FIX: torch.Tensor(1) allocates *uninitialized* memory and
        # reset_parameters() was never called, so the blending rates started
        # from garbage values.  Initialize them deterministically to 0.5.
        self.self_attn_rate1 = nn.Parameter(torch.full((1,), 0.5))
        self.self_attn_rate2 = nn.Parameter(torch.full((1,), 0.5))
        self.cross_attn_rate1 = nn.Parameter(torch.full((1,), 0.5))
        self.cross_attn_rate2 = nn.Parameter(torch.full((1,), 0.5))

    def reset_parameters(self):
        """Reset the four blending rates to their default of 0.5."""
        init_rate_half(self.self_attn_rate1)
        init_rate_half(self.self_attn_rate2)
        init_rate_half(self.cross_attn_rate1)
        init_rate_half(self.cross_attn_rate2)

    def forward(self, x1, x2):
        """Attend both [B, N, C] streams; returns a pair of [B, N, C] tensors."""
        B, N, C = x1.shape

        # BUG FIX: the original re-viewed the already token-major [B, N, C]
        # inputs as (B, C, -1) and permuted back, which scrambles tokens and
        # channels whenever N != C (ADCAM.unfold feeds [BP, N, C] here).

        qkv1 = self.qkv1(x1).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q1, k1, v1 = qkv1[0], qkv1[1], qkv1[2]  # make torchscript happy (cannot use tensor as tuple)

        qkv2 = self.qkv2(x2).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q2, k2, v2 = qkv2[0], qkv2[1], qkv2[2]  # make torchscript happy (cannot use tensor as tuple)

        # Stream 1: keys/values from stream 1; queries from stream 1 (self)
        # and stream 2 (cross), blended by the learnable rates.
        self_attn1 = (q1 @ k1.transpose(-2, -1)) * self.scale * self.self_attn_rate1
        cross_attn1 = (q2 @ k1.transpose(-2, -1)) * self.scale * self.cross_attn_rate1
        attn1 = self_attn1 + cross_attn1
        attn1 = attn1.softmax(dim=-1)
        attn1 = self.attn_drop1(attn1)
        x1 = (attn1 @ v1).transpose(1, 2).reshape(B, N, C)
        x1 = self.proj1(x1)
        x1 = self.proj_drop1(x1)

        # Stream 2 mirrors stream 1.  BUG FIX: the original computed the
        # cross term as q1 @ k1 (stream 1's self-attention logits) and
        # applied the weights to v1; the symmetric form uses k2 and v2.
        self_attn2 = (q2 @ k2.transpose(-2, -1)) * self.scale * self.self_attn_rate2
        cross_attn2 = (q1 @ k2.transpose(-2, -1)) * self.scale * self.cross_attn_rate2
        attn2 = self_attn2 + cross_attn2
        attn2 = attn2.softmax(dim=-1)
        attn2 = self.attn_drop2(attn2)
        x2 = (attn2 @ v2).transpose(1, 2).reshape(B, N, C)
        x2 = self.proj2(x2)
        x2 = self.proj_drop2(x2)

        return x1, x2

def init_rate_half(tensor):
    """Fill *tensor* with 0.5 in place; silently skip a None argument."""
    if tensor is None:
        return
    tensor.data.fill_(0.5)

class ADCAM(nn.Module):
    """Dual-branch fusion module for two [B, C, H, W] feature maps.

    Each input passes through a local conv branch and a global branch that
    unfolds the map into patch-pixel token sequences, runs LayerNorm +
    SelfCrossAttention + MLP (pre-norm transformer style), folds back to
    [B, C, H, W], and finally concatenates local+global features for a 1x1
    fusion conv with a residual connection.

    Args:
        dim: channel count of both inputs.
        kernel_size: kernel of the local conv branch.
        p_h, p_w: patch height/width used by unfold/fold.
        H, W: spatial size for the positional-embedding parameters.
    """

    def __init__(self, dim=512, kernel_size=3, p_h=2, p_w=2, H=56, W=56):
        super().__init__()
        self.ph, self.pw = p_h, p_w
        self.patch_area = p_h * p_w  # pixels per patch (P)
        # Local representation branch: one depth-preserving conv per input.
        self.conv_local1 = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2)
        self.conv_local2 = nn.Conv2d(dim, dim, kernel_size=kernel_size, padding=kernel_size // 2)
        self.self_cross_attn = SelfCrossAttention(dim)
        self.mlp1 = Mlp(dim, 4 * dim)
        self.mlp2 = Mlp(dim, 4 * dim)
        # Pre-norm layers: *_1 before attention, *_2 before the MLP.
        self.layerNorm1_1 = nn.LayerNorm(dim)
        self.layerNorm1_2 = nn.LayerNorm(dim)
        self.layerNorm2_1 = nn.LayerNorm(dim)
        self.layerNorm2_2 = nn.LayerNorm(dim)
        # 1x1 convs applied to the folded global features.
        self.conv_mlp1 = nn.Conv2d(dim, dim, kernel_size=1)
        self.conv_mlp2 = nn.Conv2d(dim, dim, kernel_size=1)
        # 1x1 convs fusing the concatenated local+global features back to dim.
        self.conv_fusion1 = nn.Conv2d(dim * 2, dim, kernel_size=1)
        self.conv_fusion2 = nn.Conv2d(dim * 2, dim, kernel_size=1)

        # NOTE(review): positional embeddings are declared but their use in
        # forward() is commented out — they are currently unused parameters.
        self.pe_hf = nn.Parameter(torch.zeros(1, dim, H, W))
        self.pe_lf = nn.Parameter(torch.zeros(1, dim, H, W))

        # Weight initialization
        self._init_weights()

    def _init_weights(self):
        """Initialize convs (Kaiming), norms (1/0) and linears (trunc normal)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m ,nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
            elif isinstance(m, nn.Linear):
                nn.init.trunc_normal_(m.weight, std=.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)


    def unfold(self, x):
        """Rearrange [B, C, H, W] into [B*P, N, C] patch-pixel token sequences.

        P = p_h * p_w pixels per patch; N = number of patches.  The input is
        bilinearly resized first if H/W are not multiples of the patch size.
        Returns the token tensor and an info dict needed by fold() to invert
        the transform.
        """
        p_h, p_w = self.ph, self.pw
        patch_area = p_h * p_w

        batch_size, in_channels, orig_h, orig_w = x.shape
        # Round spatial dims up to the nearest multiple of the patch size.
        new_h = int(math.ceil(orig_h / self.ph) * self.ph)
        new_w = int(math.ceil(orig_w / self.pw) * self.pw)

        #check: whether need interpolate
        interpolate = False
        if new_w != orig_w or new_h != orig_h:
            x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False)
            interpolate = True

        num_patch_w = new_w // p_w #n_w
        num_patch_h = new_h // p_h #n_h
        num_patches = num_patch_w * num_patch_h #N

        #[B, C, H, W] ===> [B, C, n_h, p_h, n_w, p_w]
        x = x.reshape(batch_size, in_channels, num_patch_h, p_h, num_patch_w, p_w)
        #[B, C, n_h, p_h, n_w, p_w] ===> [B, C, n_h, n_w, p_h, p_w]
        x = x.transpose(3, 4)
        #[B, C, n_h, n_w, p_h, p_w] ===> [B, C, N, P] where P = p_h * p_w and N = n_h * n_w
        x = x.reshape(batch_size, in_channels, num_patches, patch_area)

        #[B, C, N, P] ===> [B, P, N, C]
        x = x.transpose(1, 3)
        #[B, P, N, C] ===> [BP, N, C]
        x = x.reshape(batch_size * patch_area, num_patches, -1)

        # Everything fold() needs to restore the original layout.
        info_dict = {
            "orig_size": (orig_h, orig_w),
            "batch_size": batch_size,
            "interpolate": interpolate,
            "total_patches": num_patches,
            "num_patches_w": num_patch_w,
            "num_patches_h": num_patch_h
        }
        return x, info_dict

    def fold(self, x, info_dict):
        """Inverse of unfold(): [B*P, N, C] tokens back to [B, C, H, W]."""
        #[BP, N, C] ===> [B, P, N, C]
        x = x.contiguous().view(info_dict["batch_size"], self.patch_area, info_dict["total_patches"], -1)
        batch_size, pixels, num_patches, channels = x.size()
        num_patch_h = info_dict["num_patches_h"]
        num_patch_w = info_dict["num_patches_w"]

        #[B, P, N, C] ===> [B, C, N, P]
        x = x.transpose(1, 3)
        #[B, C, N, P] ===> [B, C, n_h, n_w, p_h, p_w]
        x = x.reshape(batch_size, channels, num_patch_h, num_patch_w, self.ph, self.pw)
        #[B, C, n_h, n_w, p_h, p_w] ===> [B, C, n_h, p_h, n_w, p_w]
        x = x.transpose(3, 4)
        #[B, C, n_h, p_h, n_w, p_w] ===> [B, C, H, W]
        x = x.reshape(batch_size, channels, num_patch_h * self.ph, num_patch_w * self.pw)

        # Undo the up-front resize, if unfold() had to interpolate.
        if info_dict["interpolate"]:
            x = F.interpolate(x, size=info_dict["orig_size"], mode="bilinear", align_corners=False)
        return x


    def forward(self, x1, x2):
        """Fuse two [B, C, H, W] maps; returns two tensors of the same shape."""
        res1 = x1.clone()  # b,c,h,w — kept for the final residual connections
        res2 = x2.clone()
        y1 = x1.clone()
        y2 = x2.clone()
        # y1 = x1.clone() + self.pe_hf
        # y2 = x2.clone() + self.pe_lf

        ## Local Representation
        out_conv1 = self.conv_local1(x1)
        out_conv2 = self.conv_local2(x2)

        ## Global Representation
        #[B, C, H, W] ===> [BP, N, C]
        y1, info_dict1 = self.unfold(y1)
        y2, info_dict2 = self.unfold(y2)
        y1_res = y1.clone()
        y2_res = y2.clone()
        # Pre-norm transformer sublayers: attention then MLP, each residual.
        y1 = self.layerNorm1_1(y1)
        y2 = self.layerNorm2_1(y2)
        out_attn1, out_attn2 = self.self_cross_attn(y1, y2)
        out_attn1 = y1_res + out_attn1
        out_attn2 = y2_res + out_attn2
        out_attn1 = out_attn1 + self.mlp1(self.layerNorm1_2(out_attn1))
        out_attn2 = out_attn2 + self.mlp2(self.layerNorm2_2(out_attn2))
        # [BP, N, C] ===> [B, C, H, W]
        out_attn1 = self.fold(out_attn1, info_dict1)
        out_attn2 = self.fold(out_attn2, info_dict2)

        out_attn1 = self.conv_mlp1(out_attn1)
        out_attn2 = self.conv_mlp2(out_attn2)

        ## Fusion: concat local + global, 1x1 conv back to dim, add input residual.
        # print(f"out_conv1:{out_conv1.shape}, out_attn1:{out_attn1.shape}")
        out1 = torch.cat([out_conv1, out_attn1], dim=1)  # bs,2*dim,h,w
        out1 = self.conv_fusion1(out1) + res1 # bs,c,h,w
        out2 = torch.cat(([out_conv2, out_attn2]), dim=1)
        out2 = self.conv_fusion2(out2) + res2
        return out1, out2

if __name__ == '__main__':
    # Smoke test: run ADCAM on two random feature maps and time the pass.
    feat_a = torch.rand(16, 1024, 56, 56)
    feat_b = torch.rand(16, 1024, 56, 56)
    module = ADCAM(dim=1024, kernel_size=7, p_h=8, p_w=8)
    t0 = time.time()
    out_a, out_b = module(feat_a, feat_b)
    elapsed = time.time() - t0
    print(out_a.shape)
    print(out_b.shape)
    print(f"time:{elapsed}")
