
import math
from einops import rearrange
import torch.nn as nn
import torch

class Mlp(nn.Module):
    """Two-layer feed-forward block: expand to 2x width, GELU, project back.

    Dropout is applied twice: after the activation and after the output
    projection.
    """

    def __init__(self, hidden_size, dropout_rate=0.1):
        super(Mlp, self).__init__()
        self.fc1 = nn.Linear(hidden_size, 2 * hidden_size)
        self.fc2 = nn.Linear(2 * hidden_size, hidden_size)
        self.act_fn = nn.GELU()
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, x):
        # expand -> nonlinearity -> dropout -> project back -> dropout
        hidden = self.dropout(self.act_fn(self.fc1(x)))
        return self.dropout(self.fc2(hidden))
    
class CrossAttention(nn.Module):
    """Single-head cross attention over token sequences.

    Queries come from ``hidden_states``; keys and values come from ``kv``.
    All projections map ``hidden_size -> hidden_size``.
    """

    def __init__(self, hidden_size, attention_dropout_rate, ):
        super(CrossAttention, self).__init__()

        self.hidden_size = hidden_size
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)

        self.out = nn.Linear(hidden_size, hidden_size)
        self.attn_dropout = nn.Dropout(attention_dropout_rate)
        self.proj_dropout = nn.Dropout(attention_dropout_rate)

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, hidden_states, kv):
        """hidden_states: (B, Nq, C); kv: (B, Nkv, C) -> (B, Nq, C)."""
        q = self.query(hidden_states)
        k = self.key(kv)
        v = self.value(kv)

        # NOTE(review): the scale is a hard-coded sqrt(16) = 4, not
        # sqrt(hidden_size) -- presumably assumes a 16-dim head; kept as-is.
        scores = torch.matmul(q, k.transpose(-1, -2))
        scores = scores / math.sqrt(16)
        probs = self.attn_dropout(self.softmax(scores))

        context = torch.matmul(probs, v)
        output = self.out(context)
        return self.proj_dropout(output)

class CrossAttentionLong(nn.Module):
    """Frame-level ("long") cross attention over a temporal stack.

    The query frame and each of the T key frames are reduced by a conv to a
    single-channel (W*H)-dim descriptor, giving a (1 x T) attention row that
    blends whole value frames together.
    """

    def __init__(self, hidden_size, attention_dropout_rate):
        super(CrossAttentionLong, self).__init__()

        self.hidden_size = hidden_size
        # Query/key collapse channels to one map; value keeps all channels.
        self.query = nn.Conv2d(hidden_size, 1, kernel_size=3, padding=1, stride=1)
        self.key = nn.Conv2d(hidden_size, 1, kernel_size=3, padding=1, stride=1)
        self.value = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, stride=1)

        self.out = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, stride=1)
        self.attn_dropout = nn.Dropout(attention_dropout_rate)
        self.proj_dropout = nn.Dropout(attention_dropout_rate)

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, hidden_states, kv):
        """hidden_states: (B, 1, C, W, H); kv: (B, T, C, W, H) -> (B, C, W, H)."""
        B, _, C, W, H = hidden_states.shape
        T = kv.shape[1]

        # One (W*H)-vector per batch element describing the query frame.
        q = self.query(hidden_states.reshape(B, C, W, H)).reshape(B, 1, W * H)

        # One (W*H)-vector per kv frame; values keep the full C*W*H content.
        frames = kv.reshape(B * T, C, W, H)
        k = self.key(frames).reshape(B, T, W * H)
        v = self.value(frames).reshape(B, T, -1)

        # (B, 1, T): correlation of the query frame with each kv frame.
        # NOTE(review): scale is a hard-coded sqrt(16), kept as-is.
        scores = torch.matmul(q, k.transpose(-1, -2))
        scores = scores / math.sqrt(16)
        probs = self.attn_dropout(self.softmax(scores))

        # Weighted blend of whole frames, restored to image layout.
        blended = torch.matmul(probs, v).reshape(B, C, W, H)
        return self.proj_dropout(self.out(blended))
    

class CrossAttentionShort(nn.Module):
    """Pixel-level ("short") cross attention against two kv frames.

    Short attention fuses pixel information, not frame information: each
    query pixel attends over all pixels of kv frame 0 and, with separate
    projections, over all pixels of kv frame T-1. The two context maps are
    concatenated channel-wise and fused by a conv.
    """

    def __init__(self, hidden_size, attention_dropout_rate, scale_factor):
        """
        Args:
            hidden_size: channel count C of the input frames.
            attention_dropout_rate: dropout on attention weights and output.
            scale_factor: channel compression for query/key maps; they use
                int(hidden_size * scale_factor) channels to cheapen the
                (W*H) x (W*H) score matrix. Values keep full width.
        """
        super(CrossAttentionShort, self).__init__()

        self.hidden_size = hidden_size
        compress_hidden_size = int(hidden_size * scale_factor)
        self.query = nn.Conv2d(hidden_size, compress_hidden_size, kernel_size=3, padding=1, stride=1)
        self.key = nn.Conv2d(hidden_size, compress_hidden_size, kernel_size=3, padding=1, stride=1)
        self.value = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, stride=1)

        # Separate key/value projections for the last kv frame.
        self.key2 = nn.Conv2d(hidden_size, compress_hidden_size, kernel_size=3, padding=1, stride=1)
        self.value2 = nn.Conv2d(hidden_size, hidden_size, kernel_size=3, padding=1, stride=1)

        self.out = nn.Conv2d(2*hidden_size, hidden_size, kernel_size=3, padding=1, stride=1)
        self.attn_dropout = nn.Dropout(attention_dropout_rate)
        self.proj_dropout = nn.Dropout(attention_dropout_rate)

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, hidden_states, kv):
        """hidden_states: (B, 1, C, W, H); kv: (B, T, C, W, H) -> (B, C, W, H)."""
        B = hidden_states.shape[0]
        C = hidden_states.shape[2]
        W = hidden_states.shape[3]
        H = hidden_states.shape[4]

        hidden_states = hidden_states.reshape(B, C, W, H)
        query_layer = self.query(hidden_states)  # (B, C', W, H)
        C_low = query_layer.shape[1]
        # BUGFIX: flatten the spatial dims and then move channels last so each
        # pixel becomes a C'-dim token. The previous plain reshape(B, W*H, C')
        # reinterpreted (B, C', W, H) memory and scrambled channel/pixel data;
        # CrossAttentionShortEfficient already uses this reshape+permute form.
        query_layer = query_layer.reshape(B, C_low, W * H).permute(0, 2, 1)

        kv1 = kv[:, 0]   # first frame
        kv2 = kv[:, -1]  # last frame

        key_layer_1 = self.key(kv1).reshape(B, C_low, W * H).permute(0, 2, 1)
        key_layer_2 = self.key2(kv2).reshape(B, C_low, W * H).permute(0, 2, 1)
        value_layer = self.value(kv1).reshape(B, C, W * H).permute(0, 2, 1)
        value_layer_2 = self.value2(kv2).reshape(B, C, W * H).permute(0, 2, 1)

        # Pixel-vs-pixel attention against the first kv frame: (B, W*H, W*H).
        # NOTE(review): scale is a hard-coded sqrt(16), kept as-is.
        attention_scores = torch.matmul(query_layer, key_layer_1.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(16)
        attention_probs = self.attn_dropout(self.softmax(attention_scores))
        context_layer = torch.matmul(attention_probs, value_layer)  # (B, W*H, C)
        # Inverse of the token flatten: channels first, then image layout.
        context_layer = context_layer.permute(0, 2, 1).reshape(B, C, W, H)

        # Same attention pattern against the last kv frame.
        attention_scores = torch.matmul(query_layer, key_layer_2.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(16)
        attention_probs = self.attn_dropout(self.softmax(attention_scores))
        context_layer_2 = torch.matmul(attention_probs, value_layer_2)
        context_layer_2 = context_layer_2.permute(0, 2, 1).reshape(B, C, W, H)

        # Fuse the two contexts channel-wise and project back to C channels.
        context_layer = torch.cat([context_layer, context_layer_2], dim=1)
        attention_output = self.out(context_layer)
        attention_output = self.proj_dropout(attention_output)
        return attention_output
    
class CrossAttentionShortEfficient(nn.Module):
    """Pixel-level cross attention with optional spatial reduction of kv.

    Every pixel of the query frame attends over every pixel of every kv
    frame (tokens of dim ``hidden_size``). With ``sr_ratio > 1`` the kv
    token grid of each frame is downsampled by a strided conv before
    attention (PVT/SegFormer-style sequence reduction), shrinking the score
    matrix by sr_ratio**2.
    """

    def __init__(self, hidden_size, attention_dropout_rate, sr_ratio=1):
        super(CrossAttentionShortEfficient, self).__init__()

        self.hidden_size = hidden_size
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)

        self.out = nn.Linear(hidden_size, hidden_size)
        self.attn_dropout = nn.Dropout(attention_dropout_rate)
        self.proj_dropout = nn.Dropout(attention_dropout_rate)

        self.softmax = nn.Softmax(dim=-1)

        if sr_ratio > 1:
            # Strided conv that downsamples each frame's spatial grid.
            self.sr = nn.Conv2d(
                hidden_size, hidden_size, kernel_size=sr_ratio, stride=sr_ratio
            )
            self.layer_norm = nn.LayerNorm(hidden_size)
        self.sr_ratio = sr_ratio

    def forward(self, hidden_states, kv):
        """hidden_states: (B, 1, C, W, H); kv: (B, T, C, W, H) -> (B, C, W, H)."""
        B = hidden_states.shape[0]
        C = hidden_states.shape[2]
        T = kv.shape[1]
        W = hidden_states.shape[3]
        H = hidden_states.shape[4]

        # Query tokens: one C-dim token per pixel of the query frame.
        hidden_states = hidden_states.reshape(B, C, W*H).permute(0, 2, 1)
        query_layer = self.query(hidden_states)  # (B, W*H, C)

        # BUGFIX: (B, T, C, W, H) -> (B, T*W*H, C) must keep channel values
        # attached to their pixel; the previous reshape(B, C, T*W*H)
        # interleaved the T and C axes.
        kv = kv.reshape(B, T, C, W * H).permute(0, 1, 3, 2).reshape(B, T * W * H, C)

        if self.sr_ratio > 1:
            # BUGFIX: restore per-frame image layout via explicit permutes;
            # the previous reshape(B*T, C, W, H) from (B, C, T*W*H) was an
            # invalid factorization (T lived in the last axis, not the first).
            kv = (
                kv.reshape(B, T, W * H, C)
                .permute(0, 1, 3, 2)
                .reshape(B * T, C, W, H)
            )
            # Apply sequence reduction per frame: (B*T, C, W', H').
            kv = self.sr(kv)
            # Back to a (B, T*W'*H', C) token sequence.
            kv = kv.reshape(B, T, C, -1).permute(0, 1, 3, 2).reshape(B, -1, C)
            kv = self.layer_norm(kv)

        key_state = self.key(kv)
        value_state = self.value(kv)

        # (B, W*H, Nkv) correlation of each query pixel with each kv token.
        # NOTE(review): scale is a hard-coded sqrt(16), kept as-is.
        attention_scores = torch.matmul(query_layer, key_state.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(16)
        attention_probs = self.softmax(attention_scores)
        attention_probs = self.attn_dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_state)  # (B, W*H, C)

        attention_output = self.out(context_layer)
        attention_output = self.proj_dropout(attention_output)
        # BUGFIX: move channels ahead of pixels before restoring image layout;
        # a direct reshape(B, C, W, H) from (B, W*H, C) scrambles the data.
        attention_output = attention_output.permute(0, 2, 1).reshape(B, C, W, H)

        return attention_output
    
# class CrossAttBlock(nn.Module):
#     def __init__(self, config):
#         super().__init__()
#         self.hidden_size = config.hidden_size
#         self.config = config
#         self.attention_norm = nn.LayerNorm(self.hidden_size, eps=1e-6)
#         self.attention_norm_cross = nn.LayerNorm(self.hidden_size, eps=1e-6)
#         self.ffn_norm = nn.LayerNorm(self.hidden_size, eps=1e-6)
#         self.ffn = Mlp(config)
#         self.attn_cross = AttentionCrossModal(config)

#     def forward(self, q, kv):
#         # q is the feature from the other modality.
#         h = q

#         x = self.attn_cross(q, kv)
#         x = x + h
#         x = self.attention_norm_cross(x)

#         h = x
#         x = self.ffn(x)
#         x = x + h
#         x = self.ffn_norm(x)

#         return x

# class TokenLearner(nn.Module):
#     def __init__(self, in_channels, S):

#         super().__init__()
#         self.token_conv = nn.Conv3d(in_channels=in_channels, out_channels=S, kernel_size=3, stride=1, padding=1)

#     def forward(self, x):
#         selected = self.token_conv(x)
#         selected = rearrange(selected, "b s d w h -> b s (d w h) 1")
#         selected = torch.sigmoid(selected)

#         x = rearrange(x, "b c d w h -> b 1 (d w h) c")

#         out = (x * selected).mean(dim=2)
#         return out

# class CrossModalityFusion(nn.Module):
#     def __init__(self, model_num, in_channels,
#                  hidden_size,
#                  img_size, mlp_size=256,
#                  token_mixer_size=32,
#                  token_learner=False):
#         super().__init__()
#         self.embeddings = nn.ModuleList([])
#         self.in_channels = in_channels
#         self.hidden_size = hidden_size
#         patch_size = (1, 1, 1)
#         self.config = get_config(in_channels=in_channels, hidden_size=hidden_size, patch_size=patch_size, img_size=img_size, mlp_dim=mlp_size)
#         self.model_num = model_num
#         self.img_size = img_size
#         patch_num = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1]) * (img_size[2] // patch_size[2])
#         self.token_learner = token_learner
#         if token_learner:
#             self.token_mixer = TokenLearner(in_channels=in_channels, S = token_mixer_size)
#         else :
#             self.token_mixer = nn.Linear(patch_num, token_mixer_size)

#         for i in range(model_num):
#             self.embeddings.append(Embeddings(self.config))

#         self.cross_attention = CrossAttBlock(config=self.config)

#     def forward(self, q, kv):

#         q = rearrange(q, "b c d w h -> b (d w h) c")
#         embed_x = []
#         for i in range(self.model_num):
#             x = self.embeddings[i](kv[:, i])
#             if self.token_learner:
#                 x = rearrange(x, "b (d w h) c -> b c d w h", d=self.img_size[0], w=self.img_size[1], h=self.img_size[2])
#                 x = self.token_mixer(x)

#             else :
#                 x = x.transpose(-1, -2)
#                 x = self.token_mixer(x)
#                 x = x.transpose(-1, -2)

#             embed_x.append(x)

#         embed_x = torch.cat(embed_x, dim=1)
#         batch_size = embed_x.shape[0]

#         corss_out = self.cross_attention(q, embed_x)
#         corss_out = corss_out.transpose(-1, -2)
#         corss_out = corss_out.view((batch_size, self.hidden_size, self.img_size[0], self.img_size[1], self.img_size[2]))

#         return corss_out

if __name__ == '__main__':
    # Smoke test: run tiny hand-built (1, 2, 3) tensors through
    # CrossAttention and print the shapes and outputs.
    query_a = torch.tensor([[[1, 1, 2], [1, 1, 1]]]).float()
    print(f"q1 shape is {query_a.shape}")

    memory = torch.tensor([[[1, 1, 1], [1, 1, 1]]]).float()
    print(f"kv shape is {memory.shape}")

    model = CrossAttention(3, 0)
    print(model(query_a, memory))

    # Second query differs only in which token carries the 2; the output
    # should change accordingly.
    query_b = torch.tensor([[[1, 1, 1], [1, 1, 2]]]).float()
    print(f"q2 shape is {query_b.shape}")
    print(model(query_b, memory))

