import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from .module import Attention, PreNorm, FeedForward
import numpy as np

class Transformer(nn.Module):
    """A stack of pre-norm attention + feed-forward residual blocks,
    followed by a final LayerNorm over the token dimension."""

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
        super().__init__()
        self.layers = nn.ModuleList([])
        self.norm = nn.LayerNorm(dim)
        for _ in range(depth):
            attn_block = PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout))
            ff_block = PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
            self.layers.append(nn.ModuleList([attn_block, ff_block]))

    def forward(self, x):
        # Residual connection around each sub-layer (attention, then MLP).
        for attn_block, ff_block in self.layers:
            x = x + attn_block(x)
            x = x + ff_block(x)
        return self.norm(x)


  
class ViViT(nn.Module):
    """Hierarchical video transformer.

    Stage 1 (``segment_transformer``) attends over the frame features inside
    each segment and pools them into a single CLS feature per segment.
    Stage 2 (12 stacked temporal transformers) attends across segments; the
    CLS token after the 6th stack is exposed separately so it can be used
    for an alignment (contrastive) loss.

    ``forward`` expects ALREADY-ENCODED frame features of shape
    ``[batch, num_segments, num_segments_inside, dim]`` and returns the tuple
    ``(image_features, video_features_for_align, video_features)``.
    """

    def __init__(self, num_segments, num_segments_inside, image_size = 224, patch_size = 16, num_classes = 100, dim = 1024, depth = 4, heads = 3, pool = 'cls', in_channels = 3, dim_head = 64, dropout = 0.,
                 emb_dropout = 0., scale_dim = 4, ):
        super().__init__()

        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'

        assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        num_patches = (image_size // patch_size) ** 2
        patch_dim = in_channels * patch_size ** 2
        # NOTE(review): to_patch_embedding is never used in forward() (the
        # call is commented out there); it is kept so existing checkpoints
        # still load — confirm before removing.
        self.to_patch_embedding = nn.Sequential(
            Rearrange('b t c (h p1) (w p2) -> b t (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
            nn.Linear(patch_dim, dim),
        )

        # Segment transformer: computes the joint feature of the frames
        # inside one segment (a single Transformer stack).
        # NOTE(review): pos_embedding is unused — intra-segment positional
        # encoding is disabled in forward(); kept for checkpoint compatibility.
        self.pos_embedding = nn.Parameter(torch.randn(1, num_segments, num_segments_inside + 1, dim))
        self.pos_embedding_seg = nn.Parameter(torch.randn(1, num_segments + 1, dim))
        self.segment_token = nn.Parameter(torch.randn(1, 1, dim))
        self.segment_transformer = Transformer(dim, depth, heads, dim_head, dim*scale_dim, dropout)

        # Temporal transformers: model relations BETWEEN segments.  Twelve
        # stacks total; the CLS output of stack 6 is used for alignment.
        self.temporal_token = nn.Parameter(torch.randn(1, 1, dim))
        self.temporal_transformer_1_to_6 = nn.ModuleList([Transformer(dim, depth, heads, dim_head, dim * scale_dim, dropout) for _ in range(6)])
        self.temporal_transformer_7_to_12 = nn.ModuleList([Transformer(dim, depth, heads, dim_head, dim * scale_dim, dropout) for _ in range(6)])

        self.dropout = nn.Dropout(emb_dropout)
        self.pool = pool

        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, num_classes)
        )

    def forward(self, x):
        # Frames are assumed pre-encoded, so the per-patch ViT embedding
        # (to_patch_embedding) is not applied here.
        # x: [batch, t = num_segments, n = num_segments_inside, d = dim]
        b, t, n, _ = x.shape

        # Prepend one CLS token per segment.  No positional encoding is used
        # inside a segment; positions are only encoded between segments below.
        cls_segment_tokens = repeat(self.segment_token, '() n d -> b t n d', b = b, t=t)
        x = torch.cat((cls_segment_tokens, x), dim=2)
        x = self.dropout(x)

        # Intra-segment attention over the frames of each segment.
        x = rearrange(x, 'b t n d -> (b t) n d')
        x = self.segment_transformer(x)
        # Take the first token: the segment-level CLS feature.  It has not
        # attended across segments yet, so it cannot be the final feature.
        x = rearrange(x[:, 0], '(b t) ... -> b t ...', b=b) # cls

        cls_temporal_tokens = repeat(self.temporal_token, '() n d -> b n d', b=b)
        x = torch.cat((cls_temporal_tokens, x), dim=1)   # prepend video CLS to the segment features
        x += self.pos_embedding_seg[:, :(t + 1)]

        # Temporal stacks 1-6.
        for temporal_transformer in self.temporal_transformer_1_to_6:
            x = temporal_transformer(x)
        # CLS after stack 6: whole-video feature used for the alignment loss.
        video_features_for_align = x[:, 0]

        # Temporal stacks 7-12.
        for temporal_transformer in self.temporal_transformer_7_to_12:
            x = temporal_transformer(x)
        video_features = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]

        # Per-segment features that HAVE attended across segments.
        image_features = x[:, 1:]

        return image_features, video_features_for_align, video_features
    
#MusicTemporalTransformer (MuTT): built on the temporal half of the ViViT design above
class MuTT(nn.Module):
    """Temporal transformer over pre-encoded music-segment features.

    ``forward`` takes ``[batch, num_segments, dim]`` segment features,
    prepends a CLS token, adds a learned positional embedding, and runs
    twelve temporal transformer stacks.  The CLS token after stack 6 is
    exposed for the contrastive alignment loss; the CLS after stack 12 is
    the final whole-music feature.  Returns
    ``(audio_features, music_features_for_align, music_features)``.
    """

    def __init__(self, num_segments, dim = 1024, depth = 4, heads = 3, pool = 'cls', dim_head = 64, dropout = 0.,
                 emb_dropout = 0., scale_dim = 4, ):
        super().__init__()

        assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'

        self.pos_embedding = nn.Parameter(torch.randn(1, num_segments + 1, dim))

        self.temporal_token = nn.Parameter(torch.randn(1, 1, dim))
        self.temporal_transformer_1_to_6 = nn.ModuleList([Transformer(dim, depth, heads, dim_head, dim * scale_dim, dropout) for _ in range(6)])
        self.temporal_transformer_7_to_12 = nn.ModuleList([Transformer(dim, depth, heads, dim_head, dim * scale_dim, dropout) for _ in range(6)])
        # NOTE(review): self.dropout is constructed but never applied in
        # forward() (the call is commented out there) — confirm intentional.
        self.dropout = nn.Dropout(emb_dropout)
        # NOTE(review): self.pool is stored but forward() always uses the CLS
        # token (unlike ViViT, which honors 'mean') — confirm intentional.
        self.pool = pool

    def forward(self, x):
        # x: [batch, n = num_segments, d = dim]
        b, n, _ = x.shape

        # Prepend the music-level CLS token and add positional encoding
        # across segments.
        cls_temporal_tokens = repeat(self.temporal_token, '() n d -> b n d', b=b)
        x = torch.cat((cls_temporal_tokens, x), dim=1)
        x += self.pos_embedding[:, :(n + 1)]
#         x = self.dropout(x)

        # Temporal stacks 1-6: CLS used for the contrastive alignment loss.
        for temporal_transformer in self.temporal_transformer_1_to_6:
            x = temporal_transformer(x)
        music_features_for_align = x[:, 0]   # [batch, dim]

        # Temporal stacks 7-12: CLS is the final whole-music feature.
        for temporal_transformer in self.temporal_transformer_7_to_12:
            x = temporal_transformer(x)
        music_features = x[:, 0]  # [batch, dim]

        # Per-segment features after cross-segment attention.  (The raw input
        # features had neither attention nor positional encoding applied.)
        audio_features = x[:, 1:]  # [batch, num_segments, dim]

        return audio_features, music_features_for_align, music_features
    
    
    

if __name__ == "__main__":
    
    img = torch.ones([1, 16, 3, 224, 224]).cuda()
    
    model = ViViT(224, 16, 100, 16).cuda()
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
    print('Trainable Parameters: %.3fM' % parameters)
    
    out = model(img)
    
    print("Shape of out :", out.shape)      # [B, num_classes]

    
    