from .cdn_transformer import Transformer as Trans
import torch
import torch.nn as nn

#from .module.multi_transformerdecoder import TransformerDecoderLayer, TransformerDecoder

def build_transformer(args):
    """Factory: construct a Transformer from the parsed command-line args.

    Maps argparse attribute names onto the Transformer constructor's
    keyword parameters; intermediate decoder outputs are always returned.
    """
    kwargs = dict(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_dec_layers_hopd=args.dec_layers_hopd,
        num_dec_layers_interaction=args.dec_layers_interaction,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
        epoch_limit=args.epoch_limit,
    )
    return Transformer(**kwargs)


class Transformer(Trans):
    """Cascaded HOI transformer built on the CDN base class.

    Two encoders process the backbone features (a shared H-O encoder and a
    separate "verb"/interaction encoder), feeding three sequential decoders:
    object -> human -> interaction.  Each later decoder is conditioned on the
    previous decoder's output through small linear fusion heads.

    NOTE(review): ``num_dec_layers_hopd`` / ``num_dec_layers_interaction`` are
    accepted (and forwarded to the base class) but the encoder/decoder stacks
    rebuilt below all use a hard-coded depth of 6 -- confirm this is intended.
    """
    def __init__(self, 
                 d_model=512, 
                 nhead=8, 
                 num_encoder_layers=6,
                 num_dec_layers_hopd=3, 
                 num_dec_layers_interaction=3, #new
                 dim_feedforward=2048, 
                 dropout=0.1,
                 activation="relu", 
                 normalize_before=False,
                 return_intermediate_dec=False,
                 epoch_limit=0):
        """Build the encoder/decoder stacks and fusion heads.

        Args:
            d_model: transformer hidden size.
            nhead: number of attention heads.
            num_encoder_layers: encoder depth (NOTE(review): overridden by the
                hard-coded 6 in the ``build_encode`` calls below).
            num_dec_layers_hopd / num_dec_layers_interaction: decoder depths
                forwarded to the base class (likewise overridden below).
            dim_feedforward: FFN width inside each layer.
            dropout: dropout probability.
            activation: FFN activation name.
            normalize_before: pre-norm vs post-norm layer ordering.
            return_intermediate_dec: return per-layer decoder outputs.
            epoch_limit: epoch at/after which the object and human decoders
                are run under ``torch.no_grad()`` in ``forward`` (frozen).
        """
        super(Transformer,self).__init__(d_model, nhead, num_encoder_layers,num_dec_layers_hopd, 
                 num_dec_layers_interaction, dim_feedforward, dropout, activation, normalize_before, return_intermediate_dec)

        # Rebuild the stacks with depth 6, replacing whatever the base class
        # created.  ``interaction_encoder`` is a second, independent encoder
        # for the interaction ("verb") branch.
        self.encoder = self.build_encode(d_model, nhead, 6, dim_feedforward, dropout, activation, normalize_before)
        self.interaction_encoder = self.build_encode(d_model, nhead, 6, dim_feedforward, dropout, activation, normalize_before, return_intermediate_dec)
        
        self.decoder = self.build_decode(d_model, nhead, 6, dim_feedforward, dropout, activation, normalize_before, return_intermediate_dec)
        self.human_decoder = self.build_decode(d_model, nhead, 6, dim_feedforward, dropout, activation, normalize_before, return_intermediate_dec)
        self.interaction_decoder = self.build_decode(d_model, nhead, 6, dim_feedforward, dropout, activation, normalize_before, return_intermediate_dec)
        # self.interaction_decoder = None
        
        self.epoch_limit = epoch_limit     
        # Fusion heads used in ``forward`` to derive the human and interaction
        # queries from earlier decoder outputs.
        # NOTE(review): the 256 literals assume d_model == 256 -- these layers
        # will break if the model is built with a different hidden size.
        self.object_location_head = nn.Sequential(nn.Linear(256, 256))
        self.linear_human = nn.Sequential(nn.Linear(256 * 2, 256))
        
        self.human_location_head = nn.Sequential(nn.Linear(256, 256))
        self.linear_interaction = nn.Sequential(nn.Linear(256 * 3, 256))
        
        self.memory_linear = nn.Sequential(nn.Linear(256 * 2, 256))
        

        self._reset_parameters()


    def forward(self, srcs, masks, query_embed, pos_embed, human_query_embed=None, epoch=0):
        """Run the two encoders and the object -> human -> interaction cascade.

        Args:
            srcs: list of backbone feature maps; only ``srcs[-1]`` is used for
                the (bs, c, h, w) shape and the returned 'src' entry.
            masks: padding masks matching ``srcs``.
            query_embed: learned object query embeddings.
            pos_embed: positional encodings for the encoder memory.
            human_query_embed: NOTE(review): this argument is never read -- it
                is unconditionally recomputed from the object decoder output
                below.  Consider removing it from the signature.
            epoch: current training epoch; at ``epoch >= self.epoch_limit``
                the object and human decoders run under no_grad (frozen).

        Returns:
            dict with 'src', 'memory' (reshaped back to (bs, c, h, w)),
            'out_query' (object decoder outputs), 'human_decoder_out' and
            'interaction_decoder_out'.
        """
        bs, c, h, w = srcs[-1].shape 
        
        ######################################################################
        ##   encoder-H-O
        ###################################################################### 
        src, mask, pos_embed, spatial_shapes = self.encode_input_reshape(srcs, masks, pos_embed)
        #with torch.no_grad(): 
        memory, memory_attn = self.encode_forward(src, mask, pos_embed, self.encoder)# 35x35, N, 256
        
        # # ######################################################################
        # # ##   encoder-verb
        # # ######################################################################
        # Second encoder pass over the SAME reshaped inputs; its memory is
        # consumed only by the interaction decoder below.
        ACT_memory, ACT_memory_attn = self.encode_forward(src, mask, pos_embed, self.interaction_encoder)# 35x35, N, 256
        
        ######################################################################
        ##   decoder-Object 
        ######################################################################
        tgt, query_embed = self.decode_input_reshape(query_embed, bs)
        # Freeze (no gradients through) the object decoder once the epoch
        # limit is reached; the computation itself is identical in both arms.
        if epoch >= self.epoch_limit:
            with torch.no_grad(): 
                object_decoder_out, self_attn, cross_attn = self.decode_forward(tgt, memory, mask, query_embed, pos_embed, self.decoder)# 6, N, 100, 256
        else: 
            object_decoder_out, self_attn, cross_attn = self.decode_forward(tgt, memory, mask, query_embed, pos_embed, self.decoder)# 6, N, 100, 256            
        
        ######################################################################
        ##    decoder-Human
        ######################################################################
        # Human queries: fuse the original object queries with the (projected)
        # last-layer object decoder output.
        object_location = self.object_location_head(object_decoder_out[-1])
        human_query_embed = self.linear_human(torch.cat([query_embed.permute(1, 0, 2), object_location], dim=-1)).permute(1, 0, 2) 

        human_tgt, human_query_embed = self.decode_input_reshape(human_query_embed, bs)   
        if epoch >= self.epoch_limit:
            with torch.no_grad(): 
                human_decoder_out, human_self_attn, human_cross_attn = self.decode_forward(human_tgt, memory, mask, human_query_embed, pos_embed, self.human_decoder)# 6, N, 100, 256  
        else: 
             human_decoder_out, human_self_attn, human_cross_attn = self.decode_forward(human_tgt, memory, mask, human_query_embed, pos_embed, self.human_decoder)
        
        ######################################################################
        ##   decoder-Interaction
        ######################################################################
        # NOTE(review): the three lines below compute an interaction query
        # that is immediately discarded -- ``interaction_tgt`` and
        # ``interaction_query_embed`` are overwritten right after.  Dead
        # compute unless ``decode_input_reshape`` has side effects; confirm
        # and consider deleting.
        human_location = self.human_location_head(human_decoder_out[-1])
        interaction_query_embed = self.linear_interaction(torch.cat([query_embed.permute(1, 0, 2), object_location, human_location], dim=-1)).permute(1, 0, 2)
        interaction_tgt, interaction_query_embed = self.decode_input_reshape(interaction_query_embed, bs)
        
        # Actual interaction target: fusion of the final human and object
        # decoder outputs; no separate query embedding is used.
        interaction_tgt =  self.memory_linear(torch.cat([human_decoder_out[-1], object_decoder_out[-1]], -1)).permute(1, 0, 2)
        interaction_query_embed = None
        # Attends over the interaction encoder's (last-layer) memory, not the
        # H-O memory used by the two decoders above.
        interaction_decoder_out, interaction_self_attn, interaction_cross_attn = self.decode_forward(interaction_tgt, ACT_memory[-1], mask, interaction_query_embed, pos_embed, self.interaction_decoder)# 6, N, 100, 256     
        # interaction_decoder_out, memory_attn = self.encode_forward(interaction_tgt, None, None, self.interaction_encoder)
        # interaction_decoder_out = interaction_decoder_out.permute(0, 2, 1, 3)
        
        # Fold the flattened memory back to (bs, c, h, w), keeping only the
        # last h*w tokens (presumably the finest feature level -- confirm).
        memory = memory.permute(1, 2, 0)[:, :, -h*w:].view(bs, c, h, w) 
        outputs = {'src': srcs[-1], 'memory': memory,
                   'out_query': object_decoder_out, 
                   'human_decoder_out': human_decoder_out, 
                   'interaction_decoder_out': interaction_decoder_out}
        return outputs