from .conditional_transformer import Transformer as trans
from .module.transformerdecoder import TransformerDecoderLayer, TransformerDecoder
from torch import nn

def build_transformer(args):
    """Construct a :class:`Transformer` from a parsed argument namespace.

    Maps the command-line style names on *args* (``hidden_dim``, ``nheads``,
    ``enc_layers``, ...) onto the constructor's keyword parameters.
    """
    kwargs = dict(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_dec_layers_hopd=args.dec_layers_hopd,
        num_dec_layers_interaction=args.dec_layers_interaction,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
    return Transformer(**kwargs)

class Transformer(trans):
    """Cascaded-decoder transformer for HOI-style detection.

    Extends the parent conditional transformer (``trans``), which builds the
    encoder and the base (HOPD) decoder, by adding two further decoders that
    run in sequence on the same encoder memory:

      1. the inherited HOPD decoder (``self.decoder``) produces object queries,
      2. ``self.human_decoder`` refines them into human references,
      3. ``self.interaction_decoder`` consumes the sum of both for interactions.
    """

    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_dec_layers_hopd=3,
                 num_dec_layers_interaction=3,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 normalize_before=False,
                 return_intermediate_dec=False):
        # Parent builds the encoder and the HOPD decoder (num_dec_layers_hopd deep).
        super().__init__(d_model, nhead, num_encoder_layers, num_dec_layers_hopd,
                         dim_feedforward, dropout, activation, normalize_before,
                         return_intermediate_dec)
        # NOTE(review): both extra decoders take their depth from
        # num_dec_layers_interaction — the human decoder has no dedicated
        # layer-count parameter. Confirm this sharing is intended.
        self.human_decoder = self.build_decode(
            d_model, nhead, num_dec_layers_interaction, dim_feedforward,
            dropout, activation, normalize_before, return_intermediate_dec)
        self.interaction_decoder = self.build_decode(
            d_model, nhead, num_dec_layers_interaction, dim_feedforward,
            dropout, activation, normalize_before, return_intermediate_dec)
        self._reset_parameters()

    def build_decode_origin(self, d_model, nhead, num_decoder_layers,
                            dim_feedforward, dropout, activation,
                            normalize_before, return_intermediate_dec):
        """Build a plain (non-conditional) decoder stack with a final LayerNorm.

        Kept as an alternative to the parent's ``build_decode``; currently not
        wired into ``__init__``.
        """
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        return TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                  return_intermediate=return_intermediate_dec)

    def decode_forward_origin(self, tgt, memory, mask, query_embed, pos_embed, decoder):
        """Run *decoder* and normalize its outputs to a per-layer layout.

        Returns ``(hs, self_attn, cross_attn)``. When intermediate outputs are
        not returned, a leading pseudo-layer dimension is added so callers
        always see the same rank.
        """
        hs, self_attn, cross_attn = decoder(tgt, memory,
                                            memory_key_padding_mask=mask,
                                            query_pos=query_embed, pos=pos_embed)
        if self.return_intermediate_dec:
            # swap the sequence and batch axes of each stacked layer output
            hs = hs.transpose(1, 2)
        else:
            # fake a single "layer" axis, then swap sequence/batch as above
            hs = hs.unsqueeze(0).transpose(1, 2)
            self_attn = self_attn.unsqueeze(0)
            cross_attn = cross_attn.unsqueeze(0)
        return hs, self_attn, cross_attn

    def forward(self, srcs, masks, query_embed, pos_embed):
        """Encode the inputs once, then run the three-decoder cascade.

        Parameters are multi-level feature maps (``srcs``), padding masks,
        learned query embeddings, and positional embeddings; the last feature
        level's shape defines the memory that is reshaped for the output.
        Returns a dict of the raw features, reshaped memory, and the outputs /
        references of each decoder stage.
        """
        bs, c, h, w = srcs[-1].shape
        src, mask, pos_embed, spatial_shapes = self.encode_input_reshape(srcs, masks, pos_embed)
        memory = self.encode_forward(src, mask, pos_embed, self.encoder)

        # --- Stage 1: object (HOPD) decoding with the inherited decoder ---
        tgt, query_embed = self.decode_input_reshape(query_embed, bs)
        hopd_out, object_references = self.decode_forward(
            tgt, memory, mask, query_embed, pos_embed, self.decoder)

        # --- Stage 2: human decoding, queried by the last HOPD layer output ---
        human_query_embed = hopd_out[-1].permute(1, 0, 2)
        human_tgt, human_query_embed = self.decode_input_reshape(human_query_embed, bs)
        human_references_out, human_references = self.decode_forward(
            human_tgt, memory, mask, human_query_embed, pos_embed, self.human_decoder)

        # --- Stage 3: interaction decoding, queried by object + human outputs ---
        interaction_query_embed = (hopd_out[-1].permute(1, 0, 2)
                                   + human_references_out[-1].permute(1, 0, 2))
        interaction_tgt, interaction_query_embed = self.decode_input_reshape(
            interaction_query_embed, bs)
        interaction_decoder_out, interaction_references = self.decode_forward(
            interaction_tgt, memory, mask, interaction_query_embed, pos_embed,
            self.interaction_decoder)

        # Restore memory to (batch, channels, h, w), keeping only the last
        # h*w tokens (the final feature level).
        memory = memory.permute(1, 2, 0)[:, :, -h*w:].view(bs, c, h, w)
        outputs = {'src': srcs[-1], 'memory': memory,
                   'out_query': hopd_out, 'reference': object_references,
                   'human_references_out': human_references_out,
                   'human_references': human_references,
                   'interaction_decoder_out': interaction_decoder_out}
        return outputs