from .transformer import Transformer as trans


def build_transformer(args):
    """Instantiate the two-stage Transformer from parsed CLI arguments.

    Maps the argparse namespace fields onto the Transformer constructor;
    intermediate decoder outputs are always requested so per-layer
    auxiliary losses can be computed by the caller.
    """
    config = {
        "d_model": args.hidden_dim,
        "dropout": args.dropout,
        "nhead": args.nheads,
        "dim_feedforward": args.dim_feedforward,
        "num_encoder_layers": args.enc_layers,
        "num_dec_layers_hopd": args.dec_layers_hopd,
        "num_dec_layers_interaction": args.dec_layers_interaction,
        "normalize_before": args.pre_norm,
        "return_intermediate_dec": True,
    }
    return Transformer(**config)


class Transformer(trans):
    """Two-stage transformer for HOI-style detection.

    The base class provides the encoder and a first ("HOPD") decoder; this
    subclass adds a second interaction decoder whose queries are seeded from
    the last layer of the HOPD decoder's output.
    """

    def __init__(self,
                 d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_dec_layers_hopd=3,
                 num_dec_layers_interaction=3,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 normalize_before=False,
                 return_intermediate_dec=False):
        # Base class builds the encoder and the first (HOPD) decoder.
        # NOTE(review): positional args assume the parent __init__ signature
        # order matches — confirm against .transformer.Transformer.
        super().__init__(d_model, nhead, num_encoder_layers, num_dec_layers_hopd,
                         dim_feedforward, dropout, activation, normalize_before,
                         return_intermediate_dec)
        # Second-stage decoder refining interaction representations;
        # built with the parent's decoder factory so both stages match.
        self.interaction_decoder = self.build_decode(
            d_model, nhead, num_dec_layers_interaction, dim_feedforward,
            dropout, activation, normalize_before, return_intermediate_dec)
        self._reset_parameters()

    def forward(self, srcs, masks, query_embed, pos_embed):
        """Encode multi-scale features, then run HOPD and interaction decoders.

        Args:
            srcs: list of feature maps; the last entry is (N, C, H, W).
            masks: padding masks matching ``srcs``.
            query_embed: learned query embeddings for the HOPD stage.
            pos_embed: positional encodings matching ``srcs``.

        Returns:
            dict with keys ``'src'`` (last input feature map), ``'memory'``
            (encoder output reshaped to (N, C, H, W)), ``'out_query'``
            (per-layer HOPD decoder outputs) and ``'interaction_decoder_out'``
            (per-layer interaction decoder outputs).
        """
        bs, c, h, w = srcs[-1].shape
        # Flatten features for the encoder; spatial shapes and attention
        # weights are not needed downstream, so they are discarded.
        src, mask, pos_embed, _ = self.encode_input_reshape(srcs, masks, pos_embed)
        memory, _ = self.encode_forward(src, mask, pos_embed, self.encoder)

        # Stage 1: HOPD decoder with the learned queries.
        tgt, query_embed = self.decode_input_reshape(query_embed, bs)
        hopd_out, _, _ = self.decode_forward(tgt, memory, mask, query_embed,
                                             pos_embed, self.decoder)

        # Stage 2: seed the interaction decoder with the last HOPD layer's
        # output, permuted to (num_queries, N, d_model) — presumably
        # hopd_out is (layers, N, num_queries, d_model); TODO confirm.
        interaction_query_embed = hopd_out[-1].permute(1, 0, 2)
        interaction_tgt, interaction_query_embed = self.decode_input_reshape(
            interaction_query_embed, bs)
        interaction_decoder_out, _, _ = self.decode_forward(
            interaction_tgt, memory, mask, interaction_query_embed,
            pos_embed, self.interaction_decoder)

        # Restore the encoder memory of the LAST (finest kept) level to
        # (N, C, H, W): tokens of all levels are concatenated along the
        # sequence dim, so the last h*w tokens belong to srcs[-1].
        memory = memory.permute(1, 2, 0)[:, :, -h*w:].view(bs, c, h, w)
        return {'src': srcs[-1],
                'memory': memory,
                'out_query': hopd_out,
                'interaction_decoder_out': interaction_decoder_out}


