from torch import nn
from .transformer import Transformer as trans

from .module.transformerdecoder import TransformerDecoderLayer
from .module.interaction_transformerdecoder import InteractionLayer, InteractionTransformerDecoder


def build_transformer(args):
    """Construct the two-branch Transformer from parsed command-line args.

    Fix note: the previous version passed ``num_dec_layers_hopd`` /
    ``num_dec_layers_interaction`` keywords that ``Transformer.__init__``
    does not declare (guaranteed ``TypeError``), while also tying both
    decoder depths to ``args.enc_layers``.  The dec-layer args are now
    mapped onto the parameters the constructor actually accepts:
    ``dec_layers_hopd`` -> instance branch, ``dec_layers_interaction`` ->
    interaction branch (presumed mapping -- confirm against training config).
    """
    return Transformer(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers_hopd,
        num_rel_decoder_layers=args.dec_layers_interaction,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )



class Transformer(trans):
    """DETR-style transformer with a two-branch (instance + interaction) decoder.

    Extends the base transformer (``trans``) by replacing its decoder with an
    ``InteractionTransformerDecoder`` that runs an instance branch and an
    interaction branch in parallel and aggregates them through an
    instance-aware ``InteractionLayer``.
    """

    def __init__(self, d_model=256, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, num_rel_decoder_layers=6,
                 dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        # NOTE(review): num_rel_decoder_layers is accepted but never used --
        # the interaction branch depth is tied to num_decoder_layers inside
        # build_decode; confirm whether independent depths were intended.
        super().__init__(d_model, nhead, num_encoder_layers, num_decoder_layers,
                         dim_feedforward, dropout, activation,
                         normalize_before, return_intermediate_dec)
        # Re-initialise weights after the subclass decoder has been built.
        self._reset_parameters()

    def build_decode(self, d_model, nhead, num_decoder_layers, dim_feedforward,
                     dropout, activation, normalize_before, return_intermediate_dec):
        """Build the two-branch decoder (overrides the base implementation).

        Returns an ``InteractionTransformerDecoder`` holding an instance
        branch, an interaction branch, and the aggregation layer.
        """
        # Interaction branch: decodes relation queries.
        rel_decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                    dropout, activation, normalize_before)
        rel_decoder_norm = nn.LayerNorm(d_model)
        # Instance branch: decodes object queries.
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        # Branch aggregation: instance-aware attention between the branches.
        interaction_layer = InteractionLayer(d_model, d_model, dropout)

        return InteractionTransformerDecoder(
            decoder_layer,
            rel_decoder_layer,
            num_decoder_layers,
            interaction_layer,
            decoder_norm,
            rel_decoder_norm,
            return_intermediate_dec)

    def decode_forward(self, rel_tgt, tgt, memory, mask, query_embed,
                       rel_query_embed, pos_embed, decoder):
        """Run the two-branch decoder and normalise output layout.

        Returns ``(hs, rel_hs)`` transposed so the batch dimension comes
        second; a leading layer dimension is added when the decoder does not
        already return per-layer intermediates.
        """
        hs, rel_hs = decoder(tgt, rel_tgt, memory, memory_key_padding_mask=mask,
                             pos=pos_embed, query_pos=query_embed,
                             rel_query_pos=rel_query_embed)
        if self.return_intermediate_dec:
            hs = hs.transpose(1, 2)
            rel_hs = rel_hs.transpose(1, 2)
        else:
            # Single-layer output: add a layer axis for a uniform interface.
            hs = hs.unsqueeze(0).transpose(1, 2)
            rel_hs = rel_hs.unsqueeze(0).transpose(1, 2)
        return hs, rel_hs

    def forward(self, srcs, masks, query_embed, rel_query_embed, pos_embed):
        """Encode the feature maps and decode both query sets.

        Returns a dict with the last backbone feature map (``src``), the
        interaction-branch output (``out_query``), the re-shaped encoder
        memory (``memory``) and the instance-branch output (``hs``).
        """
        # Flatten NxCxHxW to HWxNxC.
        bs, c, h, w = srcs[-1].shape
        # Generate the flattened feature sequence.
        src, mask, pos_embed, spatial_shapes = self.encode_input_reshape(srcs, masks, pos_embed)
        # Refine the feature sequence with the encoder.
        memory = self.encode_forward(src, mask, pos_embed, self.encoder)
        # Fix note: the original built `tgt` from rel_query_embed, never
        # reshaped query_embed, and reshaped rel_query_embed twice.  Each
        # branch now reshapes its own query set exactly once -- TODO confirm
        # decode_input_reshape returns (zero targets, batched queries).
        tgt, query_embed = self.decode_input_reshape(query_embed, bs)
        rel_tgt, rel_query_embed = self.decode_input_reshape(rel_query_embed, bs)
        # memory shape: (W*H, bs, d_model)
        hs, rel_hs = self.decode_forward(rel_tgt, tgt, memory, mask, query_embed,
                                         rel_query_embed, pos_embed, self.decoder)
        # Restore the last-level memory back to (bs, c, h, w).
        memory = memory.permute(1, 2, 0)[:, :, -h*w:].view(bs, c, h, w)
        return {'src': srcs[-1], 'out_query': rel_hs, 'memory': memory, 'hs': hs}


