# ------------------------------------------------------------------------
# Conditional DETR Transformer class.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------



import torch
from torch import nn

from .smca_transformer import Transformer as trans
from .module.transformerdecoder import TransformerDecoderLayer, TransformerDecoder

def build_transformer(args):
    """Factory: construct a :class:`Transformer` from parsed CLI arguments.

    Note that ``num_decoder_layers`` is taken from ``args.dec_layers_hopd``
    and intermediate decoder outputs are always returned.
    """
    config = {
        "d_model": args.hidden_dim,
        "dropout": args.dropout,
        "nhead": args.nheads,
        "dim_feedforward": args.dim_feedforward,
        "num_encoder_layers": args.enc_layers,
        "num_decoder_layers": args.dec_layers_hopd,
        "normalize_before": args.pre_norm,
        "return_intermediate_dec": True,
        "smooth": args.smooth,
        "dynamic_scale": args.dynamic_scale,
    }
    return Transformer(**config)


class Transformer(trans):
    """SMCA transformer extended with a second, DETR-style "interaction" decoder.

    Encoding and the first (SMCA) decoding pass are inherited from the base
    class. This subclass adds a plain DETR decoder whose queries are
    initialised from the last-layer output of the first decoder, and its
    ``forward`` returns both decoders' outputs plus the interaction decoder's
    self-/cross-attention maps.
    """

    def __init__(self, d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 normalize_before=False,
                 return_intermediate_dec=False,
                 smooth=8,
                 dynamic_scale=None):
        # Modern zero-argument super() — equivalent to the old
        # super(Transformer, self) form but not fragile under renames.
        super().__init__(d_model, nhead, num_encoder_layers, num_decoder_layers,
                         dim_feedforward, dropout, activation, normalize_before,
                         return_intermediate_dec, smooth, dynamic_scale)
        # The interaction decoder is a vanilla DETR decoder (no SMCA gating).
        self.interaction_decoder = self.build_decode_origin(
            d_model, nhead, num_decoder_layers, dim_feedforward, dropout,
            activation, normalize_before, return_intermediate_dec)
        self._reset_parameters()

    def build_decode_origin(self, d_model, nhead, num_decoder_layers,
                            dim_feedforward, dropout, activation,
                            normalize_before, return_intermediate_dec):
        """Build a vanilla DETR-style decoder stack with a final LayerNorm.

        Returns:
            TransformerDecoder: ``num_decoder_layers`` stacked
            ``TransformerDecoderLayer`` instances sharing one output norm.
        """
        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        return TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                  return_intermediate=return_intermediate_dec)

    def decode_forward_origin(self, tgt, memory, mask, query_embed, pos_embed, decoder):
        """Run ``decoder`` and normalise its output layout.

        Returns:
            tuple: ``(hs, self_attn, cross_attn)`` where ``hs`` has a leading
            per-layer dimension whether or not intermediate outputs were
            requested (a singleton layer dim is added otherwise).
        """
        hs, self_attn, cross_attn = decoder(tgt, memory,
                                            memory_key_padding_mask=mask,
                                            query_pos=query_embed, pos=pos_embed)
        # NOTE(review): assumes the base class stores `return_intermediate_dec`
        # as an instance attribute — confirm against smca_transformer.Transformer.
        if self.return_intermediate_dec:
            hs = hs.transpose(1, 2)
        else:
            # Add a singleton "layer" dimension so callers see a uniform shape.
            hs = hs.unsqueeze(0).transpose(1, 2)
            self_attn = self_attn.unsqueeze(0)
            cross_attn = cross_attn.unsqueeze(0)
        return hs, self_attn, cross_attn

    def forward(self, srcs, masks, query_embed, pos_embed, h_w):
        """Encode, decode with the SMCA decoder, then run the interaction decoder.

        Args:
            srcs: list of feature maps; the last is NxCxHxW and sets (h, w).
            masks: padding masks matching ``srcs``.
            query_embed: learned query embeddings for the first decoder.
            pos_embed: positional encodings for the encoder memory.
            h_w: per-image (height, width) info forwarded to the SMCA decoder.

        Returns:
            dict with the raw source, reshaped memory, first-decoder outputs
            and reference points, and the interaction decoder's outputs and
            attention maps.
        """
        bs, c, h, w = srcs[-1].shape
        src, mask, pos_embed, spatial_shapes = self.encode_input_reshape(srcs, masks, pos_embed)
        # NOTE(review): torch.meshgrid without indexing= relies on the
        # deprecated implicit 'ij' default; pass indexing='ij' once the
        # minimum torch version is >= 1.10.
        grid_y, grid_x = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
        grid = torch.stack((grid_x, grid_y), 2).float().to(src.device)
        # The factor 8 presumably matches nhead (one grid copy per attention
        # head) — TODO confirm against the SMCA decoder's expectations.
        grid = grid.reshape(-1, 2).unsqueeze(1).repeat(1, bs * 8, 1)

        memory = self.encode_forward(src, mask, pos_embed, self.encoder)
        tgt, query_embed = self.decode_input_reshape(query_embed, bs)
        hs, points = self.decode_forward(grid, h_w, tgt, memory, mask,
                                         query_embed, pos_embed, self.decoder)

        # Seed the interaction decoder's queries from the first decoder's
        # last-layer output (queries x batch x dim layout expected downstream).
        interaction_query_embed = hs[-1].permute(1, 0, 2)
        interaction_tgt, interaction_query_embed = self.decode_input_reshape(interaction_query_embed, bs)
        interaction_decoder_out, self_attn, cross_attn = self.decode_forward_origin(
            interaction_tgt, memory, mask, interaction_query_embed,
            pos_embed, self.interaction_decoder)
        # Keep only the last h*w tokens (the final feature level) and restore
        # the NxCxHxW image layout for the returned memory.
        memory = memory.permute(1, 2, 0)[:, :, -h*w:].view(bs, c, h, w)
        outputs = {'src': srcs[-1],
                   'memory': memory,
                   'out_query': hs, 'points': points,
                   'interaction_decoder_out': interaction_decoder_out,
                   'self_attn': self_attn, 'cross_attn': cross_attn}
        return outputs


