# ------------------------------------------------------------------------
# Conditional DETR Transformer class.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------



import torch
from torch import nn

from .module.transformerencoder import TransformerEncoderLayer, TransformerEncoder
from .module.smca_transformerdecoder import TransformerDecoderLayer, TransformerDecoder

def build_transformer(args):
    """Construct a :class:`Transformer` from the parsed config namespace.

    Intermediate decoder outputs are always requested so per-layer
    auxiliary losses can be attached downstream.
    """
    kwargs = dict(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        num_queries=args.num_queries,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers_hopd,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
        smooth=args.smooth,
        dynamic_scale=args.dynamic_scale,
    )
    return Transformer(**kwargs)


class Transformer(nn.Module):
    """Encoder-decoder transformer with a spatially-modulated (SMCA) decoder.

    The encoder is a stack of standard ``TransformerEncoderLayer``s; each
    decoder layer additionally receives a reference coordinate ``grid`` and
    the feature-map size ``h_w`` so cross-attention can be modulated around
    predicted spatial points.
    """

    def __init__(self, d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_decoder_layers=6,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 normalize_before=False,
                 return_intermediate_dec=False,
                 smooth=8,
                 dynamic_scale=None,
                 num_queries=None):
        """
        Args:
            d_model: embedding width shared by encoder and decoder.
            nhead: number of attention heads.
            num_encoder_layers / num_decoder_layers: stack depths.
            dim_feedforward: hidden width of the per-layer MLP.
            dropout: dropout probability passed to every sub-layer.
            activation: MLP activation name (e.g. "relu").
            normalize_before: pre-norm (True) vs post-norm (False) layers.
            return_intermediate_dec: if True the decoder returns the output
                of every layer (needed for auxiliary losses).
            smooth: SMCA smoothing factor for the decoder layers.
            dynamic_scale: SMCA dynamic-scale mode for the decoder layers.
            num_queries: accepted for compatibility with build_transformer,
                which passes it as a keyword; the query embeddings themselves
                are supplied to ``forward`` by the caller. Previously this
                keyword did not exist and construction raised TypeError.
        """
        super().__init__()
        self.d_model = d_model
        self.nhead = nhead
        self.dec_layers = num_decoder_layers
        self.return_intermediate_dec = return_intermediate_dec
        self.num_queries = num_queries
        self.encoder = self.build_encode(d_model, nhead, num_encoder_layers,
                                         dim_feedforward, dropout, activation,
                                         normalize_before)
        self.decoder = self.build_decode(d_model, nhead, num_decoder_layers,
                                         dim_feedforward, dropout, activation,
                                         normalize_before, return_intermediate_dec,
                                         smooth, dynamic_scale)
        self._reset_parameters()

    def build_encode(self, d_model, nhead, num_encoder_layers, dim_feedforward,
                     dropout, activation, normalize_before):
        """Create the encoder stack; a final LayerNorm only for pre-norm layers."""
        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        return TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

    def build_decode(self, d_model, nhead, num_decoder_layers, dim_feedforward,
                     dropout, activation, normalize_before, return_intermediate_dec,
                     smooth, dynamic_scale):
        """Create the SMCA decoder stack (one distinct layer per depth index).

        BUG FIX: the signature previously declared ``..., dynamic_scale, smooth``
        while the call in ``__init__`` passes ``..., smooth, dynamic_scale``
        positionally, so the two values were silently swapped before reaching
        ``TransformerDecoderLayer``. The parameter order now matches the call
        site and the layer constructor's ``(smooth, dynamic_scale, ...)`` order.
        """
        decoder_layers = []
        for layer_index in range(num_decoder_layers):
            decoder_layers.append(
                TransformerDecoderLayer(smooth, dynamic_scale, layer_index,
                                        d_model, nhead, dim_feedforward, dropout,
                                        activation, normalize_before))
        decoder_norm = nn.LayerNorm(d_model)
        return TransformerDecoder(decoder_layers, num_decoder_layers, decoder_norm,
                                  return_intermediate=return_intermediate_dec)

    def _reset_parameters(self):
        # Xavier-initialise every weight matrix; 1-D parameters (biases,
        # LayerNorm gains) keep their module defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def encode_input_reshape(self, srcs, masks, pos_embed):
        """Flatten per-level feature maps into sequence-first tensors.

        Args:
            srcs: list of [N, C, H, W] feature maps.
            masks: list of [N, H, W] padding masks.
            pos_embed: list of [N, C, H, W] positional encodings.

        Returns:
            src_flatten:  [sum(H*W), N, C] concatenated features.
            mask_flatten: [N, sum(H*W)] concatenated padding masks.
            pos_embed_flatten: [sum(H*W), N, C] concatenated encodings.
            spatial_shapes: [num_levels, 2] long tensor of (H, W) per level.
        """
        src_flatten = []
        mask_flatten = []
        spatial_shapes = []
        pos_flatten = []
        for src, mask, pos in zip(srcs, masks, pos_embed):
            bs, c, h, w = src.shape
            spatial_shapes.append((h, w))
            # [N, C, H, W] -> [H*W, N, C]
            src_flatten.append(src.flatten(2).permute(2, 0, 1))
            # [N, H, W] -> [N, H*W]
            mask_flatten.append(mask.flatten(1))
            # [N, C, H, W] -> [H*W, N, C].  A learned per-level embedding was
            # once added here and has been disabled upstream.
            pos_flatten.append(pos.flatten(2).permute(2, 0, 1))
        src_flatten = torch.cat(src_flatten, 0)
        mask_flatten = torch.cat(mask_flatten, 1)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long,
                                        device=src_flatten.device)
        pos_embed_flatten = torch.cat(pos_flatten, 0)

        return src_flatten, mask_flatten, pos_embed_flatten, spatial_shapes

    def encode_forward(self, src, mask, pos_embed, encoder):
        """Run the encoder; returns (memory, encoder attention maps)."""
        memory, memory_attn = encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        return memory, memory_attn

    def decode_input_reshape(self, query_embed, bs):
        """Broadcast query embeddings over the batch and build a zero target.

        Returns (tgt, query_embed) with shape [num_queries, bs, C] each;
        tgt is all zeros (the decoder starts from empty content queries).
        """
        if query_embed.dim() == 2:
            # [num_queries, C] -> [num_queries, bs, C]
            query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        tgt = torch.zeros_like(query_embed)
        return tgt, query_embed

    def decode_forward(self, grid, h_w, tgt, memory, mask, query_embed, pos_embed, decoder):
        """Run the decoder and move its outputs to batch-first layout."""
        hs, points = decoder(grid, h_w, tgt, memory, memory_key_padding_mask=mask,
                             pos=pos_embed, query_pos=query_embed)
        if not self.return_intermediate_dec:
            # Add a leading layer axis so both paths share one output shape.
            hs = hs.unsqueeze(0)
        hs = hs.transpose(1, 2)
        points = points.transpose(0, 1)
        return hs, points

    def forward(self, srcs, masks, query_embed, pos_embed, h_w):
        """Full encode/decode pass.

        Args:
            srcs: list of [N, C, H, W] feature maps (last level sets H, W).
            masks: list of [N, H, W] padding masks.
            query_embed: [num_queries, C] (or already batched) query embeddings.
            pos_embed: list of [N, C, H, W] positional encodings.
            h_w: feature-map size tensor forwarded to the decoder.

        Returns:
            dict with 'src' (last-level input), 'memory' ([N, C, H, W]
            encoder output for the last level), 'memory_attn',
            'out_query' (decoder states) and 'points' (reference points).
        """
        bs, c, h, w = srcs[-1].shape
        src, mask, pos_embed, spatial_shapes = self.encode_input_reshape(srcs, masks, pos_embed)
        # Reference coordinate grid over the last level, one copy per
        # (batch, head) pair.  NOTE(review): the hard-coded factor 8 assumes
        # nhead == 8 — confirm against the decoder's head count.
        grid_y, grid_x = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
        grid = torch.stack((grid_x, grid_y), 2).float().to(src.device)
        grid = grid.reshape(-1, 2).unsqueeze(1).repeat(1, bs * 8, 1)

        memory, memory_attn = self.encode_forward(src, mask, pos_embed, self.encoder)
        tgt, query_embed = self.decode_input_reshape(query_embed, bs)
        hs, points = self.decode_forward(grid, h_w, tgt, memory, mask, query_embed,
                                         pos_embed, self.decoder)

        # Keep only the last h*w tokens (last level) and restore [N, C, H, W].
        memory = memory.permute(1, 2, 0)[:, :, -h*w:].view(bs, c, h, w)
        # The original dict listed the 'memory' key twice; the duplicate is dropped.
        outputs = {'src': srcs[-1],
                   'memory': memory, 'memory_attn': memory_attn,
                   'out_query': hs, 'points': points}
        return outputs


