
import torch
import torch.nn.functional as F
from torch import nn

from .module.transformerencoder import TransformerEncoderLayer, TransformerEncoder
from .module.transformerdecoder import TransformerDecoderLayer, TransformerDecoder
from .transformer import Transformer as trans

import CLIP.clip as Clip

def build_transformer(args):
    """Build a GEN transformer from a parsed argument namespace.

    Maps the repo's CLI/config names (hidden_dim, nheads, enc_layers,
    dec_layers, pre_norm, ...) onto the GEN constructor. Intermediate
    decoder outputs are always returned so per-layer auxiliary losses
    can be attached by the caller.
    """
    return GEN(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_dec_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )


class GEN(trans):
    """Transformer with a shared encoder and two cascaded decoders.

    The instance decoder consumes concatenated human/object queries;
    their averaged outputs then guide the interaction decoder. The base
    class supplies the encoder plus the reshape/forward helpers used
    below (``encode_input_reshape``, ``encode_forward``,
    ``decode_input_reshape``, ``decode_forward``, ``build_decode``).
    """

    def __init__(self, d_model=512,
                 nhead=8,
                 num_encoder_layers=6,
                 num_dec_layers=3,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation="relu",
                 normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__(d_model, nhead, num_encoder_layers,
                         num_dec_layers, dim_feedforward, dropout,
                         activation, normalize_before, return_intermediate_dec)
        # The base-class decoder is never used: GEN replaces it with two
        # dedicated decoders built from the same hyper-parameters.
        self.decoder = None
        decoder_cfg = (d_model, nhead, num_dec_layers, dim_feedforward,
                       dropout, activation, normalize_before,
                       return_intermediate_dec)
        self.instance_decoder = self.build_decode(*decoder_cfg)
        self.interaction_decoder = self.build_decode(*decoder_cfg)
        self._reset_parameters()

    def forward(self, srcs, masks, query_embed_h, query_embed_o, pos_guided_embed, pos_embed):
        """Run encoder, instance decoder, then interaction decoder.

        Args:
            srcs: list of feature maps; the last entry (NxCxHxW) fixes the
                spatial shape used to fold the encoder memory back.
            masks: padding masks matching ``srcs``.
            query_embed_h / query_embed_o: human / object query embeddings,
                shape (num_queries, d_model).
            pos_guided_embed: positional guidance added to both query sets.
            pos_embed: positional encodings for the encoder input.

        Returns:
            dict with the raw last feature map (``src``), the reshaped
            encoder ``memory``, per-layer human/object decoder states
            (``h_hs`` / ``o_hs``) and interaction states (``inter_hs``).
        """
        bs, c, h, w = srcs[-1].shape

        # Encoder: flatten NxCxHxW features to HWxNxC and run self-attention.
        src, mask, pos_embed, spatial_shapes = self.encode_input_reshape(srcs, masks, pos_embed)
        memory, memory_attn = self.encode_forward(src, mask, pos_embed, self.encoder)

        # Instance branch: both query sets share the position-guided
        # embedding, are tiled over the batch, and run jointly.
        tiled_o = (query_embed_o + pos_guided_embed).unsqueeze(1).repeat(1, bs, 1)
        tiled_h = (query_embed_h + pos_guided_embed).unsqueeze(1).repeat(1, bs, 1)
        ins_queries = torch.cat((tiled_h, tiled_o), dim=0)
        ins_tgt, ins_queries = self.decode_input_reshape(ins_queries, bs)
        ins_hs, ins_hs_self_attn, ins_hs_cross_attn = self.decode_forward(
            ins_tgt, memory, mask, ins_queries, pos_embed, self.instance_decoder)

        # Split the joint decoder output back into human / object halves.
        num_queries = tiled_h.shape[0]
        h_hs = ins_hs[:, :, :num_queries, :]
        o_hs = ins_hs[:, :, num_queries:, :]

        # Interaction branch: guided by the averaged instance states.
        guidance = ((h_hs + o_hs) / 2.0).permute(0, 2, 1, 3)
        inter_tgt, guidance = self.decode_input_reshape(guidance, bs)
        inter_hs, inter_self_attn, inter_cross_attn = self.decode_forward(
            inter_tgt, memory, mask, guidance, pos_embed, self.interaction_decoder)

        # Fold encoder memory back to NxCxHxW (last h*w tokens = last level).
        memory = memory.permute(1, 2, 0)[:, :, -h * w:].view(bs, c, h, w)
        return {'src': srcs[-1],
                'memory': memory,
                'h_hs': h_hs,
                'o_hs': o_hs,
                'inter_hs': inter_hs}



