import torch
from torch import nn
import torch.nn as nn


from .module.transformerencoder import TransformerEncoderLayer, TransformerEncoder
from .module.transformerdecoder import TransformerDecoderLayer, TransformerDecoder, DecoderEmbeddings

def build_transformer(args):
    """Build a :class:`Transformer` configured from an argparse-style namespace.

    Intermediate decoder outputs are always returned (needed by the
    training loss over all decoder layers).
    """
    settings = dict(
        d_model=args.hidden_dim,
        dropout=args.dropout,
        nhead=args.nheads,
        dim_feedforward=args.dim_feedforward,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        normalize_before=args.pre_norm,
        return_intermediate_dec=True,
    )
    return Transformer(**settings)


class Transformer(nn.Module):
    """Encoder-decoder transformer with autoregressive greedy decoding.

    Pix2Seq-style object detection head: an image feature map is flattened
    and encoded, then the decoder emits a token sequence of interleaved
    box-coordinate and class tokens (5 tokens per object: 4 coordinate
    bins followed by 1 class token).

    Vocabulary layout implied by the constants below -- TODO(review):
    confirm against the tokenizer/training pipeline:
      * 2003 embedding entries, padding index 2000, max length 501;
      * only tokens < 1997 are valid predictions;
      * tokens < 1000 are coordinate-bin tokens.
    """

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 activation="relu", normalize_before=False,
                 return_intermediate_dec=False):
        super().__init__()
        # vocab_size=2003, padding idx 2000, max positions 501 (500 generated
        # tokens + start token) -- see class docstring.
        self.embedding = DecoderEmbeddings(2003, d_model, 2000, 501, dropout)

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        # Pre-norm encoders need a final LayerNorm; post-norm ones do not.
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
                                                dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
                                          return_intermediate=return_intermediate_dec)

        self._reset_parameters()
        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        """Xavier-initialize every weight matrix (biases/1-D params untouched)."""
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed, seq, vocab_embed):
        """Encode the feature map; teacher-force (train) or greedy-decode (eval).

        Args:
            src: (N, C, H, W) backbone feature map.
            mask: (N, H, W) padding mask for the feature map.
            query_embed: unused -- immediately overwritten by the decoder's
                learned positional embeddings (kept for interface stability).
            pos_embed: (N, C, H, W) positional encoding of the feature map.
            seq: (N, T) target token ids (train) or decoding prefix (eval).
            vocab_embed: projection from decoder hidden states to vocab logits.

        Returns:
            Training: (decoder states (layers, N, T, C), memory (N, C, H, W)).
            Eval: (full token sequence, per-step top-1 probabilities).
        """
        # Flatten NxCxHxW to (HW, N, C) as expected by the encoder.
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        mask = mask.flatten(1)

        tgt = self.embedding(seq).permute(1, 0, 2)
        # Learned decoder query positions; loop-invariant, so computed once
        # (the original rebuilt this inside the decoding loop every step).
        query_embed = self.embedding.position_embeddings.weight.unsqueeze(1)
        query_embed = query_embed.repeat(1, bs, 1)

        # NOTE(review): pos_embed is cast to half precision for the encoder
        # only, while the decoder below gets it at full precision -- looks
        # like an AMP workaround; confirm this asymmetry is intended.
        memory, memory_attn = self.encoder(src, src_key_padding_mask=mask,
                                           pos=pos_embed.half())

        if self.training:
            hs, self_attn, cross_attn = self.decoder(
                tgt, memory, memory_key_padding_mask=mask,
                pos=pos_embed, query_pos=query_embed[:len(tgt)],
                tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device))
            return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)

        # Greedy autoregressive decoding: 500 steps = 100 objects x
        # (4 box tokens + 1 class token).
        values = []
        # Positions that emit a class token: every 5th position.
        # Set membership is O(1) per step (the original scanned a 400-element
        # list for every one of the 500 steps).
        cls_pos = {(i + 1) * 5 - 1 for i in range(100)}
        for i in range(500):
            # Re-embed the (growing) sequence each step; no KV cache here.
            tgt = self.embedding(seq).permute(1, 0, 2)
            hs, self_attn, cross_attn = self.decoder(
                tgt, memory, memory_key_padding_mask=mask,
                pos=pos_embed, query_pos=query_embed[:len(tgt)],
                tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device))
            # Logits of the last decoder layer at the final position.
            out = vocab_embed(hs.transpose(1, 2)[-1, :, -1, :]).softmax(-1)
            # Restrict the candidate vocabulary: class positions may pick any
            # valid token (< 1997); box positions only coordinate bins (< 1000).
            out = out[:, :1997] if i in cls_pos else out[:, :1000]
            # Single topk call returns (values, indices); the original
            # computed topk twice per step.
            value, extra_seq = out.topk(dim=-1, k=1)
            seq = torch.cat([seq, extra_seq], dim=-1)
            values.append(value)
        return seq, torch.cat(values, dim=-1)



def generate_square_subsequent_mask(sz):
    """Return an additive causal attention mask of shape (sz, sz).

    Entry (i, j) is 0.0 when j <= i (position i may attend to j) and
    float('-inf') when j > i (future positions are masked out).
    """
    # Fill everything with -inf, then keep only the part strictly above
    # the diagonal; the lower triangle (incl. diagonal) becomes 0.0.
    full = torch.full((sz, sz), float("-inf"))
    return torch.triu(full, diagonal=1)