# ------------------------------------------------------------------------
# Conditional DETR Transformer class.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------

import math
import copy
from typing import Optional, List

import torch
import torch.nn.functional as F
from torch import nn, Tensor

from .module.anchor_transformerencoder import TransformerEncoderLayerSpatial, TransformerEncoderLayerLevel
from .module.anchor_transformerdecoder import TransformerDecoderLayer
from .module.utils import _get_clones
from models.module.modules import MLP

from util.misc import (inverse_sigmoid)


def build_transformer(args):
    """Build a :class:`Transformer` from parsed command-line ``args``.

    Only the architecture hyper-parameters are forwarded; the activation is
    fixed to ReLU.
    """
    config = dict(
        d_model=args.hidden_dim,
        nhead=args.nheads,
        num_encoder_layers=args.enc_layers,
        num_decoder_layers=args.dec_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
        activation="relu",
        num_feature_levels=args.num_feature_levels,
        num_query_position=args.num_query_position,
        num_query_pattern=args.num_query_pattern,
        spatial_prior=args.spatial_prior,
        attention_type=args.attention_type,
    )
    return Transformer(**config)




class Transformer(nn.Module):
    """Anchor-query transformer: a spatial / cross-level encoder followed by a
    decoder that predicts per-layer classes and boxes from learned or grid
    anchor points.

    Fixes relative to the original (interface unchanged):
      * ``build_encode`` / ``build_decode`` / ``build_head`` are bound methods;
        the extra explicit ``self`` argument (which raised ``TypeError``) was
        removed.
      * the encoder layer-count split is computed before ``build_encode``,
        which reads ``self.num_encoder_layers_spatial`` / ``_level``.
      * per-level shapes in ``encode_input_reshape`` are read from each level
        tensor ``src`` instead of the ``srcs`` list.
      * grid reference points follow the module's device instead of a
        hard-coded ``.cuda()``.
    """

    def __init__(self, d_model=256, nhead=8,
                 num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,
                 activation="relu", num_feature_levels=3, num_query_position=300, num_query_pattern=3,
                 spatial_prior="learned", attention_type="RCDA", num_classes=91, num_verb_classes=0):
        super().__init__()

        self.d_model = d_model
        self.nhead = nhead
        self.attention_type = attention_type
        self.num_layers = num_decoder_layers

        # Split the encoder budget between spatial layers and cross-level
        # layers.  This MUST happen before build_encode(), which reads both
        # attributes to size its clone lists.
        if num_feature_levels == 1:
            self.num_encoder_layers_level = 0
        else:
            self.num_encoder_layers_level = num_encoder_layers // 2
        self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level

        # Bound methods: `self` is supplied implicitly.
        self.encoder_layers, self.encoder_layers_level = self.build_encode(
            d_model, nhead, num_encoder_layers, dim_feedforward, dropout, activation)
        self.decoder_layers = self.build_decode(
            d_model, nhead, num_decoder_layers, dim_feedforward, dropout, activation, num_feature_levels)

        self.spatial_prior = spatial_prior

        if num_feature_levels > 1:
            self.level_embed = nn.Embedding(num_feature_levels, d_model)
        self.num_pattern = num_query_pattern
        self.pattern = nn.Embedding(self.num_pattern, d_model)

        self.num_position = num_query_position
        if self.spatial_prior == "learned":
            # Learned (x, y) anchors, initialised uniformly in [0, 1).
            self.position = nn.Embedding(self.num_position, 2)
            nn.init.uniform_(self.position.weight.data, 0, 1)

        # Small MLPs lifting raw sinusoidal embeddings to d_model.
        self.adapt_pos2d = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model),)
        self.adapt_pos1d = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.ReLU(),
            nn.Linear(d_model, d_model),)

        self.build_head(num_classes, num_verb_classes)

    def build_encode(self, d_model, nhead, num_encoder_layers, dim_feedforward, dropout, activation):
        """Clone the spatial and cross-level encoder layers.

        Reads ``self.num_encoder_layers_spatial`` / ``_level``, so those must
        already be set.  ``num_encoder_layers`` stays in the signature for
        compatibility, but the split attributes drive the clone counts.
        """
        encoder_layer = TransformerEncoderLayerSpatial(
            d_model, dim_feedforward, dropout, activation, nhead, self.attention_type)
        encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)
        encoder_layer_level = TransformerEncoderLayerLevel(
            d_model, dim_feedforward, dropout, activation, nhead)
        encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)
        return encoder_layers, encoder_layers_level

    def build_decode(self, d_model, nhead, num_decoder_layers, dim_feedforward, dropout, activation, num_feature_levels):
        """Clone ``num_decoder_layers`` decoder layers."""
        decoder_layer = TransformerDecoderLayer(
            d_model, dim_feedforward, dropout, activation, nhead, num_feature_levels, self.attention_type)
        decoder_layers = _get_clones(decoder_layer, num_decoder_layers)
        return decoder_layers

    def build_head(self, num_classes, num_verb_classes):
        """Build the classification and box-regression heads.

        The SAME head module is placed at every decoder layer index (shared
        weights, not clones).  ``num_verb_classes`` is currently unused.
        """
        d_model = self.d_model
        self.class_embed = nn.Linear(d_model, num_classes)
        num_pred = self.num_layers
        # Focal-loss style bias init so initial foreground prob is ~0.01.
        prior_prob = 0.01
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        self.class_embed.bias.data = torch.ones(num_classes) * bias_value

        self.bbox_embed = MLP(d_model, d_model, 4, 3)
        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
        # Bias w/h logits towards small boxes at init.
        nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
        self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
        self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])

    def encode_input_reshape(self, srcs, masks):
        """Flatten per-level feature maps and build positional embeddings.

        Args:
            srcs: iterable of per-level feature tensors; each is assumed to be
                (bs, l, c, h, w) — TODO confirm against the backbone output.
            masks: iterable of per-level padding masks, assumed (bs, h, w)
                with True marking padded positions.

        Returns ``(src_flatten, mask_flatten, spatial_shapes, posemb_row,
        posemb_col, posemb_2d)``; the row/col embeddings are populated for
        RCDA attention, the dense 2-D embedding otherwise.
        """
        src_flatten = []
        mask_flatten = []
        spatial_shapes = []
        lvl_pos_embed_flatten = []
        for lvl, (src, mask) in enumerate(zip(srcs, masks)):
            # Shape must come from the level tensor itself; the original read
            # `srcs.shape`, but `srcs` is the list of levels being zipped.
            bs, l, c, h, w = src.shape
            spatial_shape = (h, w)
            spatial_shapes.append(spatial_shape)
            #   35x35, N, 256
            src = src.flatten(2).permute(2, 0, 1)
            # Broadcast the per-image mask across the l dimension.
            mask = mask.unsqueeze(1).repeat(1, l, 1, 1).reshape(bs * l, h, w)
            src_flatten.append(src)
            mask_flatten.append(mask)

        src_flatten = torch.cat(src_flatten, 0)
        mask_flatten = torch.cat(mask_flatten, 0)
        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)

        # NOTE(review): `mask`, `h` and `w` below deliberately refer to the
        # LAST level from the loop above — presumably all levels share one
        # resolution here; confirm with the caller.
        pos_col, pos_row = mask2pos(mask)
        if self.attention_type == "RCDA":
            posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))
            posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))
            posemb_2d = None
        else:
            pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1),
                                pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)], dim=-1)
            posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))
            posemb_row = posemb_col = None

        return src_flatten, mask_flatten, spatial_shapes, posemb_row, posemb_col, posemb_2d

    def encode_forward(self, src, mask, posemb_row, posemb_col, posemb_2d, encoder_layers, encoder_layers_level):
        """Run the encoder stack on ``src`` (assumed (bs, l, c, h, w)).

        Each spatial layer is optionally followed by a cross-level layer for
        the first ``self.num_encoder_layers_level`` iterations.  When
        ``num_feature_levels == 1`` that count is 0, so ``self.level_embed``
        (only created for multi-level input) is never touched.
        """
        bs, l, c, h, w = src.shape
        outputs = src.reshape(bs * l, c, h, w)
        # Iterate over the layers that were passed in (the original measured
        # len(self.encoder_layers), which is the same stack in practice).
        for idx in range(len(encoder_layers)):
            outputs = encoder_layers[idx](outputs, mask, posemb_row, posemb_col, posemb_2d)
            if idx < self.num_encoder_layers_level:
                outputs = encoder_layers_level[idx](
                    outputs,
                    level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs, 1, 1, 1).reshape(bs * l, 1, c))
        memory = outputs.reshape(bs, l, c, h, w)
        return memory

    def decode_input_reshape(self, c, bs):
        """Build decoder queries and their anchor reference points.

        Args:
            c: channel dimension (must equal the pattern embedding dim).
            bs: batch size.

        Returns:
            tgt: (bs, num_pattern * num_position, c) query tensor.
            reference_points: (bs, num_pattern * num_position, 2) anchors
                in [0, 1].

        Raises:
            ValueError: for an unknown ``self.spatial_prior``.
        """
        tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c) \
            .repeat(bs, 1, self.num_position, 1) \
            .reshape(bs, self.num_pattern * self.num_position, c)

        if self.spatial_prior == "learned":
            reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)
        elif self.spatial_prior == "grid":
            # Snap num_position to the nearest square grid (side effect kept
            # from the original implementation).
            nx = ny = round(math.sqrt(self.num_position))
            self.num_position = nx * ny
            x = (torch.arange(nx) + 0.5) / nx
            y = (torch.arange(ny) + 0.5) / ny
            xy = torch.meshgrid(x, y)
            # Follow the module's device instead of the original hard-coded
            # .cuda(), which crashed on CPU-only runs.
            reference_points = torch.cat(
                [xy[0].reshape(-1)[..., None], xy[1].reshape(-1)[..., None]], -1
            ).to(self.pattern.weight.device)
            reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)
        else:
            raise ValueError(f'unknown {self.spatial_prior} spatial prior')
        return tgt, reference_points

    def decode_forward(self, output, memory, mask, reference_points, posemb_row, posemb_col, posemb_2d, decoder_layers):
        """Run the decoder stack and collect per-layer predictions.

        Returns:
            outputs_class: (num_layers, bs, num_queries, num_classes) logits.
            outputs_coord: (num_layers, bs, num_queries, 4) boxes in [0, 1].
        """
        outputs_classes, outputs_coords = [], []
        for lid, layer in enumerate(decoder_layers):
            output = layer(output, reference_points, memory, mask,
                           adapt_pos2d=self.adapt_pos2d, adapt_pos1d=self.adapt_pos1d,
                           posemb_row=posemb_row, posemb_col=posemb_col, posemb_2d=posemb_2d)
            # Predict box deltas in logit space relative to the anchors.
            reference = inverse_sigmoid(reference_points)
            outputs_class = self.class_embed[lid](output)
            tmp = self.bbox_embed[lid](output)
            if reference.shape[-1] == 4:
                tmp += reference
            else:
                assert reference.shape[-1] == 2
                tmp[..., :2] += reference
            outputs_coord = tmp.sigmoid()
            outputs_classes.append(outputs_class[None,])
            outputs_coords.append(outputs_coord[None,])
        outputs_class = torch.cat(outputs_classes, dim=0)
        outputs_coord = torch.cat(outputs_coords, dim=0)
        return outputs_class, outputs_coord

    def forward(self, srcs, masks):
        """Full encode/decode pass.

        Args:
            srcs: per-level feature tensors; the last level is assumed to be
                (bs, l, c, h, w) — TODO confirm with the backbone.
            masks: matching per-level padding masks.

        Returns a dict with stacked per-decoder-layer ``outputs_class`` and
        ``outputs_coord``.
        """
        # prepare input for decoder
        bs, l, c, h, w = srcs[-1].shape
        src, mask, spatial_shapes, posemb_row, posemb_col, posemb_2d = self.encode_input_reshape(srcs, masks)
        memory = self.encode_forward(src, mask,
                                     posemb_row, posemb_col, posemb_2d,
                                     self.encoder_layers, self.encoder_layers_level)
        tgt, reference_points = self.decode_input_reshape(c, bs)
        outputs_class, outputs_coord = self.decode_forward(tgt, memory, mask, reference_points,
                                                           posemb_row, posemb_col, posemb_2d, self.decoder_layers)

        outputs = {'outputs_class': outputs_class,
                   'outputs_coord': outputs_coord}
        return outputs




def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
    """Sinusoidal embedding of normalized 2-D positions.

    ``pos[..., 0]`` / ``pos[..., 1]`` hold x / y in [0, 1]; each is expanded
    to ``num_pos_feats`` channels (sin on even, cos on odd channels) and the
    y block is concatenated before the x block, giving a trailing dimension
    of ``2 * num_pos_feats``.
    """
    freqs = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    freqs = temperature ** (2 * (freqs // 2) / num_pos_feats)
    scaled = pos * (2 * math.pi)

    def _interleave(phase):
        # Alternate sin/cos across channel pairs, then flatten the pairs.
        return torch.stack((phase[..., 0::2].sin(), phase[..., 1::2].cos()), dim=-1).flatten(-2)

    emb_x = _interleave(scaled[..., 0, None] / freqs)
    emb_y = _interleave(scaled[..., 1, None] / freqs)
    return torch.cat((emb_y, emb_x), dim=-1)


def pos2posemb1d(pos, num_pos_feats=256, temperature=10000):
    """Sinusoidal embedding of normalized 1-D positions.

    ``pos`` holds values in [0, 1]; the result appends a trailing axis of
    ``num_pos_feats`` channels, alternating sin (even) and cos (odd).
    """
    freqs = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
    freqs = temperature ** (2 * (freqs // 2) / num_pos_feats)
    phase = (pos * (2 * math.pi))[..., None] / freqs
    return torch.stack((phase[..., 0::2].sin(), phase[..., 1::2].cos()), dim=-1).flatten(-2)


def mask2pos(mask):
    """Turn a padding mask into normalized row/column center coordinates.

    Args:
        mask: (B, H, W) boolean tensor, True at padded positions.

    Returns:
        (y_embed, x_embed): (B, H) and (B, W) tensors where the i-th valid
        cell maps to (i + 0.5) / num_valid, measured along the first column
        (rows) and first row (columns) respectively.
    """
    valid = ~mask
    rows = valid[:, :, 0].cumsum(dim=1, dtype=torch.float32)
    cols = valid[:, 0, :].cumsum(dim=1, dtype=torch.float32)
    rows = (rows - 0.5) / rows[:, -1:]
    cols = (cols - 0.5) / cols[:, -1:]
    return rows, cols
