# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name： dab_decoder
Description :
Author : 'li'
date： 2022/7/5
Change Activity:
2022/7/5:
-------------------------------------------------
"""
import torch
from torch import nn

from ...layers.norm import build_norm_layer
from ....algorithms.denoise.denoise import Denoise
from .....dl.misc.transform import inverse_sigmoid
from .dab_decoder_layer import DABDecoderLayer
from .. import MLP
from ...base import BaseModule
from ...position_embedding.gen_position_sine_embed import gen_position_sine_embed


class DABDecoder(BaseModule):
    """DAB-DETR transformer decoder with dynamic anchor boxes.

    Stacks ``num_layers`` :class:`DABDecoderLayer` blocks and iteratively
    refines a set of reference boxes. At every layer the current boxes are
    turned into a sine positional embedding, projected by an MLP head into a
    query position, optionally rescaled by a content-conditioned
    transformation (``query_scale``) and modulated by predicted box
    width/height (``modulate_hw_attn``). Per-layer deltas predicted by
    ``bbox_embed`` update the reference boxes between layers.
    """

    def __init__(self, num_layers, norm=None, return_intermediate=True, embed_dim=256, num_heads=8, hidden_channels=2048, query_dim=4, keep_query_pos=False,
                 query_scale_type='cond_elewise', dropout_rate=0.1, activation='ReLU', normalize_before=False, add_self_attention_module=True,
                 modulate_hw_attn=False, bbox_embed_diff_each_layer=True, hidden_dim=256, num_classes=10):
        """
        Args:
            num_layers (int): number of decoder layers.
            norm: norm config consumed by ``build_norm_layer``; applied to the final output.
            return_intermediate (bool): must be True; every layer's output is kept and stacked.
            embed_dim (int): transformer embedding dimension.
            num_heads (int): attention heads per decoder layer.
            hidden_channels (int): FFN hidden width inside each decoder layer.
            query_dim (int): coordinates carried per box query, normally 4 -> (x, y, w, h).
            keep_query_pos (bool): if False, the cross-attention query-pos projection
                is removed from every layer after the first.
            query_scale_type (str): one of 'cond_elewise', 'cond_scalar', 'fix_elewise'.
            dropout_rate (float): dropout rate inside the decoder layers.
            activation (str): activation name inside the decoder layers.
            normalize_before (bool): pre-norm (True) vs post-norm (False) layers.
            add_self_attention_module (bool): whether each layer contains self-attention.
            modulate_hw_attn (bool): modulate the sine query by predicted box w/h.
            bbox_embed_diff_each_layer (bool): separate bbox head per layer vs a shared one.
            hidden_dim (int): hidden width of the bbox/class prediction heads.
            num_classes (int): output size of the classification head.
        """
        super().__init__()
        self.num_heads = num_heads
        self.dropout_rate = dropout_rate
        self.embed_dim = embed_dim
        self.hidden_channels = hidden_channels
        self.activation = activation
        self.num_layers = num_layers
        self.keep_query_pos = keep_query_pos
        self.normalize_before = normalize_before
        self.add_self_attention_module = add_self_attention_module
        self.modulate_hw_attn = modulate_hw_attn
        self.bbox_embed_diff_each_layer = bbox_embed_diff_each_layer
        self.layers = self._build_layers()
        self.norm = build_norm_layer(norm)
        self.return_intermediate = return_intermediate
        assert self.return_intermediate is True
        self.query_dim = query_dim
        assert query_scale_type in ['cond_elewise', 'cond_scalar', 'fix_elewise']
        self.query_scale_type = query_scale_type
        if query_scale_type == 'cond_elewise':
            # element-wise positional transformation conditioned on decoder content
            self.query_scale = MLP(embed_dim, embed_dim, embed_dim, 2)
        elif query_scale_type == 'cond_scalar':
            # single scalar scale conditioned on decoder content
            self.query_scale = MLP(embed_dim, embed_dim, 1, 2)
        elif query_scale_type == 'fix_elewise':
            # fixed learned transformation, one row per layer
            self.query_scale = nn.Embedding(num_layers, embed_dim)
        else:
            # defensive: unreachable behind the assert above
            raise NotImplementedError("Unknown query_scale_type: {}".format(query_scale_type))
        # maps the box sine embedding (query_dim // 2 * embed_dim wide) to a query position
        self.reference_point_head = MLP(query_dim // 2 * embed_dim, embed_dim, embed_dim, 2)
        if modulate_hw_attn:
            # predicts (w, h) of the reference box from the content query
            self.ref_anchor_head = MLP(embed_dim, embed_dim, 2, 2)
        if not keep_query_pos:
            # drop the cross-attention query-pos projection for all but the first layer
            for layer_id in range(num_layers - 1):
                self.layers[layer_id + 1].ca_qpos_proj = None
        if bbox_embed_diff_each_layer:
            # independent bbox refinement head per layer
            self.bbox_embed = nn.ModuleList([MLP(hidden_dim, hidden_dim, 4, 3) for _ in range(self.num_layers)])
        else:
            # single bbox refinement head shared across layers
            self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        self.class_embed = nn.Linear(hidden_dim, num_classes)

    def _build_layers(self):
        """Create the stack of decoder layers.

        Fix: forward the stored ``normalize_before`` / ``keep_query_pos`` /
        ``add_self_attention_module`` flags instead of hard-coded values, so
        the corresponding constructor arguments actually take effect.
        """
        layers = nn.ModuleList()
        for _ in range(self.num_layers):
            layers.append(DABDecoderLayer(embed_dim=self.embed_dim, num_heads=self.num_heads,
                                          hidden_channels=self.hidden_channels,
                                          dropout_rate=self.dropout_rate, activation=self.activation,
                                          normalize_before=self.normalize_before,
                                          keep_query_pos=self.keep_query_pos,
                                          add_self_attention_module=self.add_self_attention_module))
        return layers

    def forward(self, label_query, memory, pos, attention_mask, feature_padding_mask, tgt_key_padding_mask=None,
                memory_key_padding_mask=None, ref_points_unsigmoid=None, mask_dict=None):
        """Run the decoder stack, refining the reference boxes layer by layer.

        Args:
            label_query: initial content queries fed to the first layer.
            memory: encoder output features attended to by every layer.
            pos: positional embedding of ``memory``.
            attention_mask: attention mask for the decoder layers.
            feature_padding_mask: padding mask for ``memory``.
            tgt_key_padding_mask: key padding mask for the queries.
            memory_key_padding_mask: key padding mask for ``memory``.
            ref_points_unsigmoid: initial anchor boxes before sigmoid, shape (L, B, 4).
            mask_dict: denoising bookkeeping, forwarded to post-processing.

        Returns:
            ``(out, mask_dict)`` from :meth:`_decoder_post_process`
            (``return_intermediate`` is asserted True in the constructor).
        """
        output = label_query
        intermediate = []
        reference_points = ref_points_unsigmoid.sigmoid()
        ref_points = [reference_points]
        for layer_id, layer in enumerate(self.layers):
            object_center = reference_points[..., :self.query_dim]
            bbox_sine_pos_embed = gen_position_sine_embed(object_center)
            query_pos = self.reference_point_head(bbox_sine_pos_embed)
            # conditional positional transformation; identity at the first layer
            if self.query_scale_type != 'fix_elewise':
                pos_transformation = 1 if layer_id == 0 else self.query_scale(output)
            else:
                pos_transformation = self.query_scale.weight[layer_id]
            query_sine_embed = bbox_sine_pos_embed[..., :self.embed_dim] * pos_transformation
            if self.modulate_hw_attn:
                # rescale the two halves of the sine query by predicted w/h over reference w/h
                ref_hw_cond = self.ref_anchor_head(output).sigmoid()
                query_sine_embed[..., self.embed_dim // 2:] *= (ref_hw_cond[..., 0] / object_center[..., 2]).unsqueeze(-1)
                query_sine_embed[..., :self.embed_dim // 2] *= (ref_hw_cond[..., 1] / object_center[..., 3]).unsqueeze(-1)
            # Fix: feed the running ``output`` (not the initial ``label_query``) so each
            # layer builds on the previous layer's result; previously every layer
            # re-consumed the initial queries and discarded earlier layers' work.
            output = layer(label_query=output, feature=memory, attention_mask=attention_mask,
                           feature_padding_mask=feature_padding_mask, pos=pos,
                           label_query_key_padding_mask=tgt_key_padding_mask,
                           feature_key_padding_mask=memory_key_padding_mask,
                           query_pos=query_pos, query_sine_embed=query_sine_embed,
                           is_first=(layer_id == 0))
            if self.bbox_embed is not None:
                bbox_head = self.bbox_embed[layer_id] if self.bbox_embed_diff_each_layer else self.bbox_embed
                tmp = bbox_head(output)
                # predicted deltas are applied in unsigmoided (logit) space
                tmp[..., :self.query_dim] += inverse_sigmoid(reference_points)
                new_reference_points = tmp[..., :self.query_dim].sigmoid()
                if layer_id != self.num_layers - 1:
                    ref_points.append(new_reference_points)
                # stop gradients flowing through the iterated reference points
                reference_points = new_reference_points.detach()
            if self.return_intermediate:
                intermediate.append(output)
        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                # replace the last stored output with its normalized version
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            hs = torch.stack(intermediate).transpose(1, 2)
            if self.bbox_embed is not None:
                refs = torch.stack(ref_points).transpose(1, 2)
            else:
                refs = reference_points.unsqueeze(0).transpose(1, 2)
            return self._decoder_post_process(hs, refs, mask_dict)
        return output.unsqueeze(0)

    def _decoder_post_process(self, hs, reference, mask_dict):
        """Turn per-layer decoder states into class/box predictions.

        Args:
            hs: stacked decoder outputs, one slice per layer.
            reference: per-layer reference boxes (already sigmoided).
            mask_dict: denoising bookkeeping, forwarded to ``Denoise.dn_post_process``.

        Returns:
            ``(out, mask_dict)`` where ``out`` holds 'pred_logits', 'pred_boxes'
            (last layer) and 'aux_outputs' for the intermediate layers.
        """
        reference_before_sigmoid = inverse_sigmoid(reference)
        if not self.bbox_embed_diff_each_layer:
            tmp = self.bbox_embed(hs)
            tmp[..., :self.query_dim] += reference_before_sigmoid
            outputs_coord = tmp.sigmoid()
        else:
            outputs_coords = []
            for lvl in range(hs.shape[0]):
                tmp = self.bbox_embed[lvl](hs[lvl])
                tmp[..., :self.query_dim] += reference_before_sigmoid[lvl]
                outputs_coords.append(tmp.sigmoid())
            outputs_coord = torch.stack(outputs_coords)
        outputs_class = self.class_embed(hs)
        # split off the denoising part of the predictions
        outputs_class, outputs_coord = Denoise.dn_post_process(outputs_class, outputs_coord, mask_dict)
        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1],
               'aux_outputs': self._set_aux_loss(outputs_class, outputs_coord)}
        return out, mask_dict

    @staticmethod
    def _set_aux_loss(outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [{'pred_logits': a, 'pred_boxes': b}
                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
