# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:   dab_detr
Description: DAB-DETR detector model.
Author:      'li'
Date:        2022/6/19
Change Activity:
    2022/6/19: created
-------------------------------------------------
"""
import torch
from torch import nn
import torch.nn.functional as tf

from .. import build_backbone
from ..transformer import build_transformer
from ...algorithms.denoise.denoise import Denoise
from ...misc.transform import inverse_sigmoid
from ...model.base.base_detector import BaseDetector
from ...model.position_embedding import build_positional_embedding
from ...misc.concat_images_tensor import ConcatImageTensor


class DabDETR(BaseDetector):
    """DAB-DETR style detector.

    Pipeline: backbone features -> 1x1 channel projection -> denoised query
    generation -> transformer. Only the last backbone feature level is fed
    to the transformer (see ``forward``).

    Args:
        backbone_cfg: config passed to ``build_backbone``.
        positional_embedding_cfg: config for ``build_positional_embedding``.
        transformer_cfg: config for ``build_transformer``.
        denoise_cfg: kwargs for ``Denoise``; an empty dict is used when None.
        num_queries: number of object queries (learned reference points).
        query_dim: 2 for (x, y) or 4 for (x, y, w, h) reference points.
        hidden_dim: transformer hidden dimension.
        num_classes: number of object classes (one no-object slot is added).
        backbone_out_channels: channel count of the backbone feature maps.
        random_ref_points_xy: if True, randomly initialise the xy part of the
            reference points in (0, 1) and store it in logit space.
        bbox_embed_diff_each_layer: currently unused here; kept so existing
            configs remain valid.
    """

    def __init__(self,
                 backbone_cfg=None,
                 positional_embedding_cfg=None,
                 transformer_cfg=None,
                 denoise_cfg=None,
                 num_queries=300,
                 query_dim=4,
                 hidden_dim=256,
                 num_classes=91,
                 backbone_out_channels=256,
                 random_ref_points_xy=False,
                 bbox_embed_diff_each_layer=False,
                 ):
        super().__init__()
        # keep the configs around for introspection / bookkeeping
        self.backbone_cfg = backbone_cfg
        self.positional_embedding_cfg = positional_embedding_cfg
        self.transformer_cfg = transformer_cfg  # saved for consistency with the other cfgs
        self.denoise_cfg = denoise_cfg
        # build sub-modules
        self.backbone = build_backbone(self.backbone_cfg)
        self.positional_embedding = build_positional_embedding(positional_embedding_cfg)
        # 1x1 conv maps backbone channels onto the transformer hidden size
        self.feature_projection = nn.Conv2d(backbone_out_channels, hidden_dim, kernel_size=1)
        if query_dim not in (2, 4):
            # explicit validation instead of ``assert`` (asserts vanish under -O)
            raise ValueError(f'query_dim must be 2 or 4, got {query_dim}')
        self.ref_points_embedding = nn.Embedding(num_queries, query_dim)  # (x,y[,w,h]) anchor embedding
        self.label_embedding = nn.Embedding(num_classes + 1, hidden_dim - 1)  # +1 for the no-object class
        if random_ref_points_xy:
            # random xy in (0, 1), stored in inverse-sigmoid (logit) space
            self.ref_points_embedding.weight.data[:, :2].uniform_(0, 1)
            self.ref_points_embedding.weight.data[:, :2] = inverse_sigmoid(
                self.ref_points_embedding.weight.data[:, :2])
            # NOTE(review): setting requires_grad on a ``.data`` slice is a
            # no-op — the slice is a temporary, so the xy part is NOT actually
            # frozen. Kept as-is to match the reference implementation;
            # confirm the intended freezing behaviour.
            self.ref_points_embedding.weight.data[:, :2].requires_grad = False

        # guard the default ``denoise_cfg=None`` (``**None`` raises TypeError)
        self.denoise = Denoise(**(self.denoise_cfg or {}))
        self.transformer = build_transformer(transformer_cfg)

    def extract_feat(self, x: ConcatImageTensor):
        """Run the backbone and pair each feature level with a resized mask.

        Args:
            x: batched image tensor plus its padding-mask tensor.

        Returns:
            list[ConcatImageTensor]: one entry per backbone level, each
            holding the feature map and the padding mask resized to that
            level's spatial resolution (cast back to bool).
        """
        # the padding mask is loop-invariant: fetch and validate it once
        padding_mask = x.mask_tensor
        if padding_mask is None:
            raise ValueError('extract_feat requires x.mask_tensor to be set')
        out = []
        for feature in self.backbone(x.images_tensor):
            # downsample the boolean mask to this level's spatial size
            level_mask = tf.interpolate(
                padding_mask[None].float(), size=feature.shape[-2:]).to(torch.bool)[0]
            out.append(ConcatImageTensor(feature, masks=level_mask))
        return out

    def _gen_position_list(self, concat_feature_lst):
        """Compute a positional embedding for every feature level.

        Args:
            concat_feature_lst: list of ``ConcatImageTensor`` as produced by
                ``extract_feat``.

        Returns:
            list: positional embeddings, one per feature level.
        """
        return [
            self.positional_embedding(cf.images_tensor, cf.mask_tensor)
            for cf in concat_feature_lst
        ]

    def forward(self, x, img_metas=None):
        """Full forward pass: features -> denoised queries -> transformer.

        Args:
            x: ``ConcatImageTensor`` with images and padding mask.
            img_metas: per-image meta info consumed by the denoise module.

        Returns:
            tuple: ``(out, mask_dict)`` as produced by the transformer.
        """
        feature_lst = self.extract_feat(x)
        pos_embedding_lst = self._gen_position_list(feature_lst)
        # build (possibly noised) label/box queries and their attention mask
        input_query_label, input_query_bbox, attention_mask, mask_dict = self.denoise(
            img_metas, self.ref_points_embedding.weight, self.label_embedding)
        # only the last (coarsest) feature level feeds the transformer
        src, mask = feature_lst[-1].decompose()
        out, mask_dict = self.transformer(self.feature_projection(src), mask,
                                          input_query_bbox,
                                          pos_embedding_lst[-1],
                                          label_query=input_query_label,
                                          attn_mask=attention_mask,
                                          mask_dict=mask_dict)

        return out, mask_dict
