# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name： dab_decoder_layer
Description :
Author : 'li'
date： 2022/7/5
Change Activity:
2022/7/5:
-------------------------------------------------
"""
import torch
from torch import nn

from ml.dl.model.base import BaseModule
from ml.dl.model.layers.activation import build_activation_layer
from ml.dl.model.modules.multi_attention.dab_multi_head_attention import DABMultiHeadAttention


class DABDecoderLayer(BaseModule):
    """Single decoder layer in the DAB-DETR style.

    Pipeline per call: an optional self-attention sub-block over the label
    queries, a cross-attention sub-block between label queries and encoder
    features (queries/keys are concatenated with per-head positional
    embeddings, doubling the attention dim to ``embed_dim * 2``), and a
    feed-forward network. Each sub-block applies residual + LayerNorm in
    post-norm order.

    NOTE(review): ``normalize_before`` is stored but never consulted in
    ``forward`` — pre-norm ordering is not implemented here; confirm intent.
    """

    def __init__(self, embed_dim=256, num_heads=6, hidden_channels=2048, dropout_rate=0.1, activation='ReLU', normalize_before=False, keep_query_pos=False,
                 add_self_attention_module=True):
        """
        Args:
            embed_dim: channel dimension of queries / keys / values.
            num_heads: number of attention heads; must divide ``embed_dim``.
            hidden_channels: hidden width of the feed-forward network.
            dropout_rate: dropout probability used throughout the layer.
            activation: activation name resolved by ``build_activation_layer``.
            normalize_before: stored only; not applied in this implementation.
            keep_query_pos: if True, add the projected query position into the
                cross-attention query on every layer, not only the first.
            add_self_attention_module: if False, the self-attention sub-block
                (and its projections / norm / dropout) is not created and
                ``forward`` skips it.
        """
        super().__init__()
        self.add_self_attention_module = add_self_attention_module
        if add_self_attention_module:
            # Self-attention: content and positional parts are projected
            # separately and summed to form q / k (conditional-DETR style).
            self.self_attention_query_content_projection = nn.Linear(embed_dim, embed_dim)
            self.self_attention_query_pos_projection = nn.Linear(embed_dim, embed_dim)
            self.self_attention_key_pos_projection = nn.Linear(embed_dim, embed_dim)
            self.self_attention_key_content_projection = nn.Linear(embed_dim, embed_dim)
            self.self_attention_value_projection = nn.Linear(embed_dim, embed_dim)
            self.self_attn = DABMultiHeadAttention(embed_dim, num_heads, dropout=dropout_rate)
            self.norm1 = nn.LayerNorm(embed_dim)
            self.dropout1 = nn.Dropout(dropout_rate)
        # Cross-attention: q and k are built as [content ; positional] per
        # head, so the attention runs at embed_dim * 2 while values stay at
        # embed_dim (value_dim below).
        self.cross_attention_query_content_projection = nn.Linear(embed_dim, embed_dim)
        self.cross_attention_query_pos_projection = nn.Linear(embed_dim, embed_dim)
        self.cross_attention_key_content_projection = nn.Linear(embed_dim, embed_dim)
        self.cross_attention_key_pos_projection = nn.Linear(embed_dim, embed_dim)
        self.cross_attention_value_projection = nn.Linear(embed_dim, embed_dim)
        self.cross_attn = DABMultiHeadAttention(embed_dim * 2, num_heads, dropout=dropout_rate, value_dim=embed_dim)
        self.num_heads = num_heads
        self.cross_attention_query_pos_sine_projection = nn.Linear(embed_dim, embed_dim)
        # Feed-forward network (Linear -> activation -> dropout -> Linear).
        self.linear1 = nn.Linear(embed_dim, hidden_channels)
        self.dropout = nn.Dropout(dropout_rate)
        self.linear2 = nn.Linear(hidden_channels, embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.norm3 = nn.LayerNorm(embed_dim)
        self.dropout2 = nn.Dropout(dropout_rate)
        self.dropout3 = nn.Dropout(dropout_rate)
        self.activation = build_activation_layer(activation)
        self.normalize_before = normalize_before
        self.keep_query_pos = keep_query_pos

    def _self_attention(self, label_query, attention_mask, label_query_key_padding_mask, query_pos):
        """Self-attention sub-block with residual + LayerNorm.

        Args:
            label_query: decoder (content) queries.
            attention_mask: optional attention mask forwarded to the attention.
            label_query_key_padding_mask: optional key padding mask.
            query_pos: positional embedding generated from the bbox embedding;
                projected and added to both query and key content.

        Returns:
            Updated label queries, same shape as the input.
        """
        label_query_content = self.self_attention_query_content_projection(label_query)
        bbox_query_content = self.self_attention_query_pos_projection(query_pos)
        label_key_content = self.self_attention_key_content_projection(label_query)
        bbox_key_content = self.self_attention_key_pos_projection(query_pos)
        v = self.self_attention_value_projection(label_query)
        q = label_query_content + bbox_query_content
        k = label_key_content + bbox_key_content
        self_attention_query = self.self_attn(q, k, v, key_padding_mask=label_query_key_padding_mask, attention_mask=attention_mask)[0]
        # Residual + post-norm.
        label_query = label_query + self.dropout1(self_attention_query)
        label_query = self.norm1(label_query)
        return label_query

    def forward(self, label_query, feature, attention_mask, feature_padding_mask, pos, label_query_key_padding_mask=None,
                feature_key_padding_mask=None, query_sine_embed=None, query_pos=None, is_first=False):
        """Run one decoder layer.

        Args:
            label_query: num_queries x batch_size x embed_dim content queries.
            feature: encoder features (hw x batch_size x embed_dim —
                TODO confirm against caller).
            attention_mask: mask for the self-attention sub-block; also passed
                as ``attention_mask`` to cross-attention.
            feature_padding_mask: forwarded to cross-attention as its
                ``attention_mask`` argument.
            pos: positional embedding of ``feature``.
            label_query_key_padding_mask: optional padding mask for
                self-attention keys.
            feature_key_padding_mask: optional padding mask for encoder
                features; inverted before being passed to cross-attention.
            query_sine_embed: sine positional embedding of the queries,
                concatenated per head into the cross-attention query.
            query_pos: positional embedding generated from the bbox embedding.
            is_first: True on the first decoder layer; adds the projected
                ``query_pos`` into the cross-attention query (also done on
                every layer when ``keep_query_pos`` is set).

        Returns:
            Updated label queries, num_queries x batch_size x embed_dim.
        """
        if self.add_self_attention_module:
            label_query = self._self_attention(label_query, attention_mask, label_query_key_padding_mask, query_pos)
        # ---- cross attention ----
        label_query_content = self.cross_attention_query_content_projection(label_query)
        key_content = self.cross_attention_key_content_projection(feature)
        v = self.cross_attention_value_projection(feature)

        num_queries, batch_size, embed_dim = label_query_content.shape
        hw, _, _ = key_content.shape
        key_pos = self.cross_attention_key_pos_projection(pos)
        if is_first or self.keep_query_pos:
            query_pos = self.cross_attention_query_pos_projection(query_pos)
            q = query_pos + label_query_content
            k = key_pos + key_content
        else:
            q = label_query_content
            k = key_content
        # Concatenate content and sine positional parts head-wise so each head
        # sees [content ; pos]; attention then runs at embed_dim * 2.
        q = q.view(num_queries, batch_size, self.num_heads, embed_dim // self.num_heads)
        query_sine_embed = self.cross_attention_query_pos_sine_projection(query_sine_embed)
        query_sine_embed = query_sine_embed.view(num_queries, batch_size, self.num_heads, embed_dim // self.num_heads)
        q = torch.cat([q, query_sine_embed], dim=3).view(num_queries, batch_size, embed_dim * 2)
        k = k.view(hw, batch_size, self.num_heads, embed_dim // self.num_heads)
        k_pos = key_pos.view(hw, batch_size, self.num_heads, embed_dim // self.num_heads)
        k = torch.cat([k, k_pos], dim=3).view(hw, batch_size, embed_dim * 2)
        # Fix: ``feature_key_padding_mask`` defaults to None; ``~None`` raised
        # a TypeError. Pass None through as "no padding mask" instead.
        if feature_key_padding_mask is not None:
            reverse_padding_mask = ~feature_key_padding_mask
        else:
            reverse_padding_mask = None
        tmp_query = self.cross_attn(q, k, v, attention_mask=feature_padding_mask, key_padding_mask=reverse_padding_mask)[0]
        # Residual + post-norm.
        label_query = label_query + self.dropout2(tmp_query)
        label_query = self.norm2(label_query)
        # ---- feed-forward network ----
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(label_query))))
        label_query = label_query + self.dropout3(tgt2)
        label_query = self.norm3(label_query)
        return label_query
