# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:   dab_multi_head_attention
Description: DAB-style multi-head attention (no internal q/k/v input projections).
Author:      'li'
Date:        2022/7/5
Change Activity:
2022/7/5:
-------------------------------------------------
"""
import warnings
from typing import Optional

import torch
from torch import nn, Tensor
import torch.nn.functional as tf
from torch.nn.init import constant_

from ml.dl.model.base import BaseModule


class DABMultiHeadAttention(BaseModule):
    """Multi-head attention that expects pre-projected query/key/value.

    Unlike ``nn.MultiheadAttention``, this module applies NO input
    projections: :meth:`multi_head_attention_forward` only scales the
    query, splits q/k/v into heads, applies the masked softmax, and runs
    a single output linear layer.  The ``in_proj_*`` and ``*_proj_weight``
    attributes are kept as ``None`` purely for signature compatibility
    with the torch API and are never read.
    """

    # Optional bias tensors appended along the source-length axis of
    # key/value inside multi_head_attention_forward (always None here;
    # declared for type-checking parity with nn.MultiheadAttention).
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]

    def __init__(self, embed_dim, num_heads, dropout=0, add_zero_attention=False, key_dim=None, value_dim=None):
        """
        Args:
            embed_dim: total query embedding dimension (split across heads);
                must be divisible by ``num_heads``.
            num_heads: number of parallel attention heads.
            dropout: dropout probability applied to the attention weights.
            add_zero_attention: if True, append one all-zero attendable
                position to key and value during forward.
            key_dim: total key dimension; defaults to ``embed_dim``.
            value_dim: total value/output dimension; defaults to ``embed_dim``.
        """
        super(DABMultiHeadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.key_dim = key_dim if key_dim is not None else embed_dim
        self.value_dim = value_dim if value_dim is not None else embed_dim
        # True when q/k/v all share embed_dim; selects the forward branch.
        self.is_qkv_same_dim = ((self.key_dim == embed_dim) and (self.value_dim == embed_dim))

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        # The only learned parameters: inputs arrive already projected.
        self.out_proj = nn.Linear(self.value_dim, self.value_dim)

        # Kept as None for API compatibility with the torch attention
        # signature; never consumed by multi_head_attention_forward below.
        self.in_proj_bias = None
        self.in_proj_weight = None
        self.bias_k = self.bias_v = None
        self.q_proj_weight = None
        self.k_proj_weight = None
        self.v_proj_weight = None
        self.add_zero_attention = add_zero_attention
        self._reset_parameters()

    def _reset_parameters(self):
        # Only the output-projection bias is re-initialized; the weight
        # keeps nn.Linear's default initialization.
        constant_(self.out_proj.bias, 0.)

    def forward(self, query, key, value, key_padding_mask=None, need_weights=True, attention_mask=None):
        """Run attention over pre-projected inputs.

        Args:
            query: `(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension.
            key: `(S, N, E)` where S is the source sequence length.
                NOTE(review): the head reshape assumes key's last dim equals
                ``embed_dim`` even when ``key_dim`` differs — TODO confirm.
            value: `(S, N, value_dim)`; its last dim is split into
                ``value_dim // num_heads`` chunks per head.
            key_padding_mask: if provided, specified padding elements in the key will be ignored by the attention. When given a binary mask and a value is True,
            the corresponding value on the attention layer will be ignored. When given a byte mask and a value is non-zero, the corresponding value on the attention
            layer will be ignored. Shape `(N, S)`.
            need_weights: if True, also return the head-averaged attention weights.
            attention_mask: optional mask of shape `(L, S)` or `(N * num_heads, L, S)`;
                bool masks exclude positions, float masks are added to the logits.

        Returns:
            Tuple of ``(attn_output, attn_weights)`` where ``attn_output`` is
            `(L, N, value_dim)` and ``attn_weights`` is `(N, L, S)` averaged
            over heads, or ``None`` when ``need_weights`` is False.
        """
        # NOTE(review): every projection weight passed below is None and is
        # never read by multi_head_attention_forward, so the two branches
        # currently behave identically; the split mirrors the torch API.
        if not self.is_qkv_same_dim:
            return self.multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attention,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attention_mask, use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight, out_dim=self.value_dim)
        else:  # q, k and v all share embed_dim.
            return self.multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attention,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attention_mask, out_dim=self.value_dim)

    @staticmethod
    def _check_attention_mask(attn_mask, query, key, batch_size, num_heads):
        """Validate dtype/shape of ``attn_mask`` and normalize it to 3-D.

        Args:
            attn_mask: optional mask of shape `(L, S)` or
                `(batch_size * num_heads, L, S)` with float, byte or bool dtype.
            query: query tensor; only ``query.size(0)`` (L) is used.
            key: key tensor; only ``key.size(0)`` (S) is used.
            batch_size: batch size N.
            num_heads: number of attention heads.

        Returns:
            ``None`` if no mask was given, otherwise the mask converted to
            bool (when byte) and with a leading broadcast dim when it was 2-D.

        Raises:
            RuntimeError: if the mask rank or shape is unsupported.
        """
        if attn_mask is not None:
            assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or \
                   attn_mask.dtype == torch.bool, 'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
            if attn_mask.dtype == torch.uint8:
                warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
                attn_mask = attn_mask.to(torch.bool)
            if attn_mask.dim() == 2:
                attn_mask = attn_mask.unsqueeze(0)  # (L, S) -> (1, L, S), broadcast over batch * heads
                if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                    raise RuntimeError('The size of the 2D attn_mask is not correct.')
            elif attn_mask.dim() == 3:
                if list(attn_mask.size()) != [batch_size * num_heads, query.size(0), key.size(0)]:
                    raise RuntimeError('The size of the 3D attn_mask is not correct.')
            else:
                raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        return attn_mask

    @staticmethod
    def multi_head_attention_forward(query: Tensor, key: Tensor, value: Tensor, embed_dim_to_check: int, num_heads: int, in_proj_weight: Tensor, in_proj_bias: Tensor,
                                     bias_k: Optional[Tensor], bias_v: Optional[Tensor], add_zero_attn: bool, dropout_p: float, out_proj_weight: Tensor,
                                     out_proj_bias: Tensor, training: bool = True, key_padding_mask: Optional[Tensor] = None, need_weights: bool = True,
                                     attn_mask: Optional[Tensor] = None, use_separate_proj_weight: bool = False, q_proj_weight: Optional[Tensor] = None,
                                     k_proj_weight: Optional[Tensor] = None, v_proj_weight: Optional[Tensor] = None, static_k: Optional[Tensor] = None,
                                     static_v: Optional[Tensor] = None, out_dim: Optional[int] = None):
        """Core attention: scale, optional bias/zero-attn, masked softmax, output projection.

        A trimmed variant of ``torch.nn.functional.multi_head_attention_forward``
        that performs NO input projections on q/k/v.

        Args:
            query: `(L, N, E)` target sequence.
            key: `(S, N, E)` source sequence. NOTE(review): the reshape below
                splits the last dim into ``E // num_heads`` chunks, so key is
                assumed to have last dim ``embed_dim`` — TODO confirm.
            value: `(S, N, out_dim)` source values.
            embed_dim_to_check: nominal expected embedding dimension.
                NOTE(review): accepted but never validated in this function.
            num_heads: number of attention heads.
            in_proj_weight: unused; kept for API compatibility.
            in_proj_bias: unused; kept for API compatibility.
            bias_k: optional bias (typically `(1, 1, E)` — TODO confirm)
                appended along the key's sequence axis.
            bias_v: optional bias appended along the value's sequence axis.
            add_zero_attn: if True, append one all-zero attendable position
                to key and value.
            dropout_p: dropout probability on the attention weights.
            out_proj_weight: weight of the output linear layer.
            out_proj_bias: bias of the output linear layer.
            training: dropout is applied only when True.
            key_padding_mask: `(N, S)` bool/byte mask; True / non-zero entries
                are excluded from attention.
            need_weights: if True, also return head-averaged attention weights.
            attn_mask: `(L, S)` or `(N * num_heads, L, S)`; bool masks exclude
                positions, float masks are added to the logits.
            use_separate_proj_weight: unused; kept for API compatibility.
            q_proj_weight: unused.
            k_proj_weight: unused.
            v_proj_weight: unused.
            static_k: optional precomputed per-head key
                `(N * num_heads, S, head_dim)`; incompatible with bias_k.
            static_v: optional precomputed per-head value
                `(N * num_heads, S, out_dim // num_heads)`.
            out_dim: total value/output dimension (split across heads).

        Returns:
            Tuple of ``(attn_output, attn_weights)``: `(L, N, out_dim)` and
            `(N, L, S)` averaged over heads, or ``None`` when ``need_weights``
            is False.
        """
        seq_len, batch_size, embed_dim = query.size()
        assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
        head_dim = embed_dim // num_heads
        value_head_dim = out_dim // num_heads
        assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
        # Pre-scale q so bmm(q, k^T) directly yields logits / sqrt(head_dim).
        scaling = float(head_dim) ** -0.5
        q = query * scaling
        k = key
        v = value
        attn_mask = DABMultiHeadAttention._check_attention_mask(attn_mask, query, key, batch_size, num_heads)
        if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
            warnings.warn("Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            key_padding_mask = key_padding_mask.to(torch.bool)

        if bias_k is not None and bias_v is not None:
            if static_k is None and static_v is None:
                # Append one bias position per batch element; masks get one
                # extra (unmasked) column to cover it.
                k = torch.cat([k, bias_k.repeat(1, batch_size, 1)])
                v = torch.cat([v, bias_v.repeat(1, batch_size, 1)])
                if attn_mask is not None:
                    attn_mask = tf.pad(attn_mask, (0, 1))
                if key_padding_mask is not None:
                    key_padding_mask = tf.pad(key_padding_mask, (0, 1))
            else:
                assert static_k is None, "bias cannot be added to static key."
                assert static_v is None, "bias cannot be added to static value."
        else:
            assert bias_k is None
            assert bias_v is None
        # Split into heads: (L, N, E) -> (N * num_heads, L, head_dim).
        q = q.contiguous().view(seq_len, batch_size * num_heads, head_dim).transpose(0, 1)
        if k is not None:
            k = k.contiguous().view(-1, batch_size * num_heads, head_dim).transpose(0, 1)
        if v is not None:
            v = v.contiguous().view(-1, batch_size * num_heads, value_head_dim).transpose(0, 1)

        if static_k is not None:
            assert static_k.size(0) == batch_size * num_heads
            assert static_k.size(2) == head_dim
            k = static_k

        if static_v is not None:
            assert static_v.size(0) == batch_size * num_heads
            assert static_v.size(2) == value_head_dim
            v = static_v

        src_len = k.size(1)

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == batch_size
            assert key_padding_mask.size(1) == src_len

        if add_zero_attn:
            # One extra all-zero attendable position; masks are padded with an
            # extra unmasked column to match.
            src_len += 1
            t = torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)
            k = torch.cat([k, t], dim=1)
            t = torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)
            v = torch.cat([v, t], dim=1)
            if attn_mask is not None:
                attn_mask = tf.pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = tf.pad(key_padding_mask, (0, 1))

        # Raw attention logits: (N * num_heads, L, S).
        attn_output_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_output_weights.size()) == [batch_size * num_heads, seq_len, src_len]

        if attn_mask is not None:
            if attn_mask.dtype == torch.bool:
                # Bool mask: excluded positions get -inf (zero after softmax).
                attn_output_weights.masked_fill_(attn_mask, float('-inf'))
            else:
                # Float mask: additive bias on the logits.
                attn_output_weights += attn_mask

        if key_padding_mask is not None:
            # Broadcast (N, S) over heads and query positions.
            attn_output_weights = attn_output_weights.view(batch_size, num_heads, seq_len, src_len)
            tmp_mask = key_padding_mask.unsqueeze(1).unsqueeze(2)
            attn_output_weights = attn_output_weights.masked_fill(tmp_mask, float('-inf'))
            attn_output_weights = attn_output_weights.view(batch_size * num_heads, seq_len, src_len)

        # Numerically stable softmax: subtracting the row-wise max does not
        # change the softmax result but avoids overflow in exp().
        attn_output_weights = tf.softmax(
            attn_output_weights - attn_output_weights.max(dim=-1, keepdim=True)[0], dim=-1)
        attn_output_weights = tf.dropout(attn_output_weights, p=dropout_p, training=training)

        attn_output = torch.bmm(attn_output_weights, v)
        assert list(attn_output.size()) == [batch_size * num_heads, seq_len, value_head_dim]
        # Merge heads back: (N * num_heads, L, value_head_dim) -> (L, N, out_dim).
        attn_output = attn_output.transpose(0, 1).contiguous().view(seq_len, batch_size, out_dim)
        attn_output = tf.linear(attn_output, out_proj_weight, out_proj_bias)

        if need_weights:
            # average attention weights over heads
            attn_output_weights = attn_output_weights.view(batch_size, num_heads, seq_len, src_len)
            return attn_output, attn_output_weights.sum(dim=1) / num_heads
        else:
            return attn_output, None
