# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: dab_encoder_layer
Description :
Author : 'li'
date: 2022/7/3
Change Activity:
2022/7/3:
-------------------------------------------------
"""
from torch import nn

from ...layers.activation import build_activation_layer
from ....model.base import BaseModule


class DABEncoderLayer(BaseModule):
    """Single transformer encoder layer used by DAB-DETR.

    Post-norm layout: self-attention (with positional embeddings added to
    query/key only), residual + LayerNorm, then a two-layer feed-forward
    network, residual + LayerNorm.

    Args:
        embed_dim (int): channel dimension of the token embeddings.
        num_heads (int): number of attention heads; must divide ``embed_dim``
            evenly (``nn.MultiheadAttention`` asserts this). The previous
            default of 6 made the default configuration unconstructible
            because 256 % 6 != 0.
        hidden_channels (int): hidden width of the feed-forward network.
        dropout (float): dropout probability used throughout the layer.
        activation (str): activation name resolved via
            ``build_activation_layer`` for the FFN non-linearity.
        normalize_before (bool): stored for API compatibility.
    """

    def __init__(self, embed_dim=256, num_heads=8, hidden_channels=2048,
                 dropout=0.1, activation='ReLU', normalize_before=False):
        super(DABEncoderLayer, self).__init__()
        self.self_attention = nn.MultiheadAttention(
            embed_dim=embed_dim, num_heads=num_heads, dropout=dropout)

        # Feed-forward network: embed_dim -> hidden_channels -> embed_dim.
        self.linear1 = nn.Linear(embed_dim, hidden_channels)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(hidden_channels, embed_dim)

        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = build_activation_layer(activation)
        # NOTE(review): stored but never read in forward(); only the
        # post-norm path is implemented here.
        self.normalize_before = normalize_before

    def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Run one encoder layer over ``src``.

        Args:
            src: input feature sequence; assumed ``(seq_len, batch,
                embed_dim)`` as ``nn.MultiheadAttention`` expects by
                default — TODO confirm the caller uses (L, N, E) layout.
            src_mask: optional attention mask forwarded unchanged to
                ``nn.MultiheadAttention`` (the encoder typically passes
                None here).
            src_key_padding_mask: optional boolean mask. It is inverted
                before being handed to ``nn.MultiheadAttention``;
                presumably the caller marks *valid* tokens with True,
                while PyTorch expects True for *padded* tokens — verify
                against the caller.
            pos: optional positional embedding added to query and key
                (not to value), same shape as ``src``.

        Returns:
            Tensor of the same shape as ``src``.
        """
        # Positional embeddings condition query/key only, not value (DETR).
        q = k = src if pos is None else src + pos

        # Guard: the original unconditionally computed ~src_key_padding_mask,
        # which raises TypeError when no mask is supplied.
        if src_key_padding_mask is None:
            padding_mask = None
        else:
            padding_mask = ~src_key_padding_mask

        attn_out, _ = self.self_attention(
            q, k, value=src, attn_mask=src_mask,
            key_padding_mask=padding_mask)
        src = self.norm1(src + self.dropout1(attn_out))

        # Feed-forward block with its own residual + norm.
        ffn_out = self.linear2(
            self.dropout(self.activation(self.linear1(src))))
        src = self.norm2(src + self.dropout2(ffn_out))
        return src
