from torch import nn

from .attention import MultiHeadAttention, PoswiseFeedForwardNet


class EncoderLayer(nn.Module):
    """
    A single Transformer encoder layer: multi-head self-attention
    followed by a position-wise feed-forward network.
    """

    def __init__(self, d_model, n_heads, d_k, d_v, d_ff, dropout):
        super(EncoderLayer, self).__init__()
        # Sub-layer 1: multi-head self-attention over the input sequence.
        self.self_attention = MultiHeadAttention(d_model, n_heads, d_k, d_v, dropout)
        # Sub-layer 2: position-wise feed-forward network.
        self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, dropout)

    def forward(self, batch_inputs, self_attention_mask):
        """
        :param batch_inputs: input batch, shape (batch_size, context_length, d_model)
        :param self_attention_mask: shape (batch_size, context_length_q, context_length_k)
        :return: tuple of (encoder outputs, self-attention weights)
        """
        # Q, K and V are all the same tensor for self-attention.
        context, attention_weights = self.self_attention(
            batch_inputs, batch_inputs, batch_inputs, self_attention_mask
        )
        return self.pos_ffn(context), attention_weights


class DecoderLayer(nn.Module):
    """
    A single Transformer decoder layer with three sub-layers:
    masked self-attention, encoder-decoder (cross) attention,
    and a position-wise feed-forward network.
    """

    def __init__(self, d_model, n_heads, d_k, d_v, d_ff, dropout):
        super(DecoderLayer, self).__init__()
        # Sub-layer 1: masked multi-head self-attention over decoder inputs.
        self.self_attention = MultiHeadAttention(d_model, n_heads, d_k, d_v, dropout)
        # Sub-layer 2: cross-attention — queries from decoder, keys/values from encoder.
        self.decoder_encoder_attention = MultiHeadAttention(d_model, n_heads, d_k, d_v, dropout)
        # Sub-layer 3: position-wise feed-forward network.
        self.pos_ffn = PoswiseFeedForwardNet(d_model, d_ff, dropout)

    def forward(self, decoder_inputs, encoder_outputs, self_attention_mask, decoder_encoder_attention_mask):
        """
        :param decoder_inputs: input batch, shape (batch_size, max_len, d_model)
        :param encoder_outputs: shape (batch_size, max_len, d_model)
        :param self_attention_mask: shape (batch_size, max_len, max_len)
        :param decoder_encoder_attention_mask: shape (batch_size, max_len, max_len)
        :return: tuple of (decoder outputs, self-attention weights, cross-attention weights)
        """
        # Masked self-attention: Q, K, V all come from the decoder inputs.
        hidden, self_attention = self.self_attention(
            decoder_inputs, decoder_inputs, decoder_inputs, self_attention_mask
        )
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        hidden, cross_attention = self.decoder_encoder_attention(
            hidden, encoder_outputs, encoder_outputs, decoder_encoder_attention_mask
        )
        return self.pos_ffn(hidden), self_attention, cross_attention
