import torch
import torch.nn as nn
import numpy as np


class DecoderLayer(nn.Module):
    """One Transformer decoder layer.

    Composed of three residual sub-layers, applied in order:
      1. masked multi-head self-attention over the decoder inputs,
      2. multi-head cross-attention attending to the encoder outputs,
      3. a position-wise feed-forward network.

    Each sub-layer module is assumed to apply its own residual
    connection and layer normalization (per the `_Add_Norm` suffix).
    """

    def __init__(self):
        super().__init__()
        # Masked self-attention over the target sequence.
        self.dec_multiHeadattn_add_norm = MultiHeadAttention_Add_Norm()
        # Cross-attention: decoder queries attend to encoder outputs.
        self.dec_enc_multiHeadattn_add_norm = MultiHeadAttention_Add_Norm()
        # Position-wise feed-forward network.
        self.pos_ffn_add_norm = PoswiseFeedForwardNet_Add_Norm()

    def forward(self, dec_inputs, enc_outputs, dec_self_attn_mask, dec_enc_attn_mask):
        """Run one decoder layer.

        Args:
            dec_inputs: [batch_size, tgt_len, d_model] decoder-side inputs.
            enc_outputs: [batch_size, src_len, d_model] encoder stack outputs.
            dec_self_attn_mask: [batch_size, tgt_len, tgt_len] mask for
                self-attention (causal + padding).
            dec_enc_attn_mask: [batch_size, tgt_len, src_len] mask for
                cross-attention (source padding).

        Returns:
            Tensor of shape [batch_size, tgt_len, d_model].
        """
        # Sub-layer 1: self-attention — Q, K, V all come from the decoder inputs.
        self_attn_out = self.dec_multiHeadattn_add_norm(
            dec_inputs, dec_inputs, dec_inputs, dec_self_attn_mask
        )

        # Sub-layer 2: cross-attention — Q from the previous sub-layer,
        # K and V from the encoder outputs.
        cross_attn_out = self.dec_enc_multiHeadattn_add_norm(
            self_attn_out, enc_outputs, enc_outputs, dec_enc_attn_mask
        )

        # Sub-layer 3: position-wise feed-forward network.
        return self.pos_ffn_add_norm(cross_attn_out)