import torch.nn as nn

from Encoder import MultiHeadAttention, FeedForward
from PE import Positional
from config import *


class DecoderLayer(nn.Module):
    """One Transformer decoder layer (post-norm variant).

    Pipeline: masked self-attention -> add & LayerNorm -> encoder-decoder
    cross-attention -> add & LayerNorm -> position-wise feed-forward ->
    add & LayerNorm.
    """

    def __init__(self):
        super().__init__()

        # Masked self-attention over the target sequence.
        self.multiheadattention1 = MultiHeadAttention(masked=True)
        # Normalizes across the d_model features of each position.
        self.norm1 = nn.LayerNorm(normalized_shape=[d_model])
        # Cross-attention: queries come from the decoder, keys/values
        # from the encoder output.
        self.multiheadattention2 = MultiHeadAttention()
        self.norm2 = nn.LayerNorm(normalized_shape=[d_model])
        self.feedforward = FeedForward()
        self.norm3 = nn.LayerNorm(normalized_shape=[d_model])

    def forward(self, x, en_output):
        """Run one decoder layer.

        Args:
            x: decoder input (target-side representation).
            en_output: encoder output, used as keys/values in cross-attention.

        Returns:
            Tensor with the same shape as ``x``.
        """
        self_attn = self.multiheadattention1(x, x, x)
        x = self.norm1(x + self_attn)

        cross_attn = self.multiheadattention2(x, en_output, en_output)
        x = self.norm2(x + cross_attn)

        ff_out = self.feedforward(x)
        return self.norm3(x + ff_out)


class Decoder(nn.Module):
    """Transformer decoder: target embedding + positional encoding followed by
    a stack of ``de_N`` :class:`DecoderLayer` blocks."""

    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(vocab_target_len, d_model)
        self.decoder_list = nn.ModuleList([DecoderLayer() for _ in range(de_N)])

    def forward(self, x, en_output):
        """Decode target token ids against the encoder output.

        Args:
            x: target token id tensor (fed to the embedding layer).
            en_output: encoder output passed to every layer's cross-attention.

        Returns:
            Decoded representation of shape ``(..., d_model)``.
        """
        x = self.embedding(x)
        # NOTE(review): assumes Positional(x) returns only the positional
        # encoding (it is added onto x here) — confirm against PE.Positional.
        x = x + Positional(x)
        # BUGFIX: the original looped `for idx in range(en_N)`, but the list
        # holds de_N layers — IndexError when en_N > de_N, silently skipped
        # layers when en_N < de_N. Iterating the ModuleList directly always
        # applies exactly the layers that were constructed.
        for layer in self.decoder_list:
            x = layer(x, en_output)
        return x
