import torch.nn as nn

from mask_multi_head_attention_layer import MaskedMultiHeadAttention
from add_and_norm_layer import AddAndNorm
from cross_multi_head_attention_layer import CrossMultiHeadAttention
from feed_forward_layer import FeedForwardNet


class DecoderLayer(nn.Module):
    """Single transformer decoder layer (post-norm arrangement).

    Applies three sub-layers in order, each wrapped in a residual
    connection followed by layer normalization:
      1. masked (causal) multi-head self-attention over the target sequence,
      2. cross multi-head attention from decoder states onto encoder memory,
      3. a position-wise feed-forward network (residual + norm handled by
         the `AddAndNorm` helper module).

    NOTE(review): sub-layers 1-2 use explicit `nn.LayerNorm` + residual while
    sub-layer 3 delegates to `AddAndNorm`; kept as-is since changing it would
    alter the module's parameter/state-dict layout.
    """

    def __init__(self, d_model=512, num_heads=8, d_ff=2048):
        super(DecoderLayer, self).__init__()
        # Sub-layer 1: causal self-attention plus its layer norm.
        self.masked_multi_head_attention = MaskedMultiHeadAttention(d_model, num_heads)
        self.norm1 = nn.LayerNorm(d_model)
        # Sub-layer 2: attention over the encoder output plus its layer norm.
        self.cross_multi_head_attention = CrossMultiHeadAttention(d_model, num_heads)
        self.norm2 = nn.LayerNorm(d_model)
        # Sub-layer 3: feed-forward net; AddAndNorm supplies residual + norm.
        self.feed_forward = FeedForwardNet(d_model, d_ff)
        self.add_and_norm = AddAndNorm(d_model)

    def forward(self, x, causal=True, padding_mask_key=None, memory=None, memory_mask_key=None):
        """Run one decoder layer.

        Args:
            x: decoder input — presumably (batch, seq, d_model); TODO confirm
               against the attention modules.
            causal: forwarded to the masked self-attention module.
            padding_mask_key: key-padding mask for self-attention.
            memory: encoder output used as key/value in cross-attention.
            memory_mask_key: key-padding mask for the memory sequence.

        Returns:
            Tensor of the same shape as `x`.
        """
        self_attn_out = self.masked_multi_head_attention(x, causal, padding_mask_key)
        x = self.norm1(x + self_attn_out)
        cross_attn_out = self.cross_multi_head_attention(x, memory, memory, memory_mask_key)
        x = self.norm2(x + cross_attn_out)
        return self.add_and_norm(x, self.feed_forward)


if __name__ == '__main__':
    import torch
    from embedding_add_positional_encoding import EmbeddingAddPositionalEncoding

    # Smoke test: batch of 5 token-id sequences of length 8, vocab size 10.
    token_ids = torch.randint(0, 10, (5, 8))

    # Per-sequence count of valid (non-pad) key positions; True marks a real
    # token.  NOTE(review): torch.Tensor(...) yields a float 1.0/0.0 mask,
    # not bool — confirm that is what the attention modules expect.
    valid_lengths = [8, 7, 6, 4, 5]
    key_padding_mask = torch.Tensor(
        [[pos < n for pos in range(8)] for n in valid_lengths])

    embedding = EmbeddingAddPositionalEncoding(10, 512, seq_len=8, d_model=512)
    embedded = embedding(token_ids)
    decoder_layer = DecoderLayer(d_model=512, num_heads=8, d_ff=2048)
    # Decoder input doubles as its own memory for this self-contained check.
    output = decoder_layer(embedded, True, key_padding_mask, embedded, key_padding_mask)
    print(output)
