import torch
import torch.nn as nn

from utils import clone_module
from layer_norm import LayerNorm


class Encoder(nn.Module):
    """A stack of N identical encoder layers followed by a final LayerNorm.

    Args:
        encoder_layer: prototype layer to be cloned; must expose a
            ``feature_dim`` attribute used to size the final LayerNorm.
        num_encoder_layer: how many copies of ``encoder_layer`` to stack.
    """

    def __init__(self, encoder_layer, num_encoder_layer):
        super().__init__()
        # N independent deep copies of the prototype layer, each with
        # its own parameters.
        self.encoder_layers = clone_module(encoder_layer, num_encoder_layer)
        # Normalization applied to the output of the last layer.
        self.lay_norm = LayerNorm(encoder_layer.feature_dim)

    def forward(self, x, mask):
        """Run ``x`` through every layer in order, then normalize.

        Args:
            x: input tensor (presumably [batch, seq_len, feature_dim] —
                see the demo below; not enforced here).
            mask: attention mask forwarded unchanged to every layer.

        Returns:
            The layer-normalized output of the final encoder layer.
        """
        out = x
        for encoder_layer in self.encoder_layers:
            out = encoder_layer(out, mask)
        return self.lay_norm(out)


if __name__ == '__main__':

    # Smoke test: build a small 2-layer encoder and push a random batch
    # through it. (torch is already imported at module level.)

    from multi_headed_attention import MultiHeadedAttention
    from positionwise_feed_forward import PositionwiseFeedForward
    from utils import subsequent_mask
    from encoder_layer import EncoderLayer

    batch_size = 3
    seq_len = 4
    feature_dim = 6
    num_head = 2
    d_ff = 54
    dropout = 0.2
    num_encoder_layer = 2

    self_attn = MultiHeadedAttention(
        num_head=num_head,
        feature_dim=feature_dim,
        dropout=dropout,
    )
    feed_forward = PositionwiseFeedForward(
        feature_dim=feature_dim,
        d_ff=d_ff,
        dropout=dropout,
    )
    encoder_layer = EncoderLayer(
        feature_dim=feature_dim,
        self_attn=self_attn,
        feed_forward=feed_forward,
        dropout=dropout,
    )

    encoder = Encoder(encoder_layer, num_encoder_layer=num_encoder_layer)

    mask = subsequent_mask(size=seq_len)
    x = torch.randn(batch_size, seq_len, feature_dim)
    # BUG FIX: previously called encoder_layer(x, mask), which exercised
    # only the single prototype layer — the stacked Encoder built above
    # (with its final LayerNorm) was never run.
    result = encoder(x, mask)
    print('x size: ', x.size())  # [batch_size, seq_len, feature_dim]
    print('result size: ', result.size())  # [batch_size, seq_len, feature_dim]
    print(result)
