import torch.nn as nn

from utils import clone_module
from sublayer_connection import SublayerConnection


class EncoderLayer(nn.Module):
    """One Transformer encoder layer.

    Applies multi-head self-attention and then a position-wise
    feed-forward network, each wrapped in its own residual
    sublayer connection.
    """

    def __init__(self, feature_dim, self_attn, feed_forward, dropout):
        """
        feature_dim: size of the model's feature dimension
        self_attn: instantiated multi-head self-attention sublayer
        feed_forward: instantiated position-wise feed-forward sublayer
        dropout: dropout rate forwarded to each SublayerConnection
        """
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feature_dim = feature_dim
        # Two residual wrappers: index 0 for attention, index 1 for the FFN.
        self.sublayer = clone_module(SublayerConnection(feature_dim, dropout), 2)

    def forward(self, x, mask):
        # Sublayer 0: self-attention where query, key and value are all x.
        attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        # Sublayer 1: position-wise feed-forward on the attended output.
        return self.sublayer[1](attended, self.feed_forward)


if __name__ == '__main__':
    import torch

    from multi_headed_attention import MultiHeadedAttention
    from positionwise_feed_forward import PositionwiseFeedForward
    from utils import subsequent_mask

    # Demo hyper-parameters for a tiny encoder layer.
    batch_size, seq_len = 3, 4
    feature_dim, num_head = 6, 2
    d_ff = 54
    dropout = 0.2

    attention = MultiHeadedAttention(
        num_head=num_head,
        feature_dim=feature_dim,
        dropout=dropout,
    )
    ffn = PositionwiseFeedForward(
        feature_dim=feature_dim,
        d_ff=d_ff,
        dropout=dropout,
    )
    layer = EncoderLayer(
        feature_dim=feature_dim,
        self_attn=attention,
        feed_forward=ffn,
        dropout=dropout,
    )

    # Run a random batch through the layer; shape is preserved.
    mask = subsequent_mask(size=seq_len)
    x = torch.randn(batch_size, seq_len, feature_dim)
    result = layer(x, mask)
    print('x size: ', x.size())  # [batch_size, seq_len, feature_dim]
    print('result size: ', result.size())  # [batch_size, seq_len, feature_dim]
    print(result)
