import torch
from torch import nn
import mul_head_attention as mha
import residual_connection as rc
import feed_forward as ff
class EncoderBlock(nn.Module):
    """One Transformer encoder layer.

    Consists of multi-head self-attention followed by a position-wise
    feed-forward network, each wrapped in a residual connection
    (see `rc.ResidualConnection` for the norm/dropout placement).
    """

    def __init__(self, dim_model, ffn_hidden, head_num, dropout_p=0.1):
        super(EncoderBlock, self).__init__()
        # Two sub-layers: self-attention and position-wise FFN.
        self.attention = mha.MultiHeadAttention(dim_model, head_num, dropout_p)
        self.ffn = ff.PositionwiseFeedForward(dim_model, ffn_hidden, dropout_p)
        # One residual wrapper per sub-layer (index 0: attention, 1: FFN).
        wrappers = [rc.ResidualConnection(dim_model, dropout_p), 
                    rc.ResidualConnection(dim_model, dropout_p)]
        self.residual_connection = nn.ModuleList(wrappers)

    def forward(self, x, mask=None):
        # Self-attention sub-layer: query, key and value are all x.
        def self_attn(t):
            return self.attention(t, t, t, mask)

        x = self.residual_connection[0](x, self_attn)
        # Feed-forward sub-layer.
        return self.residual_connection[1](x, self.ffn)
import layer_norm as ln
class Encoder(nn.Module):
    """Transformer encoder: a stack of `block_num` EncoderBlock layers
    followed by a final layer normalization.
    """

    def __init__(self, dim_model,
                 ffn_hidden, head_num, block_num, dropout=0.1):
        super(Encoder, self).__init__()
        blocks = [
            EncoderBlock(dim_model, ffn_hidden, head_num, dropout)
            for _ in range(block_num)
        ]
        self.layers = nn.ModuleList(blocks)
        # Final normalization applied after the last block.
        self.norm = ln.LayerNorm(dim_model)

    def forward(self, x, s_mask):
        # Feed the input through each encoder block in order,
        # threading the source mask through every layer.
        out = x
        for block in self.layers:
            out = block(out, s_mask)
        return self.norm(out)



