# -*- coding: utf-8 -*-

import torch.nn as nn
from transformer.module.multi_head_attention import MultiHeadAttention
from transformer.module.residualfeedforward import ResidualFeedForward


class EncoderLayer(nn.Module):
    """Single Transformer encoder layer.

    Applies multi-head self-attention followed by a residual position-wise
    feed-forward sub-module (both presumably include their own residual /
    normalization logic — defined in the project modules).

    :param d_model: model (hidden) dimension H; must be divisible by n_head
    :param n_head:  number of attention heads
    :param dropout: dropout probability forwarded to both sub-modules
    """

    def __init__(self, d_model, n_head, dropout):
        super(EncoderLayer, self).__init__()
        # Guard against silent truncation: d_model // n_head would quietly
        # shrink the per-head dimension when the division is not exact.
        if d_model % n_head != 0:
            raise ValueError(
                "d_model (%d) must be divisible by n_head (%d)" % (d_model, n_head))
        self.d_model = d_model
        self.n_head = n_head
        # Each head operates in a (d_model / n_head)-dimensional subspace.
        single_head_dim = self.d_model // self.n_head
        self.multi_attention = MultiHeadAttention(n_head=n_head,
                                                  d_model=self.d_model,
                                                  d_k=single_head_dim,
                                                  d_v=single_head_dim,
                                                  dropout=dropout)
        self.feedforward = ResidualFeedForward(d_model=d_model,
                                               dropout=dropout)

    def forward(self, x, mask=None):
        """Run the encoder layer.

        :param x:    input tensor, shape (B, L, H)
        :param mask: optional attention mask, shape (B, 1, 1, L)
        :return:     output tensor, shape (B, L, H)
        """
        # Self-attention: query, key and value are all the layer input.
        q = k = v = x
        # Invoke the sub-modules themselves (not .forward) so that
        # nn.Module.__call__ runs and registered hooks are honoured.
        x, _ = self.multi_attention(q, k, v, mask)
        x = self.feedforward(x)
        return x
