import torch.nn as nn
from modules.encoders.base_encoder import BaseEncoder
from modules.multi_head_attn import MultiHeadAttention
import torch.nn.functional as F
from utils.tensor import sequence_mask


class PositionWiseFeedForward(nn.Module):
    """Pre-norm position-wise feed-forward sublayer with a residual connection.

    Computes ``x + Dropout(W_2(Dropout(act(W_1(LayerNorm(x))))))`` — the
    normalization is applied to the sublayer *input* (pre-norm), and the
    residual skips the whole sublayer.

    Args:
        d_model (int): model (input/output) dimension.
        d_ff (int): hidden dimension of the two-layer feed-forward network.
        dropout (float): dropout ratio, applied after the activation and
            again after the second linear projection.
        activation_fn: activation callable, e.g. ``F.relu`` or ``F.gelu``.

    Input:
        x : ``[batch, len, d_model]``
    Output:
        output : ``[batch, len, d_model]``
    """

    def __init__(self, d_model, d_ff, dropout=0.1, activation_fn=F.relu):
        super(PositionWiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout_1 = nn.Dropout(dropout)
        self.activation = activation_fn
        self.dropout_2 = nn.Dropout(dropout)

    def forward(self, x):
        """Apply the feed-forward sublayer; returns a tensor shaped like ``x``."""
        inner = self.dropout_1(self.activation(self.w_1(self.layer_norm(x))))
        # Residual connection around the whole sublayer.
        return self.dropout_2(self.w_2(inner)) + x


class TransformerEncoderLayer(nn.Module):
    """One pre-norm Transformer encoder layer.

    Self-attention followed by a position-wise feed-forward sublayer,
    each wrapped in a residual connection.

    Args:
        d_model (int): model dimension.
        heads (int): number of attention heads.
        d_ff (int): hidden dimension of the feed-forward sublayer.
        dropout (float): dropout ratio on the attention residual branch
            (also forwarded to the feed-forward sublayer).
        attention_dropout (float): dropout ratio inside the attention.
        max_relative_positions (int): relative-position window for the
            attention (0 disables relative positions).
        pos_ffn_activation_fn: activation for the feed-forward sublayer,
            e.g. ``F.relu`` or ``F.gelu``.

    Input:
        inputs : ``[batch, src_len, d_model]``
        mask : ``[batch, 1, src_len]`` padding mask forwarded to the
            attention (semantics defined by ``MultiHeadAttention``).
    Output:
        output : ``[batch, src_len, d_model]``
    """

    def __init__(self,
                 d_model,
                 heads,
                 d_ff,
                 dropout,
                 attention_dropout,
                 max_relative_positions=0,
                 pos_ffn_activation_fn=F.relu):
        super(TransformerEncoderLayer, self).__init__()

        self.self_attn = MultiHeadAttention(
            head_count=heads,
            model_dim=d_model,
            dropout=attention_dropout,
            max_relative_positions=max_relative_positions)
        self.feed_forward = PositionWiseFeedForward(
            d_model, d_ff, dropout, pos_ffn_activation_fn)
        # Pre-norm: applied to the attention input, not its output.
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, mask):
        """Run self-attention then feed-forward, each with a residual."""
        normed = self.layer_norm(inputs)
        attn_out, _ = self.self_attn(
            normed, normed, normed, mask=mask, attn_type="self")
        residual = inputs + self.dropout(attn_out)
        return self.feed_forward(residual)


class TransformerEncoder(BaseEncoder):
    """Stack of pre-norm Transformer encoder layers over an embedded source.

    Args:
        num_layers (int): number of stacked ``TransformerEncoderLayer``s.
        d_model (int): model dimension.
        heads (int): attention heads per layer.
        d_ff (int): feed-forward hidden dimension per layer.
        dropout (float): residual dropout ratio.
        attention_dropout (float): dropout ratio inside the attention.
        embedding: token embedding module mapping ids to vectors.
        max_relative_positions (int): relative-position window (0 disables).
        pos_ffn_activation_fn: activation for the feed-forward sublayers,
            e.g. ``F.relu`` or ``F.gelu``.

    Input:
        src : ``[batch, max_src_len]`` token ids
        lengths : ``[batch]`` valid lengths, or ``None`` to skip masking
    Output:
        embedding : ``[batch, src_len, d_model]``
        memory_bank : ``[batch, src_len, d_model]``
        lengths : ``[batch]`` (returned unchanged)
    """

    def __init__(self,
                 num_layers,
                 d_model,
                 heads,
                 d_ff,
                 dropout,
                 attention_dropout,
                 embedding,
                 max_relative_positions,
                 pos_ffn_activation_fn=F.relu):
        super(TransformerEncoder, self).__init__()
        self.embedding = embedding
        self.transformer = nn.ModuleList([
            TransformerEncoderLayer(
                d_model=d_model,
                heads=heads,
                d_ff=d_ff,
                dropout=dropout,
                attention_dropout=attention_dropout,
                max_relative_positions=max_relative_positions,
                pos_ffn_activation_fn=pos_ffn_activation_fn
            )
            for _ in range(num_layers)])
        # Final normalization completes the pre-norm residual stack.
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    def forward(self, src, lengths=None):
        """Encode ``src``; returns ``(embedding, memory_bank, lengths)``."""
        emb = self.embedding(src)
        # Inverted sequence mask: True marks padded positions.
        # When no lengths are given, pass None so all positions attend
        # everywhere; NOTE(review): assumes MultiHeadAttention accepts
        # mask=None — confirm against its implementation.
        mask = None if lengths is None else ~sequence_mask(lengths).unsqueeze(1)
        out = emb
        for layer in self.transformer:
            out = layer(out, mask)
        return emb, self.layer_norm(out), lengths

    @classmethod
    def from_opt(cls, opt, embedding=None):
        """Build an encoder from a configuration object.

        Not implemented yet; raising here prevents the stub from silently
        returning ``None`` to callers.
        """
        raise NotImplementedError(
            "TransformerEncoder.from_opt is not implemented")


if __name__ == '__main__':
    import torch

    # Smoke test: four right-padded sequences over a 6-word vocabulary
    # (pad id 0), embedded into 12 dimensions.
    demo_embedding = torch.nn.Embedding(6, 12, 0)
    demo_src = torch.tensor([
        [1, 2, 3, 4, 5],
        [2, 3, 4, 5, 0],
        [3, 4, 5, 0, 0],
        [4, 5, 0, 0, 0],
    ])
    demo_lengths = torch.tensor([5, 4, 3, 2])

    encoder = TransformerEncoder(
        num_layers=3,
        d_model=12,
        heads=3,
        d_ff=8,
        dropout=0.1,
        attention_dropout=0.1,
        embedding=demo_embedding,
        max_relative_positions=0,
        pos_ffn_activation_fn=F.relu,
    )
    emb_out, memory_bank, out_lengths = encoder(demo_src, demo_lengths)
    print(memory_bank.shape, out_lengths.shape)
