import math
import torch
import torch.nn as nn

from src.net.multi_head_attention import *


class EncoderBlock(nn.Module):
    """One Transformer encoder block.

    Multi-head self-attention followed by a position-wise feed-forward
    network, each sub-layer wrapped in a residual add-and-norm.
    """

    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
                 dropout, use_bias=False, **kwargs):
        super().__init__(**kwargs)
        # Sub-layer 1: multi-head self-attention.
        self.self_attn = MultiHeadAttention(
            key_size, query_size, value_size, num_hiddens, num_heads,
            dropout, use_bias)
        self.addnorm1 = AddNorm(norm_shape, dropout)
        # Sub-layer 2: position-wise feed-forward network.
        self.ffn = PositionWiseFFN(ffn_num_input, ffn_num_hiddens, num_hiddens)
        self.addnorm2 = AddNorm(norm_shape, dropout)

    def forward(self, X, valid_lens):
        # Self-attention (queries, keys and values are all X), then add & norm.
        attn_out = self.self_attn(X, X, X, valid_lens)
        residual = self.addnorm1(X, attn_out)
        # Feed-forward on the normalized residual, then the second add & norm.
        return self.addnorm2(residual, self.ffn(residual))


class TransformerEncoder(nn.Module):
    """Stack of ``EncoderBlock``s with a linear input projection and
    positional encoding in front.

    Inputs are continuous feature vectors, so a ``nn.Linear`` projection
    replaces the usual token-embedding lookup.
    """

    def __init__(self, feature_dim, key_size, query_size, value_size,
                 num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
                 num_heads, num_layers, dropout, use_bias=False, **kwargs):
        super().__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.dense = nn.Linear(feature_dim, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        self.blks = nn.Sequential()
        for idx in range(num_layers):
            block = EncoderBlock(key_size, query_size, value_size,
                                 num_hiddens, norm_shape, ffn_num_input,
                                 ffn_num_hiddens, num_heads, dropout,
                                 use_bias)
            self.blks.add_module("block" + str(idx), block)

    def forward(self, X, valid_lens):
        # Positional-encoding values lie in [-1, 1]; scale the projected
        # features by sqrt(d_model) so they are not drowned out when summed.
        X = self.pos_encoding(self.dense(X) * math.sqrt(self.num_hiddens))
        # Stash per-layer attention maps for later inspection.
        self.attention_weights = [None] * len(self.blks)
        for idx, block in enumerate(self.blks):
            X = block(X, valid_lens)
            self.attention_weights[idx] = \
                block.self_attn.self_attn.attention_weights
        return X, valid_lens


class TransformerEncoder_torch(nn.Module):
    """Transformer encoder built on ``nn.TransformerEncoder`` with a linear
    input projection and positional encoding.

    Args:
        feature_dim: size of the last dimension of the raw input.
        num_hiddens: model dimension (d_model) of the encoder.
        num_heads: number of attention heads per layer.
        num_layers: number of stacked encoder layers.
        dropout: dropout rate for the positional encoding AND the encoder
            layers.
    """

    def __init__(self, feature_dim, num_hiddens, num_heads, num_layers, dropout=0.0, **kwargs):
        super(TransformerEncoder_torch, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.dense = nn.Linear(feature_dim, num_hiddens)
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        # Fix: forward `dropout` to the encoder layer. Previously it was
        # omitted, so torch's default of 0.1 was applied regardless of the
        # `dropout` argument (even with the declared default of 0.0).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=num_hiddens, nhead=num_heads, dropout=dropout)
        encoder_norm = nn.LayerNorm(num_hiddens)
        self.encoder = nn.TransformerEncoder(
            encoder_layer,
            num_layers,
            encoder_norm
        )

    def forward(self, X, valid_lens):
        r"""
    Args:
        X: input
        valid_lens: input valid length
    Shape:
        Inputs:
        - X: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - valid_lens: :math:`(N)` where N is the batch size
        Outputs:
        - output: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - valid_lens: :math:`(N)` where N is the batch size
        """
        # Since positional encoding values are between -1 and 1, the embedding
        # values are multiplied by the square root of the embedding dimension
        # to rescale before they are summed up
        X = self.pos_encoding(self.dense(X) * math.sqrt(self.num_hiddens))

        # nn.TransformerEncoder (without batch_first) expects (L, N, H).
        X = X.permute(1, 0, 2)
        maxlen = X.size(0)
        # Boolean padding mask of shape (N, L): True marks padded positions
        # (index >= valid length) that attention must ignore.
        mask = torch.arange(maxlen, device=X.device)[None, :] >= valid_lens[:, None]
        output = self.encoder(X, src_key_padding_mask=mask)
        # Back to batch-first (N, L, H).
        output = output.permute(1, 0, 2)

        return output, valid_lens


class TransformerEncoder_torch_without_pos_encoding(nn.Module):
    """Same as ``TransformerEncoder_torch`` but the forward pass skips the
    positional encoding (only the scaled linear projection is fed to the
    encoder).

    Args:
        feature_dim: size of the last dimension of the raw input.
        num_hiddens: model dimension (d_model) of the encoder.
        num_heads: number of attention heads per layer.
        num_layers: number of stacked encoder layers.
        dropout: dropout rate for the encoder layers.
    """

    def __init__(self, feature_dim, num_hiddens, num_heads, num_layers, dropout=0.0, **kwargs):
        super(TransformerEncoder_torch_without_pos_encoding, self).__init__(**kwargs)
        self.num_hiddens = num_hiddens
        self.dense = nn.Linear(feature_dim, num_hiddens)
        # NOTE(review): never used by forward(); kept so the attribute set
        # (and any checkpoints referencing it) stays compatible.
        self.pos_encoding = PositionalEncoding(num_hiddens, dropout)
        # Fix: forward `dropout` to the encoder layer. Previously it was
        # omitted, so torch's default of 0.1 was applied regardless of the
        # `dropout` argument (even with the declared default of 0.0).
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=num_hiddens, nhead=num_heads, dropout=dropout)
        encoder_norm = nn.LayerNorm(num_hiddens)
        self.encoder = nn.TransformerEncoder(
            encoder_layer,
            num_layers,
            encoder_norm
        )

    def forward(self, X, valid_lens):
        r"""
    Args:
        X: input
        valid_lens: input valid length
    Shape:
        Inputs:
        - X: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - valid_lens: :math:`(N)` where N is the batch size
        Outputs:
        - output: :math:`(N, L, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - valid_lens: :math:`(N)` where N is the batch size
        """
        # Scale by sqrt(d_model) to mirror the positional-encoding variant;
        # no positional information is added here.
        X = self.dense(X) * math.sqrt(self.num_hiddens)

        # nn.TransformerEncoder (without batch_first) expects (L, N, H).
        X = X.permute(1, 0, 2)
        maxlen = X.size(0)
        # Boolean padding mask of shape (N, L): True marks padded positions
        # (index >= valid length) that attention must ignore.
        mask = torch.arange(maxlen, device=X.device)[None, :] >= valid_lens[:, None]
        output = self.encoder(X, src_key_padding_mask=mask)
        # Back to batch-first (N, L, H).
        output = output.permute(1, 0, 2)

        return output, valid_lens
