# -*- coding: UTF-8 -*-
# @author: caoyang
# @email: caoyang@163.sufe.edu.cn
# PyTorch implementation for *Attention is all you need*
# Paper download: https://arxiv.org/abs/1706.03762

import torch
import numpy


class PositionalEncoding(torch.nn.Module):
    """Section 3.5: Implementation for Positional Encoding"""

    def __init__(self, d_input: int, d_output: int, n_position: int) -> None:
        r"""
        Transformer has no recurrence, so the position of each element is injected
        through a static sinusoid table:
        $$
        {\rm PE}({\rm pos}, 2i) = \sin\left(\frac{\rm pos}{10000^{\frac{2i}{d_{\rm model}}}}\right) \\
        {\rm PE}({\rm pos}, 2i+1) = \cos\left(\frac{\rm pos}{10000^{\frac{2i}{d_{\rm model}}}}\right)
        $$
        where $\rm pos$ is the position index of the element and $i$ indexes the
        embedding dimension.

        :param d_input		: Input dimension of `PositionalEncoding` module.
        :param d_output		: Output dimension of `PositionalEncoding` module.
        :param n_position	: Total number of positions, i.e. the sequence length.
        """
        super(PositionalEncoding, self).__init__()
        self.linear = torch.nn.Linear(in_features=d_input, out_features=d_output)
        # A buffer moves with `.to(device)` / `.cuda()` but is not a trainable parameter.
        self.register_buffer('pos_table', self._build_sinusoid_table(n_position, d_output))
        # self.pos_table has shape (1, n_position, d_output)

    @staticmethod
    def _build_sinusoid_table(n_position: int, d_output: int) -> torch.FloatTensor:
        """Vectorized construction of the static (1, `n_position`, `d_output`) sinusoid table."""
        positions = numpy.arange(n_position, dtype=numpy.float64).reshape(-1, 1)  # (n_position, 1)
        indices = numpy.arange(d_output, dtype=numpy.float64).reshape(1, -1)      # (1, d_output)
        # The exponent uses (i - i % 2) so dimensions 2i and 2i+1 share one frequency.
        table = positions / numpy.power(10000.0, (indices - indices % 2) / d_output)
        table[:, 0::2] = numpy.sin(table[:, 0::2])  # even dimensions -> sine
        table[:, 1::2] = numpy.cos(table[:, 1::2])  # odd dimensions -> cosine
        return torch.FloatTensor(table).unsqueeze(0)  # prepend a broadcastable batch axis

    def forward(self, input: torch.FloatTensor) -> torch.FloatTensor:
        """
        Project the input and add the static positional-encoding table.
        :param input: 				Shape (*, `n_position`, `d_input`)
        :return position_encoding: 	Shape (*, `n_position`, `d_output`)
        """
        projected = self.linear(input.float())  # (*, `n_position`, `d_output`)
        seq_len = projected.shape[1]
        return projected + self.pos_table[:, :seq_len].clone().detach()


class PositionWiseFeedForwardNetworks(torch.nn.Module):
    r"""Section 3.3: Implementation for Position-wise Feed-Forward Networks"""

    def __init__(self, d_input: int, d_hidden: int) -> None:
        r"""
        The Position-wise Feed-Forward Networks can be formulated as below:
        $$
        {\rm FFN}(x) = \max(0, xW_1 + b_1)W_2 + b_2
        $$
        Note:
          - Input dimension is the same as output dimension, which is set as $d_{\rm model}=512$ in paper.
          - Hidden dimension is set as $d_{ff}=2048$ in paper.

        :param d_input	: Input dimension, default 512 in paper, which is the size of $d_{\rm model}$
        :param d_hidden	: Hidden dimension, default 2048 in paper.
        """
        super(PositionWiseFeedForwardNetworks, self).__init__()
        self.linear_1 = torch.nn.Linear(in_features=d_input, out_features=d_hidden)
        self.linear_2 = torch.nn.Linear(in_features=d_hidden, out_features=d_input)

    def forward(self, input: torch.FloatTensor) -> torch.FloatTensor:
        """
        :param input	: Shape is (*, `d_input`).
        :return output	: Shape is (*, `d_input`).
        """
        x = self.linear_1(input)
        # Functional ReLU instead of allocating a new `torch.nn.ReLU` module on
        # every forward call; the math is identical: ReLU(x) = max(0, x).
        x = torch.relu(x)
        output = self.linear_2(x)
        return output


class ScaledDotProductAttention(torch.nn.Module):
    r"""Section 3.2.1: Implementation for Scaled Dot-Product Attention"""

    def __init__(self) -> None:
        super(ScaledDotProductAttention, self).__init__()

    def forward(self, q: torch.FloatTensor, k: torch.FloatTensor, v: torch.FloatTensor,
                mask: torch.LongTensor = None) -> (torch.FloatTensor, torch.FloatTensor):
        r"""
        The Scaled Dot-Product Attention can be formulated as below:
        $$
        {\rm Attention}(Q, K, V) = {\rm softmax}\left(\frac{QK^\top}{\sqrt{d_k}}\right)V
        $$

        :param q		: This is $Q$ above, whose shape is (batch_size, n_head, len_q, $d_q$)
        :param k		: This is $K$ above, whose shape is (batch_size, n_head, len_k, $d_k$)
        :param v		: This is $V$ above, whose shape is (batch_size, n_head, len_v, $d_v$)
        :param mask		: Shape (len_q, len_k); positions where `mask` is 0 are excluded from attention.

        Note:
          - $d_q$ = $d_k$ holds and let $d_q$ = $d_k$ = d_output.
          - `len_k = len_v` holds.

        :return output	: (batch_size, n_head, len_q, $d_v$)
        :return scores	: (batch_size, n_head, len_q, len_k)
        """
        # batch_size: batch size of training input.
        # n_head	: The number of multi-heads.
        # d_output	: The dimension of $Q$, $K$, $V$.
        d_q, d_k = q.shape[-1], k.shape[-1]  # `d_k` is d_output.
        assert d_q == d_k  # Assumption: $d_q$ = $d_k$
        # (batch_size, n_head, len_q, d_output) @ (batch_size, n_head, d_output, len_k) -> (batch_size, n_head, len_q, len_k)
        scores = torch.matmul(q, k.transpose(2, 3)) / (d_k ** 0.5)
        if mask is not None:
            # BUGFIX: masked logits must be a large NEGATIVE value so that softmax
            # assigns them (numerically) zero probability. The original filled
            # them with 1e-9, a tiny positive number, which barely changes the
            # logits and therefore does not mask anything.
            scores = scores.masked_fill(mask.unsqueeze(0).unsqueeze(0) == 0, -1e9)
        # Normalize over the key axis.
        scores = torch.nn.functional.softmax(scores, dim=-1)
        # (batch_size, n_head, len_q, len_k) @ (batch_size, n_head, len_k, $d_v$) -> (batch_size, n_head, len_q, $d_v$)
        output = torch.matmul(scores, v)
        return output, scores


class MultiHeadAttention(torch.nn.Module):
    r"""Section 3.2.2: Implementation for Multi-Head Attention"""

    def __init__(self, d_input: int, d_output: int, n_head: int) -> None:
        r"""
        The Multi-Head Attention can be formulated as below:
        $$
        {\rm MultiHead}(Q, K, V) = {\rm Concat}({\rm head}_1, ... , {\rm head}_h)W^O \\
        {\rm head}_i = {\rm Attention}(QW_i^Q, KW_i^K, VW_i^V)
        $$
        where:
          - $W_i^Q \in \mathcal{R}^{d_{\rm model} × d_q}$
          - $W_i^K \in \mathcal{R}^{d_{\rm model} × d_k}$
          - $W_i^V \in \mathcal{R}^{d_{\rm model} × d_v}$
          - $W^O \in \mathcal{R}^{hd_v × d_{\rm model}}$
          - $h$ is the total number of heads.
          - Note that $d_q = d_k$ holds.
          - As is mentioned in paper, $h = 8$ and $d_k = d_v = \frac{d_{\rm model}}{h} = 64$ is set as default.

        Below we set:
          - `d_input` = $d_{\rm model}$
          - `d_output` = $d_q$ = $d_k$ = $d_v$
          - Usually `d_input` = `d_output` is assumed so that the residual connection is easy to calculate.

        :param d_input	: Input dimension of `MultiHeadAttention` module.
        :param d_output	: Output dimension of `MultiHeadAttention` module.
        :param n_head	: Total number of heads.
        """
        super(MultiHeadAttention, self).__init__()
        # $W^O$: concatenated heads -> model dimension.
        self.linear = torch.nn.Linear(in_features=n_head * d_output, out_features=d_input)
        # $W^Q$, $W^K$, $W^V$: all heads are projected in one fused linear each.
        self.linear_q = torch.nn.Linear(in_features=d_input, out_features=n_head * d_output)
        self.linear_k = torch.nn.Linear(in_features=d_input, out_features=n_head * d_output)
        self.linear_v = torch.nn.Linear(in_features=d_input, out_features=n_head * d_output)
        self.n_head = n_head
        self.d_output = d_output
        self.scaled_dot_product_attention = ScaledDotProductAttention()

    def forward(self, q: torch.FloatTensor, k: torch.FloatTensor, v: torch.FloatTensor,
                mask: torch.LongTensor = None) -> (torch.FloatTensor, torch.FloatTensor):
        r"""
        Project Q/K/V for all heads at once, run scaled dot-product attention per
        head, then concatenate the heads and project back with $W^O$.

        :param q		: This is $Q$ above, whose shape is (`batch_size`, `len_q`, $d_q$)
        :param k		: This is $K$ above, whose shape is (`batch_size`, `len_k`, $d_k$)
        :param v		: This is $V$ above, whose shape is (`batch_size`, `len_v`, $d_v$)
        :param mask		: Shape (len_q, len_k); positions where `mask` is 0 are excluded from attention.

        Note that `len_k` = `len_v` holds.

        :return output	: (batch_size, len_q, d_input)
        :return scores	: (batch_size, n_head, len_q, len_k)
        """
        batch_size = q.shape[0]
        len_q, len_k, len_v = q.shape[1], k.shape[1], v.shape[1]
        assert len_k == len_v  # Assumption: `len_k` = `len_v`

        def _split_heads(projected, seq_len):
            # (batch_size, seq_len, n_head * d_output) -> (batch_size, n_head, seq_len, d_output)
            return projected.contiguous().view(batch_size, seq_len, self.n_head, self.d_output).transpose(1, 2)

        heads_q = _split_heads(self.linear_q(q), len_q)
        heads_k = _split_heads(self.linear_k(k), len_k)
        heads_v = _split_heads(self.linear_v(v), len_v)
        # attended: (batch_size, n_head, len_q, d_output); scores: (batch_size, n_head, len_q, len_k)
        attended, scores = self.scaled_dot_product_attention(q=heads_q, k=heads_k, v=heads_v, mask=mask)
        # Concatenate heads: (batch_size, n_head, len_q, d_output) -> (batch_size, len_q, n_head * d_output)
        concatenated = attended.transpose(1, 2).contiguous().view(batch_size, len_q, -1)
        output = self.linear(concatenated)  # -> (batch_size, len_q, d_input)
        return output, scores


class Encoder(torch.nn.Module):
    """Section 3.1: Implementation for Encoder"""

    def __init__(self, d_input: int, d_output: int, d_hidden: int, n_head: int, n_position: int) -> None:
        """
        A single encoder layer (the paper stacks six of these).

        :param d_input		: Input dimension of `Encoder` module.
        :param d_output		: Output dimension of `Encoder` module.
        :param d_hidden		: Hidden dimension of `PositionWiseFeedForwardNetworks` module.
        :param n_head		: Total number of heads.
        :param n_position	: Total number of positions, i.e. the sequence length.
        """
        super(Encoder, self).__init__()
        self.position_encoding = PositionalEncoding(d_input=d_input, d_output=d_output, n_position=n_position)
        self.multi_head_attention = MultiHeadAttention(d_input=d_output, d_output=d_output, n_head=n_head)
        self.layer_norm_1 = torch.nn.LayerNorm(d_output)
        self.layer_norm_2 = torch.nn.LayerNorm(d_output)
        self.position_wise_feed_forward_networks = PositionWiseFeedForwardNetworks(d_input=d_output, d_hidden=d_hidden)

    def forward(self, input: torch.FloatTensor, mask: torch.LongTensor = None) -> torch.FloatTensor:
        """
        Pipeline: positional encoding -> self-attention -> Add & Norm -> FFN -> Add & Norm.
        See https://img-blog.csdnimg.cn/20210114103418782.png

        :param input	: Shape is (batch_size, `n_position`, `d_input`)
        :param mask		: Shape is (`n_position`, `n_position`)
        :return output	: Shape is (batch_size, `n_position`, `d_output`)
        """
        encoded = self.position_encoding(input=input)  # (*, n_position, d_input) -> (*, n_position, d_output)
        # In the encoder Q, K and V are all the (position-encoded) input itself.
        attended, _ = self.multi_head_attention(q=encoded, k=encoded.clone(), v=encoded.clone(), mask=mask)
        hidden = self.layer_norm_1(attended + encoded.clone())  # Add & Norm: residual connection.
        transformed = self.position_wise_feed_forward_networks(input=hidden)  # Feed Forward
        return self.layer_norm_2(transformed + hidden.clone())  # Add & Norm: residual connection.


class Decoder(torch.nn.Module):
    """Section 3.1: Implementation for Decoder"""

    def __init__(self, d_input: int, d_output: int, d_hidden: int, n_head: int, n_position: int) -> None:
        """
        A single decoder layer (the paper stacks six of these).

        :param d_input		: Input dimension of `Decoder` module.
        :param d_output		: Output dimension of `Decoder` module.
        :param d_hidden		: Hidden dimension of `PositionWiseFeedForwardNetworks` module.
        :param n_head		: Total number of heads.
        :param n_position	: Total number of positions, i.e. the sequence length.
        """
        super(Decoder, self).__init__()
        self.position_encoding = PositionalEncoding(d_input=d_input, d_output=d_output, n_position=n_position)
        self.multi_head_attention_1 = MultiHeadAttention(d_input=d_output, d_output=d_output, n_head=n_head)
        self.multi_head_attention_2 = MultiHeadAttention(d_input=d_output, d_output=d_output, n_head=n_head)
        self.layer_norm_1 = torch.nn.LayerNorm(d_output)
        self.layer_norm_2 = torch.nn.LayerNorm(d_output)
        self.layer_norm_3 = torch.nn.LayerNorm(d_output)
        self.position_wise_feed_forward_networks = PositionWiseFeedForwardNetworks(d_input=d_output, d_hidden=d_hidden)

    def generate_subsequence_mask(self, subsequence: torch.FloatTensor) -> torch.Tensor:
        """
        Causal mask: a lower-triangular matrix of ones (diagonal included), so
        that position i can only attend to positions j <= i.
        """
        seq_len = subsequence.shape[1]
        # torch.tril(ones) == 1 - torch.triu(ones, diagonal=1): diagonal stays 1.
        return torch.tril(torch.ones((seq_len, seq_len), dtype=torch.int, device=subsequence.device))

    def forward(self, encoder_output: torch.FloatTensor, target: torch.FloatTensor,
                mask: torch.LongTensor = None) -> torch.FloatTensor:
        """
        Pipeline: masked self-attention -> Add & Norm -> encoder-decoder attention
        -> Add & Norm -> FFN -> Add & Norm.
        See https://img-blog.csdnimg.cn/20210114103418782.png

        :param encoder_output	: Output of Encoder, whose shape is (batch_size, `n_position`, `d_output_encoder`)
        :param target			: Target tensor in dataset, whose shape is (batch_size, `n_position`, `d_input_decoder`)
        :param mask				: Optional mask for the encoder-decoder attention.
        :return output			: Shape is (batch_size, `n_position`, `d_output`)
        """
        encoded = self.position_encoding(input=target)  # (*, n_position, d_input) -> (*, n_position, d_output)
        # Masked self-attention over the (position-encoded) target.
        self_attended, _ = self.multi_head_attention_1(
            q=encoded, k=encoded.clone(), v=encoded.clone(),
            mask=self.generate_subsequence_mask(target))
        hidden = self.layer_norm_1(self_attended + encoded.clone())  # Add & Norm: residual connection.
        # Encoder-decoder attention: queries from the decoder, keys/values from the encoder.
        cross_attended, _ = self.multi_head_attention_2(q=hidden, k=encoder_output, v=encoder_output, mask=mask)
        hidden = self.layer_norm_2(cross_attended + hidden.clone())  # Add & Norm: residual connection.
        transformed = self.position_wise_feed_forward_networks(input=hidden)  # Feed Forward
        return self.layer_norm_3(transformed + hidden.clone())  # Add & Norm: residual connection.


class Transformer(torch.nn.Module):
    """Attention is all you need: Implementation for Transformer"""

    def __init__(self, d_input_encoder: int, d_input_decoder: int, d_output_encoder: int, d_output_decoder: int,
                 d_output: int, d_hidden_encoder: int, d_hidden_decoder: int, n_head_encoder: int, n_head_decoder: int,
                 n_position_encoder: int, n_position_decoder: int) -> None:
        """
        :param d_input_encoder		: Input dimension of Encoder.
        :param d_input_decoder		: Input dimension of Decoder.
        :param d_output_encoder		: Output dimension of Encoder.
        :param d_output_decoder		: Output dimension of Decoder.
        :param d_output				: Final output dimension of Transformer.
        :param d_hidden_encoder		: Hidden dimension of linear layer in Encoder.
        :param d_hidden_decoder		: Hidden dimension of linear layer in Decoder.
        :param n_head_encoder		: Total number of heads in Encoder (4 in the demo below).
        :param n_head_decoder		: Total number of heads in Decoder (4 in the demo below).
        :param n_position_encoder	: Sequence length of Encoder input, e.g. max padding length of source sentences.
        :param n_position_decoder	: Sequence length of Decoder input, e.g. max padding length of target sentences.
        """
        super(Transformer, self).__init__()
        self.encoder = Encoder(d_input=d_input_encoder, d_output=d_output_encoder, d_hidden=d_hidden_encoder,
                               n_head=n_head_encoder, n_position=n_position_encoder)
        self.decoder = Decoder(d_input=d_input_decoder, d_output=d_output_decoder, d_hidden=d_hidden_decoder,
                               n_head=n_head_decoder, n_position=n_position_decoder)
        self.linear = torch.nn.Linear(in_features=d_output_decoder, out_features=d_output)

    def forward(self, source, target) -> torch.FloatTensor:
        """
        Encode `source`, decode `target` against the encoder output, then project
        and normalize with a softmax over the last axis.
        See https://img-blog.csdnimg.cn/20210114103418782.png

        NOTE(review): because softmax is applied here, a downstream loss should
        NOT be `CrossEntropyLoss` (which expects raw logits) — confirm against
        the training code.
        """
        encoder_output = self.encoder(source)
        decoder_output = self.decoder(encoder_output, target)
        x = self.linear(decoder_output)
        output = torch.nn.functional.softmax(x, dim=-1)
        return output

    def size(self):
        """Print the approximate model size, assuming 4 bytes (float32) per parameter."""
        size = sum(p.numel() for p in self.parameters())
        print('%.2fKB' % (size * 4 / 1024))


if __name__ == '__main__':
    # -----------------------------------------------------------------
    # Module-level smoke tests (uncomment to run):
    #
    # Positional Encoding
    # pe = PositionalEncoding(d_input=37, d_output=64, n_position=50)
    # print(pe(torch.Tensor(50, 37)).shape)       # (1, 50, 64)
    # print(pe(torch.Tensor(128, 50, 37)).shape)  # (128, 50, 64)
    #
    # Scaled Dot-Product Attention
    # sdpa = ScaledDotProductAttention()
    # output, scores = sdpa(torch.Tensor(128, 8, 25, 64), torch.Tensor(128, 8, 50, 64),
    #                       torch.Tensor(128, 8, 50, 32), mask=torch.Tensor(25, 50))
    # print(scores.shape)  # (128, 8, 25, 50)
    # print(output.shape)  # (128, 8, 25, 32)
    #
    # Multi-Head Attention
    # mha = MultiHeadAttention(64, 64, 8)
    # output, scores = mha(torch.Tensor(128, 50, 64), torch.Tensor(128, 50, 64),
    #                      torch.Tensor(128, 50, 64), torch.Tensor(50, 50))
    # print(scores.shape)  # (128, 8, 50, 50)
    # print(output.shape)  # (128, 50, 64)
    # -----------------------------------------------------------------
    # End-to-end Transformer demo on random data.
    params = dict(
        d_input_encoder=37,
        d_input_decoder=12,
        d_output_encoder=64,
        d_output_decoder=64,
        d_output=12,
        d_hidden_encoder=128,
        d_hidden_decoder=128,
        n_head_encoder=4,
        n_head_decoder=4,
        n_position_encoder=10,
        n_position_decoder=7,
    )
    batch_size = 128
    model = Transformer(**params)
    model.size()  # print the approximate model size
    source_batch = torch.randn(batch_size, params['n_position_encoder'], params['d_input_encoder'])
    target_batch = torch.randn(batch_size, params['n_position_decoder'], params['d_input_decoder'])
    prediction = model(source_batch, target_batch)
    print(prediction.shape)
    print(prediction[0][0])