from typing import Optional, List
from torch import Tensor
import torch.nn as nn
import torch
import copy
from torch.nn import MultiheadAttention
from torch.nn import Transformer
import math


class CustomizedTransformerEncoderLayer(nn.Module):
    """Transformer encoder layer with learned K/Q/V pre-embedding MLPs.

    Unlike ``nn.TransformerEncoderLayer``, the keys, queries and values are
    first passed through three independent 3-layer MLPs
    (Linear -> LayerNorm -> activation) before multi-head self-attention.
    Supports both pre-norm and post-norm residual layouts via ``norm_first``.
    """

    __constants__ = ["batch_first", "norm_first"]

    def __init__(
        self,
        d_model: int,
        nhead: int,
        dim_feedforward: int = 2048,
        dropout: float = 0.1,
        activation=nn.LeakyReLU,
        norm_eps: float = 1e-5,
        batch_first: bool = True,
        norm_first: bool = False,
    ) -> None:
        super().__init__()
        self.self_attn = MultiheadAttention(
            d_model, nhead, dropout=dropout, batch_first=batch_first
        )

        # K, Q and V each get an independent 3-layer feature-extraction MLP;
        # q/v start as deep copies of k (same architecture, separate weights).
        self.k_embedding = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model, eps=norm_eps),
            activation(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model, eps=norm_eps),
            activation(),
            nn.Linear(d_model, d_model),
            nn.LayerNorm(d_model, eps=norm_eps),
            activation(),
        )
        self.q_embedding = copy.deepcopy(self.k_embedding)
        self.v_embedding = copy.deepcopy(self.k_embedding)

        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout2 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm_first = norm_first
        self.norm1 = nn.LayerNorm(d_model, eps=norm_eps)
        self.norm3 = nn.LayerNorm(d_model, eps=norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = activation()

    def forward(
        self,
        src: Tensor,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
    ) -> Tensor:
        r"""Pass the input (and masks) through the encoder layer.

        Args:
            src: input sequence, shape (N, T, C) when ``batch_first`` (required).
            src_mask: attention mask over time steps (optional).
            src_key_padding_mask: per-batch key padding mask (optional).

        Returns:
            Tensor of the same shape as ``src``.
        """
        # Pre-norm vs post-norm residual layout,
        # see Fig. 1 of https://arxiv.org/pdf/2002.04745v1.pdf
        x = src  # N, T, C

        if self.norm_first:
            x = x + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask)
            x = x + self._ff_block(self.norm3(x))

        else:
            x = self.norm1(x + self._sa_block(x, src_mask, src_key_padding_mask))
            x = self.norm3(x + self._ff_block(x))

        return x

    # self-attention block
    def _sa_block(
        self, x: Tensor, attn_mask: Optional[Tensor], key_padding_mask: Optional[Tensor]
    ) -> Tensor:
        k = self.k_embedding(x)
        q = self.q_embedding(x)
        v = self.v_embedding(x)
        # Call the module (not .forward directly) so registered hooks run.
        x, _ = self.self_attn(
            q,
            k,
            v,
            attn_mask=attn_mask,
            key_padding_mask=key_padding_mask,
            need_weights=False,
        )
        return self.dropout1(x)

    # feed forward block
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.linear2(self.dropout2(self.activation(self.linear1(x))))
        return self.dropout3(x)


class CustomizedTransformer(nn.Module):
    """Transformer encoder stack with an input projection and an MLP head.

    Input of shape (N, T, input_chn) is linearly projected to ``d_model``,
    positionally encoded, passed through ``num_layers`` customized encoder
    layers and finally mapped to ``output_chn`` channels by a small MLP.
    """

    def __init__(
        self,
        input_chn,
        output_chn,
        d_model,
        num_layers,
        n_heads,
        is_causal=True,
        batch_first=True,
    ):
        super().__init__()
        self.input_linear = nn.Sequential(nn.Linear(input_chn, d_model))
        self.is_causal = is_causal

        # ModuleList (not Sequential): the layers are invoked manually in
        # forward() with extra mask arguments, which Sequential cannot pass.
        # State-dict keys are unchanged (both containers use numeric indices).
        self.backbone = nn.ModuleList(
            [
                CustomizedTransformerEncoderLayer(
                    d_model,
                    n_heads,
                    batch_first=batch_first,
                    dim_feedforward=1024,
                    norm_first=True,
                )
                for _ in range(num_layers)
            ]
        )
        # Output head: d_model -> d_model//2 -> d_model//2 -> output_chn.
        self.head = nn.Sequential(
            nn.Linear(d_model, d_model // 2),
            nn.LayerNorm(d_model // 2),
            nn.LeakyReLU(),
            nn.Linear(d_model // 2, d_model // 2),
            nn.LayerNorm(d_model // 2),
            nn.LeakyReLU(),
            nn.Linear(d_model // 2, output_chn),
        )
        self.pos_encoding = PositionalEncoding(d_model)

    def forward(self, src: torch.FloatTensor, src_padding_mask: torch.BoolTensor):
        """Run the full model.

        Parameters
        ----------
        src : torch.FloatTensor
            Input of shape (N, T, input_chn).
        src_padding_mask : torch.BoolTensor
            (N, T) mask, presumably True for VALID (non-padded) steps — it is
            inverted below before attention, which expects True = ignore.
            NOTE(review): polarity inferred from the inversion; confirm with callers.

        Returns
        -------
        torch.Tensor
            Output of shape (N, T, output_chn).
        """
        src = self.input_linear(src)
        src = self.pos_encoding(src)
        if self.is_causal:
            # Upper-triangular -inf mask: each step attends only to the past.
            src_mask = Transformer.generate_square_subsequent_mask(
                src.shape[1], device=src.device
            )
        else:
            # All-zeros additive mask: full bidirectional attention.
            src_mask = torch.zeros(
                (src.size(1), src.size(1)), dtype=torch.float32, device=src.device
            )

        key_padding_mask = ~src_padding_mask
        for layer in self.backbone:
            src = layer(src, src_mask, key_padding_mask)
            # Rows whose keys are fully masked can yield NaNs; zero them out.
            src = src.masked_fill(torch.isnan(src), 0)

        return self.head(src)


class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding (Vaswani et al., 2017) + dropout.

    pe[t, 2i]   = sin(t / 10000^(2i / d_model))
    pe[t, 2i+1] = cos(t / 10000^(2i / d_model))

    The table is precomputed for ``max_len`` positions and registered as a
    buffer so it moves with the module across devices but is not trained.
    """

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 50):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        position = torch.arange(max_len).unsqueeze(1)  # (max_len, 1)
        # (ceil(d_model / 2),) frequencies: 1 / 10000^(2i / d_model)
        div_term = torch.exp(
            torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model)
        )
        pe = torch.zeros(1, max_len, d_model)
        pe[0, :, 0::2] = torch.sin(position * div_term)
        # Slice the frequencies so odd d_model also works: the cos slice has
        # floor(d_model / 2) columns, one fewer than div_term may hold.
        pe[0, :, 1::2] = torch.cos(position * div_term[: d_model // 2])
        self.register_buffer("pe", pe)

    def forward(self, x: Tensor) -> Tensor:
        """Add positional encodings to ``x`` and apply dropout.

        Arguments:
            x: Tensor, shape ``[batch_size, seq_len, d_model]``;
               ``seq_len`` must not exceed ``max_len``.
        """
        x = x + self.pe[:, : x.size(1)]
        return self.dropout(x)


if __name__ == "__main__":
    # Smoke test: one forward pass through a small model.
    # torch.device(1) hard-required CUDA device 1; fall back to CPU so the
    # script also runs on machines without (enough) GPUs.
    device = torch.device("cuda:1" if torch.cuda.device_count() > 1 else "cpu")
    batch_size = 10
    time_len = 8
    d_model = 512
    num_head = 8
    chn_in = 10
    chn_out = 3

    test_model = CustomizedTransformer(
        chn_in, chn_out, d_model, 5, num_head, True, True
    ).to(device)

    test_input = torch.randn((batch_size, time_len, chn_in)).to(device)
    # Random boolean mask; True presumably marks valid (non-padded) steps.
    test_input_mask = (torch.randn((batch_size, time_len)) > 0).to(device)

    test_model.train()
    output = test_model(test_input, test_input_mask)
    print(output.shape)
