############################################################
'''
Implementation of the method proposed in [Attention Is All You Need], arXiv 1706.03762.
Author: chingching.
This file does not include the training code.
Reference: http://nlp.seas.harvard.edu/2018/04/03/attention.html
'''


############################################################

import math
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
from torch import zeros, sqrt, arange, exp, sin, cos
from torch import Tensor
from torch import tensor
from torch.nn import Module
from torch.autograd import Variable
from utils import LayerNorm
from utils import clones
from utils import scaledDotProductAttention
from einops import rearrange
from parameters import SPECIAL_TOKENS
from typing import Any, Callable, Union, Dict


# [batch_size, in_seq_len, d_model]
# MultiHead: [batch_size, in_seq_len, d_model / heads] per head

# Techniques used:
#   1. Pre-norm for all residual connections to accelerate training.
#   2. Optional support for DeepNorm.


class ResidualConnection(nn.Module):
    '''
    A residual (skip) connection wrapped around an arbitrary sub-layer.

    To facilitate these residual connections, all sub-layers in the model, as well
    as the embedding layers, produce outputs of dimension d_model=512. This class
    only maintains a LayerNorm and a dropout; the sub-layer is supplied at call time.

    Supported norm placements:
        'deep_norm' : dropout(norm(alpha * x + sublayer(x)))
        'pre_norm'  : x + dropout(sublayer(norm(x)))
        'post_norm' : norm(x + dropout(sublayer(x)))
    '''

    def __init__(self, 
                 features: int, 
                 dropout: float, 
                 norm_method: str = 'deep_norm', 
                 alpha: Union[None, int, float] = 2., *args, **kwargs) -> None:
        """
        :param features: size of the normalized feature dimension (d_model).
        :param dropout: dropout probability used inside the residual op.
        :param norm_method: one of 'deep_norm', 'pre_norm', 'post_norm'.
        :param alpha: residual scaling factor, required (>= 1) for 'deep_norm'.
        :raises ValueError: on an unknown norm_method or an invalid alpha.
        """
        super().__init__(*args, **kwargs)

        if norm_method == 'deep_norm':
            # A bare string after `raise` is itself a TypeError at runtime;
            # raise a real exception type so callers see the intended message.
            if alpha is None:
                raise ValueError(
                    "alpha must be specified as a number when deep_norm is activated.")
            if alpha < 1:
                raise ValueError(
                    "alpha < 1 will destabilize training when deep_norm is used.")

            def opt(x, sublayer):
                # NOTE(review): dropout is applied after the norm here, which
                # differs from the DeepNet paper (dropout on the sublayer
                # output) — kept as-is to preserve existing behavior.
                return self.dropout(self.norm(x*self.alpha + sublayer(x)))

        elif norm_method == 'pre_norm':
            def opt(x, sublayer):
                return x + self.dropout(sublayer(self.norm(x)))
        elif norm_method == 'post_norm':
            def opt(x, sublayer):
                return self.norm(x + self.dropout(sublayer(x)))
        else:
            # Previously an unknown method left `opt` unassigned and crashed
            # later with UnboundLocalError; fail fast with a clear error.
            raise ValueError(f"unknown norm_method: {norm_method!r}")

        self.opt = opt
        self.norm = LayerNorm(features)
        self.dropout = nn.Dropout(dropout)
        self.norm_method = norm_method
        self.alpha = alpha

    def forward(self, x, sublayer) -> Tensor:
        """
        Apply the configured residual connection to any sublayer that preserves
        the input's feature size.
        """
        return self.opt(x, sublayer)


class ScaledDotProductAttention(nn.Module):
    """Thin module wrapper around the functional scaled dot-product attention.

    The only state held is a dropout layer, which is forwarded to the
    functional attention routine on every call.
    """

    def __init__(self, dropout=0., *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.drop_out_layer = nn.Dropout(dropout)

    def forward(self, Q, K, V, mask=None):
        """Compute attention over Q/K/V.

        Q: [b, n, d_k] or [b, h, token, d_k]
        K: [b, m, d_k] or [b, h, token, d_k]
        V: [b, m, d_v] or [b, h, token, d_v]
        """
        attend = scaledDotProductAttention
        return attend(Q, K, V, mask=mask, dropout=self.drop_out_layer)


class MultiHeadAttention(nn.Module):
    """Multi-head attention: project Q/K/V, attend per head, then re-project.

    Each head attends over a d_model/heads slice; head outputs are
    concatenated and passed through a final output projection.
    """

    def __init__(self, heads: int, d_model: int, dropout: float = 0.1, *args, **kwargs) -> None:
        """
        :raises ValueError: if d_model is not divisible by heads.
        """
        super().__init__(*args, **kwargs)

        if not d_model % heads == 0:
            # `raise <str>` is itself a TypeError; raise a proper exception
            # type so the hyperparameter error is actually reported.
            raise ValueError(
                f"d_model must be a multiple of heads. received heads:{heads} d_model :{d_model}")

        self.heads = heads
        # Four identical linears: Q, K, V projections plus the output projection.
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attention = None  # most recent attention weights, kept for inspection
        self.dropout = nn.Dropout(dropout)

    def forward(self, Q, K, V, mask: Tensor = None):
        """
        Q, K, V of shape [b, token, d_model]; mask of shape [1, size, size].
        Returns a tensor of shape [b, token, d_model].
        """
        if mask is not None:
            # Same mask applied to all h heads: [1,size,size] -> [1,1,size,size],
            # broadcast to [1,h,size,size] during masking.
            mask = mask.unsqueeze(1)

        # [b,token,d_model] -> [b,heads,token,d_k] for each of Q, K, V.
        Q, K, V = [rearrange(linear(x), 'b token (heads d_k) -> b heads token d_k',
                             heads=self.heads) for linear, x in zip(self.linears, (Q, K, V))]
        x, self.attention = scaledDotProductAttention(
            Q, K, V, mask, self.dropout)
        # x of shape [b,heads,token,d_v]; concatenate heads back together.
        x = rearrange(x, 'b heads token d_v -> b token (heads d_v)')

        return self.linears[-1](x)


class PositionWiseFeedForward(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout: float = 0.1, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        return self.w_2(self.dropout(F.relu(self.w_1(x))))


class Embeddings(nn.Module):
    """
    Learned embeddings that convert the input and output tokens to vectors of
    dimension d_model, multiplied by sqrt(d_model) as in the paper.
    """

    def __init__(self, d_model: int, vocab: int, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # padding rows stay zero and receive no gradient updates
        self.lut = nn.Embedding(
            vocab, d_model, padding_idx=SPECIAL_TOKENS['<pad>'])
        # Store a float tensor: `tensor(d_model)` is an integer LongTensor and
        # torch.sqrt is not defined for integer dtypes on older PyTorch builds.
        self.d_model = tensor(float(d_model))

    def forward(self, x):
        # x: [batch, tokens] integer ids -> [batch, tokens, d_model]
        return self.lut(x) * sqrt(self.d_model)


class PositionalEncoding(nn.Module):
    r"""Add sinusoidal position information to a [B, tokens, d_model] input.

    A [1, max_len, d_model] table `pe` is precomputed once; at call time the
    first `tokens` rows are added to the input. The number of tokens must not
    exceed max_len.
    """

    def __init__(self, d_model: int, dropout: float, max_len: int = 5000, *args, **kwargs) -> None:
        """
        :param d_model: feature dimension; even indices get sin, odd get cos.
        :param dropout: dropout applied after adding the encodings.
        :param max_len: largest sequence length the table supports.
        """
        super().__init__(*args, **kwargs)

        self.dropout = nn.Dropout(dropout)

        # The position matrix is constant given (token_position,
        # feature_position), so it can be fully precomputed and later sliced
        # to any sequence length.
        pe = zeros(max_len, d_model)
        token_positon = arange(0, max_len).unsqueeze(1)  # [max_len, 1]
        # feature positions cover half of d_model: each index i serves both
        # the sin (even) and cos (odd) channels.
        feature_position = arange(0, d_model, 2)  # [d_model / 2]
        # 1/10000^(2i/d_model), computed in log space for numerical stability
        div_term = exp(
            feature_position *
            -(math.log(10_000.0)/d_model)
        )

        pe[:, 0::2] = sin(token_positon * div_term)
        pe[:, 1::2] = cos(token_positon * div_term)

        # Buffer: saved with the module and moved by .to(device), but not a
        # trainable parameter.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
        # slicing the non-trainable buffer directly is equivalent.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

# Reusable building blocks


class Generator(nn.Module):
    """Standard generation head: linear projection to vocab + log-softmax."""

    def __init__(self, d_model: int, vocab: int, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.proj = nn.Linear(d_model, vocab)

    def forward(self, x):
        # project to vocabulary logits, then normalize in log space
        logits = self.proj(x)
        return F.log_softmax(logits, dim=-1)


class Encoder(nn.Module):
    """Core encoder: a stack of N identical layers with a final LayerNorm."""

    def __init__(self, layer, N, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Run the input (and mask) through each layer in turn, then normalize."""
        output = x
        for encoder_layer in self.layers:
            output = encoder_layer(output, mask)
        return self.norm(output)


class Decoder(nn.Module):

    """
    Generic N-layer decoder with masking and a final LayerNorm.
    """

    def __init__(self, layer, N, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, memory_mask, tar_mask):
        """
        :param x: target-side embeddings [b, token, d_model]
        :param memory: encoder output attended by cross-attention
        :param memory_mask: mask over the encoder memory
        :param tar_mask: (causal) mask over the target sequence
        """
        for layer in self.layers:
            # DecoderLayer.forward takes (x, tar_mask, memory, memory_mask);
            # the previous positional call layer(x, memory, memory_mask,
            # tar_mask) silently passed the memory into the tar_mask slot.
            # Keyword arguments make the wiring explicit and correct.
            x = layer(x, tar_mask, memory=memory, memory_mask=memory_mask)
        return self.norm(x)


class EncoderLayer(nn.Module):
    """One encoder layer: self-attention and FFN, each in a residual connection."""

    def __init__(self,
                 d_model: int, dropout: float,
                 self_Attention: nn.Module, Feed_Forward_Network: nn.Module,
                 norm_method: str = 'deep_norm',
                 deep_norm_alpha: Union[int, float, None] = 2.,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.self_Attention = self_Attention
        self.Feed_Forward_Network = Feed_Forward_Network

        self.sublayer = clones(ResidualConnection(
            d_model, dropout, norm_method=norm_method, alpha=deep_norm_alpha), 2)

        # `Encoder.__init__` reads `layer.size` to build its final LayerNorm;
        # this attribute was missing (only `d_model` existed), so wrapping an
        # EncoderLayer in Encoder raised AttributeError. Keep both names.
        self.size = d_model
        self.d_model = d_model

    def forward(self, x, mask):
        """Apply masked self-attention, then the FFN, each residually."""
        attn_block, ffn_block = self.sublayer[0], self.sublayer[1]

        x = attn_block(x, lambda x: self.self_Attention(x, x, x, mask))
        return ffn_block(x, self.Feed_Forward_Network)


class DecoderLayer(nn.Module):
    """
    Decoder layer: self-attention, optional cross-attention, and feed forward.

    With use_decoder_only=True the cross-attention residual block is omitted
    (GPT-style decoder); the `use_decoder_only` property can toggle it later.
    """

    def __init__(self,
                 d_model: int, dropout: float,
                 self_Attention: Union[Callable, nn.Module],
                 cross_Attention: Union[Callable, nn.Module, None],
                 Feed_Forward_Network: nn.Module,
                 use_decoder_only: bool = False,
                 norm_method: str = 'deep_norm',
                 deep_norm_alpha: Union[int, float, None] = 2.,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.size = d_model
        self.self_Attention = self_Attention

        # norm_method/deep_norm_alpha were previously dropped here (only the
        # property setter forwarded them); pass them through so the
        # constructor honors the caller's choice.
        if not use_decoder_only:
            self.cross_Attention = cross_Attention
            self.sublayer = clones(ResidualConnection(
                d_model, dropout, norm_method=norm_method, alpha=deep_norm_alpha), 3)
        else:
            self.sublayer = clones(ResidualConnection(
                d_model, dropout, norm_method=norm_method, alpha=deep_norm_alpha), 2)

        self.Feed_Forward_Network = Feed_Forward_Network
        self.__use_decoder_only = use_decoder_only

        self.__d_model = d_model
        self.__dropout = dropout
        self.__norm_method = norm_method
        self.__deep_norm_alpha = deep_norm_alpha

    @property
    def use_decoder_only(self):
        # True when the cross-attention residual block is disabled
        return self.__use_decoder_only

    @use_decoder_only.setter
    def use_decoder_only(self, value: bool):
        # The old setter read/wrote `self._use_decoder_only` (single
        # underscore) — an attribute that never existed, since __init__ sets
        # the name-mangled `__use_decoder_only` — so any toggle raised
        # AttributeError. Use the same attribute as the getter throughout.
        if self.__use_decoder_only == value:
            return
        if value:
            # drop the cross-attention residual block (sublayer[-1])
            self.sublayer.pop(-1)
        else:
            self.sublayer.append(ResidualConnection(
                self.__d_model, self.__dropout,
                norm_method=self.__norm_method, alpha=self.__deep_norm_alpha))
        self.__use_decoder_only = value

    def forward(self, x, tar_mask, memory: Union[None, Tensor] = None,  memory_mask: Union[None, Tensor] = None):
        """
        sublayer[0] -> self-attention residual block
        sublayer[1] -> FFN residual block
        sublayer[2] -> cross-attention residual block (iff not decoder-only)

        :raises ValueError: if cross-attention is enabled but memory is None.
        """
        x = self.sublayer[0](
            x, lambda x: self.self_Attention(x, x, x, tar_mask))

        if len(self.sublayer) == 3:
            if memory is None:
                # `raise <str>` is a TypeError at runtime; raise a real
                # exception so the caller gets the intended message.
                raise ValueError(
                    "Transformer Decoder Layer received None as memory while "
                    "property use_decoder_only was set to False")
            m = memory
            x = self.sublayer[2](
                x, lambda x: self.cross_Attention(x, m, m, memory_mask))

        return self.sublayer[1](x, self.Feed_Forward_Network)


class EncoderDecoder(nn.Module):
    """Standard encoder-decoder skeleton: embed -> encode -> decode -> generate."""

    def __init__(self, encoder: Encoder, decoder: Decoder, generator: Generator, src_embd: Module, tar_embd: Module,  *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.encoder = encoder
        self.decoder = decoder
        self.src_embd = src_embd
        self.tar_embd = tar_embd
        self.generator = generator

    def forward(self, src, target, memory_mask=None, target_mask=None):
        """Encode the source, decode the target against it, project to vocab."""
        memory = self.encode(src, memory_mask)
        decoded = self.decode(memory, memory_mask, target, target_mask)
        return self.generator(decoded)

    def encode(self, src, memory_mask):
        """Embed the source tokens and run the encoder stack."""
        return self.encoder(self.src_embd(src), memory_mask)

    def decode(self, memory, memory_mask, target, tar_mask):
        """Embed the target tokens and run the decoder stack over memory."""
        return self.decoder(self.tar_embd(target), memory, memory_mask, tar_mask)

# It's not recommended to use this function to create a model.
# It's an earlier manual craft that produces a model with less flexibility in module combinations.
# Recommended: Transformer, TransformerDecoderLayer, TransformerEncoderLayer


def make_model(encoder_layers: int = 6,
               decoder_layers: int = 6,
               d_model: int = 512,
               d_ff: int = 2048,
               h: int = 8,
               dropout: float = 0.1,
               src_vocab: int | None = None,
               tgt_vocab: int | None = None,):
    """
    Build a full encoder-decoder transformer from hyperparameters.

    When both vocabulary sizes are integers, learned embeddings with
    positional encodings and a generator head are attached; otherwise
    identity modules are used and the caller works in raw d_model features.

    Notice: d_model must be a multiple of h (the number of heads).
    """
    multi_head_attention = MultiHeadAttention(
        heads=h, d_model=d_model, dropout=dropout)
    ffn = PositionWiseFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
    positon_encoding = PositionalEncoding(d_model=d_model, dropout=dropout)

    if isinstance(src_vocab, int) and isinstance(tgt_vocab, int):
        # token-id inputs: embed, add positions, and project back to vocab
        src_embd = nn.Sequential(
            Embeddings(d_model=d_model, vocab=src_vocab),
            deepcopy(positon_encoding))
        tar_embd = nn.Sequential(
            Embeddings(d_model=d_model, vocab=tgt_vocab),
            deepcopy(positon_encoding))
        generator = Generator(d_model=d_model, vocab=tgt_vocab)
    else:
        # feature inputs: pass everything through unchanged
        src_embd = nn.Identity()
        tar_embd = nn.Identity()
        generator = nn.Identity()

    encoder = Encoder(
        EncoderLayer(d_model=d_model, dropout=dropout,
                     self_Attention=deepcopy(multi_head_attention),
                     Feed_Forward_Network=deepcopy(ffn)),
        N=encoder_layers)
    decoder = Decoder(
        DecoderLayer(d_model=d_model, dropout=dropout,
                     self_Attention=deepcopy(multi_head_attention),
                     cross_Attention=deepcopy(multi_head_attention),
                     Feed_Forward_Network=deepcopy(ffn)),
        N=decoder_layers)

    model = EncoderDecoder(encoder=encoder, decoder=decoder,
                           src_embd=src_embd, tar_embd=tar_embd,
                           generator=generator)

    # Glorot/Xavier initialization for every weight matrix.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model


# interface


class TransformerEncoderLayer(nn.Module):
    """User-facing encoder layer that builds its own attention and FFN sub-modules."""

    def __init__(self, d_model: int, d_ff: int, heads: int,
                 norm_method: str = 'deep_norm',
                 deep_norm_alpha: Union[int, float, None] = 2.,
                 FFN_dropout: float = 0.1,
                 self_attn_dropout: float = 0.1,
                 residual_dropout: float = 0.1,
                 init_params: bool = True,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # sanity checks on the hyperparameters
        assert FFN_dropout < 1.
        assert self_attn_dropout < 1.
        assert residual_dropout < 1.
        assert d_model % heads == 0

        ffn = PositionWiseFeedForward(
            d_model=d_model, d_ff=d_ff, dropout=FFN_dropout)
        attn = MultiHeadAttention(
            heads=heads, d_model=d_model, dropout=self_attn_dropout)

        self.block = EncoderLayer(norm_method=norm_method,
                                  deep_norm_alpha=deep_norm_alpha,
                                  d_model=d_model,
                                  dropout=residual_dropout,
                                  self_Attention=attn,
                                  Feed_Forward_Network=ffn)

        if init_params:
            # Glorot/Xavier init on all weight matrices of the wrapped block
            for p in self.block.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)

    def forward(self, x: Tensor, mask: Tensor):
        """Delegate to the wrapped EncoderLayer."""
        # TODO: validate input shapes and mask before delegating
        return self.block(x, mask)
    

class TransformerDecoderLayer(nn.Module):
    """User-facing decoder layer that builds its own attention/FFN sub-modules.

    Set use_decoder_only=True for a GPT-style layer without cross-attention;
    in that mode cross_attn_dropout may be None.
    """

    def __init__(self,  d_model: int, heads: int, d_ff: int,
                 norm_method: str = 'deep_norm',
                 deep_norm_alpha: Union[int, float, None] = 2.,
                 use_decoder_only: bool = False,
                 FFN_dropout: float = 0.1,
                 self_attn_dropout: float = 0.1,
                 residual_dropout: float = 0.1,
                 cross_attn_dropout: Union[None, float] = 0.1,
                 init_params: bool = True,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # sanity checks on the hyperparameters
        assert FFN_dropout < 1.
        assert self_attn_dropout < 1.
        assert residual_dropout < 1.
        assert d_model % heads == 0

        ffn = PositionWiseFeedForward(
            d_model=d_model, d_ff=d_ff, dropout=FFN_dropout)

        attn = MultiHeadAttention(
            heads=heads, d_model=d_model, dropout=self_attn_dropout)

        # cross_attn_dropout is typed Optional, but the old code asserted
        # `cross_attn_dropout < 1.` and always built the cross-attention
        # module, so passing None crashed (TypeError) even in decoder-only
        # mode. Build cross-attention only when it will actually be used.
        if use_decoder_only:
            cross_attn = None
        else:
            if cross_attn_dropout is None:
                raise ValueError(
                    "cross_attn_dropout must be a float when use_decoder_only is False")
            assert cross_attn_dropout < 1.
            cross_attn = MultiHeadAttention(
                heads=heads, d_model=d_model, dropout=cross_attn_dropout)

        self.block = DecoderLayer(norm_method=norm_method, deep_norm_alpha=deep_norm_alpha,
                                  d_model=d_model, dropout=residual_dropout,
                                  self_Attention=attn,
                                  cross_Attention=cross_attn,
                                  use_decoder_only=use_decoder_only,
                                  Feed_Forward_Network=ffn)

        if init_params:
            # Glorot/Xavier init on all weight matrices of the wrapped block
            for p in self.block.parameters():
                if p.dim() > 1:
                    nn.init.xavier_uniform_(p)

    def forward(self, x: Tensor, mask: Tensor, memory: Tensor | None = None, memory_mask: Tensor | None = None):
        """Delegate to the wrapped DecoderLayer (x, mask, memory, memory_mask)."""
        return self.block(x, mask, memory, memory_mask)

    @property
    def use_decoder_only(self):
        # mirrors the wrapped DecoderLayer's flag
        return self.block.use_decoder_only

    @use_decoder_only.setter
    def use_decoder_only(self, value: bool):
        self.block.use_decoder_only = value


class TransformerEncoder(nn.Module):
    """Placeholder for a stacked-encoder interface (not yet implemented)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)


class TransformerDecoder(nn.Module):
    """Placeholder for a stacked-decoder interface (not yet implemented)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)


class Transformer(nn.Module):
    """Placeholder for the full transformer interface (not yet implemented)."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def forward(self,):
        # no-op stub; returns None
        pass


if __name__ == "__main__":
    # Ad-hoc smoke test for DecoderLayer (not a real test suite).
    from torch import randn
    from utils import get_subsequent_mask
    # temp_model = make_model(10, 10, 2)
    # y = temp_model(randn([4, 10, 512]))
    # Build a small decoder layer using the bare functional attention
    # (scaledDotProductAttention) instead of MultiHeadAttention modules.
    b = DecoderLayer(d_model=64, dropout=0.1,
                     self_Attention=scaledDotProductAttention,
                     cross_Attention=scaledDotProductAttention,
                     Feed_Forward_Network=PositionWiseFeedForward(64, 128, 0.1), use_decoder_only=False)

    print(len(b.sublayer))  # expect 3 residual blocks (self-attn, ffn, cross-attn)
    ls = b.sublayer[0]
    # NOTE(review): the use_decoder_only setter reads `self._use_decoder_only`,
    # which is never assigned (__init__ sets the name-mangled
    # `__use_decoder_only`) — confirm this toggle actually works.
    b.use_decoder_only = True
    print(len(b.sublayer))  # expect 2 after dropping the cross-attn block
    assert ls == b.sublayer[0]
    # b.use_decoder_only = False
    # print(len(b.sublayer))
    # assert ls == b.sublayer[0]
    x = randn([4, 8, 64])
    msk = get_subsequent_mask(8)  # causal (subsequent-position) mask for 8 tokens
    print(msk.shape)
    y = b(x, msk)
    # block = TransformerEncoderLayer()
