



from torch import nn
from torch import ones
from torch import zeros
from torch import Tensor
from torch import matmul
from torch import nn
from torch import triu
from torch import full
from copy import deepcopy
import math
import parameters as p


def clones(layer, N=1) -> nn.ModuleList:
    """Return a ModuleList of N independent deep copies of `layer`.

    Each copy has its own parameters (no weight sharing); with the
    default N=1 a single-copy list is produced.
    """
    copies = (deepcopy(layer) for _ in range(N))
    return nn.ModuleList(copies)


class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable scale/shift.

    NOTE(review): this uses Tensor.std (Bessel-corrected, unbiased) and adds
    eps to the std rather than the variance, so it is not numerically
    identical to nn.LayerNorm — matches the Annotated-Transformer style.
    """

    def __init__(self, features, eps=1e-6, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # Learnable per-feature gain (init 1) and bias (init 0).
        self.alpha = nn.Parameter(ones(features))
        self.beta = nn.Parameter(zeros(features))
        self.eps = eps  # numerical-stability term added to the std

    def forward(self, x: Tensor):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)

        # Same association as the original expression:
        # (alpha * centered) / (std + eps) + beta
        return self.alpha * (x - mu) / (sigma + self.eps) + self.beta

 
def get_subsequent_mask(size: int) -> Tensor:
    '''
    Mask out subsequent positions.

    Returns a boolean tensor of shape [size, size] (not [1, size, size]
    as previously documented) whose strict upper triangle is True.
    True entries mark the "future" positions to be masked out; the lower
    triangle and the diagonal stay False so each position may attend to
    itself and to earlier positions. Broadcasts against a
    [batch, 1, size] key-padding mask.
    '''
    # full(..., fill_value=True) already yields a bool tensor, so the
    # former trailing `== True` comparison was a redundant no-op.
    return triu(full([size, size], fill_value=True), diagonal=1)
    
def get_key_padding_mask(tokens: Tensor):
    """Boolean mask of padding positions, shaped [batch, 1, seq_len].

    True marks a PAD token (index `p.PAD_INDEX`); the singleton middle
    dim lets the mask broadcast over query positions in attention.
    """
    is_pad = tokens.eq(p.PAD_INDEX)
    return is_pad.unsqueeze(-2)

def get_key_padding_mask_with_subsequent_mask(tokens: Tensor):
    '''
    Combined decoder self-attention mask.

    input shape: [b, token_indices]
    Returns the element-wise OR of the causal (subsequent-position) mask
    and the key-padding mask, broadcast together: True wherever a key is
    padding OR lies in the future of the query position.
    '''
    seq_len = tokens.size(-1)
    causal = get_subsequent_mask(seq_len)
    padding = get_key_padding_mask(tokens)
    return causal | padding






# Sinkhorn Sorting Network (Sparse Sinkhorn Attention) https://arxiv.org/abs/2002.11296
# Learnable patterns for attention computing.

# Routing Transformer 
# Efficient Content-Based Sparse Attention with Routing Transformers
# https://arxiv.org/pdf/2003.05997.pdf
# cluster attention.

# Reformer 
# https://openreview.net/attachment?id=rkgNKkHtvB&name=original_pdf
# LSH (locality-sensitive hashing) attention

# Low-rank representation of K.
# 1. compressed attention https://www.arxiv.org/abs/1801.10198
# 2. Linformer https://www.arxiv.org/abs/2006.04768


# k,q dot first -> v,k dot first
# Linear Transformer  https://arxiv.org/abs/2210.10340
#


# Swin Transformer
# Window MSA
# Shifted Window MSA






def scaledDotProductAttention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    '''
    Compute scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V.

        Q of shape [batch, token, d_k]\n
        K of shape [batch, token, d_k]\n
        V of shape [batch, token, d_v]\n
    Extra leading dims (e.g. a heads dim as [b, heads, token, d_k]) are
    fine as long as the last two dims are [token, d] — the matmul
    contracts only over the final dimension. (The old docstring's
    [b, token, heads, d_k] layout would attend across heads, not tokens.)

    mask: optional bool tensor broadcastable to scores' shape; True
          entries are suppressed (set to -inf before the softmax).
    dropout: optional dropout module applied to the attention weights.

    Returns only the attention output of shape [..., token, d_v] — the
    previous docstring's claim that the softmax weights are also
    returned was wrong; they are not exposed to the caller.
    '''

    d_k = Q.size(-1)

    # [..., token, token] similarity scores, scaled so softmax gradients
    # stay well-conditioned for large d_k.
    scores = matmul(Q, K.transpose(-1, -2)) / math.sqrt(d_k)

    if mask is not None:
        # In-place fill is safe: `scores` is a fresh tensor local to this call.
        scores.masked_fill_(mask, float('-inf'))

    p_attn = nn.functional.softmax(scores, dim=-1)

    if dropout is not None:
        p_attn = dropout(p_attn)

    return matmul(p_attn, V)  # [..., token, d_v]

def multiheadAttention():
    """Placeholder for multi-head attention — not yet implemented; returns None."""
    pass


def local_attention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    """Local attention stub — not yet implemented; returns None."""
    # also called truncated_attention
    pass

def stride_attention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    """Strided attention stub — not yet implemented; returns None."""
    pass

def global_attention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    """Global attention stub — not yet implemented; returns None."""
    pass

def random_attention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    """Random (sampled-pattern) attention stub — not yet implemented; returns None."""
    pass

def clustering_attention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    """Clustering-based attention stub — not yet implemented; returns None."""
    pass

def sparse_attention(Q: Tensor, K: Tensor, V: Tensor, mask=None, dropout: nn.Module = None):
    """Sparse attention stub — not yet implemented; returns None."""
    pass

def windowAttention():
    """Window MSA (Swin-style) stub — not yet implemented; returns None."""
    pass
def shiftedWindowAttention():
    """Shifted-window MSA (Swin-style) stub — not yet implemented; returns None."""
    pass


if __name__ == '__main__':
    from torch import tensor

    # Quick manual smoke test of the padding mask on a toy batch.
    # NOTE(review): meaningful only if p.PAD_INDEX matches a value in the
    # sample (looks like 2 was intended) — confirm against parameters.py.
    sample = tensor([[1, 3, 4, 2, 2, 2], [1, 1, 4, 4, 4, 2]])
    print(get_key_padding_mask(sample))

