import torch 
import torch.nn as nn 
import numpy as np

# class SelfAttention(nn.Module):
#     def __init__(self):
#         super(SelfAttention,self).__init__()

#     def forward(self,Q,K,V,mask):
#         d_k=Q.size(-1)
#         attn=torch.matmul(Q,K.transpose(-1,-2))/np.sqrt(d_k)
#         attn.masked_fill_(mask,1e-9)
#         attn=nn.Softmax(dim=-1)(attn)
#         out = torch.matmul(attn,V)
#         return out 
    
# class MultiheadAttention(nn.Module):
#     def __init__(self,d_model,n_heads=4):
#         super(MultiheadAttention,self).__init__()
#         self.n_heads=n_heads
#         self.d_model=d_model
#         self.W_Q = nn.Linear(d_model,d_model)
#         self.W_K = nn.Linear(d_model,d_model)
#         self.W_V = nn.Linear(d_model,d_model)
#         self.W_cat = nn.Linear(d_model,d_model)

#     def forward(self,input_Q,input_K,input_V,mask):
#         bs=input_Q.size(0)
#         res=input_Q
#         Q=self.W_Q(input_Q).view(bs,-1,self.n_heads,self.d_model//self.n_heads).transpose(1,2)
#         K=self.W_K(input_K).view(bs,-1,self.n_heads,self.d_model//self.n_heads).transpose(1,2)
#         V=self.W_V(input_V).view(bs,-1,self.n_heads,self.d_model//self.n_heads).transpose(1,2)
#         mask=mask.unsqueeze(1).repeat(1,self.n_heads,1,1)
#         out = SelfAttention()(Q,K,V,mask)
#         out = torch.cat([out[:,i,:,:] for i in range(self.n_heads)],dim=-1)
#         out = self.W_cat(out)
#         out = out+res 
#         out= nn.LayerNorm(self.d_model)(out)
#         return out 
    
# class PosEncoding(nn.Module):
#     def __init__(self,d_model,max_len,drop_out=0.01):
#         super(PosEncoding,self).__init__()
#         self.dropout=nn.Dropout(drop_out)
#         pos= torch.arange(0,max_len,dtype=torch.float).unsqueeze(1)
#         pe=torch.zeros(max_len,d_model)
#         div_term=pos/pow(10000,torch.arange(0,d_model,2,dtype=torch.float)/d_model)
#         pe[:,0::2]=torch.sin(div_term)
#         pe[:,1::2]=torch.cos(div_term)
#         self.register_buffer('pe',pe.unsqueeze(0))
    
#     def forward(self,x):
#         out = x+self.pe[:,:x.size(1),:]
#         return self.dropout(out)
    

class SelfAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self):
        super(SelfAttention, self).__init__()

    def forward(self, Q, K, V, mask):
        """Apply scaled dot-product attention.

        Args:
            Q: queries, shape (..., len_q, d_k).
            K: keys, shape (..., len_k, d_k).
            V: values, shape (..., len_k, d_v).
            mask: bool tensor broadcastable to (..., len_q, len_k);
                True marks positions that must NOT be attended to.

        Returns:
            Attention output of shape (..., len_q, d_v).
        """
        d_k = Q.size(-1)
        # Scale logits by sqrt(d_k) to keep softmax in a stable regime.
        # NOTE: `np.math.sqrt` (original) was a private alias removed in
        # NumPy >= 1.25; `d_k ** 0.5` needs no import at all.
        scores = torch.matmul(Q, K.transpose(-1, -2)) / (d_k ** 0.5)
        # BUG FIX: masked logits must be a large NEGATIVE number so softmax
        # drives their weight to ~0. The original filled with 1e-9 (a tiny
        # positive), leaving masked positions with near-average attention.
        # Out-of-place masked_fill also avoids mutating the caller's data.
        scores = scores.masked_fill(mask, -1e9)
        attn = torch.softmax(scores, dim=-1)
        return torch.matmul(attn, V)


class MultiheadAttention(nn.Module):
    """Multi-head scaled dot-product attention with a residual connection
    and post-LayerNorm on the output."""

    def __init__(self, d_model, n_heads):
        """
        Args:
            d_model: model (embedding) width; must be divisible by n_heads.
            n_heads: number of attention heads.
        """
        super(MultiheadAttention, self).__init__()
        if d_model % n_heads != 0:
            raise ValueError("d_model must be divisible by n_heads")
        self.d_model = d_model
        self.n_heads = n_heads
        self.W_Q = nn.Linear(d_model, d_model)
        self.W_K = nn.Linear(d_model, d_model)
        self.W_V = nn.Linear(d_model, d_model)
        self.W_cat = nn.Linear(d_model, d_model)
        # BUG FIX: LayerNorm must be created ONCE here so its affine
        # parameters are registered and trainable. The original built a
        # fresh (re-initialized, untracked) nn.LayerNorm on every forward.
        self.norm = nn.LayerNorm(d_model)

    def forward(self, input_Q, input_K, input_V, mask):
        """
        Args:
            input_Q: (batch, len_q, d_model) queries.
            input_K: (batch, len_k, d_model) keys.
            input_V: (batch, len_k, d_model) values.
            mask: bool (batch, len_q, len_k); True = do not attend.

        Returns:
            (batch, len_q, d_model) tensor, LayerNorm(attention + residual).
        """
        bs = input_Q.size(0)
        res = input_Q
        d_head = self.d_model // self.n_heads
        # Split d_model into heads: (bs, heads, seq, d_head).
        Q = self.W_Q(input_Q).reshape(bs, -1, self.n_heads, d_head).transpose(1, 2)
        K = self.W_K(input_K).reshape(bs, -1, self.n_heads, d_head).transpose(1, 2)
        V = self.W_V(input_V).reshape(bs, -1, self.n_heads, d_head).transpose(1, 2)
        # Broadcasting over the head dim replaces the original's
        # mask.unsqueeze(1).repeat(...) without materializing copies.
        head_mask = mask.unsqueeze(1)
        # Scaled dot-product attention, inlined (previously a per-call
        # SelfAttention() instantiation). BUG FIX: masked logits get -1e9,
        # not 1e-9, so softmax actually zeroes them out.
        scores = torch.matmul(Q, K.transpose(-1, -2)) / (d_head ** 0.5)
        scores = scores.masked_fill(head_mask, -1e9)
        out = torch.matmul(torch.softmax(scores, dim=-1), V)
        # Merge heads back: (bs, heads, len_q, d_head) -> (bs, len_q, d_model).
        # Equivalent to the original torch.cat over per-head slices.
        out = out.transpose(1, 2).reshape(bs, -1, self.d_model)
        return self.norm(self.W_cat(out) + res)
    

class PosEncoding(nn.Module):
    """Sinusoidal positional encoding (sin on even dims, cos on odd dims)
    added to the input, followed by dropout."""

    def __init__(self, d_model, max_len, drop_out):
        """
        Args:
            d_model: embedding width of the inputs.
            max_len: maximum sequence length to precompute encodings for.
            drop_out: dropout probability applied after adding the encoding.
        """
        super(PosEncoding, self).__init__()
        self.dropout = nn.Dropout(drop_out)
        # angles[p, i] = p / 10000^(2i / d_model), shape (max_len, d_model/2).
        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        angles = pos / pow(10000, torch.arange(0, d_model, 2, dtype=torch.float) / d_model)
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(angles)
        # Slice to d_model // 2 columns so odd d_model no longer crashes
        # with a shape mismatch; even d_model is unchanged.
        pe[:, 1::2] = torch.cos(angles[:, : d_model // 2])
        # Buffer (not Parameter): saved in state_dict and moved by .to(),
        # but not trained.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, d_model).

        Returns the dropout-regularized sum; seq_len must be <= max_len.
        """
        out = x + self.pe[:, :x.size(1), :]
        # BUG FIX: the original constructed self.dropout but never applied
        # it (the earlier commented-out version of this class did).
        return self.dropout(out)


if __name__ == "__main__":
    # Smoke test: cross-attention with distinct query/key lengths,
    # then positional encoding on a longer sequence.
    queries = torch.randn(32, 3, 16)
    keys = torch.randn(32, 4, 16)
    values = torch.randn(32, 4, 16)
    attn_mask = torch.zeros(32, 3, 4, dtype=bool)

    mha = MultiheadAttention(d_model=16, n_heads=4)
    attended = mha(queries, keys, values, attn_mask)
    print(attended.shape)

    sequence = torch.randn(32, 50, 8)
    encoder = PosEncoding(d_model=8, max_len=128, drop_out=0.01)
    encoded = encoder(sequence)
    print(encoded.shape)



    


