import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.autograd import Variable
from one_embedding import Embeddings
from two_position import PositionEmbeddings
import copy
'''
Illustration of the two mask patterns used with attention below
(1 = position may be attended to, 0 = blocked):

Full mask (no restriction):
[1,1,1]
[1,1,1]
[1,1,1]

Causal (lower-triangular) mask — each position attends only to itself
and earlier positions:
[1,0,0]
[1,1,0]
[1,1,1]

'''

# class Attention(nn.Module):
#     # single-head attention mechanism
#     def __init__(self,dp=0):
#         super().__init__()
#         self.dropout = nn.Dropout(dp)

def attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, mask=None, dp=0,
              training: bool = True):
    '''
    Scaled dot-product attention (Vaswani et al., 2017).

    q, k, v: tensors of shape [..., seq_len, d_k] (any leading batch/head dims,
             as long as they broadcast/match).
    mask: optional tensor broadcastable to the score shape [..., seq_len, seq_len];
          positions where mask == 0 are blocked from being attended to.
    dp: dropout probability applied to the attention weights.
    training: whether dropout is active. Defaults to True, which matches the
              original behavior (a per-call nn.Dropout was always in train mode).

    Returns (output, attention_weights) where output is [..., seq_len, d_k]
    and attention_weights is [..., seq_len, seq_len].
    '''
    d_k = q.size(-1)
    # Scale by sqrt(d_k) so the dot products stay in a range where softmax
    # has usable gradients.
    scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Push blocked positions toward -inf so softmax gives them ~0 weight.
        # The mask must broadcast against [..., seq_len, seq_len].
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    # Fix: apply dropout to the attention weights (standard placement), not to
    # the output, and use the functional form with an explicit training flag
    # instead of constructing a fresh nn.Dropout (always train-mode) per call.
    p_attn = F.dropout(p_attn, p=dp, training=training)
    out = torch.matmul(p_attn, v)
    return out, p_attn

def clone_model(dim=512, n=4):
    '''
    Return a ModuleList of n deep copies of a Linear(dim, dim) layer.

    dim: input/output width of each linear layer.
    n: number of copies. Bug fix: the original hard-coded range(4) and
       silently ignored n.

    Note: all copies start from the same random initialization because they
    are deep copies of a single prototype (the standard "clones" helper
    pattern from the Annotated Transformer).
    '''
    prototype = nn.Linear(dim, dim)
    return nn.ModuleList([copy.deepcopy(prototype) for _ in range(n)])

class MultiAttention(nn.Module):
    '''
    Multi-head attention: linearly project q/k/v, run scaled dot-product
    attention independently per head, then concatenate the heads and apply
    a final output projection.
    '''

    def __init__(self, num_heads, dim, dp=0):
        '''
        num_heads: number of attention heads (e.g. dim=512, num_heads=8
                   splits into 8 heads of width 64).
        dim: model dimension of the input and output.
        dp: dropout probability forwarded to the attention weights.
        '''
        super().__init__()
        self.num_heads = num_heads
        self.dim = dim
        self.dp = dp
        assert dim % num_heads == 0, 'dim must be divisible by num_heads'
        self.head_dim = dim // num_heads  # per-head width, e.g. 512 // 8 = 64
        # Four projections: one each for q, k, v plus the final output layer.
        # Bug fix: was clone_model(512, 4), which broke any dim != 512.
        self.linears = clone_model(dim, 4)

    def forward(self, q1, k1, v1, mask=None):
        '''
        q1, k1, v1: [batch_size, seq_len, dim]
        mask: optional, broadcastable to [batch_size, num_heads, seq_len, seq_len];
              positions where mask == 0 are blocked.
        Returns: [batch_size, seq_len, dim]
        '''
        batch_size = q1.size(0)
        # Project then split into heads:
        # [B, S, dim] -> [B, S, H, Hd] -> [B, H, S, Hd]
        q, k, v = [
            proj(x).view(batch_size, -1, self.num_heads, self.head_dim).transpose(1, 2)
            for proj, x in zip(self.linears, (q1, k1, v1))
        ]
        out, p_atten = attention(q, k, v, mask, dp=self.dp)
        # Merge the heads back: [B, H, S, Hd] -> [B, S, H*Hd]
        out = out.transpose(1, 2).contiguous().view(
            batch_size, -1, self.num_heads * self.head_dim)
        # Final output projection (the 4th cloned linear).
        return self.linears[-1](out)







if __name__ == '__main__':
    # Smoke test of the single-call attention function.
    q = k = v = torch.rand(5, 8, 10, 512)
    out, weights = attention(q, k, v, mask=None)
    # print(out)
    print(weights)
    print(weights.size())

    quit()

    # ------------------------------------------------------------------
    # Everything below is skipped by quit() above; kept as a fuller demo.
    # ------------------------------------------------------------------

    # Build token embeddings: (5, 10) token ids -> (5, 10, 512) vectors.
    embed = Embeddings(100, 512)
    tokens = torch.randint(0, 100, size=(5, 10))
    embedded = embed(tokens)  # (5, 10, 512)
    # Add positional encodings.
    positionembed = PositionEmbeddings(dim=512)
    encoded = positionembed(embedded)
    print(encoded.shape)  # (5, 10, 512)

    q = k = v = encoded
    # mask=torch.ones(5,10,10)
    # Any shape broadcastable to (5, 8, 10, 10) works here, e.g. (10, 10),
    # (1, 1, 10, 10) or (1, 10, 10).
    mask = torch.zeros(5, 8, 10, 10)
    mask = torch.triu(mask, diagonal=0)

    # # Single-head self-attention: (5, 10, 512)
    # res, _ = attention(q, k, v, mask=None)
    # print(_.int())

    # Multi-head attention: (5, 10, 512)
    multiattention = MultiAttention(8, 512)

    for name, param in multiattention.named_parameters():
        print(name, param.shape)

    res = multiattention(q, k, v, mask=mask)
    print(res)
    print(res.size())