import torch
import math
import torch.nn.functional as F
from input_part.test_positional import pe_result
from torch.autograd import Variable


def attention(query, key, value, mask=None, dropout=None):
    """Compute scaled dot-product attention.

    Args:
        query, key, value: tensors whose last dimension is the embedding
            size d_k; the last two dims of ``key`` are transposed for the
            score matmul (batch/head dims follow the caller's layout).
        mask: optional tensor compared element-wise against the score
            matrix; positions where ``mask == 0`` are suppressed.
        dropout: optional instantiated ``nn.Dropout`` applied to the
            attention weights (default ``None`` = no dropout).

    Returns:
        A pair ``(output, weights)``: the attended values
        (``weights @ value``) and the attention weight tensor itself.
    """
    # Scale by sqrt(d_k) so score magnitudes stay stable as the
    # embedding size grows ("scaled" dot-product attention).
    scale = math.sqrt(query.size(-1))
    scores = torch.matmul(query, key.transpose(-2, -1)) / scale

    # Masked positions get a large negative score so softmax drives
    # their weight to (effectively) zero.
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)

    # Normalize over the last dimension to obtain attention weights.
    weights = F.softmax(scores, dim=-1)

    # Optional random zeroing of weights for regularization.
    if dropout is not None:
        weights = dropout(weights)

    return torch.matmul(weights, value), weights


# Self-attention: the positional-encoding output serves as query,
# key and value simultaneously.
query = key = value = pe_result
print("pe_result:", pe_result)
'''
pe_result: tensor([[[ 29.7518, -15.3727,  -2.8602,  ..., -29.2424,  -7.0830,   0.0000],
         [ 11.5115,  19.4461,  -8.7785,  ...,  -6.1099, -55.0080,  -9.8379],
         [-48.8230, -16.4052,  -0.0000,  ...,  -4.5749,   0.0000,   0.0000],
         [-16.1348, -24.1537, -25.1769,  ..., -13.8004,  46.3363,  27.4483]],

        [[-33.6334,  -2.1790,   0.0000,  ...,   7.0321,  -6.0657,  24.3996],
         [  0.0000, -29.2411, -16.3258,  ...,   0.0000,  14.9724,  11.7952],
         [ -3.7538,  -4.1581,  -7.5715,  ...,   0.0000, -17.7000, -12.1912],
         [ 18.7931,   0.0000,  22.7199,  ...,  10.0451,   5.1121,  -4.3693]]],
       grad_fn=<MulBackward0>) size 2x4x512
'''

# Run without a mask: each position attends freely to every position.
attn, p_attn = attention(query, key, value)
print("attn:", attn)        # attended representation of the queries
print("p_attn:", p_attn)    # attention weight tensor

# NOTE: p_attn comes out as the identity matrix because query == key,
# so each row's self-score dominates after softmax.
'''
attn: tensor([[[ 29.7518, -15.3727,  -2.8602,  ..., -29.2424,  -7.0830,   0.0000],
         [ 11.5115,  19.4461,  -8.7785,  ...,  -6.1099, -55.0080,  -9.8379],
         [-48.8230, -16.4052,   0.0000,  ...,  -4.5749,   0.0000,   0.0000],
         [-16.1348, -24.1537, -25.1769,  ..., -13.8004,  46.3363,  27.4483]],

        [[-33.6334,  -2.1790,   0.0000,  ...,   7.0321,  -6.0657,  24.3996],
         [  0.0000, -29.2411, -16.3258,  ...,   0.0000,  14.9724,  11.7952],
         [ -3.7538,  -4.1581,  -7.5715,  ...,   0.0000, -17.7000, -12.1912],
         [ 18.7931,   0.0000,  22.7199,  ...,  10.0451,   5.1121,  -4.3693]]],
       grad_fn=<UnsafeViewBackward0>)
p_attn: tensor([[[1., 0., 0., 0.],
         [0., 1., 0., 0.],
         [0., 0., 1., 0.],
         [0., 0., 0., 1.]],

        [[1., 0., 0., 0.],
         [0., 1., 0., 0.],
         [0., 0., 1., 0.],
         [0., 0., 0., 1.]]], grad_fn=<SoftmaxBackward0>)'''

# Masked variant: a 2x4x4 all-zero mask suppresses every position
# (mask == 0 everywhere), so softmax degenerates to a uniform 1/4
# weight over each row.
# Fix: torch.autograd.Variable has been a deprecated no-op wrapper
# since PyTorch 0.4 — a plain tensor behaves identically.
mask = torch.zeros(2, 4, 4)
print("mask:", mask)
'''
mask: tensor([[[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.]],

        [[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.]]]) size 2x4x4 '''
# Same inputs as above, now with the mask applied.
attn, p_attn = attention(query, key, value, mask=mask)
print("attn:", attn)        # attended representation of the queries
print("p_attn:", p_attn)    # attention weight tensor

'''
attn: tensor([[[ -3.2952,  -4.0384,  -2.6232,  ...,  -9.8717,   2.2404,  10.5469],
         [ -3.2952,  -4.0384,  -2.6232,  ...,  -9.8717,   2.2404,  10.5469],
         [ -3.2952,  -4.0384,  -2.6232,  ...,  -9.8717,   2.2404,  10.5469],
         [ -3.2952,  -4.0384,  -2.6232,  ...,  -9.8717,   2.2404,  10.5469]],

        [[  1.0739, -12.6822,   3.4819,  ...,  -1.7687,  -3.7717,   9.7256],
         [  1.0739, -12.6822,   3.4819,  ...,  -1.7687,  -3.7717,   9.7256],
         [  1.0739, -12.6822,   3.4819,  ...,  -1.7687,  -3.7717,   9.7256],
         [  1.0739, -12.6822,   3.4819,  ...,  -1.7687,  -3.7717,   9.7256]]],
       grad_fn=<UnsafeViewBackward0> size 2x4x512)
p_attn: tensor([[[0.2500, 0.2500, 0.2500, 0.2500],
         [0.2500, 0.2500, 0.2500, 0.2500],
         [0.2500, 0.2500, 0.2500, 0.2500],
         [0.2500, 0.2500, 0.2500, 0.2500]],

        [[0.2500, 0.2500, 0.2500, 0.2500],
         [0.2500, 0.2500, 0.2500, 0.2500],
         [0.2500, 0.2500, 0.2500, 0.2500],
         [0.2500, 0.2500, 0.2500, 0.2500]]], grad_fn=<SoftmaxBackward0> size 2x4x4)
'''
