"""自注意力机制定义： K = V = Q"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import math
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt

class Embeddings(nn.Module):
    """Token-embedding layer whose output is scaled by sqrt(d_model).

    See file 014 for details.
    """

    def __init__(self, d_model, vocab):
        """
        Args:
            d_model: dimensionality of each embedding vector.
            vocab: vocabulary size (number of rows in the lookup table).
        """
        super(Embeddings, self).__init__()
        # One d_model-dimensional vector per vocabulary id.
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model

    def forward(self, x):
        """Look up embeddings for integer token ids `x` and scale by sqrt(d_model)."""
        scale = math.sqrt(self.d_model)
        embedded = self.lut(x)
        return embedded * scale

class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding added to token embeddings.

    Precomputes a (1, max_len, d_model) table of sin/cos values once in
    ``__init__``; ``forward`` adds the first ``seq_len`` rows of that table
    to its input and applies dropout.  See file 015 for details.
    """

    def __init__(self, d_model, dropout, max_len=5000):
        """
        Args:
            d_model: embedding dimension (the 0::2 / 1::2 split assumes it is even).
            dropout: dropout probability applied after adding the encoding.
            max_len: maximum sequence length supported by the table.
        """
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        # Frequencies 1 / 10000^(2i/d_model), computed in log space.  This is
        # the original authors' initialization; other schemes would likely
        # work too — no need to agonize over it.
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dims: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dims: cosine
        pe = pe.unsqueeze(0)  # add batch dim -> (1, max_len, d_model)
        # Buffer: saved with the module and moved by .to()/.cuda(), but not a
        # trainable parameter, so it never receives gradients.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to `x` of shape (batch, seq_len, d_model)."""
        # Buffers do not require grad, so the deprecated `Variable(...,
        # requires_grad=False)` wrapper used in old PyTorch is unnecessary.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

"""采用转置矩阵相乘的方法构造注意力机制函数"""
def attention(query, key, value, mask=None, dropout=None):
    """详见 012_注意力机制2.py """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)

    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim = -1)

    if dropout is not None:
        # 将p_attn传入dropout对象中进行'丢弃'处理
        p_attn = dropout(p_attn)
    # 最后, 根据公式将p_attn与value张量相乘获得最终的query注意力表示, 同时返回注意力张量
    return torch.matmul(p_attn, value), p_attn


if __name__ == '__main__':
    """         3. Positional-encoding demo              """
    # Embedding dimension is 512.
    d_model = 512
    # Vocabulary size is 1000 (self.lut.weight will have 1000 rows).
    vocab = 1000
    # Dropout probability is 0.1.
    dropout = 0.1
    # Maximum sentence length.
    max_len = 60

    # Input is a 2 x 4 batch of token ids.  The old `Variable` wrapper is
    # deprecated (a no-op since PyTorch 0.4), so a plain tensor is used.
    x1 = torch.LongTensor([[100, 2, 421, 508], [491, 998, 1, 221]])
    emb = Embeddings(d_model, vocab)
    embr = emb(x1)

    pe = PositionalEncoding(d_model, dropout, max_len)
    pe_result = pe(embr)
    # print("pe_result:", pe_result)

    # All-zero mask: every score is replaced by -1e9, so the attention
    # weights come out uniform across each row.
    mask = torch.zeros(2, 4, 4)
    # Self-attention: query, key and value are the same tensor.
    query = key = value = pe_result
    # attn, p_attn = attention(query, key, value)
    attn, p_attn = attention(query, key, value, mask=mask)
    print(attn)
    print(attn.shape)
    print(p_attn)
    print(p_attn.shape)

