import matplotlib.pyplot as plt
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
# torch中变量封装函数Variable.
from torch.autograd import Variable
from Tuatara_Transfromer.embedding import Embeddings

def attention(query, key, value, mask=None, dropout=None, verbose=True):
    """Scaled dot-product attention: softmax(Q @ K^T / sqrt(d_k)) @ V.

    :param query: query tensor; last dim is the model/embedding dimension d_k
    :param key: key tensor, same trailing dims as query
    :param value: value tensor
    :param mask: optional mask tensor; positions where mask == 0 are blocked
                 (used by the decoder; the encoder passes no mask)
    :param dropout: optional dropout *layer* (an nn.Dropout module, not a rate)
                    applied to the attention weights
    :param verbose: when True (default, matching the original behavior),
                    print every intermediate tensor for teaching purposes
    :return: (attention_output, attention_weights)
    """

    if verbose:
        print("")
        print("Attention Input: query")
        print(query)

        print("")
        print("Attention Input: key")
        print(key)

        print("")
        print("Attention Input: value")
        print(value)

    # The last dimension of query equals the embedding (model) dimension.
    d_k = query.size(-1)

    # Step 1: raw attention scores.
    # Swap the last two axes of key (tokens x dims -> dims x tokens); the
    # leading batch dimension is untouched.
    key_t = torch.transpose(key, -2, -1)
    if verbose:
        print("")
        print("Key Transpose")
        print(key_t)

    # Q @ K^T gives token-to-token similarity scores.
    q_multiple_k_tran = torch.matmul(query, key_t)
    if verbose:
        print("")
        print("Query X Key_Transpose")
        print(q_multiple_k_tran)

    # Scale by sqrt(d_k) so large model dimensions do not blow up the dot
    # products and push softmax into saturated (hard-to-train) regions.
    scale_value = math.sqrt(d_k)
    if verbose:
        print("")
        print("scale_value", scale_value)
    scores = q_multiple_k_tran / scale_value
    if verbose:
        print("Query X Key (Similarity Scores)")
        print(scores)

    # Step 2: optional masking (decoder-side). Wherever mask == 0, replace the
    # score with -1e9 so softmax assigns it ~zero probability.
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)

    # Step 3: softmax over the last dimension turns scores into weights that
    # sum to 1 across the keys.
    attention_weights = F.softmax(scores, dim=-1)
    if verbose:
        print("")
        print("attention_weights")
        print(attention_weights)
        print(attention_weights.shape)

    # Step 4: optionally drop out some weights (regularization during training).
    if dropout is not None:
        attention_weights = dropout(attention_weights)

    # Weighted sum of the values gives the final attention output.
    attention_output = torch.matmul(attention_weights, value)
    if verbose:
        print("")
        print("attention_output")
        print(attention_output)
        print(attention_output.shape)

    return attention_output, attention_weights


def test():
    """Demo: run scaled dot-product self-attention on a tiny example tensor.

    The original version also built a LongTensor vocabulary input and passed
    it through ``Embeddings``, but that result was immediately overwritten by
    the hand-made tensor below (dead code), so it has been removed. Printed
    output is unchanged.
    """

    # Example matrix: natural numbers 0..11 shaped (batch=2, tokens=3, dim=2),
    # cast to float so matmul/softmax work.
    embedded_x = torch.arange(12).reshape(2, 3, 2).to(dtype=torch.float)

    print("embedded_x")
    print(embedded_x)
    print(embedded_x.shape)

    # Self-attention: query, key and value are all the same tensor.
    query = key = value = embedded_x

    attention_mat, softemaxed_score = attention(query, key, value)

    print("")
    print("attention_mat(Q x K x V)")
    print(attention_mat)
    print(attention_mat.shape)


def test_transpose():
    """Show how torch.transpose swaps the first two axes of a 3-D tensor."""
    sample = torch.arange(24).reshape(2, 3, 4)
    print(sample)
    swapped = torch.transpose(sample, 0, 1)
    print(swapped)


# Script entry point: run the self-attention demo when executed directly.
if __name__ == '__main__':
    test()
    # test_transpose()