import torch

from config import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need"):
#   PE[pos, 2i]   = sin(pos / 10000^(2i/d_model))
#   PE[pos, 2i+1] = cos(pos / 10000^(2i/d_model))
def Positional(x, em_add_pe=True):
    """Build the sinusoidal positional-encoding table, optionally adding it to x.

    Args:
        x: when em_add_pe is True, an embedding tensor whose second-to-last
           dim is the sequence length (typically [batch, seq_len, d_model]);
           when em_add_pe is False, only x.shape[-1] is read as a length.
        em_add_pe: if True, return x + PE broadcast over x's leading dims;
            if False, return the raw PE table truncated to x.shape[-1] rows.

    Returns:
        A tensor of x.shape (em_add_pe=True), or a [x.shape[-1], d_model]
        CPU table (em_add_pe=False).
    """
    pos = torch.arange(0, Max_len, dtype=torch.float32).unsqueeze(1)  # [Max_len, 1]
    # One frequency per sin/cos PAIR: 1 / 10000^(2i/d_model).  The previous
    # version used 10000^((2i+1)/d_model) for the cosine columns, which
    # deviates from the paper's formula — both members of pair i share the
    # exponent 2i.
    inv_freq = torch.div(
        1,
        torch.tensor(10000.0).pow(torch.arange(0, d_model, 2, dtype=torch.float32) / d_model),
    ).unsqueeze(0)  # [1, ceil(d_model/2)]
    angles = torch.matmul(pos, inv_freq)  # [Max_len, 1] @ [1, k] -> [Max_len, k]

    PE = torch.zeros([Max_len, d_model], dtype=torch.float32)
    PE[:, 0::2] = torch.sin(angles)
    # Slice guards an odd d_model (one fewer cosine column than sine columns).
    PE[:, 1::2] = torch.cos(angles)[:, : d_model // 2]
    PE.requires_grad_(False)  # the table is a constant, never trained

    if em_add_pe:
        seq_len = x.shape[-2]
        # Broadcasting tiles PE over any leading dims ([*, seq_len, d_model]),
        # replacing the old clone/stack/view round-trip.  Move PE to x's own
        # device (not the global default) so CPU inputs keep working on a
        # CUDA machine.
        return x + PE[:seq_len].to(x.device)
    # NOTE(review): this branch reads the LAST dim as the length, unlike the
    # add path which uses shape[-2] — confirm callers intend this.
    return PE[: x.shape[-1]]

# input = torch.ones([2, 100, 512], dtype=torch.int64)
# data = Positional(input, em_add_pe=False)
#
# def figheatmap(data):
#     from matplotlib import pyplot as plt
#     import seaborn as sns
#     import pandas as pd
#
#     # 练习的数据：
#
#     data = pd.DataFrame(data)
#     plt.figure(dpi=120)
#     # 绘制热度图：
#     plot = sns.heatmap(data, cmap=sns.diverging_palette(10, 220, sep=80, n=7))
#     plt.title("The X-axis is d_dim\nand the Y-axis is pos")
#     plt.show()
#
# figheatmap(data)
