import math

import torch
from torch import nn


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Precomputes a ``(max_seq_len, 1, d_model)`` table ``pe`` in which even
    feature indices hold ``sin(pos / 10000^(2i/d_model))`` and odd indices the
    matching cosine.  ``forward`` adds the first ``seq_len`` rows to its input.

    Args:
        d_model: feature dimension (must be even — sine fills the even
            indices, cosine the odd ones).
        max_seq_len: largest sequence length the table supports.
    """

    def __init__(self, d_model, max_seq_len):
        super().__init__()
        # Kept for interface compatibility; forward() does not apply it
        # (the original implementation left the dropout call commented out).
        self.dropout = nn.Dropout(p=0.1)

        pe = torch.zeros(max_seq_len, d_model)
        # Column vector of positions 0..max_seq_len-1, shape (max_seq_len, 1).
        position = torch.arange(0, max_seq_len, dtype=torch.float).unsqueeze(1)
        # Frequencies 10000^(-2i/d_model), computed in log space for numeric stability.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # (max_seq_len, d_model) -> (max_seq_len, 1, d_model): the singleton
        # middle axis lets pe broadcast over the batch dimension in forward().
        pe = pe.unsqueeze(0).transpose(0, 1)
        # Buffer (not a parameter): follows .to(device)/.state_dict() but is not trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to ``x``.

        Args:
            x: tensor whose leading dimension is the sequence length,
               typically shaped (seq_len, batch, d_model).

        Returns:
            ``x + pe[:seq_len]``, broadcast over the batch axis.
        """
        return x + self.pe[:x.size(0), :]


# Demo: run a small integer tensor through the positional encoder.
d_model = 4  # model dimension (must be even: sine fills even feature indices, cosine fills odd ones)
max_seq_len = 6  # maximum sequence length

pos_encoder = PositionalEncoding(d_model, max_seq_len)

data_tensor = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 1]])
unsqueeze = data_tensor.view(-1)  # flattened view of the data; printed for inspection only
print(unsqueeze)
print("data_tensor=", data_tensor)
# The (2, 4) input broadcasts against pe[:2] of shape (2, 1, 4), yielding a (2, 2, 4) output.
out = pos_encoder(data_tensor)
print(out)

# data_tensor = torch.tensor([1, 2])  # 1. raw data: 1-D tensor: tensor(20,)
# '''
# data_tensor=tensor([1, 2])
# '''
# print(data_tensor)
# # tensor = torch.tensor([1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5])  # 1. raw data: 1-D tensor: tensor(4,)
# embedding = nn.Embedding(max_seq_len, d_model)
# embedding_tensor = embedding(data_tensor)  # 2. word embedding: 2-D tensor: tensor(20,256)
# '''
# embedding_tensor=tensor([[ 0.5387,  0.5235, -0.9567,  0.4590],
#                          [-1.4952, -0.5089,  0.2001, -0.9652]])
# '''
# print(embedding_tensor)
#
# pos_encoder = PositionalEncoding(d_model, max_seq_len)
# # x = torch.randn(max_seq_len, d_model)  # 输入张量，形状为 (seq_len, d_model)
# Positional_tensor = pos_encoder(embedding_tensor)  # 3. positional encoding: 3-D tensor: tensor(20,20,256)
# '''
# Positional_tensor=tensor([
#                           [
#                            [ 0.5985,  1.6928, -1.0630,  1.6211],
#                            [-1.6613,  0.5457,  0.2224,  0.0387]
#                           ],
#                           [
#                            [ 1.5335,  1.1820, -1.0519,  1.6211],
#                            [-0.7264,  0.0349,  0.2335,  0.0387]
#                           ]
#                          ])
# '''
# print(Positional_tensor)
#

'''
Example: with d_model=4 and raw data tensor(2,), the embedded output is tensor(2,4);
adding the positional-encoding term tensor(2,1,4) broadcasts to tensor(2,2,4).
'''
if __name__ == '__main__':
    # The demo above runs at import time; this guard only marks completion.
    print("over")
