# Import required packages.
import matplotlib.pyplot as plt
import numpy as np

import torch

# torch.nn provides predefined network layers (convolution, LSTM,
# embedding, etc.) so we don't have to reinvent the wheel.
import torch.nn as nn

# Math utilities.
import math

# Legacy Variable wrapper from torch.autograd (modern tensors carry grad info directly).
from torch.autograd import Variable

import warnings

warnings.filterwarnings('ignore')


# Positional encoder: injects token-position information into embeddings.
# Implemented as an nn.Module so it can be dropped into a model like any layer.
class PositionalEncoding(nn.Module):
    """Add sinusoidal positional encodings (Vaswani et al., 2017) to a
    batch of embeddings, then apply dropout.

    Args:
        d_model (int): embedding dimension; must be even, since sin/cos
            pairs fill alternating columns of the table.
        dropout (float): dropout probability applied after the addition.
        max_len (int): maximum sequence length covered by the precomputed
            encoding table.
    """

    def __init__(self, d_model, dropout, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the (max_len, d_model) encoding table once.
        pe = torch.zeros(max_len, d_model)
        # Column vector of positions 0..max_len-1, shape (max_len, 1).
        position = torch.arange(0, max_len).unsqueeze(1)
        # Frequency term 1 / 10000^(2i/d_model), computed in log space
        # for numerical stability.
        div_term = torch.exp(torch.arange(0, d_model, 2) *
                             -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even columns
        pe[:, 1::2] = torch.cos(position * div_term)  # odd columns
        # Add a leading batch dimension: (max_len, d_model) -> (1, max_len, d_model).
        pe = pe.unsqueeze(0)
        # Buffer: saved in state_dict and moved by .to()/.cuda(), but not
        # a trainable parameter (and therefore receives no gradient).
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Encode a batch of embeddings.

        Args:
            x: tensor of shape (batch, seq_len, d_model), seq_len <= max_len.

        Returns:
            Tensor of the same shape: dropout(x + pe[:, :seq_len]).

        The addition is out of place, so the caller's tensor is never
        mutated. (The original code did ``x += x + cc``, which computed
        ``2*x + pe`` and modified the input in place, and it never applied
        the dropout it constructed.)
        """
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)


def _demo():
    """Small smoke test: encode a random batch and print the result.

    Note: the input must be 3-D, (batch, seq_len, d_model). The original
    demo passed a 2-D (2, 4) tensor, which cannot broadcast against the
    (1, seq_len, d_model) encoding slice and raised a RuntimeError.
    d_model must be even so the sin/cos columns pair up.
    """
    d_model = 4
    encoder = PositionalEncoding(d_model, 0.1)
    # (batch=2, seq_len=3, d_model=4); seq_len may be anything <= max_len.
    data = torch.rand(2, 3, d_model)
    encoded = encoder(data)
    print(encoded)


if __name__ == '__main__':
    # Run the demo only when executed as a script, not on import.
    _demo()
    print("over")