import torch
from torch import nn
import math

class Embedding(nn.Module):
    """Token-id embedding lookup whose output is scaled by sqrt(d_model)."""

    def __init__(self, vocab_size, d_model):
        super().__init__()
        # Lookup table mapping each token id to a d_model-dimensional vector.
        self.lut = nn.Embedding(vocab_size, d_model)
        self.d_model = d_model

    def forward(self, x):
        """Embed integer token ids `x` and scale the result by sqrt(d_model)."""
        scale = math.sqrt(self.d_model)
        embedded = self.lut(x)
        return embedded * scale

class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal positional encodings to a batch of embeddings.

    Args:
        d_model: embedding dimension (even, since sin/cos pairs fill the
            alternating even/odd columns).
        max_len: maximum sequence length the table supports (default 5000).
        dropout: dropout probability applied after adding the encodings.
    """

    def __init__(self, d_model, max_len=5000, dropout=0.1):
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        pe = torch.zeros(max_len, d_model)
        pos = torch.arange(0, max_len).unsqueeze(1)  # (max_len, 1)
        # 10000^(2i/d_model) for each even index i; dividing pos by this
        # yields the standard angle pos * 10000^(-2i/d_model).
        div_term = torch.pow(10000, torch.arange(0, d_model, 2).float() / d_model)
        # Even columns get sin, odd columns get cos.
        pe[:, 0::2] = torch.sin(pos / div_term)
        pe[:, 1::2] = torch.cos(pos / div_term)
        pe = pe.unsqueeze(0)  # (1, max_len, d_model) so it broadcasts over batch
        # Buffer (not a parameter): follows .to(device) but is never trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to `x` of shape (batch, seq_len, d_model)."""
        # Slice the table to the actual sequence length; broadcasts over batch.
        # (Removed leftover debug print of the slice that ran on every call.)
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)


def _demo():
    """Smoke test: embed a small batch of token ids, add positional encodings."""
    emb = Embedding(10, 8)
    inputs = torch.tensor(
        [
            [1, 2, 3],
            [4, 5, 6],
        ]
    )
    emb_out = emb(inputs)

    pos = PositionalEncoding(8)
    pos_out = pos(emb_out)
    print(pos_out)
    print(pos_out.shape)


# Guard so importing this module does not run the demo as a side effect.
if __name__ == "__main__":
    _demo()