import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.autograd import Variable
from one_embedding import Embeddings


'''

Element-wise multiplication via broadcasting: a (7,1) column tensor times a
(1,5) row tensor broadcasts to a (7,5) matrix (each row is a copy of the row
vector scaled by the column entry), as sketched below.
[
    
    [1]
    [1]
    [1]
    [1]
    [1]
    [1]
    [1]
]

[1,2,3,4,5]

=

[
    [1,2,3,4,5]
    [1,2,3,4,5]
    [1,2,3,4,5]
    [1,2,3,4,5]
    [1,2,3,4,5]
    [1,2,3,4,5]
    [1,2,3,4,5]
]

[
    [1,2,3,4,5,1,2,3,4,5]
    [1,2,3,4,5,1,2,3,4,5]
    [1,2,3,4,5,1,2,3,4,5]
    [1,2,3,4,5,1,2,3,4,5]
    [1,2,3,4,5,1,2,3,4,5]
    [1,2,3,4,5,1,2,3,4,5]
    [1,2,3,4,5,1,2,3,4,5]

]






'''

class PositionEmbeddings(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Adds a fixed, position-dependent signal to the input embeddings:

        PE[pos, 2i]   = sin(pos / 10000^(2i/dim))
        PE[pos, 2i+1] = cos(pos / 10000^(2i/dim))

    Args:
        dp (float): dropout probability applied to the summed embeddings.
        max_len (int): maximum sequence length supported.
        dim (int): embedding dimension (odd values are supported; the final
            cos column is simply truncated).

    Note: the original version used ``i * -log(1000)`` directly (no ``exp``,
    no ``/dim``, wrong base), kept an unused trainable ``nn.Embedding``, never
    applied dropout, and crashed for odd ``dim`` (including its own default
    of 255). All four issues are fixed here.
    """

    def __init__(self, dp=0, max_len=5000, dim=255):
        super().__init__()
        self.drop = nn.Dropout(dp)

        pe = torch.zeros(max_len, dim)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        # div_term[i] = 10000^(-2i/dim); the exp/log form is the numerically
        # stable way to compute the geometric frequency progression.
        div_term = torch.exp(
            torch.arange(0, dim, 2).float() * -(math.log(10000.0) / dim)
        )  # (ceil(dim/2),)
        angles = position * div_term  # (max_len, ceil(dim/2)) via broadcasting
        pe[:, 0::2] = torch.sin(angles)
        # For odd dim the cos slice has one column fewer than `angles`.
        pe[:, 1::2] = torch.cos(angles)[:, : dim // 2]

        # register_buffer so `pe` follows .to(device)/.cuda() and is saved in
        # state_dict, yet is not a trainable parameter.
        self.register_buffer("pe", pe.unsqueeze(0))  # (1, max_len, dim)

    def forward(self, x):
        """Add positional encoding to ``x`` of shape (batch, seq_len, dim).

        Returns a tensor of the same shape, with dropout applied
        (a no-op when ``dp == 0``).
        """
        x = x + self.pe[:, : x.size(1), :]
        return self.drop(x)

if __name__ == '__main__':

    # Token embeddings: vocab size 100, model dimension 512.
    token_embed = Embeddings(100, 512)
    token_ids = torch.randint(0, 100, size=(5, 10))
    embedded = token_embed(token_ids)  # (5, 10, 512)

    # Add the sinusoidal position signal on top of the token embeddings.
    pos_embed = PositionEmbeddings(dim=512)
    encoded = pos_embed(embedded)
    print(encoded.shape)  # (5, 10, 512)
    print(encoded)