import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.autograd import Variable


class Embeddings(nn.Module):
    """Token-embedding lookup scaled by sqrt(dim).

    The sqrt(dim) scaling follows the Transformer convention so that the
    embedding magnitudes are comparable to the positional encodings that
    are added to them afterwards.
    """

    def __init__(self, num, dim):
        super().__init__()
        self.num = num  # vocabulary size
        self.dim = dim  # embedding dimension
        self.embedding = nn.Embedding(num_embeddings=num, embedding_dim=dim)

    def forward(self, x):
        # x: integer token ids of any shape -> float tensor of shape (*x.shape, dim)
        looked_up = self.embedding(x)
        return looked_up * math.sqrt(self.dim)



class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal positional encodings (Vaswani et al., 2017).

        pe[pos, 2i]   = sin(pos / 10000^(2i/dim))
        pe[pos, 2i+1] = cos(pos / 10000^(2i/dim))

    Assumes an even ``dim`` so the sin/cos halves line up.

    Args:
        dim: embedding dimension of the input.
        dp: dropout probability applied to the sum (default 0 = no-op).
        max_len: longest sequence length supported.
    """

    def __init__(self, dim, dp=0, max_len=5000):
        super().__init__()
        self.dim = dim
        self.max_len = max_len
        self.drop = nn.Dropout(dp)

        pe = torch.zeros(max_len, dim)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Bug fix: the frequency term is exp(2i * -ln(10000)/dim), i.e.
        # 1/10000^(2i/dim). The original multiplied arange(0, dim, 2) by
        # -ln(10000) directly (no exp, no /dim), producing garbage frequencies.
        div_term = torch.exp(
            torch.arange(0, dim, 2, dtype=torch.float) * -(math.log(10000.0) / dim)
        )
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Register as a buffer so the table follows .to(device)/state_dict
        # but is never trained. Stored with a leading batch dim of 1.
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Add positional encodings to x of shape (batch, seq_len, dim).

        Bug fix: the original re-assigned ``self.pe = self.pe.unsqueeze(0)``
        on every call, so a second forward pass broke. Also removed the
        leftover debug prints and actually applied the dropout layer
        (identity at the default dp=0).
        """
        length = x.size(1)
        return self.drop(x + self.pe[:, :length, :])

        

if __name__ == '__main__':
    # Smoke test: scaled token embeddings followed by positional encoding.
    token_embed = Embeddings(10, 6)

    tokens = torch.randint(0, 10, (3, 2))
    embedded = token_embed(tokens)
    print(embedded.shape)

    pos_enc = PositionalEncoding(6)
    encoded = pos_enc(embedded)
    print(encoded.shape)
    


