import torch
import torch.nn as nn
from torch.nn import Dropout
import torch.nn.functional as F
import math
from a_1_embedding import Embedding

# Global device selector: GPU when available, else CPU. Every class below
# creates tensors on (or moves inputs to) this device via this module-level
# global. NOTE(review): printing at import time is a side effect; kept for
# backward compatibility.
Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(Device)


class PositionEmbedding(nn.Module):
    """Fixed sinusoidal position-embedding table (Vaswani et al., 2017).

    Precomputes a (max_len, d_model) table where
        PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
        PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))
    and returns its first ``seq_len`` rows in :meth:`forward`.
    Assumes ``d_model`` is even (same as the original implementation).
    """

    def __init__(self, d_model, max_len=5000):
        super(PositionEmbedding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        # position: (max_len, 1) column of 0..max_len-1.
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # arange(0, d_model, 2) already equals 2i, so the exponent is
        # div_term / d_model. (The original used 1000 ** (2*div_term/d_model):
        # wrong base — the paper uses 10000 — and a doubled exponent.)
        div_term = torch.arange(0, d_model, 2).float()
        angle = position / 10000 ** (div_term / d_model)
        pe[:, 0::2] = torch.sin(angle)
        pe[:, 1::2] = torch.cos(angle)
        # Buffer instead of a plain attribute: non-trainable, stored in
        # state_dict, and moved automatically by model.to(device) — no
        # hard-coded global device needed.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """x: (batch, seq_len) token ids. Returns the (seq_len, d_model)
        positional table (broadcastable over the batch dimension)."""
        batch, seq_len = x.size()
        return self.pe[:seq_len, :]


class PositionalEncoding(nn.Module):
    """Add sinusoidal positional encodings to a (seq_len, batch, d_model) input.

    The table ``pe`` has shape (max_len, 1, d_model); forward slices the
    first ``seq_len`` positions, broadcasts them over the batch dimension,
    and applies dropout (standard "Attention Is All You Need" encoding).
    """

    def __init__(self, d_model, max_len=5000, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # exp(-ln(10000) * 2i / d_model) == 1 / 10000^(2i / d_model)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        # (max_len, d_model) -> (max_len, 1, d_model): broadcasts over batch.
        pe = pe.unsqueeze(0).transpose(0, 1)
        # register_buffer already moves pe with model.to(device); the previous
        # unconditional pe.to(Device) pinned the table to a module-global
        # device and broke caller-controlled placement.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """x: (seq_len, batch, d_model). Returns the same shape with the
        positional table added, then dropout applied. The input is expected
        on the same device as the module (idiomatic PyTorch), so no forced
        .to(Device) move."""
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)

# class TransformEmbedding(nn.Module):
#     def __init__(self, vocab_size, embedding_dim, drop=0.1):
#         super(TransformEmbedding, self).__init__()
#         self.embedding = Embedding(vocab_size, embedding_dim)
#         self.position_embedding = PositionEmbedding(embedding_dim)
#         self.dropout = Dropout(drop)
#
#     def forward(self, x):
#         # x: (batch_size, seq_len)
#         x = x.to(Device)
#         y = self.embedding(x)
#         y = y + self.position_embedding(x)
#         return self.dropout(y)

class TransformEmbedding(nn.Module):
    """Token embedding + sinusoidal positional encoding + dropout.

    Input x: (batch, seq_len) token ids.
    Output:  (batch, seq_len, embedding_dim).
    """

    def __init__(self, vocab_size, embedding_dim, drop=0.1):
        super(TransformEmbedding, self).__init__()
        self.embedding = Embedding(vocab_size, embedding_dim)
        # dropout=0.0 inside PositionalEncoding: this module applies its own
        # dropout once at the end; the original stacked two dropouts.
        self.position_embedding = PositionalEncoding(embedding_dim, dropout=0.0)
        self.dropout = Dropout(drop)

    def forward(self, x):
        # x: (batch_size, seq_len)
        x = x.to(Device)
        emb = self.embedding(x)  # (batch, seq_len, embedding_dim)
        # PositionalEncoding is sequence-first and already returns input + pe,
        # so transpose in and out. The original computed
        # `emb + position_embedding(emb)`, which (a) added emb twice because
        # PositionalEncoding.forward returns x + pe, and (b) sliced the
        # positional table by the *batch* dimension of a batch-first tensor.
        y = self.position_embedding(emb.transpose(0, 1)).transpose(0, 1)
        return self.dropout(y)





if __name__ == '__main__':
    # Smoke test: 5 sequences of 10 token ids drawn from a 100-word vocab.
    token_ids = torch.randint(0, 100, (5, 10))
    embedder = TransformEmbedding(100, 128)
    output = embedder(token_ids)
    print(output)
    print(output.shape)


