import torch
import torch.nn as nn
from torch.nn import Dropout
import torch.nn.functional as F
import math
from a_1_embedding import Embedding
import numpy as np

# Global compute device: CUDA when available, else CPU. Modules and inputs
# throughout this file are moved to this device explicitly.
Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(Device)


# class PositionalEncoding(nn.Module):
#     def __init__(self,d_model,max_len=5000,dropout=0.1):
#         super(PositionalEncoding,self).__init__()
#         self.dropout = nn.Dropout(p=dropout)
#         pos_table = np.array([
#         [pos / np.power(10000, 2 * i / d_model) for i in range(d_model)]
#         if pos != 0 else np.zeros(d_model) for pos in range(max_len)])
#         pos_table[1:, 0::2] = np.sin(pos_table[1:, 0::2])                  # 字嵌入维度为偶数时
#         pos_table[1:, 1::2] = np.cos(pos_table[1:, 1::2])                  # 字嵌入维度为奇数时
#         self.pos_table = torch.FloatTensor(pos_table)               # enc_inputs: [seq_len, d_model]
#     def forward(self,enc_inputs):
#         # enc_inputs: [batch_size, seq_len, d_model]
#         enc_inputs = enc_inputs.to(Device)
#         enc_inputs += self.pos_table[:enc_inputs.size(1),:]
#         return self.dropout(enc_inputs)

#
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding ("Attention Is All You Need" style).

    Precomputes a (1, max_len, dim) table of sin/cos position signals, adds
    the slice matching the input's sequence length to the input embeddings,
    and applies dropout.
    """

    def __init__(self, dim, max_len=5000, dp=0.1):
        """
        Args:
            dim: embedding dimension (d_model); assumed even.
            max_len: maximum supported sequence length.
            dp: dropout probability applied after adding the encoding.
        """
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dp)
        self.dim = dim
        self.max_len = max_len

        pe = torch.zeros(max_len, dim)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Transformer frequency term: 1 / 10000^(2i / dim).
        # (The original computed `arange(0, dim, 2) * -log(1000.0)` with no
        # exp() and no division by dim, which is not a frequency progression
        # at all — the sin/cos arguments it produced were meaningless.)
        div_term = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        # Buffer, not a parameter: follows .to()/.cuda() with the module and
        # is stored in state_dict, but receives no gradients.
        self.register_buffer('pe', pe.unsqueeze(0))  # (1, max_len, dim)

    def forward(self, x):
        """Add positional encoding to x of shape (batch, seq_len, dim)."""
        # Slice to the actual sequence length and follow x's device instead
        # of forcing the global `Device` (the original moved x to `Device`
        # while `pe` stayed on the module's device — a mismatch on CUDA).
        pe = self.pe[:, :x.size(1), :].to(x.device)
        return self.dropout(x + pe)

#
# class PositionalEncoding(nn.Module):
#     def __init__(self,embed_model_dim, max_seq_len=5000):
#         """
#         Args:
#             seq_len: length of input sequence
#             embed_model_dim: demension of embedding
#         """
#         super(PositionalEncoding, self).__init__()
#         self.embed_dim = embed_model_dim
#
#         pe = torch.zeros(max_seq_len, self.embed_dim)
#         for pos in range(max_seq_len):
#             for i in range(0, self.embed_dim, 2):
#                 pe[pos, i] = math.sin(pos / (10000 ** ((2 * i) / self.embed_dim)))
#                 pe[pos, i + 1] = math.cos(pos / (10000 ** ((2 * (i + 1)) / self.embed_dim)))
#         pe = pe.unsqueeze(0)
#         self.register_buffer('pe', pe)
#
#     def forward(self, x):
#         """
#         Args:
#             x: input vector
#         Returns:
#             x: output
#         """
#
#         # make embeddings relatively larger
#         x = x * math.sqrt(self.embed_dim)
#         # add constant to embedding
#         seq_len = x.size(1)
#         x = x + torch.autograd.Variable(self.pe[:, :seq_len], requires_grad=False)
#         return x


#
class PositionEmbedding(nn.Module):
    """Fixed sinusoidal position-encoding lookup.

    Given a (batch, seq_len) batch of token ids, returns the
    (seq_len, d_model) encoding matrix; the caller adds it to the token
    embeddings (see TransformEmbedding).
    """

    def __init__(self, d_model, max_len=5000):
        """
        Args:
            d_model: embedding dimension; assumed even.
            max_len: maximum supported sequence length.
        """
        super(PositionEmbedding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # idx = 0, 2, 4, ... are the even dimension indices; the transformer
        # frequency is 1 / 10000^(idx / d_model). The original used
        # 1000 ** (2 * idx / d_model) — wrong base and doubled exponent.
        idx = torch.arange(0, d_model, 2).float()
        div_term = torch.exp(idx * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Buffer, not a parameter: follows .to()/.cuda() with the module and
        # appears in state_dict. The original pinned `pe` to the global
        # `Device` at construction and never registered it.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """x: (batch, seq_len) token ids -> (seq_len, d_model) encodings."""
        _, seq_len = x.size()
        return self.pe[:seq_len, :]

#
# class PositionalEncoding(nn.Module):
#     def __init__(self, d_model, max_len=5000, dropout=0.1):
#         super(PositionalEncoding, self).__init__()
#         self.dropout = nn.Dropout(p=dropout)
#
#         pe = torch.zeros(max_len, d_model)
#         position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
#         div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
#
#         pe[:, 0::2] = torch.sin(position * div_term)
#         pe[:, 1::2] = torch.cos(position * div_term)
#
#         pe = pe.unsqueeze(0).transpose(0, 1)
#         self.register_buffer('pe', pe)
#
#     def forward(self, x):
#         x = x + self.pe[:x.size(0), :]
#         return self.dropout(x)

# class TransformEmbedding(nn.Module):
#     def __init__(self, vocab_size, embedding_dim, drop=0.1):
#         super(TransformEmbedding, self).__init__()
#         self.embedding = Embedding(vocab_size, embedding_dim)
#         self.position_embedding = PositionEmbedding(embedding_dim)
#         self.dropout = Dropout(drop)
#
#     def forward(self, x):
#         # x （batch_size, seq_len)
#         x = x.to(Device)
#         y = self.embedding(x)
#         y = y + self.position_embedding(x)
#         return self.dropout(y)

class TransformEmbedding(nn.Module):
    """Token embedding plus additive sinusoidal position encoding, with dropout."""

    def __init__(self, vocab_size, embedding_dim, drop=0.1):
        """
        Args:
            vocab_size: number of token ids in the vocabulary.
            embedding_dim: dimension of each embedding vector.
            drop: dropout probability applied to the summed embeddings.
        """
        super(TransformEmbedding, self).__init__()
        self.embedding = Embedding(vocab_size, embedding_dim)
        self.position_embedding = PositionEmbedding(embedding_dim)
        self.dropout = Dropout(drop)

    def forward(self, x):
        """x: (batch, seq_len) token ids -> (batch, seq_len, embedding_dim)."""
        tokens = x.to(Device)
        tok_emb = self.embedding(tokens)
        pos_emb = self.position_embedding(tokens)
        return self.dropout(tok_emb + pos_emb)





if __name__ == '__main__':
    # Smoke test: embed a batch of 5 random token sequences of length 10.
    token_ids = torch.randint(0, 100, (5, 10))
    embedder = TransformEmbedding(100, 128)
    out = embedder(token_ids)
    print(out)
    print(out.shape)


