import torch
import torch.nn as nn
import torch.nn.functional as F
import math

# Seed both the CUDA and CPU RNGs so runs are reproducible.
torch.cuda.manual_seed_all(1234)
torch.manual_seed(1234)

# Let cuDNN autotune kernels; fastest when input shapes are fixed across steps.
torch.backends.cudnn.benchmark = True
# torch.autograd.set_detect_anomaly(True)

# Hyper Parameters
# NOTE(review): device is hard-coded; as written the model cannot run on CPU.
DEVICE = 'cuda'


class Encoder(nn.Module):
    """Two stacked Transformer encoder layers over (batch, seq, d_model) inputs."""

    def __init__(self, d_model, p):
        """
        d_model: feature dimension of each item.
        p: dropout probability used inside the encoder layers.
        """
        super(Encoder, self).__init__()

        # Bug fix: the original accepted `p` but ignored it and hard-coded
        # dropout=0.2; `p` now actually controls the encoder dropout.
        self.trsm_layer = nn.TransformerEncoderLayer(
            d_model, nhead=2, dim_feedforward=4 * d_model,
            activation='gelu', dropout=p)
        self.trsm = nn.TransformerEncoder(self.trsm_layer, num_layers=2)

    def forward(self, inputs: torch.Tensor, padding: torch.Tensor):
        """
        inputs: (batch, seq, d_model) embedded sequence.
        padding: (batch, seq) key-padding mask (True = padded position).
        Returns: (batch, seq, d_model).
        """
        # nn.TransformerEncoder expects (seq, batch, d_model); transpose in
        # and back out so callers keep batch-first tensors.
        output = self.trsm(inputs.transpose(0, 1), src_key_padding_mask=padding)
        return output.transpose(0, 1)


class Embedding(nn.Module):
    """Token embedding + sinusoidal positional encoding, then LayerNorm/dropout.

    Also returns the embedding table so the decoder can tie output weights.
    """

    def __init__(self, d_model, max_pos, vocab_cnt):
        """
        d_model: feature dimension.
        max_pos: maximal sequence length.
        vocab_cnt: vocabulary size (items plus special tokens).
        """
        super(Embedding, self).__init__()

        # Sinusoidal positional encoding, computed vectorized instead of the
        # original O(max_pos * d_model) Python double loop. Registered as a
        # buffer so it follows .to(device) and state_dict, rather than being
        # pinned to a hard-coded global DEVICE.
        position = torch.arange(max_pos, dtype=torch.float).unsqueeze(1)
        even = torch.arange(0, 2 * (d_model // 2), 2, dtype=torch.float)  # 2*i
        angles = position / torch.pow(10000.0, even / d_model)
        pe = torch.zeros((max_pos, d_model), dtype=torch.float)
        pe[:, 0:2 * (d_model // 2):2] = torch.sin(angles)
        pe[:, 1:2 * (d_model // 2):2] = torch.cos(angles)
        self.register_buffer('PE', pe)

        # Learnable token-embedding table (tied with the decoder output).
        self.vocab_cnt = vocab_cnt
        self.embedding_table = nn.Parameter(
            torch.Tensor(vocab_cnt, d_model), requires_grad=True)
        # NOTE(review): this registers the same tensor under a second name;
        # kept only so existing state_dicts/optimizer groups still match.
        self.register_parameter('EmbedTable', self.embedding_table)
        # NOTE(review): this learnable position embedding is never used in
        # forward(); kept for checkpoint compatibility — consider removing.
        self.position_embedding = nn.Embedding(
            num_embeddings=max_pos,
            embedding_dim=d_model
        )
        self.seq_len = max_pos

        self.LayerNorm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(p=0.2)
        self.reset_parameters()

    def reset_parameters(self):
        """BERT-style N(0, 0.02) init for the token-embedding table."""
        nn.init.normal_(self.embedding_table, mean=0.0, std=0.02)

    def forward(self, inputs: torch.Tensor):
        """
        inputs: (batch, seq) long tensor of token ids.
        Returns: (embedded (batch, seq, d_model), embedding_table).
        """
        embedded_inputs = nn.functional.embedding(inputs, self.embedding_table)
        # Robustness fix: slice PE to the actual sequence length so inputs
        # shorter than max_pos broadcast correctly (the original added the
        # full (max_pos, d_model) table unconditionally).
        embed = embedded_inputs + self.PE[:inputs.size(1)]

        # Layer Norm
        embed = self.dropout(self.LayerNorm(embed))

        return embed, self.embedding_table


class Decoder(nn.Module):
    """Maps encoder output to per-item probabilities via tied embedding weights."""

    def __init__(self, d_model, d_item):
        """
        d_model: feature dimension of the encoded sequence.
        d_item: number of output items (size of the probability distribution).
        """
        super(Decoder, self).__init__()
        # NOTE(review): the original also built an nn.Linear(d_model, d_item)
        # and an nn.LayerNorm(d_model) that were never called in forward();
        # these dead parameters have been removed.
        self.b_o = nn.Parameter(torch.Tensor(d_item), requires_grad=True)
        self.register_parameter('B_o', self.b_o)

        self.reset_parameters()

    def reset_parameters(self):
        """N(0, 0.02) init for the output bias."""
        nn.init.normal_(self.b_o, mean=0.0, std=0.02)

    def forward(self, encoded, embedding_table):
        """
        encoded: (batch, seq, d_model) encoder output.
        embedding_table: (d_item, d_model) tied embedding weights.
        Returns: (batch, seq, d_item) softmax probabilities over items.
        """
        # Weight tying: logits = encoded @ E^T + b_o.
        output = encoded.matmul(embedding_table.t()) + self.b_o
        # dim=-1 instead of dim=2: identical for rank-3 input, robust otherwise.
        output = nn.functional.softmax(output, dim=-1)

        return output


class Transformer(nn.Module):
    """Sequence model: token embedding -> Transformer encoder -> tied decoder."""

    def __init__(self, hyper_params):
        """
        hyper_params keys used here:
          d_model: features per item.
          seq_len: maximal number of items in a sequence.
          total_items: number of distinct items (+2 presumably for special
                       padding/mask tokens — TODO confirm with the data
                       pipeline).
        (The original docstring documented parameters `d_item`, `max_pos`, `p`
        that do not match these keys; corrected here.)
        """

        super(Transformer, self).__init__()
        self.embedding = Embedding(
            hyper_params['d_model'], hyper_params['seq_len'],
            hyper_params['total_items'] + 2)
        self.encoder = Encoder(hyper_params['d_model'], p=0.2)
        self.decoder = Decoder(
            hyper_params['d_model'], hyper_params['total_items'] + 2)

    def forward(self, inputs, padding):
        """
        inputs: (batch, seq) long tensor of item ids — the embedding layer
                consumes token ids (the original doc said B * S * D, which
                was wrong: the D axis is produced by the embedding).
        padding: (batch, seq) key-padding mask.
        Returns: (batch, seq, total_items + 2) item probabilities.
        """
        embedded, embedding_table = self.embedding(inputs)
        encoded = self.encoder(embedded, padding)
        return self.decoder(encoded, embedding_table)
