import torch
import torch.nn as nn
import torch.nn.functional as F
import math

# Reproducibility: seed both the CUDA and CPU RNGs up front.
torch.cuda.manual_seed_all(1234)
torch.manual_seed(1234)

# Let cuDNN auto-tune conv/attention kernels; fastest when input shapes are fixed.
torch.backends.cudnn.benchmark = True
# torch.autograd.set_detect_anomaly(True)

# Hyper Parameters
DEVICE = 'cuda'  # NOTE(review): hard-coded CUDA target; CPU-only hosts will fail where this is used.


class Attention(nn.Module):
    """Single-head scaled dot-product self-attention with a GELU output projection."""

    def __init__(self, d_model, d_k, d_v):
        super(Attention, self).__init__()
        self.d_k = d_k
        self.d_v = d_v

        # Created in this exact order so seeded initialization matches
        # checkpoints produced by the previous implementation.
        self.linear_q = nn.Linear(d_model, d_k)
        self.linear_k = nn.Linear(d_model, d_k)
        self.linear_v = nn.Linear(d_model, d_v)
        self.linear_o = nn.Linear(d_v, d_model)

    def forward(self, inputs):
        # Project the same sequence into queries, keys and values.
        queries = self.linear_q(inputs)
        keys = self.linear_k(inputs)
        values = self.linear_v(inputs)

        # Scaled dot-product attention over the sequence dimension.
        scores = torch.matmul(queries, keys.transpose(1, 2)) / math.sqrt(self.d_k)
        weights = F.softmax(scores, dim=2)
        attended = torch.matmul(weights, values)

        # Output projection followed by GELU.
        return F.gelu(self.linear_o(attended))


class Encoder(nn.Module):
    """Two stacked standard TransformerEncoder layers over batch-first input.

    d_model: feature size of each sequence element.
    p: dropout probability for the transformer layers.
    """

    def __init__(self, d_model, p):
        super(Encoder, self).__init__()

        # Bug fix: `p` was accepted but never used — dropout was hard-coded
        # to 0.2.  Wire the parameter through (the in-file caller passes 0.2,
        # so default behavior is unchanged).
        self.trsm_layer = nn.TransformerEncoderLayer(
            d_model, nhead=2, dim_feedforward=4 * d_model, activation='gelu', dropout=p)

        self.trsm = nn.TransformerEncoder(self.trsm_layer, num_layers=2)

    def forward(self, inputs: torch.Tensor, padding: torch.Tensor):
        """Encode a batch of sequences.

        inputs: (batch, seq, d_model).
        padding: (batch, seq) key-padding mask, True/1 at padded positions.

        nn.TransformerEncoder expects (seq, batch, d_model), so transpose on
        the way in and back out.
        """
        output = self.trsm(inputs.transpose(0, 1), src_key_padding_mask=padding)

        return output.transpose(0, 1)


class VEncoder(nn.Module):
    """Two stacked variational-attention passes: a global pass over the raw
    inputs, then a local pass over the global latent.  Returns both latents
    and both pairs of Gaussian parameters."""

    def __init__(self, d_model, seq_length, p):
        super(VEncoder, self).__init__()

        self.trsm_g = VAttn(d_model, d_model, seq_length, 2 * d_model, p)
        self.trsm_l = VAttn(d_model, d_model, seq_length, 2 * d_model, p)

    def forward(self, inputs, padding, user_embed):
        # Global pass, then a local pass conditioned on the global output.
        z_g, mu_g, sigma_g = self.trsm_g(inputs, padding, user_embed)
        z_l, mu_l, sigma_l = self.trsm_l(z_g, padding, user_embed)
        return z_g, z_l, mu_g, sigma_g, mu_l, sigma_l


class Embedding(nn.Module):
    """Token embedding plus fixed sinusoidal positional encoding.

    d_model: embedding width.
    max_pos: maximum sequence length (positions 0..max_pos-1 precomputed).
    vocab_cnt: vocabulary size of the learnable token table.

    forward() returns both the embedded sequence and the raw embedding table
    so the decoder can tie its output weights to it.
    """

    def __init__(self, d_model, max_pos, vocab_cnt):
        super(Embedding, self).__init__()

        # Sinusoidal positional encoding, vectorized instead of the previous
        # O(max_pos * d_model) Python double loop.  Registered as a
        # non-persistent buffer so it follows the module across .to(device)
        # calls (the old code pinned it to the hard-coded global DEVICE)
        # without entering the state_dict.
        half = d_model // 2
        position = torch.arange(max_pos, dtype=torch.float).unsqueeze(1)
        freq = torch.pow(10000.0, 2.0 * torch.arange(half, dtype=torch.float) / d_model)
        pe = torch.zeros((max_pos, d_model), dtype=torch.float)
        pe[:, 0:2 * half:2] = torch.sin(position / freq)
        pe[:, 1:2 * half:2] = torch.cos(position / freq)
        self.register_buffer('PE', pe, persistent=False)

        # Learnable token embedding table, exposed for decoder weight tying.
        self.vocab_cnt = vocab_cnt
        self.embedding_table = nn.Parameter(
            torch.Tensor(vocab_cnt, d_model), requires_grad=True)
        self.register_parameter('EmbedTable', self.embedding_table)
        # Learnable positional embedding — currently unused in forward();
        # kept so seeded initialization and state_dict stay compatible.
        self.position_embedding = nn.Embedding(
            num_embeddings=max_pos,
            embedding_dim=d_model
        )
        self.seq_len = max_pos

        # BERT-style post-embedding normalization and dropout.
        self.LayerNorm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(p=0.2)
        self.reset_parameters()

    def reset_parameters(self):
        # BERT-style initialization of the token table.
        nn.init.normal_(self.embedding_table, mean=0.0, std=0.02)

    def forward(self, inputs: torch.Tensor):
        """inputs: (batch, seq) long token ids, seq <= max_pos."""
        embedded_inputs = nn.functional.embedding(inputs, self.embedding_table)
        # Generalized: slice the positional table by actual sequence length so
        # sequences shorter than max_pos also work (the old code required
        # seq == max_pos for the broadcast to succeed).
        embed = embedded_inputs + self.PE[:inputs.size(1)]

        # Layer Norm then dropout.
        embed = self.dropout(self.LayerNorm(embed))

        return embed, self.embedding_table


class Decoder(nn.Module):
    """Maps the concatenated latent back to a probability distribution over
    items, tying output weights to the shared embedding table."""

    def __init__(self, d_model, d_item):
        super(Decoder, self).__init__()
        self.linear1 = nn.Linear(2 * d_model, d_model)
        # Not used by forward() (the tied embedding table is used instead);
        # kept so seeded initialization and state_dict stay compatible.
        self.linear2 = nn.Linear(d_model, d_item)
        self.b_o = nn.Parameter(torch.Tensor(d_item), requires_grad=True)
        self.register_parameter('B_o', self.b_o)
        self.LayerNorm = nn.LayerNorm(d_model)

        self.reset_parameters()

    def reset_parameters(self):
        nn.init.normal_(self.b_o, mean=0.0, std=0.02)

    def forward(self, encoded, embedding_table):
        """encoded: (B, S, 2*d_model); embedding_table: (d_item, d_model).

        Returns (B, S, d_item) softmax probabilities over items.
        """
        hidden = self.LayerNorm(F.gelu(self.linear1(encoded)))
        # Weight-tied output projection plus learned bias.
        logits = torch.matmul(hidden, embedding_table.t()) + self.b_o
        return F.softmax(logits, dim=2)


class VAttn(nn.Module):
    """
    Variational Attention.

    Projects inputs to queries/values, fits a diagonal Gaussian (mu, sigma)
    over [pooled-summary ; query] pairs, derives attention scores from mu,
    and returns the attention-weighted context together with the Gaussian
    parameters (presumably for a KL term computed by the caller — confirm).
    """
    def __init__(self, d_model, seq_embed_length, seq_length, latent_size, attn_dropout_p=0.2):
        super(VAttn, self).__init__()

        # Value / query projections, each followed by GELU.
        self.linear_v = nn.Sequential(
            nn.Linear(d_model, seq_embed_length),
            nn.GELU()
        )
        self.linear_q = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.GELU()
        )

        # NOTE(review): the conv/pool stack below is never used in forward()
        # (its call sites are commented out there).  Kept as-is: removing it
        # would change the state_dict layout and the parameter-init RNG stream.
        self.cnn1 = nn.Conv1d(
            in_channels=d_model,
            out_channels=d_model,
            groups=d_model,
            kernel_size=11,
            stride=5,
            padding=5
        )
        self.pool1 = nn.AvgPool1d(
            kernel_size=2,
            stride=2
        )
        self.cnn2 = nn.Conv1d(
            in_channels=d_model,
            out_channels=seq_embed_length,
            groups=d_model,
            kernel_size=9,
            stride=5,
            padding=4
        )
        self.pool2 = nn.AvgPool1d(
            kernel_size=4,
            stride=4
        )
        self.seq_embed_length = seq_embed_length

        # Heads producing the Gaussian parameters from concatenated
        # [pooled ; query] features (hence the 2 * d_model input width).
        self.linear_mu = nn.Linear(2 * d_model, latent_size)
        self.linear_log_sigma = nn.Linear(2 * d_model, latent_size)


        # Maps the latent mean to one attention score per sequence position.
        self.linear_score = nn.Sequential(
            nn.Linear(latent_size, seq_length),
            nn.Tanh()
        )
        self.attn_dropout_p = attn_dropout_p

        self.linear_context = nn.Sequential(
            nn.Linear(seq_embed_length, seq_embed_length),
            nn.Tanh()
        )
        self.layer_norm = nn.LayerNorm(d_model)
        self.embed_layer_norm = nn.LayerNorm(seq_embed_length)
        self.dropout = nn.Dropout(p=attn_dropout_p)

    def forward(self, inputs, padding, user_embed):
        # inputs: assumed (batch, seq, d_model); padding: (batch, seq) with
        # 1 marking padded positions — TODO confirm against the caller.
        # NOTE(review): user_embed is accepted but unused here (its use is
        # commented out below) — verify whether that is intentional.
        input_v = self.linear_v(inputs)
        input_q = self.linear_q(inputs)

        # --- Alternative pooling strategies from earlier experiments, kept
        # --- for reference (all disabled):
        # cnn1 = F.gelu(self.pool1(self.cnn1(torch.transpose(input_q, 1, 2))))
        # cnn2 = F.gelu(self.pool2(self.cnn2(cnn1)))
        # seq_embed_input = torch.transpose(cnn2, 1, 2).expand([-1, inputs.shape[1], -1])
        # seq_embed_input = self.dropout(self.embed_layer_norm(seq_embed_input))

        # seq_embed_score = torch.sum(user_embed.unsqueeze(1) * input_q, dim=2, keepdim=True)
        # seq_embed_score = torch.softmax(seq_embed_score, dim=1)
        # seq_embed_input = seq_embed_score * input_v
        # seq_embed_input = self.layer_norm(seq_embed_input)

        # seq_embed_input = user_embed.view(inputs.shape[0], 1, -1).expand(-1, inputs.shape[1], -1)


        # AVG.
        # Mean-pool the queries over the sequence, broadcast back to each step.
        seq_embed_input = torch.mean(input_q, dim=1, keepdim=True).expand([-1, inputs.shape[1], -1])

        # Pair the pooled summary with each position's query.
        input_embed = torch.cat([seq_embed_input, input_q], dim=2)
        
        # Generate mu and sigma.
        mu = torch.tanh(self.linear_mu(input_embed))
        log_sigma = torch.tanh(self.linear_log_sigma(input_embed))
        sigma = torch.exp(log_sigma)

        # Sample the latent.
        # NOTE(review): `latent` is computed but never used below — the score
        # is taken from `mu` directly.  The eps draw still advances the RNG,
        # so removing it would change the subsequent dropout mask; confirm
        # before cleaning up.
        eps = torch.randn_like(mu, requires_grad=False)
        latent = eps * sigma + mu

        # Score.
        score = self.linear_score(mu)
        
        # Padding.
        # Large negative bias drives padded positions' softmax weight to ~0.
        padding = padding.float().unsqueeze(1).expand_as(score)
        score = score + padding * (-10000.0)

        # Probs.
        attention_probs = F.softmax(score, dim=2)
        attention_probs = self.dropout(attention_probs)

        # Context.
        # Attention-weighted sum of values, then a tanh projection + LayerNorm.
        context_result = torch.matmul(attention_probs, input_v)
        context_result = self.linear_context(context_result)
        context_result = self.layer_norm(context_result)

        return context_result, mu, sigma
        


class VTransformer(nn.Module):
    """Variational transformer: embeds an item sequence, encodes it into
    global/local latents, and decodes item probabilities."""

    def __init__(self, d_model, d_k, d_v, d_item, max_pos, p, user_cnt):
        '''
        d_model: Features in an item.
        d_item: Number of all items.
        max_pos: The maximal number of the items in a sequence.
        p: The probability of Binomial Distribution.
        '''
        super(VTransformer, self).__init__()
        # d_item + 2 reserves extra vocabulary slots — presumably for
        # padding/mask ids; confirm against the data pipeline.
        self.embedding = Embedding(d_model, max_pos, d_item + 2)
        # NOTE(review): `p` is not forwarded here — the encoder dropout is
        # fixed at 0.2; verify that is intentional.
        self.encoder = VEncoder(d_model, max_pos, p=0.2)
        self.decoder = Decoder(d_model, d_item + 2)
        self.user_embedding = nn.Embedding(num_embeddings=user_cnt, embedding_dim=d_model)

    def forward(self, inputs, padding, user_id):
        """
        inputs: B * S * D
        padding: B * S
        """
        embedded, table = self.embedding(inputs)
        user_embed = self.user_embedding(user_id)
        z_g, z_l, mu_g, sigma_g, mu_l, sigma_l = self.encoder(
            embedded, padding, user_embed)
        # Decode from the concatenated global + local latents, tying output
        # weights to the shared embedding table.
        probs = self.decoder(torch.cat([z_g, z_l], dim=2), table)
        return probs, mu_g, sigma_g, mu_l, sigma_l
