"""
@Filename       : cascade_encoder.py
@Create Time    : 2021/9/29 14:43
@Author         : Rylynn
@Description    : 

"""
import math

import torch
import torch as th
import torch.nn as nn
import numpy as np
from torch.autograd import Variable


class DotProductAttention(nn.Module):
    """Scaled dot-product self-attention over a batch of sequences."""

    def __init__(self):
        super(DotProductAttention, self).__init__()
        self.softmax = nn.Softmax(dim=2)

    def forward(self, seq_embed, mask):
        """
        Args:
            seq_embed: (batch, seq_len, embed_dim) sequence embeddings.
            mask: (seq_len, seq_len) tensor where 1 marks positions that
                must NOT be attended to (e.g. future positions).

        Returns:
            (batch, seq_len, embed_dim) attention-weighted embeddings.
        """
        embed_size = seq_embed.shape[2]
        # Pairwise similarity between all positions: (batch, seq, seq).
        score = th.bmm(seq_embed, seq_embed.transpose(1, 2))

        # Additive mask: -inf where mask == 1, 0 elsewhere. Built on the
        # input's device/dtype instead of assuming CUDA is available.
        additive_mask = th.where(
            mask == 1,
            th.full_like(mask, float('-inf'), dtype=seq_embed.dtype),
            th.zeros_like(mask, dtype=seq_embed.dtype),
        )

        # Broadcast over the batch dimension (replaces a per-sample loop).
        score = score + additive_mask
        scaled_score = self.softmax(score / math.sqrt(embed_size))
        return th.bmm(scaled_score, seq_embed)


def get_previous_user_mask(seq, user_size):
    """
    Build an additive mask that blocks already-activated users.

    For each timestep i, every user id that appears in seq[:, :i+1]
    (plus id 0, assumed PAD) receives -inf so it cannot be predicted again.

    Args:
        seq: (batch, seq_len) LongTensor of user ids; 0 is assumed to be PAD.
        user_size: size of the prediction axis (total number of user ids).

    Returns:
        (batch, seq_len, user_size) FloatTensor with -inf at previously
        activated user positions (and index 0) and 0 elsewhere.
    """
    assert seq.dim() == 2
    batch_size, seq_len = seq.size(0), seq.size(1)

    # seqs[b, i, j] = seq[b, j]: every timestep row sees the whole sequence.
    seqs = seq.unsqueeze(1).expand(batch_size, seq_len, seq_len)

    # Lower-triangular mask keeps only users activated at or before step i.
    # Built directly on seq's device (no NumPy round-trip / is_cuda branch).
    previous_mask = th.tril(th.ones(seq_len, seq_len, device=seq.device))
    masked_seq = previous_mask * seqs.float()

    # Append a zero column so every row always scatters into index 0 (PAD);
    # this forces the PAD user to be masked at every timestep.
    pad_col = th.zeros(batch_size, seq_len, 1, device=seq.device)
    masked_seq = th.cat([masked_seq, pad_col], dim=2)

    ans = th.zeros(batch_size, seq_len, user_size, device=seq.device)
    return ans.scatter_(2, masked_seq.long(), float('-inf'))


class GRUCascadeEncoder(nn.Module):
    """
    Encodes a cascade (sequence of user embeddings) with a GRU cell plus a
    learned positional embedding, then applies masked self-attention so each
    position only attends to itself and earlier positions.

    Config keys read: 'pos_dim', 'node_num', 'embed_dim', 'hidden_dim',
    'knowledge_aware', 'content_aware'.
    NOTE(review): the GRU path writes the hidden state into an
    (batch, len, embed_dim) buffer, so 'hidden_dim' must equal 'embed_dim'
    — confirm with callers.
    """

    def __init__(self, config):
        super(GRUCascadeEncoder, self).__init__()
        self.pos_dim = config['pos_dim']
        self.user_size = config['node_num'] + 1

        # Learned positional embedding; supports sequences up to length 1000.
        self.pos_embed = nn.Embedding(1000, self.pos_dim)

        self.knowledge_aware = config['knowledge_aware']
        self.content_aware = config['content_aware']

        self.gru = nn.GRUCell(input_size=config['embed_dim'] + self.pos_dim,
                              hidden_size=config['hidden_dim'])

        self.dropout = nn.Dropout(0.2)
        self.seq_attention = DotProductAttention()

    def forward(self, batch_seqs, batch_seqs_embed, gru=True):
        """
        Args:
            batch_seqs: (batch, seq_len) tensor of user ids (used only to
                size the positional indices).
            batch_seqs_embed: (batch, seq_len, embed_dim) user embeddings.
            gru: if True run the step-wise GRU encoder; otherwise use the
                dropped-out input embeddings directly.

        Returns:
            (batch, seq_len, embed_dim) encoded sequence.
        """
        batch_size, max_length, embed_size = batch_seqs_embed.shape
        # Derive the device from the input instead of hardcoding .cuda(),
        # so the module also runs on CPU.
        device = batch_seqs_embed.device

        # Position ids 0..seq_len-1 replicated for every sample.
        batch_t = th.arange(batch_seqs.shape[1], device=device).expand(batch_seqs.size())

        order_embed = self.dropout(self.pos_embed(batch_t))
        batch_seqs_embed = self.dropout(batch_seqs_embed)

        final_embed = th.cat([batch_seqs_embed, order_embed], dim=-1)

        # ----------------- GRU encoder -----------------
        if gru:
            # (Variable is deprecated since torch 0.4; plain tensors suffice.)
            out = th.zeros(batch_size, max_length, embed_size, device=device)
            hi = th.zeros(batch_size, embed_size, device=device)
            for i in range(max_length):
                hi = self.gru(final_embed[:, i, :], hi)
                hi = self.dropout(hi)
                out[:, i] = hi
        else:
            out = batch_seqs_embed

        # Upper-triangular (strict) mask blocks attention to future positions.
        mask = th.triu(th.ones(max_length, max_length, device=device), 1)
        return self.seq_attention(out, mask)


class TransformerCascadeEncoder(nn.Module):
    """
    Transformer-based cascade encoder: embeds user ids, adds fixed sinusoidal
    positional encodings, runs a Transformer encoder, and mean-pools the
    outputs over the valid (non-padded) timesteps.

    Config keys read: 'node_num', 'embed_dim'.
    NOTE(review): the encoder layer's d_model is hard-coded to 256, so
    config['embed_dim'] must be 256 — confirm with callers.
    """

    def __init__(self, config):
        super(TransformerCascadeEncoder, self).__init__()
        self.config = config
        self.node_num = config['node_num']
        # id 0 is reserved for padding.
        self.user_embed = nn.Embedding(config['node_num'] + 1, config['embed_dim'], padding_idx=0)
        self.encoder_layer = nn.TransformerEncoderLayer(d_model=256, nhead=8)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer=self.encoder_layer, num_layers=6)
        self.linear = nn.Linear(config['embed_dim'], config['node_num'])
        self.softmax = nn.Softmax(dim=1)
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, batch_seqs, batch_seqs_length):
        """Return unnormalized next-user scores, shape (batch, node_num)."""
        out = self.encode(batch_seqs, batch_seqs_length)
        return self.linear(out)

    def get_sinusoid_encoding_table(self, n_position, d_model):
        """
        Build the fixed sinusoidal positional-encoding table
        (Vaswani et al., 2017): sin on even dims, cos on odd dims.

        Returns:
            FloatTensor of shape (n_position, d_model).
        """
        # Vectorized replacement for the per-position Python loops.
        positions = np.arange(n_position)[:, np.newaxis]   # (P, 1)
        dims = np.arange(d_model)[np.newaxis, :]           # (1, D)
        angles = positions / np.power(10000, 2 * (dims // 2) / d_model)
        table = np.zeros((n_position, d_model))
        table[:, 0::2] = np.sin(angles[:, 0::2])  # even dimension indices
        table[:, 1::2] = np.cos(angles[:, 1::2])  # odd dimension indices
        return th.FloatTensor(table)

    def encode(self, batch_seqs, batch_seqs_length):
        """
        Args:
            batch_seqs: (batch, seq_len) LongTensor of user ids (0 = PAD).
            batch_seqs_length: per-sample valid lengths (list or tensor).

        Returns:
            (batch, embed_dim) mean of encoder outputs over valid positions.
        """
        device = batch_seqs.device  # follow the input instead of forcing CUDA
        batch_size, seq_len = batch_seqs.shape

        # (batch, seq_len, 1) validity mask, built without a Python loop:
        # position j is valid iff j < length of that sample.
        lengths = th.as_tensor(batch_seqs_length, device=device)
        mask_mtx = (th.arange(seq_len, device=device).unsqueeze(0)
                    < lengths.unsqueeze(1)).float().unsqueeze(2)

        # Fixed positional encodings broadcast over the batch dimension.
        position_embed = self.get_sinusoid_encoding_table(seq_len, 256).to(device)
        seqs_embed = self.user_embed(batch_seqs) + position_embed.unsqueeze(0)

        # nn.TransformerEncoder (pre-batch_first) expects (seq, batch, d_model);
        # the original passed batch-first input, which made attention mix
        # samples instead of timesteps — transpose in and out to fix it.
        out = self.transformer_encoder(seqs_embed.transpose(0, 1)).transpose(0, 1)
        # TODO: Time-decay effect
        out = out * mask_mtx

        # Mean over valid positions only (padding contributes zero).
        return th.sum(out, dim=1) / th.sum(mask_mtx, dim=1)

    def query_embed(self, key):
        """Look up the embedding(s) for the given user id(s)."""
        return self.user_embed(key)

    def loss(self, probs, true_nodes):
        """Cross-entropy between predicted scores and true next users."""
        return self.cross_entropy(probs, true_nodes)
