"""
@Filename       : kg_diffuse.py
@Create Time    : 2021/9/22 8:24
@Author         : Rylynn
@Description    : Knowledge-aware information-diffusion prediction model (KgDiffusePlus):
                  GRU cascade encoder with optional BERT content features and
                  optional knowledge-graph attention pooling.

"""
import json
import os.path
import pickle

import dgl
import numpy as np
import torch
import torch as th
import torch.nn as nn

from dgl.nn import GATConv

from torch.autograd import Variable

from model.kg.builder.uikg_construct import load_uikg
from model.kg.cascade_encoder import GRUCascadeEncoder, get_previous_user_mask
from model.kg.kg_encoder import KGAttentionPooling
from model.kg.text_encoder import BertTextEncoder
from util.preprocess import load_content


class KgDiffusePlus(nn.Module):
    """Knowledge-aware diffusion prediction model.

    Encodes an observed user cascade with a GRU encoder and projects each
    timestep to a score over all users (next-user prediction). Optionally:

    * ``content_aware``  -- concatenates a BERT content embedding of the
      cascade's text to every timestep before the output projection;
    * ``knowledge_aware`` -- adds min-max-rescaled knowledge-graph attention
      scores to the logits as a soft regularizer.

    Users already present earlier in the cascade are masked out of the
    prediction at every step.
    """

    def __init__(self, config):
        """Build sub-modules from ``config``.

        Expected config keys: ``dataset``, ``pos_dim``, ``node_num``,
        ``knowledge_aware``, ``content_aware``, ``embed_dim``, ``hidden_dim``.
        Loads dataset resources from ``../data/<dataset>/`` as a side effect.
        """
        super(KgDiffusePlus, self).__init__()
        self.dataset = config['dataset']
        self.pos_dim = config['pos_dim']
        # +1 reserves index 0 as padding; the loss ignores that index below.
        self.user_size = config['node_num'] + 1
        self.knowledge_aware = config['knowledge_aware']
        self.content_aware = config['content_aware']
        self.content_dict = load_content('../data/{}/'.format(config['dataset']))

        # mode: ['sequence', 'content', 'knowledge']
        self.user_embed = nn.Embedding(self.user_size, config['embed_dim'])
        # NOTE(review): pos_embed is created but not used in forward() here —
        # presumably consumed by a sub-module or kept for compatibility; verify.
        self.pos_embed = nn.Embedding(1000, self.pos_dim)
        if self.content_aware:
            self.text_encoder = BertTextEncoder(config)

        if self.knowledge_aware:
            self.uikg_dict = load_uikg('../data', config['dataset'])
            self.content_kg_dict = json.load(open('../data/{}/content.json'.format(config['dataset']), 'r'))
            self.kg_attention_pooling = KGAttentionPooling(config, self.content_kg_dict, self.uikg_dict)
        self.cascade_encoder = GRUCascadeEncoder(config)

        self.softmax = nn.Softmax(dim=1)
        # Sum (not mean) of per-step losses; padding index 0 contributes nothing.
        self.cross_ent = nn.CrossEntropyLoss(ignore_index=0, reduction='sum')
        self.layer_norm = nn.LayerNorm(config['hidden_dim'])

        # When content features are concatenated, the projection input widens
        # from hidden_dim to hidden_dim + embed_dim.
        self.linear = nn.Linear(
            config['hidden_dim'] + config['embed_dim'] if self.content_aware else config['hidden_dim'], self.user_size,
            bias=True)

        initrange = 0.1
        self.linear.bias.data.fill_(0)
        self.linear.weight.data.uniform_(-initrange, initrange)
        self.user_embed.weight.data.uniform_(-initrange, initrange)
        self.dropout_ = nn.Dropout(0.1)

    def forward(self, content, batch_seqs):
        """Score the next user at every cascade position.

        Args:
            content: text input for the content/KG encoders (opaque here;
                forwarded to ``text_encoder`` / ``kg_attention_pooling``).
            batch_seqs: LongTensor of user ids, shape (batch, seq_len);
                the last position is dropped because step t predicts t+1.

        Returns:
            Flattened logits of shape (batch * (seq_len - 1), user_size),
            with previously-seen users masked out.
        """
        # Position t predicts the user at t+1, so the last user is never an input.
        batch_seqs = batch_seqs[:, :-1]
        batch_seqs_embed = self.user_embed(batch_seqs)
        batch_size, max_length, embed_size = batch_seqs_embed.shape

        content_embedding = None
        if self.content_aware:
            content_embedding = self.text_encoder(content)

        out = self.cascade_encoder(batch_seqs, batch_seqs_embed, content_embedding)
        out = self.dropout_(out)
        out = self.layer_norm(out)
        if content_embedding is not None:
            # Broadcast the per-cascade content vector over every timestep,
            # then concatenate it to the hidden state feature-wise.
            content_cat_temp = th.repeat_interleave(content_embedding.unsqueeze(0), max_length, -2)
            content_cat_temp = content_cat_temp.reshape(batch_size, -1, embed_size)
            out = th.cat([out, content_cat_temp], -1)

        out = self.linear(out)  # (bsz, user_len, |U|)
        if self.knowledge_aware:
            o_min, _ = th.min(out, dim=1)
            o_max, _ = th.max(out, dim=1)

            knowledge_regularization = self.kg_attention_pooling(content)
            # Min-max rescale the KG scores into the dynamic range of the
            # logits. Clamp the denominator: a constant logit column would
            # otherwise make o_max == o_min and yield NaN.
            denom = (o_max - o_min).clamp(min=1e-12)
            knowledge_regularization = (knowledge_regularization - o_min) / denom
            knowledge_regularization = knowledge_regularization.reshape(batch_size, 1, self.user_size)
            knowledge_regularization = th.repeat_interleave(knowledge_regularization, max_length, -2)
            out = knowledge_regularization + out

        # Mask users already seen earlier in the cascade. detach() replaces the
        # deprecated Variable(..., requires_grad=False); moving to out.device
        # replaces the hard-coded .cuda(), which crashed on CPU-only machines.
        previous_mask = get_previous_user_mask(batch_seqs, self.user_size).detach().to(out.device)
        out = out + previous_mask

        return out.view(-1, out.size(-1))

    def loss(self, prob, labels):
        """Summed cross-entropy over all steps; label 0 (padding) is ignored."""
        return self.cross_ent(prob, labels)