"""
@Filename       : kg_diffuse.py
@Create Time    : 2021/9/22 8:24
@Author         : Rylynn
@Description    : 

"""
import json
import os.path
import pickle

import dgl
import numpy as np
import torch
import torch as th
import torch.nn as nn

from dgl.nn import GATConv

from torch.autograd import Variable

from model.kg.cascade_encoder import GRUCascadeEncoder, get_previous_user_mask
from model.kg.text_encoder import BertTextEncoder
from util.preprocess import load_content, load_vocab_dict


class KgDiffuse(nn.Module):
    """Knowledge-aware diffusion prediction model.

    Predicts, for every prefix of a user cascade, the next user to be
    activated. The base signal is a GRU cascade encoder over user
    embeddings; optionally a BERT content encoding is concatenated
    (``content_aware``) and a precomputed content-user knowledge-graph
    similarity prior is added to the logits (``knowledge_aware``).
    """

    def __init__(self, config):
        super(KgDiffuse, self).__init__()
        self.dataset = config['dataset']
        self.pos_dim = config['pos_dim']
        # +1 reserves index 0 for padding (matches CrossEntropyLoss ignore_index=0).
        self.user_size = config['node_num'] + 1
        self.knowledge_aware = config['knowledge_aware']
        self.content_aware = config['content_aware']
        self.content_dict = load_content('../data/{}/'.format(config['dataset']))

        # mode: ['sequence', 'content', 'knowledge']
        self.user_embed = nn.Embedding(self.user_size, config['embed_dim'])
        self.pos_embed = nn.Embedding(1000, self.pos_dim)
        if self.content_aware:
            self.text_encoder = BertTextEncoder(config)

        if self.knowledge_aware:
            # Per-content knowledge graphs keyed by content id; close the
            # handle deterministically instead of leaking it via json.load(open(...)).
            with open('../data/{}/content.json'.format(config['dataset']), 'r') as f:
                self.content_kg_dict = json.load(f)
            self.init_discrete_kg_score()

        self.cascade_encoder = GRUCascadeEncoder(config)

        self.softmax = nn.Softmax(dim=1)
        self.cross_ent = nn.CrossEntropyLoss(ignore_index=0, reduction='sum')
        self.layer_norm = nn.LayerNorm(config['hidden_dim'])

        # Projection to the user vocabulary; input is widened when the
        # content embedding is concatenated onto the cascade encoding.
        self.linear = nn.Linear(
            config['hidden_dim'] + config['embed_dim'] if self.content_aware else config['hidden_dim'],
            self.user_size,
            bias=True)

        initrange = 0.1
        self.linear.bias.data.fill_(0)
        self.linear.weight.data.uniform_(-initrange, initrange)
        self.user_embed.weight.data.uniform_(-initrange, initrange)
        self.dropout_ = nn.Dropout(0.1)

    def forward(self, content, batch_seqs):
        """Score next-user logits for every prefix position of each cascade.

        Args:
            content: batch of content ids, consumed by the text encoder and
                by ``discrete_measure`` (assumed iterable of hashable ids —
                TODO confirm against caller).
            batch_seqs: (batch, seq_len) int tensor of user ids; the last
                step is dropped so position t predicts the user at t+1.

        Returns:
            (batch * (seq_len - 1), user_size) logit matrix, with already
            activated users masked out per prefix.
        """
        batch_seqs = batch_seqs[:, :-1]
        batch_seqs_embed = self.user_embed(batch_seqs)
        batch_size, max_length, embed_size = batch_seqs_embed.shape

        content_embedding = None
        if self.content_aware:
            content_embedding = self.text_encoder(content)

        out = self.cascade_encoder(batch_seqs, batch_seqs_embed, content_embedding)
        out = self.dropout_(out)
        out = self.layer_norm(out)
        if content_embedding is not None:
            # Broadcast each cascade's content vector to all of its time
            # steps before concatenating along the feature dimension.
            # (Assumes content_embedding is (batch, embed_dim) — TODO confirm.)
            content_cat_temp = th.repeat_interleave(content_embedding.unsqueeze(0), max_length, -2)
            content_cat_temp = content_cat_temp.reshape(batch_size, -1, embed_size)
            out = th.cat([out, content_cat_temp], -1)

        out = self.linear(out)  # (bsz, user_len, |U|)
        if self.knowledge_aware:
            # Rescale the [0, 1] KG similarity by the per-sample logit range
            # so the prior is comparable in magnitude to the learned scores.
            o_min, _ = th.min(out, dim=1)
            o_max, _ = th.max(out, dim=1)
            knowledge_regularization = self.discrete_measure(content)
            knowledge_regularization = knowledge_regularization * (o_max - o_min)
            knowledge_regularization = knowledge_regularization.reshape(batch_size, 1, self.user_size)
            knowledge_regularization = th.repeat_interleave(knowledge_regularization, max_length, -2)
            out = knowledge_regularization + out

        # Forbid re-activating users already in the prefix. The deprecated
        # Variable(..., requires_grad=False) wrapper was a no-op (PyTorch >= 0.4).
        previous_mask = get_previous_user_mask(batch_seqs, self.user_size).cuda()
        out = out + previous_mask

        return out.view(-1, out.size(-1))

    def loss(self, prob, labels):
        """Summed cross entropy over non-padding positions (label 0 ignored)."""
        return self.cross_ent(prob, labels)

    def kg_to_set(self, kg):
        """Collect every entity (heads and tails) mentioned in a small KG.

        Args:
            kg: either a dict mapping ``head -> [(relation, tail), ...]`` or
                a list of ``(head, relation, tail)`` triples.

        Returns:
            The set of all head and tail entities; an empty set for any
            other input type (instead of falling through to None, which
            would crash callers that union or measure the result).
        """
        entity_set = set()
        if isinstance(kg, dict):
            for head, rel_tails in kg.items():
                entity_set.add(head)
                for _rel, tail in rel_tails:
                    entity_set.add(tail)
        elif isinstance(kg, list):
            for head, _rel, tail in kg:
                entity_set.add(head)
                entity_set.add(tail)
        return entity_set

    def init_discrete_kg_score(self):
        """Build (or load from cache) the per-content KG similarity rows.

        For every content id c, computes a (user_size,) vector whose entry
        u is |entities(c) ∩ entities(u)| / |entities(c)|, where a user's
        entity set is the union over all contents of cascades that user
        participated in. The result is pickled to kg_discrete_score.pkl so
        subsequent runs can skip the O(contents * users) pass.
        """
        vocab_dict = load_vocab_dict('../data', self.dataset)
        cache_path = '../data/{}/kg_discrete_score.pkl'.format(self.dataset)
        if os.path.exists(cache_path):
            with open(cache_path, 'rb') as f:
                self.discrete_kg_score = pickle.load(f)
            print('Init content-user kg score matrix from local file')
            return
        self.discrete_kg_score = {}
        print('Init content-user kg score matrix...')

        # Map each (re-indexed) user to the list of cascade ids they joined.
        # Format per line: "<cid> <user,ts> <user,ts> ..." — TODO confirm.
        user_dict = {}
        with open('../data/{}/cascade.txt'.format(self.dataset)) as f:
            for line in f:
                parts = line.strip().split(' ')
                cid, users = parts[0], parts[1:]
                for user_ts in users:
                    uid = vocab_dict[user_ts.split(',')[0]]
                    user_dict.setdefault(uid, []).append(cid)

        # A user's entity set is the union over the contents they spread.
        user_entity_set = {}
        for uid, cid_list in user_dict.items():
            entities = set()
            for cid in cid_list:
                entities |= self.kg_to_set(self.content_kg_dict[cid])
            user_entity_set[uid] = entities

        total = len(self.content_kg_dict)
        for count, (cid, ckg) in enumerate(self.content_kg_dict.items(), start=1):
            if count % 500 == 0:
                print('{}/{}'.format(count, total))
            self.discrete_kg_score[cid] = th.zeros(self.user_size)
            ckg_set = self.kg_to_set(ckg)
            if not ckg_set:
                continue  # no entities: leave the all-zero row
            for uid, ukg_set in user_entity_set.items():
                self.discrete_kg_score[cid][uid] = len(ckg_set & ukg_set) / len(ckg_set)

        with open(cache_path, 'wb') as f:
            pickle.dump(self.discrete_kg_score, f)

    def discrete_measure(self, content_id):
        """Stack the precomputed KG score rows for a batch of content ids.

        Unknown content ids fall back to a uniform 1/user_size row.
        NOTE(review): hard-codes .cuda(), matching the rest of the model —
        a GPU is required at call time.
        """
        kg_score = th.zeros(len(content_id), self.user_size).cuda()
        uniform = 1.0 / self.user_size
        for i, cid in enumerate(content_id):
            if cid in self.discrete_kg_score:
                kg_score[i] = self.discrete_kg_score[cid]
            else:
                # Scalar fill avoids materializing a user_size-long Python list.
                kg_score[i] = uniform
        return kg_score

