"""
@Filename       : kg_encoder.py
@Create Time    : 2021/9/26 15:33
@Author         : Rylynn
@Description    : Knowledge-graph encoders: mean pooling (MeanKGPooling) and
                  attention-based pooling (KGAttentionPooling) over user/content
                  knowledge graphs.

"""
import random

import numpy as np
import torch
import torch.nn as nn
from dgl.nn import AvgPooling
from dgl.nn import GraphConv


class MeanKGPooling(nn.Module):
    """Mean-pools each user's knowledge-graph node embeddings into one vector.

    Output rows 0 and 1 are reserved "special" users whose embeddings are
    learned directly; every other user in a batch gets the average of the
    (linearly transformed) knowledge-entity embeddings over its KG.
    """

    def __init__(self, config, kg, ke):
        """
        Args:
            config: dict with at least 'node_num' and 'embed_dim'.
            kg: mapping from user id to that user's DGL knowledge graph.
            ke: numpy array (num_entities, embed_dim) of pretrained
                knowledge-entity embeddings.
        """
        super(MeanKGPooling, self).__init__()
        # Append an all-zero row to serve as a padding embedding.
        ke = np.row_stack((ke, np.zeros(shape=(1, ke.shape[1]))))
        print('Shape of knowledge embedding: {} x {}'.format(ke.shape[0], ke.shape[1]))

        self.ke = torch.from_numpy(ke).cuda().float()
        self.kg = kg

        self.user_size = config['node_num'] + 1
        self.embed_dim = config['embed_dim']
        self.ke_transform = nn.Linear(config['embed_dim'], config['embed_dim']).cuda()
        # BUG FIX: the original stored a plain tensor with requires_grad=True.
        # Such a tensor is never registered on the module, so parameters()
        # never yields it and the optimizer never updates it. nn.Parameter
        # registers it properly (and moves with the module across devices).
        self.zero_one_embedding = nn.Parameter(torch.rand((2, config['embed_dim'])))

        self.pooling_layer = AvgPooling()

    def forward(self, batch_seqs):
        """Map a batch of user-id sequences to pooled KG embeddings.

        Args:
            batch_seqs: LongTensor (batch, seq_len) of global user ids; ids 0
                and 1 are special tokens with dedicated learned embeddings.

        Returns:
            Tensor (batch, seq_len, embed_dim) of per-position embeddings.
        """
        # Compact the user ids seen in this batch into 0..K-1, keeping the
        # two special ids pinned at 0 and 1.
        user_map = {0: 0, 1: 1}
        max_user_id = 2
        for seqs in batch_seqs:
            for user in seqs:
                uid = user.item()
                if uid not in user_map:
                    user_map[uid] = max_user_id
                    max_user_id += 1

        new_batch_seqs = torch.LongTensor(batch_seqs.shape[0], batch_seqs.shape[1]).cuda()
        for i, seqs in enumerate(batch_seqs):
            for j, user in enumerate(seqs):
                new_batch_seqs[i][j] = user_map[user.item()]

        # Special ids are pooled from zero_one_embedding, not from a KG.
        user_map.pop(0)
        user_map.pop(1)
        user_num = len(user_map)
        # Order graphs by their compact index so row 2+i matches user i.
        sorted_user_map = sorted(user_map.items(), key=lambda x: x[1])
        kg_list = [self.kg[uid] for uid, _ in sorted_user_map]

        ke = self.ke_transform(self.ke)
        kg_pooling = torch.zeros(user_num + 2, self.embed_dim).cuda()
        kg_pooling[:2] = self.zero_one_embedding

        # NOTE(review): the full transformed entity table is passed as node
        # features for every graph — assumes each user KG carries all entity
        # nodes; confirm against how self.kg is built.
        for i, g in enumerate(kg_list):
            kg_pooling[2 + i] = self.pooling_layer(g, ke)

        return kg_pooling[new_batch_seqs]


class KGAttentionPooling(nn.Module):
    """Attention-based pooling over content KGs and user-interest KGs.

    forward() attention-pools the entities linked to each content into one
    vector, and scores it against every user's pooled interest-KG embedding
    by dot product. User poolings are cached in ``self.uikg_pooling`` and a
    random subset is refreshed with gradients on every forward pass
    ("stochastic learning on cross-interest attention").
    """

    def __init__(self, config, content_kg, uikg_dict):
        """
        Args:
            config: dict with 'embed_dim', 'node_num' and 'dataset'.
            content_kg: mapping content id -> raw knowledge graph (dict or
                triple-list form, see kg_to_list).
            uikg_dict: mapping user id -> (DGL interest graph, node-id list).
        """
        super(KGAttentionPooling, self).__init__()
        # Entity-table sizes per dataset (+1 for the padding entity).
        entity_num = {'memetracker': 48007 + 1, 'lastfm': 20877 + 1, 'dblp': 14406 + 1, 'dblp_new': 12532 + 1}
        self.embed_dim = config['embed_dim']
        self.user_num = config['node_num'] + 1
        self.hidden_dim = self.embed_dim
        self.uikg_dict = uikg_dict
        self.content_kg = self.preprocess_content_kg(content_kg, config['dataset'])

        # Number of user poolings refreshed (with gradients) per forward pass.
        self.inner_batch = 256
        self.kg_feat = nn.Embedding(entity_num[config['dataset']], config['embed_dim'])
        self.kg_conv = GraphConv(self.embed_dim, self.hidden_dim)
        self.kg_attention = nn.Linear(self.hidden_dim, 1, bias=False)

        self.content_attention = nn.Sequential(nn.Linear(self.embed_dim, self.hidden_dim), nn.Tanh(),
                                               nn.Linear(self.hidden_dim, 1, bias=False))

    def forward(self, contents_id):
        """Score each content against every user.

        Args:
            contents_id: iterable of content ids.

        Returns:
            Tensor (len(contents_id), user_num) of content-user scores.
        """
        # Lazily build the full user-pooling cache on first use.
        if not hasattr(self, 'uikg_pooling'):
            self.init_uikg_pooling()

        # Stochastic learning on cross-interest attention: refresh a random
        # user subset with gradients enabled.
        # BUG FIX: random.sample() on a dict view raises TypeError on
        # Python >= 3.11; it needs a sequence.
        sampled_user_batch = set(random.sample(list(self.uikg_dict), self.inner_batch))
        self.update_uikg_pooling(sampled_user_batch)

        ckg_list = [self.content_kg.get(cid, []) for cid in contents_id]
        # default=0 keeps an empty contents_id batch from crashing max().
        max_ckg_length = max((len(ckg) for ckg in ckg_list), default=0)

        # If no content has any linked entity, every score is zero.
        if max_ckg_length == 0:
            return torch.zeros(len(contents_id), self.user_num).cuda()

        content_entities_feat = torch.zeros(len(contents_id), max_ckg_length, self.embed_dim).cuda()
        # -inf additive mask removes padding slots from the softmax.
        mask = torch.ones(len(contents_id), max_ckg_length).cuda() * float('-inf')
        for idx, ckg in enumerate(ckg_list):
            if len(ckg) != 0:
                content_entities_feat[idx][:len(ckg)] = self.kg_feat(torch.LongTensor(ckg).cuda())
                mask[idx][:len(ckg)] = 0
            else:
                # Entity-less content attends only to its (all-zero) first
                # slot, yielding a zero vector instead of softmax NaNs.
                mask[idx][0] = 0

        content_entities_score = self.content_attention(content_entities_feat).squeeze(-1)
        content_entities_score = torch.softmax(content_entities_score + mask, dim=1)

        # Weighted sum of entity features -> one vector per content.
        content_pooling = torch.bmm(content_entities_score.reshape(len(ckg_list), 1, max_ckg_length),
                                    content_entities_feat)
        content_pooling = content_pooling.reshape(len(ckg_list), self.embed_dim)
        # Dot-product score against every user's pooled interest embedding.
        return torch.matmul(content_pooling, torch.transpose(self.uikg_pooling, -1, -2))

    def preprocess_content_kg(self, content_kg, dataset):
        """Replace raw entity ids in each content KG with entity-table indices.

        Mutates and returns ``content_kg``; entities missing from the mapping
        file are dropped.
        """
        entity_map_dict = {}
        with open('../data/{}/entities.tsv'.format(dataset), 'r') as f:
            for line in f:
                idx, entities_id = line.strip().split('\t')
                # BUG FIX: int() instead of eval() — the index column is a
                # plain integer and eval on file contents is unsafe.
                entity_map_dict[entities_id] = int(idx)

        for cid, ckg in content_kg.items():
            content_kg[cid] = [entity_map_dict[entity] for entity in self.kg_to_list(ckg)
                               if entity in entity_map_dict]

        return content_kg

    def kg_to_list(self, kg):
        """Collect the distinct entities of a KG into a list.

        Accepts either ``{head: [(rel, tail), ...]}`` or ``[(h, r, t), ...]``.
        Always returns a list (the original returned a raw set for the dict
        form and None for unknown types).
        """
        entity_set = set()
        if isinstance(kg, dict):
            for h, rt in kg.items():
                entity_set.add(h)
                for r, t in rt:
                    entity_set.add(t)
        elif isinstance(kg, list):
            for h, r, t in kg:
                entity_set.add(h)
                entity_set.add(t)
        return list(entity_set)

    def _pool_uikg(self, uid):
        """Attention-pool one user's interest KG into a single hidden vector."""
        uikg, origin_nid = self.uikg_dict[uid]
        feature = self.kg_feat(torch.LongTensor(origin_nid).cuda())
        feat = self.kg_conv(uikg, feature)  # node_num x hidden_dim
        feat_score = torch.softmax(self.kg_attention(feat), dim=0)
        return torch.sum(feat * feat_score, dim=0)

    @torch.no_grad()
    def init_uikg_pooling(self):
        """Gradient-free one-off initialization of every user's pooling."""
        self.uikg_pooling = torch.zeros(self.user_num, self.hidden_dim).cuda()
        for uid in self.uikg_dict:
            self.uikg_pooling[uid] = self._pool_uikg(uid)

    def update_uikg_pooling(self, updated_uid):
        """Recompute (with gradients) the poolings of the given user ids."""
        for uid in updated_uid:
            self.uikg_pooling[uid] = self._pool_uikg(uid)

    def clear_uikg_pooling_gradient(self):
        """Detach the cached user poolings from the autograd graph."""
        self.uikg_pooling = self.uikg_pooling.detach()

    # CODE for batch graph pooling

    # cursor = 0
    # sorted_uikg = sorted(self.uikg_dict.items(), key=lambda x: x[0])
    # while cursor < len(sorted_uikg):
    #     cursor_end = min(cursor + self.inner_batch, len(sorted_uikg))
    #     bs = cursor_end - cursor
    #     inner_batch_sorted_uikg = sorted_uikg[cursor: cursor_end]
    #     total_nodes_num = sum(map(lambda x: x[1][0].number_of_nodes(), inner_batch_sorted_uikg))
    #     max_node_num = max(map(lambda x: x[1][0].number_of_nodes(), inner_batch_sorted_uikg))
    #
    #     uid_list = []
    #     node_num_list = []
    #     inner_batch_g = []
    #     inner_batch_nodes_features = torch.zeros(total_nodes_num, self.embed_dim).cuda()
    #     kg_conv_features = torch.zeros(bs, max_node_num, self.embed_dim).cuda()
    #     mask = torch.ones(bs, max_node_num).cuda() * float('-inf')
    #
    #     node_cursor = 0
    #     for uid, (uikg, origin_nid) in inner_batch_sorted_uikg:
    #         uid_list.append(uid)
    #         node_num_list.append(len(origin_nid))
    #         inner_batch_g.append(uikg)
    #
    #         inner_batch_nodes_features[node_cursor: node_cursor + len(origin_nid)] = self.kg_feat(torch.LongTensor(origin_nid).cuda())
    #         node_cursor += len(origin_nid)
    #
    #     feat = self.kg_conv(dgl.batch(inner_batch_g), inner_batch_nodes_features)  # batch * max_node_num * feat_size
    #
    #     node_cursor = 0
    #     for idx, node_num in enumerate(node_num_list):
    #         kg_conv_features[idx][:node_num] = feat[node_cursor: node_cursor + node_num]
    #         node_cursor += node_num
    #
    #     feat_score = self.kg_attention(kg_conv_features).squeeze(-1)
    #     feat_score = mask * feat_score
    #     feat_score = torch.softmax(feat_score, dim=0)
    #
    #     uikg_pooling[cursor: cursor_end]  = torch.bmm(feat_score.reshape(bs, 1, max_node_num), kg_conv_features).reshape(bs, self.hidden_dim)
    #
    #     cursor += self.inner_batch
