"""
@Filename       : het_diffuse.py
@Create Time    : 2020/12/11 21:43
@Author         : Rylynn
@Description    : Heterogeneous Diffusion Graph part of the code is reference from
    https://github.com/dmlc/dgl/blob/master/examples/pytorch/han/model.py
"""

import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
import numpy as np

from dgl.nn.pytorch import GATConv, GraphConv
from torch.autograd import Variable
from torch.nn import init

from model import HiDAN
from model.het_diffuse.het_graph_builder import build_het_graph, metapath_reachable_graph
from model.het_diffuse.transformer import TransformerBlock


class SemanticAttention(nn.Module):
    """Semantic-level attention over metapath channels (as in HAN).

    Scores each of the M metapath-specific embeddings with a small MLP,
    averages the scores over all nodes, and returns the softmax-weighted
    sum of the channels for every node.
    """

    def __init__(self, in_size, hidden_size=128):
        super(SemanticAttention, self).__init__()

        # Two-layer scorer: in_size -> hidden_size -> a single scalar score.
        self.project = nn.Sequential(
            nn.Linear(in_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1, bias=False),
        )

    def forward(self, z):
        # z: (N, M, D*K) — N nodes, M metapath channels.
        scores = self.project(z)           # (N, M, 1)
        mean_scores = scores.mean(dim=0)   # (M, 1), averaged over nodes
        beta = torch.softmax(mean_scores, dim=0)          # (M, 1)
        beta = beta.expand((z.shape[0],) + beta.shape)    # (N, M, 1)
        fused = (beta * z).sum(1)          # (N, D*K)
        return fused


class HANLayer(nn.Module):
    """
    HAN layer: one graph convolution per metapath, fused by semantic attention.

    Arguments
    ---------
    meta_paths : list of metapaths, each as a list of edge types
    threshold : per-metapath thresholds forwarded to metapath_reachable_graph
    in_size : input feature dimension
    out_size : output feature dimension
    layer_num_heads : number of attention heads
    dropout : Dropout probability (unused while GraphConv replaces GATConv)

    Inputs
    ------
    g : DGLHeteroGraph
        The heterogeneous graph
    h : tensor
        Input features
    sample_graph : dict or None
        Per-metapath cache of neighbor-sampled graphs; filled on first use
        and returned so callers can reuse it across layers.

    Outputs
    -------
    (tensor, dict)
        The fused output feature and the (possibly updated) sample cache
    """

    def __init__(self, meta_paths, threshold, in_size, out_size, layer_num_heads, dropout):
        super(HANLayer, self).__init__()

        # One conv layer per metapath-based adjacency matrix. GraphConv stands
        # in for GATConv, so its output width is out_size * num_heads to keep
        # the downstream dimensions identical.
        self.gat_layers = nn.ModuleList()
        for _ in range(len(meta_paths)):
            self.gat_layers.append(
                GraphConv(in_size, out_size * layer_num_heads, allow_zero_in_degree=True))
        self.semantic_attention = SemanticAttention(in_size=out_size * layer_num_heads)
        # Tuples so metapaths can serve as dict keys.
        self.meta_paths = list(tuple(meta_path) for meta_path in meta_paths)
        self.threshold = threshold

        self._cached_graph = None
        self._cached_coalesced_graph = {}

    def forward(self, g, h, sample_graph):
        if sample_graph is None:
            sample_graph = {}

        semantic_embeddings = []

        # Rebuild the per-metapath reachability graphs only when `g` changes.
        if self._cached_graph is None or self._cached_graph is not g:
            self._cached_graph = g
            self._cached_coalesced_graph.clear()
            for meta_path, t in zip(self.meta_paths, self.threshold):
                self._cached_coalesced_graph[meta_path] = metapath_reachable_graph(
                    g, meta_path, t)

        for i, meta_path in enumerate(self.meta_paths):
            new_g = self._cached_coalesced_graph[meta_path]
            # BUG FIX: the original guard was `not sample_graph.get(meta_path)`,
            # which relies on the truthiness of a DGLGraph object and would
            # silently re-sample whenever a cached graph evaluated falsy.
            # Test key presence explicitly instead.
            if meta_path not in sample_graph:
                # NOTE(review): fanout=20 and the 'cuda' target device are
                # hard-coded — consider making them configurable.
                sample_graph[meta_path] = dgl.sampling.sample_neighbors(
                    new_g.to('cpu'),
                    torch.arange(0, new_g.number_of_nodes()),
                    fanout=20).to('cuda')

            new_g = sample_graph[meta_path]
            semantic_embeddings.append(self.gat_layers[i](new_g, h).flatten(1))
        semantic_embeddings = torch.stack(semantic_embeddings, dim=1)  # (N, M, D * K)

        return self.semantic_attention(semantic_embeddings), sample_graph  # (N, D * K)


class HAN(nn.Module):
    """A stack of HANLayers.

    The first layer maps in_size -> hidden_size * num_heads[0]; each later
    layer consumes the previous layer's widened output. The neighbor-sample
    cache produced by the first layer is threaded through all layers.
    """

    def __init__(self, meta_paths, threshold, in_size, hidden_size, num_heads, dropout):
        super(HAN, self).__init__()

        stack = [HANLayer(meta_paths, threshold, in_size, hidden_size, num_heads[0], dropout)]
        for depth in range(1, len(num_heads)):
            prev_width = hidden_size * num_heads[depth - 1]
            stack.append(HANLayer(meta_paths, threshold, prev_width,
                                  hidden_size, num_heads[depth], dropout))
        self.layers = nn.ModuleList(stack)
        self.dropout = nn.Dropout(dropout)

    def forward(self, g, h):
        sample_graph = None
        for idx, layer in enumerate(self.layers):
            h, sample_graph = layer(g, h, sample_graph)
            # Dropout is applied after the first layer only.
            if idx == 0:
                h = self.dropout(h)
        return h

def get_previous_user_mask(seq, user_size):
    """Build an additive -inf mask over users already activated in each cascade.

    Args:
        seq: LongTensor of shape (batch, seq_len) holding user ids (0 = PAD).
        user_size: vocabulary size |U|; the output's last dimension.

    Returns:
        FloatTensor (batch, seq_len, user_size) on seq's device; entry
        (b, t, u) is -inf if user u appears in seq[b, :t+1] or u == 0 (PAD),
        and 0 otherwise.  Not part of the autograd graph.
    """
    assert seq.dim() == 2
    batch, length = seq.size(0), seq.size(1)
    device = seq.device

    # Row t of (batch, length, length) repeats the whole sequence; the lower-
    # triangular mask then keeps only positions <= t.
    seqs = seq.repeat(1, 1, length).view(batch, length, length)
    previous_mask = torch.tril(torch.ones(batch, length, length, device=device))
    masked_seq = previous_mask * seqs.float()

    # Append a zero column so index 0 (PAD) is always among the scatter
    # targets and therefore always masked.
    pad_col = torch.zeros(batch, length, 1, device=device)
    masked_seq = torch.cat([masked_seq, pad_col], dim=2)

    # Scatter -inf onto every already-seen user id.
    mask = torch.zeros(batch, length, user_size, device=device)
    mask.scatter_(2, masked_seq.long(), float('-inf'))
    return mask.detach()


class HetDiffuse(nn.Module):
    """Diffusion-prediction model: a HAN encoder over a heterogeneous user
    graph combined with a transformer decoder over the cascade sequence.

    Expects `config` to provide: meta_paths, threshold, feat_dim, hidden_dim,
    num_heads, dropout, window_size, seq_head, node_num, embed_dim,
    user_feat (tensor of shape (node_num + 1, feat_dim)), dataset.
    """

    def __init__(self, config):
        super(HetDiffuse, self).__init__()
        self.han = HAN(meta_paths=config['meta_paths'],
                       threshold=config['threshold'],
                       in_size=config['feat_dim'],
                       hidden_size=config['hidden_dim'],
                       num_heads=config['num_heads'],
                       dropout=config['dropout'],
                       )

        self.window_size = config['window_size']
        self.seq_head = config['seq_head']
        # +1 reserves id 0 for PAD.
        self.user_size = config['node_num'] + 1
        self.user_embed = nn.Embedding(self.user_size, config['embed_dim'])
        self.pos_dim = 8
        # TODO: Time embedding
        assert config['user_feat'].shape[0] == self.user_size
        assert config['user_feat'].shape[1] == config['feat_dim']
        # BUG FIX: nn.Embedding.from_pretrained is a classmethod that returns
        # a NEW module.  The original code called it on an instance and
        # discarded the result, so the pretrained user features were never
        # actually loaded and the HAN encoder ran on random weights.
        self.user_feat = nn.Embedding.from_pretrained(embeddings=config['user_feat'], freeze=True)
        self.pos_embed = nn.Embedding(1000, self.pos_dim)
        self.decoder_attention = TransformerBlock(input_size=config['embed_dim'] + self.pos_dim,
                                                  n_heads=config['seq_head'])

        self.linear = nn.Linear(config['hidden_dim'] + self.pos_dim, self.user_size, bias=True)

        self.cross_ent = nn.CrossEntropyLoss(ignore_index=0, reduction='sum')
        # NOTE(review): the data path and 'cuda:0' device are hard-coded —
        # consider reading them from `config`.
        self.het_graph = build_het_graph(root_path='../data', dataset=config['dataset'], maxlen=500).to('cuda:0')
        self.dropout = nn.Dropout(0.1)

        nn.init.xavier_normal_(self.user_embed.weight)
        nn.init.xavier_normal_(self.pos_embed.weight)
        nn.init.xavier_normal_(self.linear.weight)

    def forward(self, batch_seqs, batch_seqs_length):
        """Score the next user at every step of each cascade.

        Args:
            batch_seqs: LongTensor (batch, seq_len) of user ids, 0 = PAD.
            batch_seqs_length: sequence lengths (currently unused; kept for
                interface compatibility with callers).

        Returns:
            FloatTensor (batch * (seq_len - 1), user_size) of logits.
        """
        # Position t is used to predict the user at t + 1, so drop the last.
        batch_seqs = batch_seqs[:, :-1]
        # Structure-aware user embeddings from the heterogeneous graph.
        semantics_embed = self.han(self.het_graph, self.user_feat.weight)
        batch_seqs_embed = self.user_embed(batch_seqs)
        batch_size = batch_seqs_embed.shape[0]
        max_length = batch_seqs_embed.shape[1]
        embed_size = batch_seqs_embed.shape[2]

        # Attention mask over PAD positions.
        mask = (batch_seqs == 0)
        mask = mask.cuda()  # NOTE(review): hard-coded device

        # Position indices 0..seq_len-1, one row per sequence in the batch.
        batch_t = torch.arange(batch_seqs.shape[1]).expand(batch_seqs.size()).cuda()

        order_embed = self.dropout(self.pos_embed(batch_t))
        batch_seqs_embed = self.dropout(batch_seqs_embed)
        final_embed = torch.cat([batch_seqs_embed, order_embed], dim=-1)

        # Self-attention decoder over the cascade sequence.
        att_out = self.decoder_attention(final_embed, final_embed, final_embed, mask=mask)

        # ------- Heterogeneous network encoder ----------
        # Sliding-window mean of the graph embeddings along the sequence.
        # NOTE(review): this result is currently unused (output = att_out
        # below); it is kept to preserve existing behavior.
        semantics_batch_embed = semantics_embed[batch_seqs]

        semantics_mean = torch.zeros(batch_size, max_length, embed_size).cuda()
        semantics_mean += semantics_batch_embed
        for i in range(1, min(self.window_size, max_length)):
            semantics_mean[:, i:, :] += semantics_batch_embed[:, :-i, :]
            # Position i has accumulated exactly i + 1 terms at this point.
            semantics_mean[:, i, :] /= (i + 1)

        # Positions past the window each accumulated window_size terms.
        semantics_mean[:, self.window_size:, :] /= self.window_size

        output = att_out
        output = self.dropout(output)

        output = self.linear(output)  # (bsz, user_len, |U|)

        return output.view(-1, output.size(-1))

    def loss(self, prob, labels):
        """Summed cross-entropy over non-PAD targets (ignore_index=0)."""
        return self.cross_ent(prob, labels)

