import dgl
import math
# import torch
# import torch.nn as nn
# import torch.nn.functional as F

from .layer import RelTemporalEncoding, CellDict, DotProduct, HeteroHGTLayer

import mindspore as ms
from mindspore import nn, ops, numpy as mnp
from mindspore.common.initializer import initializer, XavierUniform
import dgl
import numpy as np
import math


class HGTLayer(nn.Cell):
    """Single Heterogeneous Graph Transformer (HGT) layer over a DGL graph.

    Keeps one key/query/value/output projection per node type and one
    attention/message transform (plus a learned head-wise prior) per
    relation.  ``construct`` reads node features stored under ``inp_key``
    and writes the updated destination-node features under ``out_key``;
    it mutates ``G`` in place and returns ``None``.
    """

    def __init__(self, in_dim, out_dim, types, relations, n_heads,
                 dropout=0.2, use_norm=False, use_time=False):
        """
        Args:
            in_dim (int): input feature size per node.
            out_dim (int): output feature size; should be divisible by
                ``n_heads`` (``d_k = out_dim // n_heads``).
            types: iterable of node-type names.
            relations: iterable of edge-type names this layer attends over.
            n_heads (int): number of attention heads.
            dropout (float): dropout rate applied after the output projection.
            use_norm (bool): apply a per-type LayerNorm to the output.
            use_time (bool): add a relative temporal encoding to keys/values
                when edges carry a ``'timestamp'`` feature.
        """
        super(HGTLayer, self).__init__()

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.types = types
        self.relations = relations
        self.use_time = use_time
        if use_time:
            self.time_encoder = RelTemporalEncoding(out_dim)
        self.n_heads = n_heads
        self.d_k = out_dim // n_heads  # per-head feature size
        self.sqrt_dk = math.sqrt(self.d_k)  # attention temperature

        # This MindSpore Dropout API takes the keep probability, hence 1 - dropout.
        self.drop = nn.Dropout(1 - dropout)

        # Map type/relation names to indices into the per-type/per-relation
        # parameter lists built below.
        self.relation_map = {str(r): i for i, r in enumerate(relations)}
        self.type_map = {str(r): i for i, r in enumerate(types)}
        self.k_linears = nn.CellList()
        self.q_linears = nn.CellList()
        self.v_linears = nn.CellList()
        self.a_linears = nn.CellList()
        self.norms = nn.CellList()
        self.use_norm = use_norm

        # One K/Q/V/output projection (and optional LayerNorm) per node type.
        for ty in types:
            self.k_linears.append(nn.Dense(in_dim, out_dim))
            self.q_linears.append(nn.Dense(in_dim, out_dim))
            self.v_linears.append(nn.Dense(in_dim, out_dim))
            self.a_linears.append(nn.Dense(out_dim, out_dim))
            if use_norm:
                self.norms.append(nn.LayerNorm([out_dim]))
        # Learnable skip-connection gate per node type (sigmoid-squashed in construct).
        self.skip = ms.ParameterTuple([ms.Parameter(mnp.ones(1), name='skip_' + t) for t in types])

        # Per-relation parameters: a head-wise prior scale and (d_k x d_k)
        # attention/message transforms for each head.
        self.relation_pri = ms.ParameterTuple([ms.Parameter(mnp.ones(self.n_heads),
                                                            name='pri_' + r)
                                               for r in self.relations])
        self.relation_att = ms.ParameterTuple(
            [ms.Parameter(initializer(XavierUniform(), (n_heads, self.d_k, self.d_k)),
                          name='att_' + r)
             for r in self.relations])
        self.relation_msg = ms.ParameterTuple(
            [ms.Parameter(initializer(XavierUniform(), (n_heads, self.d_k, self.d_k)),
                          name='msg_' + r)
             for r in self.relations])

    def edge_attention(self, edges):
        """DGL edge UDF: compute per-edge attention logits and messages.

        Returns:
            dict with 'a' (unnormalised attention scores, one per head) and
            'v' (per-head message vectors) for every edge in the batch.
        """
        # Empty edge batches would break the matmuls below; short-circuit.
        if len(edges.src['k']) == 0:
            dtype = edges.src['k'].dtype
            return {'a': ms.Tensor([], dtype), 'v': ms.Tensor([], dtype)}
        srctype, etype, dsttype = edges.canonical_etype
        # etype = str(edges.canonical_etype)
        sid, eid = self.type_map[srctype], self.relation_map[etype]
        relation_att = self.relation_att[eid]
        relation_pri = self.relation_pri[eid]
        relation_msg = self.relation_msg[eid]
        if self.use_time and 'timestamp' in edges.data:
            # Add a projected temporal encoding to keys and values in place.
            time_encoding = self.time_encoder(edges.data['timestamp'])
            edges.src['k'] += self.k_linears[sid](time_encoding).view(-1, self.n_heads, self.d_k)
            edges.src['v'] += self.v_linears[sid](time_encoding).view(-1, self.n_heads, self.d_k)
        # Transpose to (heads, edges, d_k), apply the per-relation transform
        # per head, then transpose back to (edges, heads, d_k).
        key = mnp.matmul(edges.src['k'].transpose(1, 0, 2), relation_att).transpose(1, 0, 2)
        # Scaled dot-product score, modulated by the learned relation prior.
        att = (edges.dst['q'] * key).sum(-1) * relation_pri / self.sqrt_dk
        val = mnp.matmul(edges.src['v'].transpose(1, 0, 2), relation_msg).transpose(1, 0, 2)
        return {'a': att, 'v': val}

    def message_func(self, edges):
        """Pass the precomputed message ('v') and logit ('a') downstream.

        Not referenced by ``construct`` (``edge_attention`` serves as the
        message UDF there); presumably kept for external callers.
        """
        return {'v': edges.data['v'], 'a': edges.data['a']}

    def reduce_func(self, nodes):
        """DGL node UDF: softmax the logits over incoming edges and average
        the messages, then flatten heads back to ``out_dim``."""
        att = ops.Softmax(axis=1)(nodes.mailbox['a'])
        h = mnp.sum(att.expand_dims(-1) * nodes.mailbox['v'], axis=1)
        return {'t': h.view(-1, self.out_dim)}

    def construct(self, G, inp_key, out_key):
        """Run one HGT propagation step on graph/block ``G``.

        Reads node features from ``inp_key`` and writes the results to
        ``out_key`` on the destination nodes.  Mutates ``G``; returns None.
        """
        res = {}
        # local_scope keeps the temporary 'k'/'v'/'q'/'t' features from
        # leaking into the caller's graph; results are written after exit.
        with G.local_scope():
            for srctype in G.srctypes:
                sid = self.type_map[srctype]
                G.srcnodes[srctype].data['k'] = self.k_linears[sid](
                    G.srcnodes[srctype].data[inp_key]).view(-1, self.n_heads, self.d_k)
                G.srcnodes[srctype].data['v'] = self.v_linears[sid](
                    G.srcnodes[srctype].data[inp_key]).view(-1, self.n_heads, self.d_k)
            for dsttype in G.dsttypes:
                if G.num_dst_nodes(dsttype) == 0:
                    continue
                did = self.type_map[dsttype]
                G.dstnodes[dsttype].data['q'] = self.q_linears[did](
                    G.dstnodes[dsttype].data[inp_key]).view(-1, self.n_heads, self.d_k)
                # Pre-seed 't' so dst types that receive no messages at all
                # fall back to their input features.
                G.dstnodes[dsttype].data['t'] = G.dstdata[inp_key][dsttype]
            # Only propagate along the relations this layer was built for.
            etypes = [etype for etype in G.canonical_etypes if etype[1] in self.relations]
            G.multi_update_all({etype: (self.edge_attention, self.reduce_func) \
                                for etype in etypes}, cross_reducer='mean')
            for ntype in G.dsttypes:
                if G.num_dst_nodes(ntype) == 0:
                    continue
                tid = self.type_map[ntype]
                alpha = ops.Sigmoid()(self.skip[tid])  # learned skip gate in (0, 1)
                trans_out = G.dstdata['t'].pop(ntype)
                trans_out = ops.GeLU()(trans_out)
                trans_out = self.drop(self.a_linears[tid](trans_out))
                # Re-add input features for nodes with zero in-degree on every
                # relation.  NOTE(review): this appears to rely on the reducer
                # zero-filling degree-0 nodes — confirm against DGL semantics.
                mask = sum([G.in_degrees(etype=etype) == 0
                            for etype in G.canonical_etypes if etype[-1] == ntype])
                trans_out += mask.expand_dims(-1) * G.dstdata[inp_key][ntype]
                # Gated residual between transformed and input features.
                trans_out = trans_out * alpha + G.dstdata[inp_key].pop(ntype) * (1 - alpha)
                if self.use_norm:
                    trans_out = self.norms[tid](trans_out)
                res[ntype] = trans_out
        # Outside local_scope: persist the new features on G.
        for ntype in G.dsttypes:
            if G.num_dst_nodes(ntype):
                G.dstnodes[ntype].data[out_key] = res[ntype]

class BaseHeterGNN(nn.Cell):
    """Shared plumbing for heterogeneous link-prediction GNNs.

    Holds the full graph, the neighbour/negative samplers, and a dot-product
    edge classifier.  Subclasses implement ``forward`` to produce node
    representations; the helpers here turn edge id batches into
    (input_nodes, positive pairs, negative pairs, blocks) tuples.
    """

    def __init__(self, g, sampler=None, negative_sampler=None, key='h'):
        super().__init__()
        self.g = g
        self.sampler = sampler
        self.negative_sampler = negative_sampler
        self.key = key
        self.classifier = DotProduct(key=self.key)

    def set_sampler(self, sampler, negative_sampler):
        """Install (or replace) the neighbour and negative samplers."""
        self.sampler = sampler
        self.negative_sampler = negative_sampler

    def _prepare_pos_neg_pairs(self, eids, g):
        """Build positive/negative node pairs and MFG blocks for ``eids``."""
        # Seed nodes are all endpoints of the selected edges.
        seeds = g.edge_subgraph(eids).ndata[dgl.NID]
        neg_pairs = self.negative_sampler(g, eids)
        input_nodes, _, blocks = self.sampler.sample(g, seeds)
        pos_pairs = {}
        for pred_etype, ids in eids.items():
            canonical = g.to_canonical_etype(pred_etype)
            pos_pairs[canonical] = g.find_edges(ids, etype=canonical)
        return input_nodes, pos_pairs, neg_pairs, blocks

    def _prepare_pairs(self, input_batch):
        """Sample blocks plus positive/negative pair graphs for a batch."""
        input_nodes, pos_graph, neg_graph, blocks = self.sampler.sample(self.g, input_batch)
        canonical_etypes = [self.g.to_canonical_etype(e) for e in input_batch.keys()]
        pos_pairs = {c: pos_graph.edges(etype=c) for c in canonical_etypes}
        neg_pairs = {c: neg_graph.edges(etype=c) for c in canonical_etypes}
        return input_nodes, pos_pairs, neg_pairs, blocks

    def forward(self, *inputs):
        """Subclasses must implement the actual message passing."""
        raise NotImplementedError

    def _predict_edges(self, block, pos_pair_graph, neg_pair_graph=None):
        """Score pair graphs with the classifier, keyed by plain etype name."""
        def _by_etype(scores):
            # Re-key from canonical (src, etype, dst) triples to etype names.
            return {canonical[1]: score for canonical, score in scores.items()}

        pos_score = _by_etype(self.classifier.forward(pos_pair_graph, block))
        if neg_pair_graph is None:
            return pos_score
        neg_score = _by_etype(self.classifier.forward(neg_pair_graph, block))
        return pos_score, neg_score

    def construct(self):
        """MindSpore entry point; expects ``self.blocks``/``self.pos_graph``/
        ``self.neg_graph`` to have been attached by the caller beforehand."""
        encoded = self.forward(self.blocks)
        return self._predict_edges(encoded, self.pos_graph, self.neg_graph)


class HGT_dgl(BaseHeterGNN):
    """Stack of DGL-based :class:`HGTLayer` cells for link prediction.

    Each layer reads and writes node features under the same key, so
    ``aggregate`` threads representations through the block sequence.
    """

    def __init__(self, g, n_inp, n_hid, n_layers, n_heads, use_norm=True, use_time=False, embedding_layer=None,
                 selected_etype=None, sampler=None, negative_sampler=None, dropout=0.2):
        super().__init__(g, sampler, negative_sampler)
        self.n_inp = n_inp
        self.n_hid = n_hid
        self.n_layers = n_layers
        self.embedding_layer = embedding_layer
        # Restrict propagation to selected_etype when given, else all etypes.
        relations = selected_etype if selected_etype else g.etypes
        self.gcs = nn.CellList([
            HGTLayer(n_hid, n_hid, g.ntypes, relations, n_heads,
                     dropout=dropout, use_norm=use_norm, use_time=use_time)
            for _ in range(n_layers)
        ])

    def forward(self, blocks, x=None, key='h'):
        """Seed the first block's source features, then run all layers.

        Features come from ``x`` when given, otherwise from the embedding
        layer applied to the raw 'features' field (if one was configured).
        """
        first = blocks[0]
        for ntype in first.srctypes:
            if x is not None:
                first.srcnodes[ntype].data[key] = x[ntype]
            elif self.embedding_layer:
                first.srcnodes[ntype].data[key] = self.embedding_layer(
                    first.srcnodes[ntype].data['features'], ntype)
        return self.aggregate(blocks, key)

    def aggregate(self, blocks, key='h'):
        """Run each HGT layer on its block, forwarding dst features of one
        block as src features of the next; returns the last block."""
        for i in range(self.n_layers):
            block = blocks[i]
            if i > 0:
                # Previous layer's outputs become this block's inputs.
                for ntype in block.srctypes:
                    block.srcnodes[ntype].data[key] = blocks[i - 1].dstdata[key].pop(ntype)
            for ntype in block.dsttypes:
                # In a DGL block the dst nodes are the leading src rows.
                n_dst = block.num_dst_nodes(ntype)
                block.dstnodes[ntype].data[key] = block.srcnodes[ntype].data[key][:n_dst]
            self.gcs[i](block, key, key)
        return blocks[-1]

    def __repr__(self):
        return '{}(n_inp={}, n_hid={}, n_layers={})'.format(
            self.__class__.__name__, self.n_inp, self.n_hid, self.n_layers)


class HGT(BaseHeterGNN):
    """HGT variant built on :class:`HeteroHGTLayer`, which operates on integer
    node-/edge-type ids and plain index tensors instead of DGL graph objects.

    Node and canonical-edge types are mapped to stable integer ids at
    construction time; ``forward`` translates each block into per-relation
    (src_idx, dst_idx, n_nodes, n_edges) lists for the layers.
    """

    def __init__(self, g, n_inp, n_hid, n_layers, n_heads, use_norm=True, use_time=False, embedding_layer=None,
                 selected_etype=None, sampler=None, negative_sampler=None, dropout=0.2):
        super().__init__(g, sampler, negative_sampler)
        self.gcs = nn.CellList()
        self.n_inp = n_inp
        self.n_hid = n_hid
        self.n_layers = n_layers
        self.embedding_layer = embedding_layer
        # Stable integer ids for node types and canonical edge types.
        self._ntype2id = {ntype: i for i, ntype in enumerate(g.ntypes)}
        self._etype2id = {etype: i for i, etype in enumerate(g.canonical_etypes)}
        # Canonical etypes re-expressed as (src_type_id, etype_id, dst_type_id).
        self.canonical_etypes = [(self._ntype2id[etype[0]],
                                  self._etype2id[etype],
                                  self._ntype2id[etype[2]]) for etype in g.canonical_etypes]
        for _ in range(n_layers):
            self.gcs.append(HeteroHGTLayer(len(g.ntypes),
                                           len(g.canonical_etypes),
                                           self.canonical_etypes,
                                           n_hid, n_hid, dropout,
                                           n_heads, use_norm))

    def forward(self, blocks, x=None, key='h'):
        """Run the stacked layers over ``blocks``.

        Args:
            blocks: list of DGL message-flow blocks, one per layer.
            x: optional dict of per-ntype input features; when None, features
               are produced by the embedding layer from the raw 'features'.
            key: feature name under which outputs are stored.

        Returns:
            The last block, with output features on its destination nodes.
        """
        # BUG FIX: size the feature list by the full node-type vocabulary,
        # not by len(blocks[0].srctypes) -- _ntype2id ids are assigned over
        # g.ntypes, so a block whose src types are a subset containing a
        # high-id type would make h too short and h[nid] raise IndexError.
        h = [None] * len(self._ntype2id)
        for ntype in blocks[0].srctypes:
            nid = self._ntype2id[ntype]
            if x is None:
                h[nid] = self.embedding_layer(blocks[0].srcnodes[ntype].data['features'], ntype)
            else:
                h[nid] = x[ntype]
        for i in range(len(self.gcs)):
            n_e = len(self.canonical_etypes)
            src_idx, dst_idx, n_nodes, n_edges = [None] * n_e, [None] * n_e, [None] * n_e, [None] * n_e
            for etype in blocks[i].canonical_etypes:
                eid = self._etype2id[etype]
                num_e = blocks[i].num_edges(etype=etype)
                if num_e == 0:
                    continue  # relation absent from this block
                n_edges[eid] = num_e
                src_idx[eid], dst_idx[eid] = blocks[i].edges(etype=etype)
                # NOTE(review): presumably the destination-node count for this
                # relation; confirm num_nodes vs num_dst_nodes on DGL blocks.
                n_nodes[eid] = blocks[i].num_nodes(etype[2])
            h = self.gcs[i](h, src_idx, dst_idx, n_nodes, n_edges)
            for ntype in blocks[i].dsttypes:
                nid = self._ntype2id[ntype]
                if blocks[i].num_dst_nodes(ntype):
                    # Keep only dst rows (dst nodes lead the src ordering).
                    h[nid] = h[nid][:blocks[i].num_dst_nodes(ntype)]
                else:
                    h[nid] = None
        for ntype in blocks[-1].dsttypes:
            nid = self._ntype2id[ntype]
            if blocks[-1].num_dst_nodes(ntype):
                blocks[-1].dstnodes[ntype].data[key] = h[nid]
        return blocks[-1]


class AttnSeqGNN(BaseHeterGNN):
    """Sequence-of-snapshots GNN with attention-based temporal fusion.

    Runs ``gnn`` over each graph snapshot up to ``sid``, collects the
    destination-node states per snapshot, and fuses the resulting sequence
    with dot-product attention (query = most recent state).  The fused
    representation is stored as feature ``'h'`` on the last block's
    destination nodes.
    """

    def __init__(self, gnn, snapshots, dim_rep, dim_hid, neighbour_sampler, negative_sampler=None, embedding_layer=None,
                 target_etype=None, ignore_etype=None, depth=-1, rnn_like=True, dropout=0.2):
        """
        Args:
            gnn: per-snapshot GNN exposing ``forward(blocks, x, key)`` and ``g``.
            snapshots: chronologically ordered list of DGL graphs.
            dim_rep: unused in this class's visible code; kept for interface
                compatibility.
            dim_hid: hidden feature size.
            neighbour_sampler: sampler with ``sample(graph, seed_nodes)``.
            negative_sampler: optional negative-edge sampler.
            embedding_layer: optional module mapping raw features to dim_hid.
            target_etype: stored; not used in this class's visible code.
            ignore_etype: edge types excluded from the neighbour counts.
            depth: number of most recent snapshots to use (-1 = all).
            rnn_like: carry the updated state forward between snapshots.
            dropout: dropout rate (this MindSpore Dropout takes keep prob).
        """
        super().__init__(gnn.g)
        self.gnn = gnn
        self.dim_hid = dim_hid
        self.snapshots = snapshots
        self.neighbour_sampler = neighbour_sampler
        self.negative_sampler = negative_sampler
        if embedding_layer:
            self.embedding_layer = embedding_layer
        self.target_etype = target_etype
        self.depth = depth
        self.rnn_like = rnn_like
        self.drop = nn.Dropout(1 - dropout)
        self.position_emb = nn.Embedding(len(snapshots) - depth + 1, dim_hid)
        self.trans_W = nn.Dense(dim_hid, dim_hid)
        self.ignore_etype = ignore_etype if ignore_etype else []
        # Per-node-type residual projection applied between snapshots.
        self.residual = CellDict()
        for ntype in self.snapshots[-1].ntypes:
            self.residual[ntype] = nn.Dense(dim_hid, dim_hid)

    def forward(self, cur_blocks, compute_loss=None,
                cur_pos_pair=None, cur_neg_pair=None, classifier=None,
                sid=None):
        """Encode the destination nodes of ``cur_blocks`` through time.

        ``compute_loss``/``cur_pos_pair``/``cur_neg_pair``/``classifier`` are
        accepted for interface compatibility but not used in this method.

        Returns:
            The last block of ``cur_blocks`` with fused features under 'h'.
        """
        if sid is None:
            sid = len(self.snapshots) - 1

        dst_id = cur_blocks[-1].dstdata[dgl.NID]
        # (Removed a dead `mask = {}` initializer that was shadowed by the
        # per-ntype tensor mask computed in the fusion loop below.)
        all_x, ht = {}, {}
        h = {ntype: [] for ntype in cur_blocks[-1].dsttypes}
        num_nei = {ntype: [] for ntype in cur_blocks[-1].dsttypes}
        # Step 0: raw embeddings of the target nodes form the first sequence
        # element, with a zero neighbour count.
        # NOTE(review): features are always read from snapshots[-1], even when
        # sid points at an earlier snapshot -- confirm this is intended.
        for ntype in self.snapshots[sid].ntypes:
            feature = self.embedding_layer(self.snapshots[-1].ndata['features'][ntype], ntype)
            all_x[ntype] = feature
            if dst_id[ntype].shape[0]:
                ht[ntype] = all_x[ntype][dst_id[ntype]]
                h[ntype].append(ht[ntype].expand_dims(1))
                num_nei[ntype].append(mnp.zeros_like(dst_id[ntype]).expand_dims(-1))

        for i, snapshot in enumerate(self.snapshots[sid + 1 - self.depth: sid + 1]
                                     if self.depth > 0 else self.snapshots[: sid + 1]):
            # NOTE(review): with depth > 0 the slice re-bases the indices, so
            # `i < sid` may never select cur_blocks -- confirm intent.
            if i < sid:
                sample_res = self.neighbour_sampler.sample(snapshot, dst_id)
                blocks = sample_res[-1]
            else:
                blocks = cur_blocks
            src_id = blocks[0].srcdata[dgl.NID]
            x = {}
            for ntype in src_id.keys():
                if dst_id[ntype].shape[0] and i > 0 and self.rnn_like:
                    # Carry the previous snapshot's state delta forward onto
                    # any sampled source node that is also a target node.
                    update = mnp.zeros_like(all_x[ntype])
                    update[dst_id[ntype]] = ht[ntype] - all_x[ntype][dst_id[ntype]]
                    x[ntype] = all_x[ntype][src_id[ntype]] + update[src_id[ntype]]
                else:
                    x[ntype] = all_x[ntype][src_id[ntype]]
            ht = self.gnn.forward(blocks, x, key='xt').dstdata.pop('xt')
            for ntype in ht.keys():
                if ht[ntype].shape[0] > 0:
                    # Count in-neighbours over non-ignored relations; later
                    # used to mask snapshots where a node was isolated.
                    num_in = [blocks[-1].in_degrees(etype=etype)
                              for etype in snapshot.canonical_etypes
                              if etype not in self.ignore_etype and ntype == etype[2]]
                    num_nei[ntype].append(sum(num_in).expand_dims(-1))

                    h[ntype].append(ht[ntype].expand_dims(1))
                    # Residual update only for nodes that had neighbours.
                    ht[ntype] = self.residual[ntype](ht[ntype]) * (num_nei[ntype][-1] > 0)
                    ht[ntype] += all_x[ntype][dst_id[ntype]]

        if sid == 0:
            # Single snapshot: nothing to fuse over time.
            # NOTE(review): h[ntype][-1] has shape (nodes, 1, dim); indexing
            # [0] selects the first node's row -- possibly meant [:, 0].
            # Preserved as-is.
            for ntype in cur_blocks[-1].dsttypes:
                if cur_blocks[-1].num_dst_nodes(ntype):
                    cur_blocks[-1].dstnodes[ntype].data['h'] = h[ntype][-1][0]
            return cur_blocks[-1]

        for ntype in cur_blocks[-1].dsttypes:
            if cur_blocks[-1].num_dst_nodes(ntype):

                num_nei[ntype] = ops.concat(num_nei[ntype], -1)
                h_seq = ops.concat(h[ntype], 1)  # (nodes, steps, dim)
                # Mask steps where the node was isolated; unmask step 0 for
                # nodes isolated at EVERY step so the softmax always has at
                # least one finite logit.
                mask = num_nei[ntype] == 0
                not_alone = mnp.sum(~mask, -1, keepdims=False) > 0
                mask[:, 0] = ops.logical_and(mask[:, 0], not_alone)
                query = h[ntype][-1]

                attn = ops.matmul(query, h_seq.transpose(0, 2, 1)).squeeze(1)
                attn = nn.Softmax()(attn.masked_fill(mask, float('-inf')))
                ht = (attn.expand_dims(-1) * h_seq).sum(1)

                cur_blocks[-1].dstnodes[ntype].data['h'] = ht
        # BUG FIX: this return was previously the `else:` clause of the for
        # loop above (for/else).  Since the loop never breaks, the else always
        # ran -- behavior is identical, but the construct read as a mistake;
        # make the unconditional return explicit.
        return cur_blocks[-1]
