import dgl
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.ops import edge_softmax
import dgl.nn as dglnn
import math
from dgl.nn.pytorch import GATConv


class HGTLayer (nn.Module):
    """Heterogeneous Graph Transformer (HGT) layer.

    For every canonical edge type, source features are projected to
    keys/values and destination features to queries with per-node-type
    linear layers, mixed with per-relation matrices, and combined via
    scaled dot-product attention. Messages are aggregated per relation
    and merged across relations with a mean reducer; finally a learned,
    per-node-type gated residual connection (optionally followed by
    LayerNorm) produces the output features.

    Parameters
    ----------
    in_dim : int
        Input feature size.
    out_dim : int
        Output feature size; must be divisible by ``n_heads``.
    node_dict : dict[str, int]
        Node-type name -> integer id indexing the per-type linears.
    edge_dict : dict[str, int]
        Edge-type name -> integer id indexing the per-relation parameters.
    n_heads : int
        Number of attention heads.
    dropout : float, optional
        Dropout applied to the aggregated output. Default: 0.2.
    use_norm : bool, optional
        If True, apply a per-type LayerNorm after the residual. Default: False.
    """

    def __init__(self,
                 in_dim,
                 out_dim,
                 node_dict,
                 edge_dict,
                 n_heads,
                 dropout=0.2,
                 use_norm=False):
        super (HGTLayer, self).__init__ ()

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.node_dict = node_dict
        self.edge_dict = edge_dict
        self.num_types = len (node_dict)
        self.num_relations = len (edge_dict)
        self.total_rel = self.num_types * self.num_relations * self.num_types
        self.n_heads = n_heads
        self.d_k = out_dim // n_heads       # per-head feature size
        self.sqrt_dk = math.sqrt (self.d_k)  # attention temperature
        self.att = None

        # Per-node-type projections: key/query/value and the output linear.
        self.k_linears = nn.ModuleList ()
        self.q_linears = nn.ModuleList ()
        self.v_linears = nn.ModuleList ()
        self.a_linears = nn.ModuleList ()
        self.norms = nn.ModuleList ()
        self.use_norm = use_norm

        for _ in range (self.num_types):
            self.k_linears.append (nn.Linear (in_dim, out_dim))
            self.q_linears.append (nn.Linear (in_dim, out_dim))
            self.v_linears.append (nn.Linear (in_dim, out_dim))
            self.a_linears.append (nn.Linear (out_dim, out_dim))
            if use_norm:
                self.norms.append (nn.LayerNorm (out_dim))

        # Per-relation parameters: head priorities plus the key / message
        # transformation matrices.
        self.relation_pri = nn.Parameter (torch.ones (self.num_relations, self.n_heads))
        self.relation_att = nn.Parameter (torch.Tensor (self.num_relations, n_heads, self.d_k, self.d_k))
        self.relation_msg = nn.Parameter (torch.Tensor (self.num_relations, n_heads, self.d_k, self.d_k))
        # Per-node-type gate for the residual connection.
        self.skip = nn.Parameter (torch.ones (self.num_types))
        self.drop = nn.Dropout (dropout)

        nn.init.xavier_uniform_ (self.relation_att)
        nn.init.xavier_uniform_ (self.relation_msg)

    def _propagate(self, G, src_h, dst_h, dst_data):
        """Shared message-passing body for full-graph and block modes.

        Parameters
        ----------
        G : DGLHeteroGraph or DGL block
        src_h, dst_h : dict[str, torch.Tensor]
            Source-/destination-side features per node type (the same dict
            in full-graph mode).
        dst_data : callable
            ntype -> node-data frame holding the aggregated result
            (``G.nodes`` for full graphs, ``G.dstnodes`` for blocks).
        """
        with G.local_scope ():
            node_dict, edge_dict = self.node_dict, self.edge_dict
            for srctype, etype, dsttype in G.canonical_etypes:
                sub_graph = G[srctype, etype, dsttype]

                k_linear = self.k_linears[node_dict[srctype]]
                v_linear = self.v_linears[node_dict[srctype]]
                q_linear = self.q_linears[node_dict[dsttype]]

                k = k_linear (src_h[srctype]).view (-1, self.n_heads, self.d_k)
                v = v_linear (src_h[srctype]).view (-1, self.n_heads, self.d_k)
                q = q_linear (dst_h[dsttype]).view (-1, self.n_heads, self.d_k)

                e_id = edge_dict[etype]

                relation_att = self.relation_att[e_id]
                relation_pri = self.relation_pri[e_id]
                relation_msg = self.relation_msg[e_id]

                # Mix each head with the relation-specific matrices.
                k = torch.einsum ("bij,ijk->bik", k, relation_att)
                v = torch.einsum ("bij,ijk->bik", v, relation_msg)

                sub_graph.srcdata['k'] = k
                sub_graph.dstdata['q'] = q
                # BUG FIX: store the value tensor under a per-relation key.
                # The original wrote a single 'v' field; when one source
                # node type participates in several relations, each later
                # relation overwrote the earlier values before
                # multi_update_all ran.
                sub_graph.srcdata['v_%d' % e_id] = v

                sub_graph.apply_edges (fn.v_dot_u ('q', 'k', 't'))
                attn_score = sub_graph.edata.pop ('t').sum (-1) * relation_pri / self.sqrt_dk
                attn_score = edge_softmax (sub_graph, attn_score, norm_by='dst')

                sub_graph.edata['t'] = attn_score.unsqueeze (-1)

            # Run all relations at once; destinations reached by several
            # relations average the per-relation results.
            G.multi_update_all ({etype: (fn.u_mul_e ('v_%d' % e_id, 't', 'm'), fn.sum ('m', 't'))
                                 for etype, e_id in edge_dict.items ()}, cross_reducer='mean')

            new_h = {}
            for ntype in G.ntypes:
                # Step 3: Target-specific Aggregation
                # x = norm( W[node_type] * Agg(x) * alpha + x * (1 - alpha) )
                n_id = node_dict[ntype]
                alpha = torch.sigmoid (self.skip[n_id])
                t = dst_data (ntype)['t'].view (-1, self.out_dim)
                trans_out = self.drop (self.a_linears[n_id] (t))
                trans_out = trans_out * alpha + dst_h[ntype] * (1 - alpha)
                if self.use_norm:
                    new_h[ntype] = self.norms[n_id] (trans_out)
                else:
                    new_h[ntype] = trans_out
            return new_h

    def forward(self, G, h=None, src_h=None, dst_h=None):
        """Run one HGT layer.

        Full-graph mode: pass ``h`` (dict ntype -> features).
        Mini-batch mode (``G`` is a DGL block): pass ``src_h`` and ``dst_h``.
        Returns a dict ntype -> updated destination features.
        """
        if G.is_block:
            return self._propagate (G, src_h, dst_h, lambda ntype: G.dstnodes[ntype].data)
        # Full graph: source and destination features are the same dict.
        return self._propagate (G, h, h, lambda ntype: G.nodes[ntype].data)


class HGT (nn.Module):
    """Stacked HGT model: per-node-type input adaptation, ``n_layers``
    HGTLayer blocks, and a linear + sigmoid output head.

    Parameters
    ----------
    G : graph handle (unused in __init__; kept for interface compatibility)
    node_dict / edge_dict : dict[str, int]
        Type-name -> id mappings shared by all layers.
    n_inp, n_hid, n_out : int
        Input / hidden / output feature sizes.
    n_layers, n_heads : int
        Number of HGT layers and attention heads per layer.
    device : torch device (stored, not used here)
    use_norm : bool, optional
        Forwarded to each HGTLayer. Default: True.
    """

    def __init__(self, G, node_dict, edge_dict, n_inp, n_hid, n_out, n_layers, n_heads, device, use_norm=True):
        super (HGT, self).__init__ ()
        self.node_dict = node_dict
        self.edge_dict = edge_dict
        self.gcs = nn.ModuleList ()
        self.n_inp = n_inp
        self.n_hid = n_hid
        self.n_out = n_out
        self.n_layers = n_layers
        self.adapt_ws = nn.ModuleList ()
        self.device = device
        # One input-adaptation projection per node type.
        for _ in range (len (node_dict)):
            self.adapt_ws.append (nn.Linear (n_inp, n_hid))
        for _ in range (n_layers):
            self.gcs.append (HGTLayer (n_hid, n_hid, node_dict, edge_dict, n_heads, use_norm=use_norm))
        self.out = nn.Linear (n_hid, n_out)

    def forward(self, G, out_key):
        """Return sigmoid logits for nodes of type ``out_key``.

        ``G`` is either a full heterograph or a list of DGL blocks
        (mini-batch mode, one block per layer). Node features are read
        from the ``'inp'`` field.
        """
        if not isinstance (G, list):
            # Full-graph mode: adapt raw inputs per type, then run all layers.
            h = {}
            for ntype in G.ntypes:
                n_id = self.node_dict[ntype]
                h[ntype] = F.gelu (self.adapt_ws[n_id] (G.nodes[ntype].data['inp']))
            for layer in self.gcs:
                h = layer (G, h)
            # BUG FIX: the original read `dst_h[out_key]` below, which is
            # undefined in this branch and raised NameError on every
            # full-graph forward pass.
            final_h = h
        else:
            # Mini-batch mode: G is a list of blocks, one per layer.
            src_h = {}
            dst_h = {}
            for i, (layer, block) in enumerate (zip (self.gcs, G)):
                for ntype in block.ntypes:
                    n_id = self.node_dict[ntype]
                    adapt = self.adapt_ws[n_id]
                    if i == 0:
                        src_h[ntype] = F.gelu (adapt (block.srcnodes[ntype].data['inp']))
                    # NOTE(review): for i > 0 this re-projects the raw 'inp'
                    # features of the destination nodes instead of slicing
                    # the previous layer's output — confirm this is intended
                    # (the usual pattern is dst_h = src_h[:num_dst_nodes]).
                    dst_h[ntype] = F.gelu (adapt (block.dstnodes[ntype].data['inp']))
                dst_h = layer (block, src_h=src_h, dst_h=dst_h)
                src_h = dst_h.copy ()
            final_h = dst_h
        return torch.sigmoid (self.out (final_h[out_key]))


#
# class HeteroRGCNLayer (nn.Module):
#     def __init__(self, in_size, out_size, etypes,
#                  num_bases,
#                  *,
#                  weight=True,
#                  bias=True,
#                  activation=None,
#                  self_loop=False,
#                  dropout=0.0
#                  ):
#         super (HeteroRGCNLayer, self).__init__ ()
#         # W_r for each relation
#         self.weight = nn.ModuleDict ({
#             name: nn.Linear (in_size, out_size) for name in etypes
#         })
#
#
#     def forward(self, G, feat_dict):
#         # The input is a dictionary of node features for each type
#         funcs = {}
#         for srctype, etype, dsttype in G.canonical_etypes:
#             # Compute W_r * h
#             Wh = self.weight[etype] (feat_dict[srctype])
#             # Save it in graph for message passing
#             G.dstnodes[srctype].data['Wh_%s' % etype] = Wh
#             # Specify per-relation message passing functions: (message_func, reduce_func).
#             # Note that the results are saved to the same destination feature 'h', which
#             # hints the type wise reducer for aggregation.
#             funcs[etype] = (fn.copy_u ('Wh_%s' % etype, 'm'), fn.mean ('m', 'h'))
#         # Trigger message passing of multiple types.
#         # The first argument is the message passing functions for each relation.
#         # The second one is the type wise reducer, could be "sum", "max",
#         # "min", "mean", "stack"
#         G.multi_update_all (funcs, 'sum')
#         a = G.dstnodes["user"].data
#         # return the updated node feature dictionary
#         return {ntype: G.nodes[ntype].data['h'] for ntype in G.ntypes}
#
#
# class HeteroRGCN (nn.Module):
#     def __init__(self, G, in_size, hidden_size, out_size):
#         super (HeteroRGCN, self).__init__ ()
#         # create layers
#         self.layer1 = HeteroRGCNLayer (in_size, hidden_size, G.etypes)
#         self.layer_mid = HeteroRGCNLayer (hidden_size, hidden_size, G.etypes)
#         self.layer2 = HeteroRGCNLayer (hidden_size, out_size, G.etypes)
#         self.layers = nn.ModuleList ()
#         # i2h
#         self.layers.append (HeteroRGCNLayer (
#             in_size, hidden_size, G.etypes,
#             self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
#             dropout=self.dropout, weight=False))
#         # h2h
#         for i in range (self.num_hidden_layers):
#             self.layers.append (HeteroRGCNLayer (
#                 hidden_size, hidden_size, G.etypes,
#                 self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
#                 dropout=self.dropout))
#         # h2o
#         self.layers.append (HeteroRGCNLayer (
#             hidden_size, out_size, G.etypes,
#             self.num_bases, activation=None,
#             self_loop=self.use_self_loop))
#
#     def forward(self, G, out_key,h=None):
#         if h is None:
#             # full graph training
#             h = self.embed_layer ()
#         if not type(G) == list:
#             input_dict = {ntype: G.nodes[ntype].data['inp'] for ntype in G.ntypes}
#             h_dict = self.layer1 (G, input_dict)
#             h_dict = {k: F.leaky_relu (h) for k, h in h_dict.items ()}
#             h_dict = self.layer2 (G, h_dict)
#             # get paper logits
#             return h_dict[out_key]
#         else:
#             for layer, block in zip (self.layers, G):
#                 h = layer (block, h)
#
#                 #     if i == 0:
#                 #         src_h[ntype] = F.gelu (layer_li (block.srcnodes[ntype].data['inp']))
#                 #     dst_h[ntype] = F.gelu (layer_li (block.dstnodes[ntype].data['inp']))
#                 # dst_h = layer (block, src_h=src_h, dst_h=dst_h)
#                 # src_h = dst_h.copy()


############-------------RGCN---------------########################
class RelGraphConvLayer (nn.Module):
    r"""Relational graph convolution layer.

    Parameters
    ----------
    in_feat : int
        Input feature size.
    out_feat : int
        Output feature size.
    rel_names : list[str]
        Relation names.
    num_bases : int, optional
        Number of bases. If is none, use number of relations. Default: None.
    weight : bool, optional
        True if a linear layer is applied after message passing. Default: True
    bias : bool, optional
        True if bias is added. Default: True
    activation : callable, optional
        Activation function. Default: None
    self_loop : bool, optional
        True to include self loop message. Default: False
    dropout : float, optional
        Dropout rate. Default: 0.0
    """

    def __init__(self,
                 in_feat,
                 out_feat,
                 rel_names,
                 num_bases,
                 *,
                 weight=True,
                 bias=True,
                 activation=None,
                 self_loop=False,
                 dropout=0.0):
        super (RelGraphConvLayer, self).__init__ ()
        self.in_feat = in_feat
        self.out_feat = out_feat
        self.rel_names = rel_names
        self.num_bases = num_bases
        self.bias = bias
        self.activation = activation
        self.self_loop = self_loop

        # One GraphConv per relation; per-relation weights are supplied
        # externally through mod_kwargs so they can be basis-decomposed.
        self.conv = dglnn.HeteroGraphConv ({
            rel: dglnn.GraphConv (in_feat, out_feat, norm='right', weight=False, bias=False)
            for rel in rel_names
        })

        self.use_weight = weight
        # Basis decomposition only pays off with fewer bases than relations.
        self.use_basis = num_bases < len (self.rel_names) and weight
        if self.use_weight:
            if self.use_basis:
                self.basis = dglnn.WeightBasis ((in_feat, out_feat), num_bases, len (self.rel_names))
            else:
                self.weight = nn.Parameter (torch.Tensor (len (self.rel_names), in_feat, out_feat))
                nn.init.xavier_uniform_ (self.weight, gain=nn.init.calculate_gain ('relu'))

        # bias
        if bias:
            self.h_bias = nn.Parameter (torch.Tensor (out_feat))
            nn.init.zeros_ (self.h_bias)

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter (torch.Tensor (in_feat, out_feat))
            nn.init.xavier_uniform_ (self.loop_weight,
                                     gain=nn.init.calculate_gain ('relu'))

        self.dropout = nn.Dropout (dropout)

    def forward(self, g, inputs):
        """Forward computation

        Parameters
        ----------
        g : DGLHeteroGraph
            Input graph.
        inputs : dict[str, torch.Tensor]
            Node feature for each node type.

        Returns
        -------
        dict[str, torch.Tensor]
            New node features for each node type.
        """
        g = g.local_var ()
        if self.use_weight:
            weight = self.basis () if self.use_basis else self.weight
            wdict = {self.rel_names[i]: {'weight': w.squeeze (0)}
                     for i, w in enumerate (torch.split (weight, 1, dim=0))}
        else:
            wdict = {}

        if g.is_block:
            inputs_src = inputs
            # Destination nodes of a block are a prefix of its source nodes.
            inputs_dst = {k: v[:g.number_of_dst_nodes (k)] for k, v in inputs.items ()}
        else:
            inputs_src = inputs_dst = inputs

        # FIX: pass inputs_src (was `inputs`). The values are identical, but
        # this removes a dead variable and keeps the src/dst split explicit.
        hs = self.conv (g, inputs_src, mod_kwargs=wdict)

        def _apply(ntype, h):
            # Optional self-loop uses the destination-side features.
            if self.self_loop:
                h = h + torch.matmul (inputs_dst[ntype], self.loop_weight)
            if self.bias:
                h = h + self.h_bias
            if self.activation:
                h = self.activation (h)
            return self.dropout (h)

        return {ntype: _apply (ntype, h) for ntype, h in hs.items ()}


class RelGraphEmbed (nn.Module):
    r"""Learnable node embeddings for a featureless heterograph.

    Holds one ``(num_nodes, embed_size)`` parameter per node type,
    initialized with Xavier-uniform (ReLU gain).
    """

    def __init__(self,
                 g,
                 embed_size,
                 embed_name='embed',
                 activation=None,
                 dropout=0.0):
        super (RelGraphEmbed, self).__init__ ()
        self.g = g
        self.embed_size = embed_size
        self.embed_name = embed_name
        self.activation = activation
        self.dropout = nn.Dropout (dropout)

        # One embedding matrix per node type.
        self.embeds = nn.ParameterDict ()
        gain = nn.init.calculate_gain ('relu')
        for ntype in g.ntypes:
            weight = nn.Parameter (torch.Tensor (g.number_of_nodes (ntype), embed_size))
            nn.init.xavier_uniform_ (weight, gain=gain)
            self.embeds[ntype] = weight

    def forward(self, block=None):
        """Return the embedding table.

        Parameters
        ----------
        block : optional
            Accepted for API compatibility but ignored.

        Returns
        -------
        nn.ParameterDict
            Mapping node type -> embedding parameter of shape
            ``(num_nodes, embed_size)``.
        """
        return self.embeds


class EntityClassify (nn.Module):
    """R-GCN entity-classification model.

    Stacks learned featureless embeddings, an input-to-hidden
    RelGraphConvLayer (no extra weight — embeddings are already learned),
    ``num_hidden_layers`` hidden layers with ReLU, and a hidden-to-output
    layer that emits raw logits.
    """

    def __init__(self,
                 g,
                 h_dim, out_dim,
                 num_bases,
                 num_hidden_layers=1,
                 dropout=0,
                 use_self_loop=False):
        super (EntityClassify, self).__init__ ()
        self.g = g
        self.h_dim = h_dim
        self.out_dim = out_dim
        self.rel_names = sorted (set (g.etypes))
        # Clamp the number of bases to the number of relations.
        if num_bases < 0 or num_bases > len (self.rel_names):
            self.num_bases = len (self.rel_names)
        else:
            self.num_bases = num_bases
        self.num_hidden_layers = num_hidden_layers
        self.dropout = dropout
        self.use_self_loop = use_self_loop

        self.embed_layer = RelGraphEmbed (g, self.h_dim)
        self.layers = nn.ModuleList ()
        # input-to-hidden (weight=False: inputs are the learned embeddings)
        self.layers.append (RelGraphConvLayer (
            self.h_dim, self.h_dim, self.rel_names,
            self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
            dropout=self.dropout, weight=False))
        # hidden-to-hidden
        for _ in range (self.num_hidden_layers):
            self.layers.append (RelGraphConvLayer (
                self.h_dim, self.h_dim, self.rel_names,
                self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
                dropout=self.dropout))
        # hidden-to-output (no activation: raw logits)
        self.layers.append (RelGraphConvLayer (
            self.h_dim, self.out_dim, self.rel_names,
            self.num_bases, activation=None,
            self_loop=self.use_self_loop))

    def forward(self, h=None, blocks=None):
        """Run the model; full-graph when ``blocks`` is None, else minibatch."""
        if h is None:
            # Featureless graph: start from the learned embeddings.
            h = self.embed_layer ()
        graphs = [self.g] * len (self.layers) if blocks is None else blocks
        for layer, graph in zip (self.layers, graphs):
            h = layer (graph, h)
        return h


######################  HAN implementation ##################
"""This model shows an example of using dgl.metapath_reachable_graph on the original heterogeneous
graph.
Because the original HAN implementation only gives the preprocessed homogeneous graph, this model
could not reproduce the result in HAN as they did not provide the preprocessing code, and we
constructed another dataset from ACM with a different set of papers, connections, features and
labels.
"""

class SemanticAttention(nn.Module):
    """Attention over metapath-specific embeddings.

    Given ``z`` of shape (N, M, D*K) — N nodes, M metapaths — scores each
    metapath with a small MLP, averages the scores over nodes, softmaxes
    them into weights, and returns the weighted sum over metapaths.
    """

    def __init__(self, in_size, hidden_size=128):
        super(SemanticAttention, self).__init__()

        self.project = nn.Sequential(
            nn.Linear(in_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1, bias=False)
        )

    def forward(self, z):
        scores = self.project(z)                           # (N, M, 1)
        w = scores.mean(dim=0)                             # (M, 1)
        beta = torch.softmax(w, dim=0)                     # (M, 1)
        beta = beta.expand((z.shape[0],) + beta.shape)     # (N, M, 1)
        return torch.sum(beta * z, dim=1)                  # (N, D * K)

class HANLayer(nn.Module):
    """
    HAN layer: one GAT per metapath-induced graph, fused by semantic attention.

    Arguments
    ---------
    meta_paths : list of metapaths, each as a list of edge types
    in_size : input feature dimension
    out_size : output feature dimension
    layer_num_heads : number of attention heads
    dropout : Dropout probability

    Inputs
    ------
    g : DGLHeteroGraph
        The heterogeneous graph
    h : tensor
        Input features

    Outputs
    -------
    tensor
        The output feature of shape (N, out_size * layer_num_heads)
    """
    def __init__(self, meta_paths, in_size, out_size, layer_num_heads, dropout):
        super(HANLayer, self).__init__()

        # One GAT layer per metapath-based adjacency matrix.
        self.gat_layers = nn.ModuleList([
            GATConv(in_size, out_size, layer_num_heads,
                    dropout, dropout, activation=F.elu,
                    allow_zero_in_degree=True)
            for _ in meta_paths
        ])
        self.semantic_attention = SemanticAttention(in_size=out_size * layer_num_heads)
        self.meta_paths = [tuple(mp) for mp in meta_paths]

        # Metapath-reachable graphs are expensive to build; cache them per
        # input graph object.
        self._cached_graph = None
        self._cached_coalesced_graph = {}

    def forward(self, g, h):
        # Rebuild the cache whenever a different graph object arrives.
        if self._cached_graph is None or self._cached_graph is not g:
            self._cached_graph = g
            self._cached_coalesced_graph.clear()
            for mp in self.meta_paths:
                self._cached_coalesced_graph[mp] = dgl.metapath_reachable_graph(g, mp)

        per_path = [
            gat(self._cached_coalesced_graph[mp], h).flatten(1)
            for gat, mp in zip(self.gat_layers, self.meta_paths)
        ]
        stacked = torch.stack(per_path, dim=1)              # (N, M, D * K)

        return self.semantic_attention(stacked)             # (N, D * K)

class HAN(nn.Module):
    """Stack of HAN layers followed by a linear prediction head.

    ``num_heads`` gives the head count per layer; each layer after the
    first consumes the previous layer's ``hidden_size * heads`` output.
    """

    def __init__(self, meta_paths, in_size, hidden_size, out_size, num_heads, dropout):
        super(HAN, self).__init__()

        layers = [HANLayer(meta_paths, in_size, hidden_size, num_heads[0], dropout)]
        for prev_heads, heads in zip(num_heads[:-1], num_heads[1:]):
            layers.append(HANLayer(meta_paths, hidden_size * prev_heads,
                                   hidden_size, heads, dropout))
        self.layers = nn.ModuleList(layers)
        self.predict = nn.Linear(hidden_size * num_heads[-1], out_size)

    def forward(self, g, h):
        for layer in self.layers:
            h = layer(g, h)

        return self.predict(h)