from typing import List, Tuple

import mindspore as ms
from mindspore import nn
from mindspore.common.initializer import initializer
from mindspore.common.initializer import XavierUniform

from mindspore_gl import Graph
from mindspore_gl.nn import GNNCell
import math


class CellDict(nn.CellList):
    """A string-keyed container of child Cells built on ``nn.CellList``.

    ``nn.CellList`` keeps its children in the ``self._cells`` dict (normally
    keyed by stringified indices); this subclass reuses that storage with
    arbitrary string keys so child parameters are still registered and
    tracked by MindSpore.
    """

    def __getitem__(self, key: str) -> nn.Cell:
        # Direct lookup in the underlying dict of child cells.
        return self._cells[key]

    def __setitem__(self, key: str, cell: nn.Cell) -> None:
        # Mirror what nn.CellList does on insert: prefix the child's
        # parameter names so they stay unique inside the parent network.
        # NOTE(review): relies on the private helper
        # ms.nn.layer.container._get_prefix_and_index — confirm it still
        # exists in the targeted MindSpore version.
        if self._auto_prefix:
            prefix, _ = ms.nn.layer.container._get_prefix_and_index(self._cells)
            cell.update_parameters_name(prefix + key + ".")
        self._cells[key] = cell


class FeatureEncoder(nn.Cell):
    """Embed per-type integer features and fuse them into one vector per node.

    Args:
        num_dict: mapping ``ntype -> sequence of vocabulary sizes``, one entry
            per categorical feature column of that node type.
        dim: embedding width for each individual feature.
        encoder: fusion strategy — ``'sum'``, ``'concat'`` or ``'mlp'``.
        out_dim: final width; used for single-feature node types and required
            by the ``'mlp'`` encoder.

    Raises:
        ValueError: if ``encoder`` is not a supported strategy, or if
            ``encoder == 'mlp'`` and ``out_dim`` is not given.
    """

    def __init__(self, num_dict, dim, encoder='sum', out_dim=None):
        super().__init__()
        # Fail fast: previously an unknown `encoder` silently left
        # self.encoder undefined, crashing only at the first construct().
        if encoder not in ('sum', 'concat', 'mlp'):
            raise ValueError(
                "Unknown encoder '{}', expected 'sum', 'concat' or 'mlp'".format(encoder))
        if encoder == 'mlp' and out_dim is None:
            raise ValueError("encoder='mlp' requires out_dim to be set")
        self.embeddings = CellDict()
        self.out_dim = {}
        self._map = {}
        for ntype, num_ntypes in num_dict.items():
            self.embeddings[ntype] = nn.CellList()
            self.out_dim[ntype] = []
            self._map[ntype] = len(self._map)
            if len(num_ntypes) == 1:
                # Single feature: embed straight to the final width.
                width = out_dim if out_dim else dim
                self.embeddings[ntype].append(
                    nn.Embedding(int(num_ntypes[0]), width, embedding_table='xavier_uniform'))
                self.out_dim[ntype].append(width)
            else:
                for num_ntype in num_ntypes:
                    self.embeddings[ntype].append(
                        nn.Embedding(int(num_ntype), dim, embedding_table='xavier_uniform'))
                    self.out_dim[ntype].append(dim)
        if encoder == 'concat':
            self.encoder = lambda x, _: ms.ops.concat(x, -1)
            self.out_dim = {ntype: sum(self.out_dim[ntype]) for ntype in num_dict.keys()}
        elif encoder == 'sum':
            # NOTE(review): assumes every feature of a type shares one width
            # (true here: multi-feature types all embed to `dim`).
            self.encoder = lambda x, _: sum(x)
            self.out_dim = {ntype: self.out_dim[ntype][0] for ntype in num_dict.keys()}
        else:  # 'mlp'
            self.mlp = CellDict()
            for ntype, nums in num_dict.items():
                self.mlp[ntype] = nn.Dense(dim * len(nums), out_dim)
            # Single-feature types bypass the MLP and return the embedding as-is.
            self.encoder = lambda x, ntype: self.mlp[ntype](ms.ops.concat(x, -1)) if len(x) > 1 else x[0]
            self.out_dim = {ntype: out_dim for ntype in num_dict.keys()}

    def construct(self, x, ntype):
        """Embed each column of ``x`` and fuse.

        Args:
            x: int tensor, presumably (num_nodes, num_features) — one column
               per registered feature of ``ntype``. TODO confirm with callers.
            ntype: node-type key registered in ``num_dict``.

        Raises:
            ValueError: if ``ntype`` was not registered at construction time.
        """
        if ntype not in self._map:
            raise ValueError('Type is not registered')
        return self.encoder([self.embeddings[ntype][i](x[:, i]) for i in range(x.shape[-1])], ntype)


class RelTemporalEncoding(nn.Cell):
    """Sinusoidal (Transformer-style) temporal encoding with a learned
    linear projection on top.
    """

    def __init__(self, n_hid):
        super().__init__()
        # Frequencies for the even channels: exp(-k * ln(1e4) / n_hid).
        freqs = ms.ops.exp(
            ms.numpy.arange(0, n_hid, 2) * -(math.log(10000.0) / n_hid)
        ).expand_dims(0)
        self.div_term = ms.Parameter(freqs, requires_grad=False)
        self.n_hid = n_hid
        self.lin = nn.Dense(n_hid, n_hid)

    def construct(self, t):
        """Return the projected sinusoid encoding of the 1-D timestamps ``t``."""
        scale = math.sqrt(self.n_hid)
        phases = t.expand_dims(-1) * self.div_term
        enc = ms.numpy.zeros((t.shape[0], self.n_hid))
        # Interleave sin on even channels, cos on odd channels.
        enc[:, 0::2] = ms.ops.sin(phases) / scale
        enc[:, 1::2] = ms.ops.cos(phases) / scale
        return self.lin(enc)


class HomoHGTLayer(GNNCell):
    """Single-relation HGT attention layer.

    Projects source-node features to keys/values and destination-node
    features to queries, mixes them with learnable per-head relation
    matrices (``att`` for attention, ``msg`` for messages), then aggregates
    neighbor messages with attention weights.

    NOTE(review): ``construct`` is written in mindspore_gl's vertex-centric
    DSL and is source-translated by GNNCell at build time; expressions such
    as ``attn_score * a`` over Python lists are only meaningful under that
    translation.
    """

    def __init__(self, n_heads, d_k,
                 k_cell, q_cell, v_cell, time_encoding=False):
        super().__init__()
        gain = math.sqrt(2)
        # Per-head relation priority: scales the attention logits.
        self.pri = ms.Parameter(ms.ops.Ones()((n_heads, 1), ms.float32))
        # Per-head (d_k x d_k) relation matrices, stored flattened.
        self.msg = ms.Parameter(initializer(XavierUniform(gain), [n_heads, d_k * d_k], ms.float32), name="relation_msg")
        self.att = ms.Parameter(initializer(XavierUniform(gain), [n_heads, d_k * d_k], ms.float32), name="relation_att")
        self.n_heads = n_heads
        self.d_k = d_k
        self.sqrt_dk = math.sqrt(d_k)
        # Projection cells are shared with the enclosing hetero layer.
        self.kc = k_cell
        self.qc = q_cell
        self.vc = v_cell
        self.exp = ms.ops.Exp()  # NOTE(review): unused — Exp is re-created inline in construct
        self.reduce = ms.ops.ReduceMin()  # NOTE(review): unused in construct
        self.time_encoding = time_encoding  # NOTE(review): stored but never applied here

    def construct(self, src_x, x, g: Graph):
        """homo HGT layer forward"""
        # Reshape projections to (N, n_heads, d_k): keys/values from the
        # source nodes, queries from the destination nodes.
        k = ms.ops.Reshape()(self.kc(src_x), (-1, self.n_heads, self.d_k))
        v = ms.ops.Reshape()(self.vc(src_x), (-1, self.n_heads, self.d_k))
        q = ms.ops.Reshape()(self.qc(x), (-1, self.n_heads, self.d_k))
        # Apply the per-head relation matrices: k' = k @ W_att, v' = v @ W_msg
        # (batched over heads via transpose -> BatchMatMul -> transpose back).
        k_tran = ms.ops.Transpose()(ms.ops.BatchMatMul()(ms.ops.Transpose()(k, (1, 0, 2)),
                                                         ms.ops.Reshape()(self.att, (-1, self.d_k, self.d_k))),
                                    (1, 0, 2))
        v_tran = ms.ops.Transpose()(ms.ops.BatchMatMul()(ms.ops.Transpose()(v, (1, 0, 2)),
                                                         ms.ops.Reshape()(self.msg, (-1, self.d_k, self.d_k))),
                                    (1, 0, 2))
        g.set_vertex_attr({"qe": q, "ke": k_tran, "ve": v_tran})
        # NOTE(review): the loop variable ``v`` below shadows the value tensor
        # above; harmless after DSL translation, but worth renaming.
        for v in g.dst_vertex:
            # Unnormalized attention per in-neighbor: exp(<q,k'> * pri / sqrt(d_k)).
            e = [ms.ops.Exp()(ms.ops.ReduceSum(keep_dims=True)(v.qe * u.ke, -1) * self.pri / self.sqrt_dk) for u in
                 v.innbs]
            # Softmax over in-neighbors, then attention-weighted sum of messages.
            attn_score = [c / g.sum(e) for c in e]
            a = [u.ve for u in v.innbs]
            v.ret = g.sum(attn_score * a)
        ret = [v.ret for v in g.dst_vertex]
        return ret


class HeteroHGTLayer(ms.nn.Cell):
    """Heterogeneous HGT layer.

    Runs one ``HomoHGTLayer`` per canonical edge type, averages the results
    per destination node type, then applies a per-type output projection,
    dropout, a learnable skip connection, and (optionally) LayerNorm.

    Args:
        num_node_types: number of node types (types are integer ids).
        num_edge_types: number of edge types (types are integer ids).
        canonical_etypes: list of ``(src_type, etype, dst_type)`` int triples.
        hidden_size: input feature width per node.
        output_size: output feature width per node (split across heads).
        dropout: dropout probability applied after the output projection.
        n_heads: number of attention heads; must divide ``output_size``.
        use_norm: apply a per-type LayerNorm to the output when True.
    """

    def __init__(self,
                 num_node_types: int,
                 num_edge_types: int,
                 canonical_etypes: List[Tuple],
                 hidden_size: int,
                 output_size: int,
                 dropout: float = 0.2,
                 n_heads: int = 4,
                 use_norm=True) -> None:
        super().__init__()
        self.num_ntypes = num_node_types
        self.num_etypes = num_edge_types
        self.canonical_etypes = canonical_etypes
        # Historical misspelled attribute kept as an alias for backward
        # compatibility with any external code that referenced it.
        self.canoical_etypes = canonical_etypes
        self.output_size = output_size
        self.use_norm = use_norm
        cl_k_tmp = []
        cl_q_tmp = []
        cl_v_tmp = []
        cl_a_tmp = []
        cl_norm_tmp = []
        for _ in range(num_node_types):
            cl_k_tmp.append(ms.nn.Dense(hidden_size, output_size))
            cl_q_tmp.append(ms.nn.Dense(hidden_size, output_size))
            cl_v_tmp.append(ms.nn.Dense(hidden_size, output_size))
            cl_a_tmp.append(ms.nn.Dense(output_size, output_size))
            if use_norm:
                cl_norm_tmp.append(ms.nn.LayerNorm((output_size,)))
        cl_k = ms.nn.CellList(cl_k_tmp)
        cl_q = ms.nn.CellList(cl_q_tmp)
        cl_v = ms.nn.CellList(cl_v_tmp)
        self.cl_a = ms.nn.CellList(cl_a_tmp)
        # One learnable skip-gate per node type. Fixed: the name previously
        # depended on a leftover loop variable ("skip{}".format(i)) and
        # raised NameError when num_node_types == 0.
        self.skip = ms.Parameter(ms.ops.Ones()((num_node_types,), ms.float32), name="skip")
        if use_norm:
            self.cl_norm = ms.nn.CellList(cl_norm_tmp)
        d_k = output_size // n_heads
        self.drop = ms.nn.Dropout(1 - dropout)
        per_etype_layers = []
        for stype, _, dtype in canonical_etypes:
            # Keys/values come from the source type, queries from the dst type.
            per_etype_layers.append(HomoHGTLayer(n_heads, d_k, cl_k[stype], cl_q[dtype], cl_v[stype]))
        self.layers = ms.nn.CellList(per_etype_layers)

    def construct(self, h, src_idx, dst_idx, n_nodes, n_edges):
        """Hetero HGT layer forward.

        Args:
            h: per-node-type input features, indexed by node-type id.
            src_idx / dst_idx / n_nodes / n_edges: per-edge-type graph data
                forwarded to each HomoHGTLayer; ``src_idx[etype] is None``
                marks an inactive edge type.

        Returns:
            list of per-node-type output features; types that received no
            messages keep an empty placeholder tensor.
        """
        out = []
        count = []
        for _ in range(self.num_ntypes):
            out.append(ms.ops.Zeros()((1,), ms.float32))
            count.append(0)
        for src_type, etype, dst_type in self.canonical_etypes:
            if src_idx[etype] is not None:
                out[dst_type] += self.layers[etype](h[src_type], h[dst_type], src_idx[etype], dst_idx[etype],
                                                    n_nodes[etype], n_edges[etype])
                count[dst_type] += 1
        for i in range(self.num_ntypes):
            # Average only over types that actually received messages; the
            # old unconditional division produced inf/NaN for count == 0.
            if count[i] > 0:
                out[i] = out[i] / count[i]

        new_h = [ms.Tensor([], dtype=ms.float32)] * self.num_ntypes
        for ntype in range(self.num_ntypes):
            if count[ntype] == 0:
                continue  # keep the empty placeholder for isolated types
            alpha = ms.ops.Sigmoid()(self.skip[ntype])
            t = ms.ops.Reshape()(out[ntype], (-1, self.output_size))
            emb = self.cl_a[ntype](ms.nn.GELU()(t))
            dropped = self.drop(emb)
            # Learnable skip connection: blend new embedding with the input.
            trans_out = dropped * alpha + h[ntype] * (1 - alpha)
            if self.use_norm:
                new_h[ntype] = self.cl_norm[ntype](trans_out)
            else:
                new_h[ntype] = trans_out
        return new_h


class DotProduct:
    """Edge scorer that rates a (u, v) node pair by the dot product of
    their feature vectors (DGL-style API)."""

    def __init__(self, key='h', out_key='score'):
        super().__init__()
        self.key = key          # node-data field to read features from
        self.out_key = out_key  # edge-data field to write scores to

    def predict(self, u_h, v_h):
        """Score each pair: elementwise product summed over the last axis."""
        return (u_h * v_h).sum(-1)

    def u_dot_v(self, edges):
        """Edge UDF: score every edge from its endpoint features."""
        score = self.predict(edges.src[self.key], edges.dst[self.key])
        return {self.out_key: score}

    def predict_pairs(self, edge_pairs, graph, key='h'):
        """Score explicit (u, v) index pairs, grouped by canonical edge type.

        NOTE(review): both endpoints are read from ``graph.dstnodes`` —
        presumably because features live on the dst frames of a sampled
        block; confirm against the caller.
        """
        scores = {}
        for canonical, pair in edge_pairs.items():
            srctype, etype, dsttype = canonical
            u, v = pair
            u_feat = graph.dstnodes[srctype].data[key][u]
            v_feat = graph.dstnodes[dsttype].data[key][v]
            scores[etype] = self.predict(u_feat, v_feat)
        return scores

    def forward(self, edge_subgraph, block, key='h'):
        """Copy dst-node features from ``block`` onto ``edge_subgraph``,
        score every non-empty edge type, and return the scores."""
        self.key = key
        for node_type in edge_subgraph.ntypes:
            if edge_subgraph.num_nodes(node_type):
                edge_subgraph.nodes[node_type].data[key] = block.dstnodes[node_type].data[key]
        for edge_type in edge_subgraph.canonical_etypes:
            if edge_subgraph.num_edges(etype=edge_type):
                edge_subgraph.apply_edges(self.u_dot_v, etype=edge_type)
        return edge_subgraph.edata.pop('score')

