import torch
import torch.nn as nn
import dgl
import math
import torch.nn.functional as F


class HGTConv(nn.Module):
    '''
        Heterogeneous Graph Transformer convolution layer, implemented on top
        of DGL's send/recv message-passing API.

        Each meta relation <source_type, relation_type, target_type> gets its
        own attention/message transforms (per-type K/Q/V projections plus
        per-relation head matrices); messages are aggregated per target node
        with a learnable, per-node-type skip connection.

        Args:
            in_dim:        input node feature dimension.
            out_dim:       output node feature dimension (must be divisible
                           by n_heads; the skip connection also assumes
                           in_dim == out_dim).
            num_types:     number of node types.
            num_relations: number of edge (relation) types.
            n_heads:       number of attention heads.
            dropout:       dropout applied to aggregated messages.
    '''

    def __init__(self, in_dim, out_dim, num_types, num_relations, n_heads, dropout=0.2, **kwargs):
        super(HGTConv, self).__init__()
        self.in_dim = in_dim
        self.out_dim = out_dim
        self.num_types = num_types
        self.num_relations = num_relations
        self.total_rel = num_types * num_relations * num_types
        self.n_heads = n_heads
        self.d_k = out_dim // n_heads
        self.sqrt_dk = math.sqrt(self.d_k)
        self.att = None
        # dim=1 is what the legacy no-arg nn.Softmax implicitly picked for the
        # 2-D (num_edges, n_heads) input it is applied to; making it explicit
        # silences the deprecation warning without changing behavior.
        # TODO(review): HGT normalizes attention over each target node's
        # incoming edges (edge_softmax), not across heads — confirm intent.
        self.softmax = nn.Softmax(dim=1)

        # One K/Q/V projection per node type; a single output projection.
        self.k_linears = nn.ModuleList()
        self.q_linears = nn.ModuleList()
        self.v_linears = nn.ModuleList()
        self.a_linears = nn.Linear(out_dim, out_dim)

        for t in range(num_types):
            self.k_linears.append(nn.Linear(in_dim, out_dim))
            self.q_linears.append(nn.Linear(in_dim, out_dim))
            self.v_linears.append(nn.Linear(in_dim, out_dim))

        '''
            TODO: make relation_pri smaller, as not all <st, rt, tt> pair exist in meta relation list.
        '''
        self.relation_pri = nn.Parameter(torch.ones(num_types, num_relations, num_types, self.n_heads))
        self.relation_att = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
        self.relation_msg = nn.Parameter(torch.Tensor(num_relations, n_heads, self.d_k, self.d_k))
        self.skip = nn.Parameter(torch.ones(num_types))
        self.drop = nn.Dropout(dropout)
        self.emb = RelTemporalEncoding(in_dim)

        nn.init.xavier_uniform_(self.relation_att)
        nn.init.xavier_uniform_(self.relation_msg)

        # The message/reduce functions are defined inside __init__ so they can
        # close over this module's parameters.
        def gcn_message(edges):
            '''
                DGL message function: heterogeneous mutual attention (step 1)
                and heterogeneous message passing (step 2).
            '''
            data_size = edges.src['h'].size(0)
            # Allocate on the same device as the inputs instead of the
            # previous hard-coded .cuda(), so CPU execution also works.
            device = edges.src['h'].device
            res_att = torch.zeros(data_size, self.n_heads, device=device)
            res_msg = torch.zeros(data_size, self.n_heads, self.d_k, device=device)
            for source_type in range(self.num_types):
                sb = (edges.src['type'] == int(source_type))
                k_linear = self.k_linears[source_type]
                v_linear = self.v_linears[source_type]
                for target_type in range(self.num_types):
                    tb = (edges.dst['type'] == int(target_type)) & sb
                    q_linear = self.q_linears[target_type]
                    for relation_type in range(self.num_relations):
                        # idx selects all edges whose meta relation is
                        # <source_type, relation_type, target_type>.
                        idx = (edges.data['type'] == int(relation_type)) & tb
                        if idx.sum() == 0:
                            continue
                        # Gather the input representations for this meta
                        # relation; add temporal encoding to the source (j).
                        target_node_vec = edges.dst['h'][idx]
                        source_node_vec = self.emb(edges.src['h'][idx], edges.data['time'][idx])

                        # Step 1: Heterogeneous Mutual Attention.
                        q_mat = q_linear(target_node_vec).view(-1, self.n_heads, self.d_k)
                        k_mat = k_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
                        # transpose(1, 0) makes heads the batch dim of bmm
                        # against the (n_heads, d_k, d_k) relation matrix.
                        k_mat = torch.bmm(k_mat.transpose(1, 0), self.relation_att[relation_type]).transpose(1, 0)

                        res_att[idx] = (q_mat * k_mat).sum(dim=-1) * \
                                       self.relation_pri[target_type][relation_type][source_type] / self.sqrt_dk

                        # Step 2: Heterogeneous Message Passing.
                        v_mat = v_linear(source_node_vec).view(-1, self.n_heads, self.d_k)
                        res_msg[idx] = torch.bmm(v_mat.transpose(1, 0),
                                                 self.relation_msg[relation_type]).transpose(1, 0)

            # Attention kept in self.att for later visualization.
            # NOTE(review): res_att is laid out per edge-batch position, but it
            # is indexed here with edges.edges()[0] (source-node ids); this only
            # lines up when the two coincide — confirm, or drop the indexing and
            # softmax res_att directly.
            self.att = self.softmax(res_att[edges.edges()[0]])
            res = res_msg * self.att.view(-1, self.n_heads, 1)
            del res_att, res_msg

            return {'msg': res.view(-1, self.out_dim)}

        def gcn_reduce(nodes):
            '''
                DGL reduce function — Step 3: target-specific aggregation:
                    h' = alpha * A_linear(gelu(Agg(msg))) + (1 - alpha) * h
            '''
            aggr_out = F.gelu(nodes.mailbox['msg'])
            aggr_out = torch.sum(self.drop(aggr_out), dim=1)
            res = torch.zeros(aggr_out.size(0), self.out_dim, device=aggr_out.device)
            for target_type in range(self.num_types):
                idx = (nodes.data['type'] == int(target_type))
                if idx.sum() == 0:
                    continue
                # Skip connection with learnable per-type weight self.skip.
                # Bug fix: previously alpha was computed but unused, and `res`
                # was overwritten for ALL nodes on every iteration with a fixed
                # 0.5/0.5 blend; now only the rows of this target type are
                # written, gated by alpha as the docstring above describes.
                # (The residual term assumes in_dim == out_dim.)
                alpha = torch.sigmoid(self.skip[target_type])
                res[idx] = self.a_linears(aggr_out[idx]) * alpha + nodes.data['h'][idx] * (1 - alpha)

            return {'h': res}

        self.gcn_m = gcn_message
        self.gcn_r = gcn_reduce

    def forward(self, node_inp, node_type, edge_index, edge_type, edge_time):
        '''
            Run one HGT layer over an edge list.

            node_inp:   (N, in_dim) node features.
            node_type:  (N,) integer node-type ids.
            edge_index: (2, E) — row 0 sources, row 1 destinations
                        (presumably; confirm against the caller).
            edge_type:  (E,) integer relation ids.
            edge_time:  (E,) integer timestamps for the temporal encoding.

            Returns: (N, out_dim) updated node representations.
        '''
        g = dgl.DGLGraph()
        node_num = node_inp.shape[0]
        g.add_nodes(node_num)
        g.add_edges(edge_index[0], edge_index[1])
        g.edata['type'] = edge_type
        g.edata['time'] = edge_time
        g.ndata['h'] = node_inp
        g.ndata['type'] = node_type

        g.send(g.edges(), self.gcn_m)
        # (removed leftover debug print of g.nodes())
        g.recv(g.nodes(), self.gcn_r)
        h = g.ndata.pop('h')
        return h




class RelTemporalEncoding(nn.Module):
    '''
        Temporal Encoding (sinusoid) — a Transformer-style positional table
        over discrete times, projected down to n_hid and added to the input.

        Args:
            n_hid:   hidden dimension of the representations being encoded.
            max_len: largest time index supported by the lookup table.
            dropout: dropout applied to the encoding before projection.
    '''

    def __init__(self, n_hid, max_len=240, dropout=0.2):
        super(RelTemporalEncoding, self).__init__()
        self.drop = nn.Dropout(dropout)
        position = torch.arange(0., max_len).unsqueeze(1)
        # Standard sinusoid frequencies 1 / 10000^(2i / d) with d = 2*n_hid.
        # Bug fix: the previous expression divided by n_hid OUTSIDE the
        # exponent (yielding 2*n_hid / 10000^(2i)), collapsing all but the
        # first frequencies to ~0.
        div_term = 1 / (10000 ** (torch.arange(0., n_hid * 2, 2.) / (n_hid * 2)))
        self.emb = nn.Embedding(max_len, n_hid * 2)
        self.emb.weight.data[:, 0::2] = torch.sin(position * div_term) / math.sqrt(n_hid)
        self.emb.weight.data[:, 1::2] = torch.cos(position * div_term) / math.sqrt(n_hid)
        # Freeze the table. Bug fix: requires_grad must be set on the weight
        # Parameter — setting it on the Embedding module itself has no effect.
        self.emb.weight.requires_grad = False
        self.lin = nn.Linear(n_hid * 2, n_hid)

    def forward(self, x, t):
        '''Add the projected temporal encoding of integer times t to x.'''
        return x + self.lin(self.drop(self.emb(t)))

class GeneralConv(nn.Module):
    '''
        Thin dispatcher that wraps a concrete graph-convolution layer
        selected by conv_name. Currently only 'hgt' is implemented.

        Raises:
            NotImplementedError: if conv_name has no corresponding layer.
    '''

    def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations, n_heads, dropout):
        super(GeneralConv, self).__init__()
        self.conv_name = conv_name
        if self.conv_name == 'hgt':
            self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations, n_heads, dropout)
        else:
            # Fail fast: previously an unsupported conv_name ('gcn', 'gat', ...)
            # constructed silently and only crashed in forward() with an
            # opaque AttributeError because self.base_conv was never set.
            raise NotImplementedError('unsupported conv_name: %s' % conv_name)

    def forward(self, meta_xs, node_type, edge_index, edge_type, edge_time):
        if self.conv_name == 'hgt':
            return self.base_conv(meta_xs, node_type, edge_index, edge_type, edge_time)
        # Kept for interface parity; unreachable until these layers are added
        # to __init__.
        elif self.conv_name == 'gcn':
            return self.base_conv(meta_xs, edge_index)
        elif self.conv_name == 'gat':
            return self.base_conv(meta_xs, edge_index)


