import torch
import torch.nn as nn
import torch.nn.functional as F

import dgl

"""
    GCN: Graph Convolutional Networks
    Thomas N. Kipf, Max Welling, Semi-Supervised Classification with Graph Convolutional Networks (ICLR 2017)
    http://arxiv.org/abs/1609.02907
"""
from layers.gcn_layer import GCNLayer
from layers.mlp_readout_layer import MLPReadout


class GCNNet(nn.Module):
    """Graph Convolutional Network for edge (link) classification.

    Raw node features are linearly embedded, passed through a stack of
    GCN layers, and each edge is then scored by an MLP applied to the
    concatenation of its endpoint node representations.
    """

    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params.in_dim_node          # raw node-feature dimension
        hidden_dim = net_params.hidden_dim       # hidden width of the GCN stack
        out_dim = net_params.out_dim             # output width of the last GCN layer
        n_classes = net_params.n_classes         # number of edge classes (e.g. 2: edge present / absent)
        in_feat_dropout = net_params.in_feat_dropout
        dropout = net_params.dropout
        n_layers = net_params.n_layers

        self.batch_norm = net_params.batch_norm
        self.residual = net_params.residual
        self.n_classes = n_classes
        self.device = net_params.device

        # Linear embedding of raw node features into the hidden dimension.
        self.embedding_h = nn.Linear(in_dim, hidden_dim)
        self.in_feat_dropout = nn.Dropout(in_feat_dropout)

        # n_layers GCN layers in total: the first n_layers-1 map
        # hidden_dim -> hidden_dim, the final one maps hidden_dim -> out_dim.
        self.layers = nn.ModuleList(
            [GCNLayer(hidden_dim, hidden_dim, F.relu, dropout, self.batch_norm, self.residual)
             for _ in range(n_layers - 1)])
        self.layers.append(
            GCNLayer(hidden_dim, out_dim, F.relu, dropout, self.batch_norm, self.residual))

        # Edge classifier: MLP over concatenated (src, dst) node features,
        # hence the 2 * out_dim input width.
        self.MLP_layer = MLPReadout(2 * out_dim, n_classes)

    def forward(self, g, h, e):
        """Return per-edge class logits for graph ``g``.

        Args:
            g: DGL graph batch.
            h: node features, shape (num_nodes, in_dim_node).
            e: edge features (unused by this model; kept for a uniform API).

        Returns:
            Tensor of shape (num_edges, n_classes) with edge logits.
        """
        h = self.embedding_h(h.float())
        h = self.in_feat_dropout(h)
        for conv in self.layers:
            h = conv(g, h)
        # Store the final node representations on the graph so that
        # apply_edges can read them from edges.src / edges.dst.
        g.ndata['h'] = h

        def _edge_feat(edges):
            # Concatenate endpoint features and score the edge with the MLP.
            feat = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
            return {'e': self.MLP_layer(feat)}

        g.apply_edges(_edge_feat)
        return g.edata['e']

    def loss(self, pred, label):
        """Cross-entropy loss between edge logits ``pred`` and integer ``label``."""
        criterion = nn.CrossEntropyLoss(weight=None)
        return criterion(pred, label)
