import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, global_mean_pool
import os.path as osp
from lib.utils.get_config import get_cfg


# Read the project config once at import time (used for the dropout rate below).
cfg = get_cfg(osp.join(osp.abspath("./"), 'configs/config.yaml'))

# TODO: the GCN layers could later be swapped for GAT, GraphTransformer, etc.


class GNN(nn.Module):
    """Stack of GCN convolutions for node- or graph-level classification.

    Args:
        input_dim: dimensionality of the input node features.
        output_dim: number of output classes.
        task: 'node' for per-node prediction, 'graph' for per-graph
            prediction (adds mean pooling over each graph's nodes).
        hidden_num: number of hidden GCN layers appended after the input
            layer (total convolutions = hidden_num + 1).
        hidden_dim: width of every hidden layer.

    Raises:
        RuntimeError: if ``task`` is neither 'node' nor 'graph'.
    """

    def __init__(self, input_dim, output_dim, task='node', hidden_num=2, hidden_dim=32):
        super(GNN, self).__init__()
        self.task = task
        # Validate early, before allocating any layers.
        if self.task not in ['node', 'graph']:
            raise RuntimeError('Unknown task.')

        # Input projection followed by `hidden_num` hidden convolutions.
        self.convs = nn.ModuleList()
        self.convs.append(GCNConv(input_dim, hidden_dim))
        for _ in range(hidden_num):
            self.convs.append(GCNConv(hidden_dim, hidden_dim))

        # Final classifier head.
        self.linear = nn.Sequential(nn.Linear(hidden_dim, output_dim))

        self.dropout = cfg['DROPOUT']
        # FIX: the original stored `hidden_num` here, so forward() skipped the
        # last convolution in `self.convs` entirely (trained-but-dead
        # parameters). Record the true layer count instead.
        self.num_layers = len(self.convs)

    def forward(self, data):
        """Run the GCN stack on a PyG ``Data``/``Batch`` object.

        Returns:
            Tuple of (pre-activation embeddings of the last conv layer,
            log-softmax class scores).
        """
        x, edge_index, batch = data.x, data.edge_index, data.batch

        if data.num_node_features == 0:
            # No node features: fall back to a constant scalar feature.
            # FIX: allocate on the same device as the graph so CUDA inputs
            # do not crash with a CPU/GPU mismatch.
            # NOTE(review): this only matches the first GCNConv's weights
            # when input_dim == 1 — confirm against callers.
            x = torch.ones(data.num_nodes, 1, device=edge_index.device)

        emb = x  # defensive init in case the ModuleList were ever empty
        for conv in self.convs:  # FIX: use every layer, not just hidden_num of them
            x = conv(x, edge_index)
            emb = x  # keep the pre-activation embedding of the latest layer
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        if self.task == 'graph':
            # Graph-level task: average node embeddings within each graph.
            x = global_mean_pool(x, batch)

        x = self.linear(x)

        return emb, F.log_softmax(x, dim=1)

    def loss(self, pred, label):
        """Negative log-likelihood loss; ``pred`` must be log-probabilities
        (as produced by forward's log_softmax output)."""
        return F.nll_loss(pred, label)
