from dgl.nn import GraphConv
import torch.nn.functional as F
import torch.nn as nn
from dgl.nn import SAGEConv
from dgl.nn import GATConv

class GCN(nn.Module):
    """Three-layer graph convolutional network.

    Projects per-node input features of width ``in_feats`` through two
    ReLU-activated GraphConv layers (64 then 32 units) and a final
    GraphConv that emits ``num_classes`` raw logits per node.
    """

    def __init__(self, in_feats, num_classes):
        super().__init__()
        # Fixed hidden widths: in_feats -> 64 -> 32 -> num_classes.
        self.conv1 = GraphConv(in_feats, 64)
        self.conv2 = GraphConv(64, 32)
        self.conv3 = GraphConv(32, num_classes)

    def forward(self, g, in_feat):
        """Run a forward pass over graph ``g`` with node features ``in_feat``."""
        hidden = F.relu(self.conv1(g, in_feat))
        hidden = F.relu(self.conv2(g, hidden))
        # No activation on the last layer: callers receive raw class scores.
        return self.conv3(g, hidden)


class SAGE(nn.Module):
    """Three-layer GraphSAGE network with mean-aggregation at every layer.

    Each SAGEConv takes the layer's input feature width, its output
    feature width, and the neighbor aggregator type ('mean' here).
    Widths run in_feats -> 64 -> 32 -> out_feats.
    """

    def __init__(self, in_feats, out_feats):
        super().__init__()
        self.conv1 = SAGEConv(in_feats=in_feats, out_feats=64,
                              aggregator_type='mean')
        self.conv2 = SAGEConv(in_feats=64, out_feats=32,
                              aggregator_type='mean')
        self.conv3 = SAGEConv(in_feats=32, out_feats=out_feats,
                              aggregator_type='mean')

    def forward(self, graph, inputs):
        """Forward pass: ``inputs`` are the per-node feature vectors."""
        hidden = F.relu(self.conv1(graph, inputs))
        hidden = F.relu(self.conv2(graph, hidden))
        # Final layer is left un-activated (raw output features).
        return self.conv3(graph, hidden)


class GAT(nn.Module):
    """Three-layer graph attention network.

    The first two GATConv layers use ReLU and concatenate their
    ``num_heads`` attention heads (so the next layer's input width is
    width * num_heads); the output layer averages its heads to produce
    one ``num_classes``-dimensional score vector per node.
    """

    def __init__(self, in_feats, num_classes, num_heads=4):
        super().__init__()
        # Hidden layers: per-head widths 64 and 32, heads concatenated.
        self.conv1 = GATConv(in_feats, 64,
                             num_heads=num_heads, activation=F.relu)
        self.conv2 = GATConv(64 * num_heads, 32,
                             num_heads=num_heads, activation=F.relu)
        # Output layer: no activation; heads are averaged in forward().
        self.conv3 = GATConv(32 * num_heads, num_classes,
                             num_heads=num_heads)

    def forward(self, g, in_feat):
        """Forward pass over graph ``g`` with node features ``in_feat``."""
        # flatten(1) merges the head dimension into the feature
        # dimension, i.e. concatenates the per-head outputs.
        per_head = self.conv1(g, in_feat)
        merged = per_head.flatten(1)
        per_head = self.conv2(g, merged)
        merged = per_head.flatten(1)
        # Output layer: average across heads instead of concatenating.
        return self.conv3(g, merged).mean(1)
