import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

from torch_geometric.nn import GINConv, global_add_pool, global_mean_pool


class GIN(torch.nn.Module):
    def __init__(self, num_features, num_classes, dropout,
                 hidden_dim=128, num_layers=5, add_or_mean="add"):
        super().__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        self.add_or_mean = add_or_mean
        self.dropout = dropout

        self.conv_layers = nn.ModuleList()

        # First GIN layer: input features -> hidden_dim
        mlp = nn.Sequential(
            nn.Linear(num_features, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
        )
        self.conv_layers.append(GINConv(mlp, train_eps=True))

        # Remaining GIN layers: hidden_dim -> hidden_dim
        for _ in range(num_layers - 1):
            mlp = nn.Sequential(
                nn.Linear(hidden_dim, hidden_dim),
                nn.ReLU(),
                nn.Linear(hidden_dim, hidden_dim),
                nn.BatchNorm1d(hidden_dim),
            )
            self.conv_layers.append(GINConv(mlp, train_eps=True))

        # Final classifier (applied after graph-level pooling)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x, edge_index, batch):
        # Node-level message passing with ReLU and dropout after each GIN layer
        for conv in self.conv_layers:
            x = conv(x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        # Pool node embeddings into one graph-level representation per graph
        if self.add_or_mean == "mean":
            x = global_mean_pool(x, batch)
        elif self.add_or_mean == "add":
            x = global_add_pool(x, batch)
        else:
            raise ValueError(f"Unknown pooling mode: {self.add_or_mean!r}")

        x = F.dropout(x, p=0.5, training=self.training)
        return self.fc(x)
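

# --- Usage sketch (not part of the original file) ---
# A minimal example of instantiating the GIN model above and running one
# forward pass. The dataset choice (TUDataset/MUTAG), batch size, and
# hyperparameter values are illustrative assumptions, not taken from the source.
if __name__ == "__main__":
    from torch_geometric.datasets import TUDataset
    from torch_geometric.loader import DataLoader

    dataset = TUDataset(root="data/TUDataset", name="MUTAG")  # assumed dataset
    loader = DataLoader(dataset, batch_size=32, shuffle=True)

    model = GIN(
        num_features=dataset.num_node_features,
        num_classes=dataset.num_classes,
        dropout=0.5,          # assumed value
        hidden_dim=128,
        num_layers=5,
        add_or_mean="add",
    )
    model.eval()

    batch = next(iter(loader))
    with torch.no_grad():
        logits = model(batch.x, batch.edge_index, batch.batch)
    print(logits.shape)  # [num_graphs_in_batch, num_classes]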