Upload folder using huggingface_hub
- edge_aware_gnn_link.py +213 -0
- edge_aware_gnn_node.py +220 -0
- link_prediction_log/EdgeConv_GPT.txt +38 -0
- link_prediction_log/GINE_GPT.txt +38 -0
- link_prediction_log/GeneralConv_GPT.txt +38 -0
- link_prediction_log/GraphSAGE_GPT.txt +58 -0
- link_prediction_log/GraphTransformer_GPT.txt +58 -0
- link_prediction_log/MLP_GPT.txt +38 -0
- models/__pycache__/edge_conv.cpython-39.pyc +0 -0
- models/__pycache__/mlp.cpython-39.pyc +0 -0
- models/__pycache__/sage_edge_conv.cpython-39.pyc +0 -0
- models/edge_conv.py +52 -0
- models/mlp.py +22 -0
- models/sage_edge_conv.py +113 -0
- node_classification_log/EdgeConv_GPT.txt +30 -0
- node_classification_log/GINE_GPT.txt +30 -0
- node_classification_log/GeneralConv_GPT.txt +30 -0
- node_classification_log/GraphSAGE_GPT.txt +30 -0
- node_classification_log/GraphTransformer_GPT.txt +30 -0
- node_classification_log/MLP_GPT.txt +30 -0
edge_aware_gnn_link.py
ADDED
@@ -0,0 +1,213 @@
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..', '..', '..')))

import pickle
import numpy as np
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
import tqdm
from sklearn.metrics import roc_auc_score, f1_score
from torch_geometric import seed_everything
from torch_geometric.loader import LinkNeighborLoader
from torch_geometric.nn import SAGEConv, TransformerConv, GINEConv, GeneralConv, EdgeConv
from torch.nn import Linear
from models.edge_conv import EdgeConvConv
from models.sage_edge_conv import SAGEEdgeConv
from models.mlp import MLP
import argparse


class GNN(torch.nn.Module):
    def __init__(self, hidden_channels, edge_dim, num_layers, model_type):
        super().__init__()

        self.convs = torch.nn.ModuleList()

        if model_type == 'GraphSAGE':
            self.conv = SAGEEdgeConv(hidden_channels, hidden_channels, edge_dim=edge_dim)
        elif model_type == 'GraphTransformer':
            self.conv = TransformerConv((-1, -1), hidden_channels, edge_dim=edge_dim)
        elif model_type == 'GINE':
            self.conv = GINEConv(Linear(hidden_channels, hidden_channels), edge_dim=edge_dim)
        elif model_type == 'EdgeConv':
            self.conv = EdgeConvConv(Linear(2 * hidden_channels + edge_dim, hidden_channels),
                                     train_eps=True, edge_dim=edge_dim)
        elif model_type == 'GeneralConv':
            self.conv = GeneralConv((-1, -1), hidden_channels, in_edge_channels=edge_dim)
        else:
            raise NotImplementedError('Model type not implemented')

        # NOTE: the same conv instance is appended num_layers times,
        # so all layers share one set of weights.
        for _ in range(num_layers):
            self.convs.append(self.conv)

    def forward(self, x, edge_index, edge_attr):
        for i, conv in enumerate(self.convs):
            x = conv(x, edge_index, edge_attr)
            x = x.relu() if i != len(self.convs) - 1 else x
        return x


class Classifier(torch.nn.Module):
    def __init__(self, hidden_channels):
        super().__init__()
        self.lin1 = Linear(2 * hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, 1)

    def forward(self, x, edge_label_index):
        # Convert node embeddings to edge-level representations:
        edge_feat_src = x[edge_label_index[0]]
        edge_feat_dst = x[edge_label_index[1]]

        z = torch.cat([edge_feat_src, edge_feat_dst], dim=-1)
        z = self.lin1(z).relu()
        z = self.lin2(z)
        return z.view(-1)


class Model(torch.nn.Module):
    def __init__(self, hidden_channels, edge_dim, num_layers, model_type):
        super().__init__()
        self.model_type = model_type
        if model_type != 'MLP':
            self.gnn = GNN(hidden_channels, edge_dim, num_layers, model_type=model_type)

        self.classifier = Classifier(hidden_channels)

    def forward(self, data):
        x = data.x
        if self.model_type != 'MLP':
            x = self.gnn(x, data.edge_index, data.edge_attr)

        pred = self.classifier(x, data.edge_label_index)
        return pred, x


if __name__ == "__main__":
    seed_everything(66)

    parser = argparse.ArgumentParser()
    parser.add_argument('--data_type', '-dt', type=str, default='reddit', help='Data type')
    parser.add_argument('--emb_type', '-et', type=str, default='GPT-3.5-TURBO', help='Embedding type')  # TODO: set edge dim
    parser.add_argument('--model_type', '-mt', type=str, default='MLP', help='Model type')
    args = parser.parse_args()

    # Dataset = Children(root='.')
    # data = Dataset[0]  # TODO: Citation code in TAG
    with open(f'./reddit_graph.pkl', 'rb') as f:
        data = pickle.load(f)

    num_nodes = len(data.text_nodes)
    num_edges = len(data.text_edges)

    del data.text_nodes
    del data.text_node_labels
    del data.text_edges

    # Set node/edge feature dimensions for the different embedding types.
    if args.emb_type != 'None':
        data.edge_attr = torch.load(f'./reddit_graph-openai-edge.pt').squeeze().float()
        data.x = torch.load(f'./reddit_graph-openai-node.pt').squeeze().float()
        if args.emb_type == 'GPT-3.5-TURBO':
            edge_dim = 1536
            node_dim = 1536
        elif args.emb_type == 'Large_Bert':
            edge_dim = 1024
            node_dim = 1024
        elif args.emb_type == 'BERT':
            edge_dim = 768
            node_dim = 768
        else:
            raise NotImplementedError('Embedding type not implemented')
    else:
        data.x = torch.load(f'./reddit_graph-openai-node.pt').squeeze().float()
        data.edge_attr = torch.randn(num_edges, 1024).squeeze().float()
        edge_dim = 1024
        node_dim = 1024

    print(data)

    # Perform a link-level split into training, validation, and test edges.
    # NOTE: num_val=0.8 holds out 80% of the edges for validation.
    train_data, val_data, test_data = T.RandomLinkSplit(
        num_val=0.8,
        num_test=0.1,
        disjoint_train_ratio=0.3,
        neg_sampling_ratio=1.0,
    )(data)

    edge_label_index = train_data.edge_label_index
    edge_label = train_data.edge_label
    train_loader = LinkNeighborLoader(
        data=train_data,
        num_neighbors=[20, 10],
        edge_label_index=edge_label_index,
        edge_label=edge_label,
        batch_size=1024,
        shuffle=True,
    )

    edge_label_index = val_data.edge_label_index
    edge_label = val_data.edge_label
    val_loader = LinkNeighborLoader(
        data=val_data,
        num_neighbors=[20, 10],
        edge_label_index=edge_label_index,
        edge_label=edge_label,
        batch_size=1024,
        shuffle=False,
    )

    edge_label_index = test_data.edge_label_index
    edge_label = test_data.edge_label
    test_loader = LinkNeighborLoader(
        data=test_data,
        num_neighbors=[20, 10],
        edge_label_index=edge_label_index,
        edge_label=edge_label,
        batch_size=1024,
        shuffle=False,
    )

    model = Model(hidden_channels=node_dim, edge_dim=edge_dim, num_layers=2, model_type=args.model_type)  # TODO: edge dim
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    for epoch in range(1, 10):
        total_loss = total_examples = 0
        for sampled_data in tqdm.tqdm(train_loader):
            optimizer.zero_grad()
            sampled_data = sampled_data.to(device)
            pred, x = model(sampled_data)
            ground_truth = sampled_data.edge_label
            loss = F.binary_cross_entropy_with_logits(pred, ground_truth)
            loss.backward()
            optimizer.step()
            total_loss += float(loss) * pred.numel()
            total_examples += pred.numel()
        print(f"Epoch: {epoch:03d}, Loss: {total_loss / total_examples:.4f}")

        # Validation.
        # NOTE: the metrics below are computed on test_loader, not val_loader.
        if epoch % 1 == 0 and epoch != 0:
            print('Validation begins')
            with torch.no_grad():
                preds = []
                ground_truths = []
                for sampled_data in tqdm.tqdm(test_loader):
                    sampled_data = sampled_data.to(device)
                    pred = model(sampled_data)[0]
                    preds.append(pred)
                    ground_truths.append(sampled_data.edge_label)
                    positive_pred = pred[sampled_data.edge_label == 1].cpu().numpy()
                    negative_pred = pred[sampled_data.edge_label == 0].cpu().numpy()
                pred = torch.cat(preds, dim=0).cpu().numpy()

                ground_truth = torch.cat(ground_truths, dim=0).cpu().numpy()
                # NOTE: pred holds raw logits, so 0.5 thresholds the logit,
                # not a probability.
                y_label = np.where(pred >= 0.5, 1, 0)
                f1 = f1_score(ground_truth, y_label)
                print(f"F1 score: {f1:.4f}")
                # AUC
                auc = roc_auc_score(ground_truth, pred)
                print(f"Validation AUC: {auc:.4f}")
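Usage note (not part of the commit): a minimal forward-pass sketch for the link model above, on a hypothetical toy graph. The 1536-dim features match the GPT-3.5-TURBO embedding size, and the import assumes the script is on the Python path.

import torch
from torch_geometric.data import Data
from edge_aware_gnn_link import Model  # assumes this script is importable

# hypothetical toy graph: 4 nodes, 3 edges, 1536-dim node/edge features
toy = Data(
    x=torch.randn(4, 1536),
    edge_index=torch.tensor([[0, 1, 2], [1, 2, 3]]),
    edge_attr=torch.randn(3, 1536),
)
toy.edge_label_index = torch.tensor([[0, 2], [3, 1]])  # candidate links to score

model = Model(hidden_channels=1536, edge_dim=1536, num_layers=2, model_type='GraphTransformer')
logits, node_emb = model(toy)   # one raw logit per candidate link
link_probs = logits.sigmoid()   # matches the BCE-with-logits training objective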
edge_aware_gnn_node.py
ADDED
@@ -0,0 +1,220 @@
import os
import pickle
import sys

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../..', '..', '..')))

from models.edge_conv import EdgeConvConv
from models.sage_edge_conv import SAGEEdgeConv
from models.mlp import MLP
import torch.nn.functional as F
import numpy as np
import torch
import torch_geometric.transforms as T
from torch_geometric.loader import NeighborLoader
from torch_geometric import seed_everything
import tqdm
from sklearn.metrics import roc_auc_score, f1_score, accuracy_score
from torch_geometric.loader import NeighborSampler
from torch_geometric.nn import SAGEConv, TransformerConv, GINEConv, EdgeConv, GeneralConv
from torch.nn import Linear
import argparse


class GNN(torch.nn.Module):
    def __init__(self, hidden_channels, edge_dim, num_layers, model_type):
        super().__init__()
        self.convs = torch.nn.ModuleList()

        if model_type == 'GraphSAGE':
            self.conv = SAGEEdgeConv(hidden_channels, hidden_channels, edge_dim=edge_dim)
        elif model_type == 'GraphTransformer':
            self.conv = TransformerConv((-1, -1), hidden_channels, edge_dim=edge_dim)
        elif model_type == 'GINE':
            self.conv = GINEConv(Linear(hidden_channels, hidden_channels), edge_dim=edge_dim)
        elif model_type == 'EdgeConv':
            self.conv = EdgeConvConv(Linear(2 * hidden_channels + edge_dim, hidden_channels),
                                     train_eps=True, edge_dim=edge_dim)
        elif model_type == 'GeneralConv':
            self.conv = GeneralConv((-1, -1), hidden_channels, in_edge_channels=edge_dim)
        else:
            raise NotImplementedError('Model type not implemented')

        # NOTE: the same conv instance is appended num_layers times,
        # so all layers share one set of weights.
        for _ in range(num_layers):
            self.convs.append(self.conv)

    def forward(self, x, edge_index, edge_attr):
        for i, conv in enumerate(self.convs):
            x = conv(x, edge_index, edge_attr=edge_attr)
            x = x.relu() if i != len(self.convs) - 1 else x
        return x


class Classifier(torch.nn.Module):
    def __init__(self, hidden_channels, out_channels):
        super().__init__()
        self.lin1 = Linear(hidden_channels, hidden_channels // 4)
        self.lin2 = Linear(hidden_channels // 4, out_channels)

    def forward(self, x):
        x = self.lin1(x).relu()
        x = self.lin2(x)
        return x


class Model(torch.nn.Module):
    def __init__(self, hidden_channels, out_channels, edge_dim, num_layers, model_type):
        super().__init__()
        self.model_type = model_type
        if model_type != 'MLP':
            self.gnn = GNN(hidden_channels, edge_dim, num_layers, model_type=model_type)

        self.classifier = Classifier(hidden_channels, out_channels)

    def forward(self, data):
        x = data.x
        if self.model_type != 'MLP':
            x = self.gnn(x, data.edge_index, data.edge_attr)

        pred = self.classifier(x)
        return pred


if __name__ == '__main__':
    seed_everything(66)

    parser = argparse.ArgumentParser()
    parser.add_argument('--data_type', '-dt', type=str, default='reddit', help='Data type')
    parser.add_argument('--emb_type', '-et', type=str, default='GPT-3.5-TURBO', help='Embedding type')  # TODO: set edge dim
    parser.add_argument('--model_type', '-mt', type=str, default='MLP', help='Model type')
    args = parser.parse_args()

    # Dataset = Children(root='.')
    # data = Dataset[0]  # TODO: Citation code in TAG
    with open(f'./reddit_graph.pkl', 'rb') as f:
        data = pickle.load(f)
    print(data)

    num_nodes = len(data.text_nodes)
    num_edges = len(data.text_edges)

    # Map node labels to integer class ids.
    node_labels = data.node_labels.tolist()
    label_to_int = {label: i for i, label in enumerate(set(node_labels))}
    data.y = torch.tensor([label_to_int[label] for label in node_labels]).long()

    # Data split: 80% train / 10% val / 10% test.
    train_ratio = 0.8
    val_ratio = 0.1

    num_train_paper = int(num_nodes * train_ratio)
    num_val_paper = int(num_nodes * val_ratio)
    num_test_paper = num_nodes - num_train_paper - num_val_paper

    paper_indices = torch.randperm(num_nodes)

    data.train_mask = torch.zeros(num_nodes, dtype=torch.bool)
    data.val_mask = torch.zeros(num_nodes, dtype=torch.bool)
    data.test_mask = torch.zeros(num_nodes, dtype=torch.bool)

    data.train_mask[paper_indices[:num_train_paper]] = 1
    data.val_mask[paper_indices[num_train_paper:num_train_paper + num_val_paper]] = 1
    data.test_mask[paper_indices[-num_test_paper:]] = 1

    data.num_classes = int(data.y.max()) + 1
    data.num_nodes = num_nodes

    del data.text_nodes
    del data.text_node_labels
    del data.text_edges

    # Set node/edge feature dimensions for the different embedding types.
    if args.emb_type != 'None':
        data.x = torch.load(f'./reddit_graph-openai-node.pt').squeeze().float()
        data.edge_attr = torch.load(f'./reddit_graph-openai-edge.pt').squeeze().float()
        if args.emb_type == 'GPT-3.5-TURBO':
            edge_dim = 1536
            node_dim = 1536
        elif args.emb_type == 'Large_Bert':
            edge_dim = 1024
            node_dim = 1024
        elif args.emb_type == 'BERT':
            edge_dim = 768
            node_dim = 768
        else:
            raise NotImplementedError('Embedding type not implemented')
    else:
        data.x = torch.load(f'./reddit_graph-openai-node.pt').squeeze().float()
        data.edge_attr = torch.randn(num_edges, 1024).squeeze().float()
        edge_dim = 1024
        node_dim = 1024

    # Make sure all attributes of data are contiguous.
    data.x = data.x.contiguous()
    data.edge_index = data.edge_index.contiguous()

    print(data)

    # Create the NeighborLoaders.
    train_loader = NeighborLoader(data, input_nodes=data.train_mask, num_neighbors=[10, 10], batch_size=1024, shuffle=True)
    val_loader = NeighborLoader(data, input_nodes=data.val_mask, num_neighbors=[10, 10], batch_size=1024, shuffle=False)
    test_loader = NeighborLoader(data, input_nodes=data.test_mask, num_neighbors=[10, 10], batch_size=1024, shuffle=False)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    model = Model(hidden_channels=node_dim, out_channels=data.num_classes, edge_dim=edge_dim, num_layers=2, model_type=args.model_type)
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    criterion = torch.nn.CrossEntropyLoss()

    for epoch in range(1, 10):
        model.train()
        total_examples = total_loss = 0

        for batch in tqdm.tqdm(train_loader):
            optimizer.zero_grad()
            batch = batch.to(device)
            batch_size = batch.batch_size

            out = model(batch)
            # NOTE: the loss is computed over every node in the sampled
            # subgraph, not only the batch_size seed nodes.
            loss = criterion(out, batch.y)
            loss.backward()
            optimizer.step()

            total_examples += batch_size
            total_loss += float(loss) * batch_size

        if epoch % 1 == 0 and epoch != 0:
            print('Validation begins')

            model.eval()
            with torch.no_grad():
                preds = []
                ground_truths = []
                for batch in tqdm.tqdm(val_loader):
                    batch = batch.to(device)

                    out = model(batch)
                    pred = F.softmax(out, dim=1)

                    preds.append(pred)
                    ground_truths.append(batch.y)

                pred = torch.cat(preds, dim=0).cpu().numpy()
                ground_truth = torch.cat(ground_truths, dim=0).cpu().numpy()

                # F1 score
                y_pred_labels = np.argmax(pred, axis=1)  # predicted class labels
                f1 = f1_score(ground_truth, y_pred_labels, average='weighted')
                print(f"F1 score: {f1:.4f}")

                # Accuracy
                accuracy = accuracy_score(ground_truth, y_pred_labels)
                print(f"Validation Accuracy: {accuracy:.4f}")
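Usage note (not part of the commit): the node classifier follows the same Model interface; a minimal sketch on a hypothetical toy graph with 3 classes, assuming the script is importable.

import torch
from torch_geometric.data import Data
from edge_aware_gnn_node import Model  # assumes this script is importable

# hypothetical toy graph: 5 nodes, 4 edges, 1536-dim node/edge features
toy = Data(
    x=torch.randn(5, 1536),
    edge_index=torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]]),
    edge_attr=torch.randn(4, 1536),
)

model = Model(hidden_channels=1536, out_channels=3, edge_dim=1536,
              num_layers=2, model_type='GINE')
logits = model(toy)           # [5, 3] class logits
pred = logits.argmax(dim=-1)  # predicted class per node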
link_prediction_log/EdgeConv_GPT.txt
ADDED
@@ -0,0 +1,38 @@
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], edge_attr=[676684, 1536], x=[478022, 1536])
cuda
Epoch: 001, Loss: 1.1276
Validation begins
F1 score: 0.9799
Validation AUC: 0.9925
Epoch: 002, Loss: 0.4067
Validation begins
F1 score: 0.9818
Validation AUC: 0.9922
Epoch: 003, Loss: 0.1547
Validation begins
F1 score: 0.9812
Validation AUC: 0.9918
Epoch: 004, Loss: 0.1403
Validation begins
F1 score: 0.9800
Validation AUC: 0.9926
Epoch: 005, Loss: 0.1717
Validation begins
F1 score: 0.9801
Validation AUC: 0.9921
Epoch: 006, Loss: 0.1315
Validation begins
F1 score: 0.9800
Validation AUC: 0.9919
Epoch: 007, Loss: 0.1300
Validation begins
F1 score: 0.9800
Validation AUC: 0.9922
Epoch: 008, Loss: 0.1417
Validation begins
F1 score: 0.9800
Validation AUC: 0.9924
Epoch: 009, Loss: 0.1294
Validation begins
F1 score: 0.9800
Validation AUC: 0.9921
link_prediction_log/GINE_GPT.txt
ADDED
@@ -0,0 +1,38 @@
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], edge_attr=[676684, 1536], x=[478022, 1536])
cuda
Epoch: 001, Loss: 0.4447
Validation begins
F1 score: 0.9805
Validation AUC: 0.9961
Epoch: 002, Loss: 0.0825
Validation begins
F1 score: 0.9807
Validation AUC: 0.9961
Epoch: 003, Loss: 0.0617
Validation begins
F1 score: 0.9804
Validation AUC: 0.9960
Epoch: 004, Loss: 0.0525
Validation begins
F1 score: 0.9809
Validation AUC: 0.9962
Epoch: 005, Loss: 0.0456
Validation begins
F1 score: 0.9809
Validation AUC: 0.9961
Epoch: 006, Loss: 0.0403
Validation begins
F1 score: 0.9803
Validation AUC: 0.9957
Epoch: 007, Loss: 0.0332
Validation begins
F1 score: 0.9806
Validation AUC: 0.9957
Epoch: 008, Loss: 0.0271
Validation begins
F1 score: 0.9801
Validation AUC: 0.9960
Epoch: 009, Loss: 0.0212
Validation begins
F1 score: 0.9804
Validation AUC: 0.9952
link_prediction_log/GeneralConv_GPT.txt
ADDED
@@ -0,0 +1,38 @@
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], edge_attr=[676684, 1536], x=[478022, 1536])
cuda
Epoch: 001, Loss: 0.1678
Validation begins
F1 score: 0.9809
Validation AUC: 0.9964
Epoch: 002, Loss: 0.0631
Validation begins
F1 score: 0.9809
Validation AUC: 0.9961
Epoch: 003, Loss: 0.0540
Validation begins
F1 score: 0.9802
Validation AUC: 0.9959
Epoch: 004, Loss: 0.0472
Validation begins
F1 score: 0.9795
Validation AUC: 0.9961
Epoch: 005, Loss: 0.0411
Validation begins
F1 score: 0.9798
Validation AUC: 0.9959
Epoch: 006, Loss: 0.0349
Validation begins
F1 score: 0.9803
Validation AUC: 0.9962
Epoch: 007, Loss: 0.0312
Validation begins
F1 score: 0.9801
Validation AUC: 0.9958
Epoch: 008, Loss: 0.0264
Validation begins
F1 score: 0.9804
Validation AUC: 0.9959
Epoch: 009, Loss: 0.0266
Validation begins
F1 score: 0.9799
Validation AUC: 0.9960
link_prediction_log/GraphSAGE_GPT.txt
ADDED
@@ -0,0 +1,58 @@
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], edge_attr=[676684, 1536], x=[478022, 1536])
cuda
Epoch: 001, Loss: 0.1693
Validation begins
F1 score: 0.9807
Validation AUC: 0.9883
Epoch: 002, Loss: 0.0656
Validation begins
F1 score: 0.9810
Validation AUC: 0.9900
Epoch: 003, Loss: 0.0556
Validation begins
F1 score: 0.9808
Validation AUC: 0.9906
Epoch: 004, Loss: 0.0473
Validation begins
F1 score: 0.9807
Validation AUC: 0.9906
Epoch: 005, Loss: 0.0397
Validation begins
F1 score: 0.9793
Validation AUC: 0.9898
Epoch: 006, Loss: 0.0337
Validation begins
F1 score: 0.9786
Validation AUC: 0.9901
Epoch: 007, Loss: 0.0291
Validation begins
F1 score: 0.9796
Validation AUC: 0.9908
Epoch: 008, Loss: 0.0247
Validation begins
F1 score: 0.9805
Validation AUC: 0.9893
Epoch: 009, Loss: 0.0262
Validation begins
F1 score: 0.9787
Validation AUC: 0.9908
Epoch: 010, Loss: 0.0218
Validation begins
F1 score: 0.9805
Validation AUC: 0.9900
Epoch: 011, Loss: 0.0176
Validation begins
F1 score: 0.9802
Validation AUC: 0.9896
Epoch: 012, Loss: 0.0143
Validation begins
F1 score: 0.9796
Validation AUC: 0.9897
Epoch: 013, Loss: 0.0121
Validation begins
F1 score: 0.9806
Validation AUC: 0.9896
Epoch: 014, Loss: 0.0094
Validation begins
F1 score: 0.9804
Validation AUC: 0.9883
link_prediction_log/GraphTransformer_GPT.txt
ADDED
@@ -0,0 +1,58 @@
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], edge_attr=[676684, 1536], x=[478022, 1536])
cuda
Epoch: 001, Loss: 0.1325
Validation begins
F1 score: 0.9808
Validation AUC: 0.9921
Epoch: 002, Loss: 0.0609
Validation begins
F1 score: 0.9810
Validation AUC: 0.9944
Epoch: 003, Loss: 0.0496
Validation begins
F1 score: 0.9808
Validation AUC: 0.9944
Epoch: 004, Loss: 0.0392
Validation begins
F1 score: 0.9792
Validation AUC: 0.9942
Epoch: 005, Loss: 0.0326
Validation begins
F1 score: 0.9804
Validation AUC: 0.9934
Epoch: 006, Loss: 0.0236
Validation begins
F1 score: 0.9805
Validation AUC: 0.9935
Epoch: 007, Loss: 0.0204
Validation begins
F1 score: 0.9800
Validation AUC: 0.9900
Epoch: 008, Loss: 0.0185
Validation begins
F1 score: 0.9801
Validation AUC: 0.9915
Epoch: 009, Loss: 0.0154
Validation begins
F1 score: 0.9804
Validation AUC: 0.9917
Epoch: 010, Loss: 0.0128
Validation begins
F1 score: 0.9797
Validation AUC: 0.9911
Epoch: 011, Loss: 0.0106
Validation begins
F1 score: 0.9794
Validation AUC: 0.9903
Epoch: 012, Loss: 0.0095
Validation begins
F1 score: 0.9799
Validation AUC: 0.9878
Epoch: 013, Loss: 0.0087
Validation begins
F1 score: 0.9799
Validation AUC: 0.9894
Epoch: 014, Loss: 0.0077
Validation begins
F1 score: 0.9801
Validation AUC: 0.9898
link_prediction_log/MLP_GPT.txt
ADDED
@@ -0,0 +1,38 @@
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], edge_attr=[676684, 1536], x=[478022, 1536])
cuda
Epoch: 001, Loss: 0.2748
Validation begins
F1 score: 0.9365
Validation AUC: 0.9856
Epoch: 002, Loss: 0.1289
Validation begins
F1 score: 0.9579
Validation AUC: 0.9885
Epoch: 003, Loss: 0.1131
Validation begins
F1 score: 0.9603
Validation AUC: 0.9893
Epoch: 004, Loss: 0.1022
Validation begins
F1 score: 0.9523
Validation AUC: 0.9900
Epoch: 005, Loss: 0.0955
Validation begins
F1 score: 0.9582
Validation AUC: 0.9903
Epoch: 006, Loss: 0.0898
Validation begins
F1 score: 0.9599
Validation AUC: 0.9906
Epoch: 007, Loss: 0.0850
Validation begins
F1 score: 0.9650
Validation AUC: 0.9908
Epoch: 008, Loss: 0.0820
Validation begins
F1 score: 0.9651
Validation AUC: 0.9909
Epoch: 009, Loss: 0.0763
Validation begins
F1 score: 0.9598
Validation AUC: 0.9908
models/__pycache__/edge_conv.cpython-39.pyc
ADDED
Binary file (2.13 kB)
models/__pycache__/mlp.cpython-39.pyc
ADDED
Binary file (1.1 kB)
models/__pycache__/sage_edge_conv.cpython-39.pyc
ADDED
Binary file (3.95 kB)
models/edge_conv.py
ADDED
@@ -0,0 +1,52 @@
"""
edge_conv.py: an EdgeConv-style layer whose messages also incorporate
edge features (edge_attr).
"""
from typing import Callable, Optional, Union

import torch
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size


class EdgeConvConv(MessagePassing):
    def __init__(self, nn: Callable, eps: float = 0., train_eps: bool = False,
                 edge_dim: Optional[int] = None, **kwargs):
        kwargs.setdefault('aggr', 'add')
        super().__init__(**kwargs)
        self.nn = nn
        self.initial_eps = eps
        if train_eps:
            self.eps = torch.nn.Parameter(torch.Tensor([eps]))
        else:
            self.register_buffer('eps', torch.Tensor([eps]))
        if edge_dim is not None:
            if hasattr(self.nn, 'in_features'):
                in_channels = self.nn.in_features
            else:
                in_channels = self.nn.in_channels
            self.lin = Linear(edge_dim, in_channels)
        else:
            self.lin = None

    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
                edge_attr: OptTensor = None, size: Size = None) -> Tensor:
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)

        # propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
        out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)

        return out

    def message(self, x_i: Tensor, x_j: Tensor, edge_attr: Tensor) -> Tensor:
        # Concatenate target features, source features, and edge features,
        # then transform the result with self.nn.
        temp = torch.cat([x_i, x_j, edge_attr], dim=1)

        return self.nn(temp)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(nn={self.nn})'
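Usage note (not part of the commit): a sketch of EdgeConvConv on hypothetical dimensions. The wrapped module must accept 2 * node_dim + edge_dim inputs, since each message concatenates x_i, x_j, and edge_attr.

import torch
from torch.nn import Linear
from models.edge_conv import EdgeConvConv

node_dim, edge_dim = 16, 8  # hypothetical sizes
conv = EdgeConvConv(Linear(2 * node_dim + edge_dim, node_dim),
                    train_eps=True, edge_dim=edge_dim)

x = torch.randn(5, node_dim)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
edge_attr = torch.randn(4, edge_dim)
out = conv(x, edge_index, edge_attr)  # shape [5, node_dim]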
models/mlp.py
ADDED
@@ -0,0 +1,22 @@
import torch
import torch.nn.functional as F
from torch.nn import ModuleList
from torch_geometric.nn.dense.linear import Linear


class MLP(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2):
        super().__init__()
        self.mlp = ModuleList()
        self.mlp.append(Linear(in_channels, hidden_channels))
        if num_layers >= 2:
            for _ in range(num_layers - 2):
                self.mlp.append(Linear(hidden_channels, hidden_channels))
            self.mlp.append(Linear(hidden_channels, out_channels))

    def forward(self, x):
        # ReLU after every layer except the last one.
        for layer in self.mlp[:-1]:
            x = layer(x)
            x = F.relu(x)
        x = self.mlp[-1](x)
        return x
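Usage note (not part of the commit): a quick sanity check of the MLP on random inputs, with hypothetical sizes.

import torch
from models.mlp import MLP

mlp = MLP(in_channels=1536, hidden_channels=256, out_channels=3, num_layers=2)
out = mlp(torch.randn(10, 1536))  # shape [10, 3]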
models/sage_edge_conv.py
ADDED
@@ -0,0 +1,113 @@
from typing import Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import Tensor
from torch.nn import LSTM
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size
from torch_geometric.utils import to_dense_batch
from torch_scatter import scatter
from torch_sparse import SparseTensor, matmul


class SAGEEdgeConv(MessagePassing):
    def __init__(
        self,
        in_channels: Union[int, Tuple[int, int]],
        out_channels: int,
        edge_dim: int,
        aggr: str = 'mean',
        normalize: bool = False,
        root_weight: bool = True,
        project: bool = False,
        bias: bool = True,
        **kwargs,
    ):
        super().__init__(aggr=aggr if aggr != 'lstm' else None, node_dim=0)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.normalize = normalize
        self.root_weight = root_weight
        self.project = project

        if isinstance(in_channels, int):
            in_channels = (in_channels, in_channels)

        if self.project:
            self.lin = Linear(in_channels[0], in_channels[0], bias=True)

        if self.aggr is None:
            self.fuse = False  # No "fused" message_and_aggregate.
            self.lstm = LSTM(in_channels[0], in_channels[0], batch_first=True)

        self.lin_t = Linear(edge_dim, in_channels[0], bias=bias)
        self.lin_l = Linear(in_channels[0], out_channels, bias=bias)
        if self.root_weight:
            self.lin_r = Linear(in_channels[1], out_channels, bias=False)

        self.reset_parameters()

    def reset_parameters(self):
        if self.project:
            self.lin.reset_parameters()
        if self.aggr is None:
            self.lstm.reset_parameters()
        self.lin_t.reset_parameters()
        self.lin_l.reset_parameters()
        if self.root_weight:
            self.lin_r.reset_parameters()

    def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj, edge_attr: OptTensor = None,
                size: Size = None) -> Tensor:
        if isinstance(x, Tensor):
            x: OptPairTensor = (x, x)

        if self.project and hasattr(self, 'lin'):
            x = (self.lin(x[0]).relu(), x[1])

        # propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
        out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
        out = self.lin_l(out)

        x_r = x[1]
        if self.root_weight and x_r is not None:
            out += self.lin_r(x_r)

        if self.normalize:
            out = F.normalize(out, p=2., dim=-1)

        return out

    def message(self, x_j: Tensor, edge_attr: Tensor) -> Tensor:
        # Project edge features to the node feature width and add them
        # to the neighbor message.
        return x_j + self.lin_t(edge_attr)

    def message_and_aggregate(self, adj_t: SparseTensor,
                              x: OptPairTensor) -> Tensor:
        # NOTE: this fused sparse path ignores edge_attr.
        adj_t = adj_t.set_value(None, layout=None)
        return matmul(adj_t, x[0], reduce=self.aggr)

    def aggregate(self, x: Tensor, index: Tensor, ptr: Optional[Tensor] = None,
                  dim_size: Optional[int] = None) -> Tensor:
        if self.aggr is not None:
            return scatter(x, index, dim=self.node_dim, dim_size=dim_size,
                           reduce=self.aggr)

        # LSTM aggregation:
        if ptr is None and not torch.all(index[:-1] <= index[1:]):
            raise ValueError(f"Can not utilize LSTM-style aggregation inside "
                             f"'{self.__class__.__name__}' in case the "
                             f"'edge_index' tensor is not sorted by columns. "
                             f"Run 'sort_edge_index(..., sort_by_row=False)' "
                             f"in a pre-processing step.")

        x, mask = to_dense_batch(x, batch=index, batch_size=dim_size)
        out, _ = self.lstm(x)
        return out[:, -1]

    def __repr__(self) -> str:
        aggr = self.aggr if self.aggr is not None else 'lstm'
        return (f'{self.__class__.__name__}({self.in_channels}, '
                f'{self.out_channels}, aggr={aggr})')
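Usage note (not part of the commit): a sketch of SAGEEdgeConv on hypothetical dimensions; lin_t projects the edge features to the node width before they are added to the neighbor messages.

import torch
from models.sage_edge_conv import SAGEEdgeConv

conv = SAGEEdgeConv(in_channels=16, out_channels=32, edge_dim=8)  # hypothetical sizes
x = torch.randn(5, 16)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 4]])
edge_attr = torch.randn(4, 8)
out = conv(x, edge_index, edge_attr)  # shape [5, 32]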
node_classification_log/EdgeConv_GPT.txt
ADDED
@@ -0,0 +1,30 @@
Data(edge_index=[2, 676684], text_nodes=[478022], text_edges=[676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684])
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], y=[478022], train_mask=[478022], val_mask=[478022], test_mask=[478022], num_classes=3, num_nodes=478022, x=[478022, 1536], edge_attr=[676684, 1536])
cuda
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9971
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9958
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9958
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9971
Validation begins
F1 score: 0.9960
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9972
node_classification_log/GINE_GPT.txt
ADDED
@@ -0,0 +1,30 @@
Data(edge_index=[2, 676684], text_nodes=[478022], text_edges=[676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684])
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], y=[478022], train_mask=[478022], val_mask=[478022], test_mask=[478022], num_classes=3, num_nodes=478022, x=[478022, 1536], edge_attr=[676684, 1536])
cuda
Validation begins
F1 score: 0.9955
Validation Accuracy: 0.9970
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9958
Validation Accuracy: 0.9971
Validation begins
F1 score: 0.9961
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9960
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9961
Validation Accuracy: 0.9969
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9967
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9972
node_classification_log/GeneralConv_GPT.txt
ADDED
@@ -0,0 +1,30 @@
Data(edge_index=[2, 676684], text_nodes=[478022], text_edges=[676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684])
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], y=[478022], train_mask=[478022], val_mask=[478022], test_mask=[478022], num_classes=3, num_nodes=478022, x=[478022, 1536], edge_attr=[676684, 1536])
cuda
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9971
Validation begins
F1 score: 0.9958
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9960
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9963
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9966
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9968
Validation begins
F1 score: 0.9966
Validation Accuracy: 0.9975
node_classification_log/GraphSAGE_GPT.txt
ADDED
@@ -0,0 +1,30 @@
Data(edge_index=[2, 676684], text_nodes=[478022], text_edges=[676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684])
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], y=[478022], train_mask=[478022], val_mask=[478022], test_mask=[478022], num_classes=3, num_nodes=478022, x=[478022, 1536], edge_attr=[676684, 1536])
cuda
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9971
Validation begins
F1 score: 0.9957
Validation Accuracy: 0.9971
Validation begins
F1 score: 0.9960
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9974
Validation begins
F1 score: 0.9961
Validation Accuracy: 0.9970
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9960
Validation Accuracy: 0.9967
Validation begins
F1 score: 0.9961
Validation Accuracy: 0.9970
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9970
node_classification_log/GraphTransformer_GPT.txt
ADDED
@@ -0,0 +1,30 @@
Data(edge_index=[2, 676684], text_nodes=[478022], text_edges=[676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684])
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], y=[478022], train_mask=[478022], val_mask=[478022], test_mask=[478022], num_classes=3, num_nodes=478022, x=[478022, 1536], edge_attr=[676684, 1536])
cuda
Validation begins
F1 score: 0.9958
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9959
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9961
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9958
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9963
Validation Accuracy: 0.9973
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9972
Validation begins
F1 score: 0.9961
Validation Accuracy: 0.9970
Validation begins
F1 score: 0.9962
Validation Accuracy: 0.9971
node_classification_log/MLP_GPT.txt
ADDED
@@ -0,0 +1,30 @@
Data(edge_index=[2, 676684], text_nodes=[478022], text_edges=[676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684])
Data(edge_index=[2, 676684], node_labels=[478022], edge_score_labels=[676684], edge_special_labels=[676684], y=[478022], train_mask=[478022], val_mask=[478022], test_mask=[478022], num_classes=3, num_nodes=478022, x=[478022, 1536], edge_attr=[676684, 1536])
cuda
Validation begins
F1 score: 0.9532
Validation Accuracy: 0.9687
Validation begins
F1 score: 0.9530
Validation Accuracy: 0.9685
Validation begins
F1 score: 0.9653
Validation Accuracy: 0.9738
Validation begins
F1 score: 0.9751
Validation Accuracy: 0.9791
Validation begins
F1 score: 0.9787
Validation Accuracy: 0.9815
Validation begins
F1 score: 0.9798
Validation Accuracy: 0.9824
Validation begins
F1 score: 0.9817
Validation Accuracy: 0.9835
Validation begins
F1 score: 0.9810
Validation Accuracy: 0.9834
Validation begins
F1 score: 0.9817
Validation Accuracy: 0.9839