HUANGYIFEI committed
Commit 9301fb5
1 Parent(s): becf496

Delete DataInspect.ipynb

Files changed (1)
  1. DataInspect.ipynb +0 -622
DataInspect.ipynb DELETED
@@ -1,622 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "1517383df6eb646",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T13:13:56.347478Z",
- "start_time": "2024-12-13T13:13:52.210350Z"
- }
- },
- "outputs": [],
- "source": [
- "import os\n",
- "import time\n",
- "from rdkit import Chem\n",
- "from rdkit import RDLogger;\n",
- "from torch.utils.data import Dataset\n",
- "import torch.nn.functional as F\n",
- "from tqdm import tqdm\n",
- "RDLogger.DisableLog('rdApp.*')\n",
- "import torch\n",
- "import torch.nn as nn\n",
- "import torch.optim as optim\n",
- "import pickle\n",
- "import numpy as np\n",
- "import matplotlib.pyplot as plt\n",
- "import math\n",
- "import dgl\n",
- "import networkx as nx"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "697783252f244e50",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T04:02:54.040212Z",
- "start_time": "2024-12-13T04:02:54.034215Z"
- }
- },
- "outputs": [],
- "source": [
- "atom_number_index_dict ={\n",
- " 1:0, # H\n",
- " 6:1, # C\n",
- " 7:2, # N\n",
- " 8:3, # O\n",
- " 9:4 # F\n",
- "} \n",
- "# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
- "atom_index_number_dict = {v: k for k, v in atom_number_index_dict.items()}\n",
- "max_atom_number = max(atom_number_index_dict.keys())\n",
- "atom_number2index_tensor = torch.full((max_atom_number + 1,), -1)\n",
- "for k, v in atom_number_index_dict.items():\n",
- " atom_number2index_tensor[k] = v\n",
- "\n",
- "atom_index2number_tensor = torch.tensor([atom_index_number_dict[i] for i in range(len(atom_index_number_dict))])\n",
- "def atom_number2index(atom_number):\n",
- " return atom_number_index_dict[atom_number]\n",
- "def atom_index2number(atom_index):\n",
- " return atom_index_number_dict[atom_index]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "7074f5a11a15ebc6",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T04:05:20.426859Z",
- "start_time": "2024-12-13T04:02:57.613812Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 130831/130831 [02:22<00:00, 916.44it/s] \n"
- ]
- }
- ],
- "source": [
- "from dgl.data import QM9Dataset\n",
- "from torch.utils.data import SubsetRandomSampler\n",
- "from dgl.dataloading import GraphDataLoader\n",
- "from multiprocessing import Pool\n",
- "\n",
- "dataset = QM9Dataset(label_keys=['mu', 'gap'], cutoff=5.0)\n",
- "dataset_length = len(dataset)\n",
- "train_idx = torch.arange(dataset_length)\n",
- "class PreprocessedQM9Dataset(Dataset):\n",
- " def __init__(self, dataset):\n",
- " self.dataset = dataset\n",
- " self.processed_data = []\n",
- " self._preprocess()\n",
- "\n",
- " def _preprocess(self):\n",
- " for g, label in tqdm(self.dataset):\n",
- " g.ndata[\"Z_index\"] = torch.tensor([atom_number2index(z.item()) for z in g.ndata[\"Z\"]])\n",
- " self.processed_data.append((g, label))\n",
- "\n",
- " def __len__(self):\n",
- " return len(self.processed_data)\n",
- "\n",
- " def __getitem__(self, idx):\n",
- " return self.processed_data[idx]\n",
- "\n",
- "# wrap the dataset\n",
- "processed_dataset = PreprocessedQM9Dataset(dataset)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "d1f69b7e2e1aa945",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T03:55:50.314260Z",
- "start_time": "2024-12-13T03:55:50.115978Z"
- }
- },
- "outputs": [
- {
- "ename": "NameError",
- "evalue": "name 'processed_dataset' is not defined",
- "output_type": "error",
- "traceback": [
- "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
- "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
- "Cell \u001b[1;32mIn[1], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[43mprocessed_dataset\u001b[49m[\u001b[38;5;241m0\u001b[39m])\n",
- "\u001b[1;31mNameError\u001b[0m: name 'processed_dataset' is not defined"
- ]
- }
- ],
- "source": [
- "print(processed_dataset[0])"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "d1137deeda269919",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T04:05:20.442135Z",
- "start_time": "2024-12-13T04:05:20.428230Z"
- }
- },
- "outputs": [],
- "source": [
- "myGLoader = GraphDataLoader(processed_dataset,batch_size=4,pin_memory=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "id": "b44c553b-dc97-445c-b50f-5d8cc58e12c3",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T12:20:46.508536Z",
- "start_time": "2024-12-13T12:20:20.023147Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 0%| | 138/32708 [00:00<00:23, 1368.45it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "16\n",
- "21\n",
- "26\n",
- "38\n",
- "42\n",
- "44\n",
- "49\n",
- "50\n",
- "58\n",
- "72\n",
- "80\n",
- "82\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 2%|▏ | 546/32708 [00:00<00:24, 1307.34it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "84\n",
- "86\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 5%|▍ | 1633/32708 [00:01<00:23, 1311.41it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "94\n",
- "96\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 43%|████▎ | 14160/32708 [00:10<00:15, 1224.36it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "98\n",
- "100\n",
- "106\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 46%|████▌ | 14903/32708 [00:11<00:14, 1228.66it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "110\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 32708/32708 [00:26<00:00, 1235.31it/s]\n"
- ]
- }
- ],
- "source": [
- "max_nodes = 0\n",
- "for batch in tqdm(myGLoader):\n",
- " g,label = batch\n",
- " if g.num_nodes()>max_nodes:\n",
- " max_nodes = g.num_nodes()\n",
- " print(g.num_nodes())\n",
- " # print(g)\n",
- " # break\n",
- " "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "1a5caea191a642bc",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T04:05:20.457355Z",
- "start_time": "2024-12-13T04:05:20.443241Z"
- }
- },
- "outputs": [],
- "source": [
- "from functools import partial\n",
- "import sys\n",
- "sys.path.append(\"lib\")\n",
- "from lib.metrics import sce_loss\n",
- "\n",
- "class GMae(nn.Module):\n",
- " def __init__(self, encoder,decoder,\n",
- " in_dim,hidden_dim,out_dim,mask_rate=0.3,replace_rate=0.1,alpha_l=2,\n",
- " embedding_layer_classes=5,embedding_layer_dim=4):\n",
- " super(GMae, self).__init__()\n",
- " self.Z_embedding = nn.Embedding(embedding_layer_classes,embedding_layer_dim)\n",
- " self.encoder = encoder\n",
- " self.decoder = decoder\n",
- " self.mask_rate = mask_rate\n",
- " self.replace_rate = replace_rate\n",
- " self.alpha_l = alpha_l\n",
- " self.in_dim = in_dim\n",
- " self.hidden_dim = hidden_dim\n",
- " self.out_dim = out_dim\n",
- " self.embedding_layer_classes = embedding_layer_classes\n",
- " self.embedding_layer_dim = embedding_layer_dim\n",
- " self.enc_mask_token = nn.Parameter(torch.zeros(1,in_dim))\n",
- " self.criterion = partial(sce_loss, alpha=alpha_l)\n",
- " self.encoder_to_decoder = nn.Linear(hidden_dim, hidden_dim, bias=False)\n",
- " def encode_atom_index(self,Z_index):\n",
- " return self.Z_embedding(Z_index)\n",
- " def encoding_mask_noise(self, g, x, mask_rate=0.3):\n",
- " num_nodes = g.num_nodes()\n",
- " perm = torch.randperm(num_nodes, device=x.device)\n",
- " # random masking\n",
- " num_mask_nodes = int(mask_rate * num_nodes)\n",
- " mask_nodes = perm[: num_mask_nodes]\n",
- " keep_nodes = perm[num_mask_nodes: ]\n",
- "\n",
- " if self.replace_rate > 0:\n",
- " num_noise_nodes = int(self.replace_rate * num_mask_nodes)\n",
- " perm_mask = torch.randperm(num_mask_nodes, device=x.device)\n",
- " token_nodes = mask_nodes[perm_mask[: int((1-self.replace_rate) * num_mask_nodes)]]\n",
- " noise_nodes = mask_nodes[perm_mask[-int(self.replace_rate * num_mask_nodes):]]\n",
- " noise_to_be_chosen = torch.randperm(num_nodes, device=x.device)[:num_noise_nodes]\n",
- " out_x = x.clone()\n",
- " out_x[token_nodes] = 0.0\n",
- " out_x[noise_nodes] = x[noise_to_be_chosen]\n",
- " else:\n",
- " out_x = x.clone()\n",
- " token_nodes = mask_nodes\n",
- " out_x[mask_nodes] = 0.0\n",
- "\n",
- " out_x[token_nodes] += self.enc_mask_token\n",
- " use_g = g.clone()\n",
- "\n",
- " return use_g, out_x, (mask_nodes, keep_nodes) \n",
- " def mask_attr_prediction(self, g, x):\n",
- " use_g, use_x, (mask_nodes, keep_nodes) = self.encoding_mask_noise(g, x, self.mask_rate)\n",
- " enc_rep = self.encoder(use_g, use_x)\n",
- " # ---- attribute reconstruction ----\n",
- " rep = self.encoder_to_decoder(enc_rep)\n",
- " recon = self.decoder(use_g, rep)\n",
- " x_init = x[mask_nodes]\n",
- " x_rec = recon[mask_nodes]\n",
- " loss = self.criterion(x_rec, x_init)\n",
- " return loss\n",
- "\n",
- " def embed(self, g, x):\n",
- " rep = self.encoder(g, x)\n",
- " return rep\n",
- " "
- ]
- },
355
- {
356
- "cell_type": "code",
357
- "execution_count": 7,
358
- "id": "c99cb509ac0f1054",
359
- "metadata": {
360
- "ExecuteTime": {
361
- "end_time": "2024-12-13T04:05:20.473215Z",
362
- "start_time": "2024-12-13T04:05:20.458354Z"
363
- }
364
- },
365
- "outputs": [],
366
- "source": [
367
- "import dgl.nn as dglnn\n",
368
- "import torch.nn as nn\n",
369
- "import torch.nn.functional as F\n",
370
- "class SimpleGNN(nn.Module):\n",
371
- " def __init__(self, in_feats, hid_feats, out_feats):\n",
372
- " super().__init__()\n",
373
- " self.conv1 = dglnn.SAGEConv(\n",
374
- " in_feats=in_feats, out_feats=hid_feats,aggregator_type=\"mean\")\n",
375
- " self.conv2 = dglnn.SAGEConv(\n",
376
- " in_feats=hid_feats, out_feats=out_feats,aggregator_type=\"mean\")\n",
377
- "\n",
378
- " def forward(self, graph, inputs):\n",
379
- " # 输入是节点的特征\n",
380
- " h = self.conv1(graph, inputs)\n",
381
- " h = F.relu(h)\n",
382
- " h = self.conv2(graph, h)\n",
383
- " return h"
384
- ]
385
- },
386
- {
387
- "cell_type": "code",
388
- "execution_count": 8,
389
- "id": "5a8a4e4dd753b642",
390
- "metadata": {
391
- "ExecuteTime": {
392
- "end_time": "2024-12-13T04:05:20.707956Z",
393
- "start_time": "2024-12-13T04:05:20.474302Z"
394
- }
395
- },
396
- "outputs": [],
397
- "source": [
398
- "sage_enc = SimpleGNN(in_feats=7,hid_feats=4,out_feats=4)\n",
399
- "sage_dec = SimpleGNN(in_feats=4,hid_feats=4,out_feats=7)\n",
400
- "gmae = GMae(sage_enc,sage_dec,7,4,7,replace_rate=0)\n",
401
- "epoches = 5\n",
402
- "optimizer = optim.Adam(gmae.parameters(), lr=1e-3)\n",
403
- "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
404
- ]
405
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "224529a988b81ef5",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T03:59:44.770215Z",
- "start_time": "2024-12-13T03:59:11.545931Z"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 0 started!\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- " 10%|▉ | 3262/32708 [00:32<04:55, 99.64it/s] \n",
- "\n",
- "KeyboardInterrupt\n",
- "\n"
- ]
- }
- ],
- "source": [
- "# print(f\"epoch {0} started!\")\n",
- "# gmae.train()\n",
- "# gmae.encoder.train()\n",
- "# gmae.decoder.train()\n",
- "# gmae.to(device)\n",
- "# loss_epoch = 0\n",
- "# import os\n",
- "# os.environ[\"CUDA_LAUNCH_BLOCKING\"]=\"1\"\n",
- "# for batch in tqdm(myGLoader):\n",
- "# optimizer.zero_grad()\n",
- "# batch_g, _ = batch\n",
- "# R = batch_g.ndata[\"R\"].to(device)\n",
- "# Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
- "# Z_emb = gmae.encode_atom_index(Z_index)\n",
- "# feat = torch.cat([R,Z_emb],dim=1)\n",
- "# batch_g = batch_g.to(device)\n",
- "# loss = gmae.mask_attr_prediction(batch_g, feat)\n",
- "# loss.backward()\n",
- "# optimizer.step()\n",
- "# loss_epoch+=loss.item()\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "a22599c4e591125b",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2024-12-13T04:30:37.389930Z",
- "start_time": "2024-12-13T04:05:20.708461Z"
- }
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "epoch 0 started!\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 32708/32708 [05:11<00:00, 105.09it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "best model saved-loss:470.463-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-0-470.463.pt\n",
- "epoch 0: loss 470.46260083183415\n",
- "epoch 1 started!\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 32708/32708 [05:04<00:00, 107.34it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "best model saved-loss:18.848-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-1-18.848.pt\n",
- "epoch 1: loss 18.848073385778548\n",
- "epoch 2 started!\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 32708/32708 [04:59<00:00, 109.35it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "best model saved-loss:4.784-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-2-4.784.pt\n",
- "epoch 2: loss 4.7842518344823475\n",
- "epoch 3 started!\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 32708/32708 [05:04<00:00, 107.37it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "best model saved-loss:1.336-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-3-1.336.pt\n",
- "epoch 3: loss 1.336019518836153\n",
- "epoch 4 started!\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 32708/32708 [04:56<00:00, 110.21it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "best model saved-loss:0.572-save_path:./experiments/consumption/gmae/12-13@12_05/gmae_epoch-4-0.572.pt\n",
- "epoch 4: loss 0.5721691430861142\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- }
- ],
- "source": [
- "from datetime import datetime\n",
- "\n",
- "current_time = datetime.now().strftime(\"%m-%d@%H_%M\")\n",
- "best_loss = 10000\n",
- "for epoch in range(epoches):\n",
- " print(f\"epoch {epoch} started!\")\n",
- " gmae.train()\n",
- " gmae.encoder.train()\n",
- " gmae.decoder.train()\n",
- " gmae.to(device)\n",
- " loss_epoch = 0\n",
- " for batch in tqdm(myGLoader):\n",
- " optimizer.zero_grad()\n",
- " batch_g, _ = batch\n",
- " R = batch_g.ndata[\"R\"].to(device)\n",
- " # Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
- " Z_index = batch_g.ndata[\"Z_index\"].to(device)\n",
- " Z_emb = gmae.encode_atom_index(Z_index)\n",
- " feat = torch.cat([R,Z_emb],dim=1)\n",
- " batch_g = batch_g.to(device)\n",
- " loss = gmae.mask_attr_prediction(batch_g, feat)\n",
- " loss.backward()\n",
- " optimizer.step()\n",
- " loss_epoch+=loss.item()\n",
- " if loss_epoch < best_loss:\n",
- " formatted_loss_epoch = f\"{loss_epoch:.3f}\"\n",
- " save_path = f\"./experiments/QM9/gmae/{current_time}/gmae_epoch-{epoch}-{formatted_loss_epoch}.pt\"\n",
- " save_dir = os.path.dirname(save_path)\n",
- " if not os.path.exists(save_dir):\n",
- " os.makedirs(save_dir,exist_ok=True)\n",
- " torch.save(gmae.state_dict(), save_path)\n",
- " best_loss = loss_epoch\n",
- " print(f\"best model saved-loss:{formatted_loss_epoch}-save_path:{save_path}\")\n",
- " print(f\"epoch {epoch}: loss {loss_epoch}\")"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "gnn_course",
- "language": "python",
- "name": "gnn_course"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.8.20"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }