ervau committed
Commit a022b41 · verified · 1 Parent(s): 2b816c8

Upload folder using huggingface_hub

Files changed (39)
  1. .gitattributes +2 -0
  2. finetuning_datasets/classification/bace/bace.csv +0 -0
  3. finetuning_datasets/classification/bace/bace_embs.npz +3 -0
  4. finetuning_datasets/classification/bbbp/bbbp.csv +0 -0
  5. finetuning_datasets/classification/bbbp/bbbp_embs.npz +3 -0
  6. finetuning_datasets/classification/hiv/hiv.csv +3 -0
  7. finetuning_datasets/classification/hiv/hiv_embs.npz +3 -0
  8. finetuning_datasets/classification/sider/sider.csv +0 -0
  9. finetuning_datasets/classification/sider/sider_embs.npz +3 -0
  10. finetuning_datasets/classification/tox21/tox21.csv +0 -0
  11. finetuning_datasets/classification/tox21/tox21_embs.npz +3 -0
  12. finetuning_datasets/regression/esol/esol.csv +0 -0
  13. finetuning_datasets/regression/esol/esol_embs.npz +3 -0
  14. finetuning_datasets/regression/freesolv/freesolv.csv +0 -0
  15. finetuning_datasets/regression/freesolv/freesolv_embs.npz +3 -0
  16. finetuning_datasets/regression/lipo/lipo.csv +0 -0
  17. finetuning_datasets/regression/lipo/lipo_embs.npz +3 -0
  18. finetuning_datasets/regression/pdbbind_full/pdbbind_full.csv +0 -0
  19. finetuning_datasets/regression/pdbbind_full/pdbbind_full_embs.npz +3 -0
  20. models/DMGI/dmgi_model.pt +3 -0
  21. models/SELFormerMM/config.json +27 -0
  22. models/SELFormerMM/merges.txt +378 -0
  23. models/SELFormerMM/pytorch_model.bin +3 -0
  24. models/SELFormerMM/special_tokens_map.json +51 -0
  25. models/SELFormerMM/tokenizer.json +909 -0
  26. models/SELFormerMM/tokenizer_config.json +65 -0
  27. models/SELFormerMM/vocab.json +1 -0
  28. pretraining_datasets/graph_embeddings.npy +3 -0
  29. pretraining_datasets/kg_embeddings.npy +3 -0
  30. pretraining_datasets/pretraining_dataset_meta.csv +3 -0
  31. pretraining_datasets/selformermm_kg_heterodata.pt +3 -0
  32. pretraining_datasets/text_embeddings.npy +3 -0
  33. processing/dmgi_model.py +103 -0
  34. processing/graph_embedding.py +71 -0
  35. processing/npy_to_h5.py +28 -0
  36. processing/pretrain_dmgi.py +77 -0
  37. processing/selfies_embedding.py +39 -0
  38. processing/smiles_to_selfies.py +52 -0
  39. processing/text_embedding.py +96 -0
.gitattributes CHANGED
@@ -58,3 +58,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ finetuning_datasets/classification/hiv/hiv.csv filter=lfs diff=lfs merge=lfs -text
+ pretraining_datasets/pretraining_dataset_meta.csv filter=lfs diff=lfs merge=lfs -text
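Note: the diffs below store the large binaries as Git LFS pointers, so a clone without LFS yields only pointer text. A minimal sketch for fetching one real file with huggingface_hub (the same library used for this upload); the repo_id is a placeholder, since this page does not name the repository:

from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="<namespace>/<repo>",  # placeholder repo id, not stated in this commit
    filename="pretraining_datasets/pretraining_dataset_meta.csv",
)
print(local_path)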
finetuning_datasets/classification/bace/bace.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/classification/bace/bace_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f465acd0549687e12687411084ca6d9b5791d434b3b54f9cfb8dc4bcf24a70c
+ size 8521956
finetuning_datasets/classification/bbbp/bbbp.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/classification/bbbp/bbbp_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:62dc6df47da7d3906a92786252b4d69d4c122edace0c23fc7d5239f1d55a1aa8
+ size 11484388
finetuning_datasets/classification/hiv/hiv.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0bffaa9de18b5c60ffcf4b4c968e2c7e4b7925f1387503288034a5cf6205d0fa
+ size 15978461
finetuning_datasets/classification/hiv/hiv_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc1f46f005a0c52b030edae79c34838a1e419c204ce30812b0d9580dd8e8c735
+ size 231628004
finetuning_datasets/classification/sider/sider.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/classification/sider/sider_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faf9296814b4e6dc2fdeee328962be70ee3146cfab0a1d4be479af4ed4a43782
+ size 8037604
finetuning_datasets/classification/tox21/tox21.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/classification/tox21/tox21_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abcca0f8256afb8c65c401a01d0621c3df6a95478756614ab1ff1d273dc40ca1
+ size 44104932
finetuning_datasets/regression/esol/esol.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/regression/esol/esol_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2942198373770199f8cc29feb13b8786864ac95ef94de40cc14246993bf3ac94
+ size 6353636
finetuning_datasets/regression/freesolv/freesolv.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/regression/freesolv/freesolv_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d58624c0eb10906cba3d617bd4a0135327e35a79624d09f2e8bfdf34f8974b18
+ size 3616484
finetuning_datasets/regression/lipo/lipo.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/regression/lipo/lipo_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fb19c3757a30c69924092e2c60b127bcf703613421597c7085a81e95589fb220
+ size 23655140
finetuning_datasets/regression/pdbbind_full/pdbbind_full.csv ADDED
The diff for this file is too large to render. See raw diff
 
finetuning_datasets/regression/pdbbind_full/pdbbind_full_embs.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a72b9bd1675844c68a859c96cbdb91e7c2c5697c8f76ee30c437734d8a78b0b9
+ size 55644900
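Each *_embs.npz above is a standard NumPy zip archive. A quick inspection sketch, assuming only the usual np.savez layout (the array names stored inside are not documented in this commit):

import numpy as np

embs = np.load("finetuning_datasets/regression/pdbbind_full/pdbbind_full_embs.npz")
for name in embs.files:  # list whatever arrays were stored
    print(name, embs[name].shape, embs[name].dtype)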
models/DMGI/dmgi_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3bf449d1b5cc0c9e3ed8ddc40268de9c7f5d1624bfcbcdcd3bc1deff877bdb6
+ size 373485764
models/SELFormerMM/config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "HUBioDataLab/SELFormer",
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 4,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.1",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 800
+ }
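This config declares a 12-layer RoBERTa masked-LM head with hidden size 768, 4 attention heads, and an 800-token vocabulary, initialized from HUBioDataLab/SELFormer. A hedged loading sketch, assuming the models/SELFormerMM folder has been downloaded locally:

from transformers import AutoTokenizer, RobertaForMaskedLM

model = RobertaForMaskedLM.from_pretrained("models/SELFormerMM")
tokenizer = AutoTokenizer.from_pretrained("models/SELFormerMM")
print(model.config.hidden_size, model.config.num_hidden_layers)  # 768, 12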
models/SELFormerMM/merges.txt ADDED
@@ -0,0 +1,378 @@
+ #version: 0.2 - Trained by `huggingface/tokenizers`
+ B r
+ a n
+ c h
+ Br an
+ Bran ch
+ Branch 1
+ = C
+ R i
+ n g
+ Ri ng
+ Ring 1
+ = Branch1
+ Branch 2
+ = O
+ Ring 2
+ H 1
+ C @
+ = N
+ # Branch1
+ C@ @
+ = Branch2
+ C@ H1
+ C@@ H1
+ # Branch2
+ # C
+ C l
+ / C
+ N H1
+ = Ring1
+ + 1
+ - 1
+ O -1
+ N +1
+ \ C
+ # N
+ / N
+ = Ring2
+ = S
+ =N +1
+ \ N
+ N a
+ Na +1
+ / O
+ \ O
+ Br -1
+ Branch 3
+ \ S
+ S +1
+ Cl -1
+ I -1
+ / C@@H1
+ S i
+ / C@H1
+ / S
+ =N -1
+ S e
+ = P
+ N -1
+ Ring 3
+ 2 H
+ P +1
+ K +1
+ \ C@@H1
+ \ C@H1
+ / N+1
+ @ @
+ C -1
+ # N+1
+ B -1
+ + 3
+ Cl +3
+ \ NH1
+ L i
+ Li +1
+ P H1
+ 1 8
+ 18 F
+ @ +1
+ 3 H
+ P @@
+ H 0
+ O H0
+ 1 2
+ P @
+ + 2
+ @@ +1
+ S -1
+ / Br
+ - /
+ \ Cl
+ -/ Ring2
+ \ O-1
+ 1 1
+ 5 I
+ 12 5I
+ 11 C
+ H 3
+ \ N+1
+ - \
+ / C@@
+ S @+1
+ A s
+ / Cl
+ 11C H3
+ =S e
+ S @@+1
+ N @+1
+ 1 4
+ -\ Ring2
+ 14 C
+ \ F
+ / C@
+ T e
+ H 2
+ H1 -1
+ =O +1
+ N @@+1
+ C +1
+ =S +1
+ Z n
+ / P
+ a +2
+ / I
+ O H1-1
+ C a+2
+ \ Br
+ M g
+ Zn +2
+ A l
+ / F
+ Mg +2
+ 12 3
+ 123 I
+ 1 3
+ I +1
+ / O-1
+ -\ Ring1
+ B H2
+ BH2 -1
+ \ I
+ / NH1
+ O +1
+ 13 1
+ 131 I
+ = 14C
+ / S+1
+ = Ring3
+ \ C@@
+ H2 +1
+ \ C@
+ A g
+ = As
+ =Se +1
+ N H2+1
+ Se H1
+ -/ Ring1
+ = Te
+ Al +3
+ Na H1
+ =Te +1
+ NH1 +1
+ Ag +1
+ H1 +1
+ NH1 -1
+ \ P
+ 14C H2
+ 13 C
+ 14C H1
+ = 11C
+ S @@
+ =P @@
+ Si H2
+ H3 -1
+ 14C H3
+ B H3-1
+ S @
+ =14C H1
+ =P H1
+ =P @
+ =N H1+1
+ \S +1
+ 12 4
+ C H1-1
+ S r
+ =S i
+ 124 I
+ Sr +2
+ #C -1
+ /C -1
+ N @
+ /N -1
+ 13C H1
+ / B
+ 1 9
+ B a+2
+ H 4
+ S H1+1
+ Se +1
+ 19 F
+ / 125I
+ P @+1
+ R b
+ Cl +1
+ Si H4
+ Rb +1
+ = Branch3
+ N @@
+ As +1
+ / Si
+ B H1-1
+ S H1
+ / 123I
+ 3 2
+ = Mg
+ H +1
+ \ B
+ Si H1
+ P@@ +1
+ - 2
+ 1 5
+ 1 7
+ 3 5
+ = 13CH1
+ C s
+ =N H2+1
+ =S H1
+ Mg H2
+ 32 P
+ 17 F
+ 35 S
+ Cs +1
+ # 11C
+ / 131I
+ B i
+ \ 125I
+ =S @@
+ \S -1
+ 6 Br
+ 7 I
+ 7 6Br
+ = B
+ e H1
+ \N -1
+ 18 O
+ 12 7I
+ 11C H2
+ 14 C@@H1
+ Te H2
+ 15 NH1
+ Bi +3
+ / P+1
+ / 13C
+ / 13CH1
+ 0 B
+ 1 0B
+ = Al
+ = 18O
+ B H0
+ F -1
+ N H3
+ S -2
+ Br +2
+ Cl +2
+ \S i
+ /S -1
+ =P H2
+ 14 C@H1
+ NH3 +1
+ # 14C
+ # O+1
+ - 3
+ 2 2
+ 4 H
+ 5 Se
+ 5 Sr+2
+ 7 5Se
+ 8 5Sr+2
+ = B-1
+ = 13C
+ @ -1
+ B e
+ B @@
+ B @-1
+ C a
+ C H1
+ I +3
+ K H1
+ O H1+1
+ R a+2
+ S H1-1
+ \ PH1
+ \ 123I
+ =C a
+ \C H1-1
+ =S @
+ \S eH1
+ /S eH1
+ Se -1
+ Li H1
+ 18F -1
+ 125I H1
+ 11C H1
+ Te H1
+ Zn +1
+ Zn -2
+ Al -3
+ 13C H3
+ 15 N
+ Be +2
+ B@@ -1
+ # P
+ # S
+ - 4
+ / PH1
+ / P@@
+ / As
+ / 14C
+ / 14CH1
+ 2 K+1
+ 2 Rb+1
+ 3 Se
+ 3 Ra+2
+ 4 5
+ 4 7
+ 4 2K+1
+ 5 I-1
+ 7 3Se
+ 8 9
+ 8 2Rb+1
+ = 32
+ = 32P
+ C H0
+ C H2
+ I +2
+ N H0
+ N H4
+ O H1
+ P H2+1
+ S H0
+ S H2
+ \ 3H
+ \ 11CH3
+ \C -1
+ \S e
+ Si @
+ Si -1
+ Si H1-1
+ Si H3-1
+ /S e
+ Se -2
+ \NH1 -1
+ 18F H1
+ 12 5I-1
+ 11 C@@H1
+ 11 C-1
+ As H1
+ As -1
+ 14 C@@
+ Te -1
+ Mg +1
+ 123 I-1
+ 123 Te
+ 123I H1
+ 13 5I
+ 131 I-1
+ Ag -4
+ 124 I-1
+ 76Br H1
+ 18O H1
+ 22 Na+1
+ 22 3Ra+2
+ Ca H2
+ 45 Ca+2
+ 47 Ca+2
+ 89 Sr+2
+ =32 PH1
+ NH4 +1
models/SELFormerMM/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01c5c78e9afb73faa656f644167d209a282e117f05af8a5b51588214948ef7bd
+ size 985086525
models/SELFormerMM/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/SELFormerMM/tokenizer.json ADDED
@@ -0,0 +1,909 @@
+ {
+   "version": "1.0",
+   "truncation": {
+     "direction": "Right",
+     "max_length": 512,
+     "strategy": "LongestFirst",
+     "stride": 0
+   },
+   "padding": {
+     "strategy": {
+       "Fixed": 512
+     },
+     "direction": "Right",
+     "pad_to_multiple_of": null,
+     "pad_id": 3,
+     "pad_type_id": 0,
+     "pad_token": "<pad>"
+   },
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "<unk>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": true,
+       "special": true
+     },
+     {
+       "id": 1,
+       "content": "<s>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": true,
+       "special": true
+     },
+     {
+       "id": 2,
+       "content": "</s>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": true,
+       "special": true
+     },
+     {
+       "id": 3,
+       "content": "<pad>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": true,
+       "special": true
+     },
+     {
+       "id": 4,
+       "content": "<mask>",
+       "single_word": false,
+       "lstrip": true,
+       "rstrip": false,
+       "normalized": true,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "ByteLevel",
+     "add_prefix_space": false,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "post_processor": {
+     "type": "RobertaProcessing",
+     "sep": [
+       "</s>",
+       2
+     ],
+     "cls": [
+       "<s>",
+       1
+     ],
+     "trim_offsets": true,
+     "add_prefix_space": false
+   },
+   "decoder": {
+     "type": "ByteLevel",
+     "add_prefix_space": true,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "model": {
+     "type": "BPE",
+     "dropout": null,
+     "unk_token": null,
+     "continuing_subword_prefix": "",
+     "end_of_word_suffix": "",
+     "fuse_unk": false,
+     "vocab": {
+       "<unk>": 0,
+       "<s>": 1,
+       "</s>": 2,
+       "<pad>": 3,
+       "<mask>": 4,
+       "\n": 5,
+       "#": 6,
+       "+": 7,
+       "-": 8,
+       ".": 9,
+       "/": 10,
+       "0": 11,
+       "1": 12,
+       "2": 13,
+       "3": 14,
+       "4": 15,
+       "5": 16,
+       "6": 17,
+       "7": 18,
+       "8": 19,
+       "9": 20,
+       "=": 21,
+       "@": 22,
+       "A": 23,
+       "B": 24,
+       "C": 25,
+       "F": 26,
+       "H": 27,
+       "I": 28,
+       "K": 29,
+       "L": 30,
+       "M": 31,
+       "N": 32,
+       "O": 33,
+       "P": 34,
+       "R": 35,
+       "S": 36,
+       "T": 37,
+       "Z": 38,
+       "\\": 39,
+       "a": 40,
+       "b": 41,
+       "c": 42,
+       "e": 43,
+       "g": 44,
+       "h": 45,
+       "i": 46,
+       "l": 47,
+       "n": 48,
+       "r": 49,
+       "s": 50,
+       "Br": 51,
+       "an": 52,
+       "ch": 53,
+       "Bran": 54,
+       "Branch": 55,
+       "Branch1": 56,
+       "=C": 57,
+       "Ri": 58,
+       "ng": 59,
+       "Ring": 60,
+       "Ring1": 61,
+       "=Branch1": 62,
+       "Branch2": 63,
+       "=O": 64,
+       "Ring2": 65,
+       "H1": 66,
+       "C@": 67,
+       "=N": 68,
+       "#Branch1": 69,
+       "C@@": 70,
+       "=Branch2": 71,
+       "C@H1": 72,
+       "C@@H1": 73,
+       "#Branch2": 74,
+       "#C": 75,
+       "Cl": 76,
+       "/C": 77,
+       "NH1": 78,
+       "=Ring1": 79,
+       "+1": 80,
+       "-1": 81,
+       "O-1": 82,
+       "N+1": 83,
+       "\\C": 84,
+       "#N": 85,
+       "/N": 86,
+       "=Ring2": 87,
+       "=S": 88,
+       "=N+1": 89,
+       "\\N": 90,
+       "Na": 91,
+       "Na+1": 92,
+       "/O": 93,
+       "\\O": 94,
+       "Br-1": 95,
+       "Branch3": 96,
+       "\\S": 97,
+       "S+1": 98,
+       "Cl-1": 99,
+       "I-1": 100,
+       "/C@@H1": 101,
+       "Si": 102,
+       "/C@H1": 103,
+       "/S": 104,
+       "=N-1": 105,
+       "Se": 106,
+       "=P": 107,
+       "N-1": 108,
+       "Ring3": 109,
+       "2H": 110,
+       "P+1": 111,
+       "K+1": 112,
+       "\\C@@H1": 113,
+       "\\C@H1": 114,
+       "/N+1": 115,
+       "@@": 116,
+       "C-1": 117,
+       "#N+1": 118,
+       "B-1": 119,
+       "+3": 120,
+       "Cl+3": 121,
+       "\\NH1": 122,
+       "Li": 123,
+       "Li+1": 124,
+       "PH1": 125,
+       "18": 126,
+       "18F": 127,
+       "@+1": 128,
+       "3H": 129,
+       "P@@": 130,
+       "H0": 131,
+       "OH0": 132,
+       "12": 133,
+       "P@": 134,
+       "+2": 135,
+       "@@+1": 136,
+       "S-1": 137,
+       "/Br": 138,
+       "-/": 139,
+       "\\Cl": 140,
+       "-/Ring2": 141,
+       "\\O-1": 142,
+       "11": 143,
+       "5I": 144,
+       "125I": 145,
+       "11C": 146,
+       "H3": 147,
+       "\\N+1": 148,
+       "-\\": 149,
+       "/C@@": 150,
+       "S@+1": 151,
+       "As": 152,
+       "/Cl": 153,
+       "11CH3": 154,
+       "=Se": 155,
+       "S@@+1": 156,
+       "N@+1": 157,
+       "14": 158,
+       "-\\Ring2": 159,
+       "14C": 160,
+       "\\F": 161,
+       "/C@": 162,
+       "Te": 163,
+       "H2": 164,
+       "H1-1": 165,
+       "=O+1": 166,
+       "N@@+1": 167,
+       "C+1": 168,
+       "=S+1": 169,
+       "Zn": 170,
+       "/P": 171,
+       "a+2": 172,
+       "/I": 173,
+       "OH1-1": 174,
+       "Ca+2": 175,
+       "\\Br": 176,
+       "Mg": 177,
+       "Zn+2": 178,
+       "Al": 179,
+       "/F": 180,
+       "Mg+2": 181,
+       "123": 182,
+       "123I": 183,
+       "13": 184,
+       "I+1": 185,
+       "/O-1": 186,
+       "-\\Ring1": 187,
+       "BH2": 188,
+       "BH2-1": 189,
+       "\\I": 190,
+       "/NH1": 191,
+       "O+1": 192,
+       "131": 193,
+       "131I": 194,
+       "=14C": 195,
+       "/S+1": 196,
+       "=Ring3": 197,
+       "\\C@@": 198,
+       "H2+1": 199,
+       "\\C@": 200,
+       "Ag": 201,
+       "=As": 202,
+       "=Se+1": 203,
+       "NH2+1": 204,
+       "SeH1": 205,
+       "-/Ring1": 206,
+       "=Te": 207,
+       "Al+3": 208,
+       "NaH1": 209,
+       "=Te+1": 210,
+       "NH1+1": 211,
+       "Ag+1": 212,
+       "H1+1": 213,
+       "NH1-1": 214,
+       "\\P": 215,
+       "14CH2": 216,
+       "13C": 217,
+       "14CH1": 218,
+       "=11C": 219,
+       "S@@": 220,
+       "=P@@": 221,
+       "SiH2": 222,
+       "H3-1": 223,
+       "14CH3": 224,
+       "BH3-1": 225,
+       "S@": 226,
+       "=14CH1": 227,
+       "=PH1": 228,
+       "=P@": 229,
+       "=NH1+1": 230,
+       "\\S+1": 231,
+       "124": 232,
+       "CH1-1": 233,
+       "Sr": 234,
+       "=Si": 235,
+       "124I": 236,
+       "Sr+2": 237,
+       "#C-1": 238,
+       "/C-1": 239,
+       "N@": 240,
+       "/N-1": 241,
+       "13CH1": 242,
+       "/B": 243,
+       "19": 244,
+       "Ba+2": 245,
+       "H4": 246,
+       "SH1+1": 247,
+       "Se+1": 248,
+       "19F": 249,
+       "/125I": 250,
+       "P@+1": 251,
+       "Rb": 252,
+       "Cl+1": 253,
+       "SiH4": 254,
+       "Rb+1": 255,
+       "=Branch3": 256,
+       "N@@": 257,
+       "As+1": 258,
+       "/Si": 259,
+       "BH1-1": 260,
+       "SH1": 261,
+       "/123I": 262,
+       "32": 263,
+       "=Mg": 264,
+       "H+1": 265,
+       "\\B": 266,
+       "SiH1": 267,
+       "P@@+1": 268,
+       "-2": 269,
+       "15": 270,
+       "17": 271,
+       "35": 272,
+       "=13CH1": 273,
+       "Cs": 274,
+       "=NH2+1": 275,
+       "=SH1": 276,
+       "MgH2": 277,
+       "32P": 278,
+       "17F": 279,
+       "35S": 280,
+       "Cs+1": 281,
+       "#11C": 282,
+       "/131I": 283,
+       "Bi": 284,
+       "\\125I": 285,
+       "=S@@": 286,
+       "\\S-1": 287,
+       "6Br": 288,
+       "7I": 289,
+       "76Br": 290,
+       "=B": 291,
+       "eH1": 292,
+       "\\N-1": 293,
+       "18O": 294,
+       "127I": 295,
+       "11CH2": 296,
+       "14C@@H1": 297,
+       "TeH2": 298,
+       "15NH1": 299,
+       "Bi+3": 300,
+       "/P+1": 301,
+       "/13C": 302,
+       "/13CH1": 303,
+       "0B": 304,
+       "10B": 305,
+       "=Al": 306,
+       "=18O": 307,
+       "BH0": 308,
+       "F-1": 309,
+       "NH3": 310,
+       "S-2": 311,
+       "Br+2": 312,
+       "Cl+2": 313,
+       "\\Si": 314,
+       "/S-1": 315,
+       "=PH2": 316,
+       "14C@H1": 317,
+       "NH3+1": 318,
+       "#14C": 319,
+       "#O+1": 320,
+       "-3": 321,
+       "22": 322,
+       "4H": 323,
+       "5Se": 324,
+       "5Sr+2": 325,
+       "75Se": 326,
+       "85Sr+2": 327,
+       "=B-1": 328,
+       "=13C": 329,
+       "@-1": 330,
+       "Be": 331,
+       "B@@": 332,
+       "B@-1": 333,
+       "Ca": 334,
+       "CH1": 335,
+       "I+3": 336,
+       "KH1": 337,
+       "OH1+1": 338,
+       "Ra+2": 339,
+       "SH1-1": 340,
+       "\\PH1": 341,
+       "\\123I": 342,
+       "=Ca": 343,
+       "\\CH1-1": 344,
+       "=S@": 345,
+       "\\SeH1": 346,
+       "/SeH1": 347,
+       "Se-1": 348,
+       "LiH1": 349,
+       "18F-1": 350,
+       "125IH1": 351,
+       "11CH1": 352,
+       "TeH1": 353,
+       "Zn+1": 354,
+       "Zn-2": 355,
+       "Al-3": 356,
+       "13CH3": 357,
+       "15N": 358,
+       "Be+2": 359,
+       "B@@-1": 360,
+       "#P": 361,
+       "#S": 362,
+       "-4": 363,
+       "/PH1": 364,
+       "/P@@": 365,
+       "/As": 366,
+       "/14C": 367,
+       "/14CH1": 368,
+       "2K+1": 369,
+       "2Rb+1": 370,
+       "3Se": 371,
+       "3Ra+2": 372,
+       "45": 373,
+       "47": 374,
+       "42K+1": 375,
+       "5I-1": 376,
+       "73Se": 377,
+       "89": 378,
+       "82Rb+1": 379,
+       "=32": 380,
+       "=32P": 381,
+       "CH0": 382,
+       "CH2": 383,
+       "I+2": 384,
+       "NH0": 385,
+       "NH4": 386,
+       "OH1": 387,
+       "PH2+1": 388,
+       "SH0": 389,
+       "SH2": 390,
+       "\\3H": 391,
+       "\\11CH3": 392,
+       "\\C-1": 393,
+       "\\Se": 394,
+       "Si@": 395,
+       "Si-1": 396,
+       "SiH1-1": 397,
+       "SiH3-1": 398,
+       "/Se": 399,
+       "Se-2": 400,
+       "\\NH1-1": 401,
+       "18FH1": 402,
+       "125I-1": 403,
+       "11C@@H1": 404,
+       "11C-1": 405,
+       "AsH1": 406,
+       "As-1": 407,
+       "14C@@": 408,
+       "Te-1": 409,
+       "Mg+1": 410,
+       "123I-1": 411,
+       "123Te": 412,
+       "123IH1": 413,
+       "135I": 414,
+       "131I-1": 415,
+       "Ag-4": 416,
+       "124I-1": 417,
+       "76BrH1": 418,
+       "18OH1": 419,
+       "22Na+1": 420,
+       "223Ra+2": 421,
+       "CaH2": 422,
+       "45Ca+2": 423,
+       "47Ca+2": 424,
+       "89Sr+2": 425,
+       "=32PH1": 426,
+       "NH4+1": 427
+     },
+     "merges": [
+       "B r",
+       "a n",
+       "c h",
+       "Br an",
+       "Bran ch",
+       "Branch 1",
+       "= C",
+       "R i",
+       "n g",
+       "Ri ng",
+       "Ring 1",
+       "= Branch1",
+       "Branch 2",
+       "= O",
+       "Ring 2",
+       "H 1",
+       "C @",
+       "= N",
+       "# Branch1",
+       "C@ @",
+       "= Branch2",
+       "C@ H1",
+       "C@@ H1",
+       "# Branch2",
+       "# C",
+       "C l",
+       "/ C",
+       "N H1",
+       "= Ring1",
+       "+ 1",
+       "- 1",
+       "O -1",
+       "N +1",
+       "\\ C",
+       "# N",
+       "/ N",
+       "= Ring2",
+       "= S",
+       "=N +1",
+       "\\ N",
+       "N a",
+       "Na +1",
+       "/ O",
+       "\\ O",
+       "Br -1",
+       "Branch 3",
+       "\\ S",
+       "S +1",
+       "Cl -1",
+       "I -1",
+       "/ C@@H1",
+       "S i",
+       "/ C@H1",
+       "/ S",
+       "=N -1",
+       "S e",
+       "= P",
+       "N -1",
+       "Ring 3",
+       "2 H",
+       "P +1",
+       "K +1",
+       "\\ C@@H1",
+       "\\ C@H1",
+       "/ N+1",
+       "@ @",
+       "C -1",
+       "# N+1",
+       "B -1",
+       "+ 3",
+       "Cl +3",
+       "\\ NH1",
+       "L i",
+       "Li +1",
+       "P H1",
+       "1 8",
+       "18 F",
+       "@ +1",
+       "3 H",
+       "P @@",
+       "H 0",
+       "O H0",
+       "1 2",
+       "P @",
+       "+ 2",
+       "@@ +1",
+       "S -1",
+       "/ Br",
+       "- /",
+       "\\ Cl",
+       "-/ Ring2",
+       "\\ O-1",
+       "1 1",
+       "5 I",
+       "12 5I",
+       "11 C",
+       "H 3",
+       "\\ N+1",
+       "- \\",
+       "/ C@@",
+       "S @+1",
+       "A s",
+       "/ Cl",
+       "11C H3",
+       "=S e",
+       "S @@+1",
+       "N @+1",
+       "1 4",
+       "-\\ Ring2",
+       "14 C",
+       "\\ F",
+       "/ C@",
+       "T e",
+       "H 2",
+       "H1 -1",
+       "=O +1",
+       "N @@+1",
+       "C +1",
+       "=S +1",
+       "Z n",
+       "/ P",
+       "a +2",
+       "/ I",
+       "O H1-1",
+       "C a+2",
+       "\\ Br",
+       "M g",
+       "Zn +2",
+       "A l",
+       "/ F",
+       "Mg +2",
+       "12 3",
+       "123 I",
+       "1 3",
+       "I +1",
+       "/ O-1",
+       "-\\ Ring1",
+       "B H2",
+       "BH2 -1",
+       "\\ I",
+       "/ NH1",
+       "O +1",
+       "13 1",
+       "131 I",
+       "= 14C",
+       "/ S+1",
+       "= Ring3",
+       "\\ C@@",
+       "H2 +1",
+       "\\ C@",
+       "A g",
+       "= As",
+       "=Se +1",
+       "N H2+1",
+       "Se H1",
+       "-/ Ring1",
+       "= Te",
+       "Al +3",
+       "Na H1",
+       "=Te +1",
+       "NH1 +1",
+       "Ag +1",
+       "H1 +1",
+       "NH1 -1",
+       "\\ P",
+       "14C H2",
+       "13 C",
+       "14C H1",
+       "= 11C",
+       "S @@",
+       "=P @@",
+       "Si H2",
+       "H3 -1",
+       "14C H3",
+       "B H3-1",
+       "S @",
+       "=14C H1",
+       "=P H1",
+       "=P @",
+       "=N H1+1",
+       "\\S +1",
+       "12 4",
+       "C H1-1",
+       "S r",
+       "=S i",
+       "124 I",
+       "Sr +2",
+       "#C -1",
+       "/C -1",
+       "N @",
+       "/N -1",
+       "13C H1",
+       "/ B",
+       "1 9",
+       "B a+2",
+       "H 4",
+       "S H1+1",
+       "Se +1",
+       "19 F",
+       "/ 125I",
+       "P @+1",
+       "R b",
+       "Cl +1",
+       "Si H4",
+       "Rb +1",
+       "= Branch3",
+       "N @@",
+       "As +1",
+       "/ Si",
+       "B H1-1",
+       "S H1",
+       "/ 123I",
+       "3 2",
+       "= Mg",
+       "H +1",
+       "\\ B",
+       "Si H1",
+       "P@@ +1",
+       "- 2",
+       "1 5",
+       "1 7",
+       "3 5",
+       "= 13CH1",
+       "C s",
+       "=N H2+1",
+       "=S H1",
+       "Mg H2",
+       "32 P",
+       "17 F",
+       "35 S",
+       "Cs +1",
+       "# 11C",
+       "/ 131I",
+       "B i",
+       "\\ 125I",
+       "=S @@",
+       "\\S -1",
+       "6 Br",
+       "7 I",
+       "7 6Br",
+       "= B",
+       "e H1",
+       "\\N -1",
+       "18 O",
+       "12 7I",
+       "11C H2",
+       "14 C@@H1",
+       "Te H2",
+       "15 NH1",
+       "Bi +3",
+       "/ P+1",
+       "/ 13C",
+       "/ 13CH1",
+       "0 B",
+       "1 0B",
+       "= Al",
+       "= 18O",
+       "B H0",
+       "F -1",
+       "N H3",
+       "S -2",
+       "Br +2",
+       "Cl +2",
+       "\\S i",
+       "/S -1",
+       "=P H2",
+       "14 C@H1",
+       "NH3 +1",
+       "# 14C",
+       "# O+1",
+       "- 3",
+       "2 2",
+       "4 H",
+       "5 Se",
+       "5 Sr+2",
+       "7 5Se",
+       "8 5Sr+2",
+       "= B-1",
+       "= 13C",
+       "@ -1",
+       "B e",
+       "B @@",
+       "B @-1",
+       "C a",
+       "C H1",
+       "I +3",
+       "K H1",
+       "O H1+1",
+       "R a+2",
+       "S H1-1",
+       "\\ PH1",
+       "\\ 123I",
+       "=C a",
+       "\\C H1-1",
+       "=S @",
+       "\\S eH1",
+       "/S eH1",
+       "Se -1",
+       "Li H1",
+       "18F -1",
+       "125I H1",
+       "11C H1",
+       "Te H1",
+       "Zn +1",
+       "Zn -2",
+       "Al -3",
+       "13C H3",
+       "15 N",
+       "Be +2",
+       "B@@ -1",
+       "# P",
+       "# S",
+       "- 4",
+       "/ PH1",
+       "/ P@@",
+       "/ As",
+       "/ 14C",
+       "/ 14CH1",
+       "2 K+1",
+       "2 Rb+1",
+       "3 Se",
+       "3 Ra+2",
+       "4 5",
+       "4 7",
+       "4 2K+1",
+       "5 I-1",
+       "7 3Se",
+       "8 9",
+       "8 2Rb+1",
+       "= 32",
+       "= 32P",
+       "C H0",
+       "C H2",
+       "I +2",
+       "N H0",
+       "N H4",
+       "O H1",
+       "P H2+1",
+       "S H0",
+       "S H2",
+       "\\ 3H",
+       "\\ 11CH3",
+       "\\C -1",
+       "\\S e",
+       "Si @",
+       "Si -1",
+       "Si H1-1",
+       "Si H3-1",
+       "/S e",
+       "Se -2",
+       "\\NH1 -1",
+       "18F H1",
+       "12 5I-1",
+       "11 C@@H1",
+       "11 C-1",
+       "As H1",
+       "As -1",
+       "14 C@@",
+       "Te -1",
+       "Mg +1",
+       "123 I-1",
+       "123 Te",
+       "123I H1",
+       "13 5I",
+       "131 I-1",
+       "Ag -4",
+       "124 I-1",
+       "76Br H1",
+       "18O H1",
+       "22 Na+1",
+       "22 3Ra+2",
+       "Ca H2",
+       "45 Ca+2",
+       "47 Ca+2",
+       "89 Sr+2",
+       "=32 PH1",
+       "NH4 +1"
+     ]
+   }
+ }
models/SELFormerMM/tokenizer_config.json ADDED
@@ -0,0 +1,65 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "name_or_path": "HUBioDataLab/SELFormer",
+   "pad_token": {
+     "__type": "AddedToken",
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "special_tokens_map_file": null,
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/SELFormerMM/vocab.json ADDED
@@ -0,0 +1 @@
+ {"<unk>":0,"<s>":1,"</s>":2,"<pad>":3,"<mask>":4,"\n":5,"#":6,"+":7,"-":8,".":9,"/":10,"0":11,"1":12,"2":13,"3":14,"4":15,"5":16,"6":17,"7":18,"8":19,"9":20,"=":21,"@":22,"A":23,"B":24,"C":25,"F":26,"H":27,"I":28,"K":29,"L":30,"M":31,"N":32,"O":33,"P":34,"R":35,"S":36,"T":37,"Z":38,"\\":39,"a":40,"b":41,"c":42,"e":43,"g":44,"h":45,"i":46,"l":47,"n":48,"r":49,"s":50,"Br":51,"an":52,"ch":53,"Bran":54,"Branch":55,"Branch1":56,"=C":57,"Ri":58,"ng":59,"Ring":60,"Ring1":61,"=Branch1":62,"Branch2":63,"=O":64,"Ring2":65,"H1":66,"C@":67,"=N":68,"#Branch1":69,"C@@":70,"=Branch2":71,"C@H1":72,"C@@H1":73,"#Branch2":74,"#C":75,"Cl":76,"/C":77,"NH1":78,"=Ring1":79,"+1":80,"-1":81,"O-1":82,"N+1":83,"\\C":84,"#N":85,"/N":86,"=Ring2":87,"=S":88,"=N+1":89,"\\N":90,"Na":91,"Na+1":92,"/O":93,"\\O":94,"Br-1":95,"Branch3":96,"\\S":97,"S+1":98,"Cl-1":99,"I-1":100,"/C@@H1":101,"Si":102,"/C@H1":103,"/S":104,"=N-1":105,"Se":106,"=P":107,"N-1":108,"Ring3":109,"2H":110,"P+1":111,"K+1":112,"\\C@@H1":113,"\\C@H1":114,"/N+1":115,"@@":116,"C-1":117,"#N+1":118,"B-1":119,"+3":120,"Cl+3":121,"\\NH1":122,"Li":123,"Li+1":124,"PH1":125,"18":126,"18F":127,"@+1":128,"3H":129,"P@@":130,"H0":131,"OH0":132,"12":133,"P@":134,"+2":135,"@@+1":136,"S-1":137,"/Br":138,"-/":139,"\\Cl":140,"-/Ring2":141,"\\O-1":142,"11":143,"5I":144,"125I":145,"11C":146,"H3":147,"\\N+1":148,"-\\":149,"/C@@":150,"S@+1":151,"As":152,"/Cl":153,"11CH3":154,"=Se":155,"S@@+1":156,"N@+1":157,"14":158,"-\\Ring2":159,"14C":160,"\\F":161,"/C@":162,"Te":163,"H2":164,"H1-1":165,"=O+1":166,"N@@+1":167,"C+1":168,"=S+1":169,"Zn":170,"/P":171,"a+2":172,"/I":173,"OH1-1":174,"Ca+2":175,"\\Br":176,"Mg":177,"Zn+2":178,"Al":179,"/F":180,"Mg+2":181,"123":182,"123I":183,"13":184,"I+1":185,"/O-1":186,"-\\Ring1":187,"BH2":188,"BH2-1":189,"\\I":190,"/NH1":191,"O+1":192,"131":193,"131I":194,"=14C":195,"/S+1":196,"=Ring3":197,"\\C@@":198,"H2+1":199,"\\C@":200,"Ag":201,"=As":202,"=Se+1":203,"NH2+1":204,"SeH1":205,"-/Ring1":206,"=Te":207,"Al+3":208,"NaH1":209,"=Te+1":210,"NH1+1":211,"Ag+1":212,"H1+1":213,"NH1-1":214,"\\P":215,"14CH2":216,"13C":217,"14CH1":218,"=11C":219,"S@@":220,"=P@@":221,"SiH2":222,"H3-1":223,"14CH3":224,"BH3-1":225,"S@":226,"=14CH1":227,"=PH1":228,"=P@":229,"=NH1+1":230,"\\S+1":231,"124":232,"CH1-1":233,"Sr":234,"=Si":235,"124I":236,"Sr+2":237,"#C-1":238,"/C-1":239,"N@":240,"/N-1":241,"13CH1":242,"/B":243,"19":244,"Ba+2":245,"H4":246,"SH1+1":247,"Se+1":248,"19F":249,"/125I":250,"P@+1":251,"Rb":252,"Cl+1":253,"SiH4":254,"Rb+1":255,"=Branch3":256,"N@@":257,"As+1":258,"/Si":259,"BH1-1":260,"SH1":261,"/123I":262,"32":263,"=Mg":264,"H+1":265,"\\B":266,"SiH1":267,"P@@+1":268,"-2":269,"15":270,"17":271,"35":272,"=13CH1":273,"Cs":274,"=NH2+1":275,"=SH1":276,"MgH2":277,"32P":278,"17F":279,"35S":280,"Cs+1":281,"#11C":282,"/131I":283,"Bi":284,"\\125I":285,"=S@@":286,"\\S-1":287,"6Br":288,"7I":289,"76Br":290,"=B":291,"eH1":292,"\\N-1":293,"18O":294,"127I":295,"11CH2":296,"14C@@H1":297,"TeH2":298,"15NH1":299,"Bi+3":300,"/P+1":301,"/13C":302,"/13CH1":303,"0B":304,"10B":305,"=Al":306,"=18O":307,"BH0":308,"F-1":309,"NH3":310,"S-2":311,"Br+2":312,"Cl+2":313,"\\Si":314,"/S-1":315,"=PH2":316,"14C@H1":317,"NH3+1":318,"#14C":319,"#O+1":320,"-3":321,"22":322,"4H":323,"5Se":324,"5Sr+2":325,"75Se":326,"85Sr+2":327,"=B-1":328,"=13C":329,"@-1":330,"Be":331,"B@@":332,"B@-1":333,"Ca":334,"CH1":335,"I+3":336,"KH1":337,"OH1+1":338,"Ra+2":339,"SH1-1":340,"\\PH1":341,"\\123I":342,"=Ca":343,"\\CH1-1":344,"=S@":345,"\\SeH1":346,"/SeH1":347,"Se-1":348,"LiH1":349,"18F-1":350,"125IH1":351,"11CH1":35
2,"TeH1":353,"Zn+1":354,"Zn-2":355,"Al-3":356,"13CH3":357,"15N":358,"Be+2":359,"B@@-1":360,"#P":361,"#S":362,"-4":363,"/PH1":364,"/P@@":365,"/As":366,"/14C":367,"/14CH1":368,"2K+1":369,"2Rb+1":370,"3Se":371,"3Ra+2":372,"45":373,"47":374,"42K+1":375,"5I-1":376,"73Se":377,"89":378,"82Rb+1":379,"=32":380,"=32P":381,"CH0":382,"CH2":383,"I+2":384,"NH0":385,"NH4":386,"OH1":387,"PH2+1":388,"SH0":389,"SH2":390,"\\3H":391,"\\11CH3":392,"\\C-1":393,"\\Se":394,"Si@":395,"Si-1":396,"SiH1-1":397,"SiH3-1":398,"/Se":399,"Se-2":400,"\\NH1-1":401,"18FH1":402,"125I-1":403,"11C@@H1":404,"11C-1":405,"AsH1":406,"As-1":407,"14C@@":408,"Te-1":409,"Mg+1":410,"123I-1":411,"123Te":412,"123IH1":413,"135I":414,"131I-1":415,"Ag-4":416,"124I-1":417,"76BrH1":418,"18OH1":419,"22Na+1":420,"223Ra+2":421,"CaH2":422,"45Ca+2":423,"47Ca+2":424,"89Sr+2":425,"=32PH1":426,"NH4+1":427}
pretraining_datasets/graph_embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1e2bd1e08eff1dd34237494c60350da500213ce95b1042e5f19d2db65d6e931
+ size 5846661248
pretraining_datasets/kg_embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a7316c20e3fd4beb0753b83da934f1b7538fe3258284c4d4dfea5872ea9c0e3
+ size 1461665408
pretraining_datasets/pretraining_dataset_meta.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:884157632e2124ba90d715756259ead61fcbf840a46edecfa5b7ab1422432cd5
+ size 1816899178
pretraining_datasets/selformermm_kg_heterodata.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7523a29b6ce2abd6cc58f02cdeea9b7cb5c74e19c46ed108a6965bde19aac681
+ size 2397688779
pretraining_datasets/text_embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97ba31be9aee7e9f25fea2d57a28d3c657aeddaf42c834552a17e626e8441001
+ size 8769991808
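The three pretraining embedding matrices above run 1.5-8.8 GB each; np.load with mmap_mode reads rows on demand instead of pulling the whole array into RAM. A small sketch (the saved shapes are not stated in this commit):

import numpy as np

graph = np.load("pretraining_datasets/graph_embeddings.npy", mmap_mode="r")
print(graph.shape, graph.dtype)  # reads the header only, not the 5.8 GB payload
row = np.asarray(graph[0])       # materializes a single row on demand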
processing/dmgi_model.py ADDED
@@ -0,0 +1,103 @@
+ import torch
+ import torch.nn.functional as F
+
+
+ import torch_geometric.transforms as T
+ from torch_geometric.nn import GCNConv
+
+
+ def load_heterodata(path):
+
+     data = torch.load(path, map_location=torch.device('cpu'))
+
+     print("Available edge types in the dataset:", data.edge_types)
+     # data['Compound'].train_mask = torch.zeros(data['Compound'].num_nodes, dtype=torch.bool)
+     # data['Compound'].val_mask = torch.zeros(data['Compound'].num_nodes, dtype=torch.bool)
+     # data['Compound'].test_mask = torch.zeros(data['Compound'].num_nodes, dtype=torch.bool)
+
+
+     # train_indices = np.random.choice(data['Compound'].num_nodes, int(data['Compound'].num_nodes * 0.8), replace=False)
+     # val_indices = np.random.choice(np.setdiff1d(np.arange(data['Compound'].num_nodes), train_indices), int(data['Compound'].num_nodes * 0.1), replace=False)
+     # test_indices = np.setdiff1d(np.arange(data['Compound'].num_nodes), np.concatenate([train_indices, val_indices]))
+
+     # data['Compound'].train_mask[train_indices] = 1
+     # data['Compound'].val_mask[val_indices] = 1
+     # data['Compound'].test_mask[test_indices] = 1
+
+
+     # print(f'Train node count: {data["Compound"].train_mask.sum()}')
+     # print(f'Val node count: {data["Compound"].val_mask.sum()}')
+     # print(f'Test node count: {data["Compound"].test_mask.sum()}')
+
+     metapaths = [
+         [('Compound', 'CTI', 'Protein'), ('Protein', 'rev_CTI', 'Compound')],
+         [('Drug', 'DTI', 'Protein'), ('Protein', 'rev_DTI', 'Drug')],
+         [('Protein', 'PPI', 'Protein'), ('Protein', 'rev_PPI', 'Protein')],
+         [('Gene', 'Orthology', 'Gene'), ('Gene', 'rev_Orthology', 'Gene')],
+     ]
+     print(metapaths)
+
+     data = T.AddMetaPaths(metapaths, drop_orig_edge_types=True)(data)
+     print('Available edge types in the dataset after adding metapaths:', data.edge_types)
+
+     return data
+
+ class DMGI(torch.nn.Module):
+     def __init__(self, num_nodes, in_channels, out_channels, num_relations):
+         super().__init__()
+         self.convs = torch.nn.ModuleList(
+             [GCNConv(in_channels, out_channels) for _ in range(num_relations)])
+         self.M = torch.nn.Bilinear(out_channels, out_channels, 1)
+         self.Z = torch.nn.Parameter(torch.empty(num_nodes, out_channels))
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         for conv in self.convs:
+             conv.reset_parameters()
+         torch.nn.init.xavier_uniform_(self.M.weight)
+         self.M.bias.data.zero_()
+         torch.nn.init.xavier_uniform_(self.Z)
+
+     def forward(self, x, edge_indices):
+         pos_hs, neg_hs, summaries = [], [], []
+         for conv, edge_index in zip(self.convs, edge_indices):
+             pos_h = F.dropout(x, p=0.5, training=self.training)
+             pos_h = conv(pos_h, edge_index).relu()
+             pos_hs.append(pos_h)
+
+             neg_h = F.dropout(x, p=0.5, training=self.training)
+             neg_h = neg_h[torch.randperm(neg_h.size(0), device=neg_h.device)]
+             neg_h = conv(neg_h, edge_index).relu()
+             neg_hs.append(neg_h)
+
+             summaries.append(pos_h.mean(dim=0, keepdim=True))
+
+         return pos_hs, neg_hs, summaries
+
+     def loss(self, pos_hs, neg_hs, summaries):
+         loss = 0.
+         for pos_h, neg_h, s in zip(pos_hs, neg_hs, summaries):
+             s = s.expand_as(pos_h)
+             loss += -torch.log(self.M(pos_h, s).sigmoid() + 1e-15).mean()
+             loss += -torch.log(1 - self.M(neg_h, s).sigmoid() + 1e-15).mean()
+
+         pos_mean = torch.stack(pos_hs, dim=0).mean(dim=0)
+         neg_mean = torch.stack(neg_hs, dim=0).mean(dim=0)
+
+         pos_reg_loss = (self.Z - pos_mean).pow(2).sum()
+         neg_reg_loss = (self.Z - neg_mean).pow(2).sum()
+         loss += 0.001 * (pos_reg_loss - neg_reg_loss)
+
+         return loss
+
+
+ def load_dmgi_model(path, data):
+
+     model = DMGI(data['Compound'].num_nodes,
+                  data['Compound'].x.size(-1),
+                  64,
+                  len(data.edge_types))
+
+     model.load_state_dict(torch.load(path))
+
+     return model
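A short usage sketch for the two helpers above; it assumes the shipped checkpoint models/DMGI/dmgi_model.pt was trained with out_channels=64, which is what load_dmgi_model hard-codes:

from dmgi_model import load_heterodata, load_dmgi_model

data = load_heterodata("pretraining_datasets/selformermm_kg_heterodata.pt")
model = load_dmgi_model("models/DMGI/dmgi_model.pt", data)
model.eval()
kg_embeddings = model.Z.detach()  # learned per-Compound embeddings, shape (num_nodes, 64)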
processing/graph_embedding.py ADDED
@@ -0,0 +1,71 @@
+ import os
+ import pandas as pd
+ import numpy as np
+ import torch
+ import time
+ from datetime import datetime
+ from unimol_tools import UniMolRepr
+
+ unimol_model = UniMolRepr(data_type='molecule', remove_hs=False, use_gpu=True)
+
+ def get_unimol_embeddings_batch(smiles_list, model):
+     try:
+         batch_repr = model.get_repr(smiles_list, return_atomic_reprs=True)
+         cls_reprs = batch_repr['cls_repr']
+         return np.array(cls_reprs)
+     except Exception as e:
+         print(f"Error embedding batch: {e}")
+         return None
+
+ def process_folder_unimol(folder_path, batch_size=2000):
+     """
+     Walk through the folder and process every CSV file except those ending with 'filtered.csv' or 'mock.csv'.
+     Embeddings are saved in the same folder with '_graph_embedding.npy' added to the filename.
+     """
+     for root, dirs, files in os.walk(folder_path):
+         for file in files:
+             if file.endswith(".csv") and not file.endswith("filtered.csv") and not file.endswith("mock.csv"):
+                 file_path = os.path.join(root, file)
+                 print(f"Processing file: {file_path}")
+                 try:
+                     df = pd.read_csv(file_path)
+                     column_name = 'smiles'
+                     if column_name not in df.columns:
+                         column_name = 'mol'
+
+                     if column_name not in df.columns:
+                         raise ValueError("'smiles' column not found in the CSV file.")
+
+                     df = df.dropna(subset=[column_name])
+                     smiles_list = df[column_name].tolist()
+                     print(f"Found {len(smiles_list)} valid SMILES to process.")
+
+                     all_embeddings = []
+                     for i in range(0, len(smiles_list), batch_size):
+                         batch = smiles_list[i:i+batch_size]
+                         embeddings = get_unimol_embeddings_batch(batch, unimol_model)
+                         if embeddings is not None:
+                             all_embeddings.append(embeddings)
+                         else:
+                             print(f"Warning: Batch {i//batch_size} failed.")
+
+                     if all_embeddings:
+                         final_embeddings = np.concatenate(all_embeddings)
+                         output_file = os.path.join(root, f"{os.path.splitext(file)[0]}_graph_embedding.npy")
+                         np.save(output_file, final_embeddings)
+                         print(f"Saved embeddings with shape {final_embeddings.shape} to {output_file}\n")
+                     else:
+                         print(f"No embeddings generated for {file_path}.")
+
+                 except Exception as e:
+                     print(f"Failed to process {file_path}: {e}\n")
+
+ folder_path = "/home/g3-bbm-project/main_folder/FineTune/finetune_data_multi/finetuning_datasets/classification"  # Set your top-level folder here
+ print(f"Starting UniMol embedding processing at {datetime.now().strftime('%H:%M:%S')}")
+ start_time = time.time()
+
+ process_folder_unimol(folder_path)
+
+ total_time = time.time() - start_time
+ print(f"\nTotal execution time: {total_time:.2f} seconds")
+ print(f"Finished at {datetime.now().strftime('%H:%M:%S')}")
processing/npy_to_h5.py ADDED
@@ -0,0 +1,28 @@
+ import numpy as np
+ import h5py
+ import argparse
+ import os
+
+ """
+ Example usage:
+ python npy_to_h5.py /path/to/input_file.npy /path/to/output_file.h5
+ """
+
+ def convert_npy_to_h5(npy_file_path, h5_output_path):
+     if not os.path.isfile(npy_file_path):
+         print(f"Error: Input file '{npy_file_path}' does not exist.")
+         return
+
+     data = np.load(npy_file_path)
+
+     with h5py.File(h5_output_path, 'w') as h5_file:
+         h5_file.create_dataset('data', data=data)
+     print(f"Data from '{npy_file_path}' has been successfully saved to '{h5_output_path}'.")
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Convert a .npy file to a .h5 file.")
+     parser.add_argument("npy_file", type=str, help="Path to the input .npy file.")
+     parser.add_argument("h5_file", type=str, help="Path to the output .h5 file.")
+
+     args = parser.parse_args()
+     convert_npy_to_h5(args.npy_file, args.h5_file)
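Reading the converted file back is symmetric; the dataset name 'data' matches what convert_npy_to_h5 writes:

import h5py

with h5py.File("/path/to/output_file.h5", "r") as f:
    arr = f["data"][:]  # load the full array back as a NumPy ndarray
print(arr.shape)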
processing/pretrain_dmgi.py ADDED
@@ -0,0 +1,77 @@
+ import os
+
+ import numpy as np
+
+ import torch
+ from torch.optim import Adam
+ from torch_geometric import seed_everything
+
+ from dmgi_model import load_heterodata, DMGI
+
+ from datetime import datetime
+
+ # set random seeds
+ seed_everything(42)
+ np.random.seed(42)
+
+ torch.set_num_threads(5)
+
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--data', default='/home/g3bbmproject/main_folder/KG/kg.pt/selformerv2_kg_heterodata_1224.pt')
+
+ args = parser.parse_args()
+
+
+ def train(data, model, optimizer):
+     model.train()
+     optimizer.zero_grad()
+     x = data['Compound'].x
+     edge_indices = data.edge_index_dict.values()
+     pos_hs, neg_hs, summaries = model(x, edge_indices)
+     loss = model.loss(pos_hs, neg_hs, summaries)
+     loss.backward()
+     optimizer.step()
+     return float(loss)
+
+
+ def pretrain_dmgi(hps, data, device):
+     model = DMGI(data['Compound'].num_nodes,
+                  data['Compound'].x.size(-1),
+                  hps[0],
+                  len(data.edge_types))
+
+     data, model = data.to(device), model.to(device)
+     print(data.node_types)
+     # Print available edge types in the dataset
+     print("Available edge types in the dataset:", data.edge_types)
+
+
+     optimizer = Adam(model.parameters(), lr=hps[1], weight_decay=hps[2])
+
+     for epoch in range(1, 101):
+         epoch_start = datetime.now()
+         train_loss = train(data, model, optimizer)
+
+         if epoch == 1 or epoch % 25 == 0:
+             print(f'\tEpoch: {epoch:03d}, Loss: {train_loss:.4f}, Time: {datetime.now() - epoch_start}')
+
+     return train_loss, model
+
+
+ if __name__ == '__main__':
+     data = load_heterodata(args.data)
+     print(f'Loaded data: {args.data}')
+     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+     print(f'\nUsing device: {device}\n')
+
+     print('Starting training...\n')
+     train_start = datetime.now()
+     loss, model = pretrain_dmgi([32, 0.01, 0.001], data, device)
+     print(f'\nDone. Total training time: {datetime.now() - train_start}')
+
+     # save model
+     os.makedirs('data/pretrained_models', exist_ok=True)  # match the save path below (was 'models')
+     torch.save(model.state_dict(), 'data/pretrained_models/kg_dmgi_model.pt')
+     print('Model saved: data/pretrained_models/kg_dmgi_model.pt\n')
processing/selfies_embedding.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ import pandas as pd
+ from pandarallel import pandarallel
+ from transformers import RobertaTokenizer, RobertaModel, RobertaConfig
+ import torch
+
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+ os.environ["WANDB_DISABLED"] = "true"
+ os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+
+
+ SELFIES_DATASET_PATH = "data/temp_selfies.csv"  # path to the SELFIES dataset
+ MODEL_FILE_PATH = "data/pretrained_models/SELFormer"  # path to the pre-trained SELFormer model
+ OUTPUT_EMBEDDINGS_PATH = "data/embeddings.csv"  # path to save the generated embeddings
+
+
+ df = pd.read_csv(SELFIES_DATASET_PATH)  # load the dataset
+ print(f"Loaded dataset with {len(df)} molecules.")
+
+
+ config = RobertaConfig.from_pretrained(MODEL_FILE_PATH)  # load the pre-trained model and tokenizer
+ config.output_hidden_states = True
+ tokenizer = RobertaTokenizer.from_pretrained("data/RobertaFastTokenizer")
+ model = RobertaModel.from_pretrained(MODEL_FILE_PATH, config=config)
+
+
+ def get_sequence_embeddings(selfies):
+     token = torch.tensor([tokenizer.encode(selfies, add_special_tokens=True, max_length=512, padding=True, truncation=True)])  # tokenize the SELFIES string
+     output = model(token)  # forward pass through the model
+     sequence_out = output[0]  # extract the sequence output and compute the mean pooling
+     return torch.mean(sequence_out[0], dim=0).tolist()
+
+ print("Generating embeddings...")
+ pandarallel.initialize(nb_workers=5, progress_bar=True)
+ df["sequence_embeddings"] = df.selfies.parallel_apply(get_sequence_embeddings)
+
+ df.drop(columns=["selfies"], inplace=True)
+ df.to_csv(OUTPUT_EMBEDDINGS_PATH, index=False)
+ print(f"Embeddings saved to {OUTPUT_EMBEDDINGS_PATH}")
processing/smiles_to_selfies.py ADDED
@@ -0,0 +1,52 @@
+ import pandas as pd
+ from pandarallel import pandarallel
+ import selfies as sf
+
+ def to_selfies(smiles):
+     """
+     Converts SMILES to SELFIES representation.
+     If an error occurs, returns the original SMILES unchanged.
+     """
+     try:
+         return sf.encoder(smiles)
+     except sf.EncoderError:
+         print(f"EncoderError for SMILES: {smiles}")
+         return smiles
+
+ def prepare_data(path, save_to):
+     """
+     Reads a dataset with SMILES, converts SMILES to SELFIES, and saves the result.
+     """
+     chembl_df = pd.read_csv(path, sep="\t")
+     chembl_df["selfies"] = chembl_df["canonical_smiles"]  # Copy the SMILES column
+
+     pandarallel.initialize()
+     chembl_df["selfies"] = chembl_df["selfies"].parallel_apply(to_selfies)
+     chembl_df.drop(chembl_df[chembl_df["canonical_smiles"] == chembl_df["selfies"]].index, inplace=True)
+     chembl_df.drop(columns=["canonical_smiles"], inplace=True)
+     chembl_df.to_csv(save_to, index=False)
+
+ input_csv_path = "/home/g3bbmproject/main_folder/KG/kg.pt/our_10k_matched_data_with_embeddings.csv"
+ output_csv_path = "data_with_selfies.csv"
+ temp_smiles_path = "temp_smiles.csv"
+ temp_selfies_path = "temp_selfies.csv"
+
+ data = pd.read_csv(input_csv_path)
+
+ # Save the SMILES column to a temporary file for conversion
+ data[['smiles']].rename(columns={"smiles": "canonical_smiles"}).to_csv(temp_smiles_path, index=False, sep="\t")
+
+ # Convert SMILES to SELFIES using the prepare_data function
+ prepare_data(path=temp_smiles_path, save_to=temp_selfies_path)
+
+ # Load the resulting SELFIES data
+ selfies_data = pd.read_csv(temp_selfies_path)
+
+ # Add the SELFIES column back to the original data
+ data['selfies'] = selfies_data['selfies']  # Positional assignment: misaligns if prepare_data dropped any failed conversions
+
+ # Save the updated data to a new CSV file
+ data.to_csv(output_csv_path, index=False)
+
+ print(f'Total length of data: {len(data)}')
+ print(f"Updated dataset with SELFIES saved to: {output_csv_path}")
processing/text_embedding.py ADDED
@@ -0,0 +1,96 @@
+ import os
+ import pandas as pd
+ import torch
+ import numpy as np
+ from transformers import AutoTokenizer, AutoModel
+ from tqdm import tqdm
+
+ # Check for GPU availability
+ device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
+ print(f"Using device: {device}")
+
+ # Load SciBERT tokenizer and model
+ scibert_tokenizer = AutoTokenizer.from_pretrained("allenai/scibert_scivocab_uncased")
+ scibert_model = AutoModel.from_pretrained("allenai/scibert_scivocab_uncased").to(device)
+ scibert_model.eval()  # Set the model to evaluation mode
+
+ # Function to generate text embeddings for a single text
+ def get_text_embeddings(text, tokenizer, model, device):
+     if isinstance(text, str) and text.strip() != "":
+         tokens = tokenizer.encode(
+             text,
+             add_special_tokens=True,
+             max_length=512,
+             padding=True,
+             truncation=True,
+             return_tensors="pt"
+         ).to(device)
+         with torch.no_grad():
+             output = model(tokens)
+         text_out = output[0][0].mean(dim=0)
+     else:
+         text_out = torch.zeros(768).to(device)
+     return text_out.cpu().numpy()
+
+ # New function: Process all CSV files ending with 'filtered' in a folder and its subfolders
+ def process_folder(folder_path):
+     """
+     Walk through the folder and process each CSV file ending with 'filtered'.
+     Embeddings are saved in the same folder with '_text_embedding.npy' added to the base filename.
+     """
+     for root, dirs, files in os.walk(folder_path):
+         for file in files:
+             if file.endswith("filtered.csv"):
+                 filtered_path = os.path.join(root, file)
+                 base_filename = file.replace("_filtered", "")
+                 full_path = os.path.join(root, base_filename)
+
+                 if not os.path.exists(full_path):
+                     print(f"Corresponding full CSV file not found for {filtered_path}, skipping.\n")
+                     continue
+
+                 print(f"Processing file: {filtered_path}")
+
+                 try:
+                     # Read both full and filtered CSV files
+                     df_full = pd.read_csv(full_path)
+                     df_filtered = pd.read_csv(filtered_path)
+
+                     '''if ("smiles" not in df_full.columns or "smiles" not in df_filtered.columns):
+                         raise ValueError("'smiles' column not found in one of the CSV files.")
+
+                     if "Description" not in df_filtered.columns:
+                         raise ValueError("'Description' column not found in filtered CSV file.")'''
+
+                     # Map smiles to description
+
+                     column_name = 'smiles'
+                     if base_filename == 'bace.csv':  # bace uses a 'mol' column (base_filename keeps the .csv suffix)
+                         column_name = 'mol'
+
+                     smiles_to_description = dict(zip(df_filtered["smiles"], df_filtered["Description"]))
+
+                     # Prepare Description column for full df (may include Nones)
+                     df_full["Description"] = df_full[column_name].map(smiles_to_description)
+
+                     # Now generate embeddings
+                     tqdm.pandas(desc=f"Embedding {base_filename}")
+                     embeddings = df_full["Description"].progress_apply(
+                         lambda text: get_text_embeddings(text, scibert_tokenizer, scibert_model, device)
+                     ).tolist()
+
+                     embeddings_array = np.array(embeddings)
+
+                     output_file = os.path.join(root, f"{os.path.splitext(base_filename)[0]}_text_embedding.npy")
+                     np.save(output_file, embeddings_array)
+
+                     print(f"Saved embeddings to {output_file}\n")
+
+                 except Exception as e:
+                     print(f"Failed to process {filtered_path}: {e}\n")
+
+ # Example usage
+ folder_path = "/home/g3-bbm-project/main_folder/FineTune/finetune_data_multi/finetuning_datasets/classification"  # Set your top-level folder here
+ print(f"Starting to process folder: {folder_path}")
+ process_folder(folder_path)
+ print("Folder processing complete.")