jglaser committed
Commit
d802ce7
1 Parent(s): acf20bc

add D-COID data set

Files changed (5)
  1. .gitattributes +1 -0
  2. data/dcoid.parquet +3 -0
  3. dcoid.py +85 -0
  4. dcoid.slurm +10 -0
  5. dcoid_combine.py +12 -0
.gitattributes CHANGED
@@ -815,3 +815,4 @@ data/dude/part.638.parquet filter=lfs diff=lfs merge=lfs -text
 data/dude/part.713.parquet filter=lfs diff=lfs merge=lfs -text
 data/dude/part.82.parquet filter=lfs diff=lfs merge=lfs -text
 data/dude_predict.parquet filter=lfs diff=lfs merge=lfs -text
+data/dcoid.parquet filter=lfs diff=lfs merge=lfs -text
data/dcoid.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57286fa1878faeb5954050d2c557042e806dd2ad0bf1731a57044d0fff3af785
+size 597990
dcoid.py ADDED
@@ -0,0 +1,85 @@
+from mpi4py import MPI
+from mpi4py.futures import MPICommExecutor
+
+from Bio.PDB import PDBParser, PDBIO, Select, PPBuilder
+import warnings
+
+import tempfile
+import os
+import sys
+from rdkit import Chem
+
+import pandas as pd
+
+def is_het(residue):
+    res = residue.id[0]
+    return res != " " and res != "W"
+
+
+class ResidueSelect(Select):
+    def __init__(self, het):
+        self.het = het
+
+    def accept_residue(self, residue):
+        """ Recognition of heteroatoms - Remove water molecules """
+        return (self.het and is_het(residue) or not self.het and not is_het(residue))
+
+def get_complex(fn):
+    try:
+        parser = PDBParser()
+        io = PDBIO()
+        structure = parser.get_structure('complex',fn)
+        io.set_structure(structure)
+
+        with tempfile.NamedTemporaryFile(mode='w',delete=False) as f:
+            name_receptor = f.name
+
+        with tempfile.NamedTemporaryFile(mode='w',delete=False) as f:
+            name_ligand = f.name
+
+        io.save(name_receptor,ResidueSelect(het=False))
+        io.save(name_ligand,ResidueSelect(het=True))
+
+        parser = PDBParser()
+        receptor = parser.get_structure('protein',name_receptor)
+        ppb = PPBuilder()
+        seq = []
+        for pp in ppb.build_peptides(structure):
+            seq.append(str(pp.get_sequence()))
+        seq = ''.join(seq)
+
+        mol = Chem.MolFromPDBFile(name_ligand)
+        smiles = Chem.MolToSmiles(mol)
+
+        os.unlink(name_ligand)
+        os.unlink(name_receptor)
+
+        return seq, smiles
+
+    except Exception as e:
+        print(e)
+        pass
+
+
+if __name__ == '__main__':
+    import glob
+
+    filenames = glob.glob(sys.argv[2])
+    comm = MPI.COMM_WORLD
+    with MPICommExecutor(comm, root=0) as executor:
+        if executor is not None:
+            result = executor.map(get_complex, filenames)
+
+            names = []
+            all_seq = []
+            all_smiles = []
+            for n,r in zip(filenames,result):
+                try:
+                    all_seq.append(r[0])
+                    all_smiles.append(r[1])
+                    names.append(os.path.basename(n))
+                except:
+                    pass
+            df = pd.DataFrame({'name': names, 'seq': all_seq, 'smiles': all_smiles})
+            df.to_parquet(sys.argv[1])
+
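
dcoid.py is invoked as python dcoid.py <output.parquet> "<pdb glob>" (see dcoid.slurm below): it splits each PDB complex into receptor and heteroatom ligand, extracts the receptor sequence with Biopython's PPBuilder and the ligand SMILES with RDKit, and writes one row per successfully parsed file. A minimal sketch for inspecting such an output; the path is the actives output referenced in dcoid.slurm and only stands in for whatever file the script produced:

import pandas as pd

# Inspect a table written by dcoid.py. The path is an example taken from
# dcoid.slurm; any output of the script has the same three columns.
df = pd.read_parquet("data/dcoid_actives.parquet")
print(df.columns.tolist())     # ['name', 'seq', 'smiles']
print(df.iloc[0]["name"])      # PDB file basename
print(df.iloc[0]["smiles"])    # ligand SMILES string
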
dcoid.slurm ADDED
@@ -0,0 +1,10 @@
+#!/bin/bash
+#SBATCH -J preprocess_dcoid
+#SBATCH -p batch
+#SBATCH -A STF006
+#SBATCH -t 3:00:00
+#SBATCH -N 20
+##SBATCH --ntasks-per-node=16
+
+srun python dcoid.py data/dcoid_actives.parquet "/gpfs/alpine/world-shared/bip214/binding_affinity/dcoid/actives/*pdb"
+srun python dcoid.py data/dcoid_decoys.parquet "/gpfs/alpine/world-shared/bip214/binding_affinity/dcoid/decoys/*pdb"
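
Each srun step writes one intermediate table (actives, then decoys) that dcoid_combine.py below merges. A hypothetical post-job sanity check, not part of the commit, assuming both files ended up under data/:

import pandas as pd

# Hypothetical check after the SLURM job: both intermediate parquet files
# should exist and carry the columns produced by dcoid.py.
for path in ("data/dcoid_actives.parquet", "data/dcoid_decoys.parquet"):
    df = pd.read_parquet(path)
    assert set(df.columns) == {"name", "seq", "smiles"}, path
    print(path, len(df), "rows")
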
dcoid_combine.py ADDED
@@ -0,0 +1,12 @@
+import pandas as pd
+
+if __name__ == '__main__':
+    import glob
+
+    df_actives = pd.read_parquet('data/dcoid_actives.parquet')
+    df_decoys = pd.read_parquet('data/dcoid_decoys.parquet')
+
+    df_actives['active'] = pd.Series([True]*len(df_actives))
+    df_decoys['active'] = pd.Series([False]*len(df_decoys))
+
+    pd.concat([df_actives,df_decoys]).to_parquet('data/dcoid.parquet')
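
The combined data/dcoid.parquet therefore carries a boolean 'active' label next to 'name', 'seq' and 'smiles'. A short usage sketch for loading the final dataset with pandas; nothing beyond that column layout is assumed:

import pandas as pd

# Load the combined D-COID table and summarize the active/decoy split.
df = pd.read_parquet("data/dcoid.parquet")
print(df["active"].value_counts())
print(df[["name", "seq", "smiles"]].head())
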