text (string, lengths 0-1.05M) | meta (dict)
---|---|
"""unit testing code for the BuildComposite functionality
"""
from rdkit import RDConfig
import unittest,os
from rdkit.ML import BuildComposite
from rdkit.ML import ScreenComposite
from rdkit.Dbase.DbConnection import DbConnect
import cPickle as pickle
def feq(a,b,tol=1e-4):
if abs(a-b)>tol: return 0
else: return 1
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'ML','test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = BuildComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def _init(self,refCompos,copyBounds=0):
BuildComposite._verbose=0
conn = DbConnect(self.details.dbName,self.details.tableName)
cols = [x.upper() for x in conn.GetColumnNames()]
cDescs = [x.upper() for x in refCompos.GetDescriptorNames()]
assert cols==cDescs,'bad descriptor names in table: %s != %s'%(cols,cDescs)
self.details.nModels = 10
self.details.lockRandom = 1
self.details.randomSeed = refCompos._randomSeed
self.details.splitFrac = refCompos._splitFrac
if self.details.splitFrac:
self.details.splitRun = 1
else:
self.details.splitRun = 0
if not copyBounds:
self.details.qBounds = [0]*len(cols)
else:
self.details.qBounds = refCompos.GetQuantBounds()[0]
def compare(self,compos,refCompos):
assert len(compos)==len(refCompos),'%d != %d'%(len(compos),len(refCompos))
cs = []
rcs = []
for i in range(len(compos)):
cs.append(compos[i])
rcs.append(refCompos[i])
def sortHelp(x,y):
if x[2]==y[2]:
return cmp(x[1],y[1])
else:
return cmp(x[2],y[2])
cs.sort(sortHelp)
rcs.sort(sortHelp)
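    # (note: Python 3 removes cmp-based sorting; the equivalent ordering there
    #  would be cs.sort(key=lambda x: (x[2], x[1])) -- error first, then count)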
for i in range(len(compos)):
tree,count,err = cs[i]
refTree,refCount,refErr = rcs[i]
assert count==refCount, str((count,refCount))
assert feq(err,refErr),'%f != %f'%(err,refErr)
def test1(self):
""" basics """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10.pkl'
refCompos = pickle.load(open(os.path.join(self.baseDir,refComposName),
'rb'))
# first make sure the data are intact
self._init(refCompos)
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test2(self):
""" depth limit """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3.pkl'
refCompos = pickle.load(open(os.path.join(self.baseDir,refComposName),
'rb'))
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test3(self):
""" depth limit + less greedy """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3_lessgreedy.pkl'
refCompos = pickle.load(open(os.path.join(self.baseDir,refComposName),
'rb'))
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.lessGreedy = 1
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test4(self):
""" more trees """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_50_3.pkl'
refCompos = pickle.load(open(os.path.join(self.baseDir,refComposName),
'rb'))
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.nModels = 50
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test5(self):
""" auto bounds """
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_auto_10_3.pkl'
refCompos = pickle.load(open(os.path.join(self.baseDir,refComposName),
'rb'))
# first make sure the data are intact
self._init(refCompos,copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test6(self):
""" auto bounds with a real valued activity"""
self.details.tableName = 'ferro_noquant_realact'
refComposName = 'ferromag_auto_10_3.pkl'
refCompos = pickle.load(open(os.path.join(self.baseDir,refComposName),
'rb'))
# first make sure the data are intact
self._init(refCompos,copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
self.details.activityBounds=[0.5]
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test7(self):
""" Test composite of naive bayes"""
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_NaiveBayes.pkl'
pklFile = open(os.path.join(self.baseDir,refComposName), 'rb')
refCompos = pickle.load(pklFile)
self._init(refCompos,copyBounds=1)
self.details.useTrees = 0
self.details.useNaiveBayes = 1
self.details.mEstimateVal = 20.0
self.details.qBounds = [0] + [2]*6 + [0]
compos = BuildComposite.RunIt(self.details, saveIt= 0)
self.compare(compos,refCompos)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/ML/UnitTestBuildComposite.py",
"copies": "2",
"size": "5858",
"license": "bsd-3-clause",
"hash": -5591691407116287000,
"line_mean": 30.1595744681,
"line_max": 79,
"alpha_frac": 0.6536360533,
"autogenerated": false,
"ratio": 3.1854268624252313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48390629157252313,
"avg_score": null,
"num_lines": null
} |
"""unit testing code for the BuildComposite functionality
"""
import io
import os
import unittest
from rdkit import RDConfig
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.ML import BuildComposite
from rdkit.six.moves import cPickle as pickle
class TestCase(unittest.TestCase):
def setUp(self):
self.baseDir = os.path.join(RDConfig.RDCodeDir, 'ML', 'test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = BuildComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def _init(self, refCompos, copyBounds=0):
BuildComposite._verbose = 0
conn = DbConnect(self.details.dbName, self.details.tableName)
cols = [x.upper() for x in conn.GetColumnNames()]
cDescs = [x.upper() for x in refCompos.GetDescriptorNames()]
self.assertEqual(cols, cDescs)
self.details.nModels = 10
self.details.lockRandom = 1
self.details.randomSeed = refCompos._randomSeed
self.details.splitFrac = refCompos._splitFrac
if self.details.splitFrac:
self.details.splitRun = 1
else:
self.details.splitRun = 0
if not copyBounds:
self.details.qBounds = [0] * len(cols)
else:
self.details.qBounds = refCompos.GetQuantBounds()[0]
def compare(self, compos, refCompos):
self.assertEqual(len(compos), len(refCompos))
cs = []
rcs = []
for i in range(len(compos)):
cs.append(compos[i])
rcs.append(refCompos[i])
    # sort on error (x[2]) with ties broken on count (x[1]), matching the
    # ordering of the original cmp-based comparison
    cs.sort(key=lambda x: (x[2], x[1]))
    rcs.sort(key=lambda x: (x[2], x[1]))
for i in range(len(compos)):
_, count, err = cs[i]
_, refCount, refErr = rcs[i]
self.assertEqual(count, refCount)
self.assertAlmostEqual(err, refErr, 4)
def test1_basics(self):
# """ basics """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10.pkl'
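    # The reference pickle is read as text and its line endings normalized
    # before loading from an in-memory buffer, presumably so the same test
    # data file unpickles cleanly regardless of platform line endings.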
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
compos = BuildComposite.RunIt(self.details, saveIt=0)
# pickle.dump(compos,open(os.path.join(self.baseDir,refComposName), 'wb'))
# with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
# refCompos = pickle.load(pklF)
self.compare(compos, refCompos)
def test2_depth_limit(self):
# """ depth limit """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test3_depth_limit_less_greedy(self):
# """ depth limit + less greedy """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3_lessgreedy.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.lessGreedy = 1
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test4_more_trees(self):
# """ more trees """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_50_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.nModels = 50
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test5_auto_bounds(self):
# """ auto bounds """
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos, copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test6_auto_bounds_real_activity(self):
# """ auto bounds with a real valued activity"""
self.details.tableName = 'ferro_noquant_realact'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTF:
buf = pklTF.read().replace('\r\n', '\n').encode('utf-8')
pklTF.close()
with io.BytesIO(buf) as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos, copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
self.details.activityBounds = [0.5]
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
def test7_composite_naiveBayes(self):
# """ Test composite of naive bayes"""
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_NaiveBayes.pkl'
with open(os.path.join(self.baseDir, refComposName), 'r') as pklTFile:
buf = pklTFile.read().replace('\r\n', '\n').encode('utf-8')
pklTFile.close()
with io.BytesIO(buf) as pklFile:
refCompos = pickle.load(pklFile)
self._init(refCompos, copyBounds=1)
self.details.useTrees = 0
self.details.useNaiveBayes = 1
self.details.mEstimateVal = 20.0
self.details.qBounds = [0] + [2] * 6 + [0]
compos = BuildComposite.RunIt(self.details, saveIt=0)
self.compare(compos, refCompos)
if __name__ == '__main__': # pragma: nocover
unittest.main()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/UnitTestBuildComposite.py",
"copies": "4",
"size": "6697",
"license": "bsd-3-clause",
"hash": -1186130716703731200,
"line_mean": 30.8904761905,
"line_max": 78,
"alpha_frac": 0.6643273107,
"autogenerated": false,
"ratio": 3.018026137899955,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5682353448599956,
"avg_score": null,
"num_lines": null
} |
"""unit testing code for the BuildComposite functionality
"""
import unittest,os
from rdkit.six.moves import cPickle as pickle
from rdkit.six import cmp
from rdkit import RDConfig
from rdkit.ML import BuildComposite
from rdkit.ML import ScreenComposite
from rdkit.Dbase.DbConnection import DbConnect
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'ML','test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = BuildComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def _init(self,refCompos,copyBounds=0):
BuildComposite._verbose=0
conn = DbConnect(self.details.dbName,self.details.tableName)
cols = [x.upper() for x in conn.GetColumnNames()]
cDescs = [x.upper() for x in refCompos.GetDescriptorNames()]
self.assertEqual(cols,cDescs)
self.details.nModels = 10
self.details.lockRandom = 1
self.details.randomSeed = refCompos._randomSeed
self.details.splitFrac = refCompos._splitFrac
if self.details.splitFrac:
self.details.splitRun = 1
else:
self.details.splitRun = 0
if not copyBounds:
self.details.qBounds = [0]*len(cols)
else:
self.details.qBounds = refCompos.GetQuantBounds()[0]
def compare(self,compos,refCompos):
self.assertEqual(len(compos),len(refCompos))
cs = []
rcs = []
for i in range(len(compos)):
cs.append(compos[i])
rcs.append(refCompos[i])
def sortHelp(x,y):
if x[2]==y[2]:
return cmp(x[1],y[1])
else:
return cmp(x[2],y[2])
    # sort on error (x[2]) with ties broken on count (x[1]), as sortHelp above does
    cs.sort(key=lambda x:(x[2],x[1]))
    rcs.sort(key=lambda x:(x[2],x[1]))
for i in range(len(compos)):
tree,count,err = cs[i]
refTree,refCount,refErr = rcs[i]
self.assertEqual(count,refCount)
self.assertAlmostEqual(err,refErr,4)
def test1(self):
""" basics """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
compos = BuildComposite.RunIt(self.details,saveIt=0)
#pickle.dump(compos,open(os.path.join(self.baseDir,refComposName), 'wb'))
#with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
# refCompos = pickle.load(pklF)
self.compare(compos,refCompos)
def test2(self):
""" depth limit """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test3(self):
""" depth limit + less greedy """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_10_3_lessgreedy.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.lessGreedy = 1
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test4(self):
""" more trees """
self.details.tableName = 'ferro_quant'
refComposName = 'ferromag_quant_50_3.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos)
self.details.limitDepth = 3
self.details.nModels = 50
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test5(self):
""" auto bounds """
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos,copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test6(self):
""" auto bounds with a real valued activity"""
self.details.tableName = 'ferro_noquant_realact'
refComposName = 'ferromag_auto_10_3.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklF:
refCompos = pickle.load(pklF)
# first make sure the data are intact
self._init(refCompos,copyBounds=1)
self.details.limitDepth = 3
self.details.nModels = 10
self.details.activityBounds=[0.5]
compos = BuildComposite.RunIt(self.details,saveIt=0)
self.compare(compos,refCompos)
def test7(self):
""" Test composite of naive bayes"""
self.details.tableName = 'ferro_noquant'
refComposName = 'ferromag_NaiveBayes.pkl'
with open(os.path.join(self.baseDir,refComposName), 'rb') as pklFile:
refCompos = pickle.load(pklFile)
self._init(refCompos,copyBounds=1)
self.details.useTrees = 0
self.details.useNaiveBayes = 1
self.details.mEstimateVal = 20.0
self.details.qBounds = [0] + [2]*6 + [0]
compos = BuildComposite.RunIt(self.details, saveIt= 0)
self.compare(compos,refCompos)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/ML/UnitTestBuildComposite.py",
"copies": "1",
"size": "5925",
"license": "bsd-3-clause",
"hash": -6177026492547434000,
"line_mean": 29.859375,
"line_max": 77,
"alpha_frac": 0.6766244726,
"autogenerated": false,
"ratio": 3.074727555786196,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9115504357516988,
"avg_score": 0.027169534173841756,
"num_lines": 192
} |
"""unit testing code for the ScreenComposite functionality
"""
from rdkit import RDConfig
import unittest,os
from rdkit.ML import BuildComposite
from rdkit.ML import ScreenComposite
from rdkit.six.moves import cPickle as pickle
def feq(a,b,tol=1e-4):
if abs(a-b)>tol: return 0
else: return 1
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'ML','test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = ScreenComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def test1(self):
""" basics """
self.details.tableName = 'ferro_quant'
with open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 5
self.assertEqual(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
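    # (tbl is a 2x2 results table: its diagonal entries are the correctly
    #  classified counts for the two classes and sum to nGood, while the
    #  off-diagonal entries sum to misCount; which axis corresponds to the
    #  true vs. predicted class is not asserted in these tests.)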
self.assertEqual(nGood,93)
self.assertEqual(misCount,2)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.9871,4)
self.assertAlmostEqual(avgBad,.8000,4)
self.assertEqual(tbl[0,0] , 54)
self.assertEqual(tbl[1,1] , 39)
self.assertEqual(tbl[0,1] , 2)
self.assertEqual(tbl[1,0] , 0)
def test2(self):
""" include holdout data only """
self.details.tableName = 'ferro_quant'
self.details.doHoldout=1
self.details.doTraining=0
with open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 5
self.assertEqual(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,28)
self.assertEqual(misCount,1)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.9964,4)
self.assertAlmostEqual(avgBad,1.000,4)
self.assertEqual(tbl[0,0] , 16)
self.assertEqual(tbl[1,1] , 12)
self.assertEqual(tbl[0,1] , 1)
self.assertEqual(tbl[1,0] , 0)
def test3(self):
""" include training data only """
self.details.tableName = 'ferro_quant'
self.details.doHoldout=0
self.details.doTraining=1
with open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 5
self.assertEqual(len(compos),tgt,'bad composite loaded: %d != %d'%(len(compos),tgt))
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,65)
self.assertEqual(misCount,1)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.98307,4)
self.assertAlmostEqual(avgBad,0.600,4)
self.assertEqual(tbl[0,0] , 38,tbl)
self.assertEqual(tbl[1,1] , 27)
self.assertEqual(tbl[0,1] , 1)
self.assertEqual(tbl[1,0] , 0)
def test4(self):
""" include thresholding """
self.details.tableName = 'ferro_quant'
self.details.threshold = 0.80
self.details.doHoldout=0
self.details.doTraining=0
with open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 5
self.assertEqual(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,91)
self.assertEqual(misCount,1)
self.assertEqual(nSkipped,3)
self.assertAlmostEqual(avgGood,0.9956,4)
self.assertAlmostEqual(avgBad,1.000,4)
self.assertAlmostEqual(avgSkip,0.6000,4)
self.assertEqual(tbl[0,0] , 54)
self.assertEqual(tbl[1,1] , 37)
self.assertEqual(tbl[0,1] , 1)
self.assertEqual(tbl[1,0] , 0)
def test5(self):
""" basics """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_auto_10_3.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
tpl = ScreenComposite.ScreenFromDetails(compos,self.details)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = tpl
self.assertEqual(nGood,95)
self.assertEqual(misCount,8)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.9684,4)
self.assertAlmostEqual(avgBad,.8375,4)
self.assertEqual(tbl[0,0] , 50)
self.assertEqual(tbl[1,1] , 45)
self.assertEqual(tbl[0,1] , 5)
self.assertEqual(tbl[1,0] , 3)
def test6(self):
""" multiple models """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_auto_10_3.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
composites = [compos,compos]
tpl = ScreenComposite.ScreenFromDetails(composites,self.details)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = tpl
self.assertEqual(nGood[0],95)
self.assertEqual(misCount[0],8)
self.assertEqual(nSkipped[0],0)
self.assertAlmostEqual(avgGood[0],.9684,4)
self.assertAlmostEqual(avgBad[0],.8375,4)
self.assertEqual(nGood[1],0)
self.assertEqual(misCount[1],0)
self.assertEqual(nSkipped[1],0)
self.assertEqual(avgGood[1],0)
self.assertEqual(avgBad[1],0)
self.assertEqual(tbl[0,0],50)
self.assertEqual(tbl[1,1],45)
self.assertEqual(tbl[0,1],5)
self.assertEqual(tbl[1,0],3)
def test7(self):
""" shuffle """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
self.details.shuffleActivities=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,50)
self.assertEqual(misCount,53)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.7380,4)
self.assertAlmostEqual(avgBad,.7660,4)
self.assertEqual(tbl[0,0] , 30)
self.assertEqual(tbl[1,1] , 20)
self.assertEqual(tbl[0,1] , 25)
self.assertEqual(tbl[1,0] , 28)
def test8(self):
""" shuffle with segmentation """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
self.details.shuffleActivities=1
self.details.doHoldout=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,19)
self.assertEqual(misCount,12)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.7737,4)
self.assertAlmostEqual(avgBad,.7500,4)
self.assertEqual(tbl[0,0] , 12)
self.assertEqual(tbl[1,1] , 7)
self.assertEqual(tbl[0,1] , 6)
self.assertEqual(tbl[1,0] , 6)
def test9(self):
""" shuffle with segmentation2 """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
self.details.shuffleActivities=1
self.details.doTraining=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,31)
self.assertEqual(misCount,41)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.7161,4)
self.assertAlmostEqual(avgBad,.7707,4)
self.assertEqual(tbl[0,0] , 18)
self.assertEqual(tbl[1,1] , 13)
self.assertEqual(tbl[0,1] , 19)
self.assertEqual(tbl[1,0] , 22)
def test10(self):
""" filtering """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_filt_10_3.pkl'),
'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
self.details.filterVal=1
self.details.filterFrac=.33
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,90)
self.assertEqual(misCount,13)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.9578,4)
self.assertAlmostEqual(avgBad,.8538,4)
self.assertEqual(tbl[0,0] , 54)
self.assertEqual(tbl[1,1] , 36)
self.assertEqual(tbl[0,1] , 1)
self.assertEqual(tbl[1,0] , 12)
def test11(self):
""" filtering with segmentation """
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_filt_10_3.pkl'),
'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
self.details.doHoldout=1
self.details.filterVal=1
self.details.filterFrac=.33
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood,37)
self.assertEqual(misCount,6)
self.assertEqual(nSkipped,0)
self.assertAlmostEqual(avgGood,.95946,4)
self.assertAlmostEqual(avgBad,.85,4)
self.assertEqual(tbl[0,0] , 14)
self.assertEqual(tbl[1,1] , 23)
self.assertEqual(tbl[0,1] , 1)
self.assertEqual(tbl[1,0] , 5)
def test12(self):
""" test the naive bayes composite"""
self.details.tableName = 'ferro_noquant'
with open(os.path.join(self.baseDir,'ferromag_NaiveBayes.pkl'),
'rb') as pklF:
compos = pickle.load(pklF)
tgt = 10
self.assertEqual(len(compos),tgt)
self.details.doHoldout=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
self.assertEqual(nGood , 25)
self.assertEqual(misCount , 6)
self.assertEqual(nSkipped , 0)
self.assertAlmostEqual(avgGood, 0.9800,4)
self.assertAlmostEqual(avgBad, 0.86667,4)
self.assertEqual(tbl[0,0] , 9)
self.assertEqual(tbl[0,1] , 6)
self.assertEqual(tbl[1,0] , 0)
self.assertEqual(tbl[1,1] , 16)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/ML/UnitTestScreenComposite.py",
"copies": "1",
"size": "10507",
"license": "bsd-3-clause",
"hash": 6088741859960249000,
"line_mean": 34.2583892617,
"line_max": 111,
"alpha_frac": 0.6851622728,
"autogenerated": false,
"ratio": 3.0393404686144057,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.901300082346641,
"avg_score": 0.04230038358959904,
"num_lines": 298
} |
"""unit testing code for the ScreenComposite functionality
"""
from rdkit import RDConfig
import unittest,os
from rdkit.ML import BuildComposite
from rdkit.ML import ScreenComposite
import cPickle as pickle
def feq(a,b,tol=1e-4):
if abs(a-b)>tol: return 0
else: return 1
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'ML','test_data')
self.dbName = RDConfig.RDTestDatabase
self.details = ScreenComposite.SetDefaults()
self.details.dbName = self.dbName
self.details.dbUser = RDConfig.defaultDBUser
self.details.dbPassword = RDConfig.defaultDBPassword
def test1(self):
""" basics """
self.details.tableName = 'ferro_quant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==93
assert misCount==2
assert nSkipped==0
assert feq(avgGood,.9849),avgGood
assert feq(avgBad,.8500),avgBad
assert tbl[0,0] == 54,tbl
assert tbl[1,1] == 39
assert tbl[0,1] == 2
assert tbl[1,0] == 0
def test2(self):
""" include holdout data only """
self.details.tableName = 'ferro_quant'
self.details.doHoldout=1
self.details.doTraining=0
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==28
assert misCount==1
assert nSkipped==0
assert feq(avgGood,.9857),avgGood
assert feq(avgBad,1.000),avgBad
assert tbl[0,0] == 16,tbl
assert tbl[1,1] == 12
assert tbl[0,1] == 1
assert tbl[1,0] == 0
def test3(self):
""" include training data only """
self.details.tableName = 'ferro_quant'
self.details.doHoldout=0
self.details.doTraining=1
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==65
assert misCount==1
assert nSkipped==0
assert feq(avgGood,.9846),avgGood
assert feq(avgBad,.7000),avgBad
assert tbl[0,0] == 38,tbl
assert tbl[1,1] == 27
assert tbl[0,1] == 1
assert tbl[1,0] == 0
def test4(self):
""" include thresholding """
self.details.tableName = 'ferro_quant'
self.details.threshold = 0.80
self.details.doHoldout=0
self.details.doTraining=0
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_quant_10.pkl'),
'rb'))
tgt = 7
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==87,str(nGood)
assert misCount==1
assert nSkipped==7,nSkipped
assert feq(avgGood,1.0),avgGood
assert feq(avgBad,1.000),avgBad
assert feq(avgSkip,.7571),avgSkip
assert tbl[0,0] == 50
assert tbl[1,1] == 37
assert tbl[0,1] == 1
assert tbl[1,0] == 0
def test5(self):
""" basics """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_auto_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
tpl = ScreenComposite.ScreenFromDetails(compos,self.details)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = tpl
assert nGood==93,nGood
assert misCount==10
assert nSkipped==0
assert feq(avgGood,.9699),avgGood
assert feq(avgBad,.8100),avgBad
assert tbl[0,0] == 48,tbl
assert tbl[1,1] == 45
assert tbl[0,1] == 7
assert tbl[1,0] == 3
def test6(self):
""" multiple models """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_auto_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
composites = [compos,compos]
tpl = ScreenComposite.ScreenFromDetails(composites,self.details)
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = tpl
assert feq(nGood[0],93),nGood
assert feq(misCount[0],10)
assert feq(nSkipped[0],0)
assert feq(avgGood[0],.9699),avgGood
assert feq(avgBad[0],.8100),avgBad
assert feq(nGood[1],0)
assert feq(misCount[1],0)
assert feq(nSkipped[1],0)
assert feq(avgGood[1],0)
assert feq(avgBad[1],0)
assert feq(tbl[0,0],48),tbl
assert feq(tbl[1,1],45)
assert feq(tbl[0,1],7)
assert feq(tbl[1,0],3)
def test7(self):
""" shuffle """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.shuffleActivities=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==50,nGood
assert misCount==53
assert nSkipped==0
assert feq(avgGood,.7380),avgGood
assert feq(avgBad,.7660),avgBad
assert tbl[0,0] == 30,tbl
assert tbl[1,1] == 20
assert tbl[0,1] == 25
assert tbl[1,0] == 28
def test8(self):
""" shuffle with segmentation """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.shuffleActivities=1
self.details.doHoldout=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==19,nGood
assert misCount==12
assert nSkipped==0
assert feq(avgGood,.7737),avgGood
assert feq(avgBad,.7500),avgBad
assert tbl[0,0] == 12,tbl
assert tbl[1,1] == 7
assert tbl[0,1] == 6
assert tbl[1,0] == 6
def test9(self):
""" shuffle with segmentation2 """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_shuffle_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.shuffleActivities=1
self.details.doTraining=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==31,nGood
assert misCount==41
assert nSkipped==0
assert feq(avgGood,.7161),avgGood
assert feq(avgBad,.7707),avgBad
assert tbl[0,0] == 18,tbl
assert tbl[1,1] == 13
assert tbl[0,1] == 19
assert tbl[1,0] == 22
def test10(self):
""" filtering """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_filt_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.filterVal=1
self.details.filterFrac=.33
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==90
assert misCount==13
assert nSkipped==0
assert feq(avgGood,.9578)
assert feq(avgBad,.8538)
assert tbl[0,0] == 54
assert tbl[1,1] == 36
assert tbl[0,1] == 1
assert tbl[1,0] == 12
def test11(self):
""" filtering with segmentation """
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_filt_10_3.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.doHoldout=1
self.details.filterVal=1
self.details.filterFrac=.33
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood==37,nGood
assert misCount==6
assert nSkipped==0
assert feq(avgGood,.9594)
assert feq(avgBad,.85)
assert tbl[0,0] == 14,tbl
assert tbl[1,1] == 23
assert tbl[0,1] == 1
assert tbl[1,0] == 5
def test12(self):
""" test the naive bayes composite"""
self.details.tableName = 'ferro_noquant'
compos = pickle.load(open(os.path.join(self.baseDir,'ferromag_NaiveBayes.pkl'),
'rb'))
tgt = 10
assert len(compos)==tgt,'bad composite loaded: %d != %d'%(len(compos),tgt)
self.details.doHoldout=1
nGood,misCount,nSkipped,avgGood,avgBad,avgSkip,tbl = ScreenComposite.ScreenFromDetails(compos,self.details)
assert nGood == 27,nGood
assert misCount == 4,misCount
assert nSkipped == 0,nSkipped
assert feq(avgGood, 0.9407),avgGood
assert feq(avgBad, 0.875),avgBad
assert tbl[0,0] == 11,tbl
assert tbl[0,1] == 4
assert tbl[1,0] == 0
assert tbl[1,1] == 16
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/ML/UnitTestScreenComposite.py",
"copies": "2",
"size": "9996",
"license": "bsd-3-clause",
"hash": -4490366876140209700,
"line_mean": 33.2328767123,
"line_max": 111,
"alpha_frac": 0.6396558623,
"autogenerated": false,
"ratio": 3.103384042222912,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4743039904522912,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from rdkit import RDConfig
import os, sys, time
try:
import subprocess
except ImportError:
subprocess = None
TEST_FAILED = -1
TEST_PASSED = 0
def RunTest(exeName, args, extras):
if exeName == "python":
exeName = RDConfig.pythonExe
args = args.split(' ')
startDir = os.getcwd()
if 'dir' in extras:
os.chdir(extras['dir'])
expectedReturn = extras.get('returns', 0)
if not subprocess:
raise NotImplementedError('cannot run tests if the subprocess module is not available.')
else:
try:
retVal = subprocess.call([exeName] + list(args))
except OSError:
print("Could not find executable: %s." % exeName, file=sys.stderr)
return TEST_FAILED
if 'dir' in extras:
os.chdir(startDir)
if retVal != expectedReturn:
return TEST_FAILED
else:
return TEST_PASSED
def RunScript(script, doLongTests, verbose):
# support python 2.7 style -f argument for failfast
if sys.argv[-1] == '-f':
# setting environment allows this setting to recursively pass to all child
# processes
os.environ['PYTHON_TEST_FAILFAST'] = '1'
if len(sys.argv) == 3 and sys.argv[1] == '--testDir':
os.chdir(sys.argv[2])
# -------------------------------------------------------
# this is pretty funny. Whatever directory we started python in
# will be in the search path, and if we've changed to another
# directory, that'll be there too. HOWEVER... the starting
# directory will be searched first (at least in python2.2), so we
# need to make sure that '.' is at the front of the search path
if sys.path[0] != '.':
sys.path = ['.'] + sys.path
script = script.split('.py')[0]
mod = __import__(script)
try:
tests = mod.tests
except AttributeError:
return [], 0
longTests = []
if doLongTests:
try:
longTests = mod.longTests
except AttributeError:
pass
failed = []
for i, entry in enumerate(tests):
try:
exeName, args, extras = entry
except ValueError:
print('bad entry:', entry)
sys.exit(-1)
try:
res = RunTest(exeName, args, extras)
except Exception:
import traceback
traceback.print_exc()
res = TEST_FAILED
if res != TEST_PASSED:
failed.append((exeName, args, extras))
# check failfast setting
if os.environ.get('PYTHON_TEST_FAILFAST', '') == '1':
# return immediately
sys.stderr.write("Exiting from %s\n" % str([exeName] + list(args)))
return failed, i + 1
for i, (exeName, args, extras) in enumerate(longTests):
res = RunTest(exeName, args, extras)
if res != TEST_PASSED:
failed.append((exeName, args, extras))
if os.environ.get('PYTHON_TEST_FAILFAST', '') == '1':
# return immediately
sys.stderr.write("Exitng from %s\n" % str([exeName] + list(args)))
return failed, len(tests) + i + 1
nTests = len(tests) + len(longTests)
del sys.modules[script]
if verbose and failed:
for exeName, args, extras in failed:
print("!!! TEST FAILURE: ", exeName, args, extras, file=sys.stderr)
return failed, nTests
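# Illustrative sketch of the input this runner consumes (the module and file
# names here are hypothetical): a test-list script imported by RunScript() is
# expected to define a ``tests`` list, and optionally ``longTests``, of
# (executable, argument-string, extras-dict) tuples, e.g.
#
#   tests = [
#     ('python', 'UnitTestFoo.py', {}),
#     ('python', 'UnitTestBar.py -v', {'dir': 'subdir', 'returns': 0}),
#   ]
#   longTests = []
#
# and would then be driven from the command line with something like
#   python TestRunner.py test_list.py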
def ReportResults(script, failedTests, nTests, runTime, verbose, dest):
if not nTests:
dest.write('!-!-!-!-!-!-!-!-!-!-!\n')
dest.write('\tScript: %s. No tests run!\n' % (script))
elif not len(failedTests):
dest.write('-----------------\n')
dest.write('\tScript: %s. Passed %d tests in %.2f seconds\n' % (script, nTests, runTime))
else:
dest.write('!-!-!-!-!-!-!-!-!-!-!\n')
dest.write('\tScript: %s. Failed %d (of %d) tests in %.2f seconds\n' %
(script, len(failedTests), nTests, runTime))
if verbose:
for exeName, args, extras in failedTests:
dirName = extras.get('dir', '.')
dirName = os.path.abspath(dirName)
dest.write('\t\t(%s): %s %s\n' % (dirName, exeName, args))
if __name__ == '__main__':
import getopt
args, extras = getopt.getopt(sys.argv[1:], 'lv')
doLongTests = 0
verbose = 1
for arg, val in args:
if arg == '-l':
doLongTests = 1
elif arg == '-v':
verbose = 0
pwd = os.getcwd()
totNumFailed = 0
totNumRun = 0
failures = []
timeAccum = 0.0
for script in extras:
try:
open(script, 'r')
except IOError:
sys.stderr.write('ERROR: Test script %s could not be opened.\n' % (script))
else:
dirName = os.path.dirname(script)
scriptBase = os.path.basename(script)
if dirName:
os.chdir(dirName)
try:
t1 = time.time()
failed, nTests = RunScript(scriptBase, doLongTests, verbose)
t2 = time.time()
except ImportError:
import traceback
traceback.print_exc()
sys.stderr.write('ERROR: Could not import test script %s\n' % (script))
else:
runTime = t2 - t1
ReportResults(script, failed, nTests, runTime, verbose, sys.stderr)
timeAccum += runTime
if dirName:
os.chdir(pwd)
if len(extras) > 1:
totNumFailed += len(failed)
totNumRun += nTests
if len(failed):
failures.append(script)
if totNumRun > 1:
sys.stderr.write('\n\n-*-*-*-*-*-*- Test Results Summary -*-*-*-*-*-*-\n')
sys.stderr.write('\t\tTotal run time: %.2f seconds\n' % (timeAccum))
if totNumFailed:
sys.stderr.write('!!!!!--- %d Failures in %d tests ---!!!!!\n' % (totNumFailed, totNumRun))
sys.stderr.write('\tModules with failures:\n')
for failure in failures:
sys.stderr.write('\t\t%s\n' % failure)
else:
sys.stderr.write(' All %d tests (in %d modules) passed\n' % (totNumRun, len(extras)))
sys.exit(totNumFailed)
class _RedirectStream:
_stream = None
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(getattr(sys, self._stream))
setattr(sys, self._stream, self._new_target)
return self._new_target
def __exit__(self, exctype, excinst, exctb):
setattr(sys, self._stream, self._old_targets.pop())
class redirect_stdout(_RedirectStream):
"""Context manager for temporarily redirecting stdout to another file.
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
_stream = "stdout"
class redirect_stderr(_RedirectStream):
"""Context manager for temporarily redirecting stderr to another file."""
_stream = "stderr"
class OutputRedirectC:
"""Context manager which uses low-level file descriptors to suppress
output to stdout/stderr, optionally redirecting to the named file(s).
Suppress all output
with Silence():
<code>
Redirect stdout to file
with OutputRedirectC(stdout='output.txt', mode='w'):
<code>
Redirect stderr to file
with OutputRedirectC(stderr='output.txt', mode='a'):
<code>
http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
>>>
"""
def __init__(self, stdout=os.devnull, stderr=os.devnull, mode='wb'):
self.outfiles = stdout, stderr
self.combine = (stdout == stderr)
self.mode = mode
self.saved_streams = None
self.fds = None
self.saved_fds = None
self.null_fds = None
self.null_streams = None
def __enter__(self):
# save previous stdout/stderr
self.saved_streams = saved_streams = sys.__stdout__, sys.__stderr__
self.fds = fds = [s.fileno() for s in saved_streams]
self.saved_fds = [os.dup(fd) for fd in fds]
# flush any pending output
for s in saved_streams:
s.flush()
# open surrogate files
if self.combine:
null_streams = [open(self.outfiles[0], self.mode, 0)] * 2
if self.outfiles[0] != os.devnull:
# disable buffering so output is merged immediately
sys.stdout, sys.stderr = [os.fdopen(fd, 'wb', 0) for fd in fds]
else:
null_streams = [open(f, self.mode, 0) for f in self.outfiles]
self.null_fds = null_fds = [s.fileno() for s in null_streams]
self.null_streams = null_streams
# overwrite file objects and low-level file descriptors
for null_fd, fd in zip(null_fds, fds):
os.dup2(null_fd, fd)
def __exit__(self, *args):
# flush any pending output
for s in self.saved_streams:
s.flush()
# restore original streams and file descriptors
for saved_fd, fd in zip(self.saved_fds, self.fds):
os.dup2(saved_fd, fd)
sys.stdout, sys.stderr = self.saved_streams
# clean up
for s in self.null_streams:
s.close()
for fd in self.saved_fds:
os.close(fd)
return False
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/TestRunner.py",
"copies": "1",
"size": "9112",
"license": "bsd-3-clause",
"hash": -8739317964714115000,
"line_mean": 29.8881355932,
"line_max": 99,
"alpha_frac": 0.6187445127,
"autogenerated": false,
"ratio": 3.4423876086135246,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4561132121313525,
"avg_score": null,
"num_lines": null
} |
""" Hybrid EState-VSA descriptors (like the MOE VSA descriptors)
"""
import numpy
from rdkit.Chem.EState.EState import EStateIndices as EStateIndices_
from rdkit.Chem.MolSurf import _LabuteHelper as VSAContribs_
import bisect
"""
These default VSA bins were chosen using the PP3K solubility data
set. An arbitrary number of bins were selected and the
boundaries were selected to give an approximately equal number of
atoms per bin
"""
vsaBins=[4.78,5.00,5.410,5.740,6.00,6.07,6.45,7.00,11.0]
def VSA_EState_(mol,bins=None,force=1):
""" *Internal Use Only*
"""
if not force and hasattr(mol,'_vsaEState'):
return mol._vsaEState
if bins is None: bins = estateBins
propContribs = EStateIndices_(mol,force=force)
volContribs = VSAContribs_(mol)
ans = numpy.zeros(len(bins)+1,numpy.float)
for i,prop in enumerate(propContribs):
if prop is not None:
bin = bisect.bisect_right(bins,volContribs[i+1])
ans[bin] += prop
mol._vsaEState=ans
return ans
"""
These default EState bins were chosen using the PP3K solubility data
set. An arbitrary number of bins (10) were selected and the
boundaries were selected to give an approximately equal number of
atoms per bin
"""
estateBins=[-0.390,0.290,0.717,1.165,1.540,1.807,2.05,4.69,9.17,15.0]
def EState_VSA_(mol,bins=None,force=1):
""" *Internal Use Only*
"""
if not force and hasattr(mol,'_eStateVSA'):
return mol._eStateVSA
if bins is None: bins = estateBins
propContribs = EStateIndices_(mol,force=force)
volContribs = VSAContribs_(mol)
ans = numpy.zeros(len(bins)+1,numpy.float)
for i,prop in enumerate(propContribs):
if prop is not None:
bin = bisect.bisect_right(bins,prop)
ans[bin] += volContribs[i+1]
mol._eStateVSA=ans
return ans
def _InstallDescriptors():
for i in range(len(vsaBins)):
fn = lambda x,y=i:VSA_EState_(x,force=0)[y]
if i > 0:
fn.__doc__="VSA EState Descriptor %d (% 4.2f <= x < % 4.2f)"%(i+1,vsaBins[i-1],vsaBins[i])
else:
fn.__doc__="VSA EState Descriptor %d (-inf < x < % 4.2f)"%(i+1,vsaBins[i])
name="VSA_EState%d"%(i+1)
fn.version="1.0.0"
globals()[name]=fn
i+=1
fn = lambda x,y=i:VSA_EState_(x,force=0)[y]
fn.__doc__="VSA EState Descriptor %d (% 4.2f <= x < inf)"%(i+1,vsaBins[i-1])
name="VSA_EState%d"%(i+1)
fn.version="1.0.0"
globals()[name]=fn
fn=None
for i in range(len(estateBins)):
fn = lambda x,y=i:EState_VSA_(x,force=0)[y]
if i > 0:
fn.__doc__="EState VSA Descriptor %d (% 4.2f <= x < % 4.2f)"%(i+1,estateBins[i-1],estateBins[i])
else:
fn.__doc__="EState VSA Descriptor %d (-inf < x < % 4.2f)"%(i+1,estateBins[i])
name="EState_VSA%d"%(i+1)
fn.version="1.0.1"
globals()[name]=fn
i+=1
fn = lambda x,y=i:EState_VSA_(x,force=0)[y]
fn.__doc__="EState VSA Descriptor %d (% 4.2f <= x < inf)"%(i+1,estateBins[i-1])
name="EState_VSA%d"%(i+1)
fn.version="1.0.1"
globals()[name]=fn
fn=None
# Change log for EState_VSA descriptors:
# version 1.0.1: optimizations, values unaffected
_InstallDescriptors()
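# A minimal usage sketch (illustrative only; it assumes an RDKit install, an
# arbitrary example molecule, and the descriptor functions installed into this
# module's globals() by _InstallDescriptors() above):
if __name__ == '__main__':
  from rdkit import Chem
  _mol = Chem.MolFromSmiles('c1ccccc1O')  # phenol, chosen only as an example
  print(EState_VSA1(_mol))  # first EState_VSA hybrid descriptor
  print(VSA_EState1(_mol))  # first VSA_EState hybrid descriptor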
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/EState/EState_VSA.py",
"copies": "6",
"size": "3368",
"license": "bsd-3-clause",
"hash": -6254493243906284000,
"line_mean": 29.8990825688,
"line_max": 102,
"alpha_frac": 0.6591448931,
"autogenerated": false,
"ratio": 2.6108527131782946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6269997606278294,
"avg_score": null,
"num_lines": null
} |
""" Hybrid EState-VSA descriptors (like the MOE VSA descriptors)
"""
import bisect
import numpy
from rdkit.Chem.EState.EState import EStateIndices as EStateIndices_
from rdkit.Chem.MolSurf import _LabuteHelper as VSAContribs_
"""
These default VSA bins were chosen using the PP3K solubility data
set. An arbitrary number of bins were selected and the
boundaries were selected to give an approximately equal number of
atoms per bin
"""
vsaBins = [4.78, 5.00, 5.410, 5.740, 6.00, 6.07, 6.45, 7.00, 11.0]
def VSA_EState_(mol, bins=None, force=1):
""" *Internal Use Only*
"""
if not force and hasattr(mol, '_vsaEState'):
return mol._vsaEState
if bins is None:
bins = estateBins
propContribs = EStateIndices_(mol, force=force)
volContribs = VSAContribs_(mol)
ans = numpy.zeros(len(bins) + 1, numpy.float)
for i, prop in enumerate(propContribs):
if prop is not None:
nbin = bisect.bisect_right(bins, volContribs[i + 1])
ans[nbin] += prop
mol._vsaEState = ans
return ans
"""
These default EState bins were chosen using the PP3K solubility data
set. An arbitrary number of bins (10) were selected and the
boundaries were selected to give an approximately equal number of
atoms per bin
"""
estateBins = [-0.390, 0.290, 0.717, 1.165, 1.540, 1.807, 2.05, 4.69, 9.17, 15.0]
def EState_VSA_(mol, bins=None, force=1):
""" *Internal Use Only*
"""
if not force and hasattr(mol, '_eStateVSA'):
return mol._eStateVSA
if bins is None:
bins = estateBins
propContribs = EStateIndices_(mol, force=force)
volContribs = VSAContribs_(mol)
ans = numpy.zeros(len(bins) + 1, numpy.float)
for i, prop in enumerate(propContribs):
if prop is not None:
nbin = bisect.bisect_right(bins, prop)
ans[nbin] += volContribs[i + 1]
mol._eStateVSA = ans
return ans
def _descriptorDocstring(name, nbin, bins):
""" Create a docstring for the descriptor name """
if nbin == 0:
interval = "-inf < x < {0:.2f}".format(bins[nbin])
elif nbin < len(bins):
interval = " {0:.2f} <= x < {1:.2f}".format(bins[nbin - 1], bins[nbin])
else:
interval = " {0:.2f} <= x < inf".format(bins[nbin - 1])
return '{0} Descriptor {1} ({2})'.format(name, nbin + 1, interval)
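# For reference, the helper above produces docstrings like:
#   _descriptorDocstring('EState VSA', 0, estateBins)
#     -> 'EState VSA Descriptor 1 (-inf < x < -0.39)'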
def _descriptor_VSA_EState(nbin):
def VSA_EState_bin(mol):
return VSA_EState_(mol, force=False)[nbin]
name = "VSA_EState{0}".format(nbin + 1)
fn = VSA_EState_bin
fn.__doc__ = _descriptorDocstring('VSA EState', nbin, vsaBins)
fn.version = '1.0.0'
return name, fn
def _descriptor_EState_VSA(nbin):
def EState_VSA_bin(mol):
return EState_VSA_(mol, force=False)[nbin]
name = "EState_VSA{0}".format(nbin + 1)
fn = EState_VSA_bin
fn.__name__ = name
if hasattr(fn, '__qualname__'):
fn.__qualname__ = name
fn.__doc__ = _descriptorDocstring('EState VSA', nbin, estateBins)
fn.version = '1.0.1'
return name, fn
def _InstallDescriptors():
for nbin in range(len(vsaBins) + 1):
name, fn = _descriptor_VSA_EState(nbin)
globals()[name] = fn
for nbin in range(len(estateBins) + 1):
name, fn = _descriptor_EState_VSA(nbin)
globals()[name] = fn
# Change log for EState_VSA descriptors:
# version 1.0.1: optimizations, values unaffected
_InstallDescriptors()
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/EState/EState_VSA.py",
"copies": "6",
"size": "3552",
"license": "bsd-3-clause",
"hash": 8627994845224181000,
"line_mean": 25.7067669173,
"line_max": 80,
"alpha_frac": 0.6675112613,
"autogenerated": false,
"ratio": 2.8190476190476192,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012381261534426738,
"num_lines": 133
} |
from __future__ import print_function
import bisect
class TopNContainer(object):
""" maintains a sorted list of a particular number of data elements.
"""
def __init__(self,size,mostNeg=-1e99):
"""
if size is negative, all entries will be kept in sorted order
"""
self._size = size
if(size>=0):
self.best = [mostNeg]*self._size
self.extras = [None]*self._size
else:
self.best = []
self.extras = []
def Insert(self,val,extra=None):
""" only does the insertion if val fits """
if self._size>=0:
if val > self.best[0]:
idx = bisect.bisect(self.best,val)
# insert the new element
if idx == self._size:
self.best.append(val)
self.extras.append(extra)
else:
self.best.insert(idx,val)
self.extras.insert(idx,extra)
# and pop off the head
self.best.pop(0)
self.extras.pop(0)
else:
idx = bisect.bisect(self.best,val)
self.best.insert(idx,val)
self.extras.insert(idx,extra)
def GetPts(self):
""" returns our set of points """
return self.best
def GetExtras(self):
""" returns our set of extras """
return self.extras
def __len__(self):
return self._size
def __getitem__(self,which):
return self.best[which],self.extras[which]
def reverse(self):
self.best.reverse()
self.extras.reverse()
if __name__ == '__main__':
import random
pts = [int(100*random.random()) for x in range(10)]
c = TopNContainer(4)
for pt in pts:
c.Insert(pt,extra=str(pt))
print(c.GetPts())
print(c.GetExtras())
| {
"repo_name": "strets123/rdkit",
"path": "rdkit/DataStructs/TopNContainer.py",
"copies": "4",
"size": "1915",
"license": "bsd-3-clause",
"hash": -4584533402615983000,
"line_mean": 25.2328767123,
"line_max": 70,
"alpha_frac": 0.6078328982,
"autogenerated": false,
"ratio": 3.431899641577061,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.037929492286115425,
"num_lines": 73
} |
from __future__ import print_function
import bisect
class TopNContainer(object):
""" maintains a sorted list of a particular number of data elements.
"""
def __init__(self, size, mostNeg=-1e99):
"""
if size is negative, all entries will be kept in sorted order
"""
self._size = size
if (size >= 0):
self.best = [mostNeg] * self._size
self.extras = [None] * self._size
else:
self.best = []
self.extras = []
def Insert(self, val, extra=None):
""" only does the insertion if val fits """
if self._size >= 0:
if val > self.best[0]:
idx = bisect.bisect(self.best, val)
# insert the new element
if idx == self._size:
self.best.append(val)
self.extras.append(extra)
else:
self.best.insert(idx, val)
self.extras.insert(idx, extra)
# and pop off the head
self.best.pop(0)
self.extras.pop(0)
else:
idx = bisect.bisect(self.best, val)
self.best.insert(idx, val)
self.extras.insert(idx, extra)
def GetPts(self):
""" returns our set of points """
return self.best
def GetExtras(self):
""" returns our set of extras """
return self.extras
def __len__(self):
if self._size >= 0:
return self._size
else:
return len(self.best)
def __getitem__(self, which):
return self.best[which], self.extras[which]
def reverse(self):
self.best.reverse()
self.extras.reverse()
def _exampleCode():
import random
random.seed(0)
pts = [int(100 * random.random()) for _ in range(10)]
c = TopNContainer(4)
for pt in pts:
c.Insert(pt, extra=str(pt))
print(c.GetPts())
print(c.GetExtras())
if __name__ == '__main__': # pragma: nocover
_exampleCode()
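# A further usage sketch (illustrative): constructing the container with a
# negative size keeps *every* inserted value, in sorted order, rather than
# only the top N:
#
#   c = TopNContainer(-1)
#   for v in (3, 1, 2):
#     c.Insert(v)
#   c.GetPts()    # -> [1, 2, 3]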
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/DataStructs/TopNContainer.py",
"copies": "5",
"size": "2074",
"license": "bsd-3-clause",
"hash": 3134968020779097000,
"line_mean": 22.8390804598,
"line_max": 70,
"alpha_frac": 0.599807136,
"autogenerated": false,
"ratio": 3.428099173553719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012897170691971443,
"num_lines": 87
} |
""" functionality to allow adjusting composite model contents
"""
from __future__ import print_function
import numpy
import copy
def BalanceComposite(model, set1, set2, weight, targetSize, names1=None, names2=None):
""" adjusts the contents of the composite model so as to maximize
  the weighted classification accuracy across the two data sets.
The resulting composite model, with _targetSize_ models, is returned.
**Notes**:
- if _names1_ and _names2_ are not provided, _set1_ and _set2_ should
      have the same ordering of columns and _model_ should already have
      had _SetInputOrder()_ called.
"""
#
# adjust the weights to be proportional to the size of the two data sets
# The normalization we do here assures that a perfect model contributes
# a score of S1+S2 to the final
#
S1 = len(set1)
S2 = len(set2)
weight1 = float(S1 + S2) * (1 - weight) / S1
weight2 = float(S1 + S2) * weight / S2
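  # Worked example (illustrative numbers): with S1=100, S2=50 and weight=0.5,
  #   weight1 = 150 * 0.5 / 100 = 0.75   and   weight2 = 150 * 0.5 / 50 = 1.5,
  # so a model that classifies every point correctly scores
  #   100 * 0.75 + 50 * 1.5 = 150 = S1 + S2, as the note above states.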
#print '\t:::',S1,S2,weight1,weight2
#print 'nModels:',len(model)
# start with a copy so that we get all the additional schnick-schnack
res = copy.copy(model)
res.modelList = []
res.errList = []
res.countList = []
res.quantizationRequirements = []
startSize = len(model)
scores = numpy.zeros(startSize, numpy.float)
actQuantBounds = model.GetActivityQuantBounds()
if names1 is not None:
model.SetInputOrder(names1)
for pt in set1:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight1
if names2 is not None:
model.SetInputOrder(names2)
for pt in set2:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight2
# normalize the scores
nPts = S1 + S2
scores /= nPts
# sort them:
bestOrder = list(numpy.argsort(scores))
bestOrder.reverse()
print('\tTAKE:', bestOrder[:targetSize])
# and now take the best set:
for i in range(targetSize):
idx = bestOrder[i]
mdl = model.modelList[idx]
res.modelList.append(mdl)
res.errList.append(1. - scores[idx])
res.countList.append(1)
# FIX: this should probably be more general:
res.quantizationRequirements.append(0)
return res
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/ML/Composite/AdjustComposite.py",
"copies": "1",
"size": "2617",
"license": "bsd-3-clause",
"hash": -4321554604370946600,
"line_mean": 28.7386363636,
"line_max": 86,
"alpha_frac": 0.6679403898,
"autogenerated": false,
"ratio": 3.3042929292929295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9355781545583133,
"avg_score": 0.02329035470195934,
"num_lines": 88
} |
""" functionality to allow adjusting composite model contents
"""
from __future__ import print_function
import copy
import numpy
def BalanceComposite(model, set1, set2, weight, targetSize, names1=None, names2=None):
""" adjusts the contents of the composite model so as to maximize
  the weighted classification accuracy across the two data sets.
The resulting composite model, with _targetSize_ models, is returned.
**Notes**:
- if _names1_ and _names2_ are not provided, _set1_ and _set2_ should
      have the same ordering of columns and _model_ should already
have had _SetInputOrder()_ called.
"""
#
# adjust the weights to be proportional to the size of the two data sets
# The normalization we do here assures that a perfect model contributes
# a score of S1+S2 to the final
#
S1 = len(set1)
S2 = len(set2)
weight1 = float(S1 + S2) * (1 - weight) / S1
weight2 = float(S1 + S2) * weight / S2
# print('\t:::', S1, S2, weight1, weight2)
# print('nModels:', len(model))
# start with a copy so that we get all the additional schnick-schnack
res = copy.copy(model)
res.modelList = []
res.errList = []
res.countList = []
res.quantizationRequirements = []
startSize = len(model)
  scores = numpy.zeros(startSize, float)
actQuantBounds = model.GetActivityQuantBounds()
if names1 is not None:
model.SetInputOrder(names1)
for pt in set1:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight1
if names2 is not None:
model.SetInputOrder(names2)
for pt in set2:
pred, conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i] == ans:
scores[i] += weight2
# normalize the scores
nPts = S1 + S2
scores /= nPts
# sort them:
bestOrder = list(numpy.argsort(scores))
bestOrder.reverse()
print('\tTAKE:', bestOrder[:targetSize])
# and now take the best set:
for i in range(targetSize):
idx = bestOrder[i]
mdl = model.modelList[idx]
res.modelList.append(mdl)
res.errList.append(1. - scores[idx])
res.countList.append(1)
# FIX: this should probably be more general:
res.quantizationRequirements.append(0)
return res
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/Composite/AdjustComposite.py",
"copies": "4",
"size": "2628",
"license": "bsd-3-clause",
"hash": -604106757098932400,
"line_mean": 28.2,
"line_max": 86,
"alpha_frac": 0.6651445967,
"autogenerated": false,
"ratio": 3.3056603773584907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5970804974058491,
"avg_score": null,
"num_lines": null
} |
""" functionality to allow adjusting composite model contents
"""
import numpy
import copy
def BalanceComposite(model,set1,set2,weight,targetSize,names1=None,names2=None):
""" adjusts the contents of the composite model so as to maximize
  the weighted classification accuracy across the two data sets.
The resulting composite model, with _targetSize_ models, is returned.
**Notes**:
- if _names1_ and _names2_ are not provided, _set1_ and _set2_ should
      have the same ordering of columns and _model_ should already
have had _SetInputOrder()_ called.
"""
#
# adjust the weights to be proportional to the size of the two data sets
# The normalization we do here assures that a perfect model contributes
# a score of S1+S2 to the final
#
S1 = len(set1)
S2 = len(set2)
weight1 = float(S1+S2)*(1-weight)/S1
weight2 = float(S1+S2)*weight/S2
#print '\t:::',S1,S2,weight1,weight2
#print 'nModels:',len(model)
# start with a copy so that we get all the additional schnick-schnack
res = copy.copy(model)
res.modelList = []
res.errList = []
res.countList = []
res.quantizationRequirements = []
startSize = len(model)
scores = numpy.zeros(startSize,numpy.float)
actQuantBounds = model.GetActivityQuantBounds()
if names1 is not None:
model.SetInputOrder(names1)
for pt in set1:
pred,conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i]==ans: scores[i] += weight1
if names2 is not None:
model.SetInputOrder(names2)
for pt in set2:
pred,conf = model.ClassifyExample(pt)
if actQuantBounds:
ans = model.QuantizeActivity(pt)[-1]
else:
ans = pt[-1]
votes = model.GetVoteDetails()
for i in range(startSize):
if votes[i]==ans: scores[i] += weight2
# normalize the scores
nPts = S1+S2
scores /= nPts
# sort them:
bestOrder = list(numpy.argsort(scores))
bestOrder.reverse()
print '\tTAKE:',bestOrder[:targetSize]
# and now take the best set:
for i in range(targetSize):
idx = bestOrder[i]
mdl = model.modelList[idx]
res.modelList.append(mdl)
res.errList.append(1.-scores[idx])
res.countList.append(1)
# FIX: this should probably be more general:
res.quantizationRequirements.append(0)
return res
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/ML/Composite/AdjustComposite.py",
"copies": "2",
"size": "2529",
"license": "bsd-3-clause",
"hash": 7237315060509842000,
"line_mean": 29.1071428571,
"line_max": 80,
"alpha_frac": 0.6797153025,
"autogenerated": false,
"ratio": 3.2674418604651163,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4947157162965117,
"avg_score": null,
"num_lines": null
} |
""" Define the class _KNNClassificationModel_, used to represent a k-nearest neighbhors
classification model
Inherits from _KNNModel_
"""
from rdkit.ML.KNN import KNNModel
class KNNClassificationModel(KNNModel.KNNModel):
""" This is used to represent a k-nearest neighbor classifier
"""
def __init__(self, k, attrs, dfunc, radius=None):
self._setup(k, attrs, dfunc, radius)
self._badExamples = [] # list of examples incorrectly classified
def type(self):
return "Classification Model"
def SetBadExamples(self, examples):
self._badExamples = examples
def GetBadExamples(self):
return self._badExamples
def NameModel(self, varNames):
self.SetName(self.type())
def ClassifyExample(self, example, appendExamples=0, neighborList=None):
""" Classify a an example by looking at its closest neighbors
The class assigned to this example is same as the class for the mojority of its
_k neighbors
**Arguments**
      - example: the example to be classified
- appendExamples: if this is nonzero then the example will be stored on this model
- neighborList: if provided, will be used to return the list of neighbors
**Returns**
- the classification of _example_
"""
if appendExamples:
self._examples.append(example)
    # first find the k-closest examples in the training set
knnLst = self.GetNeighbors(example)
# find out how many of the neighbors belong to each of the classes
clsCnt = {}
for knn in knnLst:
cls = knn[1][-1]
if (cls in clsCnt):
clsCnt[cls] += 1
else:
clsCnt[cls] = 1
if neighborList is not None:
neighborList.extend(knnLst)
# now return the class with the maximum count
mkey = -1
mcnt = -1
for key in clsCnt.keys():
if (mcnt < clsCnt[key]):
mkey = key
mcnt = clsCnt[key]
return mkey
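# A minimal usage sketch (toy data, not part of the original module): a 1-nearest neighbor
# classifier over 2D points whose last entry is the class label. EuclideanDist comes from
# rdkit.ML.KNN.DistFunctions, as elsewhere in this package.
def _exampleUsage():  # pragma: nocover
  from rdkit.ML.KNN.DistFunctions import EuclideanDist
  train = [[0.0, 0.0, 0], [1.0, 1.0, 1], [0.9, 1.1, 1]]
  model = KNNClassificationModel(k=1, attrs=[0, 1], dfunc=EuclideanDist)
  model.SetTrainingExamples(train)
  # the nearest training point to (0.1, 0.2) is (0.0, 0.0), so class 0 is returned
  return model.ClassifyExample([0.1, 0.2, None])
if __name__ == '__main__':  # pragma: nocover
  print(_exampleUsage())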
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/KNN/KNNClassificationModel.py",
"copies": "11",
"size": "1992",
"license": "bsd-3-clause",
"hash": 493484967371214500,
"line_mean": 23.9,
"line_max": 87,
"alpha_frac": 0.6666666667,
"autogenerated": false,
"ratio": 3.808795411089866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9975462077789866,
"avg_score": null,
"num_lines": null
} |
""" Define the class _KNNRegressionModel_, used to represent a k-nearest neighbhors
regression model
Inherits from _KNNModel_
"""
from rdkit.ML.KNN import KNNModel
class KNNRegressionModel(KNNModel.KNNModel):
""" This is used to represent a k-nearest neighbor classifier
"""
def __init__(self, k, attrs, dfunc, radius=None):
self._setup(k, attrs, dfunc, radius)
self._badExamples = [] # list of examples incorrectly classified
def type(self):
return "Regression Model"
def SetBadExamples(self, examples):
self._badExamples = examples
def GetBadExamples(self):
return self._badExamples
def NameModel(self, varNames):
self.SetName(self.type())
def PredictExample(self, example, appendExamples=0, weightedAverage=0, neighborList=None):
""" Generates a prediction for an example by looking at its closest neighbors
**Arguments**
      - example: the example to be classified
- appendExamples: if this is nonzero then the example will be stored on this model
- weightedAverage: if provided, the neighbors' contributions to the value will be
weighed by their reciprocal square distance
- neighborList: if provided, will be used to return the list of neighbors
**Returns**
- the classification of _example_
"""
if appendExamples:
self._examples.append(example)
# first find the k-closest examples in the training set
knnLst = self.GetNeighbors(example)
accum = 0.0
denom = 0.0
for knn in knnLst:
if knn[1] is None:
continue
if weightedAverage:
dist = knn[0]
if dist == 0.0:
w = 1.
else:
w = 1. / dist
else:
w = 1.0
accum += w * knn[1][-1]
denom += w
if denom:
accum /= denom
if neighborList is not None:
neighborList.extend(knnLst)
return accum
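# A minimal usage sketch (toy data, not part of the original module): a 2-nearest neighbor
# regressor; with weightedAverage=1 the closer neighbor dominates the predicted value.
def _exampleUsage():  # pragma: nocover
  from rdkit.ML.KNN.DistFunctions import EuclideanDist
  train = [[0.0, 0.0, 1.0], [1.0, 1.0, 3.0]]
  model = KNNRegressionModel(k=2, attrs=[0, 1], dfunc=EuclideanDist)
  model.SetTrainingExamples(train)
  plain = model.PredictExample([0.1, 0.1, None])  # simple mean of 1.0 and 3.0 -> 2.0
  weighted = model.PredictExample([0.1, 0.1, None], weightedAverage=1)  # ~1.2, pulled toward 1.0
  return plain, weighted
if __name__ == '__main__':  # pragma: nocover
  print(_exampleUsage())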
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/ML/KNN/KNNRegressionModel.py",
"copies": "12",
"size": "1998",
"license": "bsd-3-clause",
"hash": -1707234064698523400,
"line_mean": 23.6666666667,
"line_max": 92,
"alpha_frac": 0.6461461461,
"autogenerated": false,
"ratio": 3.9176470588235293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from rdkit import RDConfig
from rdkit import Chem
import os.path
from rdkit.VLib.NodeLib import *
from rdkit.VLib import Supply,Filter
# this would be a real input, from an sd file:
#fName = os.path.join(RDConfig.RDCodeDir,'VLib','NodeLib','test_data','NCI_aids.10.dupes.sdf')
#supplier = SDSupply.SDSupplyNode(fName)
# instead though, we want a simpler input:
smis = ['CCOC','CCO.Cl','CC(=O)[O-].[Na+]','CC[Cu]CC','OCC','C[N+](C)(C)C.[Cl-]',
'[Na+].[Cl-]']
mols = [Chem.MolFromSmiles(x) for x in smis]
# name the molecules (only needed because we built them from smiles):
for i in range(len(mols)):
mols[i].SetProp('Name','Mol-%d'%(i+1))
supplier = Supply.SupplyNode(contents=mols)
# should be 7 here
print('initial:',len([x for x in supplier]))
# filter out anything with a transition metal or lanthanide:
metals = '[#21,#22,#23,#24,#25,#26,#27,#28,#29,#39,#40,#41,#42,#43,#44,#45,#46,#47,#57,#58,#59,#60,#61,#62,#63,#64,#65,#66,#67,#68,#69,#70,#71,#72,#73,#74,#75,#76,#77,#78,#79]'
smaFilter= SmartsMolFilter.SmartsFilter(patterns=[metals],counts=[1])
smaFilter.SetNegate(1)
smaFilter.AddParent(supplier)
# should be 6 here
print('post-smaFilter:',len([x for x in smaFilter]))
salts = ['[Cl;H1&X1,-]','[Na+]','[O;H2,H1&-,X0&-2]']
remover = SmartsRemover.SmartsRemover(patterns=salts)
remover.AddParent(smaFilter)
atsFilter = Filter.FilterNode(func=lambda x:x.GetNumAtoms()>1)
atsFilter.AddParent(remover)
# should be 5 here
print('post-remover:',len([x for x in atsFilter]))
dupeFilter = SmilesDupeFilter.DupeFilter()
dupeFilter.AddParent(atsFilter)
# should be 4 here
print('post-dupes:',len([x for x in dupeFilter]))
from rdkit.six import StringIO
# a StringIO object acts like a file:
io = StringIO()
output = SmilesOutput.OutputNode(dest=io,delim=', ',idField='Name')
output.AddParent(dupeFilter)
print('post-output:',len([x for x in output]))
print('OUTPUT:')
print(io.getvalue())
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/VLib/NodeLib/demo.py",
"copies": "4",
"size": "2022",
"license": "bsd-3-clause",
"hash": 2159516174675559200,
"line_mean": 33.8620689655,
"line_max": 176,
"alpha_frac": 0.696834817,
"autogenerated": false,
"ratio": 2.6328125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5329647317,
"avg_score": null,
"num_lines": null
} |
from qt import *
import sys
if sys.platform == 'win32':
from rdkit.qtGui.qtActiveX import MakeActiveXClass
import win32com.client.gencache
import win32clipboard
try:
cdxModule = win32com.client.gencache.EnsureModule("{AF2D2DBA-75E4-4123-BC0B-A57BD5C5C5D2}", 0, 7, 0)
except Exception:
raise ImportError("Chemdraw 6.0 or greater does not appear to be installed.")
else:
raise ImportError("Chemdraw support only available under Windows")
#----------------------------------------------------------------------
class ChemdrawPanel(QWidget):
def __init__(self,parent=None,name="test",readOnly=0,size=(300,300)):
QWidget.__init__(self,parent,name)
self.cdx = None
#self.resize(QSize(300,300))
self.resize(size[0],size[1])
# Make a new class that derives from the class in the
# COM module imported above. This class also derives from QWidget and
# implements the machinery needed to integrate the two worlds.
theClass = MakeActiveXClass(cdxModule.ChemDrawCtl,
eventObj = self)
# Create an instance of that class
self.cdx = theClass(self)
if readOnly:
self.cdx.ViewOnly=1
# FIX:
# This hackery is due to an apparent problem with PyQt: there is
# always a gray box about 30 pixels high in the widget we're deriving
# from.
self.offset=30
self.label=QLabel(self,"ChemDraw")
self.label.setText(name)
self.label.setAlignment(Qt.AlignHCenter)
fnt = QApplication.font()
fnt.setPointSize(14)
self.label.setFont(fnt)
def pullData(self,fmt='chemical/daylight-smiles'):
data = self.cdx.GetData(fmt)
return str(data)
def setData(self,data,fmt='chemical/daylight-smiles'):
self.cdx.Objects.Clear()
res = self.cdx.SetData(fmt,data)
return res
def resizeEvent(self,evt):
sz = evt.size()
self.label.setGeometry(0,0,sz.width(),self.offset)
self.cdx.MoveWindow((0,self.offset,sz.width(),sz.height()),1)
def __del__(self):
if self.cdx:
self.cdx = None
# demo code
if __name__ == '__main__':
import sys,container
a = QApplication(sys.argv)
widg = QMainWindow()
panel = ChemdrawPanel(widg)
panel.show()
widg.setCentralWidget(panel)
widg.resize(QSize(300,300))
panel.setData("c1ccccc1C(=O)O")
widg.show()
a.setMainWidget(widg)
a.exec_loop()
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/utils/chemdraw_qax.py",
"copies": "1",
"size": "2430",
"license": "bsd-3-clause",
"hash": -1699072233390998000,
"line_mean": 29.7594936709,
"line_max": 104,
"alpha_frac": 0.6609053498,
"autogenerated": false,
"ratio": 3.2142857142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43751910640857145,
"avg_score": null,
"num_lines": null
} |
from rdkit import Chem
from rdkit.VLib.Filter import FilterNode
class DupeFilter(FilterNode):
""" canonical-smiles based duplicate filter
Assumptions:
- inputs are molecules
Sample Usage:
>>> import os
>>> from rdkit import RDConfig
>>> from rdkit.VLib.NodeLib.SDSupply import SDSupplyNode
>>> fileN = os.path.join(RDConfig.RDCodeDir,'VLib','NodeLib',\
'test_data','NCI_aids.10.sdf')
>>> suppl = SDSupplyNode(fileN)
>>> filt = DupeFilter()
>>> filt.AddParent(suppl)
>>> ms = [x for x in filt]
>>> len(ms)
10
>>> ms[0].GetProp("_Name")
'48'
>>> ms[1].GetProp("_Name")
'78'
>>> filt.reset()
>>> filt.next().GetProp("_Name")
'48'
"""
def __init__(self, **kwargs):
FilterNode.__init__(self, func=self.filter, **kwargs)
self._smisSeen = set()
def reset(self):
FilterNode.reset(self)
self._smisSeen = set()
def filter(self, cmpd):
smi = Chem.MolToSmiles(cmpd)
if smi not in self._smisSeen:
self._smisSeen.add(smi)
return 1
else:
return 0
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/VLib/NodeLib/SmilesDupeFilter.py",
"copies": "11",
"size": "1497",
"license": "bsd-3-clause",
"hash": 221437255125937150,
"line_mean": 20.3857142857,
"line_max": 76,
"alpha_frac": 0.588510354,
"autogenerated": false,
"ratio": 3.198717948717949,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9287228302717949,
"avg_score": null,
"num_lines": null
} |
from rdkit import Chem
from rdkit.VLib.Filter import FilterNode
class SmartsFilter(FilterNode):
""" filter out molecules matching one or more SMARTS patterns
There is a count associated with each pattern. Molecules are
allowed to match the pattern up to this number of times.
Assumptions:
- inputs are molecules
Sample Usage:
>>> smis = ['C1CCC1','C1CCC1C=O','CCCC','CCC=O','CC(=O)C','CCN','NCCN','NCC=O']
>>> mols = [Chem.MolFromSmiles(x) for x in smis]
>>> from rdkit.VLib.Supply import SupplyNode
>>> suppl = SupplyNode(contents=mols)
>>> ms = [x for x in suppl]
>>> len(ms)
8
We can pass in SMARTS strings:
>>> smas = ['C=O','CN']
>>> counts = [1,2]
>>> filt = SmartsFilter(patterns=smas,counts=counts)
>>> filt.AddParent(suppl)
>>> ms = [x for x in filt]
>>> len(ms)
5
Alternatively, we can pass in molecule objects:
>>> mols =[Chem.MolFromSmarts(x) for x in smas]
>>> counts = [1,2]
>>> filt.Destroy()
>>> filt = SmartsFilter(patterns=mols,counts=counts)
>>> filt.AddParent(suppl)
>>> ms = [x for x in filt]
>>> len(ms)
5
Negation does what you'd expect:
>>> filt.SetNegate(1)
>>> ms = [x for x in filt]
>>> len(ms)
3
"""
def __init__(self, patterns=[], counts=[], **kwargs):
FilterNode.__init__(self, func=self.filter, **kwargs)
self._initPatterns(patterns, counts)
def _initPatterns(self, patterns, counts):
nPatts = len(patterns)
if len(counts) and len(counts) != nPatts:
raise ValueError('if counts is specified, it must match patterns in length')
if not len(counts):
counts = [1] * nPatts
targets = [None] * nPatts
for i in range(nPatts):
p = patterns[i]
c = counts[i]
if type(p) in (str, bytes):
m = Chem.MolFromSmarts(p)
if not m:
raise ValueError('bad smarts: %s' % (p))
p = m
targets[i] = p, c
self._patterns = tuple(targets)
def filter(self, cmpd):
res = False
for patt, count in self._patterns:
ms = cmpd.GetSubstructMatches(patt)
nMatches = len(ms)
if nMatches >= count:
# this query is an or, so we short circuit true:
res = True
break
return res
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/VLib/NodeLib/SmartsMolFilter.py",
"copies": "11",
"size": "2668",
"license": "bsd-3-clause",
"hash": 1041939902500087700,
"line_mean": 24.6538461538,
"line_max": 83,
"alpha_frac": 0.5948275862,
"autogenerated": false,
"ratio": 3.289765721331689,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006843602825685255,
"num_lines": 104
} |
from rdkit import RDConfig
from rdkit import Chem
import os.path
from rdkit.VLib.NodeLib import *
from rdkit.VLib import Supply,Filter
# this would be a real input, from an sd file:
#fName = os.path.join(RDConfig.RDCodeDir,'VLib','NodeLib','test_data','NCI_aids.10.dupes.sdf')
#supplier = SDSupply.SDSupplyNode(fName)
# instead though, we want a simpler input:
smis = ['CCOC','CCO.Cl','CC(=O)[O-].[Na+]','CC[Cu]CC','OCC','C[N+](C)(C)C.[Cl-]',
'[Na+].[Cl-]']
mols = [Chem.MolFromSmiles(x) for x in smis]
# name the molecules (only needed because we built them from smiles):
for i in range(len(mols)):
mols[i].SetProp('Name','Mol-%d'%(i+1))
supplier = Supply.SupplyNode(contents=mols)
# should be 7 here
print 'initial:',len([x for x in supplier])
# filter out anything with a transition metal or lanthanide:
metals = '[#21,#22,#23,#24,#25,#26,#27,#28,#29,#39,#40,#41,#42,#43,#44,#45,#46,#47,#57,#58,#59,#60,#61,#62,#63,#64,#65,#66,#67,#68,#69,#70,#71,#72,#73,#74,#75,#76,#77,#78,#79]'
smaFilter= SmartsMolFilter.SmartsFilter(patterns=[metals],counts=[1])
smaFilter.SetNegate(1)
smaFilter.AddParent(supplier)
# should be 6 here
print 'post-smaFilter:',len([x for x in smaFilter])
salts = ['[Cl;H1&X1,-]','[Na+]','[O;H2,H1&-,X0&-2]']
remover = SmartsRemover.SmartsRemover(patterns=salts)
remover.AddParent(smaFilter)
atsFilter = Filter.FilterNode(func=lambda x:x.GetNumAtoms()>1)
atsFilter.AddParent(remover)
# should be 5 here
print 'post-remover:',len([x for x in atsFilter])
dupeFilter = SmilesDupeFilter.DupeFilter()
dupeFilter.AddParent(atsFilter)
# should be 4 here
print 'post-dupes:',len([x for x in dupeFilter])
import StringIO
# a StringIO object acts like a file:
io = StringIO.StringIO()
output = SmilesOutput.OutputNode(dest=io,delim=', ',idField='Name')
output.AddParent(dupeFilter)
print 'post-output:',len([x for x in output])
print 'OUTPUT:'
print io.getvalue()
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/VLib/NodeLib/demo.py",
"copies": "2",
"size": "1977",
"license": "bsd-3-clause",
"hash": 4897881568559909000,
"line_mean": 33.6842105263,
"line_max": 176,
"alpha_frac": 0.6980273141,
"autogenerated": false,
"ratio": 2.608179419525066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9173002216679869,
"avg_score": 0.026640903389039454,
"num_lines": 57
} |
from rdkit import RDConfig
import sys,os,types
from rdkit import Chem
from rdkit.VLib.Filter import FilterNode
class SmartsFilter(FilterNode):
""" filter out molecules matching one or more SMARTS patterns
There is a count associated with each pattern. Molecules are
allowed to match the pattern up to this number of times.
Assumptions:
- inputs are molecules
Sample Usage:
>>> smis = ['C1CCC1','C1CCC1C=O','CCCC','CCC=O','CC(=O)C','CCN','NCCN','NCC=O']
>>> mols = [Chem.MolFromSmiles(x) for x in smis]
>>> from rdkit.VLib.Supply import SupplyNode
>>> suppl = SupplyNode(contents=mols)
>>> ms = [x for x in suppl]
>>> len(ms)
8
We can pass in SMARTS strings:
>>> smas = ['C=O','CN']
>>> counts = [1,2]
>>> filt = SmartsFilter(patterns=smas,counts=counts)
>>> filt.AddParent(suppl)
>>> ms = [x for x in filt]
>>> len(ms)
5
Alternatively, we can pass in molecule objects:
>>> mols =[Chem.MolFromSmarts(x) for x in smas]
>>> counts = [1,2]
>>> filt.Destroy()
>>> filt = SmartsFilter(patterns=mols,counts=counts)
>>> filt.AddParent(suppl)
>>> ms = [x for x in filt]
>>> len(ms)
5
Negation does what you'd expect:
>>> filt.SetNegate(1)
>>> ms = [x for x in filt]
>>> len(ms)
3
"""
def __init__(self,patterns=[],counts=[],**kwargs):
FilterNode.__init__(self,func=self.filter,**kwargs)
self._initPatterns(patterns,counts)
def _initPatterns(self,patterns,counts):
nPatts = len(patterns)
if len(counts) and len(counts)!=nPatts:
raise ValueError,'if counts is specified, it must match patterns in length'
if not len(counts):
counts = [1]*nPatts
targets = [None]*nPatts
for i in range(nPatts):
p = patterns[i]
c = counts[i]
if type(p) in types.StringTypes:
m = Chem.MolFromSmarts(p)
if not m:
raise ValueError,'bad smarts: %s'%(p)
p = m
targets[i] = p,c
self._patterns = tuple(targets)
def filter(self,cmpd):
neg = self.Negate()
res = 0
#sys.stderr.write('\tFILTER: %s\n'%(Chem.MolToSmiles(cmpd)))
for patt,count in self._patterns:
ms = cmpd.GetSubstructMatches(patt)
nMatches = len(ms)
if nMatches >= count:
# this query is an or, so we short circuit true:
res = 1
break
return res
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/VLib/NodeLib/SmartsMolFilter.py",
"copies": "2",
"size": "2725",
"license": "bsd-3-clause",
"hash": 1067319250771499500,
"line_mean": 24.4672897196,
"line_max": 83,
"alpha_frac": 0.5941284404,
"autogenerated": false,
"ratio": 3.2517899761336517,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4845918416533652,
"avg_score": null,
"num_lines": null
} |
from rdkit import RDConfig
import sys,os,types
from rdkit import Chem
from rdkit.VLib.Transform import TransformNode
class SmartsRemover(TransformNode):
""" transforms molecules by removing atoms matching smarts patterns
Assumptions:
- inputs are molecules
Sample Usage:
>>> smis = ['C1CCC1.C=O','C1CCC1C=O','CCC=O.C=O','NCC=O.C=O.CN']
>>> mols = [Chem.MolFromSmiles(x) for x in smis]
>>> from rdkit.VLib.Supply import SupplyNode
>>> suppl = SupplyNode(contents=mols)
>>> ms = [x for x in suppl]
>>> len(ms)
4
We can pass in SMARTS strings:
>>> smas = ['C=O','CN']
>>> tform = SmartsRemover(patterns=smas)
>>> tform.AddParent(suppl)
>>> ms = [x for x in tform]
>>> len(ms)
4
>>> Chem.MolToSmiles(ms[0])
'C1CCC1'
>>> Chem.MolToSmiles(ms[1])
'O=CC1CCC1'
>>> Chem.MolToSmiles(ms[2])
'CCC=O'
>>> Chem.MolToSmiles(ms[3])
'NCC=O'
We can also remove pieces of the molecule that are not complete
fragments:
>>> tform.Destroy()
>>> smas = ['C=O','CN']
>>> smas = [Chem.MolFromSmarts(x) for x in smas]
>>> tform = SmartsRemover(patterns=smas,wholeFragments=0)
>>> tform.AddParent(suppl)
>>> ms = [x for x in tform]
>>> len(ms)
4
>>> Chem.MolToSmiles(ms[0])
'C1CCC1'
>>> Chem.MolToSmiles(ms[1])
'C1CCC1'
>>> Chem.MolToSmiles(ms[3])
''
Or patterns themselves:
>>> tform.Destroy()
>>> smas = ['C=O','CN']
>>> smas = [Chem.MolFromSmarts(x) for x in smas]
>>> tform = SmartsRemover(patterns=smas)
>>> tform.AddParent(suppl)
>>> ms = [x for x in tform]
>>> len(ms)
4
>>> Chem.MolToSmiles(ms[0])
'C1CCC1'
>>> Chem.MolToSmiles(ms[3])
'NCC=O'
"""
def __init__(self,patterns=[],wholeFragments=1,**kwargs):
TransformNode.__init__(self,func=self.transform,**kwargs)
self._wholeFragments = wholeFragments
self._initPatterns(patterns)
def _initPatterns(self,patterns):
nPatts = len(patterns)
targets = [None]*nPatts
for i in range(nPatts):
p = patterns[i]
if type(p) in types.StringTypes:
m = Chem.MolFromSmarts(p)
if not m:
raise ValueError,'bad smarts: %s'%(p)
p = m
targets[i] = p
self._patterns = tuple(targets)
def transform(self,cmpd):
#sys.stderr.write('\tTRANSFORM: %s\n'%(Chem.MolToSmiles(cmpd)))
for patt in self._patterns:
cmpd = Chem.DeleteSubstructs(cmpd,patt,onlyFrags=self._wholeFragments)
#sys.stderr.write('\t\tAfter %s: %s\n'%(Chem.MolToSmiles(patt),Chem.MolToSmiles(cmpd)))
return cmpd
biggerTest="""
>>> smis = ['CCOC','CCO.Cl','CC(=O)[O-].[Na+]','OCC','C[N+](C)(C)C.[Cl-]']
>>> mols = [Chem.MolFromSmiles(x) for x in smis]
>>> from rdkit.VLib.Supply import SupplyNode
>>> suppl = SupplyNode(contents=mols)
>>> ms = [x for x in suppl]
>>> len(ms)
5
#>>> salts = ['[Cl;H1&X1,-]','[Na+]','[O;H2,H1&-,X0&-2]']
>>> salts = ['[Cl;H1&X1,-]','[Na+]','[O;H2,H1&-,X0&-2]']
>>> m = mols[2]
>>> m.GetNumAtoms()
5
>>> patts = [Chem.MolFromSmarts(x) for x in salts]
>>> m2 = Chem.DeleteSubstructs(m,patts[0],1)
>>> m2.GetNumAtoms()
5
>>> m2 = Chem.DeleteSubstructs(m2,patts[1],1)
>>> m2.GetNumAtoms()
4
>>> m2 = Chem.DeleteSubstructs(m2,patts[2],1)
>>> m2.GetNumAtoms()
4
>>> tform = SmartsRemover(patterns=salts)
>>> tform.AddParent(suppl)
>>> ms = [x for x in tform]
>>> len(ms)
5
"""
#------------------------------------
#
# doctest boilerplate
#
__test__={'bigger':biggerTest}
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/VLib/NodeLib/SmartsRemover.py",
"copies": "2",
"size": "3784",
"license": "bsd-3-clause",
"hash": 5915839498637561000,
"line_mean": 24.2266666667,
"line_max": 93,
"alpha_frac": 0.5872093023,
"autogenerated": false,
"ratio": 2.6204986149584486,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42077079172584486,
"avg_score": null,
"num_lines": null
} |
from rdkit import six
from rdkit.VLib.Node import VLibNode
class SupplyNode(VLibNode):
""" base class for nodes which supply things
Assumptions:
1) no parents
Usage Example:
>>> supplier = SupplyNode(contents=[1,2,3])
>>> supplier.next()
1
>>> supplier.next()
2
>>> supplier.next()
3
>>> supplier.next()
Traceback (most recent call last):
...
StopIteration
>>> supplier.reset()
>>> supplier.next()
1
>>> [x for x in supplier]
[1, 2, 3]
"""
def __init__(self, contents=None, **kwargs):
VLibNode.__init__(self, **kwargs)
if contents is not None:
self._contents = contents
else:
self._contents = []
self._pos = 0
def reset(self):
VLibNode.reset(self)
self._pos = 0
def next(self):
if self._pos == len(self._contents):
raise StopIteration
res = self._contents[self._pos]
self._pos += 1
return res
def AddParent(self, parent, notify=1):
raise ValueError('SupplyNodes do not have parents')
if six.PY3:
SupplyNode.__next__ = SupplyNode.next
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/VLib/Supply.py",
"copies": "4",
"size": "1492",
"license": "bsd-3-clause",
"hash": 2943231362243582500,
"line_mean": 18.3766233766,
"line_max": 76,
"alpha_frac": 0.5971849866,
"autogenerated": false,
"ratio": 3.560859188544153,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007383017850633627,
"num_lines": 77
} |
import sys
from rdkit import six
from rdkit.VLib.Node import VLibNode
class TransformNode(VLibNode):
""" base class for nodes which filter their input
Assumptions:
- transform function takes a number of arguments equal to the
number of inputs we have. We return whatever it returns
- inputs (parents) can be stepped through in lockstep
Usage Example:
>>> from rdkit.VLib.Supply import SupplyNode
    >>> def func(a,b):
    ...   return a+b
>>> tform = TransformNode(func)
>>> suppl1 = SupplyNode(contents=[1,2,3,3])
>>> suppl2 = SupplyNode(contents=[1,2,3,1])
>>> tform.AddParent(suppl1)
>>> tform.AddParent(suppl2)
>>> v = [x for x in tform]
>>> v
[2, 4, 6, 4]
>>> tform.reset()
>>> v = [x for x in tform]
>>> v
[2, 4, 6, 4]
If we don't provide a function, just return the inputs:
>>> tform = TransformNode()
>>> suppl1 = SupplyNode(contents=[1,2,3,3])
>>> suppl2 = SupplyNode(contents=[1,2,3,1])
>>> tform.AddParent(suppl1)
>>> tform.AddParent(suppl2)
>>> v = [x for x in tform]
>>> v
[(1, 1), (2, 2), (3, 3), (3, 1)]
"""
def __init__(self,func=None,**kwargs):
VLibNode.__init__(self,**kwargs)
self._func = func
  def next(self):
    args = []
    for parent in self.GetParents():
      args.append(parent.next())
args = tuple(args)
if self._func is not None:
res = self._func(*args)
else:
res = args
return res
if six.PY3:
TransformNode.__next__ = TransformNode.next
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/VLib/Transform.py",
"copies": "1",
"size": "1972",
"license": "bsd-3-clause",
"hash": 4679554147723710000,
"line_mean": 21.9302325581,
"line_max": 65,
"alpha_frac": 0.5811359026,
"autogenerated": false,
"ratio": 3.2487644151565074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43299003177565076,
"avg_score": null,
"num_lines": null
} |
import sys, os.path
from rdkit import six
from rdkit import RDConfig
from rdkit.VLib.Supply import SupplyNode
from rdkit import Chem
class SDSupplyNode(SupplyNode):
""" SD supplier
Sample Usage:
>>> fileN = os.path.join(RDConfig.RDCodeDir,'VLib','NodeLib',\
'test_data','NCI_aids.10.sdf')
>>> suppl = SDSupplyNode(fileN)
>>> ms = [x for x in suppl]
>>> len(ms)
10
>>> ms[0].GetProp("_Name")
'48'
>>> ms[1].GetProp("_Name")
'78'
>>> suppl.reset()
>>> suppl.next().GetProp("_Name")
'48'
>>> suppl.next().GetProp("_Name")
'78'
"""
def __init__(self, fileName, **kwargs):
SupplyNode.__init__(self, **kwargs)
self._fileName = fileName
self._supplier = Chem.SDMolSupplier(self._fileName)
def reset(self):
SupplyNode.reset(self)
self._supplier.reset()
def next(self):
"""
"""
return next(self._supplier)
if six.PY3:
SDSupplyNode.__next__ = SDSupplyNode.next
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/VLib/NodeLib/SDSupply.py",
"copies": "1",
"size": "1317",
"license": "bsd-3-clause",
"hash": 2620716342770040000,
"line_mean": 18.3676470588,
"line_max": 66,
"alpha_frac": 0.5740318907,
"autogenerated": false,
"ratio": 3.158273381294964,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9166213073859422,
"avg_score": 0.013218439627108359,
"num_lines": 68
} |
import sys, types
from rdkit import Chem
from rdkit import six
from rdkit.VLib.Output import OutputNode as BaseOutputNode
class OutputNode(BaseOutputNode):
""" dumps smiles output
Assumptions:
- destination supports a write() method
- inputs (parents) can be stepped through in lockstep
Usage Example:
>>> smis = ['C1CCC1','C1CC1','C=O','NCC']
>>> mols = [Chem.MolFromSmiles(x) for x in smis]
>>> from rdkit.VLib.Supply import SupplyNode
>>> suppl = SupplyNode(contents=mols)
>>> from rdkit.six import StringIO
>>> sio = StringIO()
>>> node = OutputNode(dest=sio,delim=', ')
>>> node.AddParent(suppl)
>>> ms = [x for x in node]
>>> len(ms)
4
>>> txt = sio.getvalue()
>>> repr(txt)
"'1, C1CCC1\\\\n2, C1CC1\\\\n3, C=O\\\\n4, CCN\\\\n'"
"""
def __init__(self, dest=None, delim='\t', idField=None, **kwargs):
BaseOutputNode.__init__(self, dest=dest, strFunc=self.smilesOut)
self._dest = dest
self._idField = idField
self._delim = delim
self._nDumped = 0
def reset(self):
BaseOutputNode.reset(self)
self._nDumped = 0
def smilesOut(self, mol):
self._nDumped += 1
if type(mol) in (tuple, list):
args = mol
mol = args[0]
if len(args) > 1:
args = args[1:]
else:
args = []
else:
args = []
if self._idField and mol.HasProp(self._idField):
label = mol.GetProp(self._idField)
else:
label = str(self._nDumped)
smi = Chem.MolToSmiles(mol)
outp = [label, smi] + args
return '%s\n' % (self._delim.join(outp))
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/VLib/NodeLib/SmilesOutput.py",
"copies": "1",
"size": "1925",
"license": "bsd-3-clause",
"hash": -6269298467054758000,
"line_mean": 21.6470588235,
"line_max": 68,
"alpha_frac": 0.5828571429,
"autogenerated": false,
"ratio": 3.1505728314238954,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42334299743238957,
"avg_score": null,
"num_lines": null
} |
import sys,types
from rdkit import Chem
from rdkit.VLib.Output import OutputNode as BaseOutputNode
class OutputNode(BaseOutputNode):
""" dumps smiles output
Assumptions:
- destination supports a write() method
- inputs (parents) can be stepped through in lockstep
Usage Example:
>>> smis = ['C1CCC1','C1CC1','C=O','NCC']
>>> mols = [Chem.MolFromSmiles(x) for x in smis]
>>> from rdkit.VLib.Supply import SupplyNode
>>> suppl = SupplyNode(contents=mols)
>>> import StringIO
>>> io = StringIO.StringIO()
>>> node = OutputNode(dest=io,delim=', ')
>>> node.AddParent(suppl)
>>> ms = [x for x in node]
>>> len(ms)
4
>>> txt = io.getvalue()
>>> repr(txt)
"'1, C1CCC1\\\\n2, C1CC1\\\\n3, C=O\\\\n4, CCN\\\\n'"
"""
def __init__(self,dest=None,delim='\t',idField=None,**kwargs):
BaseOutputNode.__init__(self,dest=dest,strFunc=self.smilesOut)
self._dest = dest
self._idField = idField
self._delim = delim
self._nDumped = 0
def reset(self):
BaseOutputNode.reset(self)
self._nDumped=0
def smilesOut(self,mol):
self._nDumped += 1
if type(mol) in [types.TupleType,types.ListType]:
args = mol
mol = args[0]
if len(args)>1:
args = args[1:]
else:
args = []
else:
args = []
if self._idField and mol.HasProp(self._idField):
label = mol.GetProp(self._idField)
else:
label = str(self._nDumped)
smi = Chem.MolToSmiles(mol)
outp = [label,smi]+args
return '%s\n'%(self._delim.join(outp))
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/VLib/NodeLib/SmilesOutput.py",
"copies": "5",
"size": "1903",
"license": "bsd-3-clause",
"hash": 3700854602063619600,
"line_mean": 21.9277108434,
"line_max": 66,
"alpha_frac": 0.5859169732,
"autogenerated": false,
"ratio": 3.1506622516556293,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.623657922485563,
"avg_score": null,
"num_lines": null
} |
""" Define the class _KNNClassificationModel_, used to represent a k-nearest neighbhors classification model
Inherits from _KNNModel_
"""
from rdkit.ML.KNN import KNNModel
class KNNClassificationModel(KNNModel.KNNModel) :
""" This is used to represent a k-nearest neighbor classifier
"""
def __init__(self, k, attrs, dfunc,radius=None) :
self._setup(k, attrs, dfunc,radius)
self._badExamples = [] # list of examples incorrectly classified
def type(self):
return "Classification Model"
def SetBadExamples(self, examples) :
self._badExamples = examples
def GetBadExamples(self) :
return self._badExamples
def NameModel(self, varNames) :
self.SetName(self.type())
def ClassifyExample(self, example, appendExamples=0, neighborList=None) :
""" Classify a an example by looking at its closest neighbors
The class assigned to this example is same as the class for the mojority of its
_k neighbors
**Arguments**
      - example: the example to be classified
- appendExamples: if this is nonzero then the example will be stored on this model
- neighborList: if provided, will be used to return the list of neighbors
**Returns**
- the classification of _example_
"""
if appendExamples:
self._examples.append(example)
    # first find the k-closest examples in the training set
knnLst = self.GetNeighbors(example)
# find out how many of the neighbors belong to each of the classes
clsCnt = {}
for knn in knnLst :
cls = knn[1][-1]
if (clsCnt.has_key(cls)) :
clsCnt[cls] += 1
else :
clsCnt[cls] = 1
if neighborList is not None:
neighborList.extend(knnLst)
# now return the class with the maximum count
mkey = -1
mcnt = -1
for key in clsCnt.keys() :
if (mcnt < clsCnt[key]) :
mkey = key
mcnt = clsCnt[key]
return mkey
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/ML/KNN/KNNClassificationModel.py",
"copies": "2",
"size": "2016",
"license": "bsd-3-clause",
"hash": -211442763568115620,
"line_mean": 24.5189873418,
"line_max": 108,
"alpha_frac": 0.6607142857,
"autogenerated": false,
"ratio": 3.7611940298507465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.028049036355877476,
"num_lines": 79
} |
""" Define the class _KNNModel_, used to represent a k-nearest neighbhors model
"""
from rdkit.DataStructs.TopNContainer import TopNContainer
class KNNModel(object):
""" This is a base class used by KNNClassificationModel
and KNNRegressionModel to represent a k-nearest neighbor predictor. In general
  one of these child classes needs to be instantiated.
_KNNModel_s can save the following pieces of internal state, accessible via
  standard setter/getter functions - the child objects store additional state:
1) _Examples_: a list of examples which have been predicted (either classified
or values predicted)
2) _TrainingExamples_: List of training examples (since this is a KNN model these examples
along with the value _k_ below define the model)
3) _TestExamples_: the list of examples used to test the model
4) _k_: the number of closest neighbors used for prediction
"""
def __init__(self, k, attrs, dfunc, radius=None) :
self._setup(k, attrs, dfunc, radius)
def _setup(self, k, attrs, dfunc, radius) :
self._examples = []
self._trainingExamples = []
self._testExamples = []
self._k = k
self._attrs = attrs
self._dfunc = dfunc
self._name = ""
self._radius = radius
def GetName(self) :
    return self._name
def SetName(self, name) :
self._name = name
def GetExamples(self) :
return self._examples
def SetExamples(self, examples):
self._examples = examples
def GetTrainingExamples(self):
return self._trainingExamples
def SetTrainingExamples(self,examples):
self._trainingExamples = examples
def GetTestExamples(self) :
return self._testExamples
def SetTestExamples(self, examples) :
self._testExamples = examples
def GetNeighbors(self,example):
""" Returns the k nearest neighbors of the example
"""
nbrs = TopNContainer(self._k)
for trex in self._trainingExamples:
dist = self._dfunc(trex, example, self._attrs)
if self._radius is None or dist<self._radius:
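        # TopNContainer keeps the largest values, so inserting -dist retains the k nearest neighbors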
nbrs.Insert(-dist,trex)
nbrs.reverse()
return [x for x in nbrs]
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/ML/KNN/KNNModel.py",
"copies": "6",
"size": "2243",
"license": "bsd-3-clause",
"hash": 3538960052297391600,
"line_mean": 27.7564102564,
"line_max": 94,
"alpha_frac": 0.6772180116,
"autogenerated": false,
"ratio": 3.9699115044247786,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7647129516024779,
"avg_score": null,
"num_lines": null
} |
""" Define the class _KNNRegressionModel_, used to represent a k-nearest neighbhors
regression model
Inherits from _KNNModel_
"""
from rdkit.ML.KNN import KNNModel
class KNNRegressionModel(KNNModel.KNNModel) :
""" This is used to represent a k-nearest neighbor classifier
"""
def __init__(self, k, attrs, dfunc, radius=None) :
self._setup(k, attrs, dfunc,radius)
self._badExamples = [] # list of examples incorrectly classified
def type(self):
return "Regression Model"
def SetBadExamples(self, examples) :
self._badExamples = examples
def GetBadExamples(self) :
return self._badExamples
def NameModel(self, varNames) :
self.SetName(self.type())
def PredictExample(self, example, appendExamples=0, weightedAverage=0,neighborList=None) :
""" Generates a prediction for an example by looking at its closest neighbors
**Arguments**
      - example: the example to be classified
- appendExamples: if this is nonzero then the example will be stored on this model
- weightedAverage: if provided, the neighbors' contributions to the value will be
weighed by their reciprocal square distance
- neighborList: if provided, will be used to return the list of neighbors
**Returns**
- the classification of _example_
"""
if appendExamples:
self._examples.append(example)
# first find the k-closest examples in the training set
knnLst = self.GetNeighbors(example)
accum = 0.0
denom = 0.0
for knn in knnLst:
if knn[1] is None: continue
if weightedAverage:
dist = knn[0]
if dist==0.0:
w = 1.
else:
w = 1./dist
else:
w=1.0
accum += w*knn[1][-1]
denom += w
if denom:
accum /= denom
if neighborList is not None:
neighborList.extend(knnLst)
return accum
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/ML/KNN/KNNRegressionModel.py",
"copies": "6",
"size": "1987",
"license": "bsd-3-clause",
"hash": 6461570690119748000,
"line_mean": 23.2317073171,
"line_max": 92,
"alpha_frac": 0.6497232008,
"autogenerated": false,
"ratio": 3.8657587548638133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7515481955663813,
"avg_score": null,
"num_lines": null
} |
import math
def EuclideanDist(ex1, ex2, attrs):
"""
>>> v1 = [0,1,0,1]
>>> v2 = [1,0,1,0]
>>> EuclideanDist(v1,v2,range(4))
2.0
>>> EuclideanDist(v1,v1,range(4))
0.0
>>> v2 = [0,0,0,1]
>>> EuclideanDist(v1,v2,range(4))
1.0
>>> v2 = [0,.5,0,.5]
>>> abs(EuclideanDist(v1,v2,range(4))-1./math.sqrt(2))<1e-4
1
"""
dist = 0.0
for i in attrs:
dist += (ex1[i] - ex2[i])**2
dist = math.sqrt(dist)
return dist
def TanimotoDist(ex1, ex2, attrs):
"""
>>> v1 = [0,1,0,1]
>>> v2 = [1,0,1,0]
>>> TanimotoDist(v1,v2,range(4))
1.0
>>> v2 = [1,0,1,1]
>>> TanimotoDist(v1,v2,range(4))
0.75
>>> TanimotoDist(v2,v2,range(4))
0.0
# this tests Issue 122
>>> v3 = [0,0,0,0]
>>> TanimotoDist(v3,v3,range(4))
1.0
"""
inter = 0.0
unin = 0.0
for i in attrs:
if (ex1[i] or ex2[i]):
unin += 1
if (ex1[i] and ex2[i]):
inter += 1
if (unin != 0.0):
return (1 - inter / unin)
else:
return 1.0
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/KNN/DistFunctions.py",
"copies": "11",
"size": "1427",
"license": "bsd-3-clause",
"hash": -4573777458871548000,
"line_mean": 17.7763157895,
"line_max": 76,
"alpha_frac": 0.511562719,
"autogenerated": false,
"ratio": 2.4731369150779896,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.848469963407799,
"avg_score": null,
"num_lines": null
} |
""" handles doing cross validation with naive bayes models
and evaluation of individual models
"""
from __future__ import print_function
from rdkit.ML.Data import SplitData
from rdkit.ML.NaiveBayes.ClassificationModel import NaiveBayesClassifier
try:
from rdkit.ML.FeatureSelect import CMIM
except ImportError:
CMIM = None
def makeNBClassificationModel(trainExamples, attrs, nPossibleValues, nQuantBounds,
mEstimateVal=-1.0, useSigs=False, ensemble=None, useCMIM=0, **kwargs):
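  # CMIM-based feature selection is optional: if rdkit.ML.FeatureSelect could not be imported
  # above, the useCMIM request is silently skipped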
if CMIM is not None and useCMIM > 0 and useSigs and not ensemble:
ensemble = CMIM.SelectFeatures(trainExamples, useCMIM, bvCol=1)
if ensemble:
attrs = ensemble
model = NaiveBayesClassifier(attrs, nPossibleValues, nQuantBounds, mEstimateVal=mEstimateVal,
useSigs=useSigs)
model.SetTrainingExamples(trainExamples)
model.trainModel()
return model
def CrossValidate(NBmodel, testExamples, appendExamples=0):
nTest = len(testExamples)
assert nTest, 'no test examples: %s' % str(testExamples)
badExamples = []
nBad = 0
preds = NBmodel.ClassifyExamples(testExamples, appendExamples)
assert len(preds) == nTest
for i in range(nTest):
testEg = testExamples[i]
trueRes = testEg[-1]
res = preds[i]
if (trueRes != res):
badExamples.append(testEg)
nBad += 1
return float(nBad) / nTest, badExamples
def CrossValidationDriver(examples, attrs, nPossibleValues, nQuantBounds, mEstimateVal=0.0,
holdOutFrac=0.3, modelBuilder=makeNBClassificationModel, silent=0,
calcTotalError=0, **kwargs):
nTot = len(examples)
if not kwargs.get('replacementSelection', 0):
testIndices, trainIndices = SplitData.SplitIndices(nTot, holdOutFrac, silent=1, legacy=1,
replacement=0)
else:
testIndices, trainIndices = SplitData.SplitIndices(nTot, holdOutFrac, silent=1, legacy=0,
replacement=1)
trainExamples = [examples[x] for x in trainIndices]
testExamples = [examples[x] for x in testIndices]
NBmodel = modelBuilder(trainExamples, attrs, nPossibleValues, nQuantBounds, mEstimateVal,
**kwargs)
if not calcTotalError:
xValError, _ = CrossValidate(NBmodel, testExamples, appendExamples=1)
else:
xValError, _ = CrossValidate(NBmodel, examples, appendExamples=0)
if not silent:
print('Validation error was %%%4.2f' % (100 * xValError))
NBmodel._trainIndices = trainIndices
return NBmodel, xValError
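# A minimal usage sketch (hypothetical data shapes, mirroring the pattern used in
# rdkit/ML/NaiveBayes/UnitTestNB.py): rows look like [name, var1, ..., varN, label].
#   attrs = list(range(1, nVars + 1))
#   nPossibleValues = [0] + [3] * nVars + [2]
#   nQuantBounds = [0] + [2] * nVars + [0]
#   model, err = CrossValidationDriver(examples, attrs, nPossibleValues, nQuantBounds, silent=1)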
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/NaiveBayes/CrossValidate.py",
"copies": "4",
"size": "2697",
"license": "bsd-3-clause",
"hash": -4920573875304774000,
"line_mean": 32.7125,
"line_max": 100,
"alpha_frac": 0.6792732666,
"autogenerated": false,
"ratio": 3.6844262295081966,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6363699496108197,
"avg_score": null,
"num_lines": null
} |
import Chem
from Chem.Draw.MolDrawing import MolDrawing
from Chem import TemplateAlign
from sping.SVG.pidSVG import SVGCanvas as Canvas
from mod_python import apache
from utils import cactvs
import sys,os,tempfile
def gif(req,smiles,width=100,height=100,highlight='[]',frame=0,
dblSize=0,**kwargs):
req.content_type='image/gif'
width=int(width)
height=int(height)
frame=int(frame)
dblSize = int(dblSize)
# FIX: unsafe:
highlight = eval(highlight)
imgD = ''
if smiles:
fName = tempfile.mktemp('.gif')
cactvs.SmilesToGif(smiles,fName,(width,height),dblSize=dblSize,frame=frame)
if os.path.exists(fName):
imgD = open(fName,'rb').read()
try:
os.unlink(fName)
except OSError:
pass
return imgD
def svg(req,smiles,width=100,height=100,highlight='[]',frame=0,
template='',numbers=0,
**kwargs):
req.content_type='image/svg+xml'
width=int(width)
height=int(height)
frame=int(frame)
# FIX: unsafe:
highlight = eval(highlight)
imgD = ''
mol = None
if smiles:
mol = Chem.MolFromSmiles(smiles)
if mol:
if kwargs.get('kekulize',True):
Chem.Kekulize(mol)
if template and highlight:
try:
patt = Chem.MolFromSmiles(template)
Chem.Compute2DCoords(patt)
TemplateAlign.AlignMolToTemplate2D(mol,patt,highlight)
except Exception:
Chem.Compute2DCoords(mol)
else:
Chem.Compute2DCoords(mol)
canvas = Canvas(size=(width,height))
drawer = MolDrawing(canvas=canvas)
if numbers and numbers!='0':
drawer.includeAtomNumbers=True
drawer.atomNumberOffset=1
drawer.noCarbonSymbols = 1
drawer.AddMol(mol,highlightAtoms=highlight)
svg = canvas._txt+'</svg>'
else:
svg = ''
return svg
| {
"repo_name": "adalke/rdkit",
"path": "Web/RDExtras/MolImage.py",
"copies": "1",
"size": "1865",
"license": "bsd-3-clause",
"hash": -740125204472663900,
"line_mean": 23.8666666667,
"line_max": 79,
"alpha_frac": 0.6691689008,
"autogenerated": false,
"ratio": 3.134453781512605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.904991017535503,
"avg_score": 0.050742501391514976,
"num_lines": 75
} |
import os
import unittest
from rdkit import RDConfig
from rdkit.DataStructs import ExplicitBitVect
from rdkit.ML.Data import DataUtils
from rdkit.ML.NaiveBayes import CrossValidate
from rdkit.ML.NaiveBayes.ClassificationModel import NaiveBayesClassifier
class TestCase(unittest.TestCase):
def setUp(self):
DataUtils.InitRandomNumbers((25, 25))
def test1NaiveBayes(self):
fName = os.path.join(RDConfig.RDCodeDir, 'ML', 'NaiveBayes', 'test_data', 'stddata.csv')
data = DataUtils.TextFileToData(fName)
examples = data.GetNamedData()
nvars = data.GetNVars()
attrs = list(range(1, nvars + 1))
npvals = [0] + [3] * nvars + [2]
qBounds = [0] + [2] * nvars + [0]
mod, err = CrossValidate.CrossValidationDriver(examples, attrs, npvals, qBounds, silent=True)
self.assertAlmostEqual(mod._classProbs[0], 0.5000, 4)
self.assertAlmostEqual(mod._classProbs[1], 0.5000, 4)
self.assertAlmostEqual(mod._QBoundVals[1][0], -0.0360, 4)
self.assertAlmostEqual(mod._QBoundVals[1][1], 0.114)
self.assertAlmostEqual(mod._QBoundVals[2][0], -0.7022, 4)
self.assertAlmostEqual(mod._QBoundVals[2][1], -0.16635, 4)
self.assertAlmostEqual(mod._QBoundVals[3][0], -0.3659, 4)
self.assertAlmostEqual(mod._QBoundVals[3][1], 0.4305, 4)
self.assertAlmostEqual(err, 0.2121, 4)
mod, err = CrossValidate.CrossValidationDriver(examples, attrs, npvals, qBounds, silent=True,
calcTotalError=True)
self.assertAlmostEqual(mod._classProbs[0], 0.515151, 4)
self.assertAlmostEqual(mod._classProbs[1], 0.484848, 4)
self.assertAlmostEqual(mod._QBoundVals[1][0], -0.40315, 4)
self.assertAlmostEqual(mod._QBoundVals[1][1], 0.114)
self.assertAlmostEqual(mod._QBoundVals[2][0], -0.62185, 4)
self.assertAlmostEqual(mod._QBoundVals[2][1], -0.19965, 4)
self.assertAlmostEqual(mod._QBoundVals[3][0], 0.4305, 4)
self.assertAlmostEqual(mod._QBoundVals[3][1], 0.80305, 4)
self.assertAlmostEqual(err, 0.14563, 4)
mod, err = CrossValidate.CrossValidationDriver(examples, attrs, npvals, qBounds, silent=True,
replacementSelection=True)
self.assertAlmostEqual(mod._classProbs[0], 0.5131578, 4)
self.assertAlmostEqual(mod._classProbs[1], 0.4868421, 4)
self.assertAlmostEqual(mod._QBoundVals[1][0], -0.036, 4)
self.assertAlmostEqual(mod._QBoundVals[1][1], 0.93465, 4)
self.assertAlmostEqual(mod._QBoundVals[2][0], -0.6696, 4)
self.assertAlmostEqual(mod._QBoundVals[2][1], -0.19965, 4)
self.assertAlmostEqual(mod._QBoundVals[3][0], -1.06785, 4)
self.assertAlmostEqual(mod._QBoundVals[3][1], 0.4305, 4)
self.assertAlmostEqual(err, 0.3, 4)
def test2NaiveBayes(self):
fName = os.path.join(RDConfig.RDCodeDir, 'ML', 'NaiveBayes', 'test_data', 'stddata.csv')
data = DataUtils.TextFileToData(fName)
examples = data.GetNamedData()
nvars = data.GetNVars()
attrs = list(range(1, nvars + 1))
npvals = [0] + [3] * nvars + [2]
qBounds = [0] + [2] * nvars + [0]
mod, err = CrossValidate.CrossValidationDriver(examples, attrs, npvals, qBounds,
mEstimateVal=20.0, silent=True)
self.assertTrue(isinstance(mod, NaiveBayesClassifier))
self.assertAlmostEqual(err, 0.1818, 4)
self.assertEqual(mod.GetName(), '')
mod.SetName('modelName')
self.assertEqual(mod.GetName(), 'modelName')
mod.NameModel(None)
self.assertEqual(mod.GetName(), 'NaiveBayesClassifier')
self.assertGreater(len(mod.GetExamples()), 0)
self.assertGreater(len(mod.GetTrainingExamples()), 0)
self.assertEqual(sorted(mod.GetTrainingExamples() + mod.GetExamples()), sorted(examples))
def test3(self):
examples = [
['a', 1, 0, 1, 0, 1],
['b', 1, 0, 0, 0, 1],
['c', 1, 0, 1, 0, 0],
['d', 0, 1, 1, 0, 0],
['e', 0, 1, 1, 1, 0],
]
nvars = len(examples[0]) - 2
attrs = list(range(1, nvars + 1))
npvals = [0] + [2] * nvars + [2]
qBounds = [0] + [0] * nvars + [0]
mdl = CrossValidate.makeNBClassificationModel(examples, attrs, npvals, qBounds)
nWrong = 0
for eg in examples:
p = mdl.ClassifyExample(eg)
if p != eg[-1]:
nWrong += 1
self.assertEqual(nWrong, 1)
bitEx = []
for eg in examples:
newEg = [eg[0], None, eg[-1]]
bv = ExplicitBitVect(nvars)
for i in range(nvars):
if eg[i + 1]:
bv.SetBit(i)
newEg[1] = bv
bitEx.append(newEg)
attrs = list(range(nvars))
mdl2 = CrossValidate.makeNBClassificationModel(bitEx, attrs, npvals, qBounds, useSigs=True)
nWrong = 0
for eg in bitEx:
p = mdl2.ClassifyExample(eg)
if p != eg[-1]:
nWrong += 1
self.assertEqual(nWrong, 1)
# now compare:
for i in range(len(bitEx)):
eg = examples[i]
p1 = mdl.ClassifyExample(eg)
bitEg = bitEx[i]
p2 = mdl2.ClassifyExample(bitEg)
self.assertEqual(p1, p2)
v1 = mdl.GetClassificationDetails()
v2 = mdl.GetClassificationDetails()
self.assertAlmostEqual(v1, v2, 4)
def test4(self):
examples = [
['a', 1, 0, 1, 0, 1],
['b', 1, 0, 0, 0, 1],
['c', 1, 0, 1, 0, 0],
['d', 0, 1, 1, 0, 0],
['e', 0, 1, 1, 1, 0],
]
nvars = len(examples[0]) - 2
origNVars = nvars
nvars = 10
npvals = [0] + [2] * nvars + [2]
qBounds = [0] + [0] * nvars + [0]
bitEx = []
for eg in examples:
newEg = [eg[0], None, eg[-1]]
bv = ExplicitBitVect(nvars)
for i in range(origNVars):
if eg[i + 1]:
bv.SetBit(i)
# this bit will yield perfect accuracy if
# the attrs argument isn't being used properly:
if eg[-1]:
bv.SetBit(origNVars)
newEg[1] = bv
bitEx.append(newEg)
attrs = list(range(origNVars))
mdl2 = CrossValidate.makeNBClassificationModel(bitEx, attrs, npvals, qBounds, useSigs=True)
nWrong = 0
for eg in bitEx:
p = mdl2.ClassifyExample(eg)
if p != eg[-1]:
nWrong += 1
self.assertEqual(nWrong, 1)
def _test5(self): # disabled because CMIM was removed # pragma: nocover
examples = [
['a', 1, 0, 1, 0, 1, 1, 0, 1],
['b', 1, 0, 0, 0, 1, 0, 0, 1],
['c', 1, 0, 1, 0, 1, 1, 0, 0],
['d', 0, 1, 1, 0, 1, 0, 0, 0],
['e', 0, 1, 1, 1, 0, 1, 0, 0],
]
nvars = len(examples[0]) - 2
npvals = [0] + [2] * nvars + [2]
qBounds = [0] + [0] * nvars + [0]
bitEx = []
for eg in examples:
newEg = [eg[0], None, eg[-1]]
bv = ExplicitBitVect(nvars)
for i in range(nvars):
if eg[i + 1]:
bv.SetBit(i)
# this bit will yield perfect accuracy if
# the attrs argument isn't being used properly:
newEg[1] = bv
bitEx.append(newEg)
attrs = list(range(nvars))
mdl2 = CrossValidate.makeNBClassificationModel(bitEx, attrs, npvals, qBounds, useSigs=True,
useCMIM=2)
nWrong = 0
for eg in bitEx:
p = mdl2.ClassifyExample(eg)
if p != eg[-1]:
nWrong += 1
self.assertEqual(nWrong, 1)
if __name__ == '__main__': # pragma: nocover
unittest.main()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/NaiveBayes/UnitTestNB.py",
"copies": "11",
"size": "7351",
"license": "bsd-3-clause",
"hash": 3094732866560428000,
"line_mean": 32.7201834862,
"line_max": 97,
"alpha_frac": 0.597469732,
"autogenerated": false,
"ratio": 2.9869971556277934,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9084466887627793,
"avg_score": null,
"num_lines": null
} |
from rdkit import RDConfig
import unittest,sys,os
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures,AllChem
import EmbedLib
import gzip
from rdkit import DistanceGeometry as DG
from rdkit import Geometry
import Pharmacophore
def feq(n1,n2,tol=1e-5):
return abs(n1-n2)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
self.fdefBlock = \
"""DefineFeature HAcceptor1 [N,O;H0]
Family HBondAcceptor
Weights 1.0
EndFeature
DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
EndFeature
DefineFeature Aromatic1 c1ccccc1
Family Aromatic
Weights 1.0,1.0,1.0,1.0,1.0,1.0
EndFeature\n"""
self.featFactory = ChemicalFeatures.BuildFeatureFactoryFromString(self.fdefBlock)
self.feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1',
Geometry.Point3D(2.65, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1',
Geometry.Point3D(5.12, 0.908, 0.0)),
]
self.pcophore=Pharmacophore.Pharmacophore(self.feats)
def test1Basics(self):
pcophore = self.pcophore
self.assertTrue(len(pcophore.getFeatures())==3)
self.assertTrue(pcophore.getFeature(0))
self.assertTrue(pcophore.getFeature(1))
self.assertTrue(pcophore.getFeature(2))
self.assertRaises(IndexError,pcophore.getFeature,3)
def test2BoundSetting(self):
pcophore = self.pcophore
pcophore.setUpperBound(0,1,3.0)
self.assertTrue(feq(pcophore.getUpperBound(0,1),3.0))
self.assertTrue(feq(pcophore.getUpperBound(1,0),3.0))
pcophore.setUpperBound(1,0,5.0)
self.assertTrue(feq(pcophore.getUpperBound(0,1),5.0))
self.assertTrue(feq(pcophore.getUpperBound(1,0),5.0))
self.assertRaises(IndexError,pcophore.setUpperBound,0,3,2.0)
self.assertRaises(ValueError,pcophore.setUpperBound,0,3,2.0,checkBounds=True)
self.assertRaises(IndexError,pcophore.setUpperBound,3,0,2.0)
self.assertRaises(ValueError,pcophore.setUpperBound,3,0,2.0,checkBounds=True)
pcophore.setLowerBound(0,1,2.0)
self.assertTrue(feq(pcophore.getLowerBound(0,1),2.0))
self.assertTrue(feq(pcophore.getLowerBound(1,0),2.0))
pcophore.setLowerBound(1,0,3.0)
self.assertTrue(feq(pcophore.getLowerBound(0,1),3.0))
self.assertTrue(feq(pcophore.getLowerBound(1,0),3.0))
self.assertRaises(IndexError,pcophore.setLowerBound,0,3,2.0)
self.assertRaises(ValueError,pcophore.setLowerBound,0,3,2.0,checkBounds=True)
self.assertRaises(IndexError,pcophore.setLowerBound,3,0,2.0)
self.assertRaises(ValueError,pcophore.setLowerBound,3,0,2.0,checkBounds=True)
def test3Bound2DSetting(self):
pcophore = self.pcophore
pcophore.setUpperBound2D(0,1,3)
self.assertTrue(pcophore.getUpperBound2D(0,1)==3)
self.assertTrue(pcophore.getUpperBound2D(1,0)==3)
pcophore.setUpperBound2D(1,0,5)
self.assertTrue(pcophore.getUpperBound2D(0,1)==5)
self.assertTrue(pcophore.getUpperBound2D(1,0)==5)
self.assertRaises(IndexError,pcophore.setUpperBound2D,0,3,2)
self.assertRaises(ValueError,pcophore.setUpperBound2D,0,3,2,checkBounds=True)
self.assertRaises(IndexError,pcophore.setUpperBound2D,3,0,2)
self.assertRaises(ValueError,pcophore.setUpperBound2D,3,0,2,checkBounds=True)
pcophore.setLowerBound2D(0,1,3)
self.assertTrue(pcophore.getLowerBound2D(0,1)==3)
self.assertTrue(pcophore.getLowerBound2D(1,0)==3)
pcophore.setLowerBound2D(1,0,5)
self.assertTrue(pcophore.getLowerBound2D(0,1)==5)
self.assertTrue(pcophore.getLowerBound2D(1,0)==5)
self.assertRaises(IndexError,pcophore.setLowerBound2D,0,3,2)
self.assertRaises(ValueError,pcophore.setLowerBound2D,0,3,2,checkBounds=True)
self.assertRaises(IndexError,pcophore.setLowerBound2D,3,0,2)
self.assertRaises(ValueError,pcophore.setLowerBound2D,3,0,2,checkBounds=True)
def test4Github252(self):
fdef= os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef')
feat_factory = ChemicalFeatures.BuildFeatureFactory(fdef)
m1 = Chem.MolFromSmiles('Cc1ccccc1')
feats = feat_factory.GetFeaturesForMol(m1)
try:
pcophore = Pharmacophore.Pharmacophore(feats)
ok=False
except:
ok=True
self.assertTrue(ok)
AllChem.Compute2DCoords(m1)
try:
pcophore = Pharmacophore.Pharmacophore(feats)
ok=True
except:
ok=False
self.assertTrue(ok)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/Pharm3D/UnitTestPharmacophore.py",
"copies": "3",
"size": "5219",
"license": "bsd-3-clause",
"hash": -6000396559295810000,
"line_mean": 37.375,
"line_max": 85,
"alpha_frac": 0.6857635562,
"autogenerated": false,
"ratio": 2.8834254143646407,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5069188970564641,
"avg_score": null,
"num_lines": null
} |
"""unit testing code for the AnalyzeComposite functionality
"""
from rdkit import RDConfig
import unittest,os
from rdkit.ML import AnalyzeComposite
from rdkit.six.moves import cPickle as pickle
def feq(a,b,tol=1e-4):
if abs(a-b)>tol: return 0
else: return 1
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'ML','test_data')
def test1_Issue163(self):
name1 = os.path.join(self.baseDir,'humanoral.1.pkl')
try:
with open(name1,'rb') as pklF:
c1 = pickle.load(pklF)
except Exception:
c1 = None
self.assertTrue(c1)
name2 = os.path.join(self.baseDir,'humanoral.2.pkl')
try:
with open(name2, 'rb') as pklF:
c2 = pickle.load(pklF)
except Exception:
c2 = None
self.assertTrue(c2)
try:
res = AnalyzeComposite.ProcessIt([c1,c2],verbose=-1)
except Exception:
import traceback
traceback.print_exc()
ok=0
else:
ok=1
self.assertTrue(ok)
self.assertTrue(res[0][0]=='BALABANJ')
self.assertTrue(res[1][0]=='BERTZCT')
self.assertTrue(res[-1][0]=='FR_ALLYLIC_OXID')
for entry in res:
self.assertTrue(len(entry)==5)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/ML/UnitTestAnalyzeComposite.py",
"copies": "1",
"size": "1598",
"license": "bsd-3-clause",
"hash": 9045071593346077000,
"line_mean": 25.1967213115,
"line_max": 68,
"alpha_frac": 0.6495619524,
"autogenerated": false,
"ratio": 3.1089494163424125,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9028037522586181,
"avg_score": 0.04609476923124618,
"num_lines": 61
} |
from __future__ import print_function
from rdkit import RDConfig
import os,sys,copy
import unittest
import math
from rdkit import Chem
from rdkit.Chem import rdMolAlign,rdMolTransforms,rdMolDescriptors,rdDistGeom,ChemicalForceFields
def lstFeq(l1, l2, tol=1.e-4):
if (len(list(l1)) != len(list(l2))):
return 0
for i in range(len(list(l1))):
if not feq(l1[i], l2[i], tol):
return 0
return 1
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1Basic(self):
file1 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir.mol')
file2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_conf.mol')
mol1 = Chem.MolFromMolFile(file1)
mol2 = Chem.MolFromMolFile(file2)
rmsd = rdMolAlign.AlignMol(mol2, mol1)
self.assertTrue(feq(rmsd, 0.6578))
file3 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_trans.mol')
mol3 = Chem.MolFromMolFile(file3)
conf2 = mol2.GetConformer()
conf3 = mol3.GetConformer()
for i in range(mol2.GetNumAtoms()):
self.assertTrue(lstFeq(conf2.GetAtomPosition(i), conf3.GetAtomPosition(i)))
rmsd, trans = rdMolAlign.GetAlignmentTransform(mol2, mol1)
self.assertAlmostEqual(rmsd, 0.6579,4)
def test2AtomMap(self) :
atomMap = ((18,27), (13,23), (21,14), (24,7), (9,19), (16,30))
file1 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir.mol')
file2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_conf.mol')
mol1 = Chem.MolFromMolFile(file1)
mol2 = Chem.MolFromMolFile(file2)
rmsd = rdMolAlign.AlignMol(mol2, mol1, 0, 0, atomMap)
self.assertAlmostEqual(rmsd, 0.8525,4)
def test3Weights(self):
atomMap = ((18,27), (13,23), (21,14), (24,7), (9,19), (16,30))
file1 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir.mol')
file2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_conf.mol')
mol1 = Chem.MolFromMolFile(file1)
mol2 = Chem.MolFromMolFile(file2)
wts = (1.0, 1.0, 1.0, 1.0, 1.0, 2.0)
rmsd = rdMolAlign.AlignMol(mol2, mol1, 0, 0, atomMap, wts)
self.assertAlmostEqual(rmsd, 0.9513,4)
def test4AlignConfs(self):
mol = Chem.MolFromSmiles('C1CC1CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
cids = rdDistGeom.EmbedMultipleConfs(mol,10,30,100)
#writer = Chem.SDWriter('mol_899.sdf')
for cid in cids:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, confId=cid)
ff.Initialize()
more = 1
while more :
more = ff.Minimize()
# FIX: this should not be necessary but somehow more comes out to be 0
# even with the structure still being crappy
ff.Minimize()
aids = [12, 13, 14, 15, 16, 17, 18]
rdMolAlign.AlignMolConformers(mol, aids)
    # now test that the atom locations of these atoms are consistent
confs = mol.GetConformers()
for aid in aids:
mpos = 0
for i,conf in enumerate(confs):
if (i == 0):
mpos = list(conf.GetAtomPosition(aid))
continue
else :
pos = list(conf.GetAtomPosition(aid))
self.assertTrue(lstFeq(mpos, pos, .5))
# now test that we can get a list of RMS values
rmsvals = []
rdMolAlign.AlignMolConformers(mol, aids, RMSlist=rmsvals)
self.assertTrue((len(rmsvals)==mol.GetNumConformers()-1))
# make sure something sensible happens if we provide a stupid
# argument:
rmsvals = 4
self.assertRaises(AttributeError,rdMolAlign.AlignMolConformers,mol, atomIds=aids, RMSlist=rmsvals)
def test5MMFFO3A(self):
sdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2.sdf')
# alignedSdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
# 'MolAlign', 'test_data', 'ref_e2_pyMMFFO3A.sdf')
molS = Chem.SDMolSupplier(sdf, True, False)
# molW = Chem.SDWriter(alignedSdf)
refNum = 48
refMol = molS[refNum]
cumScore = 0.0
cumMsd = 0.0
refPyMP = ChemicalForceFields.MMFFGetMoleculeProperties(refMol)
for prbMol in molS:
prbPyMP = ChemicalForceFields.MMFFGetMoleculeProperties(prbMol)
pyO3A = rdMolAlign.GetO3A(prbMol, refMol, prbPyMP, refPyMP)
cumScore += pyO3A.Score()
rmsd = pyO3A.Align()
cumMsd += rmsd * rmsd
# molW.write(prbMol)
cumMsd /= len(molS)
self.assertAlmostEqual(cumScore,6942,0)
self.assertAlmostEqual(math.sqrt(cumMsd),.345,3)
def test6MMFFO3A(self):
" now test where the mmff parameters are generated on call "
sdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2.sdf')
molS = Chem.SDMolSupplier(sdf, True, False)
refNum = 48
refMol = molS[refNum]
cumScore = 0.0
cumMsd = 0.0
for prbMol in molS:
pyO3A = rdMolAlign.GetO3A(prbMol, refMol)
cumScore += pyO3A.Score()
rmsd = pyO3A.Align()
cumMsd += rmsd * rmsd
cumMsd /= len(molS)
self.assertAlmostEqual(cumScore,6942,0)
self.assertAlmostEqual(math.sqrt(cumMsd),.345,3)
def test7MMFFO3A(self):
" make sure we generate an error if parameters are missing (github issue 158) "
m1 = Chem.MolFromSmiles('c1ccccc1Cl')
rdDistGeom.EmbedMolecule(m1)
m2 = Chem.MolFromSmiles('c1ccccc1B(O)O')
    rdDistGeom.EmbedMolecule(m2)
self.assertRaises(ValueError,lambda :rdMolAlign.GetO3A(m1, m2))
self.assertRaises(ValueError,lambda :rdMolAlign.GetO3A(m2, m1))
def test8MMFFO3A(self):
" test MMFFO3A with constraints "
#we superimpose two identical coplanar 4-phenylpyridines:
#1) the usual way
#2) forcing the pyridine nitrogen to match with the para
# carbon of the phenyl ring
m = Chem.MolFromSmiles('n1ccc(cc1)-c1ccccc1')
m1 = Chem.AddHs(m)
rdDistGeom.EmbedMolecule(m1)
mp = ChemicalForceFields.MMFFGetMoleculeProperties(m1)
ff = ChemicalForceFields.MMFFGetMoleculeForceField(m1, mp)
ff.Minimize()
sub1 = m1.GetSubstructMatch(Chem.MolFromSmarts('nccc-cccc'))
nIdx = sub1[0]
cIdx = sub1[-1]
dihe = sub1[2:6]
rdMolTransforms.SetDihedralDeg(m1.GetConformer(),
dihe[0], dihe[1], dihe[2], dihe[3], 0)
m2 = copy.copy(m1)
rdMolAlign.RandomTransform(m2)
m3 = copy.copy(m2)
pyO3A = rdMolAlign.GetO3A(m2, m1)
pyO3A.Align()
d = m2.GetConformer().GetAtomPosition(cIdx). \
Distance(m1.GetConformer().GetAtomPosition(cIdx))
self.assertAlmostEqual(d, 0, 0)
pyO3A = rdMolAlign.GetO3A(m3, m1, constraintMap = [[cIdx, nIdx]])
pyO3A.Align()
d = m3.GetConformer().GetAtomPosition(cIdx). \
Distance(m1.GetConformer().GetAtomPosition(cIdx))
self.assertAlmostEqual(d, 7, 0)
#alignedSdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
# 'MolAlign', 'test_data',
# '4-phenylpyridines_MMFFO3A.sdf')
#sdW = Chem.SDWriter(alignedSdf)
#sdW.write(m1)
#sdW.write(m2)
#sdW.write(m3)
#sdW.close()
def test9MMFFO3A(self):
" test MMFFO3A with variable weight constraints followed by local-only optimization "
sdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2.sdf')
# alignedSdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
# 'MolAlign', 'test_data', 'localonly.sdf')
molS = Chem.SDMolSupplier(sdf, True, False)
refNum = 23
prbNum = 32
refMol = molS[refNum]
prbMol = molS[prbNum]
refPyMP = ChemicalForceFields.MMFFGetMoleculeProperties(refMol)
prbPyMP = ChemicalForceFields.MMFFGetMoleculeProperties(prbMol)
refSIdx = refMol.GetSubstructMatch(Chem.MolFromSmarts('S'))[0]
prbOIdx = prbMol.GetSubstructMatch(Chem.MolFromSmarts('O'))[0]
# molW = Chem.SDWriter(alignedSdf)
# molW.write(refMol)
weights = [10.0, 100.0]
distOS = [3.2, 0.3]
for i in [0, 1]:
pyO3A = rdMolAlign.GetO3A(prbMol, refMol,
prbPyMP, refPyMP, constraintMap = [[prbOIdx, refSIdx]],
constraintWeights = [weights[i]])
pyO3A.Align()
# molW.write(prbMol)
pyO3A = rdMolAlign.GetO3A(prbMol, refMol,
prbPyMP, refPyMP, options = 4)
pyO3A.Align()
# molW.write(prbMol)
d = prbMol.GetConformer().GetAtomPosition(prbOIdx). \
Distance(refMol.GetConformer().GetAtomPosition(refSIdx))
self.assertAlmostEqual(d, distOS[i], 1)
# molW.close()
def test10CrippenO3A(self):
sdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2.sdf')
alignedSdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2_pyCrippenO3A.sdf')
molS = Chem.SDMolSupplier(sdf, True, False)
molW = Chem.SDWriter(alignedSdf)
refNum = 48
refMol = molS[refNum]
cumScore = 0.0
cumMsd = 0.0
refList = rdMolDescriptors._CalcCrippenContribs(refMol, True)
for prbMol in molS:
prbList = rdMolDescriptors._CalcCrippenContribs(prbMol, True)
pyO3A = rdMolAlign.GetCrippenO3A(prbMol, refMol, prbList, refList)
cumScore += pyO3A.Score()
rmsd = pyO3A.Align()
cumMsd += rmsd * rmsd
molW.write(prbMol)
cumMsd /= len(molS)
self.assertAlmostEqual(cumScore,4918,0)
self.assertAlmostEqual(math.sqrt(cumMsd),.304,3)
def test11CrippenO3A(self):
" now test where the Crippen parameters are generated on call "
sdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2.sdf')
molS = Chem.SDMolSupplier(sdf, True, False)
refNum = 48
refMol = molS[refNum]
cumScore = 0.0
cumMsd = 0.0
for prbMol in molS:
pyO3A = rdMolAlign.GetCrippenO3A(prbMol, refMol)
cumScore += pyO3A.Score()
rmsd = pyO3A.Trans()[0]
cumMsd += rmsd * rmsd
cumMsd /= len(molS)
self.assertAlmostEqual(cumScore,4918,0)
self.assertAlmostEqual(math.sqrt(cumMsd),.304,3)
def test12CrippenO3A(self):
" test CrippenO3A with constraints "
#we superimpose two identical coplanar 4-phenylpyridines:
#1) the usual way
#2) forcing the pyridine nitrogen to match with the para
# carbon of the phenyl ring
m = Chem.MolFromSmiles('n1ccc(cc1)-c1ccccc1')
m1 = Chem.AddHs(m)
rdDistGeom.EmbedMolecule(m1)
mp = ChemicalForceFields.MMFFGetMoleculeProperties(m1)
ff = ChemicalForceFields.MMFFGetMoleculeForceField(m1, mp)
ff.Minimize()
sub1 = m1.GetSubstructMatch(Chem.MolFromSmarts('nccc-cccc'))
nIdx = sub1[0]
cIdx = sub1[-1]
dihe = sub1[2:6]
rdMolTransforms.SetDihedralDeg(m1.GetConformer(),
dihe[0], dihe[1], dihe[2], dihe[3], 0)
m2 = copy.copy(m1)
rdMolAlign.RandomTransform(m2)
m3 = copy.copy(m2)
pyO3A = rdMolAlign.GetCrippenO3A(m2, m1)
pyO3A.Align()
d = m2.GetConformer().GetAtomPosition(cIdx). \
Distance(m1.GetConformer().GetAtomPosition(cIdx))
self.assertAlmostEqual(d, 0, 0)
pyO3A = rdMolAlign.GetCrippenO3A(m3, m1, constraintMap = [[cIdx, nIdx]])
pyO3A.Align()
d = m3.GetConformer().GetAtomPosition(cIdx). \
Distance(m1.GetConformer().GetAtomPosition(cIdx))
self.assertAlmostEqual(d, 7, 0)
#alignedSdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
# 'MolAlign', 'test_data',
# '4-phenylpyridines_CrippenO3A.sdf')
#sdW = Chem.SDWriter(alignedSdf)
#sdW.write(m1)
#sdW.write(m2)
#sdW.write(m3)
#sdW.close()
def test13CrippenO3A(self):
" test CrippenO3A with variable weight constraints followed by local-only optimization "
sdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', 'ref_e2.sdf')
# alignedSdf = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
# 'MolAlign', 'test_data', 'localonly.sdf')
molS = Chem.SDMolSupplier(sdf, True, False)
refNum = 23
prbNum = 32
refMol = molS[refNum]
prbMol = molS[prbNum]
refPyMP = ChemicalForceFields.MMFFGetMoleculeProperties(refMol)
prbPyMP = ChemicalForceFields.MMFFGetMoleculeProperties(prbMol)
refSIdx = refMol.GetSubstructMatch(Chem.MolFromSmarts('S'))[0]
prbOIdx = prbMol.GetSubstructMatch(Chem.MolFromSmarts('O'))[0]
# molW = Chem.SDWriter(alignedSdf)
# molW.write(refMol)
weights = [0.1, 100.0]
distOS = [2.7, 0.4]
for i in [0, 1]:
pyO3A = rdMolAlign.GetCrippenO3A(prbMol, refMol,
constraintMap = [[prbOIdx, refSIdx]],
constraintWeights = [weights[i]])
pyO3A.Align()
# molW.write(prbMol)
pyO3A = rdMolAlign.GetCrippenO3A(prbMol, refMol, options = 4)
pyO3A.Align()
# molW.write(prbMol)
d = prbMol.GetConformer().GetAtomPosition(prbOIdx). \
Distance(refMol.GetConformer().GetAtomPosition(refSIdx))
self.assertAlmostEqual(d, distOS[i], 1)
# molW.close()
def test14Github385(self):
""" test github issue 385:
O3A code generating incorrect results for multiconformer molecules
"""
def _multiConfFromSmiles(smiles, nConfs=10, maxIters=500):
"""Adds hydrogens to molecule and optimises a chosen number of conformers. Returns the optimised RDKit mol."""
idea = Chem.MolFromSmiles(smiles)
idea = Chem.AddHs(idea)
confs = rdDistGeom.EmbedMultipleConfs(idea, nConfs)
for conf in confs:
opt = ChemicalForceFields.MMFFOptimizeMolecule(idea, confId=conf, maxIters=maxIters)
return idea
def _confsToAlignedMolsList(multiConfMol):
"""Input is a multiconformer RDKit mol. Output is an aligned set of conformers as a list of RDKit mols."""
rdMolAlign.AlignMolConformers(multiConfMol)
ms = []
cids = [x.GetId() for x in multiConfMol.GetConformers()]
for cid in cids:
newmol = Chem.Mol(multiConfMol)
for ocid in cids:
if ocid==cid:
continue
newmol.RemoveConformer(ocid)
ms.append(newmol)
return ms
reference = Chem.MolFromSmiles("c1ccccc1N2CCC(NS(=O)(=O)C(F)(F)F)CC2")
reference = Chem.AddHs(reference)
rdDistGeom.EmbedMolecule(reference)
idea1 = _multiConfFromSmiles("c1ccccc1C2CCCCC2", 10)
idea1_mols = _confsToAlignedMolsList(idea1)
cids = [x.GetId() for x in idea1.GetConformers()]
refParams = ChemicalForceFields.MMFFGetMoleculeProperties(reference)
prbParams = ChemicalForceFields.MMFFGetMoleculeProperties(idea1)
for i in range(len(cids)):
o3a1 = rdMolAlign.GetO3A(idea1_mols[i],reference,prbParams,refParams)
score1 = o3a1.Score()
o3a2 = rdMolAlign.GetO3A(idea1,reference,prbParams,refParams,prbCid=cids[i])
score2 = o3a2.Score()
self.assertAlmostEqual(score1,score2,3)
if __name__ == '__main__':
print("Testing MolAlign Wrappers")
unittest.main()
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "Code/GraphMol/MolAlign/Wrap/testMolAlign.py",
"copies": "1",
"size": "16282",
"license": "bsd-3-clause",
"hash": -2526764635969939000,
"line_mean": 38.5194174757,
"line_max": 121,
"alpha_frac": 0.6134381526,
"autogenerated": false,
"ratio": 2.9023172905525847,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40157554431525844,
"avg_score": null,
"num_lines": null
} |
from rdkit import RDConfig
import os,sys
import unittest
from rdkit import Chem
from rdkit.Chem import rdMolAlign,rdDistGeom,ChemicalForceFields
def lstFeq(l1, l2, tol=1.e-4):
if (len(list(l1)) != len(list(l2))):
return 0
for i in range(len(list(l1))):
if not feq(l1[i], l2[i], tol):
return 0
return 1
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1Basic(self):
file1 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir.mol')
file2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_conf.mol')
mol1 = Chem.MolFromMolFile(file1)
mol2 = Chem.MolFromMolFile(file2)
rmsd = rdMolAlign.AlignMol(mol2, mol1)
self.failUnless(feq(rmsd, 0.6578))
file3 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_trans.mol')
mol3 = Chem.MolFromMolFile(file3)
conf2 = mol2.GetConformer()
conf3 = mol3.GetConformer()
for i in range(mol2.GetNumAtoms()):
self.failUnless(lstFeq(conf2.GetAtomPosition(i), conf3.GetAtomPosition(i)))
rmsd, trans = rdMolAlign.GetAlignmentTransform(mol2, mol1)
self.failUnless(feq(rmsd, 0.6578))
def test2AtomMap(self) :
atomMap = ((18,27), (13,23), (21,14), (24,7), (9,19), (16,30))
file1 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir.mol')
file2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_conf.mol')
mol1 = Chem.MolFromMolFile(file1)
mol2 = Chem.MolFromMolFile(file2)
rmsd = rdMolAlign.AlignMol(mol2, mol1, 0, 0, atomMap)
self.failUnless(feq(rmsd, 0.8525))
def test3Weights(self):
atomMap = ((18,27), (13,23), (21,14), (24,7), (9,19), (16,30))
file1 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir.mol')
file2 = os.path.join(RDConfig.RDBaseDir,'Code','GraphMol',
'MolAlign', 'test_data', '1oir_conf.mol')
mol1 = Chem.MolFromMolFile(file1)
mol2 = Chem.MolFromMolFile(file2)
wts = (1.0, 1.0, 1.0, 1.0, 1.0, 2.0)
rmsd = rdMolAlign.AlignMol(mol2, mol1, 0, 0, atomMap, wts)
self.failUnless(feq(rmsd, 0.9513))
def test4AlignConfs(self):
mol = Chem.MolFromSmiles('C1CC1CNc(n2)nc(C)cc2Nc(cc34)ccc3[nH]nc4')
cids = rdDistGeom.EmbedMultipleConfs(mol,10,30,100)
writer = Chem.SDWriter('mol_899.sdf')
for cid in cids:
print 'cid:',repr(cid)
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol, confId=cid)
ff.Initialize()
more = 1
while more :
more = ff.Minimize()
# FIX: this should not be necessary but somehow more comes out to be 0
# even with the structure still being crappy
ff.Minimize()
aids = [12, 13, 14, 15, 16, 17, 18]
rdMolAlign.AlignMolConformers(mol, aids)
    # now test that the atom locations of these atoms are consistent
confs = mol.GetConformers()
for aid in aids:
mpos = 0
for i,conf in enumerate(confs):
if (i == 0):
mpos = list(conf.GetAtomPosition(aid))
continue
else :
pos = list(conf.GetAtomPosition(aid))
self.failUnless(lstFeq(mpos, pos, .5))
if __name__ == '__main__':
print "Testing MolAlign Wrappers"
unittest.main()
| {
"repo_name": "rdkit/rdkit-orig",
"path": "Code/GraphMol/MolAlign/Wrap/testMolAlign.py",
"copies": "1",
"size": "3881",
"license": "bsd-3-clause",
"hash": 4433754164010141700,
"line_mean": 33.6517857143,
"line_max": 87,
"alpha_frac": 0.5766555012,
"autogenerated": false,
"ratio": 2.9648586707410236,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40415141719410236,
"avg_score": null,
"num_lines": null
} |
""" Contains an implementation of Topological-torsion fingerprints, as
described in:
R. Nilakantan, N. Bauman, J. S. Dixon, R. Venkataraghavan;
"Topological Torsion: A New Molecular Descriptor for SAR Applications.
Comparison with Other Descriptors" JCICS 27, 82-85 (1987).
"""
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import Utils
def pyScorePath(mol,path,size,atomCodes=None):
""" Returns a score for an individual path.
>>> m = Chem.MolFromSmiles('CCCCC')
>>> c1 = long(Utils.GetAtomCode(m.GetAtomWithIdx(0),1))
>>> c2 = long(Utils.GetAtomCode(m.GetAtomWithIdx(1),2))
>>> c3 = long(Utils.GetAtomCode(m.GetAtomWithIdx(2),2))
>>> c4 = long(Utils.GetAtomCode(m.GetAtomWithIdx(3),1))
>>> t = c1 | (c2 << rdMolDescriptors.AtomPairsParameters.codeSize) | (c3 << (rdMolDescriptors.AtomPairsParameters.codeSize*2)) | (c4 << (rdMolDescriptors.AtomPairsParameters.codeSize*3))
>>> pyScorePath(m,(0,1,2,3),4)==t
1
The scores are path direction independent:
>>> pyScorePath(m,(3,2,1,0),4)==t
1
>>> m = Chem.MolFromSmiles('C=CC(=O)O')
>>> c1 = long(Utils.GetAtomCode(m.GetAtomWithIdx(0),1))
>>> c2 = long(Utils.GetAtomCode(m.GetAtomWithIdx(1),2))
>>> c3 = long(Utils.GetAtomCode(m.GetAtomWithIdx(2),2))
>>> c4 = long(Utils.GetAtomCode(m.GetAtomWithIdx(4),1))
>>> t = c1 | (c2 << rdMolDescriptors.AtomPairsParameters.codeSize) | (c3 << (rdMolDescriptors.AtomPairsParameters.codeSize*2)) | (c4 << (rdMolDescriptors.AtomPairsParameters.codeSize*3))
>>> pyScorePath(m,(0,1,2,4),4)==t
1
"""
codes = [None]*size
for i in range(size):
if i==0 or i==(size-1):
sub = 1
else:
sub = 2
if not atomCodes:
codes[i] = Utils.GetAtomCode(mol.GetAtomWithIdx(path[i]),sub)
else:
base = atomCodes[path[i]]
codes[i]=base-sub
# "canonicalize" the code vector:
beg=0
end = len(codes)-1
while(beg < end):
if codes[beg] > codes[end]:
codes.reverse()
break
elif codes[beg]==codes[end]:
beg += 1
end -= 1
else:
break
accum = 0L
for i in range(size):
accum |= long(codes[i]) << (rdMolDescriptors.AtomPairsParameters.codeSize*i)
return accum
def ExplainPathScore(score,size=4):
"""
>>> m = Chem.MolFromSmiles('C=CC')
>>> score=pyScorePath(m,(0,1,2),3)
>>> ExplainPathScore(score,3)
(('C', 1, 0), ('C', 2, 1), ('C', 1, 1))
Again, it's order independent:
>>> score=pyScorePath(m,(2,1,0),3)
>>> ExplainPathScore(score,3)
(('C', 1, 0), ('C', 2, 1), ('C', 1, 1))
>>> m = Chem.MolFromSmiles('C=CO')
>>> score=pyScorePath(m,(0,1,2),3)
>>> ExplainPathScore(score,3)
(('C', 1, 1), ('C', 2, 1), ('O', 1, 0))
>>> m = Chem.MolFromSmiles('OC=CO')
>>> score=pyScorePath(m,(0,1,2,3),4)
>>> ExplainPathScore(score,4)
(('O', 1, 0), ('C', 2, 1), ('C', 2, 1), ('O', 1, 0))
>>> m = Chem.MolFromSmiles('CC=CO')
>>> score=pyScorePath(m,(0,1,2,3),4)
>>> ExplainPathScore(score,4)
(('C', 1, 0), ('C', 2, 1), ('C', 2, 1), ('O', 1, 0))
>>> m = Chem.MolFromSmiles('C=CC(=O)O')
>>> score=pyScorePath(m,(0,1,2,3),4)
>>> ExplainPathScore(score,4)
(('C', 1, 1), ('C', 2, 1), ('C', 3, 1), ('O', 1, 1))
>>> score=pyScorePath(m,(0,1,2,4),4)
>>> ExplainPathScore(score,4)
(('C', 1, 1), ('C', 2, 1), ('C', 3, 1), ('O', 1, 0))
>>> m = Chem.MolFromSmiles('OOOO')
>>> score=pyScorePath(m,(0,1,2),3)
>>> ExplainPathScore(score,3)
(('O', 1, 0), ('O', 2, 0), ('O', 2, 0))
>>> score=pyScorePath(m,(0,1,2,3),4)
>>> ExplainPathScore(score,4)
(('O', 1, 0), ('O', 2, 0), ('O', 2, 0), ('O', 1, 0))
"""
codeMask=(1<<rdMolDescriptors.AtomPairsParameters.codeSize)-1
res=[None]*size
#print '>>>>>>>>>>>',score,size,codeMask
for i in range(size):
if i==0 or i==(size-1):
sub = 1
else:
sub = 2
code = score&codeMask
#print i,code,score
score = score>>rdMolDescriptors.AtomPairsParameters.codeSize
symb,nBranch,nPi = Utils.ExplainAtomCode(code)
expl = symb,nBranch+sub,nPi
res[i] = expl
return tuple(res)
from rdkit.Chem.rdMolDescriptors import GetTopologicalTorsionFingerprint,GetHashedTopologicalTorsionFingerprint
GetTopologicalTorsionFingerprintAsIntVect=rdMolDescriptors.GetTopologicalTorsionFingerprint
def GetTopologicalTorsionFingerprintAsIds(mol,targetSize=4):
iv = GetTopologicalTorsionFingerprint(mol,targetSize)
res=[]
for k,v in iv.GetNonzeroElements().iteritems():
res.extend([k]*v)
res.sort()
return res
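# Illustrative usage sketch (not from the original module): shows how the wrapped
# fingerprinter and the id-expansion helper above are typically called together.
# The SMILES string below is an arbitrary example chosen for illustration.
def _exampleTorsionUsage():
  mol = Chem.MolFromSmiles('c1ccccc1CCO')
  # hashed variant gives a fixed-length count vector (default 2048 bits):
  hashedFp = GetHashedTopologicalTorsionFingerprint(mol)
  # the id list repeats each torsion code according to its count:
  ids = GetTopologicalTorsionFingerprintAsIds(mol)
  return hashedFp.GetTotalVal(), len(ids)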
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/AtomPairs/Torsions.py",
"copies": "2",
"size": "4994",
"license": "bsd-3-clause",
"hash": 586635135906475300,
"line_mean": 28.2046783626,
"line_max": 188,
"alpha_frac": 0.6177412895,
"autogenerated": false,
"ratio": 2.6763129689174705,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9120432701165966,
"avg_score": 0.0347243114503009,
"num_lines": 171
} |
from __future__ import division
from rdkit import rdBase
from rdkit.DataStructs import cDataStructs
__doc__ = cDataStructs.__doc__
from rdkit.DataStructs.cDataStructs import *
similarityFunctions = [
('Tanimoto', TanimotoSimilarity, ''),
("Dice", DiceSimilarity, ''),
("Cosine", CosineSimilarity, ''),
("Sokal", SokalSimilarity, ''),
("Russel", RusselSimilarity, ''),
("RogotGoldberg", RogotGoldbergSimilarity, ''),
("AllBit", AllBitSimilarity, ''),
("Kulczynski", KulczynskiSimilarity, ''),
("McConnaughey", McConnaugheySimilarity, ''),
("Asymmetric", AsymmetricSimilarity, ''),
("BraunBlanquet", BraunBlanquetSimilarity, ''),
]
def FingerprintSimilarity(fp1, fp2, metric=TanimotoSimilarity):
""" returns the calculated similarity between two fingerprints,
handles any folding that may need to be done to ensure that they
are compatible
"""
sz1 = fp1.GetNumBits()
sz2 = fp2.GetNumBits()
if sz1 < sz2:
fp2 = FoldFingerprint(fp2, sz2 // sz1)
elif sz2 < sz1:
fp1 = FoldFingerprint(fp1, sz1 // sz2)
return metric(fp1, fp2)
def FoldToTargetDensity(fp, density=0.3, minLength=64):
while fp.GetNumOnBits() / len(fp) > density and len(fp) // 2 > minLength:
fp = FoldFingerprint(fp, 2)
return fp
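# Illustrative usage sketch (not from the original module; the bit positions are
# arbitrary assumptions). It builds two ExplicitBitVects of different sizes so that
# FingerprintSimilarity has to fold the larger one before applying the metric.
def _exampleFingerprintSimilarity():
  fp1 = ExplicitBitVect(64)
  fp2 = ExplicitBitVect(128)
  for bit in (1, 5, 17):
    fp1.SetBit(bit)
    fp2.SetBit(bit)
  # pass Dice instead of the default Tanimoto just to show the metric argument:
  return FingerprintSimilarity(fp1, fp2, metric=DiceSimilarity)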
ExplicitBitVect.ToBitString = BitVectToText
SparseBitVect.ToBitString = BitVectToText
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/DataStructs/__init__.py",
"copies": "1",
"size": "1624",
"license": "bsd-3-clause",
"hash": 5260115597227195000,
"line_mean": 29.0740740741,
"line_max": 75,
"alpha_frac": 0.7013546798,
"autogenerated": false,
"ratio": 3.012987012987013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9182104670820459,
"avg_score": 0.006447404393310825,
"num_lines": 54
} |
from __future__ import division
from rdkit import rdBase
from rdkit.DataStructs import cDataStructs
from rdkit.DataStructs.cDataStructs import *
__doc__ = cDataStructs.__doc__
similarityFunctions = [
('Tanimoto', TanimotoSimilarity, ''),
("Dice", DiceSimilarity, ''),
("Cosine", CosineSimilarity, ''),
("Sokal", SokalSimilarity, ''),
("Russel", RusselSimilarity, ''),
("RogotGoldberg", RogotGoldbergSimilarity, ''),
("AllBit", AllBitSimilarity, ''),
("Kulczynski", KulczynskiSimilarity, ''),
("McConnaughey", McConnaugheySimilarity, ''),
("Asymmetric", AsymmetricSimilarity, ''),
("BraunBlanquet", BraunBlanquetSimilarity, ''),
]
def FingerprintSimilarity(fp1, fp2, metric=TanimotoSimilarity):
""" returns the calculated similarity between two fingerprints,
handles any folding that may need to be done to ensure that they
are compatible
"""
sz1 = fp1.GetNumBits()
sz2 = fp2.GetNumBits()
if sz1 < sz2:
fp2 = FoldFingerprint(fp2, sz2 // sz1)
elif sz2 < sz1:
fp1 = FoldFingerprint(fp1, sz1 // sz2)
return metric(fp1, fp2)
def FoldToTargetDensity(fp, density=0.3, minLength=64):
while fp.GetNumOnBits() / len(fp) > density and len(fp) // 2 > minLength:
fp = FoldFingerprint(fp, 2)
return fp
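# Illustrative usage sketch (not from the original module; the bit pattern is an
# arbitrary assumption). FoldToTargetDensity above halves a fingerprint until its
# on-bit density drops to the target or folding would go below the minimum length.
def _exampleFoldToTargetDensity():
  fp = ExplicitBitVect(1024)
  for bit in range(0, 1024, 2):  # half the bits set -> density 0.5
    fp.SetBit(bit)
  folded = FoldToTargetDensity(fp, density=0.3, minLength=64)
  return len(folded), folded.GetNumOnBits()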
ExplicitBitVect.ToBitString = BitVectToText
SparseBitVect.ToBitString = BitVectToText
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/DataStructs/__init__.py",
"copies": "4",
"size": "1625",
"license": "bsd-3-clause",
"hash": -4876271652371967000,
"line_mean": 28.5454545455,
"line_max": 75,
"alpha_frac": 0.7009230769,
"autogenerated": false,
"ratio": 3.009259259259259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.571018233615926,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
import math
def ExplainAtomCode(code, branchSubtract=0):
"""
**Arguments**:
- the code to be considered
- branchSubtract: (optional) the constant that was subtracted off
the number of neighbors before integrating it into the code.
This is used by the topological torsions code.
>>> m = Chem.MolFromSmiles('C=CC(=O)O')
>>> code = GetAtomCode(m.GetAtomWithIdx(0))
>>> ExplainAtomCode(code)
('C', 1, 1)
>>> code = GetAtomCode(m.GetAtomWithIdx(1))
>>> ExplainAtomCode(code)
('C', 2, 1)
>>> code = GetAtomCode(m.GetAtomWithIdx(2))
>>> ExplainAtomCode(code)
('C', 3, 1)
>>> code = GetAtomCode(m.GetAtomWithIdx(3))
>>> ExplainAtomCode(code)
('O', 1, 1)
>>> code = GetAtomCode(m.GetAtomWithIdx(4))
>>> ExplainAtomCode(code)
('O', 1, 0)
"""
typeMask = (1 << rdMolDescriptors.AtomPairsParameters.numTypeBits) - 1
branchMask = (1 << rdMolDescriptors.AtomPairsParameters.numBranchBits) - 1
piMask = (1 << rdMolDescriptors.AtomPairsParameters.numPiBits) - 1
nBranch = int(code & branchMask)
code = code >> rdMolDescriptors.AtomPairsParameters.numBranchBits
nPi = int(code & piMask)
code = code >> rdMolDescriptors.AtomPairsParameters.numPiBits
typeIdx = int(code & typeMask)
if typeIdx < len(rdMolDescriptors.AtomPairsParameters.atomTypes):
atomNum = rdMolDescriptors.AtomPairsParameters.atomTypes[typeIdx]
atomSymbol = Chem.GetPeriodicTable().GetElementSymbol(atomNum)
else:
atomSymbol = 'X'
return (atomSymbol, nBranch, nPi)
GetAtomCode = rdMolDescriptors.GetAtomPairAtomCode
def NumPiElectrons(atom):
""" Returns the number of electrons an atom is using for pi bonding
>>> m = Chem.MolFromSmiles('C=C')
>>> NumPiElectrons(m.GetAtomWithIdx(0))
1
>>> m = Chem.MolFromSmiles('C#CC')
>>> NumPiElectrons(m.GetAtomWithIdx(0))
2
>>> NumPiElectrons(m.GetAtomWithIdx(1))
2
>>> m = Chem.MolFromSmiles('O=C=CC')
>>> NumPiElectrons(m.GetAtomWithIdx(0))
1
>>> NumPiElectrons(m.GetAtomWithIdx(1))
2
>>> NumPiElectrons(m.GetAtomWithIdx(2))
1
>>> NumPiElectrons(m.GetAtomWithIdx(3))
0
>>> m = Chem.MolFromSmiles('c1ccccc1')
>>> NumPiElectrons(m.GetAtomWithIdx(0))
1
FIX: this behaves oddly in these cases:
>>> m = Chem.MolFromSmiles('S(=O)(=O)')
>>> NumPiElectrons(m.GetAtomWithIdx(0))
2
>>> m = Chem.MolFromSmiles('S(=O)(=O)(O)O')
>>> NumPiElectrons(m.GetAtomWithIdx(0))
0
In the second case, the S atom is tagged as sp3 hybridized.
"""
res = 0
if atom.GetIsAromatic():
res = 1
elif atom.GetHybridization() != Chem.HybridizationType.SP3:
# the number of pi electrons is just the number of
# unsaturations (valence - degree):
res = atom.GetExplicitValence() - atom.GetNumExplicitHs()
if res < atom.GetDegree():
raise ValueError("explicit valence exceeds atom degree")
res -= atom.GetDegree()
return res
def BitsInCommon(v1, v2):
""" Returns the number of bits in common between two vectors
**Arguments**:
- two vectors (sequences of bit ids)
**Returns**: an integer
**Notes**
- the vectors must be sorted
- duplicate bit IDs are counted more than once
>>> BitsInCommon( (1,2,3,4,10), (2,4,6) )
2
Here's how duplicates are handled:
>>> BitsInCommon( (1,2,2,3,4), (2,2,4,5,6) )
3
"""
res = 0
v2Pos = 0
nV2 = len(v2)
for val in v1:
while v2Pos < nV2 and v2[v2Pos] < val:
v2Pos += 1
if v2Pos >= nV2:
break
if v2[v2Pos] == val:
res += 1
v2Pos += 1
return res
def DiceSimilarity(v1, v2, bounds=None):
""" Implements the DICE similarity metric.
This is the recommended metric in both the Topological torsions
and Atom pairs papers.
**Arguments**:
- two vectors (sequences of bit ids)
**Returns**: a float.
**Notes**
- the vectors must be sorted
>>> DiceSimilarity( (1,2,3), (1,2,3) )
1.0
>>> DiceSimilarity( (1,2,3), (5,6) )
0.0
>>> DiceSimilarity( (1,2,3,4), (1,3,5,7) )
0.5
>>> DiceSimilarity( (1,2,3,4,5,6), (1,3) )
0.5
Note that duplicate bit IDs count multiple times:
>>> DiceSimilarity( (1,1,3,4,5,6), (1,1) )
0.5
but only if they are duplicated in both vectors:
>>> DiceSimilarity( (1,1,3,4,5,6), (1,) )==2./7
True
edge case
>>> DiceSimilarity( (), () )
0.0
and bounds check
>>> DiceSimilarity( (1,1,3,4), (1,1))
0.666...
>>> DiceSimilarity( (1,1,3,4), (1,1), bounds=0.3)
0.666...
>>> DiceSimilarity( (1,1,3,4), (1,1), bounds=0.33)
0.666...
>>> DiceSimilarity( (1,1,3,4,5,6), (1,1), bounds=0.34)
0.0
"""
denom = 1.0 * (len(v1) + len(v2))
if not denom:
res = 0.0
else:
if bounds and (min(len(v1), len(v2)) / denom) < bounds:
numer = 0.0
else:
numer = 2.0 * BitsInCommon(v1, v2)
res = numer / denom
return res
def Dot(v1, v2):
""" Returns the Dot product between two vectors:
**Arguments**:
- two vectors (sequences of bit ids)
**Returns**: an integer
**Notes**
- the vectors must be sorted
- duplicate bit IDs are counted more than once
>>> Dot( (1,2,3,4,10), (2,4,6) )
2
Here's how duplicates are handled:
>>> Dot( (1,2,2,3,4), (2,2,4,5,6) )
5
>>> Dot( (1,2,2,3,4), (2,4,5,6) )
2
>>> Dot( (1,2,2,3,4), (5,6) )
0
>>> Dot( (), (5,6) )
0
"""
res = 0
nV1 = len(v1)
nV2 = len(v2)
i = 0
j = 0
while i < nV1:
v1Val = v1[i]
v1Count = 1
i += 1
while i < nV1 and v1[i] == v1Val:
v1Count += 1
i += 1
while j < nV2 and v2[j] < v1Val:
j += 1
if j < nV2 and v2[j] == v1Val:
v2Count = 1
j += 1
while j < nV2 and v2[j] == v1Val:
v2Count += 1
j += 1
commonCount = min(v1Count, v2Count)
res += commonCount * commonCount
elif j >= nV2:
break
return res
def CosineSimilarity(v1, v2):
""" Implements the Cosine similarity metric.
This is the recommended metric in the LaSSI paper
**Arguments**:
- two vectors (sequences of bit ids)
**Returns**: a float.
**Notes**
- the vectors must be sorted
>>> print('%.3f'%CosineSimilarity( (1,2,3,4,10), (2,4,6) ))
0.516
>>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), (2,2,4,5,6) ))
0.714
>>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), (1,2,2,3,4) ))
1.000
>>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), (5,6,7) ))
0.000
>>> print('%.3f'%CosineSimilarity( (1,2,2,3,4), () ))
0.000
"""
d1 = Dot(v1, v1)
d2 = Dot(v2, v2)
denom = math.sqrt(d1 * d2)
if not denom:
res = 0.0
else:
numer = Dot(v1, v2)
res = numer / denom
return res
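# Illustrative usage sketch (not from the original module; the SMILES strings are
# arbitrary assumptions). It expands two atom-pair count fingerprints into the
# sorted id lists that DiceSimilarity/CosineSimilarity above expect.
def _examplePairListSimilarity():
  m1 = Chem.MolFromSmiles('CCOC')
  m2 = Chem.MolFromSmiles('CCOCC')
  idLists = []
  for mol in (m1, m2):
    ids = []
    fp = rdMolDescriptors.GetAtomPairFingerprint(mol)
    for bitId, count in fp.GetNonzeroElements().items():
      ids.extend([bitId] * count)
    ids.sort()
    idLists.append(ids)
  return DiceSimilarity(idLists[0], idLists[1]), CosineSimilarity(idLists[0], idLists[1])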
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/AtomPairs/Utils.py",
"copies": "2",
"size": "7283",
"license": "bsd-3-clause",
"hash": 1631624067527401500,
"line_mean": 21.4783950617,
"line_max": 76,
"alpha_frac": 0.6088150487,
"autogenerated": false,
"ratio": 2.7451941198643044,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9297928661105601,
"avg_score": 0.011216101491740793,
"num_lines": 324
} |
from rdkit import rdBase
import cDataStructs
__doc__=cDataStructs.__doc__
from cDataStructs import *
similarityFunctions=[
('Tanimoto',TanimotoSimilarity,''),
("Dice",DiceSimilarity,''),
("Cosine",CosineSimilarity,''),
("Sokal",SokalSimilarity,''),
("Russel",RusselSimilarity,''),
("RogotGoldberg",RogotGoldbergSimilarity,''),
("AllBit",AllBitSimilarity,''),
("Kulczynski",KulczynskiSimilarity,''),
("McConnaughey",McConnaugheySimilarity,''),
("Asymmetric",AsymmetricSimilarity,''),
("BraunBlanquet",BraunBlanquetSimilarity,''),
]
def FingerprintSimilarity(fp1,fp2,metric=TanimotoSimilarity):
""" returns the calculated similarity between two fingerprints,
handles any folding that may need to be done to ensure that they
are compatible
"""
sz1 = fp1.GetNumBits()
sz2 = fp2.GetNumBits()
if sz1<sz2:
fp2 = FoldFingerprint(fp2,sz2/sz1)
elif sz2<sz1:
fp1 = FoldFingerprint(fp1,sz1/sz2)
return metric(fp1,fp2)
def FoldToTargetDensity(fp,density=0.3,minLength=64):
while float(fp.GetNumOnBits())/len(fp)>density and len(fp)/2>minLength:
fp = FoldFingerprint(fp,2)
return fp
ExplicitBitVect.ToBitString = BitVectToText
SparseBitVect.ToBitString = BitVectToText
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/DataStructs/__init__.py",
"copies": "2",
"size": "1506",
"license": "bsd-3-clause",
"hash": 972658805899757000,
"line_mean": 27.9615384615,
"line_max": 73,
"alpha_frac": 0.7197875166,
"autogenerated": false,
"ratio": 2.9129593810444874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9394825220996379,
"avg_score": 0.04758433532962171,
"num_lines": 52
} |
""" unit testing code for MOE-type descriptors with EStates
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest,os
from rdkit import Chem
from rdkit.Chem.EState import EState_VSA
import os.path
def feq(n1,n2,tol=1e-4):
return abs(n1-n2)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
if doLong:
print('\n%s: '%self.shortDescription(), end='')
def test1(self):
inName = os.path.join(RDConfig.RDCodeDir,'Chem','EState','test_data',
'EState_VSA.csv')
with open(inName,'r') as inF:
inL = inF.readline()
names = [x.strip() for x in inL.split(',')[1:]]
suppl = Chem.SmilesMolSupplier(inName,delimiter=',',nameColumn=-1)
for mol in suppl:
self.assertTrue(mol)
smi = Chem.MolToSmiles(mol)
for name in names:
prop = float(mol.GetProp(name))
func = getattr(EState_VSA,name)
v = func(mol)
self.assertTrue(feq(v,prop),'%s: %.4f!=%.4f'%(smi,v,prop))
if __name__ == '__main__':
import sys,getopt,re
doLong=0
if len(sys.argv) >1:
args,extras=getopt.getopt(sys.argv[1:],'l')
for arg,val in args:
if arg=='-l':
doLong=1
sys.argv.remove('-l')
if doLong:
for methName in dir(TestCase):
if re.match('_test',methName):
newName = re.sub('_test','test',methName)
exec('TestCase.%s = TestCase.%s'%(newName,methName))
unittest.main()
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/EState/UnitTestVSA.py",
"copies": "4",
"size": "1739",
"license": "bsd-3-clause",
"hash": -1630963691464598500,
"line_mean": 27.5081967213,
"line_max": 73,
"alpha_frac": 0.6198964922,
"autogenerated": false,
"ratio": 3.0138648180242633,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.05067652033262737,
"num_lines": 61
} |
""" Contains an implementation of Atom-pair fingerprints, as
described in:
R.E. Carhart, D.H. Smith, R. Venkataraghavan;
"Atom Pairs as Molecular Features in Structure-Activity Studies:
Definition and Applications" JCICS 25, 64-73 (1985).
"""
from rdkit.DataStructs import IntSparseIntVect
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem.AtomPairs import Utils
from rdkit import DataStructs
from rdkit.Chem.rdMolDescriptors import GetAtomPairFingerprint,GetHashedAtomPairFingerprint
GetAtomPairFingerprintAsIntVect=rdMolDescriptors.GetAtomPairFingerprint
numPathBits=rdMolDescriptors.AtomPairsParameters.numPathBits
_maxPathLen=(1<<numPathBits)-1
numFpBits=numPathBits+2*rdMolDescriptors.AtomPairsParameters.codeSize
fpLen=1<<numFpBits
def pyScorePair(at1,at2,dist,atomCodes=None):
""" Returns a score for an individual atom pair.
>>> m = Chem.MolFromSmiles('CCCCC')
>>> c1 = Utils.GetAtomCode(m.GetAtomWithIdx(0))
>>> c2 = Utils.GetAtomCode(m.GetAtomWithIdx(1))
>>> c3 = Utils.GetAtomCode(m.GetAtomWithIdx(2))
>>> t = 1 | min(c1,c2)<<numPathBits | max(c1,c2)<<(rdMolDescriptors.AtomPairsParameters.codeSize+numPathBits)
>>> pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(1),1)==t
1
>>> pyScorePair(m.GetAtomWithIdx(1),m.GetAtomWithIdx(0),1)==t
1
>>> t = 2 | min(c1,c3)<<numPathBits | max(c1,c3)<<(rdMolDescriptors.AtomPairsParameters.codeSize+numPathBits)
>>> pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(2),2)==t
1
>>> pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(2),2,
... atomCodes=(Utils.GetAtomCode(m.GetAtomWithIdx(0)),Utils.GetAtomCode(m.GetAtomWithIdx(2))))==t
1
"""
if not atomCodes:
code1 = Utils.GetAtomCode(at1)
code2 = Utils.GetAtomCode(at2)
else:
code1,code2=atomCodes
accum = int(dist) % _maxPathLen
accum |= min(code1,code2) << numPathBits
accum |= max(code1,code2) << (rdMolDescriptors.AtomPairsParameters.codeSize+numPathBits)
return accum
def ExplainPairScore(score):
"""
>>> m = Chem.MolFromSmiles('C=CC')
>>> score = pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(1),1)
>>> ExplainPairScore(score)
(('C', 1, 1), 1, ('C', 2, 1))
>>> score = pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(2),2)
>>> ExplainPairScore(score)
(('C', 1, 0), 2, ('C', 1, 1))
>>> score = pyScorePair(m.GetAtomWithIdx(1),m.GetAtomWithIdx(2),1)
>>> ExplainPairScore(score)
(('C', 1, 0), 1, ('C', 2, 1))
>>> score = pyScorePair(m.GetAtomWithIdx(2),m.GetAtomWithIdx(1),1)
>>> ExplainPairScore(score)
(('C', 1, 0), 1, ('C', 2, 1))
"""
codeMask = (1<<rdMolDescriptors.AtomPairsParameters.codeSize)-1
pathMask = (1<<numPathBits)-1
dist = score&pathMask
score = score>>numPathBits
code1 = score&codeMask
score = score>>rdMolDescriptors.AtomPairsParameters.codeSize
code2 = score&codeMask
res = Utils.ExplainAtomCode(code1),dist,Utils.ExplainAtomCode(code2)
return res
def GetAtomPairFingerprintAsBitVect(mol):
""" Returns the Atom-pair fingerprint for a molecule as
a SparseBitVect. Note that this doesn't match the standard
definition of atom pairs, which uses counts of the
pairs, not just their presence.
**Arguments**:
- mol: a molecule
**Returns**: a SparseBitVect
>>> m = Chem.MolFromSmiles('CCC')
>>> v = [ pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(1),1),
... pyScorePair(m.GetAtomWithIdx(0),m.GetAtomWithIdx(2),2),
... ]
>>> v.sort()
>>> fp = GetAtomPairFingerprintAsBitVect(m)
>>> list(fp.GetOnBits())==v
True
"""
res = DataStructs.SparseBitVect(fpLen)
fp = rdMolDescriptors.GetAtomPairFingerprint(mol)
for val in fp.GetNonzeroElements().keys():
res.SetBit(val)
return res
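# Illustrative usage sketch (not from the original module; the SMILES strings are
# arbitrary assumptions). It compares two molecules with both the count-based and
# the bit-vector forms of the atom-pair fingerprint, using the DataStructs Dice metric.
def _exampleAtomPairComparison():
  m1 = Chem.MolFromSmiles('CCOC')
  m2 = Chem.MolFromSmiles('CCOCC')
  countSim = DataStructs.DiceSimilarity(GetAtomPairFingerprint(m1),
                                        GetAtomPairFingerprint(m2))
  bitSim = DataStructs.DiceSimilarity(GetAtomPairFingerprintAsBitVect(m1),
                                      GetAtomPairFingerprintAsBitVect(m2))
  return countSim, bitSim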
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "strets123/rdkit",
"path": "rdkit/Chem/AtomPairs/Pairs.py",
"copies": "4",
"size": "4247",
"license": "bsd-3-clause",
"hash": -7505089715439233000,
"line_mean": 30.4592592593,
"line_max": 111,
"alpha_frac": 0.700023546,
"autogenerated": false,
"ratio": 2.871534820824882,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5571558366824882,
"avg_score": null,
"num_lines": null
} |
""" Defines Naive Baysean classification model
Based on development in: Chapter 6 of "Machine Learning" by Tom Mitchell
"""
import numpy
from rdkit.ML.Data import Quantize
from rdkit.six import iteritems
def _getBinId(val, qBounds):
bid = 0
for bnd in qBounds:
if (val > bnd):
bid += 1
return bid
# FIX: this class has not been updated to new-style classes
# (RD Issue380) because that would break all of our legacy pickled
# data. Until a solution is found for this breakage, an update is
# impossible.
class NaiveBayesClassifier:
"""
_NaiveBayesClassifier_s can save the following pieces of internal state, accessible via
standard setter/getter functions:
1) _Examples_: a list of examples which have been predicted
    2) _TrainingExamples_: List of training examples - the descriptor values of these examples
       are quantized based on info gain using ML/Data/Quantize.py if necessary
    3) _TestExamples_: the list of examples used to test the model
    4) _BadExamples_ : list of examples that were incorrectly classified
    5) _QBoundVals_: Quant bound values for each variable - a list of lists
    6) _QBounds_ : Number of bounds for each variable
"""
def __init__(self, attrs, nPossibleVals, nQuantBounds, mEstimateVal=-1.0, useSigs=False):
""" Constructor
"""
self._attrs = attrs
self._mEstimateVal = mEstimateVal
self._useSigs = useSigs
self._classProbs = {}
self._examples = []
self._trainingExamples = []
self._testExamples = []
self._badExamples = []
self._QBoundVals = {}
self._nClasses = nPossibleVals[-1]
self._qBounds = nQuantBounds
self._nPosVals = nPossibleVals
self._needsQuant = 1
self._name = ""
self.mprob = -1.0
    # for the sake of efficiency, let's try to change the conditional probabilities
    # to a numpy array instead of a dictionary. The three-dimensional array is indexed
    # on the activity class, the descriptor ID and the descriptor bin ID
#self._condProbs = {}
#self._condProbs = numpy.zeros((self._nClasses, max(self._attrs)+1, max(self._nPosVals)+1), 'd')
self._condProbs = [None] * self._nClasses
for i in range(self._nClasses):
if not (hasattr(self, '_useSigs') and self._useSigs):
nA = max(self._attrs) + 1
self._condProbs[i] = [None] * nA
for j in range(nA):
nV = self._nPosVals[j]
if self._qBounds[j]:
nV = max(nV, self._qBounds[j] + 1)
self._condProbs[i][j] = [0.0] * nV
else:
self._condProbs[i] = {}
for idx in self._attrs:
self._condProbs[i][idx] = [0.0] * 2
def GetName(self):
return self._name
def SetName(self, name):
self._name = name
def NameModel(self, varNames):
    self.SetName('NaiveBayesClassifier')
def GetExamples(self):
return self._examples
def SetExamples(self, examples):
self._examples = examples
def GetTrainingExamples(self):
return self._trainingExamples
def SetTrainingExamples(self, examples):
self._trainingExamples = examples
def GetTestExamples(self):
return self._testExamples
def SetTestExamples(self, examples):
self._testExamples = examples
def SetBadExamples(self, examples):
self._badExamples = examples
def GetBadExamples(self):
return self._badExamples
def _computeQuantBounds(self):
neg = len(self._trainingExamples)
natr = len(self._attrs)
# make a list of results and values
allVals = numpy.zeros((neg, natr), 'd')
res = [] # list of y values
i = 0
for eg in self._trainingExamples:
res.append(eg[-1])
j = 0
for ai in self._attrs:
val = eg[ai]
allVals[i, j] = val
j += 1
i += 1
# now loop over each of the columns and compute the bounds
# the number of bounds is determined by the maximum info gain
i = 0
for ai in self._attrs:
nbnds = self._qBounds[ai]
if nbnds > 0:
mbnds = []
mgain = -1.0
for j in range(1, nbnds + 1):
bnds, igain = Quantize.FindVarMultQuantBounds(allVals[:, i], j, res, self._nClasses)
if (igain > mgain):
mbnds = bnds
mgain = igain
self._QBoundVals[ai] = mbnds
i += 1
def trainModel(self):
""" We will assume at this point that the training examples have been set
We have to estmate the conditional probabilities for each of the (binned) descriptor
component give a outcome (or class). Also the probabilities for each class is estimated
"""
# first estimate the class probabilities
n = len(self._trainingExamples)
for i in range(self._nClasses):
self._classProbs[i] = 0.0
#for i in range(self._nClasses):
# self._classProbs[i] = float(self._classProbs[i])/n
# first find the bounds for each descriptor value if necessary
if not self._useSigs and max(self._qBounds) > 0:
self._computeQuantBounds()
# now compute the probabilities
ncls = {}
incr = 1.0 / n
for eg in self._trainingExamples:
cls = eg[-1]
self._classProbs[cls] += incr
ncls[cls] = ncls.get(cls, 0) + 1
tmp = self._condProbs[cls]
if not self._useSigs:
for ai in self._attrs:
bid = eg[ai]
if self._qBounds[ai] > 0:
bid = _getBinId(bid, self._QBoundVals[ai])
tmp[ai][bid] += 1.0
else:
for ai in self._attrs:
if eg[1].GetBit(ai):
tmp[ai][1] += 1.0
else:
tmp[ai][0] += 1.0
#for key in self._condProbs:
for cls in range(self._nClasses):
      if cls not in ncls:
continue
#cls = key[0]
tmp = self._condProbs[cls]
for ai in self._attrs:
if not self._useSigs:
nbnds = self._nPosVals[ai]
if (self._qBounds[ai] > 0):
nbnds = self._qBounds[ai]
else:
nbnds = 2
for bid in range(nbnds):
if self._mEstimateVal <= 0.0:
            # this is simply the fraction of the time this descriptor component assumes
            # this value among the examples that belong to a specific class
#self._condProbs[key] = (float(self._condProbs[key]))/ncls[cls]
tmp[ai][bid] /= ncls[cls]
else:
            # this is a bit more complicated form - more appropriate for unbalanced data
            # see "Machine Learning" by Tom Mitchell, section 6.9.1.1
            # this is the probability that this descriptor component can take this specific value;
            # in the absence of any other information it is simply the inverse of the number of
            # possible values 'npossible'.
            # If we quantized this component then
            #   npossible = 1 + len(self._QBoundVals[ai])
            # else if we did not quantize (the descriptor came pre-quantized)
            #   npossible = nPossibleVals[ai]
#ai = key[1]
pdesc = 0.0
if self._qBounds[ai] > 0:
pdesc = 1.0 / (1 + len(self._QBoundVals[ai]))
elif (self._nPosVals[ai] > 0):
pdesc = 1.0 / (self._nPosVals[ai])
else:
raise ValueError('Neither Bounds set nor data pre-quantized for attribute ' + str(ai))
tmp[ai][bid] += (self._mEstimateVal) * pdesc
tmp[ai][bid] /= (ncls[cls] + self._mEstimateVal)
def ClassifyExamples(self, examples, appendExamples=0):
preds = []
for eg in examples:
pred = self.ClassifyExample(eg, appendExamples)
preds.append(int(pred))
return preds
def GetClassificationDetails(self):
""" returns the probability of the last prediction """
return self.mprob
def ClassifyExample(self, example, appendExamples=0):
""" Classify an example by summing over the conditional probabilities
The most likely class is the one with the largest probability
"""
if appendExamples:
self._examples.append(example)
clsProb = {}
for key, prob in iteritems(self._classProbs):
clsProb[key] = prob
tmp = self._condProbs[key]
for ai in self._attrs:
if not (hasattr(self, '_useSigs') and self._useSigs):
bid = example[ai]
if self._qBounds[ai] > 0:
bid = _getBinId(bid, self._QBoundVals[ai])
else:
if example[1].GetBit(ai):
bid = 1
else:
bid = 0
clsProb[key] *= tmp[ai][bid]
mkey = -1
self.mprob = -1.0
for key, prob in iteritems(clsProb):
if (prob > self.mprob):
mkey = key
self.mprob = prob
return mkey
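# Illustrative usage sketch (not from the original module). The toy examples below
# are made-up assumptions, but they follow the layout used by callers of this class:
# [identifier, descriptor values..., class label], with pre-quantized binary
# descriptors so that no quant bounds need to be computed.
def _exampleNaiveBayesUsage():
  examples = [
    ['a', 1, 0, 1, 1],
    ['b', 1, 0, 0, 1],
    ['c', 0, 1, 1, 0],
    ['d', 0, 1, 0, 0],
  ]
  nVars = len(examples[0]) - 2
  attrs = list(range(1, nVars + 1))
  nPossibleVals = [0] + [2] * nVars + [2]
  nQuantBounds = [0] * (nVars + 2)
  model = NaiveBayesClassifier(attrs, nPossibleVals, nQuantBounds)
  model.SetTrainingExamples(examples)
  model.trainModel()
  # predictions for the training examples themselves:
  return model.ClassifyExamples(examples)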
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/ML/NaiveBayes/ClassificationModel.py",
"copies": "1",
"size": "8719",
"license": "bsd-3-clause",
"hash": -2859280128291355600,
"line_mean": 30.3633093525,
"line_max": 100,
"alpha_frac": 0.6105057919,
"autogenerated": false,
"ratio": 3.5675122749590833,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4678018066859083,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from rdkit import RDConfig
import unittest,sys,os
from rdkit.six import PY3
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures,rdDistGeom
import EmbedLib
import gzip
from rdkit import DistanceGeometry as DG
from rdkit import Geometry
import Pharmacophore
import numpy
def feq(n1,n2,tol=1e-5):
return abs(n1-n2)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
self.fdefBlock = \
"""DefineFeature HAcceptor1 [N,O;H0]
Family HBondAcceptor
Weights 1.0
EndFeature
DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
EndFeature
DefineFeature Aromatic1 c1ccccc1
Family Aromatic
Weights 1.,1.,1.,1.,1.,1.
EndFeature\n"""
self.featFactory = ChemicalFeatures.BuildFeatureFactoryFromString(self.fdefBlock)
self.feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1',
Geometry.Point3D(2.65, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1',
Geometry.Point3D(5.12, 0.908, 0.0)),
]
self.pcophore=Pharmacophore.Pharmacophore(self.feats)
self.pcophore.setLowerBound(0,1, 2.0)
self.pcophore.setUpperBound(0,1, 3.3)
self.pcophore.setLowerBound(0,2, 5.0)
self.pcophore.setUpperBound(0,2, 5.4)
self.pcophore.setLowerBound(1,2, 2.6)
self.pcophore.setUpperBound(1,2, 3.0)
def _matchMol(self,tpl,pcophore,featFactory,downSample):
name,molPkl,boundsMat = tpl
mol = Chem.Mol(molPkl)
matched,matches = EmbedLib.MatchPharmacophoreToMol(mol,featFactory,pcophore)
if matched:
r = EmbedLib.MatchPharmacophore(matches,boundsMat,pcophore,
useDownsampling=downSample)
if r[0]:
return 0
else:
return 1
else:
return 0
def test1SearchFullMat(self):
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
#outF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.new.gz'),'wb+')
nDone = 0
nHits = 0
while 1:
try:
tpl = cPickle.load(inF, encoding='latin1')
if PY3:
tpl = tpl[0], tpl[1].encode('latin1'), tpl[2]
#tpl=tpl[0],tpl[1],numpy.array(tpl[2])
#cPickle.dump(tpl,outF)
except:
break
if self._matchMol(tpl,self.pcophore,self.featFactory,0):
nHits+=1
nDone += 1
self.assertEqual(nDone,100)
#print 'nHits:',nHits
self.assertEqual(nHits,47)
def test2SearchDownsample(self):
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
nDone = 0
nHits = 0
hits = []
while 1:
try:
tpl = cPickle.load(inF, encoding='latin1')
if PY3:
tpl = tpl[0], tpl[1].encode('latin1'), tpl[2]
except:
break
if self._matchMol(tpl,self.pcophore, self.featFactory,1):
nHits+=1
nDone += 1
self.assertEqual(nDone,100)
#print 'nHits:',nHits
self.assertEqual(nHits,47)
def test3Embed(self):
testResults={
'mol_197':(218.80,35.75,110.33,11.58,109.66,11.09,90.35,2.95,0.00),
'mol_223':(259.19,6.27,134.13,1.12,134.06,1.12,85.74,0.61,0.00),
'mol_269':(204.51,7.89,103.89,1.20,102.66,1.20,88.07,1.21,6.00),
}
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
nDone = 0
nHits = 0
while 1:
try:
name,molPkl,boundsMat = cPickle.load(inF, encoding='latin1')
if PY3:
molPkl = bytes(molPkl, encoding='latin1')
except:
break
nDone += 1
mol = Chem.Mol(molPkl)
nboundsMat = rdDistGeom.GetMoleculeBoundsMatrix(mol)
DG.DoTriangleSmoothing(nboundsMat)
matched,matches = EmbedLib.MatchPharmacophoreToMol(mol,self.featFactory,
self.pcophore)
if matched:
failed,bm,match,stats = EmbedLib.MatchPharmacophore(matches,nboundsMat,
self.pcophore,
useDownsampling=1)
if not failed:
nHits += 1
if name in testResults:
stats = EmbedLib.EmbedOne(mol,name,match,self.pcophore,count=10,
silent=1,randomSeed=23)
tgt = testResults[name]
self.assertEqual(len(tgt),len(stats))
print(name)
print(','.join(['%.2f'%x for x in stats]))
# we'll use different tolerances for the different values:
self.assertTrue(feq(tgt[0],stats[0],5.0),(tgt[0],stats[0]))
for i in range(2,len(tgt)):
self.assertTrue(feq(tgt[i],stats[i],5.0),(tgt[i],stats[i]))
self.assertEqual(nDone,100)
#print 'nHits:',nHits
self.assertEqual(nHits,50)
def test4Search(self):
featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(self.dataDir,
'BaseFeatures.fdef'))
activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Donor',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic',
Geometry.Point3D(0.0, 0.0, 0.0))]
pcophore= Pharmacophore.Pharmacophore(activeFeats)
pcophore.setLowerBound(0,1,2.251)
pcophore.setUpperBound(0,1,2.451)
pcophore.setUpperBound2D(0,1,3)
pcophore.setLowerBound(0,2,4.970)
pcophore.setUpperBound(0,2,5.170)
pcophore.setUpperBound2D(0,2,6)
pcophore.setLowerBound(1,2,2.681)
pcophore.setUpperBound(1,2,2.881)
pcophore.setUpperBound2D(1,2,6)
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
nDone = 0
nMatches = 0
nHits = 0
while 1:
try:
name,molPkl,boundsMat = cPickle.load(inF, encoding='latin1')
if PY3:
molPkl = bytes(molPkl, encoding='latin1')
      except Exception:  # stop at the end of the pickled stream
break
nDone += 1
mol = Chem.Mol(molPkl)
boundsMat = rdDistGeom.GetMoleculeBoundsMatrix(mol)
DG.DoTriangleSmoothing(boundsMat)
canMatch,matches = EmbedLib.MatchPharmacophoreToMol(mol,featFactory,
pcophore)
if canMatch:
nMatches+=1
r = EmbedLib.MatchPharmacophore(matches,boundsMat,pcophore,
useDownsampling=True,use2DLimits=True,
mol=mol)
failed,bm,match,details = r
if not failed:
nHits+=1
self.assertEqual(nDone,100)
self.assertEqual(nMatches,93)
#print 'nhits:',nHits
self.assertEqual(nHits,67)
def testIssue268(self):
from rdkit import RDLogger
#RDLogger.EnableLog('rdApp.debug')
featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(self.dataDir,
'Issue268.fdef'))
m1 = Chem.MolFromMolFile(os.path.join(self.dataDir,
'Issue268_Mol1.mol'))
m2 = Chem.MolFromMolFile(os.path.join(self.dataDir,
'Issue268_Mol2.mol'))
with open(os.path.join(self.dataDir,
'Issue268_Pcop.pkl'),'rb') as inF:
pcop = cPickle.load(inF, encoding='latin1')
#pcop._boundsMat=numpy.array(pcop._boundsMat)
#pcop._boundsMat2D=numpy.array(pcop._boundsMat2D)
#cPickle.dump(pcop,file(os.path.join(self.dataDir,
# 'Issue268_Pcop.new.pkl'),'wb+'))
match,mList1 = EmbedLib.MatchFeatsToMol(m1,featFactory,pcop.getFeatures())
match,mList2 = EmbedLib.MatchFeatsToMol(m2,featFactory,pcop.getFeatures())
b1 = rdDistGeom.GetMoleculeBoundsMatrix(m1)
b2 = rdDistGeom.GetMoleculeBoundsMatrix(m2)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop)[2]),4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop)[2]),4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop,
mol=m1,use2DLimits=True)[2]),4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop,
mol=m2,use2DLimits=True)[2]),4)
from rdkit import DistanceGeometry as DG
self.assertTrue(DG.DoTriangleSmoothing(b1))
self.assertTrue(DG.DoTriangleSmoothing(b2))
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop)[2]),4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop)[2]),4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop,
mol=m1,use2DLimits=True)[2]),4)
self.assertEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop,
mol=m2,use2DLimits=True)[2]),4)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/Chem/Pharm3D/UnitTestEmbed.py",
"copies": "1",
"size": "10065",
"license": "bsd-3-clause",
"hash": 8775973549836883000,
"line_mean": 36.6966292135,
"line_max": 85,
"alpha_frac": 0.5786388475,
"autogenerated": false,
"ratio": 3.1841189496994624,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9083797925130674,
"avg_score": 0.035791974413757525,
"num_lines": 267
} |
from __future__ import print_function
from rdkit import RDConfig
import sys,time,math
from rdkit.ML.Data import Stats
import rdkit.DistanceGeometry as DG
from rdkit import Chem
import numpy
from rdkit.Chem import rdDistGeom as MolDG
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem import ChemicalForceFields
import Pharmacophore,ExcludedVolume
from rdkit import Geometry
_times = {}
from rdkit import RDLogger as logging
logger = logging.logger()
defaultFeatLength=2.0
def GetAtomHeavyNeighbors(atom):
""" returns a list of the heavy-atom neighbors of the
atom passed in:
>>> m = Chem.MolFromSmiles('CCO')
>>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(0))
>>> len(l)
1
>>> isinstance(l[0],Chem.Atom)
True
>>> l[0].GetIdx()
1
>>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(1))
>>> len(l)
2
>>> l[0].GetIdx()
0
>>> l[1].GetIdx()
2
"""
res=[]
for nbr in atom.GetNeighbors():
if nbr.GetAtomicNum() != 1:
res.append(nbr)
return res
def ReplaceGroup(match,bounds,slop=0.01,useDirs=False,dirLength=defaultFeatLength):
""" Adds an entry at the end of the bounds matrix for a point at
the center of a multi-point feature
returns a 2-tuple:
new bounds mat
index of point added
>>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
>>> match = [0,1,2]
>>> bm,idx = ReplaceGroup(match,boundsMat,slop=0.0)
the index is at the end:
>>> idx == 3
True
and the matrix is one bigger:
>>> bm.shape == (4, 4)
True
but the original bounds mat is not altered:
>>> boundsMat.shape == (3, 3)
True
We make the assumption that the points of the
feature form a regular polygon, are listed in order
(i.e. pt 0 is a neighbor to pt 1 and pt N-1)
and that the replacement point goes at the center:
>>> print(', '.join(['%.3f'%x for x in bm[-1]]))
0.577, 0.577, 0.577, 0.000
>>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
1.155, 1.155, 1.155, 0.000
The slop argument (default = 0.01) is fractional:
>>> bm,idx = ReplaceGroup(match,boundsMat)
>>> print(', '.join(['%.3f'%x for x in bm[-1]]))
0.572, 0.572, 0.572, 0.000
>>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
1.166, 1.166, 1.166, 0.000
"""
maxVal = -1000.0
minVal = 1e8
nPts = len(match)
for i in range(nPts):
idx0 = match[i]
if i<nPts-1:
idx1 = match[i+1]
else:
idx1 = match[0]
if idx1<idx0:
idx0,idx1 = idx1,idx0
minVal = min(minVal,bounds[idx1,idx0])
maxVal = max(maxVal,bounds[idx0,idx1])
maxVal *= (1+slop)
minVal *= (1-slop)
scaleFact = 1.0/(2.0*math.sin(math.pi/nPts))
minVal *= scaleFact
maxVal *= scaleFact
replaceIdx = bounds.shape[0]
if not useDirs:
bm = numpy.zeros((bounds.shape[0]+1,bounds.shape[1]+1),numpy.float)
else:
bm = numpy.zeros((bounds.shape[0]+2,bounds.shape[1]+2),numpy.float)
bm[0:bounds.shape[0],0:bounds.shape[1]]=bounds
bm[:replaceIdx,replaceIdx]=1000.
if useDirs:
bm[:replaceIdx+1,replaceIdx+1]=1000.
# set the feature - direction point bounds:
bm[replaceIdx,replaceIdx+1]=dirLength+slop
bm[replaceIdx+1,replaceIdx]=dirLength-slop
for idx1 in match:
bm[idx1,replaceIdx]=maxVal
bm[replaceIdx,idx1]=minVal
if useDirs:
# set the point - direction point bounds:
bm[idx1,replaceIdx+1] = numpy.sqrt(bm[replaceIdx,replaceIdx+1]**2+maxVal**2)
bm[replaceIdx+1,idx1] = numpy.sqrt(bm[replaceIdx+1,replaceIdx]**2+minVal**2)
return bm,replaceIdx
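# A minimal sketch of where the scale factor above comes from: the vertices of a
# regular n-gon with side length s sit a distance s/(2*sin(pi/n)) from its center.
# _exampleReplaceGroupScale is a hypothetical helper, not part of this module's API;
# with the doctest's bounds (lower 1.0, upper 2.0, three points) it gives ~(0.577, 1.155).
def _exampleReplaceGroupScale(nPts=3,lower=1.0,upper=2.0):
  """ reproduces the center-point bounds quoted in the ReplaceGroup doctest """
  scaleFact = 1.0/(2.0*math.sin(math.pi/nPts))  # math is imported at the top of this module
  return lower*scaleFact,upper*scaleFact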
def EmbedMol(mol,bm,atomMatch=None,weight=2.0,randomSeed=-1,
excludedVolumes=None):
""" Generates an embedding for a molecule based on a bounds matrix and adds
a conformer (id 0) to the molecule
if the optional argument atomMatch is provided, it will be used to provide
supplemental weights for the embedding routine (used in the optimization
phase to ensure that the resulting geometry really does satisfy the
pharmacophore).
if the excludedVolumes is provided, it should be a sequence of
ExcludedVolume objects
>>> m = Chem.MolFromSmiles('c1ccccc1C')
>>> bounds = MolDG.GetMoleculeBoundsMatrix(m)
>>> bounds.shape == (7, 7)
True
>>> m.GetNumConformers()
0
>>> EmbedMol(m,bounds,randomSeed=23)
>>> m.GetNumConformers()
1
"""
nAts = mol.GetNumAtoms()
weights=[]
if(atomMatch):
for i in range(len(atomMatch)):
for j in range(i+1,len(atomMatch)):
weights.append((i,j,weight))
if(excludedVolumes):
for vol in excludedVolumes:
idx = vol.index
# excluded volumes affect every other atom:
for i in range(nAts):
weights.append((i,idx,weight))
coords = DG.EmbedBoundsMatrix(bm,weights=weights,numZeroFail=1,randomSeed=randomSeed)
#for row in coords:
# print(', '.join(['%.2f'%x for x in row]))
conf = Chem.Conformer(nAts)
conf.SetId(0)
for i in range(nAts):
conf.SetAtomPosition(i,list(coords[i]))
if excludedVolumes:
for vol in excludedVolumes:
vol.pos = numpy.array(coords[vol.index])
#print(' % 7.4f % 7.4f % 7.4f Ar 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(coords[-1]), file=sys.stderr)
mol.AddConformer(conf)
def AddExcludedVolumes(bm,excludedVolumes,smoothIt=True):
""" Adds a set of excluded volumes to the bounds matrix
and returns the new matrix
excludedVolumes is a list of ExcludedVolume objects
>>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
>>> ev1 = ExcludedVolume.ExcludedVolume(([(0,),0.5,1.0],),exclusionDist=1.5)
>>> bm = AddExcludedVolumes(boundsMat,(ev1,))
  the resulting matrix is one bigger:
>>> bm.shape == (4, 4)
True
and the original bounds mat is not altered:
>>> boundsMat.shape == (3, 3)
True
>>> print(', '.join(['%.3f'%x for x in bm[-1]]))
0.500, 1.500, 1.500, 0.000
>>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
1.000, 3.000, 3.000, 0.000
"""
oDim = bm.shape[0]
dim = oDim+len(excludedVolumes)
res = numpy.zeros((dim,dim),numpy.float)
res[:oDim,:oDim] = bm
for i,vol in enumerate(excludedVolumes):
bmIdx = oDim+i
vol.index = bmIdx
# set values to all the atoms:
res[bmIdx,:bmIdx] = vol.exclusionDist
res[:bmIdx,bmIdx] = 1000.0
# set values to our defining features:
for indices,minV,maxV in vol.featInfo:
for index in indices:
try:
res[bmIdx,index] = minV
res[index,bmIdx] = maxV
except IndexError:
logger.error('BAD INDEX: res[%d,%d], shape is %s'%(bmIdx,index,str(res.shape)))
raise IndexError
# set values to other excluded volumes:
for j in range(bmIdx+1,dim):
res[bmIdx,j:dim] = 0
res[j:dim,bmIdx] = 1000
if smoothIt: DG.DoTriangleSmoothing(res)
return res
def UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=False,
dirLength=defaultFeatLength,
mol=None):
""" loops over a distance bounds matrix and replaces the elements
that are altered by a pharmacophore
**NOTE** this returns the resulting bounds matrix, but it may also
alter the input matrix
atomMatch is a sequence of sequences containing atom indices
for each of the pharmacophore's features.
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 1.0)
>>> pcophore.setUpperBound(0,1, 2.0)
>>> boundsMat = numpy.array([[0.0,3.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> atomMatch = ((0,),(1,))
>>> bm = UpdatePharmacophoreBounds(boundsMat,atomMatch,pcophore)
In this case, there are no multi-atom features, so the result matrix
is the same as the input:
>>> bm is boundsMat
True
this means, of course, that the input boundsMat is altered:
>>> print(', '.join(['%.3f'%x for x in boundsMat[0]]))
0.000, 2.000, 3.000
>>> print(', '.join(['%.3f'%x for x in boundsMat[1]]))
1.000, 0.000, 3.000
>>> print(', '.join(['%.3f'%x for x in boundsMat[2]]))
2.000, 2.000, 0.000
"""
replaceMap = {}
for i,matchI in enumerate(atomMatch):
if len(matchI)>1:
bm,replaceIdx = ReplaceGroup(matchI,bm,useDirs=useDirs)
replaceMap[i] = replaceIdx
for i,matchI in enumerate(atomMatch):
mi = replaceMap.get(i,matchI[0])
for j in range(i+1,len(atomMatch)):
mj = replaceMap.get(j,atomMatch[j][0])
if mi<mj:
idx0,idx1 = mi,mj
else:
idx0,idx1 = mj,mi
bm[idx0,idx1] = pcophore.getUpperBound(i,j)
bm[idx1,idx0] = pcophore.getLowerBound(i,j)
return bm
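# A hedged sketch (hypothetical helper, not part of this module's API): when a feature
# matches more than one atom (an aromatic ring, for instance), the function above first
# routes that match through ReplaceGroup, so the returned matrix gains a row and column
# for the ring centroid and is no longer the object that was passed in.
def _exampleMultiAtomFeatureBounds(pcophore,boundsMat,atomMatch=((0,1,2),(3,))):
  """ assumes boundsMat covers at least four atoms and that feature 0 of pcophore
  matches atoms 0-2 while feature 1 matches atom 3 """
  bm = UpdatePharmacophoreBounds(boundsMat,atomMatch,pcophore)
  # boundsMat keeps its original shape; bm carries one extra row/column for the centroid
  return bm.shape,boundsMat.shape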
def EmbedPharmacophore(mol,atomMatch,pcophore,randomSeed=-1,count=10,smoothFirst=True,
silent=False,bounds=None,excludedVolumes=None,targetNumber=-1,
useDirs=False):
""" Generates one or more embeddings for a molecule that satisfy a pharmacophore
atomMatch is a sequence of sequences containing atom indices
for each of the pharmacophore's features.
    - count: the maximum number of attempts to make at generating an embedding
    - smoothFirst: toggles triangle smoothing of the molecular bounds matrix
- bounds: if provided, should be the molecular bounds matrix. If this isn't
provided, the matrix will be generated.
- targetNumber: if this number is positive, it provides a maximum number
of embeddings to generate (i.e. we'll have count attempts to generate
targetNumber embeddings).
returns: a 3 tuple:
1) the molecular bounds matrix adjusted for the pharmacophore
2) a list of embeddings (molecules with a single conformer)
3) the number of failed attempts at embedding
>>> m = Chem.MolFromSmiles('OCCN')
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.5)
>>> pcophore.setUpperBound(0,1, 3.5)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
10
>>> nFail
0
Set up a case that can't succeed:
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.0)
>>> pcophore.setUpperBound(0,1, 2.1)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
0
>>> nFail
10
"""
global _times
if not hasattr(mol,'_chiralCenters'):
mol._chiralCenters = Chem.FindMolChiralCenters(mol)
if bounds is None:
bounds = MolDG.GetMoleculeBoundsMatrix(mol)
if smoothFirst: DG.DoTriangleSmoothing(bounds)
bm = bounds.copy()
#print '------------'
#print 'initial'
#for row in bm:
# print ' ',' '.join(['% 4.2f'%x for x in row])
#print '------------'
bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=useDirs,mol=mol)
if excludedVolumes:
bm = AddExcludedVolumes(bm,excludedVolumes,smoothIt=False)
if not DG.DoTriangleSmoothing(bm):
raise ValueError("could not smooth bounds matrix")
#print '------------'
#print 'post replace and smooth'
#for row in bm:
# print ' ',' '.join(['% 4.2f'%x for x in row])
#print '------------'
if targetNumber<=0:
targetNumber=count
nFailed = 0
res = []
for i in range(count):
tmpM = bm[:,:]
m2 = Chem.Mol(mol)
t1 = time.time()
try:
if randomSeed<=0:
seed = i*10+1
else:
seed = i*10+randomSeed
EmbedMol(m2,tmpM,atomMatch,randomSeed=seed,
excludedVolumes=excludedVolumes)
except ValueError:
if not silent:
logger.info('Embed failed')
nFailed += 1
else:
t2 = time.time()
_times['embed'] = _times.get('embed',0)+t2-t1
keepIt=True
for idx,stereo in mol._chiralCenters:
if stereo in ('R','S'):
vol = ComputeChiralVolume(m2,idx)
if (stereo=='R' and vol>=0) or \
(stereo=='S' and vol<=0):
keepIt=False
break
if keepIt:
res.append(m2)
else:
logger.debug('Removed embedding due to chiral constraints.')
if len(res)==targetNumber: break
return bm,res,nFailed
def isNaN(v):
""" provides an OS independent way of detecting NaNs
This is intended to be used with values returned from the C++
side of things.
We can't actually test this from Python (which traps
zero division errors), but it would work something like
this if we could:
>>> isNaN(0)
False
  #>>> isNaN(1/0)
#True
"""
if v!=v and sys.platform=='win32':
return True
elif v==0 and v==1 and sys.platform!='win32':
return True
return False
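# Hedged aside: on current interpreters the IEEE-754 self-comparison below is enough to
# detect a NaN (math.isnan works as well). _exampleIsNaN is a hypothetical helper, not
# part of this module's API.
def _exampleIsNaN(v):
  """ only NaN compares unequal to itself """
  return v != v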
def OptimizeMol(mol,bm,atomMatches=None,excludedVolumes=None,
forceConstant=1200.0,
maxPasses=5,verbose=False):
""" carries out a UFF optimization for a molecule optionally subject
to the constraints in a bounds matrix
- atomMatches, if provided, is a sequence of sequences
- forceConstant is the force constant of the spring used to enforce
the constraints
returns a 2-tuple:
1) the energy of the initial conformation
2) the energy post-embedding
NOTE that these energies include the energies of the constraints
>>> m = Chem.MolFromSmiles('OCCN')
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.5)
>>> pcophore.setUpperBound(0,1, 2.8)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
10
>>> testM = embeds[0]
Do the optimization:
>>> e1,e2 = OptimizeMol(testM,bm,atomMatches=atomMatch)
Optimizing should have lowered the energy:
>>> e2 < e1
True
Check the constrained distance:
>>> conf = testM.GetConformer(0)
>>> p0 = conf.GetAtomPosition(0)
>>> p3 = conf.GetAtomPosition(3)
>>> d03 = p0.Distance(p3)
>>> d03 >= pcophore.getLowerBound(0,1)-.01
True
>>> d03 <= pcophore.getUpperBound(0,1)+.01
True
If we optimize without the distance constraints (provided via the atomMatches
argument) we're not guaranteed to get the same results, particularly in a case
  like the current one where the pharmacophore brings the atoms uncomfortably
close together:
>>> testM = embeds[1]
>>> e1,e2 = OptimizeMol(testM,bm)
>>> e2 < e1
True
>>> conf = testM.GetConformer(0)
>>> p0 = conf.GetAtomPosition(0)
>>> p3 = conf.GetAtomPosition(3)
>>> d03 = p0.Distance(p3)
>>> d03 >= pcophore.getLowerBound(0,1)-.01
True
>>> d03 <= pcophore.getUpperBound(0,1)+.01
False
"""
try:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol)
except Exception:
logger.info('Problems building molecular forcefield',exc_info=True)
return -1.0,-1.0
weights=[]
if(atomMatches):
for k in range(len(atomMatches)):
for i in atomMatches[k]:
for l in range(k+1,len(atomMatches)):
for j in atomMatches[l]:
weights.append((i,j))
for i,j in weights:
if j<i:
i,j = j,i
minV = bm[j,i]
maxV = bm[i,j]
ff.AddDistanceConstraint(i,j,minV,maxV,forceConstant)
if excludedVolumes:
nAts = mol.GetNumAtoms()
conf = mol.GetConformer()
idx = nAts
for exVol in excludedVolumes:
assert exVol.pos is not None
logger.debug('ff.AddExtraPoint(%.4f,%.4f,%.4f)'%(exVol.pos[0],exVol.pos[1],
exVol.pos[2]))
ff.AddExtraPoint(exVol.pos[0],exVol.pos[1],exVol.pos[2],True)
indices = []
for localIndices,foo,bar in exVol.featInfo:
indices += list(localIndices)
for i in range(nAts):
v = numpy.array(conf.GetAtomPosition(i))-numpy.array(exVol.pos)
d = numpy.sqrt(numpy.dot(v,v))
if i not in indices:
if d<5.0:
logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%d,%.0f)'%(i,idx,exVol.exclusionDist,1000,forceConstant))
ff.AddDistanceConstraint(i,idx,exVol.exclusionDist,1000,
forceConstant)
else:
logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%.3f,%.0f)'%(i,idx,bm[exVol.index,i],bm[i,exVol.index],forceConstant))
ff.AddDistanceConstraint(i,idx,bm[exVol.index,i],bm[i,exVol.index],
forceConstant)
idx += 1
ff.Initialize()
e1 = ff.CalcEnergy()
if isNaN(e1):
raise ValueError('bogus energy')
if verbose:
print(Chem.MolToMolBlock(mol))
for i,vol in enumerate(excludedVolumes):
pos = ff.GetExtraPointPos(i)
print(' % 7.4f % 7.4f % 7.4f As 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(pos), file=sys.stderr)
needsMore=ff.Minimize()
nPasses=0
while needsMore and nPasses<maxPasses:
needsMore=ff.Minimize()
nPasses+=1
e2 = ff.CalcEnergy()
if isNaN(e2):
raise ValueError('bogus energy')
if verbose:
print('--------')
print(Chem.MolToMolBlock(mol))
for i,vol in enumerate(excludedVolumes):
pos = ff.GetExtraPointPos(i)
print(' % 7.4f % 7.4f % 7.4f Sb 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(pos), file=sys.stderr)
ff = None
return e1,e2
def EmbedOne(mol,name,match,pcophore,count=1,silent=0,**kwargs):
""" generates statistics for a molecule's embeddings
Four energies are computed for each embedding:
1) E1: the energy (with constraints) of the initial embedding
2) E2: the energy (with constraints) of the optimized embedding
  3) E3: the energy (no constraints) of the geometry for E2
4) E4: the energy (no constraints) of the optimized free-molecule
(starting from the E3 geometry)
Returns a 9-tuple:
1) the mean value of E1
2) the sample standard deviation of E1
3) the mean value of E2
4) the sample standard deviation of E2
5) the mean value of E3
6) the sample standard deviation of E3
7) the mean value of E4
8) the sample standard deviation of E4
9) The number of embeddings that failed
"""
global _times
atomMatch = [list(x.GetAtomIds()) for x in match]
bm,ms,nFailed = EmbedPharmacophore(mol,atomMatch,pcophore,count=count,
silent=silent,**kwargs)
e1s = []
e2s = []
e3s = []
e4s = []
d12s = []
d23s = []
d34s = []
for m in ms:
t1 = time.time()
try:
e1,e2 = OptimizeMol(m,bm,atomMatch)
except ValueError:
pass
else:
t2 = time.time()
_times['opt1'] = _times.get('opt1',0)+t2-t1
e1s.append(e1)
e2s.append(e2)
d12s.append(e1-e2)
t1 = time.time()
try:
e3,e4 = OptimizeMol(m,bm)
except ValueError:
pass
else:
t2 = time.time()
_times['opt2'] = _times.get('opt2',0)+t2-t1
e3s.append(e3)
e4s.append(e4)
d23s.append(e2-e3)
d34s.append(e3-e4)
count += 1
try:
e1,e1d = Stats.MeanAndDev(e1s)
except Exception:
e1 = -1.0
e1d=-1.0
try:
e2,e2d = Stats.MeanAndDev(e2s)
except Exception:
e2 = -1.0
e2d=-1.0
try:
e3,e3d = Stats.MeanAndDev(e3s)
except Exception:
e3 = -1.0
e3d=-1.0
try:
e4,e4d = Stats.MeanAndDev(e4s)
except Exception:
e4 = -1.0
e4d=-1.0
if not silent:
print('%s(%d): %.2f(%.2f) -> %.2f(%.2f) : %.2f(%.2f) -> %.2f(%.2f)' %
(name,nFailed,e1,e1d,e2,e2d,e3,e3d,e4,e4d))
return e1,e1d,e2,e2d,e3,e3d,e4,e4d,nFailed
def MatchPharmacophoreToMol(mol, featFactory, pcophore):
""" generates a list of all possible mappings of a pharmacophore to a molecule
Returns a 2-tuple:
1) a boolean indicating whether or not all features were found
2) a list, numFeatures long, of sequences of features
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
>>> m = Chem.MolFromSmiles('FCCN')
>>> match,mList = MatchPharmacophoreToMol(m,featFactory,pcophore)
>>> match
True
Two feature types:
>>> len(mList)
2
The first feature type, Acceptor, has two matches:
>>> len(mList[0])
2
>>> mList[0][0].GetAtomIds()
(0,)
>>> mList[0][1].GetAtomIds()
(3,)
  The second feature type, Donor, has a single match:
>>> len(mList[1])
1
>>> mList[1][0].GetAtomIds()
(3,)
"""
return MatchFeatsToMol(mol, featFactory, pcophore.getFeatures())
def _getFeatDict(mol,featFactory,features):
""" **INTERNAL USE ONLY**
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> m = Chem.MolFromSmiles('FCCN')
>>> d =_getFeatDict(m,featFactory,activeFeats)
>>> sorted(list(d.keys()))
['Acceptor', 'Donor']
>>> donors = d['Donor']
>>> len(donors)
1
>>> donors[0].GetAtomIds()
(3,)
>>> acceptors = d['Acceptor']
>>> len(acceptors)
2
>>> acceptors[0].GetAtomIds()
(0,)
>>> acceptors[1].GetAtomIds()
(3,)
"""
molFeats = {}
for feat in features:
family = feat.GetFamily()
if not family in molFeats:
matches = featFactory.GetFeaturesForMol(mol,includeOnly=family)
molFeats[family] = matches
return molFeats
def MatchFeatsToMol(mol, featFactory, features):
""" generates a list of all possible mappings of each feature to a molecule
Returns a 2-tuple:
1) a boolean indicating whether or not all features were found
2) a list, numFeatures long, of sequences of features
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> m = Chem.MolFromSmiles('FCCN')
>>> match,mList = MatchFeatsToMol(m,featFactory,activeFeats)
>>> match
True
Two feature types:
>>> len(mList)
2
The first feature type, Acceptor, has two matches:
>>> len(mList[0])
2
>>> mList[0][0].GetAtomIds()
(0,)
>>> mList[0][1].GetAtomIds()
(3,)
  The second feature type, Donor, has a single match:
>>> len(mList[1])
1
>>> mList[1][0].GetAtomIds()
(3,)
"""
molFeats = _getFeatDict(mol,featFactory,features)
res = []
for feat in features:
matches = molFeats.get(feat.GetFamily(),[])
if len(matches) == 0 :
return False, None
res.append(matches)
return True, res
def CombiEnum(sequence):
""" This generator takes a sequence of sequences as an argument and
provides all combinations of the elements of the subsequences:
>>> gen = CombiEnum(((1,2),(10,20)))
>>> next(gen)
[1, 10]
>>> next(gen)
[1, 20]
>>> [x for x in CombiEnum(((1,2),(10,20)))]
[[1, 10], [1, 20], [2, 10], [2, 20]]
>>> [x for x in CombiEnum(((1,2),(10,20),(100,200)))]
[[1, 10, 100], [1, 10, 200], [1, 20, 100], [1, 20, 200], [2, 10, 100], [2, 10, 200], [2, 20, 100], [2, 20, 200]]
"""
if not len(sequence):
yield []
elif len(sequence)==1:
for entry in sequence[0]:
yield [entry]
else:
for entry in sequence[0]:
for subVal in CombiEnum(sequence[1:]):
yield [entry]+subVal
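# Hedged aside: for flat input sequences the generator above yields the same
# combinations, in the same order, as itertools.product (just as lists instead of
# tuples). _exampleCombiEnumVsProduct is a hypothetical helper, not part of the API.
def _exampleCombiEnumVsProduct(seqs=((1,2),(10,20))):
  import itertools
  return [list(t) for t in itertools.product(*seqs)] == [x for x in CombiEnum(seqs)]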
def DownsampleBoundsMatrix(bm,indices,maxThresh=4.0):
""" removes rows from a bounds matrix that are
that are greater than a threshold value away from a set of
other points
returns the modfied bounds matrix
The goal of this function is to remove rows from the bounds matrix
that correspond to atoms that are likely to be quite far from
the pharmacophore we're interested in. Because the bounds smoothing
we eventually have to do is N^3, this can be a big win
>>> boundsMat = numpy.array([[0.0,3.0,4.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,),3.5)
>>> bm.shape == (2, 2)
True
we don't touch the input matrix:
>>> boundsMat.shape == (3, 3)
True
>>> print(', '.join(['%.3f'%x for x in bm[0]]))
0.000, 3.000
>>> print(', '.join(['%.3f'%x for x in bm[1]]))
2.000, 0.000
if the threshold is high enough, we don't do anything:
>>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,),5.0)
>>> bm.shape == (3, 3)
True
If there's a max value that's close enough to *any* of the indices
we pass in, we'll keep it:
>>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,1),3.5)
>>> bm.shape == (3, 3)
True
"""
nPts = bm.shape[0]
k = numpy.zeros(nPts,numpy.int0)
for idx in indices: k[idx]=1
for i in indices:
row = bm[i]
for j in range(i+1,nPts):
if not k[j] and row[j]<maxThresh:
k[j]=1
keep = numpy.nonzero(k)[0]
bm2 = numpy.zeros((len(keep),len(keep)),numpy.float)
for i,idx in enumerate(keep):
row = bm[idx]
bm2[i] = numpy.take(row,keep)
return bm2
def CoarseScreenPharmacophore(atomMatch,bounds,pcophore,verbose=False):
"""
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 1.1)
>>> pcophore.setUpperBound(0,1, 1.9)
>>> pcophore.setLowerBound(0,2, 2.1)
>>> pcophore.setUpperBound(0,2, 2.9)
>>> pcophore.setLowerBound(1,2, 2.1)
>>> pcophore.setUpperBound(1,2, 3.9)
>>> bounds = numpy.array([[0,2,3],[1,0,4],[2,3,0]],numpy.float)
>>> CoarseScreenPharmacophore(((0,),(1,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((0,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((1,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((0,),(1,),(2,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((1,),(0,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((2,),(1,),(0,)),bounds,pcophore)
False
# we ignore the point locations here and just use their definitions:
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.1)
>>> pcophore.setUpperBound(0,1, 2.9)
>>> pcophore.setLowerBound(0,2, 2.1)
>>> pcophore.setUpperBound(0,2, 2.9)
>>> pcophore.setLowerBound(0,3, 2.1)
>>> pcophore.setUpperBound(0,3, 2.9)
>>> pcophore.setLowerBound(1,2, 1.1)
>>> pcophore.setUpperBound(1,2, 1.9)
>>> pcophore.setLowerBound(1,3, 1.1)
>>> pcophore.setUpperBound(1,3, 1.9)
>>> pcophore.setLowerBound(2,3, 1.1)
>>> pcophore.setUpperBound(2,3, 1.9)
>>> bounds = numpy.array([[0,3,3,3],[2,0,2,2],[2,1,0,2],[2,1,1,0]],numpy.float)
>>> CoarseScreenPharmacophore(((0,),(1,),(2,),(3,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((0,),(1,),(3,),(2,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((1,),(0,),(3,),(2,)),bounds,pcophore)
False
"""
for k in range(len(atomMatch)):
if len(atomMatch[k])==1:
for l in range(k+1,len(atomMatch)):
if len(atomMatch[l])==1:
idx0 = atomMatch[k][0]
idx1 = atomMatch[l][0]
if idx1<idx0:
idx0,idx1=idx1,idx0
if bounds[idx1,idx0] >= pcophore.getUpperBound(k, l) or \
bounds[idx0,idx1] <= pcophore.getLowerBound(k, l) :
if verbose:
print('\t (%d,%d) [%d,%d] fail'%(idx1,idx0,k,l))
print('\t %f,%f - %f,%f' %
(bounds[idx1,idx0],pcophore.getUpperBound(k,l),
bounds[idx0,idx1],pcophore.getLowerBound(k,l)))
#logger.debug('\t >%s'%str(atomMatch))
#logger.debug()
#logger.debug('\t %f,%f - %f,%f'%(bounds[idx1,idx0],pcophore.getUpperBound(k,l),
# bounds[idx0,idx1],pcophore.getLowerBound(k,l)))
return False
return True
def Check2DBounds(atomMatch,mol,pcophore):
""" checks to see if a particular mapping of features onto
a molecule satisfies a pharmacophore's 2D restrictions
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
>>> pcophore.setUpperBound2D(0,1,3)
>>> m = Chem.MolFromSmiles('FCC(N)CN')
>>> Check2DBounds(((0,),(3,)),m,pcophore)
True
>>> Check2DBounds(((0,),(5,)),m,pcophore)
False
"""
dm = Chem.GetDistanceMatrix(mol,False,False,False)
nFeats = len(atomMatch)
for i in range(nFeats):
for j in range(i+1,nFeats):
lowerB = pcophore._boundsMat2D[j,i] #lowerB = pcophore.getLowerBound2D(i,j)
upperB = pcophore._boundsMat2D[i,j] #upperB = pcophore.getUpperBound2D(i,j)
dij=10000
for atomI in atomMatch[i]:
for atomJ in atomMatch[j]:
try:
dij = min(dij,dm[atomI,atomJ])
except IndexError:
print('bad indices:',atomI,atomJ)
print(' shape:',dm.shape)
print(' match:',atomMatch)
print(' mol:')
print(Chem.MolToMolBlock(mol))
raise IndexError
if dij<lowerB or dij>upperB:
return False
return True
def _checkMatch(match,mol,bounds,pcophore,use2DLimits):
""" **INTERNAL USE ONLY**
checks whether a particular atom match can be satisfied by
a molecule
"""
atomMatch = ChemicalFeatures.GetAtomMatch(match)
if not atomMatch:
return None
elif use2DLimits:
if not Check2DBounds(atomMatch,mol,pcophore):
return None
if not CoarseScreenPharmacophore(atomMatch,bounds,pcophore):
return None
return atomMatch
def ConstrainedEnum(matches,mol,pcophore,bounds,use2DLimits=False,
index=0,soFar=[]):
""" Enumerates the list of atom mappings a molecule
has to a particular pharmacophore.
We do check distance bounds here.
"""
nMatches = len(matches)
if index>=nMatches:
yield soFar,[]
elif index==nMatches-1:
for entry in matches[index]:
nextStep = soFar+[entry]
if index != 0:
atomMatch = _checkMatch(nextStep,mol,bounds,pcophore,use2DLimits)
else:
atomMatch = ChemicalFeatures.GetAtomMatch(nextStep)
if atomMatch:
yield soFar+[entry],atomMatch
else:
for entry in matches[index]:
nextStep = soFar+[entry]
if index != 0:
atomMatch = _checkMatch(nextStep,mol,bounds,pcophore,use2DLimits)
if not atomMatch:
continue
for val in ConstrainedEnum(matches,mol,pcophore,bounds,use2DLimits=use2DLimits,
index=index+1,soFar=nextStep):
if val:
yield val
def MatchPharmacophore(matches,bounds,pcophore,useDownsampling=False,
use2DLimits=False,mol=None,excludedVolumes=None,
useDirs=False):
"""
if use2DLimits is set, the molecule must also be provided and topological
distances will also be used to filter out matches
"""
for match,atomMatch in ConstrainedEnum(matches,mol,pcophore,bounds,
use2DLimits=use2DLimits):
bm = bounds.copy()
    bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=useDirs,mol=mol)
if excludedVolumes:
localEvs = []
for eV in excludedVolumes:
featInfo = []
for i,entry in enumerate(atomMatch):
info = list(eV.featInfo[i])
info[0] = entry
featInfo.append(info)
localEvs.append(ExcludedVolume.ExcludedVolume(featInfo,eV.index,
eV.exclusionDist))
bm = AddExcludedVolumes(bm,localEvs,smoothIt=False)
sz = bm.shape[0]
if useDownsampling:
indices = []
for entry in atomMatch:
indices.extend(entry)
if excludedVolumes:
for vol in localEvs:
indices.append(vol.index)
bm = DownsampleBoundsMatrix(bm,indices)
if DG.DoTriangleSmoothing(bm):
return 0,bm,match,(sz,bm.shape[0])
return 1,None,None,None
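# A hedged end-to-end sketch (hypothetical helper, not part of this module's API) of how
# the pieces above are typically chained, mirroring this module's unit tests: enumerate
# candidate feature matches, screen them against the molecule's bounds matrix, then embed
# and optimize the survivors. Embedding and optimization can still fail at run time.
def _examplePharmacophoreSearch(mol,featFactory,pcophore,count=5):
  bounds = MolDG.GetMoleculeBoundsMatrix(mol)
  DG.DoTriangleSmoothing(bounds)
  canMatch,matches = MatchPharmacophoreToMol(mol,featFactory,pcophore)
  if not canMatch:
    return None
  failed,bm,match,details = MatchPharmacophore(matches,bounds,pcophore,useDownsampling=True)
  if failed:
    return None
  atomMatch = [list(x.GetAtomIds()) for x in match]
  bm,embeds,nFailed = EmbedPharmacophore(mol,atomMatch,pcophore,count=count,silent=True)
  # each entry is the (initial, optimized) energy pair returned by OptimizeMol
  return [OptimizeMol(embed,bm,atomMatches=atomMatch) for embed in embeds]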
def GetAllPharmacophoreMatches(matches,bounds,pcophore,useDownsampling=0,
progressCallback=None,
use2DLimits=False,mol=None,
verbose=False):
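  """ returns a list of the feature-match combinations (from CombiEnum) that pass the
  2D screen (when use2DLimits and mol are provided) and the coarse distance screen and
  whose pharmacophore-adjusted bounds matrix can still be triangle smoothed.
  Unlike MatchPharmacophore(), which stops at the first workable match, this collects
  every one that survives; progressCallback, if provided, is called with a running
  count of the candidates that reached the smoothing step.
  """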
res = []
nDone = 0
for match in CombiEnum(matches):
atomMatch = ChemicalFeatures.GetAtomMatch(match)
if atomMatch and use2DLimits and mol:
pass2D = Check2DBounds(atomMatch,mol,pcophore)
if verbose:
print('..',atomMatch)
print(' ..Pass2d:',pass2D)
else:
pass2D = True
if atomMatch and pass2D and \
CoarseScreenPharmacophore(atomMatch,bounds,pcophore,verbose=verbose):
if verbose:
print(' ..CoarseScreen: Pass')
bm = bounds.copy()
if verbose:
print('pre update:')
for row in bm:
print(' ',' '.join(['% 4.2f'%x for x in row]))
      bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore)
sz = bm.shape[0]
if verbose:
print('pre downsample:')
for row in bm:
print(' ',' '.join(['% 4.2f'%x for x in row]))
if useDownsampling:
indices = []
for entry in atomMatch:
indices += list(entry)
bm = DownsampleBoundsMatrix(bm,indices)
if verbose:
print('post downsample:')
for row in bm:
print(' ',' '.join(['% 4.2f'%x for x in row]))
if DG.DoTriangleSmoothing(bm):
res.append(match)
elif verbose:
print('cannot smooth')
nDone+=1
if progressCallback:
progressCallback(nDone)
return res
def ComputeChiralVolume(mol,centerIdx,confId=-1):
""" Computes the chiral volume of an atom
We're using the chiral volume formula from Figure 7 of
Blaney and Dixon, Rev. Comp. Chem. V, 299-335 (1994)
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
R configuration atoms give negative volumes:
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'R'
>>> ComputeChiralVolume(mol,1) < 0
True
S configuration atoms give positive volumes:
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'S'
>>> ComputeChiralVolume(mol,1) > 0
True
Non-chiral (or non-specified) atoms give zero volume:
>>> ComputeChiralVolume(mol,0) == 0.0
True
We also work on 3-coordinate atoms (with implicit Hs):
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r-3.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'R'
>>> ComputeChiralVolume(mol,1)<0
True
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s-3.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'S'
>>> ComputeChiralVolume(mol,1)>0
True
"""
conf = mol.GetConformer(confId)
Chem.AssignStereochemistry(mol)
center = mol.GetAtomWithIdx(centerIdx)
if not center.HasProp('_CIPCode'):
return 0.0
nbrs = center.GetNeighbors()
nbrRanks = []
for nbr in nbrs:
rank = int(nbr.GetProp('_CIPRank'))
pos = conf.GetAtomPosition(nbr.GetIdx())
nbrRanks.append((rank,pos))
# if we only have three neighbors (i.e. the determining H isn't present)
# then use the central atom as the fourth point:
if len(nbrRanks)==3:
nbrRanks.append((-1,conf.GetAtomPosition(centerIdx)))
nbrRanks.sort()
ps = [x[1] for x in nbrRanks]
v1 = ps[0]-ps[3]
v2 = ps[1]-ps[3]
v3 = ps[2]-ps[3]
res = v1.DotProduct(v2.CrossProduct(v3))
return res
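# A hedged sketch (hypothetical helper, not part of this module's API) of the signed
# triple product the function above relies on: swapping any two substituent positions
# flips the sign, which is what distinguishes the R and S arrangements.
def _exampleSignedVolume(swap=False):
  pts = [numpy.array(p) for p in ((1.,0.,0.),(0.,1.,0.),(0.,0.,1.),(0.,0.,0.))]
  if swap:
    pts[0],pts[1] = pts[1],pts[0]
  v1,v2,v3 = pts[0]-pts[3],pts[1]-pts[3],pts[2]-pts[3]
  return float(numpy.dot(v1,numpy.cross(v2,v3)))  # +1.0 unswapped, -1.0 swapped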
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/Pharm3D/EmbedLib.py",
"copies": "1",
"size": "38756",
"license": "bsd-3-clause",
"hash": -8048911424920958000,
"line_mean": 30.4833468725,
"line_max": 130,
"alpha_frac": 0.6207554959,
"autogenerated": false,
"ratio": 3.03611437524481,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9021384317356422,
"avg_score": 0.027097110757677557,
"num_lines": 1231
} |
from __future__ import print_function
from rdkit import RDConfig
import sys, time, math
from rdkit.ML.Data import Stats
import rdkit.DistanceGeometry as DG
from rdkit import Chem
import numpy
from rdkit.Chem import rdDistGeom as MolDG
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem import ChemicalForceFields
import Pharmacophore, ExcludedVolume
from rdkit import Geometry
_times = {}
from rdkit import RDLogger as logging
logger = logging.logger()
defaultFeatLength = 2.0
def GetAtomHeavyNeighbors(atom):
""" returns a list of the heavy-atom neighbors of the
atom passed in:
>>> m = Chem.MolFromSmiles('CCO')
>>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(0))
>>> len(l)
1
>>> isinstance(l[0],Chem.Atom)
True
>>> l[0].GetIdx()
1
>>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(1))
>>> len(l)
2
>>> l[0].GetIdx()
0
>>> l[1].GetIdx()
2
"""
res = []
for nbr in atom.GetNeighbors():
if nbr.GetAtomicNum() != 1:
res.append(nbr)
return res
def ReplaceGroup(match, bounds, slop=0.01, useDirs=False, dirLength=defaultFeatLength):
""" Adds an entry at the end of the bounds matrix for a point at
the center of a multi-point feature
returns a 2-tuple:
new bounds mat
index of point added
>>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
>>> match = [0,1,2]
>>> bm,idx = ReplaceGroup(match,boundsMat,slop=0.0)
the index is at the end:
>>> idx == 3
True
and the matrix is one bigger:
>>> bm.shape == (4, 4)
True
but the original bounds mat is not altered:
>>> boundsMat.shape == (3, 3)
True
We make the assumption that the points of the
feature form a regular polygon, are listed in order
(i.e. pt 0 is a neighbor to pt 1 and pt N-1)
and that the replacement point goes at the center:
>>> print(', '.join(['%.3f'%x for x in bm[-1]]))
0.577, 0.577, 0.577, 0.000
>>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
1.155, 1.155, 1.155, 0.000
The slop argument (default = 0.01) is fractional:
>>> bm,idx = ReplaceGroup(match,boundsMat)
>>> print(', '.join(['%.3f'%x for x in bm[-1]]))
0.572, 0.572, 0.572, 0.000
>>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
1.166, 1.166, 1.166, 0.000
"""
maxVal = -1000.0
minVal = 1e8
nPts = len(match)
for i in range(nPts):
idx0 = match[i]
if i < nPts - 1:
idx1 = match[i + 1]
else:
idx1 = match[0]
if idx1 < idx0:
idx0, idx1 = idx1, idx0
minVal = min(minVal, bounds[idx1, idx0])
maxVal = max(maxVal, bounds[idx0, idx1])
maxVal *= (1 + slop)
minVal *= (1 - slop)
scaleFact = 1.0 / (2.0 * math.sin(math.pi / nPts))
minVal *= scaleFact
maxVal *= scaleFact
replaceIdx = bounds.shape[0]
if not useDirs:
bm = numpy.zeros((bounds.shape[0] + 1, bounds.shape[1] + 1), numpy.float)
else:
bm = numpy.zeros((bounds.shape[0] + 2, bounds.shape[1] + 2), numpy.float)
bm[0:bounds.shape[0], 0:bounds.shape[1]] = bounds
bm[:replaceIdx, replaceIdx] = 1000.
if useDirs:
bm[:replaceIdx + 1, replaceIdx + 1] = 1000.
# set the feature - direction point bounds:
bm[replaceIdx, replaceIdx + 1] = dirLength + slop
bm[replaceIdx + 1, replaceIdx] = dirLength - slop
for idx1 in match:
bm[idx1, replaceIdx] = maxVal
bm[replaceIdx, idx1] = minVal
if useDirs:
# set the point - direction point bounds:
bm[idx1, replaceIdx + 1] = numpy.sqrt(bm[replaceIdx, replaceIdx + 1]**2 + maxVal**2)
bm[replaceIdx + 1, idx1] = numpy.sqrt(bm[replaceIdx + 1, replaceIdx]**2 + minVal**2)
return bm, replaceIdx
def EmbedMol(mol, bm, atomMatch=None, weight=2.0, randomSeed=-1, excludedVolumes=None):
""" Generates an embedding for a molecule based on a bounds matrix and adds
a conformer (id 0) to the molecule
if the optional argument atomMatch is provided, it will be used to provide
supplemental weights for the embedding routine (used in the optimization
phase to ensure that the resulting geometry really does satisfy the
pharmacophore).
if the excludedVolumes is provided, it should be a sequence of
ExcludedVolume objects
>>> m = Chem.MolFromSmiles('c1ccccc1C')
>>> bounds = MolDG.GetMoleculeBoundsMatrix(m)
>>> bounds.shape == (7, 7)
True
>>> m.GetNumConformers()
0
>>> EmbedMol(m,bounds,randomSeed=23)
>>> m.GetNumConformers()
1
"""
nAts = mol.GetNumAtoms()
weights = []
if (atomMatch):
for i in range(len(atomMatch)):
for j in range(i + 1, len(atomMatch)):
weights.append((i, j, weight))
if (excludedVolumes):
for vol in excludedVolumes:
idx = vol.index
# excluded volumes affect every other atom:
for i in range(nAts):
weights.append((i, idx, weight))
coords = DG.EmbedBoundsMatrix(bm, weights=weights, numZeroFail=1, randomSeed=randomSeed)
#for row in coords:
# print(', '.join(['%.2f'%x for x in row]))
conf = Chem.Conformer(nAts)
conf.SetId(0)
for i in range(nAts):
conf.SetAtomPosition(i, list(coords[i]))
if excludedVolumes:
for vol in excludedVolumes:
vol.pos = numpy.array(coords[vol.index])
#print(' % 7.4f % 7.4f % 7.4f Ar 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(coords[-1]), file=sys.stderr)
mol.AddConformer(conf)
def AddExcludedVolumes(bm, excludedVolumes, smoothIt=True):
""" Adds a set of excluded volumes to the bounds matrix
and returns the new matrix
excludedVolumes is a list of ExcludedVolume objects
>>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
>>> ev1 = ExcludedVolume.ExcludedVolume(([(0,),0.5,1.0],),exclusionDist=1.5)
>>> bm = AddExcludedVolumes(boundsMat,(ev1,))
  the resulting matrix is one bigger:
>>> bm.shape == (4, 4)
True
and the original bounds mat is not altered:
>>> boundsMat.shape == (3, 3)
True
>>> print(', '.join(['%.3f'%x for x in bm[-1]]))
0.500, 1.500, 1.500, 0.000
>>> print(', '.join(['%.3f'%x for x in bm[:,-1]]))
1.000, 3.000, 3.000, 0.000
"""
oDim = bm.shape[0]
dim = oDim + len(excludedVolumes)
res = numpy.zeros((dim, dim), numpy.float)
res[:oDim, :oDim] = bm
for i, vol in enumerate(excludedVolumes):
bmIdx = oDim + i
vol.index = bmIdx
# set values to all the atoms:
res[bmIdx, :bmIdx] = vol.exclusionDist
res[:bmIdx, bmIdx] = 1000.0
# set values to our defining features:
for indices, minV, maxV in vol.featInfo:
for index in indices:
try:
res[bmIdx, index] = minV
res[index, bmIdx] = maxV
except IndexError:
logger.error('BAD INDEX: res[%d,%d], shape is %s' % (bmIdx, index, str(res.shape)))
raise IndexError
# set values to other excluded volumes:
for j in range(bmIdx + 1, dim):
res[bmIdx, j:dim] = 0
res[j:dim, bmIdx] = 1000
if smoothIt:
DG.DoTriangleSmoothing(res)
return res
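# A hedged sketch (hypothetical helper, not part of this module's API): besides growing
# the matrix, AddExcludedVolumes records where each volume landed by setting its .index
# attribute, which EmbedMol and OptimizeMol rely on later.
def _exampleExcludedVolumeIndex():
  bm = numpy.array([[0.0, 2.0, 2.0], [1.0, 0.0, 2.0], [1.0, 1.0, 0.0]])
  ev = ExcludedVolume.ExcludedVolume(([(0, ), 0.5, 1.0], ), exclusionDist=1.5)
  bm2 = AddExcludedVolumes(bm, (ev, ))
  return bm2.shape, ev.index  # ((4, 4), 3): the volume was appended as row/column 3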
def UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=False, dirLength=defaultFeatLength,
mol=None):
""" loops over a distance bounds matrix and replaces the elements
that are altered by a pharmacophore
**NOTE** this returns the resulting bounds matrix, but it may also
alter the input matrix
atomMatch is a sequence of sequences containing atom indices
for each of the pharmacophore's features.
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 1.0)
>>> pcophore.setUpperBound(0,1, 2.0)
>>> boundsMat = numpy.array([[0.0,3.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> atomMatch = ((0,),(1,))
>>> bm = UpdatePharmacophoreBounds(boundsMat,atomMatch,pcophore)
In this case, there are no multi-atom features, so the result matrix
is the same as the input:
>>> bm is boundsMat
True
this means, of course, that the input boundsMat is altered:
>>> print(', '.join(['%.3f'%x for x in boundsMat[0]]))
0.000, 2.000, 3.000
>>> print(', '.join(['%.3f'%x for x in boundsMat[1]]))
1.000, 0.000, 3.000
>>> print(', '.join(['%.3f'%x for x in boundsMat[2]]))
2.000, 2.000, 0.000
"""
replaceMap = {}
for i, matchI in enumerate(atomMatch):
if len(matchI) > 1:
bm, replaceIdx = ReplaceGroup(matchI, bm, useDirs=useDirs)
replaceMap[i] = replaceIdx
for i, matchI in enumerate(atomMatch):
mi = replaceMap.get(i, matchI[0])
for j in range(i + 1, len(atomMatch)):
mj = replaceMap.get(j, atomMatch[j][0])
if mi < mj:
idx0, idx1 = mi, mj
else:
idx0, idx1 = mj, mi
bm[idx0, idx1] = pcophore.getUpperBound(i, j)
bm[idx1, idx0] = pcophore.getLowerBound(i, j)
return bm
def EmbedPharmacophore(mol, atomMatch, pcophore, randomSeed=-1, count=10, smoothFirst=True,
silent=False, bounds=None, excludedVolumes=None, targetNumber=-1,
useDirs=False):
""" Generates one or more embeddings for a molecule that satisfy a pharmacophore
atomMatch is a sequence of sequences containing atom indices
for each of the pharmacophore's features.
  - count: the maximum number of attempts to make at generating an embedding
  - smoothFirst: toggles triangle smoothing of the molecular bounds matrix
- bounds: if provided, should be the molecular bounds matrix. If this isn't
provided, the matrix will be generated.
- targetNumber: if this number is positive, it provides a maximum number
of embeddings to generate (i.e. we'll have count attempts to generate
targetNumber embeddings).
returns: a 3 tuple:
1) the molecular bounds matrix adjusted for the pharmacophore
2) a list of embeddings (molecules with a single conformer)
3) the number of failed attempts at embedding
>>> m = Chem.MolFromSmiles('OCCN')
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.5)
>>> pcophore.setUpperBound(0,1, 3.5)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
10
>>> nFail
0
Set up a case that can't succeed:
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.0)
>>> pcophore.setUpperBound(0,1, 2.1)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
0
>>> nFail
10
"""
global _times
if not hasattr(mol, '_chiralCenters'):
mol._chiralCenters = Chem.FindMolChiralCenters(mol)
if bounds is None:
bounds = MolDG.GetMoleculeBoundsMatrix(mol)
if smoothFirst:
DG.DoTriangleSmoothing(bounds)
bm = bounds.copy()
#print '------------'
#print 'initial'
#for row in bm:
# print ' ',' '.join(['% 4.2f'%x for x in row])
#print '------------'
bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=useDirs, mol=mol)
if excludedVolumes:
bm = AddExcludedVolumes(bm, excludedVolumes, smoothIt=False)
if not DG.DoTriangleSmoothing(bm):
raise ValueError("could not smooth bounds matrix")
#print '------------'
#print 'post replace and smooth'
#for row in bm:
# print ' ',' '.join(['% 4.2f'%x for x in row])
#print '------------'
if targetNumber <= 0:
targetNumber = count
nFailed = 0
res = []
for i in range(count):
tmpM = bm[:, :]
m2 = Chem.Mol(mol)
t1 = time.time()
try:
if randomSeed <= 0:
seed = i * 10 + 1
else:
seed = i * 10 + randomSeed
EmbedMol(m2, tmpM, atomMatch, randomSeed=seed, excludedVolumes=excludedVolumes)
except ValueError:
if not silent:
logger.info('Embed failed')
nFailed += 1
else:
t2 = time.time()
_times['embed'] = _times.get('embed', 0) + t2 - t1
keepIt = True
for idx, stereo in mol._chiralCenters:
if stereo in ('R', 'S'):
vol = ComputeChiralVolume(m2, idx)
if (stereo=='R' and vol>=0) or \
(stereo=='S' and vol<=0):
keepIt = False
break
if keepIt:
res.append(m2)
else:
logger.debug('Removed embedding due to chiral constraints.')
if len(res) == targetNumber:
break
return bm, res, nFailed
def isNaN(v):
""" provides an OS independent way of detecting NaNs
This is intended to be used with values returned from the C++
side of things.
We can't actually test this from Python (which traps
zero division errors), but it would work something like
this if we could:
>>> isNaN(0)
False
  #>>> isNaN(1/0)
#True
"""
if v != v and sys.platform == 'win32':
return True
elif v == 0 and v == 1 and sys.platform != 'win32':
return True
return False
def OptimizeMol(mol, bm, atomMatches=None, excludedVolumes=None, forceConstant=1200.0, maxPasses=5,
verbose=False):
""" carries out a UFF optimization for a molecule optionally subject
to the constraints in a bounds matrix
- atomMatches, if provided, is a sequence of sequences
- forceConstant is the force constant of the spring used to enforce
the constraints
returns a 2-tuple:
1) the energy of the initial conformation
2) the energy post-embedding
NOTE that these energies include the energies of the constraints
>>> m = Chem.MolFromSmiles('OCCN')
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.5)
>>> pcophore.setUpperBound(0,1, 2.8)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
10
>>> testM = embeds[0]
Do the optimization:
>>> e1,e2 = OptimizeMol(testM,bm,atomMatches=atomMatch)
Optimizing should have lowered the energy:
>>> e2 < e1
True
Check the constrained distance:
>>> conf = testM.GetConformer(0)
>>> p0 = conf.GetAtomPosition(0)
>>> p3 = conf.GetAtomPosition(3)
>>> d03 = p0.Distance(p3)
>>> d03 >= pcophore.getLowerBound(0,1)-.01
True
>>> d03 <= pcophore.getUpperBound(0,1)+.01
True
If we optimize without the distance constraints (provided via the atomMatches
argument) we're not guaranteed to get the same results, particularly in a case
  like the current one where the pharmacophore brings the atoms uncomfortably
close together:
>>> testM = embeds[1]
>>> e1,e2 = OptimizeMol(testM,bm)
>>> e2 < e1
True
>>> conf = testM.GetConformer(0)
>>> p0 = conf.GetAtomPosition(0)
>>> p3 = conf.GetAtomPosition(3)
>>> d03 = p0.Distance(p3)
>>> d03 >= pcophore.getLowerBound(0,1)-.01
True
>>> d03 <= pcophore.getUpperBound(0,1)+.01
False
"""
try:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol)
except Exception:
logger.info('Problems building molecular forcefield', exc_info=True)
return -1.0, -1.0
weights = []
if (atomMatches):
for k in range(len(atomMatches)):
for i in atomMatches[k]:
for l in range(k + 1, len(atomMatches)):
for j in atomMatches[l]:
weights.append((i, j))
for i, j in weights:
if j < i:
i, j = j, i
minV = bm[j, i]
maxV = bm[i, j]
ff.AddDistanceConstraint(i, j, minV, maxV, forceConstant)
if excludedVolumes:
nAts = mol.GetNumAtoms()
conf = mol.GetConformer()
idx = nAts
for exVol in excludedVolumes:
assert exVol.pos is not None
logger.debug('ff.AddExtraPoint(%.4f,%.4f,%.4f)' % (exVol.pos[0], exVol.pos[1], exVol.pos[2]))
ff.AddExtraPoint(exVol.pos[0], exVol.pos[1], exVol.pos[2], True)
indices = []
for localIndices, foo, bar in exVol.featInfo:
indices += list(localIndices)
for i in range(nAts):
v = numpy.array(conf.GetAtomPosition(i)) - numpy.array(exVol.pos)
d = numpy.sqrt(numpy.dot(v, v))
if i not in indices:
if d < 5.0:
logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%d,%.0f)' %
(i, idx, exVol.exclusionDist, 1000, forceConstant))
ff.AddDistanceConstraint(i, idx, exVol.exclusionDist, 1000, forceConstant)
else:
logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%.3f,%.0f)' %
(i, idx, bm[exVol.index, i], bm[i, exVol.index], forceConstant))
ff.AddDistanceConstraint(i, idx, bm[exVol.index, i], bm[i, exVol.index], forceConstant)
idx += 1
ff.Initialize()
e1 = ff.CalcEnergy()
if isNaN(e1):
raise ValueError('bogus energy')
if verbose:
print(Chem.MolToMolBlock(mol))
for i, vol in enumerate(excludedVolumes):
pos = ff.GetExtraPointPos(i)
print(' % 7.4f % 7.4f % 7.4f As 0 0 0 0 0 0 0 0 0 0 0 0' % tuple(pos),
file=sys.stderr)
needsMore = ff.Minimize()
nPasses = 0
while needsMore and nPasses < maxPasses:
needsMore = ff.Minimize()
nPasses += 1
e2 = ff.CalcEnergy()
if isNaN(e2):
raise ValueError('bogus energy')
if verbose:
print('--------')
print(Chem.MolToMolBlock(mol))
for i, vol in enumerate(excludedVolumes):
pos = ff.GetExtraPointPos(i)
print(' % 7.4f % 7.4f % 7.4f Sb 0 0 0 0 0 0 0 0 0 0 0 0' % tuple(pos),
file=sys.stderr)
ff = None
return e1, e2
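# A hedged aside (hypothetical helper, not part of this module's API): the distance
# constraints OptimizeMol adds are simply every cross-feature pair of matched atoms;
# this reproduces that bookkeeping without building a force field.
def _exampleConstraintPairs(atomMatches=((0, ), (3, 4))):
  pairs = []
  for k in range(len(atomMatches)):
    for i in atomMatches[k]:
      for l in range(k + 1, len(atomMatches)):
        for j in atomMatches[l]:
          pairs.append((i, j))
  return pairs  # [(0, 3), (0, 4)] for the defaults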
def EmbedOne(mol, name, match, pcophore, count=1, silent=0, **kwargs):
""" generates statistics for a molecule's embeddings
Four energies are computed for each embedding:
1) E1: the energy (with constraints) of the initial embedding
2) E2: the energy (with constraints) of the optimized embedding
  3) E3: the energy (no constraints) of the geometry for E2
4) E4: the energy (no constraints) of the optimized free-molecule
(starting from the E3 geometry)
Returns a 9-tuple:
1) the mean value of E1
2) the sample standard deviation of E1
3) the mean value of E2
4) the sample standard deviation of E2
5) the mean value of E3
6) the sample standard deviation of E3
7) the mean value of E4
8) the sample standard deviation of E4
9) The number of embeddings that failed
"""
global _times
atomMatch = [list(x.GetAtomIds()) for x in match]
bm, ms, nFailed = EmbedPharmacophore(mol, atomMatch, pcophore, count=count, silent=silent,
**kwargs)
e1s = []
e2s = []
e3s = []
e4s = []
d12s = []
d23s = []
d34s = []
for m in ms:
t1 = time.time()
try:
e1, e2 = OptimizeMol(m, bm, atomMatch)
except ValueError:
pass
else:
t2 = time.time()
_times['opt1'] = _times.get('opt1', 0) + t2 - t1
e1s.append(e1)
e2s.append(e2)
d12s.append(e1 - e2)
t1 = time.time()
try:
e3, e4 = OptimizeMol(m, bm)
except ValueError:
pass
else:
t2 = time.time()
_times['opt2'] = _times.get('opt2', 0) + t2 - t1
e3s.append(e3)
e4s.append(e4)
d23s.append(e2 - e3)
d34s.append(e3 - e4)
count += 1
try:
e1, e1d = Stats.MeanAndDev(e1s)
except Exception:
e1 = -1.0
e1d = -1.0
try:
e2, e2d = Stats.MeanAndDev(e2s)
except Exception:
e2 = -1.0
e2d = -1.0
try:
e3, e3d = Stats.MeanAndDev(e3s)
except Exception:
e3 = -1.0
e3d = -1.0
try:
e4, e4d = Stats.MeanAndDev(e4s)
except Exception:
e4 = -1.0
e4d = -1.0
if not silent:
print('%s(%d): %.2f(%.2f) -> %.2f(%.2f) : %.2f(%.2f) -> %.2f(%.2f)' %
(name, nFailed, e1, e1d, e2, e2d, e3, e3d, e4, e4d))
return e1, e1d, e2, e2d, e3, e3d, e4, e4d, nFailed
def MatchPharmacophoreToMol(mol, featFactory, pcophore):
""" generates a list of all possible mappings of a pharmacophore to a molecule
Returns a 2-tuple:
1) a boolean indicating whether or not all features were found
2) a list, numFeatures long, of sequences of features
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
>>> m = Chem.MolFromSmiles('FCCN')
>>> match,mList = MatchPharmacophoreToMol(m,featFactory,pcophore)
>>> match
True
Two feature types:
>>> len(mList)
2
The first feature type, Acceptor, has two matches:
>>> len(mList[0])
2
>>> mList[0][0].GetAtomIds()
(0,)
>>> mList[0][1].GetAtomIds()
(3,)
  The second feature type, Donor, has a single match:
>>> len(mList[1])
1
>>> mList[1][0].GetAtomIds()
(3,)
"""
return MatchFeatsToMol(mol, featFactory, pcophore.getFeatures())
def _getFeatDict(mol, featFactory, features):
""" **INTERNAL USE ONLY**
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> m = Chem.MolFromSmiles('FCCN')
>>> d =_getFeatDict(m,featFactory,activeFeats)
>>> sorted(list(d.keys()))
['Acceptor', 'Donor']
>>> donors = d['Donor']
>>> len(donors)
1
>>> donors[0].GetAtomIds()
(3,)
>>> acceptors = d['Acceptor']
>>> len(acceptors)
2
>>> acceptors[0].GetAtomIds()
(0,)
>>> acceptors[1].GetAtomIds()
(3,)
"""
molFeats = {}
for feat in features:
family = feat.GetFamily()
if not family in molFeats:
matches = featFactory.GetFeaturesForMol(mol, includeOnly=family)
molFeats[family] = matches
return molFeats
def MatchFeatsToMol(mol, featFactory, features):
""" generates a list of all possible mappings of each feature to a molecule
Returns a 2-tuple:
1) a boolean indicating whether or not all features were found
2) a list, numFeatures long, of sequences of features
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> m = Chem.MolFromSmiles('FCCN')
>>> match,mList = MatchFeatsToMol(m,featFactory,activeFeats)
>>> match
True
Two feature types:
>>> len(mList)
2
The first feature type, Acceptor, has two matches:
>>> len(mList[0])
2
>>> mList[0][0].GetAtomIds()
(0,)
>>> mList[0][1].GetAtomIds()
(3,)
The second feature type, Donor, has a single match:
>>> len(mList[1])
1
>>> mList[1][0].GetAtomIds()
(3,)
"""
molFeats = _getFeatDict(mol, featFactory, features)
res = []
for feat in features:
matches = molFeats.get(feat.GetFamily(), [])
if len(matches) == 0:
return False, None
res.append(matches)
return True, res
def CombiEnum(sequence):
""" This generator takes a sequence of sequences as an argument and
provides all combinations of the elements of the subsequences:
>>> gen = CombiEnum(((1,2),(10,20)))
>>> next(gen)
[1, 10]
>>> next(gen)
[1, 20]
>>> [x for x in CombiEnum(((1,2),(10,20)))]
[[1, 10], [1, 20], [2, 10], [2, 20]]
>>> [x for x in CombiEnum(((1,2),(10,20),(100,200)))]
[[1, 10, 100], [1, 10, 200], [1, 20, 100], [1, 20, 200], [2, 10, 100], [2, 10, 200], [2, 20, 100], [2, 20, 200]]
"""
if not len(sequence):
yield []
elif len(sequence) == 1:
for entry in sequence[0]:
yield [entry]
else:
for entry in sequence[0]:
for subVal in CombiEnum(sequence[1:]):
yield [entry] + subVal
def DownsampleBoundsMatrix(bm, indices, maxThresh=4.0):
""" removes rows from a bounds matrix that are
that are greater than a threshold value away from a set of
other points
returns the modfied bounds matrix
The goal of this function is to remove rows from the bounds matrix
that correspond to atoms that are likely to be quite far from
the pharmacophore we're interested in. Because the bounds smoothing
we eventually have to do is N^3, this can be a big win
>>> boundsMat = numpy.array([[0.0,3.0,4.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,),3.5)
>>> bm.shape == (2, 2)
True
we don't touch the input matrix:
>>> boundsMat.shape == (3, 3)
True
>>> print(', '.join(['%.3f'%x for x in bm[0]]))
0.000, 3.000
>>> print(', '.join(['%.3f'%x for x in bm[1]]))
2.000, 0.000
if the threshold is high enough, we don't do anything:
>>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,),5.0)
>>> bm.shape == (3, 3)
True
If there's a max value that's close enough to *any* of the indices
we pass in, we'll keep it:
>>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,1),3.5)
>>> bm.shape == (3, 3)
True
"""
nPts = bm.shape[0]
k = numpy.zeros(nPts, numpy.int0)
for idx in indices:
k[idx] = 1
for i in indices:
row = bm[i]
for j in range(i + 1, nPts):
if not k[j] and row[j] < maxThresh:
k[j] = 1
keep = numpy.nonzero(k)[0]
bm2 = numpy.zeros((len(keep), len(keep)), numpy.float)
for i, idx in enumerate(keep):
row = bm[idx]
bm2[i] = numpy.take(row, keep)
return bm2
def CoarseScreenPharmacophore(atomMatch, bounds, pcophore, verbose=False):
"""
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 1.1)
>>> pcophore.setUpperBound(0,1, 1.9)
>>> pcophore.setLowerBound(0,2, 2.1)
>>> pcophore.setUpperBound(0,2, 2.9)
>>> pcophore.setLowerBound(1,2, 2.1)
>>> pcophore.setUpperBound(1,2, 3.9)
>>> bounds = numpy.array([[0,2,3],[1,0,4],[2,3,0]],numpy.float)
>>> CoarseScreenPharmacophore(((0,),(1,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((0,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((1,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((0,),(1,),(2,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((1,),(0,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((2,),(1,),(0,)),bounds,pcophore)
False
# we ignore the point locations here and just use their definitions:
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.1)
>>> pcophore.setUpperBound(0,1, 2.9)
>>> pcophore.setLowerBound(0,2, 2.1)
>>> pcophore.setUpperBound(0,2, 2.9)
>>> pcophore.setLowerBound(0,3, 2.1)
>>> pcophore.setUpperBound(0,3, 2.9)
>>> pcophore.setLowerBound(1,2, 1.1)
>>> pcophore.setUpperBound(1,2, 1.9)
>>> pcophore.setLowerBound(1,3, 1.1)
>>> pcophore.setUpperBound(1,3, 1.9)
>>> pcophore.setLowerBound(2,3, 1.1)
>>> pcophore.setUpperBound(2,3, 1.9)
>>> bounds = numpy.array([[0,3,3,3],[2,0,2,2],[2,1,0,2],[2,1,1,0]],numpy.float)
>>> CoarseScreenPharmacophore(((0,),(1,),(2,),(3,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((0,),(1,),(3,),(2,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((1,),(0,),(3,),(2,)),bounds,pcophore)
False
"""
for k in range(len(atomMatch)):
if len(atomMatch[k]) == 1:
for l in range(k + 1, len(atomMatch)):
if len(atomMatch[l]) == 1:
idx0 = atomMatch[k][0]
idx1 = atomMatch[l][0]
if idx1 < idx0:
idx0, idx1 = idx1, idx0
if bounds[idx1,idx0] >= pcophore.getUpperBound(k, l) or \
bounds[idx0,idx1] <= pcophore.getLowerBound(k, l) :
if verbose:
print('\t (%d,%d) [%d,%d] fail' % (idx1, idx0, k, l))
print('\t %f,%f - %f,%f' % (bounds[idx1, idx0], pcophore.getUpperBound(k, l),
bounds[idx0, idx1], pcophore.getLowerBound(k, l)))
#logger.debug('\t >%s'%str(atomMatch))
#logger.debug()
#logger.debug('\t %f,%f - %f,%f'%(bounds[idx1,idx0],pcophore.getUpperBound(k,l),
# bounds[idx0,idx1],pcophore.getLowerBound(k,l)))
return False
return True
def Check2DBounds(atomMatch, mol, pcophore):
""" checks to see if a particular mapping of features onto
a molecule satisfies a pharmacophore's 2D restrictions
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
>>> pcophore.setUpperBound2D(0,1,3)
>>> m = Chem.MolFromSmiles('FCC(N)CN')
>>> Check2DBounds(((0,),(3,)),m,pcophore)
True
>>> Check2DBounds(((0,),(5,)),m,pcophore)
False
"""
dm = Chem.GetDistanceMatrix(mol, False, False, False)
nFeats = len(atomMatch)
for i in range(nFeats):
for j in range(i + 1, nFeats):
lowerB = pcophore._boundsMat2D[j, i] #lowerB = pcophore.getLowerBound2D(i,j)
upperB = pcophore._boundsMat2D[i, j] #upperB = pcophore.getUpperBound2D(i,j)
dij = 10000
for atomI in atomMatch[i]:
for atomJ in atomMatch[j]:
try:
dij = min(dij, dm[atomI, atomJ])
except IndexError:
print('bad indices:', atomI, atomJ)
print(' shape:', dm.shape)
print(' match:', atomMatch)
print(' mol:')
print(Chem.MolToMolBlock(mol))
raise IndexError
if dij < lowerB or dij > upperB:
return False
return True
def _checkMatch(match, mol, bounds, pcophore, use2DLimits):
""" **INTERNAL USE ONLY**
checks whether a particular atom match can be satisfied by
a molecule
"""
atomMatch = ChemicalFeatures.GetAtomMatch(match)
if not atomMatch:
return None
elif use2DLimits:
if not Check2DBounds(atomMatch, mol, pcophore):
return None
if not CoarseScreenPharmacophore(atomMatch, bounds, pcophore):
return None
return atomMatch
def ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=False, index=0, soFar=[]):
""" Enumerates the list of atom mappings a molecule
has to a particular pharmacophore.
We do check distance bounds here.
"""
nMatches = len(matches)
if index >= nMatches:
yield soFar, []
elif index == nMatches - 1:
for entry in matches[index]:
nextStep = soFar + [entry]
if index != 0:
atomMatch = _checkMatch(nextStep, mol, bounds, pcophore, use2DLimits)
else:
atomMatch = ChemicalFeatures.GetAtomMatch(nextStep)
if atomMatch:
yield soFar + [entry], atomMatch
else:
for entry in matches[index]:
nextStep = soFar + [entry]
if index != 0:
atomMatch = _checkMatch(nextStep, mol, bounds, pcophore, use2DLimits)
if not atomMatch:
continue
for val in ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=use2DLimits,
index=index + 1, soFar=nextStep):
if val:
yield val
def MatchPharmacophore(matches, bounds, pcophore, useDownsampling=False, use2DLimits=False,
mol=None, excludedVolumes=None, useDirs=False):
"""
if use2DLimits is set, the molecule must also be provided and topological
distances will also be used to filter out matches
"""
for match, atomMatch in ConstrainedEnum(matches, mol, pcophore, bounds, use2DLimits=use2DLimits):
bm = bounds.copy()
bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore, useDirs=useDirs, mol=mol)
if excludedVolumes:
localEvs = []
for eV in excludedVolumes:
featInfo = []
for i, entry in enumerate(atomMatch):
info = list(eV.featInfo[i])
info[0] = entry
featInfo.append(info)
localEvs.append(ExcludedVolume.ExcludedVolume(featInfo, eV.index, eV.exclusionDist))
bm = AddExcludedVolumes(bm, localEvs, smoothIt=False)
sz = bm.shape[0]
if useDownsampling:
indices = []
for entry in atomMatch:
indices.extend(entry)
if excludedVolumes:
for vol in localEvs:
indices.append(vol.index)
bm = DownsampleBoundsMatrix(bm, indices)
if DG.DoTriangleSmoothing(bm):
return 0, bm, match, (sz, bm.shape[0])
return 1, None, None, None
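# Illustrative usage sketch (defined here for documentation only and never
# called by this module). It strings together the pieces defined in this file:
# match the pharmacophore's features to a molecule, look for a mapping whose
# distance bounds can be satisfied, and then collect embedding statistics with
# EmbedOne(). The fdefFile argument, the SMILES and the bound values are
# arbitrary assumptions made for the sake of the example, not part of the API.
def _exampleMatchAndEmbed(fdefFile, smiles='c1ccccc1CCN'):
  from rdkit.Chem import rdDistGeom
  featFactory = ChemicalFeatures.BuildFeatureFactory(fdefFile)
  feats = [ChemicalFeatures.FreeChemicalFeature('Donor', Geometry.Point3D(0.0, 0.0, 0.0)),
           ChemicalFeatures.FreeChemicalFeature('Aromatic', Geometry.Point3D(0.0, 0.0, 0.0))]
  pcophore = Pharmacophore.Pharmacophore(feats)
  pcophore.setLowerBound(0, 1, 2.0)
  pcophore.setUpperBound(0, 1, 5.0)
  mol = Chem.MolFromSmiles(smiles)
  canMatch, matches = MatchPharmacophoreToMol(mol, featFactory, pcophore)
  if not canMatch:
    return None
  bounds = rdDistGeom.GetMoleculeBoundsMatrix(mol)
  DG.DoTriangleSmoothing(bounds)
  failed, bm, match, stats = MatchPharmacophore(matches, bounds, pcophore, useDownsampling=True)
  if failed:
    return None
  # EmbedOne() returns the 9-tuple of energy statistics documented above:
  return EmbedOne(mol, smiles, match, pcophore, count=5, silent=1)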
def GetAllPharmacophoreMatches(matches, bounds, pcophore, useDownsampling=0, progressCallback=None,
use2DLimits=False, mol=None, verbose=False):
res = []
nDone = 0
for match in CombiEnum(matches):
atomMatch = ChemicalFeatures.GetAtomMatch(match)
if atomMatch and use2DLimits and mol:
pass2D = Check2DBounds(atomMatch, mol, pcophore)
if verbose:
print('..', atomMatch)
print(' ..Pass2d:', pass2D)
else:
pass2D = True
if atomMatch and pass2D and \
CoarseScreenPharmacophore(atomMatch,bounds,pcophore,verbose=verbose):
if verbose:
print(' ..CoarseScreen: Pass')
bm = bounds.copy()
if verbose:
print('pre update:')
for row in bm:
print(' ', ' '.join(['% 4.2f' % x for x in row]))
bm = UpdatePharmacophoreBounds(bm, atomMatch, pcophore)
sz = bm.shape[0]
if verbose:
print('pre downsample:')
for row in bm:
print(' ', ' '.join(['% 4.2f' % x for x in row]))
if useDownsampling:
indices = []
for entry in atomMatch:
indices += list(entry)
bm = DownsampleBoundsMatrix(bm, indices)
if verbose:
print('post downsample:')
for row in bm:
print(' ', ' '.join(['% 4.2f' % x for x in row]))
if DG.DoTriangleSmoothing(bm):
res.append(match)
elif verbose:
print('cannot smooth')
nDone += 1
if progressCallback:
progressCallback(nDone)
return res
def ComputeChiralVolume(mol, centerIdx, confId=-1):
""" Computes the chiral volume of an atom
We're using the chiral volume formula from Figure 7 of
Blaney and Dixon, Rev. Comp. Chem. V, 299-335 (1994)
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
R configuration atoms give negative volumes:
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'R'
>>> ComputeChiralVolume(mol,1) < 0
True
S configuration atoms give positive volumes:
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'S'
>>> ComputeChiralVolume(mol,1) > 0
True
Non-chiral (or non-specified) atoms give zero volume:
>>> ComputeChiralVolume(mol,0) == 0.0
True
We also work on 3-coordinate atoms (with implicit Hs):
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r-3.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'R'
>>> ComputeChiralVolume(mol,1)<0
True
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s-3.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'S'
>>> ComputeChiralVolume(mol,1)>0
True
"""
conf = mol.GetConformer(confId)
Chem.AssignStereochemistry(mol)
center = mol.GetAtomWithIdx(centerIdx)
if not center.HasProp('_CIPCode'):
return 0.0
nbrs = center.GetNeighbors()
nbrRanks = []
for nbr in nbrs:
rank = int(nbr.GetProp('_CIPRank'))
pos = conf.GetAtomPosition(nbr.GetIdx())
nbrRanks.append((rank, pos))
# if we only have three neighbors (i.e. the determining H isn't present)
# then use the central atom as the fourth point:
if len(nbrRanks) == 3:
nbrRanks.append((-1, conf.GetAtomPosition(centerIdx)))
nbrRanks.sort()
ps = [x[1] for x in nbrRanks]
v1 = ps[0] - ps[3]
v2 = ps[1] - ps[3]
v3 = ps[2] - ps[3]
res = v1.DotProduct(v2.CrossProduct(v3))
return res
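# A small numeric illustration of the signed-volume convention used above
# (documentation only, not called anywhere): the chiral volume is the scalar
# triple product v1.(v2 x v3) of the three edge vectors sharing a common
# vertex, so swapping any two of the points flips its sign.
def _exampleChiralVolumeSign():
  p = [numpy.array((1.0, 0.0, 0.0)),
       numpy.array((0.0, 1.0, 0.0)),
       numpy.array((0.0, 0.0, 1.0)),
       numpy.array((0.0, 0.0, 0.0))]
  v1, v2, v3 = p[0] - p[3], p[1] - p[3], p[2] - p[3]
  vol = numpy.dot(v1, numpy.cross(v2, v3))         # +1.0 for this ordering
  volSwapped = numpy.dot(v2, numpy.cross(v1, v3))  # -1.0 after swapping two points
  return vol, volSwapped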
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Pharm3D/EmbedLib.py",
"copies": "1",
"size": "38924",
"license": "bsd-3-clause",
"hash": 4507648311856197600,
"line_mean": 30.4410339257,
"line_max": 119,
"alpha_frac": 0.6180762512,
"autogenerated": false,
"ratio": 3.0383264382171573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4156402689417157,
"avg_score": null,
"num_lines": null
} |
from rdkit import Geometry
from rdkit import Chem
import numpy
import math
# BIG NOTE: we are going to assume atom IDs starting from 0 instead of 1
# for all the functions in this file. This is so that they
# are reasonably independent of the combicode. However, when used
# with the combicode the caller needs to make sure the atom IDs from combicode
# are corrected before feeding them in here.
def cross(v1, v2):
res = numpy.array([v1[1] * v2[2] - v1[2] * v2[1], -v1[0] * v2[2] + v1[2] * v2[0],
v1[0] * v2[1] - v1[1] * v2[0]], numpy.double)
return res
def findNeighbors(atomId, adjMat):
"""
Find the IDs of the neighboring atoms
ARGUMENTS:
atomId - atom of interest
adjMat - adjacency matrix for the compound
"""
nbrs = []
for i, nid in enumerate(adjMat[atomId]):
if nid >= 1:
nbrs.append(i)
return nbrs
def _findAvgVec(conf, center, nbrs):
# find the average of the normalized vectors going from the center atom to the
# neighbors
# the average vector is also normalized
avgVec = 0
for nbr in nbrs:
nid = nbr.GetIdx()
pt = conf.GetAtomPosition(nid)
pt -= center
pt.Normalize()
if (avgVec == 0):
avgVec = pt
else:
avgVec += pt
avgVec.Normalize()
return avgVec
def GetAromaticFeatVects(conf, featAtoms, featLoc, scale=1.5):
"""
Compute the direction vector for an aromatic feature
ARGUMENTS:
conf - a conformer
featAtoms - list of atom IDs that make up the feature
featLoc - location of the aromatic feature specified as point3d
scale - the size of the direction vector
"""
dirType = 'linear'
head = featLoc
ats = [conf.GetAtomPosition(x) for x in featAtoms]
p0 = ats[0]
p1 = ats[1]
v1 = p0 - head
v2 = p1 - head
norm1 = v1.CrossProduct(v2)
norm1.Normalize()
norm1 *= scale
#norm2 = norm1
norm2 = head - norm1
norm1 += head
return ((head, norm1), (head, norm2)), dirType
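# Illustrative sketch (not called anywhere in this module): computing the two
# ring-normal direction vectors for benzene. Using AllChem.EmbedMolecule to
# generate coordinates is an assumption made for the example; any conformer
# would do. The feature location is taken as the ring centroid.
def _exampleAromaticFeatVects():
  from rdkit.Chem import AllChem
  m = Chem.MolFromSmiles('c1ccccc1')
  AllChem.EmbedMolecule(m, randomSeed=42)
  conf = m.GetConformer()
  ringAtoms = range(6)
  centroid = Geometry.Point3D(0.0, 0.0, 0.0)
  for idx in ringAtoms:
    centroid += conf.GetAtomPosition(idx)
  centroid *= 1.0 / len(ringAtoms)
  # returns two (head, tail) vectors perpendicular to the ring plane plus the direction type:
  return GetAromaticFeatVects(conf, ringAtoms, centroid)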
def ArbAxisRotation(theta, ax, pt):
theta = math.pi * theta / 180
c = math.cos(theta)
s = math.sin(theta)
t = 1 - c
X = ax.x
Y = ax.y
Z = ax.z
mat = [[t * X * X + c, t * X * Y + s * Z, t * X * Z - s * Y],
[t * X * Y - s * Z, t * Y * Y + c, t * Y * Z + s * X],
[t * X * Z + s * Y, t * Y * Z - s * X, t * Z * Z + c]]
mat = numpy.array(mat)
if isinstance(pt, Geometry.Point3D):
pt = numpy.array((pt.x, pt.y, pt.z))
tmp = numpy.dot(mat, pt)
res = Geometry.Point3D(tmp[0], tmp[1], tmp[2])
elif isinstance(pt, list) or isinstance(pt, tuple):
pts = pt
res = []
for pt in pts:
pt = numpy.array((pt.x, pt.y, pt.z))
tmp = numpy.dot(mat, pt)
res.append(Geometry.Point3D(tmp[0], tmp[1], tmp[2]))
else:
res = None
return res
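# A tiny sanity-check example for ArbAxisRotation() (documentation only, not
# called anywhere): rotating a point 90 degrees about the z axis keeps it in
# the xy plane and makes it perpendicular to its original direction. The
# handedness of the rotation follows the matrix convention used above.
def _exampleArbAxisRotation():
  ax = Geometry.Point3D(0.0, 0.0, 1.0)
  pt = Geometry.Point3D(1.0, 0.0, 0.0)
  rotated = ArbAxisRotation(90, ax, pt)
  assert abs(rotated.z) < 1e-6                             # still in the xy plane
  assert abs(rotated.x * pt.x + rotated.y * pt.y) < 1e-6   # perpendicular to the original
  return rotated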
def GetAcceptor2FeatVects(conf, featAtoms, scale=1.5):
"""
Get the direction vectors for Acceptor of type 2
This is the acceptor with two adjacent heavy atoms. We will special case a few things here.
If the acceptor atom is an oxygen we will assume sp3 hybridization and return
two acceptor directions reflecting that configuration. Otherwise a single
direction vector in the plane of the neighboring heavy atoms is returned.
ARGUMENTS:
featAtoms - list of atoms that are part of the feature
scale - length of the direction vector
"""
assert len(featAtoms) == 1
aid = featAtoms[0]
cpt = conf.GetAtomPosition(aid)
mol = conf.GetOwningMol()
nbrs = list(mol.GetAtomWithIdx(aid).GetNeighbors())
hydrogens = []
tmp = []
while len(nbrs):
nbr = nbrs.pop()
if nbr.GetAtomicNum() == 1:
hydrogens.append(nbr)
else:
tmp.append(nbr)
nbrs = tmp
assert len(nbrs) == 2
bvec = _findAvgVec(conf, cpt, nbrs)
bvec *= (-1.0 * scale)
if (mol.GetAtomWithIdx(aid).GetAtomicNum() == 8):
# assume sp3
# we will create two vectors by rotating bvec by half the tetrahedral angle in either direction
v1 = conf.GetAtomPosition(nbrs[0].GetIdx())
v1 -= cpt
v2 = conf.GetAtomPosition(nbrs[1].GetIdx())
v2 -= cpt
rotAxis = v1 - v2
rotAxis.Normalize()
bv1 = ArbAxisRotation(54.5, rotAxis, bvec)
bv1 += cpt
bv2 = ArbAxisRotation(-54.5, rotAxis, bvec)
bv2 += cpt
return ((cpt, bv1),
(cpt, bv2), ), 'linear'
else:
bvec += cpt
return ((cpt, bvec), ), 'linear'
def _GetTetrahedralFeatVect(conf, aid, scale):
mol = conf.GetOwningMol()
cpt = conf.GetAtomPosition(aid)
nbrs = mol.GetAtomWithIdx(aid).GetNeighbors()
if not _checkPlanarity(conf, cpt, nbrs, tol=0.1):
bvec = _findAvgVec(conf, cpt, nbrs)
bvec *= (-1.0 * scale)
bvec += cpt
res = ((cpt, bvec), )
else:
res = ()
return res
def GetDonor3FeatVects(conf, featAtoms, scale=1.5):
"""
Get the direction vectors for Donor of type 3
This is a donor with three heavy atoms as neighbors. We will assume
a tetrahedral arrangement of these neighbors. So the direction we are seeking
is the fourth arm of the sp3 arrangement
ARGUMENTS:
featAtoms - list of atoms that are part of the feature
scale - length of the direction vector
"""
assert len(featAtoms) == 1
aid = featAtoms[0]
tfv = _GetTetrahedralFeatVect(conf, aid, scale)
return tfv, 'linear'
def GetAcceptor3FeatVects(conf, featAtoms, scale=1.5):
"""
Get the direction vectors for Acceptor of type 3
This is an acceptor with three heavy atoms as neighbors. We will assume
a tetrahedral arrangement of these neighbors. So the direction we are seeking
is the fourth arm of the sp3 arrangement
ARGUMENTS:
featAtoms - list of atoms that are part of the feature
scale - length of the direction vector
"""
assert len(featAtoms) == 1
aid = featAtoms[0]
tfv = _GetTetrahedralFeatVect(conf, aid, scale)
return tfv, 'linear'
def _findHydAtoms(nbrs, atomNames):
hAtoms = []
for nid in nbrs:
if atomNames[nid] == 'H':
hAtoms.append(nid)
return hAtoms
def _checkPlanarity(conf, cpt, nbrs, tol=1.0e-3):
assert len(nbrs) == 3
v1 = conf.GetAtomPosition(nbrs[0].GetIdx())
v1 -= cpt
v2 = conf.GetAtomPosition(nbrs[1].GetIdx())
v2 -= cpt
v3 = conf.GetAtomPosition(nbrs[2].GetIdx())
v3 -= cpt
normal = v1.CrossProduct(v2)
dotP = abs(v3.DotProduct(normal))
if (dotP <= tol):
return 1
else:
return 0
def GetDonor2FeatVects(conf, featAtoms, scale=1.5):
"""
Get the direction vectors for Donor of type 2
This is a donor with two heavy atoms as neighbors. The atom may or may not have
hydrogens on it. The following neighbor situations are considered here:
1. two heavy atoms and two hydrogens: we will assume a sp3 arrangement here
2. two heavy atoms and one hydrogen: this can either be sp2 or sp3
3. two heavy atoms and no hydrogens
ARGUMENTS:
featAtoms - list of atoms that are part of the feature
scale - length of the direction vector
"""
assert len(featAtoms) == 1
aid = featAtoms[0]
mol = conf.GetOwningMol()
cpt = conf.GetAtomPosition(aid)
# find the two atoms that are neighbors of this atom
nbrs = list(mol.GetAtomWithIdx(aid).GetNeighbors())
assert len(nbrs) >= 2
hydrogens = []
tmp = []
while len(nbrs):
nbr = nbrs.pop()
if nbr.GetAtomicNum() == 1:
hydrogens.append(nbr)
else:
tmp.append(nbr)
nbrs = tmp
if len(nbrs) == 2:
# there should be no hydrogens in this case
assert len(hydrogens) == 0
# in this case the direction is the opposite of the average vector of the two neighbors
bvec = _findAvgVec(conf, cpt, nbrs)
bvec *= (-1.0 * scale)
bvec += cpt
return ((cpt, bvec), ), 'linear'
elif len(nbrs) == 3:
assert len(hydrogens) == 1
# this is a little more tricky: we have to check if the hydrogen is in the plane of the
# two heavy atoms (i.e. sp2 arrangement) or out of plane (sp3 arrangement)
# one of the directions will be along the donor-hydrogen bond
hid = hydrogens[0].GetIdx()
bvec = conf.GetAtomPosition(hid)
bvec -= cpt
bvec.Normalize()
bvec *= scale
bvec += cpt
if _checkPlanarity(conf, cpt, nbrs):
# only the hydrogen atom direction needs to be used
return ((cpt, bvec), ), 'linear'
else:
# we have a non-planar configuration - we will assume sp3 and compute a second direction vector
ovec = _findAvgVec(conf, cpt, nbrs)
ovec *= (-1.0 * scale)
ovec += cpt
return ((cpt, bvec),
(cpt, ovec), ), 'linear'
elif len(nbrs) >= 4:
# in this case we should have two or more hydrogens; we will simply use their directions
res = []
for hid in hydrogens:
bvec = conf.GetAtomPosition(hid)
bvec -= cpt
bvec.Normalize()
bvec *= scale
bvec += cpt
res.append((cpt, bvec))
return tuple(res), 'linear'
def GetDonor1FeatVects(conf, featAtoms, scale=1.5):
"""
Get the direction vectors for Donor of type 1
This is a donor with one heavy atom. It is not clear where we should be putting the
direction vector for this. It should probably be a cone. In this case we will just use the
inverted direction of the bond from the donor atom to the heavy atom
ARGUMENTS:
featAtoms - list of atoms that are part of the feature
scale - length of the direction vector
"""
assert len(featAtoms) == 1
aid = featAtoms[0]
mol = conf.GetOwningMol()
nbrs = mol.GetAtomWithIdx(aid).GetNeighbors()
# find the neighboring heavy atom
hnbr = -1
for nbr in nbrs:
if nbr.GetAtomicNum() != 1:
hnbr = nbr.GetIdx()
break
cpt = conf.GetAtomPosition(aid)
v1 = conf.GetAtomPosition(hnbr)
v1 -= cpt
v1.Normalize()
v1 *= (-1.0 * scale)
v1 += cpt
return ((cpt, v1), ), 'cone'
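# Illustrative sketch (not called anywhere in this module): the donor-1 case
# for the hydroxyl oxygen of ethanol. Using AllChem.EmbedMolecule to generate
# coordinates is an assumption made for the example; any conformer works.
def _exampleDonor1FeatVects():
  from rdkit.Chem import AllChem
  m = Chem.MolFromSmiles('CCO')
  AllChem.EmbedMolecule(m, randomSeed=42)
  conf = m.GetConformer()
  # atom 2 is the O; the returned vector points away from its carbon neighbor
  # and the direction type is 'cone':
  return GetDonor1FeatVects(conf, [2], scale=1.5)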
def GetAcceptor1FeatVects(conf, featAtoms, scale=1.5):
"""
Get the direction vectors for Acceptor of type 1
This is an acceptor with one heavy atom neighbor. There are two possibilities we will
consider here
1. The bond to the heavy atom is a single bond e.g. CO
In this case we don't know the exact direction and we just use the inversion of this bond direction
and mark this direction as a 'cone'
2. The bond to the heavy atom is a double bond e.g. C=O
In this case we have two possible directions except in some special cases e.g. SO2
where again we will use the inverted bond direction
ARGUMENTS:
featAtoms - list of atoms that are part of the feature
scale - length of the direction vector
"""
assert len(featAtoms) == 1
aid = featAtoms[0]
mol = conf.GetOwningMol()
nbrs = mol.GetAtomWithIdx(aid).GetNeighbors()
cpt = conf.GetAtomPosition(aid)
# find the adjacent heavy atom
heavyAt = -1
for nbr in nbrs:
if nbr.GetAtomicNum() != 1:
heavyAt = nbr
break
singleBnd = mol.GetBondBetweenAtoms(aid, heavyAt.GetIdx()).GetBondType() > Chem.BondType.SINGLE
# special case - if the heavy atom is a sulfur (we should probably check phosphorus as well)
sulfur = heavyAt.GetAtomicNum() == 16
if singleBnd or sulfur:
v1 = conf.GetAtomPosition(heavyAt.GetIdx())
v1 -= cpt
v1.Normalize()
v1 *= (-1.0 * scale)
v1 += cpt
return ((cpt, v1), ), 'cone'
else:
# ok in this case we will assume that
# heavy atom is sp2 hybridized and the direction vectors (two of them)
# are in the same plane, we will find this plane by looking for one
# of the neighbors of the heavy atom
hvNbrs = heavyAt.GetNeighbors()
hvNbr = -1
for nbr in hvNbrs:
if nbr.GetIdx() != aid:
hvNbr = nbr
break
pt1 = conf.GetAtomPosition(hvNbr.GetIdx())
v1 = conf.GetAtomPosition(heavyAt.GetIdx())
pt1 -= v1
v1 -= cpt
rotAxis = v1.CrossProduct(pt1)
rotAxis.Normalize()
bv1 = ArbAxisRotation(120, rotAxis, v1)
bv1.Normalize()
bv1 *= scale
bv1 += cpt
bv2 = ArbAxisRotation(-120, rotAxis, v1)
bv2.Normalize()
bv2 *= scale
bv2 += cpt
return ((cpt, bv1),
(cpt, bv2), ), 'linear'
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Features/FeatDirUtilsRD.py",
"copies": "2",
"size": "12281",
"license": "bsd-3-clause",
"hash": -2614181898708243000,
"line_mean": 27.5604651163,
"line_max": 104,
"alpha_frac": 0.64929566,
"autogenerated": false,
"ratio": 3.1257317383558156,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47750273983558156,
"avg_score": null,
"num_lines": null
} |
from rdkit import Geometry
import numpy
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
from rdkit.RDLogger import logger
logger = logger()
class Pharmacophore:
def __init__(self, feats, initMats=True):
self._initializeFeats(feats)
nf = len(feats)
self._boundsMat = numpy.zeros((nf, nf), numpy.float)
self._boundsMat2D = numpy.zeros((nf, nf), numpy.int)
if initMats:
self._initializeMatrices()
def _initializeFeats(self, feats):
self._feats = []
for feat in feats:
if isinstance(feat, ChemicalFeatures.MolChemicalFeature):
pos = feat.GetPos()
newFeat = ChemicalFeatures.FreeChemicalFeature(feat.GetFamily(), feat.GetType(),
Geometry.Point3D(pos[0], pos[1], pos[2]))
self._feats.append(newFeat)
else:
self._feats.append(feat)
def _initializeMatrices(self):
# initialize the bounds matrix with distances to start with
nf = len(self._feats)
for i in range(1, nf):
loci = self._feats[i].GetPos()
for j in range(i):
locj = self._feats[j].GetPos()
dist = loci.Distance(locj)
self._boundsMat[i, j] = dist
self._boundsMat[j, i] = dist
for i in range(nf):
for j in range(i + 1, nf):
self._boundsMat2D[i, j] = 1000
def getFeatures(self):
return self._feats
def getFeature(self, i):
return self._feats[i]
def getUpperBound(self, i, j):
if (i > j):
j, i = i, j
return self._boundsMat[i, j]
def getLowerBound(self, i, j):
if (j > i):
j, i = i, j
return self._boundsMat[i, j]
def _checkBounds(self, i, j):
" raises ValueError on failure "
nf = len(self._feats)
if (i < 0) or (i >= nf):
raise ValueError("Index out of bound")
if (j < 0) or (j >= nf):
raise ValueError("Index out of bound")
return True
def setUpperBound(self, i, j, val, checkBounds=False):
if (checkBounds):
self._checkBounds(i, j)
if (i > j):
j, i = i, j
self._boundsMat[i, j] = val
def setLowerBound(self, i, j, val, checkBounds=False):
if (checkBounds):
self._checkBounds(i, j)
if (j > i):
j, i = i, j
self._boundsMat[i, j] = val
def getUpperBound2D(self, i, j):
if (i > j):
j, i = i, j
return self._boundsMat2D[i, j]
def getLowerBound2D(self, i, j):
if (j > i):
j, i = i, j
return self._boundsMat2D[i, j]
def setUpperBound2D(self, i, j, val, checkBounds=False):
if (checkBounds):
self._checkBounds(i, j)
if (i > j):
j, i = i, j
self._boundsMat2D[i, j] = val
def setLowerBound2D(self, i, j, val, checkBounds=False):
if (checkBounds):
self._checkBounds(i, j)
if (j > i):
j, i = i, j
self._boundsMat2D[i, j] = val
def __str__(self):
res = ' ' * 13
for i, iFeat in enumerate(self._feats):
res += '% 12s ' % iFeat.GetFamily()
res += '\n'
for i, iFeat in enumerate(self._feats):
res += '% 12s ' % iFeat.GetFamily()
for j, jFeat in enumerate(self._feats):
if j < i:
res += '% 12.3f ' % self.getLowerBound(i, j)
elif j > i:
res += '% 12.3f ' % self.getUpperBound(i, j)
else:
res += '% 12.3f ' % 0.0
res += '\n'
return res
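# Illustrative sketch (not called anywhere in this module): the bounds matrix
# is seeded with the inter-feature distances, so right after construction the
# upper and lower bounds both equal the distance between the feature positions;
# they can then be loosened with setLowerBound()/setUpperBound(). The feature
# families, positions and tolerances below are arbitrary example values.
def _examplePharmacophoreBounds():
  feats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', 'Acceptor',
                                                Geometry.Point3D(0.0, 0.0, 0.0)),
           ChemicalFeatures.FreeChemicalFeature('Donor', 'Donor',
                                                Geometry.Point3D(2.65, 0.0, 0.0))]
  pcophore = Pharmacophore(feats)
  d = pcophore.getUpperBound(0, 1)      # 2.65, the inter-feature distance
  pcophore.setLowerBound(0, 1, d - 0.5)
  pcophore.setUpperBound(0, 1, d + 0.5)
  pcophore.setUpperBound2D(0, 1, 4)     # topological (bond-count) limit
  return pcophore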
class ExplicitPharmacophore:
""" this is a pharmacophore with explicit point locations and radii
"""
def __init__(self, feats=None, radii=None):
if feats and radii:
self._initializeFeats(feats, radii)
def _initializeFeats(self, feats, radii):
if len(feats) != len(radii):
raise ValueError('len(feats)!=len(radii)')
self._feats = []
self._radii = []
for feat, rad in zip(feats, radii):
if isinstance(feat, ChemicalFeatures.MolChemicalFeature):
pos = feat.GetPos()
newFeat = ChemicalFeatures.FreeChemicalFeature(feat.GetFamily(), feat.GetType(),
Geometry.Point3D(pos[0], pos[1], pos[2]))
else:
newFeat = feat
self._feats.append(newFeat)
self._radii.append(rad)
def getFeatures(self):
return self._feats
def getRadii(self):
return self._radii
def getFeature(self, i):
return self._feats[i]
def getRadius(self, i):
return self._radii[i]
def setRadius(self, i, rad):
self._radii[i] = rad
def initFromString(self, text):
lines = text.split(r'\n')
self.initFromLines(lines)
def initFromFile(self, inF):
self.initFromLines(inF.readlines())
def initFromLines(self, lines):
from rdkit.Chem import ChemicalFeatures
import re
spaces = re.compile('[\ \t]+')
feats = []
rads = []
for lineNum, line in enumerate(lines):
txt = line.split('#')[0].strip()
if txt:
splitL = spaces.split(txt)
if len(splitL) < 5:
logger.error('Input line %d only contains %d fields, 5 are required. Read failed.' %
(lineNum, len(splitL)))
return
fName = splitL[0]
try:
xP = float(splitL[1])
yP = float(splitL[2])
zP = float(splitL[3])
rad = float(splitL[4])
except ValueError:
logger.error('Error parsing a number on line %d. Read failed.' % (lineNum))
return
feats.append(
ChemicalFeatures.FreeChemicalFeature(fName, fName, Geometry.Point3D(xP, yP, zP)))
rads.append(rad)
self._initializeFeats(feats, rads)
def __str__(self):
res = ''
for feat, rad in zip(self._feats, self._radii):
res += '% 12s ' % feat.GetFamily()
p = feat.GetPos()
res += ' % 8.4f % 8.4f % 8.4f ' % (p.x, p.y, p.z)
res += '% 5.2f' % rad
res += '\n'
return res
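# Illustrative sketch of the text format parsed by initFromLines() (this
# helper is documentation only and is not called anywhere). Each non-comment
# line holds a feature name followed by x, y, z and a radius; '#' starts a
# comment. The feature names and numbers below are made-up example values.
def _exampleExplicitPharmacophore():
  txt = """# family    x      y      z     radius
Acceptor    0.00   0.00   0.00   1.50
Donor       2.65   0.00   0.00   1.00
Aromatic    5.12   0.91   0.00   2.00"""
  pcophore = ExplicitPharmacophore()
  pcophore.initFromLines(txt.split('\n'))
  return pcophore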
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Pharm3D/Pharmacophore.py",
"copies": "1",
"size": "6085",
"license": "bsd-3-clause",
"hash": 8974841837638450000,
"line_mean": 26.9128440367,
"line_max": 96,
"alpha_frac": 0.5753492194,
"autogenerated": false,
"ratio": 3.174230568596766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9187696393858763,
"avg_score": 0.012376678827600514,
"num_lines": 218
} |
from rdkit import RDConfig
import unittest,sys,os,cPickle
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures,rdDistGeom
import EmbedLib
import gzip
from rdkit import DistanceGeometry as DG
from rdkit import Geometry
import Pharmacophore
import cPickle
import numpy
def feq(n1,n2,tol=1e-5):
return abs(n1-n2)<=tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
self.fdefBlock = \
"""DefineFeature HAcceptor1 [N,O;H0]
Family HBondAcceptor
Weights 1.0
EndFeature
DefineFeature HDonor1 [N,O;!H0]
Family HBondDonor
Weights 1.0
EndFeature
DefineFeature Aromatic1 c1ccccc1
Family Aromatic
Weights 1.,1.,1.,1.,1.,1.
EndFeature\n"""
self.featFactory = ChemicalFeatures.BuildFeatureFactoryFromString(self.fdefBlock)
self.feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1',
Geometry.Point3D(2.65, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1',
Geometry.Point3D(5.12, 0.908, 0.0)),
]
self.pcophore=Pharmacophore.Pharmacophore(self.feats)
self.pcophore.setLowerBound(0,1, 2.0)
self.pcophore.setUpperBound(0,1, 3.3)
self.pcophore.setLowerBound(0,2, 5.0)
self.pcophore.setUpperBound(0,2, 5.4)
self.pcophore.setLowerBound(1,2, 2.6)
self.pcophore.setUpperBound(1,2, 3.0)
def _matchMol(self,tpl,pcophore,featFactory,downSample):
name,molPkl,boundsMat = tpl
mol = Chem.Mol(molPkl)
matched,matches = EmbedLib.MatchPharmacophoreToMol(mol,featFactory,pcophore)
if matched:
r = EmbedLib.MatchPharmacophore(matches,boundsMat,pcophore,
useDownsampling=downSample)
if r[0]:
return 0
else:
return 1
else:
return 0
def test1SearchFullMat(self):
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
#outF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.new.gz'),'wb+')
nDone = 0
nHits = 0
while 1:
try:
tpl = cPickle.load(inF)
#tpl=tpl[0],tpl[1],numpy.array(tpl[2])
#cPickle.dump(tpl,outF)
except:
break
if self._matchMol(tpl,self.pcophore,self.featFactory,0):
nHits+=1
nDone += 1
self.failUnlessEqual(nDone,100)
#print 'nHits:',nHits
self.failUnlessEqual(nHits,47)
def test2SearchDownsample(self):
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
nDone = 0
nHits = 0
hits = []
while 1:
try:
tpl = cPickle.load(inF)
except:
break
if self._matchMol(tpl,self.pcophore, self.featFactory,1):
nHits+=1
nDone += 1
self.failUnlessEqual(nDone,100)
#print 'nHits:',nHits
self.failUnlessEqual(nHits,47)
def test3Embed(self):
testResults={
'mol_197':(181.30,30.21,92.03,8.73,91.60,8.33,74.68,1.35,0.00),
'mol_223':(211.07,4.22,114.14,1.57,114.08,1.58,68.22,0.48,0.00),
'mol_269':(162.28,2.03,74.50,1.00,73.45,0.96,60.18,0.91,6.00),
}
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
nDone = 0
nHits = 0
while 1:
try:
name,molPkl,boundsMat = cPickle.load(inF)
except:
break
nDone += 1
mol = Chem.Mol(molPkl)
nboundsMat = rdDistGeom.GetMoleculeBoundsMatrix(mol)
DG.DoTriangleSmoothing(nboundsMat)
matched,matches = EmbedLib.MatchPharmacophoreToMol(mol,self.featFactory,
self.pcophore)
if matched:
failed,bm,match,stats = EmbedLib.MatchPharmacophore(matches,nboundsMat,
self.pcophore,
useDownsampling=1)
if not failed:
nHits += 1
if testResults.has_key(name):
stats = EmbedLib.EmbedOne(mol,name,match,self.pcophore,count=10,
silent=1,randomSeed=23)
tgt = testResults[name]
self.failUnlessEqual(len(tgt),len(stats))
print name
print ','.join(['%.2f'%x for x in stats])
# we'll use different tolerances for the different values:
self.failUnless(feq(tgt[0],stats[0],5.0),(tgt[0],stats[0]))
for i in range(2,len(tgt)):
self.failUnless(feq(tgt[i],stats[i],5.0),(tgt[i],stats[i]))
self.failUnlessEqual(nDone,100)
#print 'nHits:',nHits
self.failUnlessEqual(nHits,50)
def test4Search(self):
featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(self.dataDir,
'BaseFeatures.fdef'))
activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Donor',
Geometry.Point3D(0.0, 0.0, 0.0)),
ChemicalFeatures.FreeChemicalFeature('Aromatic',
Geometry.Point3D(0.0, 0.0, 0.0))]
pcophore= Pharmacophore.Pharmacophore(activeFeats)
pcophore.setLowerBound(0,1,2.251)
pcophore.setUpperBound(0,1,2.451)
pcophore.setUpperBound2D(0,1,3)
pcophore.setLowerBound(0,2,4.970)
pcophore.setUpperBound(0,2,5.170)
pcophore.setUpperBound2D(0,2,6)
pcophore.setLowerBound(1,2,2.681)
pcophore.setUpperBound(1,2,2.881)
pcophore.setUpperBound2D(1,2,6)
inF = gzip.open(os.path.join(self.dataDir,'cdk2-syn-clip100.pkl.gz'),'rb')
nDone = 0
nMatches = 0
nHits = 0
while 1:
try:
name,molPkl,boundsMat = cPickle.load(inF)
except:
break
nDone += 1
mol = Chem.Mol(molPkl)
boundsMat = rdDistGeom.GetMoleculeBoundsMatrix(mol)
DG.DoTriangleSmoothing(boundsMat)
canMatch,matches = EmbedLib.MatchPharmacophoreToMol(mol,featFactory,
pcophore)
if canMatch:
nMatches+=1
r = EmbedLib.MatchPharmacophore(matches,boundsMat,pcophore,
useDownsampling=True,use2DLimits=True,
mol=mol)
failed,bm,match,details = r
if not failed:
nHits+=1
self.failUnlessEqual(nDone,100)
self.failUnlessEqual(nMatches,93)
#print 'nhits:',nHits
self.failUnlessEqual(nHits,68)
def testIssue268(self):
from rdkit import RDLogger
#RDLogger.EnableLog('rdApp.debug')
featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(self.dataDir,
'Issue268.fdef'))
m1 = Chem.MolFromMolFile(os.path.join(self.dataDir,
'Issue268_Mol1.mol'))
m2 = Chem.MolFromMolFile(os.path.join(self.dataDir,
'Issue268_Mol2.mol'))
pcop = cPickle.load(file(os.path.join(self.dataDir,
'Issue268_Pcop.pkl'),'rb'))
#pcop._boundsMat=numpy.array(pcop._boundsMat)
#pcop._boundsMat2D=numpy.array(pcop._boundsMat2D)
#cPickle.dump(pcop,file(os.path.join(self.dataDir,
# 'Issue268_Pcop.new.pkl'),'wb+'))
match,mList1 = EmbedLib.MatchFeatsToMol(m1,featFactory,pcop.getFeatures())
match,mList2 = EmbedLib.MatchFeatsToMol(m2,featFactory,pcop.getFeatures())
b1 = rdDistGeom.GetMoleculeBoundsMatrix(m1)
b2 = rdDistGeom.GetMoleculeBoundsMatrix(m2)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop)[2]),4)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop)[2]),4)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop,
mol=m1,use2DLimits=True)[2]),4)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop,
mol=m2,use2DLimits=True)[2]),4)
from rdkit import DistanceGeometry as DG
self.failUnless(DG.DoTriangleSmoothing(b1))
self.failUnless(DG.DoTriangleSmoothing(b2))
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop)[2]),4)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop)[2]),4)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList1,b1,pcop,
mol=m1,use2DLimits=True)[2]),4)
self.failUnlessEqual(len(EmbedLib.MatchPharmacophore(mList2,b2,pcop,
mol=m2,use2DLimits=True)[2]),4)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/Pharm3D/UnitTestEmbed.py",
"copies": "1",
"size": "9675",
"license": "bsd-3-clause",
"hash": 5872861235074889000,
"line_mean": 36.79296875,
"line_max": 85,
"alpha_frac": 0.5804651163,
"autogenerated": false,
"ratio": 3.210019907100199,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4290485023400199,
"avg_score": null,
"num_lines": null
} |
from rdkit import RDConfig
import sys,time,math
from rdkit.ML.Data import Stats
import rdkit.DistanceGeometry as DG
from rdkit import Chem
import numpy
from rdkit.Chem import rdDistGeom as MolDG
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem import ChemicalForceFields
import Pharmacophore,ExcludedVolume
from rdkit import Geometry
_times = {}
from rdkit import RDLogger as logging
logger = logging.logger()
defaultFeatLength=2.0
def GetAtomHeavyNeighbors(atom):
""" returns a list of the heavy-atom neighbors of the
atom passed in:
>>> m = Chem.MolFromSmiles('CCO')
>>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(0))
>>> len(l)
1
>>> isinstance(l[0],Chem.Atom)
True
>>> l[0].GetIdx()
1
>>> l = GetAtomHeavyNeighbors(m.GetAtomWithIdx(1))
>>> len(l)
2
>>> l[0].GetIdx()
0
>>> l[1].GetIdx()
2
"""
res=[]
for nbr in atom.GetNeighbors():
if nbr.GetAtomicNum() != 1:
res.append(nbr)
return res
def ReplaceGroup(match,bounds,slop=0.01,useDirs=False,dirLength=defaultFeatLength):
""" Adds an entry at the end of the bounds matrix for a point at
the center of a multi-point feature
returns a 2-tuple:
new bounds mat
index of point added
>>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
>>> match = [0,1,2]
>>> bm,idx = ReplaceGroup(match,boundsMat,slop=0.0)
the index is at the end:
>>> idx
3
and the matrix is one bigger:
>>> bm.shape
(4, 4)
but the original bounds mat is not altered:
>>> boundsMat.shape
(3, 3)
We make the assumption that the points of the
feature form a regular polygon, are listed in order
(i.e. pt 0 is a neighbor to pt 1 and pt N-1)
and that the replacement point goes at the center:
>>> print ', '.join(['%.3f'%x for x in bm[-1]])
0.577, 0.577, 0.577, 0.000
>>> print ', '.join(['%.3f'%x for x in bm[:,-1]])
1.155, 1.155, 1.155, 0.000
The slop argument (default = 0.01) is fractional:
>>> bm,idx = ReplaceGroup(match,boundsMat)
>>> print ', '.join(['%.3f'%x for x in bm[-1]])
0.572, 0.572, 0.572, 0.000
>>> print ', '.join(['%.3f'%x for x in bm[:,-1]])
1.166, 1.166, 1.166, 0.000
"""
maxVal = -1000.0
minVal = 1e8
nPts = len(match)
for i in range(nPts):
idx0 = match[i]
if i<nPts-1:
idx1 = match[i+1]
else:
idx1 = match[0]
if idx1<idx0:
idx0,idx1 = idx1,idx0
minVal = min(minVal,bounds[idx1,idx0])
maxVal = max(maxVal,bounds[idx0,idx1])
maxVal *= (1+slop)
minVal *= (1-slop)
scaleFact = 1.0/(2.0*math.sin(math.pi/nPts))
minVal *= scaleFact
maxVal *= scaleFact
replaceIdx = bounds.shape[0]
if not useDirs:
bm = numpy.zeros((bounds.shape[0]+1,bounds.shape[1]+1),numpy.float)
else:
bm = numpy.zeros((bounds.shape[0]+2,bounds.shape[1]+2),numpy.float)
bm[0:bounds.shape[0],0:bounds.shape[1]]=bounds
bm[:replaceIdx,replaceIdx]=1000.
if useDirs:
bm[:replaceIdx+1,replaceIdx+1]=1000.
# set the feature - direction point bounds:
bm[replaceIdx,replaceIdx+1]=dirLength+slop
bm[replaceIdx+1,replaceIdx]=dirLength-slop
for idx1 in match:
bm[idx1,replaceIdx]=maxVal
bm[replaceIdx,idx1]=minVal
if useDirs:
# set the point - direction point bounds:
bm[idx1,replaceIdx+1] = numpy.sqrt(bm[replaceIdx,replaceIdx+1]**2+maxVal**2)
bm[replaceIdx+1,idx1] = numpy.sqrt(bm[replaceIdx+1,replaceIdx]**2+minVal**2)
return bm,replaceIdx
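# A small numeric illustration of the scale factor used in ReplaceGroup()
# (documentation only, not called anywhere): the matched atoms are assumed to
# form a regular n-gon with side length given by the bound, and the added
# center point sits at the circumradius R = side/(2*sin(pi/n)). For the
# triangle in the doctest above (upper bounds of 2.0) that gives
# 2.0/(2*sin(pi/3)) ~= 1.155, the value that appears in bm[:,-1].
def _exampleReplaceGroupScale(side=2.0, nPts=3):
  return side / (2.0 * math.sin(math.pi / nPts))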
def EmbedMol(mol,bm,atomMatch=None,weight=2.0,randomSeed=-1,
excludedVolumes=None):
""" Generates an embedding for a molecule based on a bounds matrix and adds
a conformer (id 0) to the molecule
if the optional argument atomMatch is provided, it will be used to provide
supplemental weights for the embedding routine (used in the optimization
phase to ensure that the resulting geometry really does satisfy the
pharmacophore).
if the excludedVolumes is provided, it should be a sequence of
ExcludedVolume objects
>>> m = Chem.MolFromSmiles('c1ccccc1C')
>>> bounds = MolDG.GetMoleculeBoundsMatrix(m)
>>> bounds.shape
(7, 7)
>>> m.GetNumConformers()
0
>>> EmbedMol(m,bounds,randomSeed=23)
>>> m.GetNumConformers()
1
"""
nAts = mol.GetNumAtoms()
weights=[]
if(atomMatch):
for i in range(len(atomMatch)):
for j in range(i+1,len(atomMatch)):
weights.append((i,j,weight))
if(excludedVolumes):
for vol in excludedVolumes:
idx = vol.index
# excluded volumes affect every other atom:
for i in range(nAts):
weights.append((i,idx,weight))
coords = DG.EmbedBoundsMatrix(bm,weights=weights,numZeroFail=1,randomSeed=randomSeed)
#for row in coords:
# print ', '.join(['%.2f'%x for x in row])
conf = Chem.Conformer(nAts)
conf.SetId(0)
for i in range(nAts):
conf.SetAtomPosition(i,list(coords[i]))
if excludedVolumes:
for vol in excludedVolumes:
vol.pos = numpy.array(coords[vol.index])
#print >>sys.stderr,' % 7.4f % 7.4f % 7.4f Ar 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(coords[-1])
mol.AddConformer(conf)
def AddExcludedVolumes(bm,excludedVolumes,smoothIt=True):
""" Adds a set of excluded volumes to the bounds matrix
and returns the new matrix
excludedVolumes is a list of ExcludedVolume objects
>>> boundsMat = numpy.array([[0.0,2.0,2.0],[1.0,0.0,2.0],[1.0,1.0,0.0]])
>>> ev1 = ExcludedVolume.ExcludedVolume(([(0,),0.5,1.0],),exclusionDist=1.5)
>>> bm = AddExcludedVolumes(boundsMat,(ev1,))
the results matrix is one bigger:
>>> bm.shape
(4, 4)
and the original bounds mat is not altered:
>>> boundsMat.shape
(3, 3)
>>> print ', '.join(['%.3f'%x for x in bm[-1]])
0.500, 1.500, 1.500, 0.000
>>> print ', '.join(['%.3f'%x for x in bm[:,-1]])
1.000, 3.000, 3.000, 0.000
"""
oDim = bm.shape[0]
dim = oDim+len(excludedVolumes)
res = numpy.zeros((dim,dim),numpy.float)
res[:oDim,:oDim] = bm
for i,vol in enumerate(excludedVolumes):
bmIdx = oDim+i
vol.index = bmIdx
# set values to all the atoms:
res[bmIdx,:bmIdx] = vol.exclusionDist
res[:bmIdx,bmIdx] = 1000.0
# set values to our defining features:
for indices,minV,maxV in vol.featInfo:
for index in indices:
try:
res[bmIdx,index] = minV
res[index,bmIdx] = maxV
except IndexError:
logger.error('BAD INDEX: res[%d,%d], shape is %s'%(bmIdx,index,str(res.shape)))
raise IndexError
# set values to other excluded volumes:
for j in range(bmIdx+1,dim):
res[bmIdx,j:dim] = 0
res[j:dim,bmIdx] = 1000
if smoothIt: DG.DoTriangleSmoothing(res)
return res
def UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=False,
dirLength=defaultFeatLength,
mol=None):
""" loops over a distance bounds matrix and replaces the elements
that are altered by a pharmacophore
**NOTE** this returns the resulting bounds matrix, but it may also
alter the input matrix
atomMatch is a sequence of sequences containing atom indices
for each of the pharmacophore's features.
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 1.0)
>>> pcophore.setUpperBound(0,1, 2.0)
>>> boundsMat = numpy.array([[0.0,3.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> atomMatch = ((0,),(1,))
>>> bm = UpdatePharmacophoreBounds(boundsMat,atomMatch,pcophore)
In this case, there are no multi-atom features, so the result matrix
is the same as the input:
>>> bm is boundsMat
True
this means, of course, that the input boundsMat is altered:
>>> print ', '.join(['%.3f'%x for x in boundsMat[0]])
0.000, 2.000, 3.000
>>> print ', '.join(['%.3f'%x for x in boundsMat[1]])
1.000, 0.000, 3.000
>>> print ', '.join(['%.3f'%x for x in boundsMat[2]])
2.000, 2.000, 0.000
"""
replaceMap = {}
for i,matchI in enumerate(atomMatch):
if len(matchI)>1:
bm,replaceIdx = ReplaceGroup(matchI,bm,useDirs=useDirs)
replaceMap[i] = replaceIdx
for i,matchI in enumerate(atomMatch):
mi = replaceMap.get(i,matchI[0])
for j in range(i+1,len(atomMatch)):
mj = replaceMap.get(j,atomMatch[j][0])
if mi<mj:
idx0,idx1 = mi,mj
else:
idx0,idx1 = mj,mi
bm[idx0,idx1] = pcophore.getUpperBound(i,j)
bm[idx1,idx0] = pcophore.getLowerBound(i,j)
return bm
def EmbedPharmacophore(mol,atomMatch,pcophore,randomSeed=-1,count=10,smoothFirst=True,
silent=False,bounds=None,excludedVolumes=None,targetNumber=-1,
useDirs=False):
""" Generates one or more embeddings for a molecule that satisfy a pharmacophore
atomMatch is a sequence of sequences containing atom indices
for each of the pharmacophore's features.
- count: the maximum number of attempts to make at generating an embedding
- smoothFirst: toggles triangle smoothing of the molecular bounds matrix
- bounds: if provided, should be the molecular bounds matrix. If this isn't
provided, the matrix will be generated.
- targetNumber: if this number is positive, it provides a maximum number
of embeddings to generate (i.e. we'll have count attempts to generate
targetNumber embeddings).
returns: a 3 tuple:
1) the molecular bounds matrix adjusted for the pharmacophore
2) a list of embeddings (molecules with a single conformer)
3) the number of failed attempts at embedding
>>> m = Chem.MolFromSmiles('OCCN')
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.5)
>>> pcophore.setUpperBound(0,1, 3.5)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
10
>>> nFail
0
Set up a case that can't succeed:
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.0)
>>> pcophore.setUpperBound(0,1, 2.1)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
0
>>> nFail
10
"""
global _times
if not hasattr(mol,'_chiralCenters'):
mol._chiralCenters = Chem.FindMolChiralCenters(mol)
if bounds is None:
bounds = MolDG.GetMoleculeBoundsMatrix(mol)
if smoothFirst: DG.DoTriangleSmoothing(bounds)
bm = bounds.copy()
#print '------------'
#print 'initial'
#for row in bm:
# print ' ',' '.join(['% 4.2f'%x for x in row])
#print '------------'
bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=useDirs,mol=mol)
if excludedVolumes:
bm = AddExcludedVolumes(bm,excludedVolumes,smoothIt=False)
if not DG.DoTriangleSmoothing(bm):
raise ValueError,"could not smooth bounds matrix"
#print '------------'
#print 'post replace and smooth'
#for row in bm:
# print ' ',' '.join(['% 4.2f'%x for x in row])
#print '------------'
if targetNumber<=0:
targetNumber=count
nFailed = 0
res = []
for i in range(count):
tmpM = bm[:,:]
m2 = Chem.Mol(mol.ToBinary())
t1 = time.time()
try:
if randomSeed<=0:
seed = i*10+1
else:
seed = i*10+randomSeed
EmbedMol(m2,tmpM,atomMatch,randomSeed=seed,
excludedVolumes=excludedVolumes)
except ValueError:
if not silent:
logger.info('Embed failed')
nFailed += 1
else:
t2 = time.time()
_times['embed'] = _times.get('embed',0)+t2-t1
keepIt=True
for idx,stereo in mol._chiralCenters:
if stereo in ('R','S'):
vol = ComputeChiralVolume(m2,idx)
if (stereo=='R' and vol>=0) or \
(stereo=='S' and vol<=0):
keepIt=False
break
if keepIt:
res.append(m2)
else:
logger.debug('Removed embedding due to chiral constraints.')
if len(res)==targetNumber: break
return bm,res,nFailed
def isNaN(v):
""" provides an OS independent way of detecting NaNs
This is intended to be used with values returned from the C++
side of things.
We can't actually test this from Python (which traps
zero division errors), but it would work something like
this if we could:
>>> isNaN(0)
False
#>>> isNan(1/0)
#True
"""
if v!=v and sys.platform=='win32':
return True
elif v==0 and v==1 and sys.platform!='win32':
return True
return False
def OptimizeMol(mol,bm,atomMatches=None,excludedVolumes=None,
forceConstant=1200.0,
maxPasses=5,verbose=False):
""" carries out a UFF optimization for a molecule optionally subject
to the constraints in a bounds matrix
- atomMatches, if provided, is a sequence of sequences
- forceConstant is the force constant of the spring used to enforce
the constraints
returns a 2-tuple:
1) the energy of the initial conformation
2) the energy post-embedding
NOTE that these energies include the energies of the constraints
>>> m = Chem.MolFromSmiles('OCCN')
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.5)
>>> pcophore.setUpperBound(0,1, 2.8)
>>> atomMatch = ((0,),(3,))
>>> bm,embeds,nFail = EmbedPharmacophore(m,atomMatch,pcophore,randomSeed=23,silent=1)
>>> len(embeds)
10
>>> testM = embeds[0]
Do the optimization:
>>> e1,e2 = OptimizeMol(testM,bm,atomMatches=atomMatch)
Optimizing should have lowered the energy:
>>> e2 < e1
True
Check the constrained distance:
>>> conf = testM.GetConformer(0)
>>> p0 = conf.GetAtomPosition(0)
>>> p3 = conf.GetAtomPosition(3)
>>> d03 = p0.Distance(p3)
>>> d03 >= pcophore.getLowerBound(0,1)-.01
True
>>> d03 <= pcophore.getUpperBound(0,1)+.01
True
If we optimize without the distance constraints (provided via the atomMatches
argument) we're not guaranteed to get the same results, particularly in a case
like the current one where the pharmacophore brings the atoms uncomfortably
close together:
>>> testM = embeds[1]
>>> e1,e2 = OptimizeMol(testM,bm)
>>> e2 < e1
True
>>> conf = testM.GetConformer(0)
>>> p0 = conf.GetAtomPosition(0)
>>> p3 = conf.GetAtomPosition(3)
>>> d03 = p0.Distance(p3)
>>> d03 >= pcophore.getLowerBound(0,1)-.01
True
>>> d03 <= pcophore.getUpperBound(0,1)+.01
False
"""
try:
ff = ChemicalForceFields.UFFGetMoleculeForceField(mol)
except:
logger.info('Problems building molecular forcefield',exc_info=True)
return -1.0,-1.0
weights=[]
if(atomMatches):
for k in range(len(atomMatches)):
for i in atomMatches[k]:
for l in range(k+1,len(atomMatches)):
for j in atomMatches[l]:
weights.append((i,j))
for i,j in weights:
if j<i:
i,j = j,i
minV = bm[j,i]
maxV = bm[i,j]
ff.AddDistanceConstraint(i,j,minV,maxV,forceConstant)
if excludedVolumes:
nAts = mol.GetNumAtoms()
conf = mol.GetConformer()
idx = nAts
for exVol in excludedVolumes:
assert exVol.pos is not None
logger.debug('ff.AddExtraPoint(%.4f,%.4f,%.4f)'%(exVol.pos[0],exVol.pos[1],
exVol.pos[2]))
ff.AddExtraPoint(exVol.pos[0],exVol.pos[1],exVol.pos[2],True)
indices = []
for localIndices,foo,bar in exVol.featInfo:
indices += list(localIndices)
for i in range(nAts):
v = numpy.array(conf.GetAtomPosition(i))-numpy.array(exVol.pos)
d = numpy.sqrt(numpy.dot(v,v))
if i not in indices:
if d<5.0:
logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%d,%.0f)'%(i,idx,exVol.exclusionDist,1000,forceConstant))
ff.AddDistanceConstraint(i,idx,exVol.exclusionDist,1000,
forceConstant)
else:
logger.debug('ff.AddDistanceConstraint(%d,%d,%.3f,%.3f,%.0f)'%(i,idx,bm[exVol.index,i],bm[i,exVol.index],forceConstant))
ff.AddDistanceConstraint(i,idx,bm[exVol.index,i],bm[i,exVol.index],
forceConstant)
idx += 1
ff.Initialize()
e1 = ff.CalcEnergy()
if isNaN(e1):
raise ValueError,'bogus energy'
if verbose:
print Chem.MolToMolBlock(mol)
for i,vol in enumerate(excludedVolumes):
pos = ff.GetExtraPointPos(i)
print >>sys.stderr,' % 7.4f % 7.4f % 7.4f As 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(pos)
needsMore=ff.Minimize()
nPasses=0
while needsMore and nPasses<maxPasses:
needsMore=ff.Minimize()
nPasses+=1
e2 = ff.CalcEnergy()
if isNaN(e2):
raise ValueError,'bogus energy'
if verbose:
print '--------'
print Chem.MolToMolBlock(mol)
for i,vol in enumerate(excludedVolumes):
pos = ff.GetExtraPointPos(i)
print >>sys.stderr,' % 7.4f % 7.4f % 7.4f Sb 0 0 0 0 0 0 0 0 0 0 0 0'%tuple(pos)
ff = None
return e1,e2
def EmbedOne(mol,name,match,pcophore,count=1,silent=0,**kwargs):
""" generates statistics for a molecule's embeddings
Four energies are computed for each embedding:
1) E1: the energy (with constraints) of the initial embedding
2) E2: the energy (with constraints) of the optimized embedding
3) E3: the energy (no constraints) of the geometry from E2
4) E4: the energy (no constraints) of the optimized free-molecule
(starting from the E3 geometry)
Returns a 9-tuple:
1) the mean value of E1
2) the sample standard deviation of E1
3) the mean value of E2
4) the sample standard deviation of E2
5) the mean value of E3
6) the sample standard deviation of E3
7) the mean value of E4
8) the sample standard deviation of E4
9) The number of embeddings that failed
"""
global _times
atomMatch = [list(x.GetAtomIds()) for x in match]
bm,ms,nFailed = EmbedPharmacophore(mol,atomMatch,pcophore,count=count,
silent=silent,**kwargs)
e1s = []
e2s = []
e3s = []
e4s = []
d12s = []
d23s = []
d34s = []
for m in ms:
t1 = time.time()
try:
e1,e2 = OptimizeMol(m,bm,atomMatch)
except ValueError:
pass
else:
t2 = time.time()
_times['opt1'] = _times.get('opt1',0)+t2-t1
e1s.append(e1)
e2s.append(e2)
d12s.append(e1-e2)
t1 = time.time()
try:
e3,e4 = OptimizeMol(m,bm)
except ValueError:
pass
else:
t2 = time.time()
_times['opt2'] = _times.get('opt2',0)+t2-t1
e3s.append(e3)
e4s.append(e4)
d23s.append(e2-e3)
d34s.append(e3-e4)
count += 1
try:
e1,e1d = Stats.MeanAndDev(e1s)
except:
e1 = -1.0
e1d=-1.0
try:
e2,e2d = Stats.MeanAndDev(e2s)
except:
e2 = -1.0
e2d=-1.0
try:
e3,e3d = Stats.MeanAndDev(e3s)
except:
e3 = -1.0
e3d=-1.0
try:
e4,e4d = Stats.MeanAndDev(e4s)
except:
e4 = -1.0
e4d=-1.0
if not silent:
print '%s(%d): %.2f(%.2f) -> %.2f(%.2f) : %.2f(%.2f) -> %.2f(%.2f)'%(name,nFailed,e1,e1d,e2,e2d,e3,e3d,e4,e4d)
return e1,e1d,e2,e2d,e3,e3d,e4,e4d,nFailed
def MatchPharmacophoreToMol(mol, featFactory, pcophore):
""" generates a list of all possible mappings of a pharmacophore to a molecule
Returns a 2-tuple:
1) a boolean indicating whether or not all features were found
2) a list, numFeatures long, of sequences of features
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
>>> m = Chem.MolFromSmiles('FCCN')
>>> match,mList = MatchPharmacophoreToMol(m,featFactory,pcophore)
>>> match
True
Two feature types:
>>> len(mList)
2
The first feature type, Acceptor, has two matches:
>>> len(mList[0])
2
>>> mList[0][0].GetAtomIds()
(0,)
>>> mList[0][1].GetAtomIds()
(3,)
  The second feature type, Donor, has a single match:
>>> len(mList[1])
1
>>> mList[1][0].GetAtomIds()
(3,)
"""
return MatchFeatsToMol(mol, featFactory, pcophore.getFeatures())
def _getFeatDict(mol,featFactory,features):
""" **INTERNAL USE ONLY**
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> m = Chem.MolFromSmiles('FCCN')
>>> d =_getFeatDict(m,featFactory,activeFeats)
>>> d.keys()
['Donor', 'Acceptor']
>>> donors = d['Donor']
>>> len(donors)
1
>>> donors[0].GetAtomIds()
(3,)
>>> acceptors = d['Acceptor']
>>> len(acceptors)
2
>>> acceptors[0].GetAtomIds()
(0,)
>>> acceptors[1].GetAtomIds()
(3,)
"""
molFeats = {}
for feat in features:
family = feat.GetFamily()
if not molFeats.has_key(family):
matches = featFactory.GetFeaturesForMol(mol,includeOnly=family)
molFeats[family] = matches
return molFeats
def MatchFeatsToMol(mol, featFactory, features):
""" generates a list of all possible mappings of each feature to a molecule
Returns a 2-tuple:
1) a boolean indicating whether or not all features were found
2) a list, numFeatures long, of sequences of features
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
>>> featFactory = ChemicalFeatures.BuildFeatureFactory(os.path.join(dataDir,'BaseFeatures.fdef'))
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> m = Chem.MolFromSmiles('FCCN')
>>> match,mList = MatchFeatsToMol(m,featFactory,activeFeats)
>>> match
True
Two feature types:
>>> len(mList)
2
The first feature type, Acceptor, has two matches:
>>> len(mList[0])
2
>>> mList[0][0].GetAtomIds()
(0,)
>>> mList[0][1].GetAtomIds()
(3,)
  The second feature type, Donor, has a single match:
>>> len(mList[1])
1
>>> mList[1][0].GetAtomIds()
(3,)
"""
molFeats = _getFeatDict(mol,featFactory,features)
res = []
for feat in features:
matches = molFeats.get(feat.GetFamily(),[])
if len(matches) == 0 :
return False, None
res.append(matches)
return True, res
def CombiEnum(sequence):
""" This generator takes a sequence of sequences as an argument and
provides all combinations of the elements of the subsequences:
>>> gen = CombiEnum(((1,2),(10,20)))
>>> gen.next()
[1, 10]
>>> gen.next()
[1, 20]
>>> [x for x in CombiEnum(((1,2),(10,20)))]
[[1, 10], [1, 20], [2, 10], [2, 20]]
>>> [x for x in CombiEnum(((1,2),(10,20),(100,200)))]
[[1, 10, 100], [1, 10, 200], [1, 20, 100], [1, 20, 200], [2, 10, 100], [2, 10, 200], [2, 20, 100], [2, 20, 200]]
"""
if not len(sequence):
yield []
elif len(sequence)==1:
for entry in sequence[0]:
yield [entry]
else:
for entry in sequence[0]:
for subVal in CombiEnum(sequence[1:]):
yield [entry]+subVal
def DownsampleBoundsMatrix(bm,indices,maxThresh=4.0):
""" removes rows from a bounds matrix that are
that are greater than a threshold value away from a set of
other points
returns the modfied bounds matrix
The goal of this function is to remove rows from the bounds matrix
that correspond to atoms that are likely to be quite far from
the pharmacophore we're interested in. Because the bounds smoothing
we eventually have to do is N^3, this can be a big win
>>> boundsMat = numpy.array([[0.0,3.0,4.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,),3.5)
>>> bm.shape
(2, 2)
we don't touch the input matrix:
>>> boundsMat.shape
(3, 3)
>>> print ', '.join(['%.3f'%x for x in bm[0]])
0.000, 3.000
>>> print ', '.join(['%.3f'%x for x in bm[1]])
2.000, 0.000
if the threshold is high enough, we don't do anything:
>>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,),5.0)
>>> bm.shape
(3, 3)
If there's a max value that's close enough to *any* of the indices
we pass in, we'll keep it:
>>> boundsMat = numpy.array([[0.0,4.0,3.0],[2.0,0.0,3.0],[2.0,2.0,0.0]])
>>> bm = DownsampleBoundsMatrix(boundsMat,(0,1),3.5)
>>> bm.shape
(3, 3)
"""
nPts = bm.shape[0]
k = numpy.zeros(nPts,numpy.int0)
for idx in indices: k[idx]=1
for i in indices:
row = bm[i]
for j in range(i+1,nPts):
if not k[j] and row[j]<maxThresh:
k[j]=1
keep = numpy.nonzero(k)[0]
bm2 = numpy.zeros((len(keep),len(keep)),numpy.float)
for i,idx in enumerate(keep):
row = bm[idx]
bm2[i] = numpy.take(row,keep)
return bm2
def CoarseScreenPharmacophore(atomMatch,bounds,pcophore,verbose=False):
"""
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 1.1)
>>> pcophore.setUpperBound(0,1, 1.9)
>>> pcophore.setLowerBound(0,2, 2.1)
>>> pcophore.setUpperBound(0,2, 2.9)
>>> pcophore.setLowerBound(1,2, 2.1)
>>> pcophore.setUpperBound(1,2, 3.9)
>>> bounds = numpy.array([[0,2,3],[1,0,4],[2,3,0]],numpy.float)
>>> CoarseScreenPharmacophore(((0,),(1,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((0,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((1,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((0,),(1,),(2,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((1,),(0,),(2,)),bounds,pcophore)
False
>>> CoarseScreenPharmacophore(((2,),(1,),(0,)),bounds,pcophore)
False
# we ignore the point locations here and just use their definitions:
>>> feats = [ChemicalFeatures.FreeChemicalFeature('HBondAcceptor', 'HAcceptor1', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Aromatic', 'Aromatic1', Geometry.Point3D(5.12, 0.908, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('HBondDonor', 'HDonor1', Geometry.Point3D(2.65, 0.0, 0.0)),
... ]
>>> pcophore=Pharmacophore.Pharmacophore(feats)
>>> pcophore.setLowerBound(0,1, 2.1)
>>> pcophore.setUpperBound(0,1, 2.9)
>>> pcophore.setLowerBound(0,2, 2.1)
>>> pcophore.setUpperBound(0,2, 2.9)
>>> pcophore.setLowerBound(0,3, 2.1)
>>> pcophore.setUpperBound(0,3, 2.9)
>>> pcophore.setLowerBound(1,2, 1.1)
>>> pcophore.setUpperBound(1,2, 1.9)
>>> pcophore.setLowerBound(1,3, 1.1)
>>> pcophore.setUpperBound(1,3, 1.9)
>>> pcophore.setLowerBound(2,3, 1.1)
>>> pcophore.setUpperBound(2,3, 1.9)
>>> bounds = numpy.array([[0,3,3,3],[2,0,2,2],[2,1,0,2],[2,1,1,0]],numpy.float)
>>> CoarseScreenPharmacophore(((0,),(1,),(2,),(3,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((0,),(1,),(3,),(2,)),bounds,pcophore)
True
>>> CoarseScreenPharmacophore(((1,),(0,),(3,),(2,)),bounds,pcophore)
False
"""
for k in range(len(atomMatch)):
if len(atomMatch[k])==1:
for l in range(k+1,len(atomMatch)):
if len(atomMatch[l])==1:
idx0 = atomMatch[k][0]
idx1 = atomMatch[l][0]
if idx1<idx0:
idx0,idx1=idx1,idx0
if bounds[idx1,idx0] >= pcophore.getUpperBound(k, l) or \
bounds[idx0,idx1] <= pcophore.getLowerBound(k, l) :
if verbose:
print '\t (%d,%d) [%d,%d] fail'%(idx1,idx0,k,l)
print '\t %f,%f - %f,%f'%(bounds[idx1,idx0],pcophore.getUpperBound(k,l),
bounds[idx0,idx1],pcophore.getLowerBound(k,l))
#logger.debug('\t >%s'%str(atomMatch))
#logger.debug()
#logger.debug('\t %f,%f - %f,%f'%(bounds[idx1,idx0],pcophore.getUpperBound(k,l),
# bounds[idx0,idx1],pcophore.getLowerBound(k,l)))
return False
return True
def Check2DBounds(atomMatch,mol,pcophore):
""" checks to see if a particular mapping of features onto
a molecule satisfies a pharmacophore's 2D restrictions
>>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
>>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
>>> pcophore.setUpperBound2D(0,1,3)
>>> m = Chem.MolFromSmiles('FCC(N)CN')
>>> Check2DBounds(((0,),(3,)),m,pcophore)
True
>>> Check2DBounds(((0,),(5,)),m,pcophore)
False
"""
dm = Chem.GetDistanceMatrix(mol,False,False,False)
nFeats = len(atomMatch)
for i in range(nFeats):
for j in range(i+1,nFeats):
lowerB = pcophore._boundsMat2D[j,i] #lowerB = pcophore.getLowerBound2D(i,j)
upperB = pcophore._boundsMat2D[i,j] #upperB = pcophore.getUpperBound2D(i,j)
dij=10000
for atomI in atomMatch[i]:
for atomJ in atomMatch[j]:
try:
dij = min(dij,dm[atomI,atomJ])
except IndexError:
print 'bad indices:',atomI,atomJ
print ' shape:',dm.shape
print ' match:',atomMatch
print ' mol:'
print Chem.MolToMolBlock(mol)
raise IndexError
if dij<lowerB or dij>upperB:
return False
return True
def _checkMatch(match,mol,bounds,pcophore,use2DLimits):
""" **INTERNAL USE ONLY**
checks whether a particular atom match can be satisfied by
a molecule
"""
atomMatch = ChemicalFeatures.GetAtomMatch(match)
if not atomMatch:
return None
elif use2DLimits:
if not Check2DBounds(atomMatch,mol,pcophore):
return None
if not CoarseScreenPharmacophore(atomMatch,bounds,pcophore):
return None
return atomMatch
def ConstrainedEnum(matches,mol,pcophore,bounds,use2DLimits=False,
index=0,soFar=[]):
""" Enumerates the list of atom mappings a molecule
has to a particular pharmacophore.
We do check distance bounds here.
"""
nMatches = len(matches)
if index>=nMatches:
yield soFar,[]
elif index==nMatches-1:
for entry in matches[index]:
nextStep = soFar+[entry]
if index != 0:
atomMatch = _checkMatch(nextStep,mol,bounds,pcophore,use2DLimits)
else:
atomMatch = ChemicalFeatures.GetAtomMatch(nextStep)
if atomMatch:
yield soFar+[entry],atomMatch
else:
for entry in matches[index]:
nextStep = soFar+[entry]
if index != 0:
atomMatch = _checkMatch(nextStep,mol,bounds,pcophore,use2DLimits)
if not atomMatch:
continue
for val in ConstrainedEnum(matches,mol,pcophore,bounds,use2DLimits=use2DLimits,
index=index+1,soFar=nextStep):
if val:
yield val
def MatchPharmacophore(matches,bounds,pcophore,useDownsampling=False,
use2DLimits=False,mol=None,excludedVolumes=None,
useDirs=False):
"""
if use2DLimits is set, the molecule must also be provided and topological
distances will also be used to filter out matches
"""
for match,atomMatch in ConstrainedEnum(matches,mol,pcophore,bounds,
use2DLimits=use2DLimits):
bm = bounds.copy()
bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore,useDirs=useDirs,mol=mol);
if excludedVolumes:
localEvs = []
for eV in excludedVolumes:
featInfo = []
for i,entry in enumerate(atomMatch):
info = list(eV.featInfo[i])
info[0] = entry
featInfo.append(info)
localEvs.append(ExcludedVolume.ExcludedVolume(featInfo,eV.index,
eV.exclusionDist))
bm = AddExcludedVolumes(bm,localEvs,smoothIt=False)
sz = bm.shape[0]
if useDownsampling:
indices = []
for entry in atomMatch:
indices.extend(entry)
if excludedVolumes:
for vol in localEvs:
indices.append(vol.index)
bm = DownsampleBoundsMatrix(bm,indices)
if DG.DoTriangleSmoothing(bm):
return 0,bm,match,(sz,bm.shape[0])
return 1,None,None,None
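# An illustrative sketch (added for clarity; not part of the original module).
# It shows one way MatchPharmacophore() is typically driven: feature matches
# from MatchPharmacophoreToMol() plus a distance bounds matrix from
# rdDistGeom.GetMoleculeBoundsMatrix(); the helper name and its return
# convention are examples, not an RDKit API.
def _exampleMatchPharmacophore(mol, featFactory, pcophore):
  from rdkit.Chem import rdDistGeom
  ok, featMatches = MatchPharmacophoreToMol(mol, featFactory, pcophore)
  if not ok:
    return None
  bounds = rdDistGeom.GetMoleculeBoundsMatrix(mol)
  failed, bm, match, details = MatchPharmacophore(featMatches, bounds, pcophore,
                                                  useDownsampling=True)
  if failed:
    return None
  # match holds one chemical feature per pharmacophore feature; bm is the
  # smoothed, downsampled bounds matrix that can be handed to EmbedPharmacophore()
  return match, bm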
def GetAllPharmacophoreMatches(matches,bounds,pcophore,useDownsampling=0,
progressCallback=None,
use2DLimits=False,mol=None,
verbose=False):
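  """ returns the feature-match combinations (generated with CombiEnum) that
  survive the optional 2D bounds check, the coarse distance screen, and
  triangle smoothing of the pharmacophore-adjusted bounds matrix
  """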
res = []
nDone = 0
for match in CombiEnum(matches):
atomMatch = ChemicalFeatures.GetAtomMatch(match)
if atomMatch and use2DLimits and mol:
pass2D = Check2DBounds(atomMatch,mol,pcophore)
if verbose:
print '..',atomMatch
print ' ..Pass2d:',pass2D
else:
pass2D = True
if atomMatch and pass2D and \
CoarseScreenPharmacophore(atomMatch,bounds,pcophore,verbose=verbose):
if verbose:
print ' ..CoarseScreen: Pass'
bm = bounds.copy()
if verbose:
print 'pre update:'
for row in bm:
print ' ',' '.join(['% 4.2f'%x for x in row])
bm = UpdatePharmacophoreBounds(bm,atomMatch,pcophore);
sz = bm.shape[0]
if verbose:
print 'pre downsample:'
for row in bm:
print ' ',' '.join(['% 4.2f'%x for x in row])
if useDownsampling:
indices = []
for entry in atomMatch:
indices += list(entry)
bm = DownsampleBoundsMatrix(bm,indices)
if verbose:
print 'post downsample:'
for row in bm:
print ' ',' '.join(['% 4.2f'%x for x in row])
if DG.DoTriangleSmoothing(bm):
res.append(match)
elif verbose:
print 'cannot smooth'
nDone+=1
if progressCallback:
progressCallback(nDone)
return res
def ComputeChiralVolume(mol,centerIdx,confId=-1):
""" Computes the chiral volume of an atom
We're using the chiral volume formula from Figure 7 of
Blaney and Dixon, Rev. Comp. Chem. V, 299-335 (1994)
>>> import os.path
>>> dataDir = os.path.join(RDConfig.RDCodeDir,'Chem/Pharm3D/test_data')
R configuration atoms give negative volumes:
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'R'
>>> ComputeChiralVolume(mol,1) < 0
True
S configuration atoms give positive volumes:
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'S'
>>> ComputeChiralVolume(mol,1) > 0
True
Non-chiral (or non-specified) atoms give zero volume:
>>> ComputeChiralVolume(mol,0) == 0.0
True
We also work on 3-coordinate atoms (with implicit Hs):
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-r-3.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'R'
>>> ComputeChiralVolume(mol,1)<0
True
>>> mol = Chem.MolFromMolFile(os.path.join(dataDir,'mol-s-3.mol'))
>>> Chem.AssignStereochemistry(mol)
>>> mol.GetAtomWithIdx(1).GetProp('_CIPCode')
'S'
>>> ComputeChiralVolume(mol,1)>0
True
"""
conf = mol.GetConformer(confId)
Chem.AssignStereochemistry(mol)
center = mol.GetAtomWithIdx(centerIdx)
if not center.HasProp('_CIPCode'):
return 0.0
nbrs = center.GetNeighbors()
nbrRanks = []
for nbr in nbrs:
rank = int(nbr.GetProp('_CIPRank'))
pos = conf.GetAtomPosition(nbr.GetIdx())
nbrRanks.append((rank,pos))
# if we only have three neighbors (i.e. the determining H isn't present)
# then use the central atom as the fourth point:
if len(nbrRanks)==3:
nbrRanks.append((-1,conf.GetAtomPosition(centerIdx)))
nbrRanks.sort()
ps = [x[1] for x in nbrRanks]
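  # the vectors below run from the highest-CIP-rank position (ps[3]) to the three
  # lower-ranked ones; the sign of the scalar triple product v1.(v2 x v3) gives
  # the handedness of the center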
v1 = ps[0]-ps[3]
v2 = ps[1]-ps[3]
v3 = ps[2]-ps[3]
res = v1.DotProduct(v2.CrossProduct(v3))
return res
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/Pharm3D/EmbedLib.py",
"copies": "1",
"size": "38529",
"license": "bsd-3-clause",
"hash": -8608010837664877000,
"line_mean": 30.3754071661,
"line_max": 130,
"alpha_frac": 0.621194425,
"autogenerated": false,
"ratio": 3.030438886267107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4151633311267107,
"avg_score": null,
"num_lines": null
} |
""" uses pymol to interact with molecules
"""
from rdkit import Chem
import os, tempfile
# Python3 compatibility
try:
from xmlrpclib import Server
except ImportError:
from xmlrpc.client import Server
_server=None
class MolViewer(object):
def __init__(self,host=None,port=9123,force=0,**kwargs):
global _server
if not force and _server is not None:
self.server=_server
else:
if not host:
host=os.environ.get('PYMOL_RPCHOST','localhost')
_server=None
serv = Server('http://%s:%d'%(host,port))
serv.ping()
_server = serv
self.server=serv
self.InitializePyMol()
def InitializePyMol(self):
""" does some initializations to set up PyMol according to our
tastes
"""
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
def DeleteAll(self):
" blows out everything in the viewer "
self.server.deleteAll()
def DeleteAllExcept(self,excludes):
" deletes everything except the items in the provided list of arguments "
allNames = self.server.getNames('*',False)
for nm in allNames:
if nm not in excludes:
self.server.deleteObject(nm)
def LoadFile(self,filename,name,showOnly=False):
""" calls pymol's "load" command on the given filename; the loaded object
is assigned the name "name"
"""
if showOnly:
self.DeleteAll()
id = self.server.loadFile(filename,name)
return id
def ShowMol(self,mol,name='molecule',showOnly=True,highlightFeatures=[],
molB="",confId=-1,zoom=True,forcePDB=False, showSticks=False):
""" special case for displaying a molecule or mol block """
server = self.server
if not zoom:
self.server.do('view rdinterface,store')
if showOnly:
self.DeleteAll()
if not forcePDB and mol.GetNumAtoms()<999 :
if not molB:
molB = Chem.MolToMolBlock(mol,confId=confId)
mid = server.loadMolBlock(molB,name)
else:
if not molB:
molB = Chem.MolToPDBBlock(mol,confId=confId)
mid = server.loadPDB(molB,name)
if highlightFeatures:
nm = name+'-features'
conf = mol.GetConformer(confId)
for feat in highlightFeatures:
pt = [0.0,0.0,0.0]
for idx in feat:
loc = conf.GetAtomPosition(idx)
pt[0] += loc[0]/len(feat)
pt[1] += loc[1]/len(feat)
pt[2] += loc[2]/len(feat)
server.sphere(pt,0.2,(1,1,1),nm)
if zoom:
server.zoom('visible')
else:
self.server.do('view rdinterface,recall')
if showSticks: # show molecule in stick view
self.server.do('show sticks, {}'.format(name))
return mid
def GetSelectedAtoms(self,whichSelection=None):
" returns the selected atoms "
if not whichSelection:
sels = self.server.getNames('selections')
if sels:
whichSelection = sels[-1]
else:
whichSelection=None
if whichSelection:
items = self.server.index(whichSelection)
else:
items = []
return items
def SelectAtoms(self,itemId,atomIndices,selName='selection'):
" selects a set of atoms "
ids = '(id '
ids += ','.join(['%d'%(x+1) for x in atomIndices])
ids += ')'
cmd = 'select %s,%s and %s'%(selName,ids,itemId)
self.server.do(cmd)
def HighlightAtoms(self,indices,where,extraHighlight=False):
" highlights a set of atoms "
if extraHighlight:
idxText = ','.join(['%s and (id %d)'%(where,x) for x in indices])
self.server.do('edit %s'%idxText)
else:
idxText = ' or '.join(['id %d'%x for x in indices])
self.server.do('select selection, %s and (%s)'%(where,idxText))
def SetDisplayStyle(self,obj,style=''):
" change the display style of the specified object "
self.server.do('hide everything,%s'%(obj,))
if style:
self.server.do('show %s,%s'%(style,obj))
def SelectProteinNeighborhood(self,aroundObj,inObj,distance=5.0,
name='neighborhood',showSurface=False):
""" selects the area of a protein around a specified object/selection name;
optionally adds a surface to that """
self.server.do('select %(name)s,byres (%(aroundObj)s around %(distance)f) and %(inObj)s'%locals())
if showSurface:
self.server.do('show surface,%s'%name)
self.server.do('disable %s'%name)
def AddPharmacophore(self,locs,colors,label,sphereRad=0.5):
" adds a set of spheres "
self.server.do('view rdinterface,store')
self.server.resetCGO(label)
for i,loc in enumerate(locs):
self.server.sphere(loc,sphereRad,colors[i],label,1)
self.server.do('enable %s'%label)
self.server.do('view rdinterface,recall')
def SetDisplayUpdate(self,val):
if not val:
self.server.do('set defer_update,1')
else:
self.server.do('set defer_update,0')
def GetAtomCoords(self,sels):
" returns the coordinates of the selected atoms "
res = {}
for label,idx in sels:
coords = self.server.getAtomCoords('(%s and id %d)'%(label,idx))
res[(label,idx)] = coords
return res
def HideAll(self):
self.server.do('disable all')
def HideObject(self,objName):
self.server.do('disable %s'%objName)
def DisplayObject(self,objName):
self.server.do('enable %s'%objName)
def Redraw(self):
self.server.do('refresh')
def Zoom(self,objName):
self.server.zoom(objName)
def DisplayHBonds(self,objName,molName,proteinName,
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of h bonds between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",mode=2;\n"
cmd += "enable %(objName)s;"
cmd = cmd%locals()
self.server.do(cmd)
def DisplayCollisions(self,objName,molName,proteinName,distCutoff=3.0,
color='red',
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of collisions between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",%(distCutoff)f,mode=0;\n"
cmd += """enable %(objName)s
color %(color)s, %(objName)s"""
cmd = cmd%locals()
self.server.do(cmd)
def GetPNG(self,h=None,w=None,preDelay=0):
try:
import Image
except ImportError:
from PIL import Image
import time
if preDelay>0:
time.sleep(preDelay)
fd = tempfile.NamedTemporaryFile(suffix='.png',delete=False)
fd.close()
self.server.do('png %s'%fd.name)
time.sleep(0.2) # <- wait a short period so that PyMol can finish
for i in range(10):
try:
img = Image.open(fd.name)
break
except IOError:
time.sleep(0.1)
os.unlink(fd.name)
fd=None
if h is not None or w is not None:
sz = img.size
if h is None:
h=sz[1]
if w is None:
w=sz[0]
if h<sz[1]:
frac = float(h)/sz[1]
w *= frac
w = int(w)
img=img.resize((w,h),True)
elif w<sz[0]:
frac = float(w)/sz[0]
h *= frac
h = int(h)
img=img.resize((w,h),True)
return img
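# An illustrative usage sketch (added for clarity; not part of the original
# module). It assumes a PyMOL instance is running with its XML-RPC server
# enabled (e.g. launched as "pymol -R", default port 9123); the molecule and
# object names are arbitrary examples.
def _exampleSession():
  from rdkit.Chem import AllChem
  mol = Chem.MolFromSmiles('c1ccccc1O')
  mol = Chem.AddHs(mol)
  AllChem.EmbedMolecule(mol)          # the viewer needs 3D coordinates
  viewer = MolViewer()                # connects to localhost:9123 by default
  viewer.ShowMol(mol, name='phenol', showSticks=True)
  viewer.HighlightAtoms([1, 2, 3], 'phenol')
  return viewer.GetPNG(w=300)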
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/PyMol.py",
"copies": "2",
"size": "7796",
"license": "bsd-3-clause",
"hash": -5112879296776743000,
"line_mean": 29.5725490196,
"line_max": 102,
"alpha_frac": 0.6222421755,
"autogenerated": false,
"ratio": 3.3487972508591066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4971039426359107,
"avg_score": null,
"num_lines": null
} |
""" uses pymol to interact with molecules
"""
from rdkit import Chem
import os, tempfile, sys
# Python3 compatibility
try:
from xmlrpclib import Server
except ImportError:
from xmlrpc.client import Server
_server=None
class MolViewer(object):
def __init__(self,host=None,port=9123,force=0,**kwargs):
global _server
if not force and _server is not None:
self.server=_server
else:
if not host:
host=os.environ.get('PYMOL_RPCHOST','localhost')
_server=None
serv = Server('http://%s:%d'%(host,port))
serv.ping()
_server = serv
self.server=serv
self.InitializePyMol()
def InitializePyMol(self):
""" does some initializations to set up PyMol according to our
tastes
"""
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
def DeleteAll(self):
" blows out everything in the viewer "
self.server.deleteAll()
def DeleteAllExcept(self,excludes):
" deletes everything except the items in the provided list of arguments "
allNames = self.server.getNames('*',False)
for nm in allNames:
if nm not in excludes:
self.server.deleteObject(nm)
def LoadFile(self,filename,name,showOnly=False):
""" calls pymol's "load" command on the given filename; the loaded object
is assigned the name "name"
"""
if showOnly:
self.DeleteAll()
id = self.server.loadFile(filename,name)
return id
def ShowMol(self,mol,name='molecule',showOnly=True,highlightFeatures=[],
molB="",confId=-1,zoom=True,forcePDB=False, showSticks=False):
""" special case for displaying a molecule or mol block """
server = self.server
if not zoom:
self.server.do('view rdinterface,store')
if showOnly:
self.DeleteAll()
if not forcePDB and mol.GetNumAtoms()<999 :
if not molB:
molB = Chem.MolToMolBlock(mol,confId=confId)
mid = server.loadMolBlock(molB,name)
else:
if not molB:
molB = Chem.MolToPDBBlock(mol,confId=confId)
mid = server.loadPDB(molB,name)
if highlightFeatures:
nm = name+'-features'
conf = mol.GetConformer(confId)
for feat in highlightFeatures:
pt = [0.0,0.0,0.0]
for idx in feat:
loc = conf.GetAtomPosition(idx)
pt[0] += loc[0]/len(feat)
pt[1] += loc[1]/len(feat)
pt[2] += loc[2]/len(feat)
server.sphere(pt,0.2,(1,1,1),nm)
if zoom:
server.zoom('visible')
else:
self.server.do('view rdinterface,recall')
if showSticks: # show molecule in stick view
self.server.do('show sticks, {}'.format(name))
return mid
def GetSelectedAtoms(self,whichSelection=None):
" returns the selected atoms "
if not whichSelection:
sels = self.server.getNames('selections')
if sels:
whichSelection = sels[-1]
else:
whichSelection=None
if whichSelection:
items = self.server.index(whichSelection)
else:
items = []
return items
def SelectAtoms(self,itemId,atomIndices,selName='selection'):
" selects a set of atoms "
ids = '(id '
ids += ','.join(['%d'%(x+1) for x in atomIndices])
ids += ')'
cmd = 'select %s,%s and %s'%(selName,ids,itemId)
self.server.do(cmd)
def HighlightAtoms(self,indices,where,extraHighlight=False):
" highlights a set of atoms "
if extraHighlight:
idxText = ','.join(['%s and (id %d)'%(where,x) for x in indices])
self.server.do('edit %s'%idxText)
else:
idxText = ' or '.join(['id %d'%x for x in indices])
self.server.do('select selection, %s and (%s)'%(where,idxText))
def SetDisplayStyle(self,obj,style=''):
" change the display style of the specified object "
self.server.do('hide everything,%s'%(obj,))
if style:
self.server.do('show %s,%s'%(style,obj))
def SelectProteinNeighborhood(self,aroundObj,inObj,distance=5.0,
name='neighborhood',showSurface=False):
""" selects the area of a protein around a specified object/selection name;
optionally adds a surface to that """
self.server.do('select %(name)s,byres (%(aroundObj)s around %(distance)f) and %(inObj)s'%locals())
if showSurface:
self.server.do('show surface,%s'%name)
self.server.do('disable %s'%name)
def AddPharmacophore(self,locs,colors,label,sphereRad=0.5):
" adds a set of spheres "
self.server.do('view rdinterface,store')
self.server.resetCGO(label)
for i,loc in enumerate(locs):
self.server.sphere(loc,sphereRad,colors[i],label,1)
self.server.do('enable %s'%label)
self.server.do('view rdinterface,recall')
def SetDisplayUpdate(self,val):
if not val:
self.server.do('set defer_update,1')
else:
self.server.do('set defer_update,0')
def GetAtomCoords(self,sels):
" returns the coordinates of the selected atoms "
res = {}
for label,idx in sels:
coords = self.server.getAtomCoords('(%s and id %d)'%(label,idx))
res[(label,idx)] = coords
return res
def HideAll(self):
self.server.do('disable all')
def HideObject(self,objName):
self.server.do('disable %s'%objName)
def DisplayObject(self,objName):
self.server.do('enable %s'%objName)
def Redraw(self):
self.server.do('refresh')
def Zoom(self,objName):
self.server.zoom(objName)
def DisplayHBonds(self,objName,molName,proteinName,
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of h bonds between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",mode=2;\n"
cmd += "enable %(objName)s;"
cmd = cmd%locals()
self.server.do(cmd)
def DisplayCollisions(self,objName,molName,proteinName,distCutoff=3.0,
color='red',
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of collisions between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",%(distCutoff)f,mode=0;\n"
cmd += """enable %(objName)s
color %(color)s, %(objName)s"""
cmd = cmd%locals()
self.server.do(cmd)
def SaveFile(self,filename):
# PyMol will interpret the path to be relative to where it was started
# from. Remedy that.
if not filename:
raise ValueError('empty filename')
filename = os.path.abspath(filename)
self.server.save(filename)
def GetPNG(self,h=None,w=None,preDelay=0):
try:
import Image
except ImportError:
from PIL import Image
import time
if preDelay>0:
time.sleep(preDelay)
fd = tempfile.NamedTemporaryFile(suffix='.png',delete=False)
fd.close()
self.server.do('png %s'%fd.name)
time.sleep(0.2) # <- wait a short period so that PyMol can finish
for i in range(10):
try:
img = Image.open(fd.name)
break
except IOError:
time.sleep(0.1)
try:
os.unlink(fd.name)
except (OSError,PermissionError):
# happens sometimes on Windows. Not going to worry about this too deeply since
# the files are in a temp dir anyway. This was github #936
pass
fd=None
if h is not None or w is not None:
sz = img.size
if h is None:
h=sz[1]
if w is None:
w=sz[0]
if h<sz[1]:
frac = float(h)/sz[1]
w *= frac
w = int(w)
img=img.resize((w,h),True)
elif w<sz[0]:
frac = float(w)/sz[0]
h *= frac
h = int(h)
img=img.resize((w,h),True)
return img
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/PyMol.py",
"copies": "1",
"size": "8287",
"license": "bsd-3-clause",
"hash": -2759578170375097300,
"line_mean": 29.921641791,
"line_max": 102,
"alpha_frac": 0.6250754193,
"autogenerated": false,
"ratio": 3.3921408104789195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.940957314131631,
"avg_score": 0.021528617692522144,
"num_lines": 268
} |
""" uses pymol to interact with molecules
"""
from rdkit import Chem
import xmlrpclib,os,tempfile
_server=None
class MolViewer(object):
def __init__(self,host=None,port=9123,force=0,**kwargs):
global _server
if not force and _server is not None:
self.server=_server
else:
if not host:
host=os.environ.get('PYMOL_RPCHOST','localhost')
_server=None
serv = xmlrpclib.Server('http://%s:%d'%(host,port))
serv.ping()
_server = serv
self.server=serv
self.InitializePyMol()
def InitializePyMol(self):
""" does some initializations to set up PyMol according to our
tastes
"""
self.server.do('set valence,1')
self.server.do('set stick_rad,0.15')
self.server.do('set mouse_selection_mode,0')
self.server.do('set line_width,2')
self.server.do('set selection_width,10')
self.server.do('set auto_zoom,0')
def DeleteAll(self):
" blows out everything in the viewer "
self.server.deleteAll()
def DeleteAllExcept(self,excludes):
" deletes everything except the items in the provided list of arguments "
allNames = self.server.getNames('*',False)
for nm in allNames:
if nm not in excludes:
self.server.deleteObject(nm)
def LoadFile(self,filename,name,showOnly=False):
""" calls pymol's "load" command on the given filename; the loaded object
is assigned the name "name"
"""
if showOnly:
self.DeleteAll()
id = self.server.loadFile(filename,name)
return id
def ShowMol(self,mol,name='molecule',showOnly=True,highlightFeatures=[],
molB="",confId=-1,zoom=True,forcePDB=False):
""" special case for displaying a molecule or mol block """
server = self.server
if not zoom:
self.server.do('view rdinterface,store')
if showOnly:
self.DeleteAll()
if not forcePDB and mol.GetNumAtoms()<999 :
if not molB:
molB = Chem.MolToMolBlock(mol,confId=confId)
mid = server.loadMolBlock(molB,name)
else:
if not molB:
molB = Chem.MolToPDBBlock(mol,confId=confId)
mid = server.loadPDB(molB,name)
if highlightFeatures:
nm = name+'-features'
conf = mol.GetConformer(confId)
for feat in highlightFeatures:
pt = [0.0,0.0,0.0]
for idx in feat:
loc = conf.GetAtomPosition(idx)
pt[0] += loc[0]/len(feat)
pt[1] += loc[1]/len(feat)
pt[2] += loc[2]/len(feat)
server.sphere(pt,0.2,(1,1,1),nm)
if zoom:
server.zoom('visible')
else:
self.server.do('view rdinterface,recall')
return mid
def GetSelectedAtoms(self,whichSelection=None):
" returns the selected atoms "
if not whichSelection:
sels = self.server.getNames('selections')
if sels:
whichSelection = sels[-1]
else:
whichSelection=None
if whichSelection:
items = self.server.index(whichSelection)
else:
items = []
return items
def SelectAtoms(self,itemId,atomIndices,selName='selection'):
" selects a set of atoms "
ids = '(id '
ids += ','.join(['%d'%(x+1) for x in atomIndices])
ids += ')'
cmd = 'select %s,%s and %s'%(selName,ids,itemId)
self.server.do(cmd)
def HighlightAtoms(self,indices,where,extraHighlight=False):
" highlights a set of atoms "
if extraHighlight:
idxText = ','.join(['%s and (id %d)'%(where,x) for x in indices])
self.server.do('edit %s'%idxText)
else:
idxText = ' or '.join(['id %d'%x for x in indices])
self.server.do('select selection, %s and (%s)'%(where,idxText))
def SetDisplayStyle(self,obj,style=''):
" change the display style of the specified object "
self.server.do('hide everything,%s'%(obj,))
if style:
self.server.do('show %s,%s'%(style,obj))
def SelectProteinNeighborhood(self,aroundObj,inObj,distance=5.0,
name='neighborhood',showSurface=False):
""" selects the area of a protein around a specified object/selection name;
optionally adds a surface to that """
self.server.do('select %(name)s,byres (%(aroundObj)s around %(distance)f) and %(inObj)s'%locals())
if showSurface:
self.server.do('show surface,%s'%name)
self.server.do('disable %s'%name)
def AddPharmacophore(self,locs,colors,label,sphereRad=0.5):
" adds a set of spheres "
self.server.do('view rdinterface,store')
self.server.resetCGO(label)
for i,loc in enumerate(locs):
self.server.sphere(loc,sphereRad,colors[i],label,1)
self.server.do('enable %s'%label)
self.server.do('view rdinterface,recall')
def SetDisplayUpdate(self,val):
if not val:
self.server.do('set defer_update,1')
else:
self.server.do('set defer_update,0')
def GetAtomCoords(self,sels):
" returns the coordinates of the selected atoms "
res = {}
for label,idx in sels:
coords = self.server.getAtomCoords('(%s and id %d)'%(label,idx))
res[(label,idx)] = coords
return res
def HideAll(self):
self.server.do('disable all')
def HideObject(self,objName):
self.server.do('disable %s'%objName)
def DisplayObject(self,objName):
self.server.do('enable %s'%objName)
def Redraw(self):
self.server.do('refresh')
def Zoom(self,objName):
self.server.zoom(objName)
def DisplayHBonds(self,objName,molName,proteinName,
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of h bonds between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",mode=2;\n"
cmd += "enable %(objName)s;"
cmd = cmd%locals()
self.server.do(cmd)
def DisplayCollisions(self,objName,molName,proteinName,distCutoff=3.0,
color='red',
molSelText='(%(molName)s)',
proteinSelText='(%(proteinName)s and not het)'):
" toggles display of collisions between the protein and a specified molecule "
cmd = "delete %(objName)s;\n"
cmd += "dist %(objName)s," + molSelText+","+proteinSelText+",%(distCutoff)f,mode=0;\n"
cmd += """enable %(objName)s
color %(color)s, %(objName)s"""
cmd = cmd%locals()
self.server.do(cmd)
def GetPNG(self,h=None,w=None,preDelay=0):
try:
import Image
except ImportError:
from PIL import Image
import time
if preDelay>0:
time.sleep(preDelay)
fd = tempfile.NamedTemporaryFile(suffix='.png',delete=False)
fd.close()
self.server.do('png %s'%fd.name)
time.sleep(0.2) # <- wait a short period so that PyMol can finish
for i in range(10):
try:
img = Image.open(fd.name)
break
except IOError:
time.sleep(0.1)
os.unlink(fd.name)
fd=None
if h is not None or w is not None:
sz = img.size
if h is None:
h=sz[1]
if w is None:
w=sz[0]
if h<sz[1]:
frac = float(h)/sz[1]
w *= frac
w = int(w)
img=img.resize((w,h),True)
elif w<sz[0]:
frac = float(w)/sz[0]
h *= frac
h = int(h)
img=img.resize((w,h),True)
return img
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/Chem/PyMol.py",
"copies": "1",
"size": "7617",
"license": "bsd-3-clause",
"hash": -6033795344289171000,
"line_mean": 29.8380566802,
"line_max": 102,
"alpha_frac": 0.616253118,
"autogenerated": false,
"ratio": 3.342255375164546,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9309636988105882,
"avg_score": 0.029774301011732854,
"num_lines": 247
} |
from __future__ import print_function
from rdkit import RDConfig
import sys, os.path
from rdkit.VLib.Supply import SupplyNode
from rdkit.six.moves import cPickle
if RDConfig.usePgSQL:
from pyPgSQL import PgSQL as sql
class _lazyDataSeq:
"""
These classes are used to speed up (a lot) the process of
pulling pickled objects from PostgreSQL databases. Instead of
having to use all of PgSQL's typechecking, we'll make a lot of
assumptions about what's coming out of the Db and its layout.
The results can lead to drastic improvements in perfomance.
"""
def __init__(self, cursor, cmd, pickleCol=1, depickle=1, klass=None):
self.cursor = cursor
self.cmd = cmd
self._first = 0
self._pickleCol = pickleCol
self._depickle = depickle
self._klass = klass
def _validate(self):
curs = self.cursor
if not curs or \
curs.closed or \
curs.conn is None or \
(curs.res.resultType != sql.RESULT_DQL and curs.closed is None):
raise ValueError('bad cursor')
if curs.res.nfields and curs.res.nfields < 2:
raise ValueError('invalid number of results returned (%d), must be at least 2' %
curs.res.nfields)
desc1 = curs.description[self._pickleCol]
ftv = desc1[self._pickleCol].value
if ftv != sql.BINARY:
raise TypeError('pickle column (%d) of bad type' % self._pickleCol)
def __iter__(self):
try:
self.cursor.execute(self.cmd)
except Exception:
import traceback
traceback.print_exc()
print('COMMAND:', self.cmd)
raise
self._first = 1
self._validate()
return self
def next(self):
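      # the first row is fetched through the normal cursor API; subsequent rows
      # are pulled directly from the named server-side cursor via the low-level
      # connection, which skips PgSQL's per-column type handling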
curs = self.cursor
if not curs or \
curs.closed or \
curs.conn is None or \
curs.res is None or \
(curs.res.resultType != sql.RESULT_DQL and curs.closed is None):
raise StopIteration
if not self._first:
res = curs.conn.conn.query('fetch 1 from "%s"' % self.cursor.name)
if res.ntuples == 0:
raise StopIteration
else:
if res.nfields < 2:
raise ValueError('bad result: %s' % str(res))
t = [res.getvalue(0, x) for x in range(res.nfields)]
val = t[self._pickleCol]
else:
t = curs.fetchone()
val = str(t[self._pickleCol])
self._first = 0
if self._depickle:
if not self._klass:
fp = cPickle.loads(val)
else:
fp = self._klass(val)
fields = list(t)
del fields[self._pickleCol]
fp._fieldsFromDb = fields
else:
fp = list(t)
return fp
class _dataSeq(_lazyDataSeq):
def __init__(self, cursor, cmd, pickleCol=1, depickle=1):
self.cursor = cursor
self.cmd = cmd
self.res = None
self.rowCount = -1
self.idx = 0
self._pickleCol = pickleCol
self._depickle = depickle
def __iter__(self):
self.cursor.execute(self.cmd)
self._first = self.cursor.fetchone()
self._validate()
self.res = self.cursor.conn.conn.query('fetch all from "%s"' % self.cursor.name)
self.rowCount = self.res.ntuples + 1
self.idx = 0
if self.res.nfields < 2:
        raise ValueError('bad query result: %s' % str(self.res))
return self
def next(self):
if self.idx >= self.rowCount:
raise StopIteration
fp = self[self.idx]
self.idx += 1
return fp
def __len__(self):
return self.rowCount
def __getitem__(self, idx):
if self.res is None:
self.cursor.execute(self.cmd)
self._first = self.cursor.fetchone()
self._validate()
self.res = self.cursor.conn.conn.query('fetch all from "%s"' % self.cursor.name)
self.rowCount = self.res.ntuples + 1
self.idx = 0
if self.res.nfields < 2:
          raise ValueError('bad query result: %s' % str(self.res))
if idx < 0:
idx = self.rowCount + idx
if idx < 0 or (idx >= 0 and idx >= self.rowCount):
raise IndexError
if idx == 0:
val = str(self._first[self._pickleCol])
t = list(self._first)
else:
val = self.res.getvalue(self.idx - 1, self._pickleCol)
t = [self.res.getvalue(self.idx - 1, x) for x in range(self.res.nfields)]
if self._depickle:
try:
fp = cPickle.loads(val)
except Exception:
import logging
del t[self._pickleCol]
logging.exception('Depickling failure in row: %s' % str(t))
raise
del t[self._pickleCol]
fp._fieldsFromDb = t
else:
fp = t
return fp
else:
_dataSeq = None
class DbPickleSupplyNode(SupplyNode):
""" Supplies pickled objects from a db result set:
Sample Usage:
>>> from rdkit.Dbase.DbConnection import DbConnect
"""
def __init__(self, cursor, cmd, binaryCol, **kwargs):
SupplyNode.__init__(self, **kwargs)
self._dbResults = dbResults
self._supplier = DbMolSupplier.RandomAccessDbMolSupplier(self._dbResults, **kwargs)
def reset(self):
SupplyNode.reset(self)
self._supplier.Reset()
def next(self):
"""
"""
return self._supplier.next()
def GetNode(dbName, tableName):
from rdkit.Dbase.DbConnection import DbConnect
conn = DbConnect(dbName, tableName)
return DbMolSupplyNode(conn.GetData())
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/VLib/NodeLib/DbPickleSupplier.py",
"copies": "5",
"size": "5791",
"license": "bsd-3-clause",
"hash": -1577467328491821800,
"line_mean": 26.7081339713,
"line_max": 88,
"alpha_frac": 0.5862545329,
"autogenerated": false,
"ratio": 3.6932397959183674,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6779494328818367,
"avg_score": null,
"num_lines": null
} |
from rdkit import RDConfig
import os,sys
import unittest
from rdkit import Chem
from rdkit.Geometry import Point3D
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
def ptEq(pt1, pt2, tol=1e-4):
return feq(pt1.x,pt2.x,tol) and feq(pt1.y,pt2.y,tol) and feq(pt1.z,pt2.z,tol)
def addConf(mol):
conf = Chem.Conformer(mol.GetNumAtoms())
for i in range(mol.GetNumAtoms()):
conf.SetAtomPosition(i,(0.,0.,0.))
mol.AddConformer(conf)
mb = Chem.MolToMolBlock(mol)
mb = Chem.MolToMolBlock(mol)
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test0Conformers(self) :
"""Test the conformer data structure"""
mol = Chem.MolFromSmiles("CC")
conf = Chem.Conformer(2)
conf.SetAtomPosition(0, (-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, (1.0, 0.0, 0.0))
conf.SetId(0)
cid = mol.AddConformer(conf)
self.failUnless(cid == 0)
conf2 = mol.GetConformer(0)
self.failUnless(conf2.GetId() == cid)
pt1 = conf2.GetAtomPosition(0)
self.failUnless(ptEq(pt1, Point3D(-0.5, 0.0, 0.0)))
pt2 = conf2.GetAtomPosition(1)
self.failUnless(ptEq(pt2, Point3D(1.0, 0.0, 0.0)))
#changing conf should not change conf2 - related to issue 217
conf.SetAtomPosition(1, Point3D(2.0, 0.0, 0.0))
pt2 = conf2.GetAtomPosition(1)
self.failUnless(feq(pt2[0], 1.0))
conf = Chem.Conformer(2)
conf.SetAtomPosition(0, Point3D(-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, Point3D(1.0, 0.0, 0.0))
conf.SetId(2)
cid = mol.AddConformer(conf, 0)
self.failUnless(cid == 2)
conf3 = mol.GetConformer(2)
def test0AddHds(self) :
mol = Chem.MolFromSmiles("CC")
conf = Chem.Conformer(1)
conf.SetAtomPosition(0, Point3D(-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, Point3D(1.0, 0.0, 0.0))
cid = mol.AddConformer(conf)
conf2 = mol.GetConformer()
self.failUnless(conf2.GetNumAtoms() == 2)
nmol = Chem.AddHs(mol, 0,1)
conf3 = nmol.GetConformer()
self.failUnless(conf3.GetNumAtoms() == 8)
self.failUnless(conf2.GetNumAtoms() == 2)
targetCoords = [[-0.5, 0.0, 0.0],
[1.0, 0.0, 0.0],
[-0.8667, 0.0, 1.03709],
[-0.8667, 0.8981, -0.5185],
[-0.8667, -0.8981, -0.5185],
[1.3667, 0.0, -1.0371],
[1.36667, 0.8981, 0.5185],
[1.36667, -0.8981, 0.5185]]
for i in range(8) :
pt = conf3.GetAtomPosition(i)
self.failUnless(ptEq(pt, apply(Point3D,tuple(targetCoords[i]))))
def test2Issue217(self) :
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
self.failUnless(m.GetNumConformers()==1);
mb2 = Chem.MolToMolBlock(m)
def test3Exceptions(self) :
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
self.failUnless(m.GetNumConformers()==1)
self.failUnlessRaises(ValueError,lambda:m.GetConformer(2))
def test4ConfTuple(self):
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
for i in range(10):
addConf(m)
confs = m.GetConformers()
self.failUnless(len(confs) == 10)
for conf in confs:
for i in range(6):
pt = conf.GetAtomPosition(i)
self.failUnless(ptEq(pt, Point3D(0.0, 0.0, 0.0)))
m.RemoveAllConformers()
self.failUnless(m.GetNumConformers() == 0)
confs = m.GetConformers()
self.failUnless(confs == ())
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "rdkit/rdkit-orig",
"path": "Code/GraphMol/Wrap/testConformer.py",
"copies": "2",
"size": "3548",
"license": "bsd-3-clause",
"hash": 7824153832633648000,
"line_mean": 26.937007874,
"line_max": 79,
"alpha_frac": 0.6087936866,
"autogenerated": false,
"ratio": 2.6300963676797626,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42388900542797625,
"avg_score": null,
"num_lines": null
} |
from rdkit import RDConfig
import os, sys
import unittest
from rdkit import Chem
from rdkit import Geometry
from rdkit.Geometry import Point3D
def feq(v1, v2, tol2=1e-4):
return abs(v1 - v2) <= tol2
def ptEq(pt1, pt2, tol=1e-4):
return feq(pt1.x, pt2.x, tol) and feq(pt1.y, pt2.y, tol) and feq(pt1.z, pt2.z, tol)
def addConf(mol):
conf = Chem.Conformer(mol.GetNumAtoms())
for i in range(mol.GetNumAtoms()):
conf.SetAtomPosition(i, (0., 0., 0.))
mol.AddConformer(conf)
mb = Chem.MolToMolBlock(mol)
mb = Chem.MolToMolBlock(mol)
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test0Conformers(self):
"""Test the conformer data structure"""
mol = Chem.MolFromSmiles("CC")
conf = Chem.Conformer(2)
conf.SetAtomPosition(0, (-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, (1.0, 0.0, 0.0))
conf.SetId(0)
cid = mol.AddConformer(conf)
self.assertTrue(cid == 0)
conf2 = mol.GetConformer(0)
self.assertTrue(conf2.GetId() == cid)
pt1 = conf2.GetAtomPosition(0)
self.assertTrue(ptEq(pt1, Point3D(-0.5, 0.0, 0.0)))
pt2 = conf2.GetAtomPosition(1)
self.assertTrue(ptEq(pt2, Point3D(1.0, 0.0, 0.0)))
#changing conf should not change conf2 - related to issue 217
conf.SetAtomPosition(1, Point3D(2.0, 0.0, 0.0))
pt2 = conf2.GetAtomPosition(1)
self.assertTrue(feq(pt2[0], 1.0))
conf = Chem.Conformer(2)
conf.SetAtomPosition(0, Point3D(-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, Point3D(1.0, 0.0, 0.0))
conf.SetId(2)
cid = mol.AddConformer(conf, 0)
self.assertTrue(cid == 2)
conf3 = mol.GetConformer(2)
def test0AddHds(self):
mol = Chem.MolFromSmiles("CC")
conf = Chem.Conformer(1)
conf.SetAtomPosition(0, Point3D(-0.5, 0.0, 0.0))
conf.SetAtomPosition(1, Point3D(1.0, 0.0, 0.0))
cid = mol.AddConformer(conf)
conf2 = mol.GetConformer()
self.assertTrue(conf2.GetNumAtoms() == 2)
nmol = Chem.AddHs(mol, 0, 1)
conf3 = nmol.GetConformer()
self.assertTrue(conf3.GetNumAtoms() == 8)
self.assertTrue(conf2.GetNumAtoms() == 2)
targetCoords = [[-0.5, 0.0, 0.0], [1.0, 0.0, 0.0], [-0.8667, 0.0, 1.03709],
[-0.8667, 0.8981, -0.5185], [-0.8667, -0.8981, -0.5185], [1.3667, 0.0, -1.0371],
[1.36667, 0.8981, 0.5185], [1.36667, -0.8981, 0.5185]]
for i in range(8):
pt = conf3.GetAtomPosition(i)
self.assertTrue(ptEq(pt, Point3D(*tuple(targetCoords[i]))))
def test2Issue217(self):
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
self.assertTrue(m.GetNumConformers() == 1)
mb2 = Chem.MolToMolBlock(m)
def test3Exceptions(self):
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
self.assertTrue(m.GetNumConformers() == 1)
self.assertRaises(ValueError, lambda: m.GetConformer(2))
def test4ConfTuple(self):
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
for i in range(10):
addConf(m)
confs = m.GetConformers()
self.assertTrue(len(confs) == 10)
for conf in confs:
for i in range(6):
pt = conf.GetAtomPosition(i)
self.assertTrue(ptEq(pt, Point3D(0.0, 0.0, 0.0)))
m.RemoveAllConformers()
self.assertTrue(m.GetNumConformers() == 0)
confs = m.GetConformers()
self.assertTrue(confs == ())
def test5PositionsArray(self):
smi = 'c1ccccc1'
m = Chem.MolFromSmiles(smi)
addConf(m)
confs = m.GetConformers()
self.assertTrue(len(confs) == 1)
for conf in confs:
pos = conf.GetPositions()
self.assertTrue(pos.shape, (6, 3))
m.RemoveAllConformers()
self.assertTrue(m.GetNumConformers() == 0)
confs = m.GetConformers()
self.assertTrue(confs == ())
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "rvianello/rdkit",
"path": "Code/GraphMol/Wrap/testConformer.py",
"copies": "6",
"size": "3871",
"license": "bsd-3-clause",
"hash": 5609444473556453000,
"line_mean": 25.8819444444,
"line_max": 100,
"alpha_frac": 0.6274864376,
"autogenerated": false,
"ratio": 2.6208530805687205,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.624833951816872,
"avg_score": null,
"num_lines": null
} |
def ConstructEnsembleBV(bv,bitsToKeep):
"""
>>> from rdkit import DataStructs
>>> bv = DataStructs.ExplicitBitVect(128)
>>> bv.SetBitsFromList((1,5,47,99,120))
>>> r = ConstructEnsembleBV(bv,(0,1,2,3,45,46,47,48,49))
>>> r.GetNumBits()
9
>>> r.GetBit(0)
0
>>> r.GetBit(1)
1
>>> r.GetBit(5)
0
>>> r.GetBit(6) # old bit 47
1
"""
finalSize=len(bitsToKeep)
res = bv.__class__(finalSize)
for i,bit in enumerate(bitsToKeep):
if bv.GetBit(bit):
res.SetBit(i)
return res
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "rdkit/DataStructs/BitUtils.py",
"copies": "6",
"size": "1061",
"license": "bsd-3-clause",
"hash": 6826026387626297000,
"line_mean": 19.0188679245,
"line_max": 67,
"alpha_frac": 0.6041470311,
"autogenerated": false,
"ratio": 2.891008174386921,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.649515520548692,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import copy
import struct
from rdkit.six import iterkeys
from rdkit import six
from rdkit import DataStructs
class VectCollection(object):
"""
>>> vc = VectCollection()
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((1,3,5))
>>> vc.AddVect(1,bv1)
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((6,8))
>>> vc.AddVect(2,bv1)
>>> len(vc)
10
>>> vc.GetNumBits()
10
>>> vc[0]
0
>>> vc[1]
1
>>> vc[9]
0
>>> vc[6]
1
>>> vc.GetBit(6)
1
>>> list(vc.GetOnBits())
[1, 3, 5, 6, 8]
keys must be unique, so adding a duplicate replaces the
previous values:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> len(vc)
10
>>> vc[1]
0
>>> vc[9]
1
>>> vc[6]
1
we can also query the children:
>>> vc.NumChildren()
2
>>> cs = vc.GetChildren()
>>> id,fp = cs[0]
>>> id
1
>>> list(fp.GetOnBits())
[7, 9]
>>> id,fp = cs[1]
>>> id
2
>>> list(fp.GetOnBits())
[6, 8]
attach/detach operations:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((5,6))
>>> vc.AddVect(3,bv1)
>>> vc.NumChildren()
3
>>> list(vc.GetOnBits())
[5, 6, 7, 8, 9]
>>> vc.DetachVectsNotMatchingBit(6)
>>> vc.NumChildren()
2
>>> list(vc.GetOnBits())
[5, 6, 8]
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> vc.NumChildren()
3
>>> list(vc.GetOnBits())
[5, 6, 7, 8, 9]
>>> vc.DetachVectsMatchingBit(6)
>>> vc.NumChildren()
1
>>> list(vc.GetOnBits())
[7, 9]
to copy VectCollections, use the copy module:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((5,6))
>>> vc.AddVect(3,bv1)
>>> list(vc.GetOnBits())
[5, 6, 7, 9]
>>> vc2 = copy.copy(vc)
>>> vc.DetachVectsNotMatchingBit(6)
>>> list(vc.GetOnBits())
[5, 6]
>>> list(vc2.GetOnBits())
[5, 6, 7, 9]
The Uniquify() method can be used to remove duplicate vectors:
>>> vc = VectCollection()
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> vc.AddVect(2,bv1)
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((2,3,5))
>>> vc.AddVect(3,bv1)
>>> vc.NumChildren()
3
>>> vc.Uniquify()
>>> vc.NumChildren()
2
"""
def __init__(self):
self.__vects = {}
self.__orVect = None
self.__numBits = -1
self.__needReset = True
def GetOrVect(self):
if self.__needReset:
self.Reset()
return self.__orVect
orVect = property(GetOrVect)
def AddVect(self, idx, vect):
self.__vects[idx] = vect
self.__needReset = True
def Reset(self):
if not self.__needReset:
return
self.__orVect = None
if not self.__vects:
return
ks = list(iterkeys(self.__vects))
self.__orVect = copy.copy(self.__vects[ks[0]])
self.__numBits = self.__orVect.GetNumBits()
for i in range(1, len(ks)):
self.__orVect |= self.__vects[ks[i]]
self.__needReset = False
def NumChildren(self):
return len(self.__vects.keys())
def GetChildren(self):
return tuple(self.__vects.items())
def __getitem__(self, idx):
if self.__needReset:
self.Reset()
return self.__orVect.GetBit(idx)
GetBit = __getitem__
def __len__(self):
if self.__needReset:
self.Reset()
return self.__numBits
GetNumBits = __len__
def GetOnBits(self):
if self.__needReset:
self.Reset()
return self.__orVect.GetOnBits()
def DetachVectsNotMatchingBit(self, bit):
items = list(self.__vects.items())
for k, v in items:
if not v.GetBit(bit):
del (self.__vects[k])
self.__needReset = True
def DetachVectsMatchingBit(self, bit):
items = list(self.__vects.items())
for k, v in items:
if v.GetBit(bit):
del (self.__vects[k])
self.__needReset = True
def Uniquify(self, verbose=False):
obls = {}
for k, v in self.__vects.items():
obls[k] = list(v.GetOnBits())
keys = list(self.__vects.keys())
nKeys = len(keys)
keep = list(self.__vects.keys())
for i in range(nKeys):
k1 = keys[i]
if k1 in keep:
obl1 = obls[k1]
idx = keys.index(k1)
for j in range(idx + 1, nKeys):
k2 = keys[j]
if k2 in keep:
obl2 = obls[k2]
if obl1 == obl2:
keep.remove(k2)
              self.__needReset = True
tmp = {}
for k in keep:
tmp[k] = self.__vects[k]
if verbose: # pragma: nocover
print('uniquify:', len(self.__vects), '->', len(tmp))
self.__vects = tmp
#
# set up our support for pickling:
#
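  # The pickled form is a little-endian byte string laid out as:
  #   <uint32 number of children>, then for each child
  #   <uint32 key><uint32 length><ExplicitBitVect binary of that length>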
def __getstate__(self):
pkl = struct.pack('<I', len(self.__vects))
for k, v in self.__vects.items():
pkl += struct.pack('<I', k)
p = v.ToBinary()
l = len(p)
pkl += struct.pack('<I', l)
pkl += struct.pack('%ds' % (l), p)
return pkl
def __setstate__(self, pkl):
if six.PY3 and isinstance(pkl, str):
pkl = bytes(pkl, encoding='Latin1')
self.__vects = {}
self.__orVect = None
self.__numBits = -1
self.__needReset = True
szI = struct.calcsize('I')
offset = 0
nToRead = struct.unpack('<I', pkl[offset:offset + szI])[0]
offset += szI
for _ in range(nToRead):
k = struct.unpack('<I', pkl[offset:offset + szI])[0]
offset += szI
l = struct.unpack('<I', pkl[offset:offset + szI])[0]
offset += szI
sz = struct.calcsize('%ds' % l)
bv = DataStructs.ExplicitBitVect(struct.unpack('%ds' % l, pkl[offset:offset + sz])[0])
offset += sz
self.AddVect(k, bv)
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/DataStructs/VectCollection.py",
"copies": "1",
"size": "6326",
"license": "bsd-3-clause",
"hash": 939121153869144400,
"line_mean": 21.5928571429,
"line_max": 92,
"alpha_frac": 0.5722415428,
"autogenerated": false,
"ratio": 2.8912248628884827,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3963466405688483,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import copy,struct,sys
from rdkit.six.moves import cPickle
from rdkit.six import iterkeys
from rdkit import six
from rdkit import DataStructs
class VectCollection(object):
"""
>>> vc = VectCollection()
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((1,3,5))
>>> vc.AddVect(1,bv1)
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((6,8))
>>> vc.AddVect(2,bv1)
>>> len(vc)
10
>>> vc.GetNumBits()
10
>>> vc[0]
0
>>> vc[1]
1
>>> vc[9]
0
>>> vc[6]
1
>>> vc.GetBit(6)
1
>>> list(vc.GetOnBits())
[1, 3, 5, 6, 8]
keys must be unique, so adding a duplicate replaces the
previous values:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> len(vc)
10
>>> vc[1]
0
>>> vc[9]
1
>>> vc[6]
1
we can also query the children:
>>> vc.NumChildren()
2
>>> cs = vc.GetChildren()
>>> id,fp = cs[0]
>>> id
1
>>> list(fp.GetOnBits())
[7, 9]
>>> id,fp = cs[1]
>>> id
2
>>> list(fp.GetOnBits())
[6, 8]
attach/detach operations:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((5,6))
>>> vc.AddVect(3,bv1)
>>> vc.NumChildren()
3
>>> list(vc.GetOnBits())
[5, 6, 7, 8, 9]
>>> vc.DetachVectsNotMatchingBit(6)
>>> vc.NumChildren()
2
>>> list(vc.GetOnBits())
[5, 6, 8]
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> vc.NumChildren()
3
>>> list(vc.GetOnBits())
[5, 6, 7, 8, 9]
>>> vc.DetachVectsMatchingBit(6)
>>> vc.NumChildren()
1
>>> list(vc.GetOnBits())
[7, 9]
to copy VectCollections, use the copy module:
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((5,6))
>>> vc.AddVect(3,bv1)
>>> list(vc.GetOnBits())
[5, 6, 7, 9]
>>> vc2 = copy.copy(vc)
>>> vc.DetachVectsNotMatchingBit(6)
>>> list(vc.GetOnBits())
[5, 6]
>>> list(vc2.GetOnBits())
[5, 6, 7, 9]
The Uniquify() method can be used to remove duplicate vectors:
>>> vc = VectCollection()
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((7,9))
>>> vc.AddVect(1,bv1)
>>> vc.AddVect(2,bv1)
>>> bv1 = DataStructs.ExplicitBitVect(10)
>>> bv1.SetBitsFromList((2,3,5))
>>> vc.AddVect(3,bv1)
>>> vc.NumChildren()
3
>>> vc.Uniquify()
>>> vc.NumChildren()
2
"""
def __init__(self):
self.__vects = {}
self.__orVect = None
self.__numBits = -1
self.__needReset=True
def GetOrVect(self):
if self.__needReset:
self.Reset()
return self.__orVect
orVect = property(GetOrVect)
def AddVect(self,id,vect):
self.__vects[id]=vect
self.__needReset=True
def Reset(self):
if not self.__needReset:
return
self.__orVect=None
if not self.__vects:
return
ks = list(iterkeys(self.__vects))
self.__orVect = copy.copy(self.__vects[ks[0]])
self.__numBits = self.__orVect.GetNumBits()
for i in range(1,len(ks)):
self.__orVect |= self.__vects[ks[i]]
self.__needReset=False
def NumChildren(self):
return len(self.__vects.keys())
def GetChildren(self):
return tuple(self.__vects.items())
def GetBit(self,id):
if self.__needReset:
self.Reset()
return self[id]
def GetNumBits(self):
return len(self)
def GetOnBits(self):
if self.__needReset:
self.Reset()
return self.__orVect.GetOnBits()
def DetachVectsNotMatchingBit(self,bit):
items = list(self.__vects.items())
for k,v in items:
if not v.GetBit(bit):
del(self.__vects[k])
self.__needReset=True
def DetachVectsMatchingBit(self,bit):
items = list(self.__vects.items())
for k,v in items:
if v.GetBit(bit):
del(self.__vects[k])
self.__needReset=True
def Uniquify(self,verbose=False):
obls = {}
for k,v in self.__vects.items():
obls[k] = list(v.GetOnBits())
keys = list(self.__vects.keys())
nKeys = len(keys)
keep = list(self.__vects.keys())
for i in range(nKeys):
k1 = keys[i]
if k1 in keep:
obl1 = obls[k1]
idx = keys.index(k1)
for j in range(idx+1,nKeys):
k2 = keys[j]
if k2 in keep:
obl2 = obls[k2]
if obl1==obl2:
keep.remove(k2)
    self.__needReset=True
tmp = {}
for k in keep:
tmp[k] = self.__vects[k]
if verbose: print('uniquify:',len(self.__vects),'->',len(tmp))
self.__vects=tmp
def __len__(self):
if self.__needReset:
self.Reset()
return self.__numBits
def __getitem__(self,id):
if self.__needReset:
self.Reset()
return self.__orVect.GetBit(id)
#
# set up our support for pickling:
#
def __getstate__(self):
pkl = struct.pack('<I',len(self.__vects))
for k,v in self.__vects.items():
pkl += struct.pack('<I',k)
p = v.ToBinary()
l = len(p)
pkl += struct.pack('<I',l)
pkl += struct.pack('%ds'%(l),p)
return pkl
def __setstate__(self,pkl):
if six.PY3 and isinstance(pkl,str):
pkl = bytes(pkl,encoding='Latin1')
self.__vects = {}
self.__orVect = None
self.__numBits = -1
self.__needReset=True
szI = struct.calcsize('I')
offset = 0
nToRead = struct.unpack('<I',pkl[offset:offset+szI])[0]
offset += szI
for i in range(nToRead):
k = struct.unpack('<I',pkl[offset:offset+szI])[0]
offset += szI
l = struct.unpack('<I',pkl[offset:offset+szI])[0]
offset += szI
sz = struct.calcsize('%ds'%l)
bv = DataStructs.ExplicitBitVect(struct.unpack('%ds'%l,pkl[offset:offset+sz])[0])
offset += sz
self.AddVect(k,bv)
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/DataStructs/VectCollection.py",
"copies": "1",
"size": "6312",
"license": "bsd-3-clause",
"hash": -5363067129588532000,
"line_mean": 21.3038869258,
"line_max": 87,
"alpha_frac": 0.5739860583,
"autogenerated": false,
"ratio": 2.87693710118505,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39509231594850497,
"avg_score": null,
"num_lines": null
} |
from rdkit.rdBase import EnableLog, DisableLog, AttachFileToLog, LogMessage
import sys, traceback
_levels = ['rdApp.debug', 'rdApp.info', 'rdApp.warning', 'rdApp.error']
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3
CRITICAL = 4
class logger(object):
def logIt(self, dest, msg, *args, **kwargs):
if (args):
msg = msg % args
LogMessage(dest, msg + '\n')
if kwargs.get('exc_info', False):
exc_type, exc_val, exc_tb = sys.exc_info()
if exc_type:
LogMessage(dest, '\n')
txt = ''.join(traceback.format_exception(exc_type, exc_val, exc_tb))
LogMessage(dest, txt)
def debug(self, msg, *args, **kwargs):
self.logIt('rdApp.debug', 'DEBUG: ' + msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self.logIt('rdApp.error', 'ERROR: ' + msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.logIt('rdApp.info', 'INFO: ' + msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.logIt('rdApp.warning', 'WARNING: ' + msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
self.logIt('rdApp.error', 'CRITICAL: ' + msg, *args, **kwargs)
def setLevel(self, val):
global _levels
for i in range(val, len(_levels)):
EnableLog(_levels[i])
for i in range(0, val):
DisableLog(_levels[i])
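# --- illustrative usage (editor's sketch; this helper is not part of the original module) ---
# A minimal sketch of the intended workflow for the logger class above: create an
# instance, raise the threshold with setLevel(), and emit messages, optionally with a
# traceback via exc_info. Never called here, so importing this module is unaffected.
def _exampleLoggerUsage():  # pragma: nocover
  log = logger()
  log.setLevel(INFO)  # disables rdApp.debug; info/warning/error stay enabled
  log.info('processed %d molecules', 42)
  try:
    raise RuntimeError('demonstration error')
  except RuntimeError:
    log.error('something went wrong', exc_info=True)  # appends the formatted traceback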
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/RDLogger.py",
"copies": "12",
"size": "1622",
"license": "bsd-3-clause",
"hash": 3278426643006919000,
"line_mean": 27.9642857143,
"line_max": 76,
"alpha_frac": 0.6251541307,
"autogenerated": false,
"ratio": 3.0317757009345794,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007325835716769957,
"num_lines": 56
} |
from __future__ import print_function
from rdkit import Chem
import sys
from rdkit.Chem import Randomize
def TestMolecule(mol):
try:
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
except ValueError as msg:
return -1
except Exception:
import traceback
traceback.print_exc()
return -2
if mol.GetNumAtoms():
try:
Randomize.CheckCanonicalization(mol,10)
except Exception:
import traceback
traceback.print_exc()
return -3
return 0
def TestSupplier(suppl,stopAfter=-1,reportInterval=100,reportTo=sys.stderr,
nameProp='_Name'):
nDone = 0
nFailed = 0
while 1:
try:
      mol = next(suppl)
except StopIteration:
break
except Exception:
import traceback
traceback.print_exc()
nFailed += 1
reportTo.flush()
print('Failure at mol %d'%nDone, file=reportTo)
else:
if mol:
ok = TestMolecule(mol)
else:
ok = -3
if ok<0:
nFailed += 1
reportTo.flush()
if ok==-3:
print('Canonicalization',end='',file=reportTo)
print('Failure at mol %d'%nDone,end='',file=reportTo)
if mol:
print(mol.GetProp(nameProp),end='',file=reportTo)
print('', file=reportTo)
nDone += 1
if nDone==stopAfter:
break
if not nDone%reportInterval:
print('Done %d molecules, %d failures'%(nDone,nFailed))
return nDone,nFailed
if __name__=='__main__':
suppl = Chem.SDMolSupplier(sys.argv[1],False)
if len(sys.argv)>2:
nameProp = sys.argv[2]
else:
nameProp = '_Name'
nDone,nFailed = TestSupplier(suppl,nameProp=nameProp)
print('%d failures in %d mols'%(nFailed,nDone))
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/ChemUtils/BulkTester.py",
"copies": "1",
"size": "2012",
"license": "bsd-3-clause",
"hash": -5640717515552599000,
"line_mean": 23.5365853659,
"line_max": 75,
"alpha_frac": 0.6197813121,
"autogenerated": false,
"ratio": 3.4275979557069847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45473792678069846,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from rdkit import Chem
import sys
from rdkit.Chem import Randomize
def TestMolecule(mol):
try:
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
except ValueError as msg:
return -1
except Exception:
import traceback
traceback.print_exc()
return -2
if mol.GetNumAtoms():
try:
Randomize.CheckCanonicalization(mol, 10)
except Exception:
import traceback
traceback.print_exc()
return -3
return 0
def TestSupplier(suppl, stopAfter=-1, reportInterval=100, reportTo=sys.stderr, nameProp='_Name'):
nDone = 0
nFailed = 0
while 1:
try:
      mol = next(suppl)
except StopIteration:
break
except Exception:
import traceback
traceback.print_exc()
nFailed += 1
reportTo.flush()
print('Failure at mol %d' % nDone, file=reportTo)
else:
if mol:
ok = TestMolecule(mol)
else:
ok = -3
if ok < 0:
nFailed += 1
reportTo.flush()
if ok == -3:
print('Canonicalization', end='', file=reportTo)
print('Failure at mol %d' % nDone, end='', file=reportTo)
if mol:
print(mol.GetProp(nameProp), end='', file=reportTo)
print('', file=reportTo)
nDone += 1
if nDone == stopAfter:
break
if not nDone % reportInterval:
print('Done %d molecules, %d failures' % (nDone, nFailed))
return nDone, nFailed
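# --- illustrative usage (editor's sketch; 'molecules.sdf' below is a hypothetical path) ---
# A minimal sketch of driving TestSupplier() from code rather than from the command
# line. Never called here, so importing this module is unaffected.
def _exampleBulkTest():  # pragma: nocover
  suppl = Chem.SDMolSupplier('molecules.sdf', False)  # sanitize=False, as in the __main__ block below
  nDone, nFailed = TestSupplier(suppl, stopAfter=100, reportInterval=25)
  print('%d failures in %d mols' % (nFailed, nDone))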
if __name__ == '__main__':
suppl = Chem.SDMolSupplier(sys.argv[1], False)
if len(sys.argv) > 2:
nameProp = sys.argv[2]
else:
nameProp = '_Name'
nDone, nFailed = TestSupplier(suppl, nameProp=nameProp)
print('%d failures in %d mols' % (nFailed, nDone))
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/Chem/ChemUtils/BulkTester.py",
"copies": "5",
"size": "2011",
"license": "bsd-3-clause",
"hash": 8403946734130894000,
"line_mean": 23.2289156627,
"line_max": 97,
"alpha_frac": 0.6200895077,
"autogenerated": false,
"ratio": 3.3912310286677907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6511320536367791,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
_version = "$Rev$"
_splashMessage = """
-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
FeatFinderCLI version %s
Copyright (C) 2005 Rational Discovery LLC
This software is copyrighted. The software may not be copied,
reproduced, translated or reduced to any electronic medium or
machine-readable form without the prior written consent of
Rational Discovery LLC.
-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
""" % _version
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDLogger
logger = RDLogger.logger()
import sys, os
import re
splitExpr = re.compile(r'[ \t,]')
def GetAtomFeatInfo(factory, mol):
res = [None] * mol.GetNumAtoms()
feats = factory.GetFeaturesForMol(mol)
for feat in feats:
ids = feat.GetAtomIds()
for id in ids:
if res[id] is None:
res[id] = []
res[id].append("%s-%s" % (feat.GetFamily(), feat.GetType()))
return res
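# --- illustrative usage (editor's sketch; this helper is not part of the original module) ---
# A minimal sketch of calling GetAtomFeatInfo() directly, using the BaseFeatures.fdef
# definitions shipped in RDConfig.RDDataDir; the SMILES is an arbitrary example.
# Never called here, so importing this module is unaffected.
def _exampleAtomFeatInfo():  # pragma: nocover
  from rdkit import RDConfig
  fdef = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
  factory = ChemicalFeatures.BuildFeatureFactory(fdef)
  mol = Chem.MolFromSmiles('OCc1ccccc1')
  for i, feats in enumerate(GetAtomFeatInfo(factory, mol)):
    if feats:
      print('%s(%d): %s' % (mol.GetAtomWithIdx(i).GetSymbol(), i, ', '.join(feats)))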
if __name__ == '__main__':
def Usage():
message = """
Usage: FeatFinderCLI [-r] <fdefFilename> <smilesFilename>
NOTE:
- the smiles file should have SMILES in the first column
"""
print(message, file=sys.stderr)
import getopt
args, extras = getopt.getopt(sys.argv[1:], 'r')
reverseIt = False
for arg, val in args:
if arg == '-r':
reverseIt = True
if len(extras) < 2:
Usage()
sys.exit(-1)
print(_splashMessage, file=sys.stderr)
fdefFilename = extras[0]
if not os.path.exists(fdefFilename):
logger.error("Fdef file %s does not exist." % fdefFilename)
sys.exit(-1)
try:
factory = ChemicalFeatures.BuildFeatureFactory(fdefFilename)
except Exception:
logger.error("Could not parse Fdef file %s." % fdefFilename, exc_info=True)
sys.exit(-1)
smilesFilename = extras[1]
if not os.path.exists(smilesFilename):
logger.error("Smiles file %s does not exist." % smilesFilename)
sys.exit(-1)
try:
    inF = open(smilesFilename, 'r')
except Exception:
logger.error("Could not open smiles file %s." % smilesFilename, exc_info=True)
sys.exit(-1)
lineNo = 0
for line in inF.readlines():
lineNo += 1
line = line.strip()
smi = splitExpr.split(line)[0].strip()
mol = Chem.MolFromSmiles(smi)
if mol is not None:
print('Mol-%d\t%s' % (lineNo, smi))
if not reverseIt:
featInfo = GetAtomFeatInfo(factory, mol)
for i, v in enumerate(featInfo):
print('\t% 2s(%d)' % (mol.GetAtomWithIdx(i).GetSymbol(), i + 1), end='')
if v:
print('\t', ', '.join(v))
else:
print()
else:
feats = factory.GetFeaturesForMol(mol)
for feat in feats:
print('\t%s-%s: ' % (feat.GetFamily(), feat.GetType()), end='')
print(', '.join([str(x) for x in feat.GetAtomIds()]))
else:
logger.warning("Could not process smiles '%s' on line %d." % (smi, lineNo))
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/FeatFinderCLI.py",
"copies": "1",
"size": "3262",
"license": "bsd-3-clause",
"hash": -8944519266990291000,
"line_mean": 27.3652173913,
"line_max": 82,
"alpha_frac": 0.6085223789,
"autogenerated": false,
"ratio": 3.191780821917808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43003032008178077,
"avg_score": null,
"num_lines": null
} |
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit import Geometry
def AlignMolToTemplate2D(mol,template,match=None,clearConfs=False,
templateConfId=-1,):
"""
Arguments:
- mol: the molecule to be aligned
- template: the template to align to
- match: If provided, this should be a sequence of integers
containing the indices of the atoms in mol that match
those in template. This is the result of calling:
mol.GetSubstructMatch(template)
- clearConfs: toggles removing any existing conformers on mol
Returns the confId of the conformer containing the depiction
>>> patt = Chem.MolFromSmiles('C1CC1')
>>> rdDepictor.Compute2DCoords(patt)
0
>>> mol = Chem.MolFromSmiles('OC1CC1CC1CCC1')
>>> rdDepictor.Compute2DCoords(mol)
0
>>> pc = patt.GetConformer(0)
>>> mc = mol.GetConformer(0)
We start out with the molecules not aligned:
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+1).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[False, False, False]
But then we can replace the conformer of mol:
>>> AlignMolToTemplate2D(mol,patt,clearConfs=True)
0
>>> mol.GetNumConformers()
1
>>> pc = patt.GetConformer(0)
>>> mc = mol.GetConformer(0)
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+1).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[True, True, True]
If we like, we can specify the atom map explicitly in order to align to the second
matching ring in the probe molecule:
>>> match = (5,6,7)
>>> AlignMolToTemplate2D(mol,patt,clearConfs=True,match=match)
0
>>> mol.GetNumConformers()
1
>>> pc = patt.GetConformer(0)
>>> mc = mol.GetConformer(0)
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+1).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[False, False, False]
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+5).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[True, True, True]
"""
if not match:
match = mol.GetSubstructMatch(template)
if not match:
raise ValueError,'no match between mol and template'
atomMap = {}
templateConf = template.GetConformer(templateConfId)
for i,idx in enumerate(match):
p = templateConf.GetAtomPosition(i)
atomMap[idx] = Geometry.Point2D(p.x,p.y)
molConfId = rdDepictor.Compute2DCoords(mol,clearConfs=clearConfs,coordMap=atomMap)
return molConfId
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/TemplateAlign.py",
"copies": "2",
"size": "3130",
"license": "bsd-3-clause",
"hash": 3981429026796701700,
"line_mean": 30.3,
"line_max": 102,
"alpha_frac": 0.6428115016,
"autogenerated": false,
"ratio": 3.1776649746192893,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9738665817493277,
"avg_score": 0.01636213174520253,
"num_lines": 100
} |
from rdkit import Chem
from rdkit.Chem import rdDepictor
from rdkit import Geometry
def AlignMolToTemplate2D(mol,
template,
match=None,
clearConfs=False,
templateConfId=-1, ):
"""
Arguments:
- mol: the molecule to be aligned
- template: the template to align to
- match: If provided, this should be a sequence of integers
containing the indices of the atoms in mol that match
those in template. This is the result of calling:
mol.GetSubstructMatch(template)
- clearConfs: toggles removing any existing conformers on mol
Returns the confId of the conformer containing the depiction
>>> patt = Chem.MolFromSmiles('C1CC1')
>>> rdDepictor.Compute2DCoords(patt)
0
>>> mol = Chem.MolFromSmiles('OC1CC1CC1CCC1')
>>> rdDepictor.Compute2DCoords(mol)
0
>>> pc = patt.GetConformer(0)
>>> mc = mol.GetConformer(0)
We start out with the molecules not aligned:
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+1).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[False, False, False]
But then we can replace the conformer of mol:
>>> AlignMolToTemplate2D(mol,patt,clearConfs=True)
0
>>> mol.GetNumConformers()
1
>>> pc = patt.GetConformer(0)
>>> mc = mol.GetConformer(0)
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+1).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[True, True, True]
If we like, we can specify the atom map explicitly in order to align to the second
matching ring in the probe molecule:
>>> match = (5,6,7)
>>> AlignMolToTemplate2D(mol,patt,clearConfs=True,match=match)
0
>>> mol.GetNumConformers()
1
>>> pc = patt.GetConformer(0)
>>> mc = mol.GetConformer(0)
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+1).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[False, False, False]
>>> vs = [abs(pc.GetAtomPosition(i).x-mc.GetAtomPosition(i+5).x) for i in range(pc.GetNumAtoms())]
>>> [x<1e-4 for x in vs]
[True, True, True]
"""
if not match:
match = mol.GetSubstructMatch(template)
if not match:
raise ValueError('no match between mol and template')
atomMap = {}
templateConf = template.GetConformer(templateConfId)
for i, idx in enumerate(match):
p = templateConf.GetAtomPosition(i)
atomMap[idx] = Geometry.Point2D(p.x, p.y)
molConfId = rdDepictor.Compute2DCoords(mol, clearConfs=clearConfs, coordMap=atomMap)
return molConfId
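# --- illustrative usage (editor's sketch; this helper is not part of the original module) ---
# A minimal sketch of the typical workflow for AlignMolToTemplate2D(): lay out a shared
# scaffold once, then align each molecule in a series to it so their depictions line up.
# The scaffold and SMILES are arbitrary examples; never called here.
def _exampleAlignSeries():  # pragma: nocover
  scaffold = Chem.MolFromSmiles('c1ccncc1')  # pyridine core
  rdDepictor.Compute2DCoords(scaffold)
  aligned = []
  for smi in ('Cc1ccncc1', 'OCc1ccncc1'):
    m = Chem.MolFromSmiles(smi)
    AlignMolToTemplate2D(m, scaffold, clearConfs=True)
    aligned.append(m)
  return aligned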
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest, sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed, tried = _test()
sys.exit(failed)
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/Chem/TemplateAlign.py",
"copies": "2",
"size": "3215",
"license": "bsd-3-clause",
"hash": -6950193799603544000,
"line_mean": 29.9134615385,
"line_max": 102,
"alpha_frac": 0.6258164852,
"autogenerated": false,
"ratio": 3.2540485829959516,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4879865068195952,
"avg_score": null,
"num_lines": null
} |
from rdkit import Chem
import sys
from rdkit.Chem import Randomize
def TestMolecule(mol):
try:
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
except ValueError,msg:
return -1
except:
import traceback
traceback.print_exc()
return -2
if mol.GetNumAtoms():
try:
Randomize.CheckCanonicalization(mol,10)
except:
import traceback
traceback.print_exc()
return -3
return 0
def TestSupplier(suppl,stopAfter=-1,reportInterval=100,reportTo=sys.stderr,
nameProp='_Name'):
nDone = 0
nFailed = 0
while 1:
try:
mol = suppl.next()
except StopIteration:
break
except:
import traceback
traceback.print_exc()
nFailed += 1
reportTo.flush()
print >>reportTo,'Failure at mol %d'%nDone
else:
if mol:
ok = TestMolecule(mol)
else:
ok = -3
if ok<0:
nFailed += 1
reportTo.flush()
if ok==-3:
print >>reportTo,'Canonicalization',
print >>reportTo,'Failure at mol %d'%nDone,
if mol:
print >>reportTo,mol.GetProp(nameProp),
print >>reportTo,''
nDone += 1
if nDone==stopAfter:
break
if not nDone%reportInterval:
print 'Done %d molecules, %d failures'%(nDone,nFailed)
return nDone,nFailed
if __name__=='__main__':
suppl = Chem.SDMolSupplier(sys.argv[1],False)
if len(sys.argv)>2:
nameProp = sys.argv[2]
else:
nameProp = '_Name'
nDone,nFailed = TestSupplier(suppl,nameProp=nameProp)
print '%d failures in %d mols'%(nFailed,nDone)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/ChemUtils/BulkTester.py",
"copies": "2",
"size": "1898",
"license": "bsd-3-clause",
"hash": -884859499107085300,
"line_mean": 22.725,
"line_max": 75,
"alpha_frac": 0.6111696523,
"autogenerated": false,
"ratio": 3.401433691756272,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9788045218787202,
"avg_score": 0.04491162505381385,
"num_lines": 80
} |
import random
from rdkit import Chem
def RandomizeMolBlock(molB):
splitB = molB.split('\n')
res = []
res.extend(splitB[0:3])
idx = 3
inL = splitB[idx]
res.append(inL)
nAts = int(inL[0:3])
nBonds = int(inL[3:6])
idx+=1
atLines = splitB[idx:idx+nAts]
order = range(nAts)
random.shuffle(order)
for i in order:
res.append(atLines[i])
#print 'ORDER:',order
idx += nAts
for i in range(nBonds):
inL = splitB[idx]
idx1 = int(inL[0:3])-1
idx2 = int(inL[3:6])-1
idx1 = order.index(idx1)
idx2 = order.index(idx2)
inL = '% 3d% 3d'%(idx1+1,idx2+1)+inL[6:]
res.append(inL)
idx += 1
  res.append('M  END')
return '\n'.join(res)
def RandomizeMol(mol):
mb = Chem.MolToMolBlock(mol)
#print '-----------------'
#print mb
mb = RandomizeMolBlock(mb)
#print mb
return Chem.MolFromMolBlock(mb)
def CheckCanonicalization(mol,nReps=10):
refSmi = Chem.MolToSmiles(mol,False)
for i in range(nReps):
m2 = RandomizeMol(mol)
smi = Chem.MolToSmiles(m2,False)
if smi!=refSmi:
raise ValueError,'\nRef: %s\n : %s'%(refSmi,smi)
if __name__=='__main__':
from rdkit.Chem import Randomize
CheckCanonicalization(Chem.MolFromSmiles('CON'))
CheckCanonicalization(Chem.MolFromSmiles('c1ccccn1'))
CheckCanonicalization(Chem.MolFromSmiles('C/C=C/F'))
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/Randomize.py",
"copies": "2",
"size": "1626",
"license": "bsd-3-clause",
"hash": 7852179043251908000,
"line_mean": 22.2285714286,
"line_max": 63,
"alpha_frac": 0.6402214022,
"autogenerated": false,
"ratio": 2.585055643879173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4225277046079174,
"avg_score": null,
"num_lines": null
} |
import random
from rdkit.six.moves import range
from rdkit import Chem
def RandomizeMolBlock(molB):
splitB = molB.split('\n')
res = []
res.extend(splitB[0:3])
idx = 3
inL = splitB[idx]
res.append(inL)
nAts = int(inL[0:3])
nBonds = int(inL[3:6])
idx+=1
atLines = splitB[idx:idx+nAts]
order = list(range(nAts))
random.shuffle(order,random=random.random)
for i in order:
res.append(atLines[i])
#print 'ORDER:',order
idx += nAts
for i in range(nBonds):
inL = splitB[idx]
idx1 = int(inL[0:3])-1
idx2 = int(inL[3:6])-1
idx1 = order.index(idx1)
idx2 = order.index(idx2)
inL = '% 3d% 3d'%(idx1+1,idx2+1)+inL[6:]
res.append(inL)
idx += 1
  res.append('M  END')
return '\n'.join(res)
def RandomizeMol(mol):
mb = Chem.MolToMolBlock(mol)
#print '-----------------'
#print mb
mb = RandomizeMolBlock(mb)
#print mb
return Chem.MolFromMolBlock(mb)
def CheckCanonicalization(mol,nReps=10):
refSmi = Chem.MolToSmiles(mol,False)
for i in range(nReps):
m2 = RandomizeMol(mol)
smi = Chem.MolToSmiles(m2,False)
if smi!=refSmi:
raise ValueError('\nRef: %s\n : %s'%(refSmi,smi))
if __name__=='__main__':
from rdkit.Chem import Randomize
CheckCanonicalization(Chem.MolFromSmiles('CON'))
CheckCanonicalization(Chem.MolFromSmiles('c1ccccn1'))
CheckCanonicalization(Chem.MolFromSmiles('C/C=C/F'))
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/Randomize.py",
"copies": "4",
"size": "1688",
"license": "bsd-3-clause",
"hash": -4444090380005678600,
"line_mean": 22.7746478873,
"line_max": 63,
"alpha_frac": 0.6463270142,
"autogenerated": false,
"ratio": 2.613003095975232,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.04571893029693759,
"num_lines": 71
} |
import random
from rdkit.six.moves import range
from rdkit import Chem
def RandomizeMolBlock(molB):
splitB = molB.split('\n')
res = []
res.extend(splitB[0:3])
idx = 3
inL = splitB[idx]
res.append(inL)
nAts = int(inL[0:3])
nBonds = int(inL[3:6])
idx += 1
atLines = splitB[idx:idx + nAts]
order = list(range(nAts))
random.shuffle(order, random=random.random)
for i in order:
res.append(atLines[i])
#print 'ORDER:',order
idx += nAts
for i in range(nBonds):
inL = splitB[idx]
idx1 = int(inL[0:3]) - 1
idx2 = int(inL[3:6]) - 1
idx1 = order.index(idx1)
idx2 = order.index(idx2)
inL = '% 3d% 3d' % (idx1 + 1, idx2 + 1) + inL[6:]
res.append(inL)
idx += 1
  res.append('M  END')
return '\n'.join(res)
def RandomizeMol(mol):
mb = Chem.MolToMolBlock(mol)
#print '-----------------'
#print mb
mb = RandomizeMolBlock(mb)
#print mb
return Chem.MolFromMolBlock(mb)
def CheckCanonicalization(mol, nReps=10):
refSmi = Chem.MolToSmiles(mol, False)
for i in range(nReps):
m2 = RandomizeMol(mol)
smi = Chem.MolToSmiles(m2, False)
if smi != refSmi:
raise ValueError('\nRef: %s\n : %s' % (refSmi, smi))
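# --- illustrative usage (editor's sketch; this helper is not part of the original module) ---
# A minimal sketch of the two entry points above: RandomizeMol() rebuilds the molecule
# from a mol block with a shuffled atom order, and CheckCanonicalization() verifies that
# the canonical SMILES is unchanged by such shuffles. The SMILES is an arbitrary example;
# never called here.
def _exampleRandomize():  # pragma: nocover
  mol = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')  # aspirin
  shuffled = RandomizeMol(mol)  # same molecule, different atom ordering
  CheckCanonicalization(mol, nReps=5)  # raises ValueError on an order-dependent SMILES
  return shuffled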
if __name__ == '__main__':
from rdkit.Chem import Randomize
CheckCanonicalization(Chem.MolFromSmiles('CON'))
CheckCanonicalization(Chem.MolFromSmiles('c1ccccn1'))
CheckCanonicalization(Chem.MolFromSmiles('C/C=C/F'))
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Randomize.py",
"copies": "5",
"size": "1704",
"license": "bsd-3-clause",
"hash": -4938258008736394000,
"line_mean": 22.6666666667,
"line_max": 63,
"alpha_frac": 0.640258216,
"autogenerated": false,
"ratio": 2.6175115207373274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5757769736737328,
"avg_score": null,
"num_lines": null
} |
""" uses DSViewer to interact with molecules
"""
from rdkit import Chem
from win32com.client import Dispatch
import tempfile,os
_nextDisplayId=1
class Displayable(object):
def __init__(self,doc,id=-1):
global _nextDisplayId
if id<0:
id = _nextDisplayId
_nextDisplayId += 1
self.doc = doc
self.id = id
self.visible=True
self.children = []
def Select(self,atoms=[],state=True,recurse=False):
if state:
selText = 'true'
else:
selText = 'false'
if not atoms or atoms=='*':
atomStr = '; atom "*"'
else:
# DSViewer has atom ids from 1, we do it from 0:
atoms = ['id=%d'%(x) for x in atoms]
atomStr = '; atom %s'%','.join(atoms)
cmd = 'SetProperty object RD_Visual=%d %s: select=%s'%(self.id,atomStr,
selText)
r = int(str(self.doc.DoCommand(cmd)))
if not r and not atoms:
# this handles an annoying case where if you try to select
# a molecule by ID in DSViewer, you get nothing selected:
atomStr=''
cmd = 'SetProperty object RD_Visual=%d %s: select=%s'%(self.id,atomStr,
selText)
r = int(str(self.doc.DoCommand(cmd)))
#print 'sel cmd:',cmd
#print 'result:', r
# stupid DSViewer will select the bonds between pairs of highlighted atoms,
# stop that nonsense right now:
if r:
cmd = 'SetProperty object RD_Visual=%d; bond index="*": select=off'%(self.id)
self.doc.DoCommand(cmd)
if recurse:
for child in self.children:
child.Select(atoms=atoms,state=state,recurse=True)
return r
def Hide(self,recurse=True):
self.Select(state=True,recurse=True)
self.doc.DoCommand('hide')
self.Select(state=False,recurse=True)
def Show(self,recurse=True):
self.Select(state=True,recurse=True)
self.doc.DoCommand('Show')
self.Select(state=False,recurse=True)
def ShowOnly(self,recurse=True):
self.doc.DoCommand('HideAll')
self.Select(state=True,recurse=True)
self.doc.DoCommand('Show')
self.Select(state=False,recurse=True)
def __del__(self):
self.doc.DoCommand('UnselectAll')
count=self.Select(state=True,recurse=True)
if count:
self.doc.DoCommand('Delete')
class MolViewer(object):
def __init__(self,force=0,title='Untitled',**kwargs):
self.app = Dispatch('WebLabViewerPro.Application')
self.app.Visible=1
if force or self.app.ActiveDocument is None:
self.doc = self.app.New(title)
else:
self.doc = self.app.ActiveDocument
self.displayables={}
def DeleteAll(self):
self.doc.DoCommand('SelectAll')
self.doc.DoCommand('Delete')
self.displayables = {}
def DeleteAllExcept(self,excludes):
excludes = [x.lower() for x in excludes]
allNames = self.displayables.keys()
for nm in allNames:
if nm not in excludes:
del self.displayables[nm]
def ShowMol(self,mol,name='molecule',showOnly=True,highlightFeatures=[],
molB="",confId=-1,zoom=True):
if showOnly:
self.DeleteAll()
obj = None
else:
obj = self.displayables.get(name.lower(),None)
#if obj:
# obj.Select(state=True)
# self.doc.DoCommand('Delete')
# obj.Select(state=False)
if not molB:
molB = Chem.MolToMolBlock(mol,confId=confId)
tmp = name + "\n" + molB[molB.index('\n')+1:]
molB = tmp
if not obj:
obj = Displayable(self.doc)
if not hasattr(obj,'_molBlock') or obj._molBlock != molB:
obj._molBlock = molB
fN = tempfile.mktemp('.mol')
open(fN,'w+').write(molB)
self.doc.DoCommand('PasteFrom %s'%fN)
self.doc.DoCommand('SetProperty molecule id=0 : RD_Visual=%d'%(obj.id))
self.doc.DoCommand('SetProperty molecule id=0 : id=%d'%(obj.id))
self.doc.DoCommand('SetProperty molecule id=0 : select=off')
os.unlink(fN)
else:
obj.Select(state=True)
self.doc.DoCommand('Show')
self.displayables[name.lower()] = obj
if zoom:
self.doc.DoCommand('Center')
self.doc.DoCommand('FitView')
return
def LoadFile(self,filename,name,showOnly=False):
if showOnly:
self.DeleteAll()
self.doc.DoCommand('PasteFrom %s'%filename)
obj = Displayable(self.doc)
self.doc.DoCommand('SetProperty molecule id=0 : id=%d'%(obj.id))
self.doc.DoCommand('SetProperty molecule id=0 : select=off')
count = self.doc.DoCommand('SetProperty AminoAcidChain id=0 : RD_Visual=%d'%(obj.id))
if not count or int(count)<=0:
count = self.doc.DoCommand('SetProperty molecule id=0 : RD_Visual=%d'%(obj.id))
self.displayables[name.lower()] = obj
return obj
def GetSelectedAtoms(self,whichSelection=''):
#print 'WHICH',repr(whichSelection),self.displayables.has_key(whichSelection.lower())
if not whichSelection:
d = str(self.doc.DoCommand('GetPropertyValue atom select=true: id=?'))
d2 = str(self.doc.DoCommand('GetPropertyValue atom select=true: molecule=?'))
if d2:
molIds = []
tmpD = {}
for id in d2.split(','):
id = int(id.split('/')[1])+1
if tmpD.has_key(id):
molIds.append(tmpD[id])
else:
for k,v in self.displayables.iteritems():
if id==v.id:
tmpD[id] = k
molIds.append(k)
else:
molIds = ['']*(d.count(',')+1)
elif self.displayables.has_key(whichSelection.lower()):
whichSelection = whichSelection.lower()
whichSelection = self.displayables[whichSelection].id
d = str(self.doc.DoCommand('GetPropertyValue molecule RD_Visual=%d; atom select=true: id=?'%whichSelection))
molIds = [whichSelection]*(d.count(',')+1)
else:
d = None
molIds = None
if d:
splitD = d.split(',')
#print 'splitD:',splitD
#print 'molIds:',molIds
try:
res = []
for i in range(len(splitD)):
# DSViewer has atom ids from 1, we do it from 0:
idx = int(splitD[i])
res.append((molIds[i],idx))
except:
import traceback
traceback.print_exc()
res = []
else:
res = []
return res
def HighlightAtoms(self,indices,where,extraHighlight=False):
self.doc.DoCommand('UnSelectAll')
self.SelectAtoms(where,indices)
def SelectAtoms(self,itemId,atomIndices,selName='selection'):
self.doc.DoCommand('UnSelectAll')
self.doc.DoCommand('SetProperty atom id="*": select=off')
o = self.displayables.get(itemId.lower(),None)
#print 'O:',itemId,atomIndices
if o:
o.Select(atoms=atomIndices)
def SetDisplayUpdate(self,val):
if not val:
self.doc.DoCommand('UpdateView off')
else:
self.doc.DoCommand('UpdateView on')
def GetAtomCoords(self,sels):
res = {}
for label,idx in sels:
whichSelection = label.lower()
whichSelection = self.displayables[label].id
# DSViewer has atom ids from 1, we do it from 0:
idx += 1
cmd = 'GetPropertyValue molecule RD_Visual=%d; atom id=%d: xyz=?'%(whichSelection,idx)
coords = self.doc.DoCommand(cmd)
coords = [float(x) for x in coords.split(' ')]
res[(label,idx)] = coords
#print 'grab:',label,idx,coords
return res
def AddPharmacophore(self,locs,colors,label,sphereRad=0.5):
label=label.lower()
self.SetDisplayUpdate(False)
parent = Displayable(self.doc)
for i,loc in enumerate(locs):
color = colors[i]
color = ' '.join([str(int(255*x)) for x in color])
obj = Displayable(self.doc)
nm = 'sphere-%d'%obj.id
self.doc.DoCommand('Sphere %s'%nm)
self.doc.DoCommand('SetProperty Object name=%s : xyz=%f %f %f'%(nm,loc[0],loc[1],loc[2]))
self.doc.DoCommand('SetProperty Object name=%s : radius=%f'%(nm,sphereRad))
self.doc.DoCommand('SetProperty Object name=%s : color=%s'%(nm,color))
self.doc.DoCommand('SetProperty Object name=%s : RD_Visual=%d'%(nm,parent.id))
self.doc.DoCommand('SetProperty Object name=%s : id=%d'%(nm,parent.id))
#parent.children.append(obj)
self.displayables[label] = parent
self.SetDisplayUpdate(True)
def SetDisplayStyle(self,obj,style=''):
self.doc.DoCommand('UnSelectAll')
obj = obj.lower()
o = self.displayables.get(obj,None)
if o:
o.Select(state=True)
if style=='sticks':
self.doc.DoCommand('DisplayStyle Atom Stick')
elif style=='lines':
self.doc.DoCommand('DisplayStyle Atom Line')
elif style=='':
self.doc.DoCommand('DisplayStyle Atom Off')
o.Select(state=False)
def HideAll(self):
self.doc.DoCommand('HideAll')
def HideObject(self,objName):
self.doc.DoCommand('UnSelectAll')
objName = objName.lower()
o = self.displayables.get(objName,None)
if o:
o.Hide()
def DisplayObject(self,objName):
self.doc.DoCommand('UnSelectAll')
objName = objName.lower()
o = self.displayables.get(objName,None)
if o:
o.Show()
def Zoom(self,objName):
self.doc.DoCommand('UnSelectAll')
objName = objName.lower()
o = self.displayables.get(objName,None)
if o:
r = o.Select(state=True)
self.doc.DoCommand('Center')
self.doc.DoCommand('FitView')
o.Select(state=False)
def SelectProteinNeighborhood(self,aroundObj,inObj,distance=5.0,
name='neighborhood',showSurface=False):
""" FIX: the surface display stuff here is all screwed up due to
differences between the way PyMol and DSViewer handle surfaces.
In PyMol they are essentially a display mode for the protein, so
they don't need to be managed separately.
In DSViewer, on the other hand, the surface is attached to the
protein, but it needs to be hidden or shown on its own. I haven't
figured out how to do that yet.
"""
self.doc.DoCommand('UnSelectAll')
o = self.displayables.get(aroundObj.lower(),None)
p = self.displayables.get(inObj.lower(),None)
if o and p:
self.SetDisplayUpdate(False)
p.Show()
self.doc.DoCommand('UnSelectAll')
tmp = self.doc.DoCommand('SetProperty object RD_Visual=%d;object id="*":select=on'%o.id)
tmp = self.doc.DoCommand('SelectByRadius inside %f atom'%distance)
# that selects all atoms in the radius, now we need to make sure
# only atoms in _inObj_ are selected:
for obj in self.displayables.values():
if obj.id != p.id:
self.doc.DoCommand('SetProperty object RD_Visual=%d;object id="*":select=off'%obj.id)
# ----
# now get all the residue names for the selected atoms:
rs = self.doc.DoCommand('GetPropertyValue atom select=true: parent=?')
if rs:
rs = rs.split(',')
residues = {}
for r in rs:
residues[r] = 1
# and select each atom in those residues:
parents=','.join(['parent="%s"'%x for x in residues.keys()])
cmd = 'SetProperty atom %s: select=on'%parents
tmp=self.doc.DoCommand(cmd)
if showSurface:
# create the surface:
self.doc.DoCommand('Surface')
obj = Displayable(self.doc)
self.displayables[name]=obj
self.doc.DoCommand('SetProperty surface id="*":RD_Visual=%d'%obj.id)
self.doc.DoCommand('UnSelectAll')
self.SetDisplayUpdate(True)
def Redraw(self):
self.SetDisplayUpdate(True)
if __name__=='__main__':
from rdkit import Chem
from rdkit.Chem import rdDistGeom, rdForceFieldHelpers
m = Chem.MolFromSmiles('c1cccc2c1cccc2')
rdDistGeom.EmbedMolecule(m)
rdForceFieldHelpers.UFFOptimizeMolecule(m)
s = MolViewer()
s.ShowMol(m)
| {
"repo_name": "strets123/rdkit",
"path": "rdkit/Chem/DSViewer.py",
"copies": "5",
"size": "12172",
"license": "bsd-3-clause",
"hash": 3953421219471895600,
"line_mean": 31.7204301075,
"line_max": 114,
"alpha_frac": 0.6196187972,
"autogenerated": false,
"ratio": 3.4123913652929634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.032391589350551586,
"num_lines": 372
} |
""" uses DSViewer to interact with molecules
"""
from rdkit import Chem
from win32com.client import Dispatch
import tempfile, os
_nextDisplayId = 1
class Displayable(object):
def __init__(self, doc, id=-1):
global _nextDisplayId
if id < 0:
id = _nextDisplayId
_nextDisplayId += 1
self.doc = doc
self.id = id
self.visible = True
self.children = []
def Select(self, atoms=[], state=True, recurse=False):
if state:
selText = 'true'
else:
selText = 'false'
if not atoms or atoms == '*':
atomStr = '; atom "*"'
else:
# DSViewer has atom ids from 1, we do it from 0:
atoms = ['id=%d' % (x) for x in atoms]
atomStr = '; atom %s' % ','.join(atoms)
cmd = 'SetProperty object RD_Visual=%d %s: select=%s' % (self.id, atomStr, selText)
r = int(str(self.doc.DoCommand(cmd)))
if not r and not atoms:
# this handles an annoying case where if you try to select
# a molecule by ID in DSViewer, you get nothing selected:
atomStr = ''
cmd = 'SetProperty object RD_Visual=%d %s: select=%s' % (self.id, atomStr, selText)
r = int(str(self.doc.DoCommand(cmd)))
#print 'sel cmd:',cmd
#print 'result:', r
# stupid DSViewer will select the bonds between pairs of highlighted atoms,
# stop that nonsense right now:
if r:
cmd = 'SetProperty object RD_Visual=%d; bond index="*": select=off' % (self.id)
self.doc.DoCommand(cmd)
if recurse:
for child in self.children:
child.Select(atoms=atoms, state=state, recurse=True)
return r
def Hide(self, recurse=True):
self.Select(state=True, recurse=True)
self.doc.DoCommand('hide')
self.Select(state=False, recurse=True)
def Show(self, recurse=True):
self.Select(state=True, recurse=True)
self.doc.DoCommand('Show')
self.Select(state=False, recurse=True)
def ShowOnly(self, recurse=True):
self.doc.DoCommand('HideAll')
self.Select(state=True, recurse=True)
self.doc.DoCommand('Show')
self.Select(state=False, recurse=True)
def __del__(self):
self.doc.DoCommand('UnselectAll')
count = self.Select(state=True, recurse=True)
if count:
self.doc.DoCommand('Delete')
class MolViewer(object):
def __init__(self, force=0, title='Untitled', **kwargs):
self.app = Dispatch('WebLabViewerPro.Application')
self.app.Visible = 1
if force or self.app.ActiveDocument is None:
self.doc = self.app.New(title)
else:
self.doc = self.app.ActiveDocument
self.displayables = {}
def DeleteAll(self):
self.doc.DoCommand('SelectAll')
self.doc.DoCommand('Delete')
self.displayables = {}
def DeleteAllExcept(self, excludes):
excludes = [x.lower() for x in excludes]
    allNames = list(self.displayables.keys())
for nm in allNames:
if nm not in excludes:
del self.displayables[nm]
def ShowMol(self, mol, name='molecule', showOnly=True, highlightFeatures=[], molB="", confId=-1,
zoom=True):
if showOnly:
self.DeleteAll()
obj = None
else:
obj = self.displayables.get(name.lower(), None)
#if obj:
# obj.Select(state=True)
# self.doc.DoCommand('Delete')
# obj.Select(state=False)
if not molB:
molB = Chem.MolToMolBlock(mol, confId=confId)
tmp = name + "\n" + molB[molB.index('\n') + 1:]
molB = tmp
if not obj:
obj = Displayable(self.doc)
if not hasattr(obj, '_molBlock') or obj._molBlock != molB:
obj._molBlock = molB
fN = tempfile.mktemp('.mol')
open(fN, 'w+').write(molB)
self.doc.DoCommand('PasteFrom %s' % fN)
self.doc.DoCommand('SetProperty molecule id=0 : RD_Visual=%d' % (obj.id))
self.doc.DoCommand('SetProperty molecule id=0 : id=%d' % (obj.id))
self.doc.DoCommand('SetProperty molecule id=0 : select=off')
os.unlink(fN)
else:
obj.Select(state=True)
self.doc.DoCommand('Show')
self.displayables[name.lower()] = obj
if zoom:
self.doc.DoCommand('Center')
self.doc.DoCommand('FitView')
return
def LoadFile(self, filename, name, showOnly=False):
if showOnly:
self.DeleteAll()
self.doc.DoCommand('PasteFrom %s' % filename)
obj = Displayable(self.doc)
self.doc.DoCommand('SetProperty molecule id=0 : id=%d' % (obj.id))
self.doc.DoCommand('SetProperty molecule id=0 : select=off')
count = self.doc.DoCommand('SetProperty AminoAcidChain id=0 : RD_Visual=%d' % (obj.id))
if not count or int(count) <= 0:
count = self.doc.DoCommand('SetProperty molecule id=0 : RD_Visual=%d' % (obj.id))
self.displayables[name.lower()] = obj
return obj
def GetSelectedAtoms(self, whichSelection=''):
#print 'WHICH',repr(whichSelection), whichSelection.lower() in self.displayables
if not whichSelection:
d = str(self.doc.DoCommand('GetPropertyValue atom select=true: id=?'))
d2 = str(self.doc.DoCommand('GetPropertyValue atom select=true: molecule=?'))
if d2:
molIds = []
tmpD = {}
for id in d2.split(','):
id = int(id.split('/')[1]) + 1
if id in tmpD:
molIds.append(tmpD[id])
else:
            for k, v in self.displayables.items():
if id == v.id:
tmpD[id] = k
molIds.append(k)
else:
molIds = [''] * (d.count(',') + 1)
elif whichSelection.lower() in self.displayables:
whichSelection = whichSelection.lower()
whichSelection = self.displayables[whichSelection].id
d = str(
self.doc.DoCommand('GetPropertyValue molecule RD_Visual=%d; atom select=true: id=?' %
whichSelection))
molIds = [whichSelection] * (d.count(',') + 1)
else:
d = None
molIds = None
if d:
splitD = d.split(',')
#print 'splitD:',splitD
#print 'molIds:',molIds
try:
res = []
for i in range(len(splitD)):
# DSViewer has atom ids from 1, we do it from 0:
idx = int(splitD[i])
res.append((molIds[i], idx))
except Exception:
import traceback
traceback.print_exc()
res = []
else:
res = []
return res
def HighlightAtoms(self, indices, where, extraHighlight=False):
self.doc.DoCommand('UnSelectAll')
self.SelectAtoms(where, indices)
def SelectAtoms(self, itemId, atomIndices, selName='selection'):
self.doc.DoCommand('UnSelectAll')
self.doc.DoCommand('SetProperty atom id="*": select=off')
o = self.displayables.get(itemId.lower(), None)
#print 'O:',itemId,atomIndices
if o:
o.Select(atoms=atomIndices)
def SetDisplayUpdate(self, val):
if not val:
self.doc.DoCommand('UpdateView off')
else:
self.doc.DoCommand('UpdateView on')
def GetAtomCoords(self, sels):
res = {}
for label, idx in sels:
whichSelection = label.lower()
whichSelection = self.displayables[label].id
# DSViewer has atom ids from 1, we do it from 0:
idx += 1
cmd = 'GetPropertyValue molecule RD_Visual=%d; atom id=%d: xyz=?' % (whichSelection, idx)
coords = self.doc.DoCommand(cmd)
coords = [float(x) for x in coords.split(' ')]
res[(label, idx)] = coords
#print 'grab:',label,idx,coords
return res
def AddPharmacophore(self, locs, colors, label, sphereRad=0.5):
label = label.lower()
self.SetDisplayUpdate(False)
parent = Displayable(self.doc)
for i, loc in enumerate(locs):
color = colors[i]
color = ' '.join([str(int(255 * x)) for x in color])
obj = Displayable(self.doc)
nm = 'sphere-%d' % obj.id
self.doc.DoCommand('Sphere %s' % nm)
self.doc.DoCommand('SetProperty Object name=%s : xyz=%f %f %f' % (nm, loc[0], loc[1], loc[2]))
self.doc.DoCommand('SetProperty Object name=%s : radius=%f' % (nm, sphereRad))
self.doc.DoCommand('SetProperty Object name=%s : color=%s' % (nm, color))
self.doc.DoCommand('SetProperty Object name=%s : RD_Visual=%d' % (nm, parent.id))
self.doc.DoCommand('SetProperty Object name=%s : id=%d' % (nm, parent.id))
#parent.children.append(obj)
self.displayables[label] = parent
self.SetDisplayUpdate(True)
def SetDisplayStyle(self, obj, style=''):
self.doc.DoCommand('UnSelectAll')
obj = obj.lower()
o = self.displayables.get(obj, None)
if o:
o.Select(state=True)
if style == 'sticks':
self.doc.DoCommand('DisplayStyle Atom Stick')
elif style == 'lines':
self.doc.DoCommand('DisplayStyle Atom Line')
elif style == '':
self.doc.DoCommand('DisplayStyle Atom Off')
o.Select(state=False)
def HideAll(self):
self.doc.DoCommand('HideAll')
def HideObject(self, objName):
self.doc.DoCommand('UnSelectAll')
objName = objName.lower()
o = self.displayables.get(objName, None)
if o:
o.Hide()
def DisplayObject(self, objName):
self.doc.DoCommand('UnSelectAll')
objName = objName.lower()
o = self.displayables.get(objName, None)
if o:
o.Show()
def Zoom(self, objName):
self.doc.DoCommand('UnSelectAll')
objName = objName.lower()
o = self.displayables.get(objName, None)
if o:
r = o.Select(state=True)
self.doc.DoCommand('Center')
self.doc.DoCommand('FitView')
o.Select(state=False)
def SelectProteinNeighborhood(self, aroundObj, inObj, distance=5.0, name='neighborhood',
showSurface=False):
""" FIX: the surface display stuff here is all screwed up due to
differences between the way PyMol and DSViewer handle surfaces.
In PyMol they are essentially a display mode for the protein, so
they don't need to be managed separately.
In DSViewer, on the other hand, the surface is attached to the
protein, but it needs to be hidden or shown on its own. I haven't
figured out how to do that yet.
"""
self.doc.DoCommand('UnSelectAll')
o = self.displayables.get(aroundObj.lower(), None)
p = self.displayables.get(inObj.lower(), None)
if o and p:
self.SetDisplayUpdate(False)
p.Show()
self.doc.DoCommand('UnSelectAll')
tmp = self.doc.DoCommand('SetProperty object RD_Visual=%d;object id="*":select=on' % o.id)
tmp = self.doc.DoCommand('SelectByRadius inside %f atom' % distance)
# that selects all atoms in the radius, now we need to make sure
# only atoms in _inObj_ are selected:
for obj in self.displayables.values():
if obj.id != p.id:
self.doc.DoCommand('SetProperty object RD_Visual=%d;object id="*":select=off' % obj.id)
# ----
# now get all the residue names for the selected atoms:
rs = self.doc.DoCommand('GetPropertyValue atom select=true: parent=?')
if rs:
rs = rs.split(',')
residues = {}
for r in rs:
residues[r] = 1
# and select each atom in those residues:
parents = ','.join(['parent="%s"' % x for x in residues.keys()])
cmd = 'SetProperty atom %s: select=on' % parents
tmp = self.doc.DoCommand(cmd)
if showSurface:
# create the surface:
self.doc.DoCommand('Surface')
obj = Displayable(self.doc)
self.displayables[name] = obj
self.doc.DoCommand('SetProperty surface id="*":RD_Visual=%d' % obj.id)
self.doc.DoCommand('UnSelectAll')
self.SetDisplayUpdate(True)
def Redraw(self):
self.SetDisplayUpdate(True)
if __name__ == '__main__':
from rdkit import Chem
from rdkit.Chem import rdDistGeom, rdForceFieldHelpers
m = Chem.MolFromSmiles('c1cccc2c1cccc2')
rdDistGeom.EmbedMolecule(m)
rdForceFieldHelpers.UFFOptimizeMolecule(m)
s = MolViewer()
s.ShowMol(m)
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/Chem/DSViewer.py",
"copies": "5",
"size": "12113",
"license": "bsd-3-clause",
"hash": -6413528897471017000,
"line_mean": 31.6495956873,
"line_max": 100,
"alpha_frac": 0.6223891687,
"autogenerated": false,
"ratio": 3.370339454646633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6492728623346634,
"avg_score": null,
"num_lines": null
} |
_version = "$Rev$"
_splashMessage="""
-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
FeatFinderCLI version %s
Copyright (C) 2005 Rational Discovery LLC
This software is copyrighted. The software may not be copied,
reproduced, translated or reduced to any electronic medium or
machine-readable form without the prior written consent of
Rational Discovery LLC.
-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
"""%_version
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDLogger
logger = RDLogger.logger()
import sys,os
import re
splitExpr = re.compile(r'[ \t,]')
def GetAtomFeatInfo(factory,mol):
res = [None]*mol.GetNumAtoms()
feats = factory.GetFeaturesForMol(mol)
for feat in feats:
ids = feat.GetAtomIds()
for id in ids:
if res[id] is None:
res[id] = []
res[id].append("%s-%s"%(feat.GetFamily(),feat.GetType()))
return res
if __name__ == '__main__':
def Usage():
message="""
Usage: FeatFinderCLI [-r] <fdefFilename> <smilesFilename>
NOTE:
- the smiles file should have SMILES in the first column
"""
print >>sys.stderr,message
import getopt
args,extras = getopt.getopt(sys.argv[1:],'r')
reverseIt=False
for arg,val in args:
if arg=='-r':
reverseIt=True
if len(extras)<2:
Usage()
sys.exit(-1)
print >>sys.stderr,_splashMessage
fdefFilename = extras[0]
if not os.path.exists(fdefFilename):
logger.error("Fdef file %s does not exist."%fdefFilename)
sys.exit(-1)
try:
factory = ChemicalFeatures.BuildFeatureFactory(fdefFilename)
except:
logger.error("Could not parse Fdef file %s."%fdefFilename,exc_info=True)
sys.exit(-1)
smilesFilename = extras[1]
if not os.path.exists(smilesFilename):
logger.error("Smiles file %s does not exist."%smilesFilename)
sys.exit(-1)
try:
inF = file(smilesFilename,'r')
except:
logger.error("Could not open smiles file %s."%smilesFilename,exc_info=True)
sys.exit(-1)
lineNo=0
for line in inF.readlines():
lineNo+=1
line = line.strip()
smi = splitExpr.split(line)[0].strip()
try:
mol = Chem.MolFromSmiles(smi)
except:
mol = None
if mol:
print 'Mol-%d\t%s'%(lineNo,smi)
if not reverseIt:
featInfo = GetAtomFeatInfo(factory,mol)
for i,v in enumerate(featInfo):
print '\t% 2s(%d)'%(mol.GetAtomWithIdx(i).GetSymbol(),i+1),
if v:
print '\t',', '.join(v)
else:
print
else:
feats = factory.GetFeaturesForMol(mol)
for feat in feats:
print '\t%s-%s: '%(feat.GetFamily(),feat.GetType()),
print ', '.join([str(x) for x in feat.GetAtomIds()])
else:
logger.warning("Could not process smiles '%s' on line %d."%(smi,lineNo))
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/FeatFinderCLI.py",
"copies": "2",
"size": "3166",
"license": "bsd-3-clause",
"hash": 6317289670174538000,
"line_mean": 25.8305084746,
"line_max": 79,
"alpha_frac": 0.6099178774,
"autogenerated": false,
"ratio": 3.1851106639839033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9601415947807668,
"avg_score": 0.0387225187152472,
"num_lines": 118
} |
from rdkit import RDConfig
import unittest,os,sys
from rdkit.ML.DecTree.SigTree import SigTreeNode
from rdkit.ML import InfoTheory
from rdkit.DataStructs import ExplicitBitVect
from rdkit.DataStructs.VectCollection import VectCollection
class TestCase(unittest.TestCase):
def setUp(self):
t1 = SigTreeNode(None,'root',0)
t2 = SigTreeNode(t1,'nodeL1',1)
t1.AddChildNode(t2)
t3 = SigTreeNode(t2,'nodeLTerm0',0,isTerminal=1)
t4 = SigTreeNode(t2,'nodeLTerm1',1,isTerminal=1)
t2.AddChildNode(t3)
t2.AddChildNode(t4)
t2 = SigTreeNode(t1,'nodeR1',2)
t1.AddChildNode(t2)
t3 = SigTreeNode(t2,'nodeRTerm0',1,isTerminal=1)
t4 = SigTreeNode(t2,'nodeRTerm1',0,isTerminal=1)
t2.AddChildNode(t3)
t2.AddChildNode(t4)
self.tree = t1
def test1(self):
t1 = self.tree
bv = ExplicitBitVect(5)
ex = ['nm',bv]
self.assertFalse(t1.ClassifyExample(ex))
bv.SetBit(1)
self.assertTrue(t1.ClassifyExample(ex))
bv.SetBit(0)
self.assertTrue(t1.ClassifyExample(ex))
bv.SetBit(2)
self.assertFalse(t1.ClassifyExample(ex))
def test2(self):
t1 = self.tree
vc = VectCollection()
bv = ExplicitBitVect(5)
bv.SetBitsFromList([0])
vc.AddVect(1,bv)
bv = ExplicitBitVect(5)
bv.SetBitsFromList([1,2])
vc.AddVect(2,bv)
ex = ['nm',bv,1]
self.assertTrue(t1.ClassifyExample(ex))
bv = ExplicitBitVect(5)
bv.SetBitsFromList([0,2])
vc.AddVect(1,bv)
ex = ['nm',bv,1]
self.assertFalse(t1.ClassifyExample(ex))
def test3(self):
    from rdkit.ML.DecTree.BuildSigTree import BuildSigTree
examples = []
bv = ExplicitBitVect(2)
vc = VectCollection()
vc.AddVect(1,bv)
examples.append(['a',vc,1])
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1,bv)
examples.append(['c',vc,0])
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1,bv)
examples.append(['c2',vc,0])
bv = ExplicitBitVect(2)
bv.SetBit(0)
vc = VectCollection()
vc.AddVect(1,bv)
examples.append(['d',vc,0])
bv = ExplicitBitVect(2)
bv.SetBit(0)
vc = VectCollection()
vc.AddVect(1,bv)
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc.AddVect(2,bv)
examples.append(['d2',vc,0])
bv = ExplicitBitVect(2)
bv.SetBit(0)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1,bv)
examples.append(['d',vc,1])
bv = ExplicitBitVect(2)
bv.SetBit(0)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1,bv)
examples.append(['e',vc,1])
t = BuildSigTree(examples,2,metric=InfoTheory.InfoType.ENTROPY,
maxDepth=2,verbose=0)
self.assertEqual(t.GetName(),'Bit-0')
self.assertEqual(t.GetLabel(),0)
c0 = t.GetChildren()[0]
self.assertEqual(c0.GetName(),'Bit-1')
self.assertEqual(c0.GetLabel(),1)
c1 = t.GetChildren()[1]
self.assertEqual(c1.GetName(),'Bit-1')
self.assertEqual(c1.GetLabel(),1)
bv = ExplicitBitVect(2)
bv.SetBit(0)
vc = VectCollection()
vc.AddVect(1,bv)
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc.AddVect(2,bv)
r = t.ClassifyExample(['t',vc,0])
self.assertEqual(r,0)
def test4(self):
import gzip
from rdkit.six.moves import cPickle
    from rdkit.ML.DecTree.BuildSigTree import BuildSigTree
gz = gzip.open(os.path.join(RDConfig.RDCodeDir,'ML','DecTree','test_data',
'cdk2-few.pkl.gz'),
'rb')
examples = cPickle.load(gz,encoding='Latin1')
t = BuildSigTree(examples,2,maxDepth=3)
self.assertEqual(t.GetLabel(),2181)
self.assertEqual(t.GetChildren()[0].GetLabel(),2861)
self.assertEqual(t.GetChildren()[1].GetLabel(),8182)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/ML/DecTree/UnitTestSigTree.py",
"copies": "4",
"size": "3894",
"license": "bsd-3-clause",
"hash": -8748136318955324000,
"line_mean": 23.8025477707,
"line_max": 78,
"alpha_frac": 0.6325115562,
"autogenerated": false,
"ratio": 2.8014388489208635,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5433950405120864,
"avg_score": null,
"num_lines": null
} |
import gzip
import os
import unittest
from rdkit import RDConfig
from rdkit.DataStructs import ExplicitBitVect
from rdkit.DataStructs.VectCollection import VectCollection
from rdkit.ML import InfoTheory
from rdkit.ML.DecTree.BuildSigTree import BuildSigTree, _GenerateRandomEnsemble
from rdkit.ML.DecTree.SigTree import SigTreeNode
from rdkit.TestRunner import redirect_stdout
from rdkit.six import StringIO
class TestCase(unittest.TestCase):
def setUp(self):
t1 = SigTreeNode(None, 'root', 0)
t2 = SigTreeNode(t1, 'nodeL1', 1)
t1.AddChildNode(t2)
t3 = SigTreeNode(t2, 'nodeLTerm0', 0, isTerminal=1)
t4 = SigTreeNode(t2, 'nodeLTerm1', 1, isTerminal=1)
t2.AddChildNode(t3)
t2.AddChildNode(t4)
t2 = SigTreeNode(t1, 'nodeR1', 2)
t1.AddChildNode(t2)
t3 = SigTreeNode(t2, 'nodeRTerm0', 1, isTerminal=1)
t4 = SigTreeNode(t2, 'nodeRTerm1', 0, isTerminal=1)
t2.AddChildNode(t3)
t2.AddChildNode(t4)
self.tree = t1
def test1(self):
t1 = self.tree
bv = ExplicitBitVect(5)
ex = ['nm', bv]
self.assertFalse(t1.ClassifyExample(ex))
bv.SetBit(1)
self.assertTrue(t1.ClassifyExample(ex))
bv.SetBit(0)
self.assertTrue(t1.ClassifyExample(ex))
bv.SetBit(2)
self.assertFalse(t1.ClassifyExample(ex))
def test2(self):
t1 = self.tree
vc = VectCollection()
bv = ExplicitBitVect(5)
bv.SetBitsFromList([0])
vc.AddVect(1, bv)
bv = ExplicitBitVect(5)
bv.SetBitsFromList([1, 2])
vc.AddVect(2, bv)
ex = ['nm', bv, 1]
self.assertTrue(t1.ClassifyExample(ex))
bv = ExplicitBitVect(5)
bv.SetBitsFromList([0, 2])
vc.AddVect(1, bv)
ex = ['nm', bv, 1]
self.assertFalse(t1.ClassifyExample(ex))
def test3(self):
examples = []
bv = ExplicitBitVect(2)
vc = VectCollection()
vc.AddVect(1, bv)
examples.append(['a', vc, 1])
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1, bv)
examples.append(['c', vc, 0])
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1, bv)
examples.append(['c2', vc, 0])
bv = ExplicitBitVect(2)
bv.SetBit(0)
vc = VectCollection()
vc.AddVect(1, bv)
examples.append(['d', vc, 0])
bv = ExplicitBitVect(2)
bv.SetBit(0)
vc = VectCollection()
vc.AddVect(1, bv)
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc.AddVect(2, bv)
examples.append(['d2', vc, 0])
bv = ExplicitBitVect(2)
bv.SetBit(0)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1, bv)
examples.append(['d', vc, 1])
bv = ExplicitBitVect(2)
bv.SetBit(0)
bv.SetBit(1)
vc = VectCollection()
vc.AddVect(1, bv)
examples.append(['e', vc, 1])
f = StringIO()
with redirect_stdout(f):
t = BuildSigTree(examples, 2, metric=InfoTheory.InfoType.ENTROPY, maxDepth=2, verbose=True)
self.assertIn('Build', f.getvalue())
self.assertEqual(t.GetName(), 'Bit-0')
self.assertEqual(t.GetLabel(), 0)
c0 = t.GetChildren()[0]
self.assertEqual(c0.GetName(), 'Bit-1')
self.assertEqual(c0.GetLabel(), 1)
c1 = t.GetChildren()[1]
self.assertEqual(c1.GetName(), 'Bit-1')
self.assertEqual(c1.GetLabel(), 1)
bv = ExplicitBitVect(2)
bv.SetBit(0)
vc = VectCollection()
vc.AddVect(1, bv)
bv = ExplicitBitVect(2)
bv.SetBit(1)
vc.AddVect(2, bv)
r = t.ClassifyExample(['t', vc, 0])
self.assertEqual(r, 0)
def test4(self):
from rdkit.six.moves import cPickle
gz = gzip.open(
os.path.join(RDConfig.RDCodeDir, 'ML', 'DecTree', 'test_data', 'cdk2-few.pkl.gz'), 'rb')
examples = cPickle.load(gz, encoding='Latin1')
t = BuildSigTree(examples, 2, maxDepth=3)
self.assertEqual(t.GetLabel(), 2181)
self.assertEqual(t.GetChildren()[0].GetLabel(), 2861)
self.assertEqual(t.GetChildren()[1].GetLabel(), 8182)
def test_GenerateRandomEnsemble(self):
ensemble = _GenerateRandomEnsemble(2, 4)
self.assertEqual(len(ensemble), 2)
self.assertTrue(all(r < 4 for r in ensemble))
ensemble = _GenerateRandomEnsemble(4, 4)
self.assertEqual(len(ensemble), 4)
self.assertTrue(all(r < 4 for r in ensemble))
ensemble = _GenerateRandomEnsemble(4, 40)
self.assertEqual(len(ensemble), 4)
self.assertTrue(all(r < 40 for r in ensemble))
if __name__ == '__main__': # pragma: nocover
unittest.main()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/ML/DecTree/UnitTestSigTree.py",
"copies": "4",
"size": "4513",
"license": "bsd-3-clause",
"hash": -7371638601603785000,
"line_mean": 24.9367816092,
"line_max": 97,
"alpha_frac": 0.6456902282,
"autogenerated": false,
"ratio": 2.863578680203046,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5509268908403047,
"avg_score": null,
"num_lines": null
} |
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are guaranteed
to exist. See the algorithms_guaranteed and algorithms_available attributes
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
algorithms_guaranteed = set(__always_supported)
algorithms_available = set(__always_supported)
algorithms = __always_supported
__all__ = __always_supported + ('new', 'algorithms_guaranteed',
'algorithms_available', 'algorithms',
'pbkdf2_hmac')
def __get_builtin_constructor(name):
try:
if name in ('SHA1', 'sha1'):
import _sha
return _sha.new
elif name in ('MD5', 'md5'):
import _md5
return _md5.new
elif name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
import _sha256
bs = name[3:]
if bs == '256':
return _sha256.sha256
elif bs == '224':
return _sha256.sha224
elif name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
import _sha512
bs = name[3:]
if bs == '512':
return _sha512.sha512
elif bs == '384':
return _sha512.sha384
except ImportError:
pass # no extension module, this hash is unsupported.
raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
try:
f = getattr(_hashlib, 'openssl_' + name)
# Allow the C module to raise ValueError. The function will be
# defined but the hash not actually available thanks to OpenSSL.
f()
# Use the C function directly (very fast)
return f
except (AttributeError, ValueError):
return __get_builtin_constructor(name)
def __py_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
return __get_builtin_constructor(name)(string)
def __hash_new(name, string=''):
"""new(name, string='') - Return a new hashing object using the named algorithm;
optionally initialized with a string.
"""
try:
return _hashlib.new(name, string)
except ValueError:
# If the _hashlib module (OpenSSL) doesn't support the named
# hash, try using our builtin implementations.
# This allows for SHA224/256 and SHA384/512 support even though
# the OpenSSL library prior to 0.9.8 doesn't provide them.
return __get_builtin_constructor(name)(string)
try:
import _hashlib
new = __hash_new
__get_hash = __get_openssl_constructor
algorithms_available = algorithms_available.union(
_hashlib.openssl_md_meth_names)
except ImportError:
new = __py_new
__get_hash = __get_builtin_constructor
for __func_name in __always_supported:
# try them all, some may not work due to the OpenSSL
# version not supporting that algorithm.
try:
globals()[__func_name] = __get_hash(__func_name)
except ValueError:
import logging
logging.exception('code for hash %s was not found.', __func_name)
try:
# OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
from _hashlib import pbkdf2_hmac
except ImportError:
import binascii
import struct
_trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256))
_trans_36 = b"".join(chr(x ^ 0x36) for x in range(256))
def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
"""Password based key derivation function 2 (PKCS #5 v2.0)
This pure-Python implementation, based on the hmac module, is about as fast
as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
for long passwords.
"""
if not isinstance(hash_name, str):
raise TypeError(hash_name)
if not isinstance(password, (bytes, bytearray)):
password = bytes(buffer(password))
if not isinstance(salt, (bytes, bytearray)):
salt = bytes(buffer(salt))
# Fast inline HMAC implementation
inner = new(hash_name)
outer = new(hash_name)
blocksize = getattr(inner, 'block_size', 64)
if len(password) > blocksize:
password = new(hash_name, password).digest()
password = password + b'\x00' * (blocksize - len(password))
inner.update(password.translate(_trans_36))
outer.update(password.translate(_trans_5C))
def prf(msg, inner=inner, outer=outer):
# PBKDF2_HMAC uses the password as key. We can re-use the same
# digest objects and just update copies to skip initialization.
icpy = inner.copy()
ocpy = outer.copy()
icpy.update(msg)
ocpy.update(icpy.digest())
return ocpy.digest()
if iterations < 1:
raise ValueError(iterations)
if dklen is None:
dklen = outer.digest_size
if dklen < 1:
raise ValueError(dklen)
hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2)
dkey = b''
loop = 1
while len(dkey) < dklen:
prev = prf(salt + struct.pack(b'>I', loop))
rkey = int(binascii.hexlify(prev), 16)
for i in xrange(iterations - 1):
prev = prf(prev)
rkey ^= int(binascii.hexlify(prev), 16)
loop += 1
dkey += binascii.unhexlify(hex_format_string % rkey)
return dkey[:dklen]
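# Minimal usage sketch for pbkdf2_hmac (the parameters below are illustrative;
# the OpenSSL-backed and the pure-Python fallback code paths should return the
# same bytes for the same inputs):
#
#   import binascii
#   dk = pbkdf2_hmac('sha256', b'password', b'salt', 100000)
#   print(binascii.hexlify(dk))   # 64 hex characters: a 32-byte derived key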
# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| {
"repo_name": "johnkeepmoving/oss-ftp",
"path": "python27/win32/Lib/hashlib.py",
"copies": "51",
"size": "7842",
"license": "mit",
"hash": 1618744083514250200,
"line_mean": 34.4841628959,
"line_max": 84,
"alpha_frac": 0.6169344555,
"autogenerated": false,
"ratio": 3.9466532460996477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00010646792653713069,
"num_lines": 221
} |
""" for the moment this is using Francois Fleuret's cmim library
to do the feature selection
Reference: F. Fleuret "Fast Binary Feature Selection with Conditional
Mutual Information", J. Machine Learn. Res. 5, 1531-1535
(2004)
"""
from rdkit import RDConfig
from rdkit import DataStructs
import tempfile
import os
import rdFeatSelect
def SelectFeatures(examples,nFeatsToPick,bvCol=1):
res = rdFeatSelect.selectCMIM(examples,nFeatsToPick)
if -1 in res:
res = list(res)
res = tuple(res[:res.index(-1)])
return res
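# Minimal usage sketch (assumes the rdFeatSelect extension is built and that it
# accepts examples in the [id, bit-vector, activity] layout used by
# _SelectFeatures below; the bits and activities are illustrative):
#
#   from rdkit import DataStructs
#   from rdkit.ML.FeatureSelect import CMIM
#   examples = []
#   for i, (bits, act) in enumerate([((0, 3), 1), ((1, 2), 0), ((0, 2), 1)]):
#       bv = DataStructs.ExplicitBitVect(8)
#       bv.SetBitsFromList(list(bits))
#       examples.append([i, bv, act])
#   picked = CMIM.SelectFeatures(examples, 2)   # tuple of selected bit ids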
def _SelectFeatures(examples,nFeatsToPick,bvCol=1):
nPts = len(examples)
nFeats = examples[0][bvCol].GetNumBits()
exe = os.path.join(RDConfig.RDBaseDir,'External','cmim-1.0','cmim.exe')
if not os.path.exists(exe):
raise ValueError,'could not find cmim executable %s'%exe
inFname = tempfile.mktemp('.dat')
outFname = inFname + '.out'
inF = open(inFname,'w+')
print >>inF,nPts,nFeats
for row in examples:
print >>inF,row[bvCol].ToBitString()
print >>inF,row[-1]
inF.close()
inF = None
os.spawnlp(os.P_WAIT,exe,exe,'--nb-features',str(nFeatsToPick),'--train',
inFname,outFname)
inD = open(outFname,'r')
inL = inD.readline()
nCreated = int(inL)
inL = inD.readline()
res = []
splitL = inL.split(' ')
for i in range(nFeatsToPick):
res.append(int(splitL[i]))
inD.close()
inD = None
os.unlink(inFname)
os.unlink(outFname)
return res
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/ML/FeatureSelect/CMIM.py",
"copies": "2",
"size": "1551",
"license": "bsd-3-clause",
"hash": 7530698759182581000,
"line_mean": 24.0161290323,
"line_max": 75,
"alpha_frac": 0.6718246293,
"autogenerated": false,
"ratio": 2.9154135338345863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9358598720803569,
"avg_score": 0.04572788846620349,
"num_lines": 62
} |
class LazySig:
def __init__(self,computeFunc,sigSize):
"""
computeFunc should take a single argument, the integer bit id
to compute
"""
if sigSize<=0:
raise ValueError('zero size')
self.computeFunc=computeFunc
self.size=sigSize
self._cache={}
def __len__(self):
"""
>>> obj = LazySig(lambda x:1,10)
>>> len(obj)
10
"""
return self.size
def __getitem__(self,which):
"""
>>> obj = LazySig(lambda x:x,10)
>>> obj[1]
1
>>> obj[-1]
9
>>> try:
... obj[10]
... except IndexError:
... 1
... else:
... 0
1
>>> try:
... obj[-10]
... except IndexError:
... 1
... else:
... 0
1
"""
if which<0:
# handle negative indices
which = self.size+which
if which<=0 or which>=self.size:
raise IndexError('bad index')
if which in self._cache:
v= self._cache[which]
else:
v = self.computeFunc(which)
self._cache[which]=v
return v
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/DataStructs/LazySignature.py",
"copies": "1",
"size": "1391",
"license": "bsd-3-clause",
"hash": 843844296093386600,
"line_mean": 15.9634146341,
"line_max": 65,
"alpha_frac": 0.4996405464,
"autogenerated": false,
"ratio": 3.5850515463917527,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4584692092791752,
"avg_score": null,
"num_lines": null
} |
class LazySig:
def __init__(self, computeFunc, sigSize):
"""
computeFunc should take a single argument, the integer bit id
to compute
"""
if sigSize <= 0:
raise ValueError('zero size')
self.computeFunc = computeFunc
self.size = sigSize
self._cache = {}
def __len__(self):
"""
>>> obj = LazySig(lambda x:1,10)
>>> len(obj)
10
"""
return self.size
def __getitem__(self, which):
"""
>>> obj = LazySig(lambda x:x,10)
>>> obj[1]
1
>>> obj[-1]
9
>>> try:
... obj[10]
... except IndexError:
... 1
... else:
... 0
1
>>> try:
... obj[-10]
... except IndexError:
... 1
... else:
... 0
1
"""
if which < 0:
# handle negative indices
which = self.size + which
if which <= 0 or which >= self.size:
raise IndexError('bad index')
if which in self._cache:
v = self._cache[which]
else:
v = self.computeFunc(which)
self._cache[which] = v
return v
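# Minimal sketch of the lazy/caching behaviour (the counting function is only
# illustrative):
#
#   calls = []
#   sig = LazySig(lambda bit: calls.append(bit) or bit * 2, 16)
#   v1 = sig[3]        # computeFunc runs and the result is cached
#   v2 = sig[3]        # served from _cache, computeFunc is not called again
#   assert v1 == v2 == 6 and len(calls) == 1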
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/DataStructs/LazySignature.py",
"copies": "12",
"size": "1472",
"license": "bsd-3-clause",
"hash": -49117080255145340,
"line_mean": 16.734939759,
"line_max": 76,
"alpha_frac": 0.5149456522,
"autogenerated": false,
"ratio": 3.607843137254902,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
import os, sys
import io
import unittest
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
from rdkit.Geometry import rdGeometry as geom
def feq(v1, v2, tol2=1e-4):
return abs(v1 - v2) <= tol2
def lstFeq(l1, l2, tol=1.e-4):
if (len(l1) != len(l2)):
return 0
for i in range(len(l1)):
if not feq(l1[i], l2[i], tol):
return 0
return 1
def ptFeq(pt1, pt2, tol=0.0001):
dist = pt1.Distance(pt2)
return feq(dist, 0.0, tol)
class TestCase(unittest.TestCase):
def setUp(self):
pass
def testBasic(self):
ffeat = ChemicalFeatures.FreeChemicalFeature()
ffeat.SetId(123)
pos = ffeat.GetId()
self.assertTrue(pos == 123)
ffeat.SetFamily("HBondDonor")
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
ffeat.SetPos(geom.Point3D(1.0, 2.0, 3.0))
pos = ffeat.GetPos()
self.assertTrue(ptFeq(pos, geom.Point3D(1.0, 2.0, 3.0)))
ffeat.SetType("HBondDonor1")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
ffeat = ChemicalFeatures.FreeChemicalFeature("HBondDonor", "HBondDonor1", geom.Point3D(1.0, 2.0,
3.0))
self.assertTrue(ffeat.GetId() == -1)
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
ffeat = ChemicalFeatures.FreeChemicalFeature("HBondDonor", "HBondDonor1",
geom.Point3D(1.0, 2.0, 3.0), id=123)
self.assertTrue(ffeat.GetId() == 123)
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
pos = ffeat.GetPos()
self.assertTrue(ptFeq(pos, geom.Point3D(1.0, 2.0, 3.0)))
ffeat = ChemicalFeatures.FreeChemicalFeature(id=123, type="HBondDonor1", family="HBondDonor",
loc=geom.Point3D(1.0, 2.0, 3.0))
self.assertTrue(ffeat.GetId() == 123)
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
pos = ffeat.GetPos()
self.assertTrue(ptFeq(pos, geom.Point3D(1.0, 2.0, 3.0)))
def testPickle(self):
ffeat = ChemicalFeatures.FreeChemicalFeature("HBondDonor", "HBondDonor1",
geom.Point3D(1.0, 2.0, 3.0), 123)
pkl = cPickle.dumps(ffeat)
ffeat2 = cPickle.loads(pkl, encoding='bytes')
self.assertTrue(ffeat2.GetId() == ffeat.GetId())
self.assertTrue(ffeat2.GetFamily() == ffeat.GetFamily())
self.assertTrue(ffeat2.GetType() == ffeat.GetType())
self.assertTrue(ptFeq(ffeat2.GetPos(), ffeat.GetPos()))
# Check that the old pickled versions have not been broken
inTF = open(
os.path.join(RDConfig.RDBaseDir, 'Code/ChemicalFeatures/Wrap/testData/feat.pkl'), 'r')
buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
inTF.close()
inF = io.BytesIO(buf)
ffeat2 = cPickle.load(inF, encoding='bytes')
# this version (1.0) does not have an id in the byte stream
self.assertTrue(ffeat2.GetFamily() == ffeat.GetFamily())
self.assertTrue(ffeat2.GetType() == ffeat.GetType())
self.assertTrue(ptFeq(ffeat2.GetPos(), ffeat.GetPos()))
# Test the new version also has the id and works as expected
# uncomment the following to generate (overwrite) a new version of the pickled
# data file
#cPickle.dump(ffeat,file(os.path.join(RDConfig.RDBaseDir, 'Code/ChemicalFeatures/Wrap/testData/featv2.pkl'),'wb+'))
inTF = open(
os.path.join(RDConfig.RDBaseDir, 'Code/ChemicalFeatures/Wrap/testData/featv2.pkl'), 'r')
buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
inTF.close()
inF = io.BytesIO(buf)
ffeat2 = cPickle.load(inF, encoding='bytes')
self.assertTrue(ffeat2.GetId() == ffeat.GetId())
self.assertTrue(ffeat2.GetFamily() == ffeat.GetFamily())
self.assertTrue(ffeat2.GetType() == ffeat.GetType())
self.assertTrue(ptFeq(ffeat2.GetPos(), ffeat.GetPos()))
if __name__ == '__main__':
print("Testing ChemicalFeatures Wrapper code:")
unittest.main()
| {
"repo_name": "rvianello/rdkit",
"path": "Code/ChemicalFeatures/Wrap/testFeatures.py",
"copies": "5",
"size": "4279",
"license": "bsd-3-clause",
"hash": 3779023811508788000,
"line_mean": 35.2627118644,
"line_max": 119,
"alpha_frac": 0.6375321337,
"autogenerated": false,
"ratio": 2.986043265875785,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0061079062013230856,
"num_lines": 118
} |
from __future__ import print_function
import os,sys
import unittest
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
from rdkit.Geometry import rdGeometry as geom
def feq(v1,v2,tol2=1e-4):
return abs(v1-v2)<=tol2
def lstFeq(l1, l2, tol=1.e-4):
if (len(l1) != len(l2)):
return 0
for i in range(len(l1)):
if not feq(l1[i], l2[i], tol):
return 0
return 1
def ptFeq(pt1, pt2, tol=0.0001):
dist = pt1.Distance(pt2)
return feq(dist, 0.0, tol)
class TestCase(unittest.TestCase):
def setUp(self):
pass
def testBasic(self):
ffeat = ChemicalFeatures.FreeChemicalFeature()
ffeat.SetId(123)
pos = ffeat.GetId()
self.assertTrue(pos == 123)
ffeat.SetFamily("HBondDonor")
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
ffeat.SetPos(geom.Point3D(1.0, 2.0, 3.0))
pos = ffeat.GetPos()
self.assertTrue(ptFeq(pos, geom.Point3D(1.0, 2.0, 3.0)))
ffeat.SetType("HBondDonor1")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
ffeat = ChemicalFeatures.FreeChemicalFeature("HBondDonor", "HBondDonor1", geom.Point3D(1.0, 2.0, 3.0))
self.assertTrue(ffeat.GetId() == -1)
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
ffeat = ChemicalFeatures.FreeChemicalFeature("HBondDonor", "HBondDonor1", geom.Point3D(1.0, 2.0, 3.0),id=123)
self.assertTrue(ffeat.GetId() == 123)
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
pos = ffeat.GetPos()
self.assertTrue(ptFeq(pos, geom.Point3D(1.0, 2.0, 3.0)))
ffeat = ChemicalFeatures.FreeChemicalFeature(id = 123, type="HBondDonor1", family="HBondDonor", loc=geom.Point3D(1.0, 2.0, 3.0))
self.assertTrue(ffeat.GetId() == 123)
self.assertTrue(ffeat.GetFamily() == "HBondDonor")
self.assertTrue(ffeat.GetType() == "HBondDonor1")
pos = ffeat.GetPos()
self.assertTrue(ptFeq(pos, geom.Point3D(1.0, 2.0, 3.0)))
def testPickle(self):
ffeat = ChemicalFeatures.FreeChemicalFeature("HBondDonor", "HBondDonor1", geom.Point3D(1.0, 2.0, 3.0),123)
pkl = cPickle.dumps(ffeat)
ffeat2 = cPickle.loads(pkl, encoding='bytes')
self.assertTrue(ffeat2.GetId()==ffeat.GetId());
self.assertTrue(ffeat2.GetFamily()==ffeat.GetFamily())
self.assertTrue(ffeat2.GetType()==ffeat.GetType())
self.assertTrue(ptFeq(ffeat2.GetPos(),ffeat.GetPos()))
# Check that the old pickled versions have not been broken
inF = open(os.path.join(RDConfig.RDBaseDir,
'Code/ChemicalFeatures/Wrap/testData/feat.pkl'),'rb')
ffeat2=cPickle.load(inF, encoding='bytes')
# this version (1.0) does not have an id in the byte stream
self.assertTrue(ffeat2.GetFamily()==ffeat.GetFamily())
self.assertTrue(ffeat2.GetType()==ffeat.GetType())
self.assertTrue(ptFeq(ffeat2.GetPos(),ffeat.GetPos()))
# Test the new version also has the id and works as expected
# uncomment the following to generate (overwrite) a new version of the pickled
# data file
#cPickle.dump(ffeat,file(os.path.join(RDConfig.RDBaseDir, 'Code/ChemicalFeatures/Wrap/testData/featv2.pkl'),'wb+'))
inF = open(os.path.join(RDConfig.RDBaseDir,
'Code/ChemicalFeatures/Wrap/testData/featv2.pkl'),'rb')
ffeat2=cPickle.load(inF, encoding='bytes')
self.assertTrue(ffeat2.GetId()==ffeat.GetId());
self.assertTrue(ffeat2.GetFamily()==ffeat.GetFamily())
self.assertTrue(ffeat2.GetType()==ffeat.GetType())
self.assertTrue(ptFeq(ffeat2.GetPos(),ffeat.GetPos()))
if __name__ == '__main__':
print("Testing ChemicalFeatures Wrapper code:")
unittest.main()
| {
"repo_name": "AlexanderSavelyev/rdkit",
"path": "Code/ChemicalFeatures/Wrap/testFeatures.py",
"copies": "1",
"size": "4027",
"license": "bsd-3-clause",
"hash": 608325846380943700,
"line_mean": 37.7211538462,
"line_max": 134,
"alpha_frac": 0.6481251552,
"autogenerated": false,
"ratio": 2.9653902798232696,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41135154350232694,
"avg_score": null,
"num_lines": null
} |
""" Import all RDKit chemistry modules
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Geometry import rdGeometry
from rdkit.Chem import *
from rdkit.Chem.rdPartialCharges import *
from rdkit.Chem.rdDepictor import *
from rdkit.Chem.rdForceFieldHelpers import *
from rdkit.Chem.ChemicalFeatures import *
from rdkit.Chem.rdDistGeom import *
from rdkit.Chem.rdMolAlign import *
from rdkit.Chem.rdMolTransforms import *
from rdkit.Chem.rdShapeHelpers import *
from rdkit.Chem.rdChemReactions import *
from rdkit.Chem.rdReducedGraphs import *
try:
from rdkit.Chem.rdSLNParse import *
except:
pass
from rdkit.Chem.rdMolDescriptors import *
from rdkit.Chem.rdqueries import *
from rdkit import ForceField
Mol.Compute2DCoords = Compute2DCoords
Mol.ComputeGasteigerCharges = ComputeGasteigerCharges
import numpy, os
from rdkit.RDLogger import logger
logger = logger()
import warnings
def TransformMol(mol,tform,confId=-1,keepConfs=False):
""" Applies the transformation (usually a 4x4 double matrix) to a molecule
if keepConfs is False then all but that conformer are removed
"""
refConf = mol.GetConformer(confId)
TransformConformer(refConf,tform)
if not keepConfs:
if confId==-1: confId=0
allConfIds = [c.GetId() for c in mol.GetConformers()]
for id in allConfIds:
if not id==confId: mol.RemoveConformer(id)
#reset the conf Id to zero since there is only one conformer left
mol.GetConformer(confId).SetId(0)
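# Minimal usage sketch (assumes an embedded molecule; the 4x4 matrix here is a
# plain translation along x):
#
#   import numpy
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   m = Chem.AddHs(Chem.MolFromSmiles('CCO'))
#   AllChem.EmbedMolecule(m, randomSeed=42)
#   tform = numpy.identity(4)
#   tform[0, 3] = 5.0                # shift all atoms by 5 Angstrom along x
#   AllChem.TransformMol(m, tform)   # modifies m in place, keeps one conformer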
def ComputeMolShape(mol,confId=-1,boxDim=(20,20,20),spacing=0.5,**kwargs):
""" returns a grid representation of the molecule's shape
"""
res = rdGeometry.UniformGrid3D(boxDim[0],boxDim[1],boxDim[2],spacing=spacing)
EncodeShape(mol,res,confId,**kwargs)
return res
def ComputeMolVolume(mol,confId=-1,gridSpacing=0.2,boxMargin=2.0):
""" Calculates the volume of a particular conformer of a molecule
based on a grid-encoding of the molecular shape.
"""
mol = rdchem.Mol(mol)
conf = mol.GetConformer(confId)
CanonicalizeConformer(conf)
box = ComputeConfBox(conf)
sideLen = ( box[1].x-box[0].x + 2*boxMargin, \
box[1].y-box[0].y + 2*boxMargin, \
box[1].z-box[0].z + 2*boxMargin )
shape = rdGeometry.UniformGrid3D(sideLen[0],sideLen[1],sideLen[2],
spacing=gridSpacing)
EncodeShape(mol,shape,confId,ignoreHs=False,vdwScale=1.0)
voxelVol = gridSpacing**3
occVect = shape.GetOccupancyVect()
voxels = [1 for x in occVect if x==3]
vol = voxelVol*len(voxels)
return vol
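# Minimal usage sketch (requires a 3D conformer; the exact value depends on the
# grid spacing and box margin used above):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   m = Chem.AddHs(Chem.MolFromSmiles('c1ccccc1'))
#   AllChem.EmbedMolecule(m, randomSeed=42)
#   vol = AllChem.ComputeMolVolume(m)   # volume in cubic Angstroms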
def GenerateDepictionMatching2DStructure(mol,reference,confId=-1,
referencePattern=None,
acceptFailure=False,
**kwargs):
""" Generates a depiction for a molecule where a piece of the molecule
is constrained to have the same coordinates as a reference.
This is useful for, for example, generating depictions of SAR data
sets so that the cores of the molecules are all oriented the same
way.
Arguments:
- mol: the molecule to be aligned, this will come back
with a single conformer.
- reference: a molecule with the reference atoms to align to;
this should have a depiction.
- confId: (optional) the id of the reference conformation to use
- referencePattern: (optional) an optional molecule to be used to
generate the atom mapping between the molecule
and the reference.
- acceptFailure: (optional) if True, standard depictions will be generated
for molecules that don't have a substructure match to the
reference; if False, a ValueError will be raised
"""
if reference and referencePattern:
if not reference.GetNumAtoms(onlyExplicit=True)==referencePattern.GetNumAtoms(onlyExplicit=True):
raise ValueError('When a pattern is provided, it must have the same number of atoms as the reference')
referenceMatch = reference.GetSubstructMatch(referencePattern)
if not referenceMatch:
raise ValueError("Reference does not map to itself")
else:
referenceMatch = range(reference.GetNumAtoms(onlyExplicit=True))
if referencePattern:
match = mol.GetSubstructMatch(referencePattern)
else:
match = mol.GetSubstructMatch(reference)
if not match:
if not acceptFailure:
raise ValueError('Substructure match with reference not found.')
else:
coordMap={}
else:
conf = reference.GetConformer()
coordMap={}
for i,idx in enumerate(match):
pt3 = conf.GetAtomPosition(referenceMatch[i])
pt2 = rdGeometry.Point2D(pt3.x,pt3.y)
coordMap[idx] = pt2
Compute2DCoords(mol,clearConfs=True,coordMap=coordMap,canonOrient=False)
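# Minimal usage sketch (the SMILES are illustrative; the reference must already
# have 2D coordinates and must be a substructure of the molecule to depict):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   core = Chem.MolFromSmiles('c1ccc2ncccc2c1')     # quinoline core
#   AllChem.Compute2DCoords(core)
#   mol = Chem.MolFromSmiles('Cc1ccc2ncccc2c1N')
#   AllChem.GenerateDepictionMatching2DStructure(mol, core)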
def GenerateDepictionMatching3DStructure(mol,reference,confId=-1,
**kwargs):
""" Generates a depiction for a molecule where a piece of the molecule
is constrained to have coordinates similar to those of a 3D reference
structure.
Arguments:
- mol: the molecule to be aligned, this will come back
with a single conformer.
- reference: a molecule with the reference atoms to align to;
this should have a depiction.
- confId: (optional) the id of the reference conformation to use
"""
nAts = mol.GetNumAtoms()
dm = []
conf = reference.GetConformer(confId)
for i in range(nAts):
pi = conf.GetAtomPosition(i)
#npi.z=0
for j in range(i+1,nAts):
pj = conf.GetAtomPosition(j)
#pj.z=0
dm.append((pi-pj).Length())
dm = numpy.array(dm)
Compute2DCoordsMimicDistmat(mol,dm,**kwargs)
def GetBestRMS(ref,probe,refConfId=-1,probeConfId=-1,maps=None):
""" Returns the optimal RMS for aligning two molecules, taking
symmetry into account. As a side-effect, the probe molecule is
left in the aligned state.
Arguments:
- ref: the reference molecule
- probe: the molecule to be aligned to the reference
- refConfId: (optional) reference conformation to use
- probeConfId: (optional) probe conformation to use
- maps: (optional) a list of lists of (probeAtomId,refAtomId)
tuples with the atom-atom mappings of the two molecules.
If not provided, these will be generated using a substructure
search.
Note:
This function will attempt to align all permutations of matching atom
orders in both molecules; for some molecules this will lead to a 'combinatorial
explosion', especially if hydrogens are present.
Use 'rdkit.Chem.AllChem.AlignMol' to align molecules without changing the
atom order.
"""
if not maps:
matches = ref.GetSubstructMatches(probe,uniquify=False)
if not matches:
raise ValueError('mol %s does not match mol %s'%(ref.GetProp('_Name'),
probe.GetProp('_Name')))
if len(matches) > 1e6:
warnings.warn("{} matches detected for molecule {}, this may lead to a performance slowdown.".format(len(matches), probe.GetProp('_Name')))
maps = [list(enumerate(match)) for match in matches]
bestRMS=1000.
for amap in maps:
rms=AlignMol(probe,ref,probeConfId,refConfId,atomMap=amap)
if rms<bestRMS:
bestRMS=rms
bestMap = amap
# finally repeat the best alignment:
if bestMap != amap:
AlignMol(probe,ref,probeConfId,refConfId,atomMap=bestMap)
return bestRMS
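# Minimal usage sketch (two embeddings of the same molecule; hydrogens are left
# off to keep the number of symmetry-equivalent matches small):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   ref = Chem.MolFromSmiles('OC(=O)c1ccccc1')
#   probe = Chem.Mol(ref)
#   AllChem.EmbedMolecule(ref, randomSeed=1)
#   AllChem.EmbedMolecule(probe, randomSeed=2)
#   rms = AllChem.GetBestRMS(ref, probe)   # probe is left aligned onto ref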
def GetConformerRMS(mol,confId1,confId2,atomIds=None,prealigned=False):
""" Returns the RMS between two conformations.
By default, the conformers will be aligned to the first conformer
of the molecule (i.e. the reference) before RMS calculation and,
as a side-effect, will be left in the aligned state.
Arguments:
- mol: the molecule
- confId1: the id of the first conformer
- confId2: the id of the second conformer
- atomIds: (optional) list of atom ids to use as points for
alignment - defaults to all atoms
- prealigned: (optional) by default the conformers are assumed
to be unaligned and will therefore be aligned to the
first conformer
"""
# align the conformers if necessary
# Note: the reference conformer is always the first one
if not prealigned:
if atomIds:
AlignMolConformers(mol, confIds=[confId1,confId2], atomIds=atomIds)
else:
AlignMolConformers(mol, confIds=[confId1,confId2])
# calculate the RMS between the two conformations
conf1 = mol.GetConformer(id=confId1)
conf2 = mol.GetConformer(id=confId2)
ssr = 0
for i in range(mol.GetNumAtoms()):
d = conf1.GetAtomPosition(i).Distance(conf2.GetAtomPosition(i))
ssr += d*d
ssr /= mol.GetNumAtoms()
return numpy.sqrt(ssr)
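# Minimal usage sketch (assumes at least two embedded conformers):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   m = Chem.AddHs(Chem.MolFromSmiles('CCCCO'))
#   cids = AllChem.EmbedMultipleConfs(m, numConfs=3, randomSeed=42)
#   rms = AllChem.GetConformerRMS(m, cids[0], cids[1])   # aligns, then measures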
def GetConformerRMSMatrix(mol,atomIds=None,prealigned=False):
""" Returns the RMS matrix of the conformers of a molecule.
As a side-effect, the conformers will be aligned to the first
conformer (i.e. the reference) and will be left in the aligned state.
Arguments:
- mol: the molecule
- atomIds: (optional) list of atom ids to use as points for
alignment - defaults to all atoms
- prealigned: (optional) by default the conformers are assumed
to be unaligned and will therefore be aligned to the
first conformer
Note that the returned RMS matrix is symmetric, so only the
lower half of the matrix is returned, e.g. for 5 conformers:
rmsmatrix = [ a,
b, c,
d, e, f,
g, h, i, j]
This way it can be directly used as distance matrix in e.g. Butina
clustering.
"""
# if necessary, align the conformers
# Note: the reference conformer is always the first one
rmsvals = []
if not prealigned:
if atomIds:
AlignMolConformers(mol, atomIds=atomIds, RMSlist=rmsvals)
else:
AlignMolConformers(mol, RMSlist=rmsvals)
else: # already prealigned
for i in range(1, mol.GetNumConformers()):
rmsvals.append(GetConformerRMS(mol, 0, i, atomIds=atomIds, prealigned=prealigned))
# loop over the conformations (except the reference one)
cmat = []
for i in range(1, mol.GetNumConformers()):
cmat.append(rmsvals[i-1])
for j in range(1,i):
cmat.append(GetConformerRMS(mol, i, j, atomIds=atomIds, prealigned=True))
return cmat
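# Minimal usage sketch feeding the flattened lower-triangle matrix into Butina
# clustering (the 1.0 Angstrom threshold is only an illustrative choice):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   from rdkit.ML.Cluster import Butina
#   m = Chem.AddHs(Chem.MolFromSmiles('CCCCCO'))
#   cids = AllChem.EmbedMultipleConfs(m, numConfs=10, randomSeed=42)
#   dmat = AllChem.GetConformerRMSMatrix(m, prealigned=False)
#   clusters = Butina.ClusterData(dmat, len(cids), 1.0, isDistData=True)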
def EnumerateLibraryFromReaction(reaction,sidechainSets) :
""" Returns a generator for the virtual library defined by
a reaction and a sequence of sidechain sets
>>> from rdkit import Chem
>>> from rdkit.Chem import AllChem
>>> s1=[Chem.MolFromSmiles(x) for x in ('NC','NCC')]
>>> s2=[Chem.MolFromSmiles(x) for x in ('OC=O','OC(=O)C')]
>>> rxn = AllChem.ReactionFromSmarts('[O:2]=[C:1][OH].[N:3]>>[O:2]=[C:1][N:3]')
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[s2,s1])
>>> [Chem.MolToSmiles(x[0]) for x in list(r)]
['CNC=O', 'CCNC=O', 'CNC(C)=O', 'CCNC(C)=O']
Note that this is all done in a lazy manner, so "infinitely" large libraries can
be done without worrying about running out of memory. Your patience will run out first:
Define a set of 10000 amines:
>>> amines = (Chem.MolFromSmiles('N'+'C'*x) for x in range(10000))
... a set of 10000 acids
>>> acids = (Chem.MolFromSmiles('OC(=O)'+'C'*x) for x in range(10000))
... now the virtual library (1e8 compounds in principle):
>>> r = AllChem.EnumerateLibraryFromReaction(rxn,[acids,amines])
... look at the first 4 compounds:
>>> [Chem.MolToSmiles(next(r)[0]) for x in range(4)]
['NC=O', 'CNC=O', 'CCNC=O', 'CCCNC=O']
"""
if len(sidechainSets) != reaction.GetNumReactantTemplates():
raise ValueError('%d sidechains provided, %d required' %
(len(sidechainSets),reaction.GetNumReactantTemplates()))
def _combiEnumerator(items,depth=0):
for item in items[depth]:
if depth+1 < len(items):
v = _combiEnumerator(items,depth+1)
for entry in v:
l=[item]
l.extend(entry)
yield l
else:
yield [item]
for chains in _combiEnumerator(sidechainSets):
prodSets = reaction.RunReactants(chains)
for prods in prodSets:
yield prods
def ConstrainedEmbed(mol,core,useTethers=True,coreConfId=-1,
randomseed=2342,getForceField=UFFGetMoleculeForceField,**kwargs):
""" generates an embedding of a molecule where part of the molecule
is constrained to have particular coordinates
Arguments
- mol: the molecule to embed
- core: the molecule to use as a source of constraints
- useTethers: (optional) if True, the final conformation will be
optimized subject to a series of extra forces that pull the
matching atoms to the positions of the core atoms. Otherwise
simple distance constraints based on the core atoms will be
used in the optimization.
- coreConfId: (optional) id of the core conformation to use
- randomseed: (optional) seed for the random number generator
An example, start by generating a template with a 3D structure:
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1")
>>> AllChem.EmbedMolecule(template)
0
>>> AllChem.UFFOptimizeMolecule(template)
0
Here's a molecule:
>>> mol = AllChem.MolFromSmiles("c1nn(Cc2ccccc2)cc1-c3ccccc3")
Now do the constrained embedding
>>> newmol=AllChem.ConstrainedEmbed(mol, template)
Demonstrate that the positions are the same:
>>> newp=newmol.GetConformer().GetAtomPosition(0)
>>> molp=mol.GetConformer().GetAtomPosition(0)
>>> list(newp-molp)==[0.0,0.0,0.0]
True
>>> newp=newmol.GetConformer().GetAtomPosition(1)
>>> molp=mol.GetConformer().GetAtomPosition(1)
>>> list(newp-molp)==[0.0,0.0,0.0]
True
"""
match = mol.GetSubstructMatch(core)
if not match:
raise ValueError("molecule doesn't match the core")
coordMap={}
coreConf = core.GetConformer(coreConfId)
for i,idxI in enumerate(match):
corePtI = coreConf.GetAtomPosition(i)
coordMap[idxI]=corePtI
ci = EmbedMolecule(mol,coordMap=coordMap,randomSeed=randomseed,**kwargs)
if ci<0:
raise ValueError('Could not embed molecule.')
algMap=[(j,i) for i,j in enumerate(match)]
if not useTethers:
# clean up the conformation
ff = getForceField(mol,confId=0)
for i,idxI in enumerate(match):
for j in range(i+1,len(match)):
idxJ = match[j]
d = coordMap[idxI].Distance(coordMap[idxJ])
ff.AddDistanceConstraint(idxI,idxJ,d,d,100.)
ff.Initialize()
n=4
more=ff.Minimize()
while more and n:
more=ff.Minimize()
n-=1
# rotate the embedded conformation onto the core:
rms =AlignMol(mol,core,atomMap=algMap)
else:
# rotate the embedded conformation onto the core:
rms = AlignMol(mol,core,atomMap=algMap)
ff = getForceField(mol,confId=0)
conf = core.GetConformer()
for i in range(core.GetNumAtoms()):
p =conf.GetAtomPosition(i)
pIdx=ff.AddExtraPoint(p.x,p.y,p.z,fixed=True)-1
ff.AddDistanceConstraint(pIdx,match[i],0,0,100.)
ff.Initialize()
n=4
more=ff.Minimize(energyTol=1e-4,forceTol=1e-3)
while more and n:
more=ff.Minimize(energyTol=1e-4,forceTol=1e-3)
n-=1
# realign
rms = AlignMol(mol,core,atomMap=algMap)
mol.SetProp('EmbedRMS',str(rms))
return mol
def AssignBondOrdersFromTemplate(refmol, mol):
""" assigns bond orders to a molecule based on the
bond orders in a template molecule
Arguments
- refmol: the template molecule
- mol: the molecule to assign bond orders to
An example, start by generating a template from a SMILES
and read in the PDB structure of the molecule
>>> from rdkit.Chem import AllChem
>>> template = AllChem.MolFromSmiles("CN1C(=NC(C1=O)(c2ccccc2)c3ccccc3)N")
>>> mol = AllChem.MolFromPDBFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4DJU_lig.pdb'))
>>> len([1 for b in template.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
>>> len([1 for b in mol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
22
Now assign the bond orders based on the template molecule
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> len([1 for b in newMol.GetBonds() if b.GetBondTypeAsDouble() == 1.0])
8
Note that the template molecule should have no explicit hydrogens
else the algorithm will fail.
It also works if there are different formal charges (this was github issue 235):
>>> template=AllChem.MolFromSmiles('CN(C)C(=O)Cc1ccc2c(c1)NC(=O)c3ccc(cc3N2)c4ccc(c(c4)OC)[N+](=O)[O-]')
>>> mol = AllChem.MolFromMolFile(os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', '4FTR_lig.mol'))
>>> AllChem.MolToSmiles(mol)
'COC1CC(C2CCC3C(O)NC4CC(CC(O)N(C)C)CCC4NC3C2)CCC1N(O)O'
>>> newMol = AllChem.AssignBondOrdersFromTemplate(template, mol)
>>> AllChem.MolToSmiles(newMol)
'COc1cc(-c2ccc3c(c2)Nc2ccc(CC(=O)N(C)C)cc2NC3=O)ccc1[N+](=O)[O-]'
"""
refmol2 = rdchem.Mol(refmol)
mol2 = rdchem.Mol(mol)
# do the molecules match already?
matching = mol2.GetSubstructMatch(refmol2)
if not matching: # no, they don't match
# set any non-SINGLE bonds of mol to SINGLE and clear their aromatic flags
for b in mol2.GetBonds():
if b.GetBondType() != BondType.SINGLE:
b.SetBondType(BondType.SINGLE)
b.SetIsAromatic(False)
# do the same for the bonds of the reference molecule
for b in refmol2.GetBonds():
b.SetBondType(BondType.SINGLE)
b.SetIsAromatic(False)
# set atom charges to zero;
for a in refmol2.GetAtoms():
a.SetFormalCharge(0)
for a in mol2.GetAtoms():
a.SetFormalCharge(0)
matching = mol2.GetSubstructMatches(refmol2, uniquify=False)
# do the molecules match now?
if matching:
if len(matching) > 1:
logger.warning("More than one matching pattern found - picking one")
matching = matching[0]
# apply matching: set bond properties
for b in refmol.GetBonds():
atom1 = matching[b.GetBeginAtomIdx()]
atom2 = matching[b.GetEndAtomIdx()]
b2 = mol2.GetBondBetweenAtoms(atom1, atom2)
b2.SetBondType(b.GetBondType())
b2.SetIsAromatic(b.GetIsAromatic())
# apply matching: set atom properties
for a in refmol.GetAtoms():
a2 = mol2.GetAtomWithIdx(matching[a.GetIdx()])
a2.SetHybridization(a.GetHybridization())
a2.SetIsAromatic(a.GetIsAromatic())
a2.SetNumExplicitHs(a.GetNumExplicitHs())
a2.SetFormalCharge(a.GetFormalCharge())
SanitizeMol(mol2)
if hasattr(mol2, '__sssAtoms'):
mol2.__sssAtoms = None # we don't want all bonds highlighted
else:
raise ValueError("No matching found")
return mol2
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/AllChem.py",
"copies": "1",
"size": "19475",
"license": "bsd-3-clause",
"hash": 264373935059233470,
"line_mean": 36.2370936902,
"line_max": 145,
"alpha_frac": 0.6744544288,
"autogenerated": false,
"ratio": 3.3393347050754456,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45137891338754454,
"avg_score": null,
"num_lines": null
} |
import os
from rdkit.six import iteritems
from rdkit.Chem.Draw.MolDrawing import MolDrawing,DrawingOptions
from rdkit.Chem.Draw.rdMolDraw2D import *
def _getCanvas():
useAGG=False
useCairo=False
useSping=False
Canvas=None
if not os.environ.get('RDKIT_CANVAS',''):
try:
from rdkit.Chem.Draw.cairoCanvas import Canvas
useCairo=True
except ImportError:
try:
from rdkit.Chem.Draw.aggCanvas import Canvas
useAGG=True
except ImportError:
from rdkit.Chem.Draw.spingCanvas import Canvas
useSping=True
else:
canv=os.environ['RDKIT_CANVAS'].lower()
if canv =='cairo':
from rdkit.Chem.Draw.cairoCanvas import Canvas
useCairo=True
elif canv =='agg':
from rdkit.Chem.Draw.aggCanvas import Canvas
useAGG=True
else:
from rdkit.Chem.Draw.spingCanvas import Canvas
useSping=True
if useSping:
DrawingOptions.radicalSymbol='.' #<- the sping canvas doesn't support unicode well
return useAGG,useCairo,Canvas
def _createCanvas(size):
useAGG,useCairo,Canvas=_getCanvas()
if useAGG or useCairo:
try:
import Image
except ImportError:
from PIL import Image
img = Image.new("RGBA",size,(0,0,0,0))
canvas = Canvas(img)
else:
from rdkit.Chem.Draw.spingCanvas import Canvas
canvas = Canvas(size=size,name='MolToImageFile')
img = canvas._image
return img,canvas
def MolToImage(mol, size=(300,300), kekulize=True, wedgeBonds=True,
fitImage=False, options=None, canvas=None, **kwargs):
"""Returns a PIL image containing a drawing of the molecule
ARGUMENTS:
- kekulize: run kekulization routine on input `mol` (default True)
- size: final image size, in pixel (default (300,300))
- wedgeBonds: draw wedge (stereo) bonds (default True)
- highlightAtoms: list of atoms to highlight (default [])
- highlightMap: dictionary of (atom, color) pairs (default None)
- highlightBonds: list of bonds to highlight (default [])
- highlightColor: RGB color as tuple (default [1, 0, 0])
NOTE:
use 'matplotlib.colors.to_rgb()' to convert string and
HTML color codes into the RGB tuple representation, eg.
from matplotlib.colors import ColorConverter
img = Draw.MolToImage(m, highlightAtoms=[1,2], highlightColor=ColorConverter().to_rgb('aqua'))
img.save("molecule.png")
RETURNS:
a PIL Image object
"""
if not mol:
raise ValueError('Null molecule provided')
if canvas is None:
img,canvas=_createCanvas(size)
else:
img=None
if options is None:
options = DrawingOptions()
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds = wedgeBonds
if 'highlightColor' in kwargs:
color = kwargs.pop('highlightColor', (1, 0, 0))
options.selectColor = color
drawer = MolDrawing(canvas=canvas,drawingOptions=options)
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
if 'legend' in kwargs:
legend = kwargs['legend']
del kwargs['legend']
else:
legend=''
drawer.AddMol(mol,**kwargs)
if legend:
from rdkit.Chem.Draw.MolDrawing import Font
bbox = drawer.boundingBoxes[mol]
pos = size[0]/2,int(.94*size[1]),0 # the 0.94 is extremely empirical
# canvas.addCanvasPolygon(((bbox[0],bbox[1]),(bbox[2],bbox[1]),(bbox[2],bbox[3]),(bbox[0],bbox[3])),
# color=(1,0,0),fill=False,stroke=True)
# canvas.addCanvasPolygon(((0,0),(0,size[1]),(size[0],size[1]),(size[0],0) ),
# color=(0,0,1),fill=False,stroke=True)
font=Font(face='sans',size=12)
canvas.addCanvasText(legend,pos,font)
if kwargs.get('returnCanvas',False):
return img,canvas,drawer
else:
canvas.flush()
return img
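# Minimal usage sketch (the atom indices and output file name are illustrative):
#
#   from rdkit import Chem
#   from rdkit.Chem import Draw
#   m = Chem.MolFromSmiles('Cc1ccccc1C(=O)O')
#   img = Draw.MolToImage(m, size=(300, 300), highlightAtoms=[7, 8, 9])
#   img.save('toluic_acid.png')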
def MolToFile(mol,fileName,size=(300,300),kekulize=True, wedgeBonds=True,
imageType=None, fitImage=False, options=None, **kwargs):
""" Generates a drawing of a molecule and writes it to a file
"""
# original contribution from Uwe Hoffmann
if not fileName:
raise ValueError('no fileName provided')
if not mol:
raise ValueError('Null molecule provided')
if imageType is None:
imageType=os.path.splitext(fileName)[1][1:]
if options is None:
options = DrawingOptions()
useAGG,useCairo,Canvas = _getCanvas()
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds = wedgeBonds
if useCairo or useAGG:
canvas = Canvas(size=size,imageType=imageType,
fileName=fileName)
else:
options.radicalSymbol = '.' #<- the sping canvas doesn't support unicode well
canvas = Canvas(size=size,name=fileName,imageType=imageType)
drawer = MolDrawing(canvas=canvas,drawingOptions=options)
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
if useCairo or useAGG:
canvas.flush()
else:
canvas.save()
def MolToImageFile(mol,filename,size=(300,300),kekulize=True, wedgeBonds=True,
**kwargs):
""" DEPRECATED: please use MolToFile instead
"""
img = MolToImage(mol,size=size,kekulize=kekulize,wedgeBonds=wedgeBonds,**kwargs)
img.save(filename)
tkRoot=None
tkLabel=None
tkPI=None
def ShowMol(mol,size=(300,300),kekulize=True,wedgeBonds=True,
title='RDKit Molecule',**kwargs):
""" Generates a picture of a molecule and displays it in a Tkinter window
"""
global tkRoot,tkLabel,tkPI
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
try:
import ImageTk
except ImportError:
from PIL import ImageTk
img = MolToImage(mol,size,kekulize,wedgeBonds,**kwargs)
if not tkRoot:
tkRoot = Tkinter.Tk()
tkRoot.title(title)
tkPI = ImageTk.PhotoImage(img)
tkLabel = Tkinter.Label(tkRoot,image=tkPI)
tkLabel.place(x=0,y=0,width=img.size[0],height=img.size[1])
else:
tkPI.paste(img)
tkRoot.geometry('%dx%d'%(img.size))
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True,
imageType=None, fitImage=False, options=None, **kwargs):
""" Generates a drawing of a molecule on a matplotlib canvas
"""
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor=None
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds=wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in iteritems(omol._atomPs):
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
"""
useful things to do with these:
fig.axes[0].imshow(z,cmap=cm.gray,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
fig.axes[0].contour(x,y,z,20,colors='k')
fig=Draw.MolToMPL(m);
contribs=Crippen.rdMolDescriptors._CalcCrippenContribs(m)
logps,mrs=zip(*contribs)
x,y,z=Draw.calcAtomGaussians(m,0.03,step=0.01,weights=logps)
fig.axes[0].imshow(z,cmap=cm.jet,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
fig.axes[0].contour(x,y,z,20,colors='k',alpha=0.5)
fig.savefig('coumlogps.colored.png',bbox_inches='tight')
"""
import numpy
from matplotlib import mlab
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
Z = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[0][0], mol._atomPs[0][1])*weights[0]
for i in range(1,mol.GetNumAtoms()):
Zp = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
def MolsToImage(mols, subImgSize=(200,200),legends=None,**kwargs):
"""
"""
try:
import Image
except ImportError:
from PIL import Image
if legends is None: legends = [None]*len(mols)
res = Image.new("RGBA",(subImgSize[0]*len(mols),subImgSize[1]))
for i,mol in enumerate(mols):
res.paste(MolToImage(mol,subImgSize,legend=legends[i],**kwargs),(i*subImgSize[0],0))
return res
def MolsToGridImage(mols,molsPerRow=3,subImgSize=(200,200),legends=None,
highlightAtomLists=None,**kwargs):
"""
"""
try:
import Image
except ImportError:
from PIL import Image
if legends is None: legends = [None]*len(mols)
nRows = len(mols)//molsPerRow
if len(mols)%molsPerRow : nRows+=1
res = Image.new("RGBA",(molsPerRow*subImgSize[0],nRows*subImgSize[1]),(255,255,255,0))
for i,mol in enumerate(mols):
row = i//molsPerRow
col = i%molsPerRow
highlights=None
if highlightAtomLists and highlightAtomLists[i]:
highlights=highlightAtomLists[i]
res.paste(MolToImage(mol,subImgSize,legend=legends[i],highlightAtoms=highlights,
**kwargs),(col*subImgSize[0],row*subImgSize[1]))
return res
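# Minimal usage sketch (legends are optional; when given, the list must be the
# same length as mols):
#
#   from rdkit import Chem
#   from rdkit.Chem import Draw
#   smis = ['CCO', 'c1ccccc1', 'CC(=O)O', 'CCN']
#   mols = [Chem.MolFromSmiles(s) for s in smis]
#   grid = Draw.MolsToGridImage(mols, molsPerRow=2, subImgSize=(200, 200), legends=smis)
#   grid.save('grid.png')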
def ReactionToImage(rxn, subImgSize=(200,200),**kwargs):
"""
"""
try:
import Image
except ImportError:
from PIL import Image
mols = []
for i in range(rxn.GetNumReactantTemplates()):
tmpl=rxn.GetReactantTemplate(i)
tmpl.UpdatePropertyCache(False)
mols.append(tmpl)
mols.append(None)
for i in range(rxn.GetNumProductTemplates()):
tmpl = rxn.GetProductTemplate(i)
tmpl.UpdatePropertyCache(False)
mols.append(tmpl)
res = Image.new("RGBA",(subImgSize[0]*len(mols),subImgSize[1]),(255,255,255,0))
for i,mol in enumerate(mols):
if mol is not None:
nimg = MolToImage(mol,subImgSize,kekulize=False,**kwargs)
else:
nimg,canvas = _createCanvas(subImgSize)
p0 = (10,subImgSize[1]//2)
p1 = (subImgSize[0]-10,subImgSize[1]//2)
p3 = (subImgSize[0]-20,subImgSize[1]//2-10)
p4 = (subImgSize[0]-20,subImgSize[1]//2+10)
canvas.addCanvasLine(p0,p1,lineWidth=2,color=(0,0,0))
canvas.addCanvasLine(p3,p1,lineWidth=2,color=(0,0,0))
canvas.addCanvasLine(p4,p1,lineWidth=2,color=(0,0,0))
if hasattr(canvas,'flush'):
canvas.flush()
else:
canvas.save()
res.paste(nimg,(i*subImgSize[0],0))
return res
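# Minimal usage sketch (the reaction SMARTS is an illustrative amide coupling):
#
#   from rdkit.Chem import AllChem, Draw
#   rxn = AllChem.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
#   img = Draw.ReactionToImage(rxn)
#   img.save('reaction.png')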
def MolToQPixmap(mol, size=(300,300), kekulize=True, wedgeBonds=True,
fitImage=False, options=None, **kwargs):
""" Generates a drawing of a molecule on a Qt QPixmap
"""
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.qtCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor = None
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds=wedgeBonds
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
drawer.AddMol(mol, **kwargs)
canvas.flush()
return canvas.pixmap
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/Draw/__init__.py",
"copies": "2",
"size": "11955",
"license": "bsd-3-clause",
"hash": -6749942334766245000,
"line_mean": 29.9715025907,
"line_max": 108,
"alpha_frac": 0.6625679632,
"autogenerated": false,
"ratio": 3.114092211513415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4776660174713415,
"avg_score": null,
"num_lines": null
} |
import os
def _getCanvas():
useAGG=False
useCairo=False
Canvas=None
if not os.environ.get('RDKIT_CANVAS',''):
try:
from cairoCanvas import Canvas
useCairo=True
except ImportError:
try:
from aggCanvas import Canvas
useAGG=True
except ImportError:
from spingCanvas import Canvas
else:
canv=os.environ['RDKIT_CANVAS'].lower()
if canv =='cairo':
from cairoCanvas import Canvas
useCairo=True
elif canv =='agg':
from aggCanvas import Canvas
useAGG=True
else:
from spingCanvas import Canvas
return useAGG,useCairo,Canvas
def _createCanvas(size):
useAGG,useCairo,Canvas=_getCanvas()
if useAGG or useCairo:
import Image
img = Image.new("RGBA",size,"white")
canvas = Canvas(img)
else:
MolDrawing.radicalSymbol='.' #<- the sping canvas doesn't support unicode well
from spingCanvas import Canvas
canvas = Canvas(size=size,name='MolToImageFile')
img = canvas._image
return img,canvas
def MolToImage(mol, size=(300,300), kekulize=True, wedgeBonds=True,
**kwargs):
""" returns a PIL image containing a drawing of the molecule
Keyword arguments:
kekulize -- run kekulization routine on input `mol` (default True)
size -- final image size, in pixel (default (300,300))
wedgeBonds -- draw wedge (stereo) bonds (default True)
highlightAtoms -- list of atoms to highlight (default [])
highlightMap -- dictionary of (atom, color) pairs (default None)
highlightBonds -- list of bonds to highlight (default [])
"""
from MolDrawing import MolDrawing
if not mol:
raise ValueError,'Null molecule provided'
img,canvas=_createCanvas(size)
drawer = MolDrawing(canvas)
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.wedgeDashedBonds=wedgeBonds
if kwargs.has_key('legend'):
legend = kwargs['legend']
del kwargs['legend']
else:
legend=''
drawer.AddMol(mol,**kwargs)
if legend:
from rdkit.Chem.Draw.MolDrawing import Font
bbox = drawer.boundingBoxes[mol]
pos = size[0]/2,int(.94*size[1]) # the 0.94 is extremely empirical
# canvas.addCanvasPolygon(((bbox[0],bbox[1]),(bbox[2],bbox[1]),(bbox[2],bbox[3]),(bbox[0],bbox[3])),
# color=(1,0,0),fill=False,stroke=True)
# canvas.addCanvasPolygon(((0,0),(0,size[1]),(size[0],size[1]),(size[0],0) ),
# color=(0,0,1),fill=False,stroke=True)
font=Font(face='sans',size=12)
canvas.addCanvasText(legend,pos,font)
if kwargs.get('returnCanvas',False):
return img,canvas,drawer
else:
canvas.flush()
return img
def MolToFile(mol,fileName,size=(300,300),kekulize=True, wedgeBonds=True,
imageType=None,**kwargs):
""" Generates a drawing of a molecule and writes it to a file
"""
from MolDrawing import MolDrawing
# original contribution from Uwe Hoffmann
if not fileName:
raise ValueError,'no fileName provided'
if not mol:
raise ValueError,'Null molecule provided'
if imageType is None:
imageType=os.path.splitext(fileName)[1][1:]
useAGG,useCairo,Canvas = _getCanvas()
if useCairo or useAGG:
canvas = Canvas(size=size,imageType=imageType,
fileName=fileName)
else:
MolDrawing.radicalSymbol='.' #<- the sping canvas doesn't support unicode well
canvas = Canvas(size=size,name=fileName,imageType=imageType)
drawer = MolDrawing(canvas)
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.wedgeDashedBonds=wedgeBonds
drawer.AddMol(mol,**kwargs)
if useCairo or useAGG:
canvas.flush()
else:
canvas.save()
def MolToImageFile(mol,filename,size=(300,300),kekulize=True, wedgeBonds=True,
**kwargs):
""" DEPRECATED: please use MolToFile instead
"""
img = MolToImage(mol,size=size,kekulize=kekulize,wedgeBonds=wedgeBonds,**kwargs)
img.save(filename)
tkRoot=None
tkLabel=None
tkPI=None
def ShowMol(mol,size=(300,300),kekulize=True,wedgeBonds=True,
title='RDKit Molecule',**kwargs):
""" Generates a picture of a molecule and displays it in a Tkinter window
"""
global tkRoot,tkLabel,tkPI
import Tkinter
import ImageTk
img = MolToImage(mol,size,kekulize,wedgeBonds,**kwargs)
if not tkRoot:
tkRoot = Tkinter.Tk()
tkRoot.title(title)
tkPI = ImageTk.PhotoImage(img)
tkLabel = Tkinter.Label(tkRoot,image=tkPI)
tkLabel.place(x=0,y=0,width=img.size[0],height=img.size[1])
else:
tkPI.paste(img)
tkRoot.geometry('%dx%d'%(img.size))
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True,
imageType=None,**kwargs):
""" Generates a drawing of a molecule on a matplotlib canvas
"""
if not mol:
raise ValueError,'Null molecule provided'
from MolDrawing import MolDrawing
from mplCanvas import Canvas
canvas = Canvas(size)
drawer = MolDrawing(canvas)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.wedgeDashedBonds=wedgeBonds
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in omol._atomPs.iteritems():
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
"""
useful things to do with these:
fig.axes[0].imshow(z,cmap=cm.gray,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
fig.axes[0].contour(x,y,z,20,colors='k')
fig=Draw.MolToMPL(m);
contribs=Crippen.rdMolDescriptors._CalcCrippenContribs(m)
logps,mrs=zip(*contribs)
x,y,z=Draw.calcAtomGaussians(m,0.03,step=0.01,weights=logps)
fig.axes[0].imshow(z,cmap=cm.jet,interpolation='bilinear',origin='lower',extent=(0,1,0,1))
fig.axes[0].contour(x,y,z,20,colors='k',alpha=0.5)
fig.savefig('coumlogps.colored.png',bbox_inches='tight')
"""
import numpy
from matplotlib import mlab
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
Z = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[0][0], mol._atomPs[0][1])*weights[0]
for i in range(1,mol.GetNumAtoms()):
Zp = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
def MolsToImage(mols, subImgSize=(200,200),legends=None,**kwargs):
"""
"""
import Image
if legends is None: legends = [None]*len(mols)
res = Image.new("RGB",(subImgSize[0]*len(mols),subImgSize[1]))
for i,mol in enumerate(mols):
res.paste(MolToImage(mol,subImgSize,legend=legends[i],**kwargs),(i*subImgSize[0],0))
return res
def MolsToGridImage(mols,molsPerRow=3,subImgSize=(200,200),legends=None,**kwargs):
"""
"""
import Image
if legends is None: legends = [None]*len(mols)
nRows = len(mols)//molsPerRow
if len(mols)%molsPerRow : nRows+=1
res = Image.new("RGB",(molsPerRow*subImgSize[1],nRows*subImgSize[1]),(255,255,255))
for i,mol in enumerate(mols):
row = i//molsPerRow
col = i%molsPerRow
res.paste(MolToImage(mol,subImgSize,legend=legends[i],**kwargs),(col*subImgSize[0],row*subImgSize[1]))
return res
def ReactionToImage(rxn, subImgSize=(200,200),**kwargs):
"""
"""
import Image
mols = []
for i in range(rxn.GetNumReactantTemplates()):
tmpl=rxn.GetReactantTemplate(i)
tmpl.UpdatePropertyCache(False)
mols.append(tmpl)
mols.append(None)
for i in range(rxn.GetNumProductTemplates()):
tmpl = rxn.GetProductTemplate(i)
tmpl.UpdatePropertyCache(False)
mols.append(tmpl)
res = Image.new("RGB",(subImgSize[0]*len(mols),subImgSize[1]),(255,255,255))
for i,mol in enumerate(mols):
if mol is not None:
nimg = MolToImage(mol,subImgSize,kekulize=False,**kwargs)
else:
nimg,canvas = _createCanvas(subImgSize)
p0 = (10,subImgSize[1]//2)
p1 = (subImgSize[0]-10,subImgSize[1]//2)
p3 = (subImgSize[0]-20,subImgSize[1]//2-10)
p4 = (subImgSize[0]-20,subImgSize[1]//2+10)
canvas.addCanvasLine(p0,p1,lineWidth=2,color=(0,0,0))
canvas.addCanvasLine(p3,p1,lineWidth=2,color=(0,0,0))
canvas.addCanvasLine(p4,p1,lineWidth=2,color=(0,0,0))
if hasattr(canvas,'flush'):
canvas.flush()
else:
canvas.save()
res.paste(nimg,(i*subImgSize[0],0))
return res
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/Draw/__init__.py",
"copies": "1",
"size": "8895",
"license": "bsd-3-clause",
"hash": -2760273108516460000,
"line_mean": 29.4623287671,
"line_max": 106,
"alpha_frac": 0.671950534,
"autogenerated": false,
"ratio": 2.9473161033797215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41192666373797215,
"avg_score": null,
"num_lines": null
} |
from rdkit.Chem.FeatMaps.FeatMapPoint import FeatMapPoint
import math
class FeatMapScoreMode(object):
All = 0
""" score each feature in the probe against every matching
feature in the FeatMap.
"""
Closest = 1
""" score each feature in the probe against the closest
matching feature in the FeatMap.
"""
Best = 2
""" score each feature in the probe against the matching
feature in the FeatMap that leads to the highest score
"""
class FeatDirScoreMode(object):
Ignore = 0
""" ignore feature directions
"""
DotFullRange = 1
""" Use the dot product and allow negative contributions when
directions are anti-parallel.
e.g. score = dot(f1Dir,f2Dir)
"""
DotPosRange = 2
""" Use the dot product and scale contributions to lie between
zero and one.
e.g. score = ( dot(f1Dir,f2Dir) + 1 ) / 2
"""
class FeatMapParams(object):
""" one of these should be instantiated for each
feature type in the feature map
"""
radius = 2.5
" cutoff radius "
width = 1.0
" width parameter (e.g. the gaussian sigma) "
class FeatProfile(object):
" scoring profile of the feature "
Gaussian = 0
Triangle = 1
Box = 2
featProfile = FeatProfile.Gaussian
class FeatMap(object):
dirScoreMode = FeatDirScoreMode.Ignore
scoreMode = FeatMapScoreMode.All
params = {}
def __init__(self, params=None, feats=None, weights=None):
if params:
self.params = params
self._initializeFeats(feats, weights)
def _initializeFeats(self, feats, weights):
self._feats = []
if feats:
if len(feats) != len(weights):
raise ValueError('feats and weights lists must be the same length')
for feat, weight in zip(feats, weights):
self.AddFeature(feat, weight)
def AddFeature(self, feat, weight=None):
if self.params and not feat.GetFamily() in self.params:
raise ValueError('feature family %s not found in params' % feat.GetFamily())
newFeat = FeatMapPoint()
newFeat.initFromFeat(feat)
newFeat.weight = weight
self.AddFeatPoint(newFeat)
def AddFeatPoint(self, featPt):
if not isinstance(featPt, FeatMapPoint):
raise ValueError('addFeatPoint() must be called with a FeatMapPoint instance')
if self.params and not featPt.GetFamily() in self.params:
raise ValueError('feature family %s not found in params' % featPt.GetFamily())
self._feats.append(featPt)
def GetFeatures(self):
return self._feats
def GetNumFeatures(self):
return len(self._feats)
def GetFeature(self, i):
return self._feats[i]
def DropFeature(self, i):
del self._feats[i]
def _loopOverMatchingFeats(self, oFeat):
for sIdx, sFeat in enumerate(self._feats):
if sFeat.GetFamily() == oFeat.GetFamily():
yield sIdx, sFeat
def GetFeatFeatScore(self, feat1, feat2, typeMatch=True):
""" feat1 is one of our feats
feat2 is any Feature
"""
if typeMatch and feat1.GetFamily() != feat2.GetFamily():
return 0.0
d2 = feat1.GetDist2(feat2)
params = self.params[feat1.GetFamily()]
if d2 > params.radius * params.radius:
return 0.0
if params.featProfile == FeatMapParams.FeatProfile.Gaussian:
score = math.exp(-d2 / params.width)
elif params.featProfile == FeatMapParams.FeatProfile.Triangle:
d = math.sqrt(d2)
if d < params.width:
score = 1. - d / params.width
else:
score = 0.0
elif params.featProfile == FeatMapParams.FeatProfile.Box:
score = 1.0
weight = feat1.weight
score *= weight
if self.dirScoreMode != FeatDirScoreMode.Ignore:
dirScore = feat1.GetDirMatch(feat2)
if self.dirScoreMode == FeatDirScoreMode.DotPosRange:
dirScore = (dirScore + 1.0) / 2.0
elif self.dirScoreMode != FeatDirScoreMode.DotFullRange:
raise NotImplementedError('bad feature dir score mode')
score *= dirScore
return score
def ScoreFeats(self, featsToScore, mapScoreVect=[], featsScoreVect=[], featsToFeatMapIdx=[]):
nFeats = len(self._feats)
if mapScoreVect and len(mapScoreVect) != nFeats:
raise ValueError('if provided, len(mapScoreVect) should equal numFeats')
nToScore = len(featsToScore)
if featsScoreVect and len(featsScoreVect) != nToScore:
raise ValueError('if provided, len(featsScoreVect) should equal len(featsToScore)')
if featsToFeatMapIdx and len(featsToFeatMapIdx) != nToScore:
raise ValueError('if provided, len(featsToFeatMapIdx) should equal len(featsToScore)')
if mapScoreVect:
for i in range(nFeats):
mapScoreVect[i] = 0.0
else:
mapScoreVect = [0.0] * nFeats
if self.scoreMode == FeatMapScoreMode.Closest:
defScore = 1000.0
else:
defScore = 0.0
if featsScoreVect:
for i in range(nToScore):
featsScoreVect[i] = defScore
else:
featsScoreVect = [defScore] * nToScore
if not featsToFeatMapIdx:
featsToFeatMapIdx = [None] * nToScore
for i in range(nToScore):
if self.scoreMode != FeatMapScoreMode.All:
featsToFeatMapIdx[i] = [-1]
else:
featsToFeatMapIdx[i] = []
for oIdx, oFeat in enumerate(featsToScore):
for sIdx, sFeat in self._loopOverMatchingFeats(oFeat):
if self.scoreMode == FeatMapScoreMode.Closest:
d = sFeat.GetDist2(oFeat)
if d < featsScoreVect[oIdx]:
featsScoreVect[oIdx] = d
featsToFeatMapIdx[oIdx][0] = sIdx
else:
lScore = self.GetFeatFeatScore(sFeat, oFeat, typeMatch=False)
if self.scoreMode == FeatMapScoreMode.Best:
if lScore > featsScoreVect[oIdx]:
featsScoreVect[oIdx] = lScore
featsToFeatMapIdx[oIdx][0] = sIdx
elif self.scoreMode == FeatMapScoreMode.All:
featsScoreVect[oIdx] += lScore
mapScoreVect[sIdx] += lScore
featsToFeatMapIdx[oIdx].append(sIdx)
else:
raise ValueError('bad score mode')
totScore = 0.0
if self.scoreMode == FeatMapScoreMode.Closest:
for oIdx, oFeat in enumerate(featsToScore):
sIdx = featsToFeatMapIdx[oIdx][0]
if sIdx > -1:
          # score against the map feature that was actually matched (sIdx),
          # not whatever sFeat was left over from the earlier loop
          lScore = self.GetFeatFeatScore(self._feats[sIdx], oFeat, typeMatch=False)
featsScoreVect[oIdx] = lScore
mapScoreVect[sIdx] = lScore
totScore += lScore
else:
featsScoreVect[oIdx] = 0
else:
totScore = sum(featsScoreVect)
if self.scoreMode == FeatMapScoreMode.Best:
for oIdx, lScore in enumerate(featsScoreVect):
sIdx = featsToFeatMapIdx[oIdx][0]
if sIdx > -1:
mapScoreVect[sIdx] = lScore
# replace placeholders:
if self.scoreMode != FeatMapScoreMode.All:
for elem in featsToFeatMapIdx:
if elem == [-1]:
elem.pop()
return totScore
def __str__(self):
res = ''
for i, feat in enumerate(self._feats):
weight = feat.weight
pos = feat.GetPos()
res += '% 3d % 12s % 6.4f % 6.4f % 6.4f % 6.4f\n' % (i + 1, feat.GetFamily(), pos.x, pos.y,
pos.z, weight)
return res
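# A minimal scoring sketch; the 'Acceptor' family, coordinates and weights are
# illustrative assumptions. One map feature is scored against one probe feature
# using the default Gaussian profile and the 'All' score mode.
if __name__ == '__main__':
  from rdkit import Geometry

  def _demoPoint(family, x, y, z, weight=1.0):
    pt = FeatMapPoint()
    pt.SetFamily(family)
    pt.SetPos(Geometry.Point3D(x, y, z))
    pt.weight = weight
    return pt

  demoParams = {'Acceptor': FeatMapParams()}
  demoMap = FeatMap(params=demoParams)
  demoMap.AddFeatPoint(_demoPoint('Acceptor', 0.0, 0.0, 0.0))
  demoProbe = _demoPoint('Acceptor', 0.5, 0.0, 0.0)
  # for the Gaussian profile each contribution is exp(-d2/width) * weight
  print(demoMap.ScoreFeats([demoProbe]))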
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/FeatMaps/FeatMaps.py",
"copies": "12",
"size": "7458",
"license": "bsd-3-clause",
"hash": -6629856976347529000,
"line_mean": 29.3170731707,
"line_max": 97,
"alpha_frac": 0.6427996782,
"autogenerated": false,
"ratio": 3.435283279594657,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012191879131962853,
"num_lines": 246
} |
from rdkit import Geometry
from rdkit.Chem.FeatMaps import FeatMaps, FeatMapPoint
import re
"""
ScoreMode=All
DirScoreMode=Ignore
BeginParams
family=Aromatic radius=2.5 width=1.0 profile=Gaussian
family=Acceptor radius=1.5
EndParams
# optional
BeginPoints
family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1)
family=Acceptor pos=(1.0,1.0,2.0) weight=1.25
EndPoints
"""
class FeatMapParseError(ValueError):
pass
class FeatMapParser(object):
data = None
def __init__(self, file=None, data=None):
if file:
self.data = file.readlines()
elif data:
self.SetData(data)
self._lineNum = 0
def SetData(self, data):
if isinstance(data, str):
self.data = data.split('\n')
else:
self.data = data
self._lineNum = 0
def _NextLine(self):
txt = ''
while 1:
try:
l = self.data[self._lineNum].split('#')[0].strip()
except IndexError:
break
self._lineNum += 1
if l:
txt += l
if l[-1] != '\\':
break
return txt
def Parse(self, featMap=None):
if featMap is None:
featMap = FeatMaps.FeatMap()
l = self._NextLine().strip()
while l:
splitL = l.split('=')
if len(splitL) == 1:
keyword = splitL[0].strip().lower()
if keyword == 'beginpoints':
pts = self.ParseFeatPointBlock()
for pt in pts:
featMap.AddFeatPoint(pt)
elif keyword == 'beginparams':
featMap.params = self.ParseParamBlock()
else:
raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum))
else:
keyword = splitL[0].strip().lower()
val = splitL[1].strip()
if keyword == 'scoremode':
try:
featMap.scoreMode = getattr(FeatMaps.FeatMapScoreMode, val)
except AttributeError:
raise FeatMapParseError('ScoreMode %s not recognized on line %d' % (val, self._lineNum))
elif keyword == 'dirscoremode':
try:
featMap.dirScoreMode = getattr(FeatMaps.FeatDirScoreMode, val)
except AttributeError:
raise FeatMapParseError('DirScoreMode %s not recognized on line %d' %
(val, self._lineNum))
else:
raise FeatMapParseError('Unrecognized keyword %s on line %d' % (keyword, self._lineNum))
l = self._NextLine().strip()
return featMap
def ParseParamBlock(self):
paramLineSplitter = re.compile(r'([a-zA-Z]+) *= *(\S+)')
params = {}
l = self._NextLine()
while l and l != 'EndParams':
param = FeatMaps.FeatMapParams()
vals = paramLineSplitter.findall(l)
for name, val in vals:
name = name.lower()
if name == 'family':
family = val
elif name == 'radius':
param.radius = float(val)
elif name == 'width':
param.width = float(val)
elif name == 'profile':
try:
param.featProfile = getattr(param.FeatProfile, val)
except AttributeError:
raise FeatMapParseError('Profile %s not recognized on line %d' % (val, self._lineNum))
else:
raise FeatMapParseError('FeatMapParam option %s not recognized on line %d' %
(name, self._lineNum))
params[family] = param
l = self._NextLine()
if l != 'EndParams':
raise FeatMapParseError('EndParams line not found')
return params
def _parsePoint(self, txt):
txt = txt.strip()
startP = 0
endP = len(txt)
if txt[0] == '(':
startP += 1
if txt[-1] == ')':
endP -= 1
txt = txt[startP:endP]
splitL = txt.split(',')
if len(splitL) != 3:
raise ValueError('Bad location string')
vs = [float(x) for x in splitL]
pt = Geometry.Point3D(vs[0], vs[1], vs[2])
return pt
def ParseFeatPointBlock(self):
featLineSplitter = re.compile(r'([a-zA-Z]+) *= *')
feats = []
l = self._NextLine()
while l and l != 'EndPoints':
vals = featLineSplitter.split(l)
while vals.count(''):
vals.remove('')
p = FeatMapPoint.FeatMapPoint()
i = 0
while i < len(vals):
name = vals[i].lower()
if name == 'family':
i += 1
val = vals[i].strip()
p.SetFamily(val)
elif name == 'weight':
i += 1
val = float(vals[i])
p.weight = val
elif name == 'pos':
i += 1
val = vals[i]
pos = self._parsePoint(val)
p.SetPos(pos)
elif name == 'dir':
i += 1
val = vals[i]
pos = self._parsePoint(val)
p.featDirs.append(pos)
else:
raise FeatMapParseError('FeatPoint option %s not recognized on line %d' %
(name, self._lineNum))
i += 1
feats.append(p)
l = self._NextLine()
return feats
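# A minimal parsing sketch; the block below simply reuses the example format
# shown in the module docstring above, so the families and positions are
# illustrative only.
if __name__ == '__main__':
  demoData = """
ScoreMode=All
DirScoreMode=Ignore
BeginParams
  family=Aromatic radius=2.5 width=1.0 profile=Gaussian
  family=Acceptor radius=1.5
EndParams
BeginPoints
  family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
  family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1)
  family=Acceptor pos=(1.0,1.0,2.0) weight=1.25
EndPoints
"""
  demoParser = FeatMapParser(data=demoData)
  demoMap = demoParser.Parse()
  print(demoMap.GetNumFeatures())  # 3 feature points parsed
  print(demoMap)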
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/FeatMaps/FeatMapParser.py",
"copies": "12",
"size": "5307",
"license": "bsd-3-clause",
"hash": 5239492474791536000,
"line_mean": 26.7853403141,
"line_max": 100,
"alpha_frac": 0.5577539099,
"autogenerated": false,
"ratio": 3.5498327759197323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from rdkit import Geometry
from rdkit.Chem import ChemicalFeatures
from rdkit.Chem.FeatMaps import FeatMaps,FeatMapPoint
import re
"""
ScoreMode=All
DirScoreMode=Ignore
BeginParams
family=Aromatic radius=2.5 width=1.0 profile=Gaussian
family=Acceptor radius=1.5
EndParams
# optional
BeginPoints
family=Acceptor pos=(1.0, 0.0, 5.0) weight=1.25 dir=(1, 1, 0)
family=Aromatic pos=(0.0,1.0,0.0) weight=2.0 dir=(0,0,1) dir=(0,0,-1)
family=Acceptor pos=(1.0,1.0,2.0) weight=1.25
EndPoints
"""
class FeatMapParseError(ValueError):
pass
class FeatMapParser(object):
data=None
def __init__(self,file=None,data=None):
if file:
self.data=file.readlines()
elif data:
self.SetData(data)
self._lineNum=0
def SetData(self,data):
if isinstance(data,str):
self.data=data.split('\n')
else:
self.data=data
self._lineNum=0
def _NextLine(self):
txt = ''
while 1:
try:
l = self.data[self._lineNum].split('#')[0].strip()
except IndexError:
break
self._lineNum+=1
if l:
txt += l
if l[-1]!='\\':
break
return txt
def Parse(self,featMap=None):
if featMap is None:
featMap = FeatMaps.FeatMap()
l = self._NextLine().strip()
while l:
splitL = l.split('=')
if len(splitL)==1:
keyword=splitL[0].strip().lower()
if keyword=='beginpoints':
pts=self.ParseFeatPointBlock()
for pt in pts:
featMap.AddFeatPoint(pt)
elif keyword=='beginparams':
featMap.params=self.ParseParamBlock()
else:
raise FeatMapParseError,'Unrecognized keyword %s on line %d'%(keyword,self._lineNum)
else:
keyword = splitL[0].strip().lower()
val = splitL[1].strip()
if keyword=='scoremode':
try:
featMap.scoreMode=getattr(FeatMaps.FeatMapScoreMode,val)
except AttributeError:
raise FeatMapParseError,'ScoreMode %s not recognized on line %d'%(val,self._lineNum)
elif keyword=='dirscoremode':
try:
featMap.dirScoreMode=getattr(FeatMaps.FeatDirScoreMode,val)
except AttributeError:
raise FeatMapParseError,'DirScoreMode %s not recognized on line %d'%(val,self._lineNum)
else:
raise FeatMapParseError,'Unrecognized keyword %s on line %d'%(keyword,self._lineNum)
l = self._NextLine().strip()
return featMap
def ParseParamBlock(self):
paramLineSplitter = re.compile(r'([a-zA-Z]+) *= *(\S+)')
params = {}
l = self._NextLine()
while l and l!='EndParams':
param = FeatMaps.FeatMapParams()
vals=paramLineSplitter.findall(l)
for name,val in vals:
name = name.lower()
if name=='family':
family=val
elif name=='radius':
param.radius=float(val)
elif name=='width':
param.width=float(val)
elif name=='profile':
try:
param.featProfile=getattr(param.FeatProfile,val)
except AttributeError:
raise FeatMapParseError,'Profile %s not recognized on line %d'%(val,self._lineNum)
else:
raise FeatMapParseError,'FeatMapParam option %s not recognized on line %d'%(name,self._lineNum)
params[family]=param
l = self._NextLine()
if l!='EndParams':
raise FeatMapParseError('EndParams line not found')
return params
def _parsePoint(self,txt):
txt = txt.strip()
startP=0
endP=len(txt)
if txt[0]=='(':
startP += 1
if txt[-1]==')':
endP -= 1
txt = txt[startP:endP]
splitL = txt.split(',')
if len(splitL) != 3:
raise ValueError,'Bad location string'
vs = [float(x) for x in splitL]
pt = Geometry.Point3D(vs[0],vs[1],vs[2])
return pt
def ParseFeatPointBlock(self):
featLineSplitter = re.compile(r'([a-zA-Z]+) *= *')
feats = []
l = self._NextLine()
while l and l!='EndPoints':
vals=featLineSplitter.split(l)
while vals.count(''): vals.remove('')
p = FeatMapPoint.FeatMapPoint()
i=0
while i<len(vals):
name = vals[i].lower()
if name=='family':
i+=1
val = vals[i].strip()
p.SetFamily(val)
elif name=='weight':
i+=1
val = float(vals[i])
p.weight = val
elif name=='pos':
i+=1
val = vals[i]
pos = self._parsePoint(val)
p.SetPos(pos)
elif name=='dir':
i+=1
val = vals[i]
pos = self._parsePoint(val)
p.featDirs.append(pos)
else:
raise FeatMapParseError,'FeatPoint option %s not recognized on line %d'%(name,self._lineNum)
i+=1
feats.append(p)
l = self._NextLine()
return feats
#------------------------------------
#
# doctest boilerplate
#
def _test():
import doctest,sys
return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
import sys
failed,tried = _test()
sys.exit(failed)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/FeatMaps/FeatMapParser.py",
"copies": "2",
"size": "5354",
"license": "bsd-3-clause",
"hash": 1657975308928289800,
"line_mean": 25.9045226131,
"line_max": 105,
"alpha_frac": 0.5821815465,
"autogenerated": false,
"ratio": 3.481144343302991,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063325889802991,
"avg_score": null,
"num_lines": null
} |
import copy
from rdkit.Chem.FeatMaps import FeatMaps
class MergeMethod(object):
# Put the new point at the weighted average position of the two fused points
WeightedAverage = 0
# Put the new point at the un-weighted average position of the two fused points
Average = 1
# Put the new point at the position of the larger (by weight) of the two points
UseLarger = 2
@classmethod
def valid(cls, mergeMethod):
""" Check that mergeMethod is valid """
if mergeMethod not in (cls.WeightedAverage, cls.Average, cls.UseLarger):
raise ValueError('unrecognized mergeMethod')
class MergeMetric(object):
# Do not merge points
NoMerge = 0
# merge two points if they come within a threshold distance
Distance = 1
# merge two points if their percent overlap exceeds a threshold
Overlap = 2
@classmethod
def valid(cls, mergeMetric):
""" Check that mergeMetric is valid """
if mergeMetric not in (cls.NoMerge, cls.Distance, cls.Overlap):
raise ValueError('unrecognized mergeMetric')
class DirMergeMode(object):
# Do not merge directions (i.e. keep all direction vectors)
NoMerge = 0
# Sum direction vectors
Sum = 1
@classmethod
def valid(cls, dirMergeMode):
""" Check that dirMergeMode is valid """
if dirMergeMode not in (cls.NoMerge, cls.Sum):
raise ValueError('unrecognized dirMergeMode')
def __copyAll(res, fm1, fm2):
""" no user-serviceable parts inside """
for feat in fm1.GetFeatures():
res.AddFeatPoint(copy.deepcopy(feat))
for feat in fm2.GetFeatures():
res.AddFeatPoint(copy.deepcopy(feat))
def GetFeatFeatDistMatrix(fm, mergeMetric, mergeTol, dirMergeMode, compatFunc):
"""
NOTE that mergeTol is a max value for merging when using distance-based
merging and a min value when using score-based merging.
"""
MergeMetric.valid(mergeMetric)
dists = [[1e8] * fm.GetNumFeatures() for _ in range(fm.GetNumFeatures())]
if mergeMetric == MergeMetric.NoMerge:
return dists
elif mergeMetric == MergeMetric.Distance:
mergeTol2 = mergeTol * mergeTol
for i in range(fm.GetNumFeatures()):
ptI = fm.GetFeature(i)
for j in range(i + 1, fm.GetNumFeatures()):
ptJ = fm.GetFeature(j)
if compatFunc(ptI, ptJ):
dist2 = ptI.GetDist2(ptJ)
if dist2 < mergeTol2:
dists[i][j] = dist2
dists[j][i] = dist2
elif mergeMetric == MergeMetric.Overlap:
for i in range(fm.GetNumFeatures()):
ptI = fm.GetFeature(i)
for j in range(i + 1, fm.GetNumFeatures()):
ptJ = fm.GetFeature(j)
if compatFunc(ptI, ptJ):
score = fm.GetFeatFeatScore(ptI, ptJ, typeMatch=False)
score *= -1 * ptJ.weight
if score < mergeTol:
dists[i][j] = score
dists[j][i] = score
return dists
def familiesMatch(f1, f2):
return f1.GetFamily() == f2.GetFamily()
def feq(v1, v2, tol=1e-4):
return abs(v1 - v2) < tol
def MergeFeatPoints(fm, mergeMetric=MergeMetric.NoMerge, mergeTol=1.5,
dirMergeMode=DirMergeMode.NoMerge, mergeMethod=MergeMethod.WeightedAverage,
compatFunc=familiesMatch):
"""
NOTE that mergeTol is a max value for merging when using distance-based
merging and a min value when using score-based merging.
returns whether or not any points were actually merged
"""
MergeMetric.valid(mergeMetric)
MergeMethod.valid(mergeMethod)
DirMergeMode.valid(dirMergeMode)
res = False
if mergeMetric == MergeMetric.NoMerge:
return res
dists = GetFeatFeatDistMatrix(fm, mergeMetric, mergeTol, dirMergeMode, compatFunc)
distOrders = [None] * len(dists)
for i in range(len(dists)):
distV = dists[i]
distOrders[i] = []
for j, dist in enumerate(distV):
if dist < mergeTol:
distOrders[i].append((dist, j))
distOrders[i].sort()
# print('distOrders:')
# print(distOrders)
# we now know the "distances" and have rank-ordered list of
# each point's neighbors. Work with that.
# progressively merge nearest neighbors until there
# are no more points left to merge
featsInPlay = list(range(fm.GetNumFeatures()))
featsToRemove = []
# print '--------------------------------'
while featsInPlay:
# find two features who are mutual nearest neighbors:
fipCopy = featsInPlay[:]
for fi in fipCopy:
# print('>>>',fi,fipCopy,featsInPlay)
# print('\t',distOrders[fi])
mergeThem = False
if not distOrders[fi]:
featsInPlay.remove(fi)
continue
dist, nbr = distOrders[fi][0]
if nbr not in featsInPlay:
continue
if distOrders[nbr][0][1] == fi:
# print 'direct:',fi,nbr
mergeThem = True
else:
# it may be that there are several points at about the same distance,
# check for that now
if (feq(distOrders[nbr][0][0], dist)):
for distJ, nbrJ in distOrders[nbr][1:]:
if feq(dist, distJ):
if nbrJ == fi:
# print 'indirect: ',fi,nbr
mergeThem = True
break
else:
break
# print ' bottom:',mergeThem
if mergeThem:
break
if mergeThem:
res = True
featI = fm.GetFeature(fi)
nbrFeat = fm.GetFeature(nbr)
if mergeMethod == MergeMethod.WeightedAverage:
newPos = featI.GetPos() * featI.weight + nbrFeat.GetPos() * nbrFeat.weight
newPos /= (featI.weight + nbrFeat.weight)
newWeight = (featI.weight + nbrFeat.weight) / 2
elif mergeMethod == MergeMethod.Average:
newPos = featI.GetPos() + nbrFeat.GetPos()
newPos /= 2
newWeight = (featI.weight + nbrFeat.weight) / 2
elif mergeMethod == MergeMethod.UseLarger:
if featI.weight > nbrFeat.weight:
newPos = featI.GetPos()
newWeight = featI.weight
else:
newPos = nbrFeat.GetPos()
newWeight = nbrFeat.weight
featI.SetPos(newPos)
featI.weight = newWeight
# nbr and fi are no longer valid targets:
# print 'nbr done:',nbr,featsToRemove,featsInPlay
featsToRemove.append(nbr)
featsInPlay.remove(fi)
featsInPlay.remove(nbr)
for nbrList in distOrders:
try:
nbrList.remove(fi)
except ValueError:
pass
try:
nbrList.remove(nbr)
except ValueError:
pass
else:
# print ">>>> Nothing found, abort"
break
featsToRemove.sort()
for i, fIdx in enumerate(featsToRemove):
fm.DropFeature(fIdx - i)
return res
def CombineFeatMaps(fm1, fm2, mergeMetric=MergeMetric.NoMerge, mergeTol=1.5,
dirMergeMode=DirMergeMode.NoMerge):
"""
the parameters will be taken from fm1
"""
res = FeatMaps.FeatMap(params=fm1.params)
__copyAll(res, fm1, fm2)
if mergeMetric != MergeMetric.NoMerge:
MergeFeatPoints(res, mergeMetric=mergeMetric, mergeTol=mergeTol)
return res
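# A minimal merge sketch; the 'Acceptor' family, coordinates and tolerances are
# illustrative assumptions. Two nearby points from separate feature maps are
# combined and then fused by distance-based merging.
if __name__ == '__main__':
  from rdkit import Geometry
  from rdkit.Chem.FeatMaps.FeatMapPoint import FeatMapPoint

  def _demoPoint(family, x, y, z, weight=1.0):
    pt = FeatMapPoint()
    pt.SetFamily(family)
    pt.SetPos(Geometry.Point3D(x, y, z))
    pt.weight = weight
    return pt

  demoParams = {'Acceptor': FeatMaps.FeatMapParams()}
  fm1 = FeatMaps.FeatMap(params=demoParams)
  fm1.AddFeatPoint(_demoPoint('Acceptor', 0.0, 0.0, 0.0))
  fm2 = FeatMaps.FeatMap(params=demoParams)
  fm2.AddFeatPoint(_demoPoint('Acceptor', 0.5, 0.0, 0.0))
  combined = CombineFeatMaps(fm1, fm2, mergeMetric=MergeMetric.Distance, mergeTol=1.5)
  print(combined.GetNumFeatures())  # the two acceptor points merge into one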
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/Chem/FeatMaps/FeatMapUtils.py",
"copies": "12",
"size": "7261",
"license": "bsd-3-clause",
"hash": 8989646949305047000,
"line_mean": 29.5084033613,
"line_max": 95,
"alpha_frac": 0.6412339898,
"autogenerated": false,
"ratio": 3.5733267716535435,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
import unittest,os,sys
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import MolCatalog
class TestCase(unittest.TestCase):
def test1(self):
cat = MolCatalog.CreateMolCatalog()
es = []
for smi in ('C1CCC1OC','C1CCC1','C'):
m = Chem.MolFromSmiles(smi)
entry = MolCatalog.MolCatalogEntry()
entry.SetMol(m)
self.assertTrue(entry.GetMol())
eSmi = Chem.MolToSmiles(entry.GetMol())
self.assertTrue(eSmi==Chem.MolToSmiles(m))
entry.SetDescription(smi)
self.assertTrue(entry.GetDescription()==smi)
es.append(entry)
v=cat.AddEntry(es[0])
self.assertTrue(v==0)
self.assertTrue(cat.GetNumEntries()==1)
v=cat.AddEntry(es[1])
self.assertTrue(v==1)
self.assertTrue(cat.GetNumEntries()==2)
v=cat.AddEntry(es[2])
self.assertTrue(v==2)
self.assertTrue(cat.GetNumEntries()==3)
cat.AddEdge(0,1)
cat.AddEdge(0,2)
cat.AddEdge(1,2)
d = cPickle.dumps(cat)
es = None
entry = None
cat=None
cat = cPickle.loads(d)
self.assertTrue(cat.GetNumEntries()==3)
cat=None
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "strets123/rdkit",
"path": "Code/GraphMol/MolCatalog/Wrap/rough_test.py",
"copies": "4",
"size": "1285",
"license": "bsd-3-clause",
"hash": 2677997044440974300,
"line_mean": 21.1551724138,
"line_max": 50,
"alpha_frac": 0.6435797665,
"autogenerated": false,
"ratio": 2.9204545454545454,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030728956509119526,
"num_lines": 58
} |
import unittest, os, sys
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import MolCatalog
class TestCase(unittest.TestCase):
def test1(self):
cat = MolCatalog.CreateMolCatalog()
es = []
for smi in ('C1CCC1OC', 'C1CCC1', 'C'):
m = Chem.MolFromSmiles(smi)
entry = MolCatalog.MolCatalogEntry()
entry.SetMol(m)
self.assertTrue(entry.GetMol())
eSmi = Chem.MolToSmiles(entry.GetMol())
self.assertTrue(eSmi == Chem.MolToSmiles(m))
entry.SetDescription(smi)
self.assertTrue(entry.GetDescription() == smi)
es.append(entry)
v = cat.AddEntry(es[0])
self.assertTrue(v == 0)
self.assertTrue(cat.GetNumEntries() == 1)
v = cat.AddEntry(es[1])
self.assertTrue(v == 1)
self.assertTrue(cat.GetNumEntries() == 2)
v = cat.AddEntry(es[2])
self.assertTrue(v == 2)
self.assertTrue(cat.GetNumEntries() == 3)
cat.AddEdge(0, 1)
cat.AddEdge(0, 2)
cat.AddEdge(1, 2)
d = cPickle.dumps(cat)
es = None
entry = None
cat = None
cat = cPickle.loads(d)
self.assertTrue(cat.GetNumEntries() == 3)
cat = None
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "rvianello/rdkit",
"path": "Code/GraphMol/MolCatalog/Wrap/rough_test.py",
"copies": "5",
"size": "1303",
"license": "bsd-3-clause",
"hash": -4307203768100849000,
"line_mean": 22.2678571429,
"line_max": 52,
"alpha_frac": 0.6346891788,
"autogenerated": false,
"ratio": 2.9681093394077447,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00727800369750766,
"num_lines": 56
} |
from __future__ import print_function
from rdkit import RDLogger
logger = RDLogger.logger()
from rdkit import Chem, Geometry
import numpy
from rdkit.Numerics import Alignment
from rdkit.Chem.Subshape import SubshapeObjects
class SubshapeAlignment(object):
transform = None
triangleSSD = None
targetTri = None
queryTri = None
alignedConfId = -1
dirMatch = 0.0
shapeDist = 0.0
def _getAllTriangles(pts, orderedTraversal=False):
for i in range(len(pts)):
if orderedTraversal:
jStart = i + 1
else:
jStart = 0
for j in range(jStart, len(pts)):
if j == i:
continue
if orderedTraversal:
kStart = j + 1
else:
kStart = 0
for k in range(j + 1, len(pts)):
if k == i or k == j:
continue
yield (i, j, k)
class SubshapeDistanceMetric(object):
TANIMOTO = 0
PROTRUDE = 1
# returns the distance between two shapes according to the provided metric
def GetShapeShapeDistance(s1, s2, distMetric):
if distMetric == SubshapeDistanceMetric.PROTRUDE:
#print s1.grid.GetOccupancyVect().GetTotalVal(),s2.grid.GetOccupancyVect().GetTotalVal()
if s1.grid.GetOccupancyVect().GetTotalVal() < s2.grid.GetOccupancyVect().GetTotalVal():
d = Geometry.ProtrudeDistance(s1.grid, s2.grid)
#print d
else:
d = Geometry.ProtrudeDistance(s2.grid, s1.grid)
else:
d = Geometry.TanimotoDistance(s1.grid, s2.grid)
return d
# clusters a set of alignments and returns the cluster centroid
def ClusterAlignments(mol, alignments, builder, neighborTol=0.1,
distMetric=SubshapeDistanceMetric.PROTRUDE, tempConfId=1001):
from rdkit.ML.Cluster import Butina
dists = []
for i in range(len(alignments)):
TransformMol(mol, alignments[i].transform, newConfId=tempConfId)
shapeI = builder.GenerateSubshapeShape(mol, tempConfId, addSkeleton=False)
for j in range(i):
TransformMol(mol, alignments[j].transform, newConfId=tempConfId + 1)
shapeJ = builder.GenerateSubshapeShape(mol, tempConfId + 1, addSkeleton=False)
d = GetShapeShapeDistance(shapeI, shapeJ, distMetric)
dists.append(d)
mol.RemoveConformer(tempConfId + 1)
mol.RemoveConformer(tempConfId)
clusts = Butina.ClusterData(dists, len(alignments), neighborTol, isDistData=True)
res = [alignments[x[0]] for x in clusts]
return res
def TransformMol(mol, tform, confId=-1, newConfId=100):
""" Applies the transformation to a molecule and sets it up with
a single conformer
"""
newConf = Chem.Conformer()
newConf.SetId(0)
refConf = mol.GetConformer(confId)
for i in range(refConf.GetNumAtoms()):
pos = list(refConf.GetAtomPosition(i))
pos.append(1.0)
newPos = numpy.dot(tform, numpy.array(pos))
newConf.SetAtomPosition(i, list(newPos)[:3])
newConf.SetId(newConfId)
mol.RemoveConformer(newConfId)
mol.AddConformer(newConf, assignId=False)
class SubshapeAligner(object):
triangleRMSTol = 1.0
distMetric = SubshapeDistanceMetric.PROTRUDE
shapeDistTol = 0.2
numFeatThresh = 3
dirThresh = 2.6
edgeTol = 6.0
#coarseGridToleranceMult=1.5
#medGridToleranceMult=1.25
coarseGridToleranceMult = 1.0
medGridToleranceMult = 1.0
def GetTriangleMatches(self, target, query):
""" this is a generator function returning the possible triangle
matches between the two shapes
"""
ssdTol = (self.triangleRMSTol**2) * 9
res = []
tgtPts = target.skelPts
queryPts = query.skelPts
tgtLs = {}
for i in range(len(tgtPts)):
for j in range(i + 1, len(tgtPts)):
l2 = (tgtPts[i].location - tgtPts[j].location).LengthSq()
tgtLs[(i, j)] = l2
queryLs = {}
for i in range(len(queryPts)):
for j in range(i + 1, len(queryPts)):
l2 = (queryPts[i].location - queryPts[j].location).LengthSq()
queryLs[(i, j)] = l2
compatEdges = {}
tol2 = self.edgeTol * self.edgeTol
for tk, tv in tgtLs.items():
for qk, qv in queryLs.items():
if abs(tv - qv) < tol2:
compatEdges[(tk, qk)] = 1
seqNo = 0
for tgtTri in _getAllTriangles(tgtPts, orderedTraversal=True):
tgtLocs = [tgtPts[x].location for x in tgtTri]
for queryTri in _getAllTriangles(queryPts, orderedTraversal=False):
if ((tgtTri[0],tgtTri[1]),(queryTri[0],queryTri[1])) in compatEdges and \
((tgtTri[0],tgtTri[2]),(queryTri[0],queryTri[2])) in compatEdges and \
((tgtTri[1],tgtTri[2]),(queryTri[1],queryTri[2])) in compatEdges:
queryLocs = [queryPts[x].location for x in queryTri]
ssd, tf = Alignment.GetAlignmentTransform(tgtLocs, queryLocs)
if ssd <= ssdTol:
alg = SubshapeAlignment()
alg.transform = tf
alg.triangleSSD = ssd
alg.targetTri = tgtTri
alg.queryTri = queryTri
alg._seqNo = seqNo
seqNo += 1
yield alg
def _checkMatchFeatures(self, targetPts, queryPts, alignment):
nMatched = 0
for i in range(3):
tgtFeats = targetPts[alignment.targetTri[i]].molFeatures
qFeats = queryPts[alignment.queryTri[i]].molFeatures
if not tgtFeats and not qFeats:
nMatched += 1
else:
for j, jFeat in enumerate(tgtFeats):
if jFeat in qFeats:
nMatched += 1
break
if nMatched >= self.numFeatThresh:
break
return nMatched >= self.numFeatThresh
def PruneMatchesUsingFeatures(self, target, query, alignments, pruneStats=None):
i = 0
targetPts = target.skelPts
queryPts = query.skelPts
while i < len(alignments):
alg = alignments[i]
if not self._checkMatchFeatures(targetPts, queryPts, alg):
if pruneStats is not None:
pruneStats['features'] = pruneStats.get('features', 0) + 1
del alignments[i]
else:
i += 1
def _checkMatchDirections(self, targetPts, queryPts, alignment):
dot = 0.0
for i in range(3):
tgtPt = targetPts[alignment.targetTri[i]]
queryPt = queryPts[alignment.queryTri[i]]
qv = queryPt.shapeDirs[0]
tv = tgtPt.shapeDirs[0]
rotV = [0.0] * 3
rotV[0] = alignment.transform[0, 0] * qv[0] + alignment.transform[0, 1] * qv[
1] + alignment.transform[0, 2] * qv[2]
rotV[1] = alignment.transform[1, 0] * qv[0] + alignment.transform[1, 1] * qv[
1] + alignment.transform[1, 2] * qv[2]
rotV[2] = alignment.transform[2, 0] * qv[0] + alignment.transform[2, 1] * qv[
1] + alignment.transform[2, 2] * qv[2]
dot += abs(rotV[0] * tv[0] + rotV[1] * tv[1] + rotV[2] * tv[2])
if dot >= self.dirThresh:
# already above the threshold, no need to continue
break
alignment.dirMatch = dot
return dot >= self.dirThresh
def PruneMatchesUsingDirection(self, target, query, alignments, pruneStats=None):
i = 0
tgtPts = target.skelPts
queryPts = query.skelPts
while i < len(alignments):
if not self._checkMatchDirections(tgtPts, queryPts, alignments[i]):
if pruneStats is not None:
pruneStats['direction'] = pruneStats.get('direction', 0) + 1
del alignments[i]
else:
i += 1
def _addCoarseAndMediumGrids(self, mol, tgt, confId, builder):
oSpace = builder.gridSpacing
if mol:
builder.gridSpacing = oSpace * 1.5
tgt.medGrid = builder.GenerateSubshapeShape(mol, confId, addSkeleton=False)
builder.gridSpacing = oSpace * 2
tgt.coarseGrid = builder.GenerateSubshapeShape(mol, confId, addSkeleton=False)
builder.gridSpacing = oSpace
else:
tgt.medGrid = builder.SampleSubshape(tgt, oSpace * 1.5)
tgt.coarseGrid = builder.SampleSubshape(tgt, oSpace * 2.0)
def _checkMatchShape(self, targetMol, target, queryMol, query, alignment, builder, targetConf,
queryConf, pruneStats=None, tConfId=1001):
matchOk = True
TransformMol(queryMol, alignment.transform, confId=queryConf, newConfId=tConfId)
oSpace = builder.gridSpacing
builder.gridSpacing = oSpace * 2
coarseGrid = builder.GenerateSubshapeShape(queryMol, tConfId, addSkeleton=False)
d = GetShapeShapeDistance(coarseGrid, target.coarseGrid, self.distMetric)
if d > self.shapeDistTol * self.coarseGridToleranceMult:
matchOk = False
if pruneStats is not None:
pruneStats['coarseGrid'] = pruneStats.get('coarseGrid', 0) + 1
else:
builder.gridSpacing = oSpace * 1.5
medGrid = builder.GenerateSubshapeShape(queryMol, tConfId, addSkeleton=False)
d = GetShapeShapeDistance(medGrid, target.medGrid, self.distMetric)
if d > self.shapeDistTol * self.medGridToleranceMult:
matchOk = False
if pruneStats is not None:
pruneStats['medGrid'] = pruneStats.get('medGrid', 0) + 1
else:
builder.gridSpacing = oSpace
fineGrid = builder.GenerateSubshapeShape(queryMol, tConfId, addSkeleton=False)
d = GetShapeShapeDistance(fineGrid, target, self.distMetric)
#print ' ',d
if d > self.shapeDistTol:
matchOk = False
if pruneStats is not None:
pruneStats['fineGrid'] = pruneStats.get('fineGrid', 0) + 1
alignment.shapeDist = d
queryMol.RemoveConformer(tConfId)
builder.gridSpacing = oSpace
return matchOk
def PruneMatchesUsingShape(self, targetMol, target, queryMol, query, builder, alignments,
tgtConf=-1, queryConf=-1, pruneStats=None):
if not hasattr(target, 'medGrid'):
self._addCoarseAndMediumGrids(targetMol, target, tgtConf, builder)
logger.info("Shape-based Pruning")
i = 0
nOrig = len(alignments)
nDone = 0
while i < len(alignments):
removeIt = False
alg = alignments[i]
nDone += 1
if not nDone % 100:
nLeft = len(alignments)
logger.info(' processed %d of %d. %d alignments remain' % ((nDone, nOrig, nLeft)))
if not self._checkMatchShape(targetMol, target, queryMol, query, alg, builder,
targetConf=tgtConf, queryConf=queryConf, pruneStats=pruneStats):
del alignments[i]
else:
i += 1
def GetSubshapeAlignments(self, targetMol, target, queryMol, query, builder, tgtConf=-1,
queryConf=-1, pruneStats=None):
import time
if pruneStats is None:
pruneStats = {}
logger.info("Generating triangle matches")
t1 = time.time()
res = [x for x in self.GetTriangleMatches(target, query)]
t2 = time.time()
logger.info("Got %d possible alignments in %.1f seconds" % (len(res), t2 - t1))
pruneStats['gtm_time'] = t2 - t1
if builder.featFactory:
logger.info("Doing feature pruning")
t1 = time.time()
self.PruneMatchesUsingFeatures(target, query, res, pruneStats=pruneStats)
t2 = time.time()
pruneStats['feats_time'] = t2 - t1
logger.info("%d possible alignments remain. (%.1f seconds required)" % (len(res), t2 - t1))
logger.info("Doing direction pruning")
t1 = time.time()
self.PruneMatchesUsingDirection(target, query, res, pruneStats=pruneStats)
t2 = time.time()
pruneStats['direction_time'] = t2 - t1
logger.info("%d possible alignments remain. (%.1f seconds required)" % (len(res), t2 - t1))
t1 = time.time()
self.PruneMatchesUsingShape(targetMol, target, queryMol, query, builder, res, tgtConf=tgtConf,
queryConf=queryConf, pruneStats=pruneStats)
t2 = time.time()
pruneStats['shape_time'] = t2 - t1
return res
def __call__(self, targetMol, target, queryMol, query, builder, tgtConf=-1, queryConf=-1,
pruneStats=None):
for alignment in self.GetTriangleMatches(target, query):
if builder.featFactory and \
not self._checkMatchFeatures(target.skelPts,query.skelPts,alignment):
if pruneStats is not None:
pruneStats['features'] = pruneStats.get('features', 0) + 1
continue
if not self._checkMatchDirections(target.skelPts, query.skelPts, alignment):
if pruneStats is not None:
pruneStats['direction'] = pruneStats.get('direction', 0) + 1
continue
if not hasattr(target, 'medGrid'):
self._addCoarseAndMediumGrids(targetMol, target, tgtConf, builder)
if not self._checkMatchShape(targetMol, target, queryMol, query, alignment, builder,
targetConf=tgtConf, queryConf=queryConf, pruneStats=pruneStats):
continue
# if we made it this far, it's a good alignment
yield alignment
if __name__ == '__main__':
from rdkit.six.moves import cPickle
  tgtMol, tgtShape = cPickle.load(open('target.pkl', 'rb'))
  queryMol, queryShape = cPickle.load(open('query.pkl', 'rb'))
  builder = cPickle.load(open('builder.pkl', 'rb'))
aligner = SubshapeAligner()
algs = aligner.GetSubshapeAlignments(tgtMol, tgtShape, queryMol, queryShape, builder)
print(len(algs))
from rdkit.Chem.PyMol import MolViewer
v = MolViewer()
v.ShowMol(tgtMol, name='Target', showOnly=True)
v.ShowMol(queryMol, name='Query', showOnly=False)
SubshapeObjects.DisplaySubshape(v, tgtShape, 'target_shape', color=(.8, .2, .2))
SubshapeObjects.DisplaySubshape(v, queryShape, 'query_shape', color=(.2, .2, .8))
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Subshape/SubshapeAligner.py",
"copies": "1",
"size": "13420",
"license": "bsd-3-clause",
"hash": -5043293890927798000,
"line_mean": 36.8028169014,
"line_max": 99,
"alpha_frac": 0.6524590164,
"autogenerated": false,
"ratio": 3.286798922361009,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9360487573334718,
"avg_score": 0.01575407308525806,
"num_lines": 355
} |
from rdkit import RDLogger
logger = RDLogger.logger()
from rdkit import Chem,Geometry
import numpy
from rdkit.Numerics import Alignment
from rdkit.Chem.Subshape import SubshapeObjects
class SubshapeAlignment(object):
transform=None
triangleSSD=None
targetTri=None
queryTri=None
alignedConfId=-1
dirMatch=0.0
shapeDist=0.0
def _getAllTriangles(pts,orderedTraversal=False):
for i in range(len(pts)):
if orderedTraversal:
jStart=i+1
else:
jStart=0
for j in range(jStart,len(pts)):
if j==i:
continue
if orderedTraversal:
kStart=j+1
else:
kStart=0
for k in range(j+1,len(pts)):
if k==i or k==j:
continue
yield (i,j,k)
class SubshapeDistanceMetric(object):
TANIMOTO=0
PROTRUDE=1
# returns the distance between two shapes according to the provided metric
def GetShapeShapeDistance(s1,s2,distMetric):
if distMetric==SubshapeDistanceMetric.PROTRUDE:
#print s1.grid.GetOccupancyVect().GetTotalVal(),s2.grid.GetOccupancyVect().GetTotalVal()
if s1.grid.GetOccupancyVect().GetTotalVal()<s2.grid.GetOccupancyVect().GetTotalVal():
d = Geometry.ProtrudeDistance(s1.grid,s2.grid)
#print d
else:
d = Geometry.ProtrudeDistance(s2.grid,s1.grid)
else:
d = Geometry.TanimotoDistance(s1.grid,s2.grid)
return d
# clusters a set of alignments and returns the cluster centroid
def ClusterAlignments(mol,alignments,builder,
neighborTol=0.1,
distMetric=SubshapeDistanceMetric.PROTRUDE,
tempConfId=1001):
from rdkit.ML.Cluster import Butina
dists = []
for i in range(len(alignments)):
TransformMol(mol,alignments[i].transform,newConfId=tempConfId)
shapeI=builder.GenerateSubshapeShape(mol,tempConfId,addSkeleton=False)
for j in range(i):
TransformMol(mol,alignments[j].transform,newConfId=tempConfId+1)
shapeJ=builder.GenerateSubshapeShape(mol,tempConfId+1,addSkeleton=False)
d = GetShapeShapeDistance(shapeI,shapeJ,distMetric)
dists.append(d)
mol.RemoveConformer(tempConfId+1)
mol.RemoveConformer(tempConfId)
clusts=Butina.ClusterData(dists,len(alignments),neighborTol,isDistData=True)
res = [alignments[x[0]] for x in clusts]
return res
def TransformMol(mol,tform,confId=-1,newConfId=100):
""" Applies the transformation to a molecule and sets it up with
a single conformer
"""
newConf = Chem.Conformer()
newConf.SetId(0)
refConf = mol.GetConformer(confId)
for i in range(refConf.GetNumAtoms()):
pos = list(refConf.GetAtomPosition(i))
pos.append(1.0)
newPos = numpy.dot(tform,numpy.array(pos))
newConf.SetAtomPosition(i,list(newPos)[:3])
newConf.SetId(newConfId)
mol.RemoveConformer(newConfId)
mol.AddConformer(newConf,assignId=False)
class SubshapeAligner(object):
triangleRMSTol=1.0
distMetric=SubshapeDistanceMetric.PROTRUDE
shapeDistTol=0.2
numFeatThresh=3
dirThresh=2.6
edgeTol=6.0
#coarseGridToleranceMult=1.5
#medGridToleranceMult=1.25
coarseGridToleranceMult=1.0
medGridToleranceMult=1.0
def GetTriangleMatches(self,target,query):
""" this is a generator function returning the possible triangle
matches between the two shapes
"""
ssdTol = (self.triangleRMSTol**2)*9
res = []
tgtPts = target.skelPts
queryPts = query.skelPts
tgtLs = {}
for i in range(len(tgtPts)):
for j in range(i+1,len(tgtPts)):
l2 = (tgtPts[i].location-tgtPts[j].location).LengthSq()
tgtLs[(i,j)]=l2
queryLs = {}
for i in range(len(queryPts)):
for j in range(i+1,len(queryPts)):
l2 = (queryPts[i].location-queryPts[j].location).LengthSq()
queryLs[(i,j)]=l2
compatEdges={}
tol2 = self.edgeTol*self.edgeTol
for tk,tv in tgtLs.iteritems():
for qk,qv in queryLs.iteritems():
if abs(tv-qv)<tol2:
compatEdges[(tk,qk)]=1
seqNo=0
for tgtTri in _getAllTriangles(tgtPts,orderedTraversal=True):
tgtLocs=[tgtPts[x].location for x in tgtTri]
for queryTri in _getAllTriangles(queryPts,orderedTraversal=False):
if compatEdges.has_key(((tgtTri[0],tgtTri[1]),(queryTri[0],queryTri[1]))) and \
compatEdges.has_key(((tgtTri[0],tgtTri[2]),(queryTri[0],queryTri[2]))) and \
compatEdges.has_key(((tgtTri[1],tgtTri[2]),(queryTri[1],queryTri[2]))):
queryLocs=[queryPts[x].location for x in queryTri]
ssd,tf = Alignment.GetAlignmentTransform(tgtLocs,queryLocs)
if ssd<=ssdTol:
alg = SubshapeAlignment()
alg.transform=tf
alg.triangleSSD=ssd
alg.targetTri=tgtTri
alg.queryTri=queryTri
alg._seqNo=seqNo
seqNo+=1
yield alg
def _checkMatchFeatures(self,targetPts,queryPts,alignment):
nMatched=0
for i in range(3):
tgtFeats = targetPts[alignment.targetTri[i]].molFeatures
qFeats = queryPts[alignment.queryTri[i]].molFeatures
if not tgtFeats and not qFeats:
nMatched+=1
else:
for j,jFeat in enumerate(tgtFeats):
if jFeat in qFeats:
nMatched+=1
break
if nMatched>=self.numFeatThresh:
break
return nMatched>=self.numFeatThresh
def PruneMatchesUsingFeatures(self,target,query,alignments,pruneStats=None):
i = 0
targetPts = target.skelPts
queryPts = query.skelPts
while i<len(alignments):
alg = alignments[i]
if not self._checkMatchFeatures(targetPts,queryPts,alg):
if pruneStats is not None:
pruneStats['features']=pruneStats.get('features',0)+1
del alignments[i]
else:
i+=1
def _checkMatchDirections(self,targetPts,queryPts,alignment):
dot = 0.0
for i in range(3):
tgtPt = targetPts[alignment.targetTri[i]]
queryPt = queryPts[alignment.queryTri[i]]
qv = queryPt.shapeDirs[0]
tv = tgtPt.shapeDirs[0]
rotV =[0.0]*3
rotV[0] = alignment.transform[0,0]*qv[0]+alignment.transform[0,1]*qv[1]+alignment.transform[0,2]*qv[2]
rotV[1] = alignment.transform[1,0]*qv[0]+alignment.transform[1,1]*qv[1]+alignment.transform[1,2]*qv[2]
rotV[2] = alignment.transform[2,0]*qv[0]+alignment.transform[2,1]*qv[1]+alignment.transform[2,2]*qv[2]
dot += abs(rotV[0]*tv[0]+rotV[1]*tv[1]+rotV[2]*tv[2])
if dot>=self.dirThresh:
# already above the threshold, no need to continue
break
alignment.dirMatch=dot
return dot>=self.dirThresh
def PruneMatchesUsingDirection(self,target,query,alignments,pruneStats=None):
i = 0
tgtPts = target.skelPts
queryPts = query.skelPts
while i<len(alignments):
if not self._checkMatchDirections(tgtPts,queryPts,alignments[i]):
if pruneStats is not None:
pruneStats['direction']=pruneStats.get('direction',0)+1
del alignments[i]
else:
i+=1
def _addCoarseAndMediumGrids(self,mol,tgt,confId,builder):
oSpace=builder.gridSpacing
if mol:
builder.gridSpacing = oSpace*1.5
tgt.medGrid = builder.GenerateSubshapeShape(mol,confId,addSkeleton=False)
builder.gridSpacing = oSpace*2
tgt.coarseGrid = builder.GenerateSubshapeShape(mol,confId,addSkeleton=False)
builder.gridSpacing = oSpace
else:
tgt.medGrid = builder.SampleSubshape(tgt,oSpace*1.5)
tgt.coarseGrid = builder.SampleSubshape(tgt,oSpace*2.0)
def _checkMatchShape(self,targetMol,target,queryMol,query,alignment,builder,
targetConf,queryConf,pruneStats=None,tConfId=1001):
matchOk=True
TransformMol(queryMol,alignment.transform,confId=queryConf,newConfId=tConfId)
oSpace=builder.gridSpacing
builder.gridSpacing=oSpace*2
coarseGrid=builder.GenerateSubshapeShape(queryMol,tConfId,addSkeleton=False)
d = GetShapeShapeDistance(coarseGrid,target.coarseGrid,self.distMetric)
if d>self.shapeDistTol*self.coarseGridToleranceMult:
matchOk=False
if pruneStats is not None:
pruneStats['coarseGrid']=pruneStats.get('coarseGrid',0)+1
else:
builder.gridSpacing=oSpace*1.5
medGrid=builder.GenerateSubshapeShape(queryMol,tConfId,addSkeleton=False)
d = GetShapeShapeDistance(medGrid,target.medGrid,self.distMetric)
if d>self.shapeDistTol*self.medGridToleranceMult:
matchOk=False
if pruneStats is not None:
pruneStats['medGrid']=pruneStats.get('medGrid',0)+1
else:
builder.gridSpacing=oSpace
fineGrid=builder.GenerateSubshapeShape(queryMol,tConfId,addSkeleton=False)
d = GetShapeShapeDistance(fineGrid,target,self.distMetric)
#print ' ',d
if d>self.shapeDistTol:
matchOk=False
if pruneStats is not None:
pruneStats['fineGrid']=pruneStats.get('fineGrid',0)+1
alignment.shapeDist=d
queryMol.RemoveConformer(tConfId)
builder.gridSpacing=oSpace
return matchOk
def PruneMatchesUsingShape(self,targetMol,target,queryMol,query,builder,
alignments,tgtConf=-1,queryConf=-1,
pruneStats=None):
if not hasattr(target,'medGrid'):
self._addCoarseAndMediumGrids(targetMol,target,tgtConf,builder)
logger.info("Shape-based Pruning")
i=0
nOrig = len(alignments)
nDone=0
while i < len(alignments):
removeIt=False
alg = alignments[i]
nDone+=1
if not nDone%100:
nLeft = len(alignments)
logger.info(' processed %d of %d. %d alignments remain'%((nDone,
nOrig,
nLeft)))
if not self._checkMatchShape(targetMol,target,queryMol,query,alg,builder,
targetConf=tgtConf,queryConf=queryConf,
pruneStats=pruneStats):
del alignments[i]
else:
i+=1
def GetSubshapeAlignments(self,targetMol,target,queryMol,query,builder,
tgtConf=-1,queryConf=-1,pruneStats=None):
import time
if pruneStats is None:
pruneStats={}
logger.info("Generating triangle matches")
t1=time.time()
res = [x for x in self.GetTriangleMatches(target,query)]
t2=time.time()
logger.info("Got %d possible alignments in %.1f seconds"%(len(res),t2-t1))
pruneStats['gtm_time']=t2-t1
if builder.featFactory:
logger.info("Doing feature pruning")
t1 = time.time()
self.PruneMatchesUsingFeatures(target,query,res,pruneStats=pruneStats)
t2 = time.time()
pruneStats['feats_time']=t2-t1
logger.info("%d possible alignments remain. (%.1f seconds required)"%(len(res),t2-t1))
logger.info("Doing direction pruning")
t1 = time.time()
self.PruneMatchesUsingDirection(target,query,res,pruneStats=pruneStats)
t2 = time.time()
pruneStats['direction_time']=t2-t1
logger.info("%d possible alignments remain. (%.1f seconds required)"%(len(res),t2-t1))
t1 = time.time()
self.PruneMatchesUsingShape(targetMol,target,queryMol,query,builder,res,
tgtConf=tgtConf,queryConf=queryConf,
pruneStats=pruneStats)
t2 = time.time()
pruneStats['shape_time']=t2-t1
return res
def __call__(self,targetMol,target,queryMol,query,builder,
tgtConf=-1,queryConf=-1,pruneStats=None):
for alignment in self.GetTriangleMatches(target,query):
if builder.featFactory and \
not self._checkMatchFeatures(target.skelPts,query.skelPts,alignment):
if pruneStats is not None:
pruneStats['features']=pruneStats.get('features',0)+1
continue
if not self._checkMatchDirections(target.skelPts,query.skelPts,alignment):
if pruneStats is not None:
pruneStats['direction']=pruneStats.get('direction',0)+1
continue
if not hasattr(target,'medGrid'):
self._addCoarseAndMediumGrids(targetMol,target,tgtConf,builder)
if not self._checkMatchShape(targetMol,target,queryMol,query,alignment,builder,
targetConf=tgtConf,queryConf=queryConf,
pruneStats=pruneStats):
continue
# if we made it this far, it's a good alignment
yield alignment
if __name__=='__main__':
import cPickle
tgtMol,tgtShape = cPickle.load(file('target.pkl','rb'))
queryMol,queryShape = cPickle.load(file('query.pkl','rb'))
builder = cPickle.load(file('builder.pkl','rb'))
aligner = SubshapeAligner()
algs = aligner.GetSubshapeAlignments(tgtMol,tgtShape,queryMol,queryShape,builder)
print len(algs)
from rdkit.Chem.PyMol import MolViewer
v = MolViewer()
v.ShowMol(tgtMol,name='Target',showOnly=True)
v.ShowMol(queryMol,name='Query',showOnly=False)
SubshapeObjects.DisplaySubshape(v,tgtShape,'target_shape',color=(.8,.2,.2))
SubshapeObjects.DisplaySubshape(v,queryShape,'query_shape',color=(.2,.2,.8))
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/Subshape/SubshapeAligner.py",
"copies": "2",
"size": "13196",
"license": "bsd-3-clause",
"hash": 552674047822483100,
"line_mean": 36.4886363636,
"line_max": 108,
"alpha_frac": 0.6615641103,
"autogenerated": false,
"ratio": 3.2842210054753607,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9736972004035912,
"avg_score": 0.04176262234788971,
"num_lines": 352
} |
""" Implementation of the clustering algorithm published in:
Butina JCICS 39 747-750 (1999)
"""
import numpy
from rdkit import RDLogger
logger=RDLogger.logger()
def EuclideanDist(pi,pj):
dv = numpy.array(pi)- numpy.array(pj)
  return numpy.sqrt(numpy.dot(dv,dv))
def ClusterData(data,nPts,distThresh,isDistData=False,distFunc=EuclideanDist):
""" clusters the data points passed in and returns the list of clusters
**Arguments**
- data: a list of items with the input data
(see discussion of _isDistData_ argument for the exception)
- nPts: the number of points to be used
- distThresh: elements within this range of each other are considered
to be neighbors
- isDistData: set this toggle when the data passed in is a
distance matrix. The distance matrix should be stored
symmetrically. An example of how to do this:
dists = []
for i in range(nPts):
for j in range(i):
dists.append( distfunc(i,j) )
- distFunc: a function to calculate distances between points.
Receives 2 points as arguments, should return a float
**Returns**
- a tuple of tuples containing information about the clusters:
( (cluster1_elem1, cluster1_elem2, ...),
(cluster2_elem1, cluster2_elem2, ...),
...
)
The first element for each cluster is its centroid.
"""
if isDistData and len(data)>(nPts*(nPts-1)/2):
logger.warning("Distance matrix is too long")
nbrLists = [None]*nPts
for i in range(nPts): nbrLists[i] = []
dmIdx=0
for i in range(nPts):
for j in range(i):
if not isDistData:
dij = distFunc(data[i],data[j])
else:
dij = data[dmIdx]
dmIdx+=1
if dij<=distThresh:
nbrLists[i].append(j)
nbrLists[j].append(i)
#print nbrLists
# sort by the number of neighbors:
tLists = [(len(y),x) for x,y in enumerate(nbrLists)]
tLists.sort()
tLists.reverse()
res = []
seen = [0]*nPts
while tLists:
nNbrs,idx = tLists.pop(0)
if seen[idx]:
continue
tRes = [idx]
for nbr in nbrLists[idx]:
if not seen[nbr]:
tRes.append(nbr)
seen[nbr]=1
res.append(tuple(tRes))
return tuple(res)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/ML/Cluster/Butina.py",
"copies": "2",
"size": "2393",
"license": "bsd-3-clause",
"hash": -7361614801745215000,
"line_mean": 25.8876404494,
"line_max": 78,
"alpha_frac": 0.6130380276,
"autogenerated": false,
"ratio": 3.5663189269746645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.031051693477891305,
"num_lines": 89
} |
""" Implementation of the clustering algorithm published in:
Butina JCICS 39 747-750 (1999)
"""
import numpy
from rdkit import RDLogger
logger = RDLogger.logger()
def EuclideanDist(pi, pj):
dv = numpy.array(pi) - numpy.array(pj)
  return numpy.sqrt(numpy.dot(dv, dv))
def ClusterData(data, nPts, distThresh, isDistData=False, distFunc=EuclideanDist, reordering=False):
""" clusters the data points passed in and returns the list of clusters
**Arguments**
- data: a list of items with the input data
(see discussion of _isDistData_ argument for the exception)
- nPts: the number of points to be used
- distThresh: elements within this range of each other are considered
to be neighbors
- isDistData: set this toggle when the data passed in is a
distance matrix. The distance matrix should be stored
symmetrically. An example of how to do this:
dists = []
for i in range(nPts):
for j in range(i):
dists.append( distfunc(i,j) )
- distFunc: a function to calculate distances between points.
Receives 2 points as arguments, should return a float
    - reordering: if this toggle is set, the number of neighbors is updated
         for the unassigned molecules after a new cluster is created, so
         that the molecule with the largest number of unassigned neighbors
         is always selected as the next cluster center.
**Returns**
- a tuple of tuples containing information about the clusters:
( (cluster1_elem1, cluster1_elem2, ...),
(cluster2_elem1, cluster2_elem2, ...),
...
)
The first element for each cluster is its centroid.
"""
if isDistData and len(data) > (nPts * (nPts - 1) / 2):
logger.warning("Distance matrix is too long")
nbrLists = [None] * nPts
for i in range(nPts):
nbrLists[i] = []
dmIdx = 0
for i in range(nPts):
for j in range(i):
if not isDistData:
dij = distFunc(data[i], data[j])
else:
dij = data[dmIdx]
dmIdx += 1
if dij <= distThresh:
nbrLists[i].append(j)
nbrLists[j].append(i)
#print nbrLists
# sort by the number of neighbors:
tLists = [(len(y), x) for x, y in enumerate(nbrLists)]
tLists.sort(reverse=True)
res = []
seen = [0] * nPts
while tLists:
nNbrs, idx = tLists.pop(0)
if seen[idx]:
continue
tRes = [idx]
for nbr in nbrLists[idx]:
if not seen[nbr]:
tRes.append(nbr)
seen[nbr] = 1
# update the number of neighbors:
# remove all members of the new cluster from the list of
# neighbors and reorder the tLists
if reordering:
# get the list of affected molecules, i.e. all molecules
# which have at least one of the members of the new cluster
# as a neighbor
nbrNbr = [nbrLists[t] for t in tRes]
nbrNbr = frozenset().union(*nbrNbr)
# loop over all remaining molecules in tLists but only
# consider unassigned and affected compounds
for x, y in enumerate(tLists):
y1 = y[1]
if seen[y1] or (y1 not in nbrNbr):
continue
# update the number of neighbors
nbrLists[y1] = set(nbrLists[y1]).difference(tRes)
tLists[x] = (len(nbrLists[y1]), y1)
# now reorder the list
tLists.sort(reverse=True)
res.append(tuple(tRes))
return tuple(res)
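# A minimal clustering sketch using a precomputed distance matrix stored as the
# lower triangle, row by row (the same data used in the unit tests below); the
# threshold of 1.1 is illustrative.
if __name__ == '__main__':
  demoDists = [1, 2, 1, 4, 3, 2, 6, 5, 4, 2, 7, 6, 5, 3, 1]
  demoClusters = ClusterData(demoDists, 6, 1.1, isDistData=True)
  # ((1, 0, 2), (5, 4), (3,)) -- the first index in each tuple is the centroid
  print(demoClusters)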
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/ML/Cluster/Butina.py",
"copies": "1",
"size": "3551",
"license": "bsd-3-clause",
"hash": -6320237006247684000,
"line_mean": 30.149122807,
"line_max": 100,
"alpha_frac": 0.6212334554,
"autogenerated": false,
"ratio": 3.683609958506224,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9736244288179975,
"avg_score": 0.0137198251452497,
"num_lines": 114
} |
import unittest
from rdkit.six.moves import cPickle
from rdkit import RDConfig
from rdkit.ML.Cluster import Butina
class TestCase(unittest.TestCase):
def test1(self):
dists = [1, 2, 1, 4, 3, 2, 6, 5, 4, 2, 7, 6, 5, 3, 1]
nPts = 6
cs = Butina.ClusterData(dists, nPts, 1.1, isDistData=1)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (1, 0, 2))
self.assertTrue(cs[1] == (5, 4))
self.assertTrue(cs[2] == (3, ))
def test2(self):
dists = [.5,
1,
.5,
2,
1.5,
1,
3,
2.5,
2,
1,
5,
4.5,
4,
3,
2,
8,
7.5,
7,
6,
5,
3,
9,
8.5,
8,
7,
6,
4,
1, ]
nPts = 8
cs = Butina.ClusterData(dists, nPts, 2.1, isDistData=1)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (3, 0, 1, 2, 4))
self.assertTrue(cs[1] == (7, 6))
self.assertTrue(cs[2] == (5, ))
def test3(self):
" edge case: everything a singleton "
dists = [1,
2,
1, ]
nPts = 3
cs = Butina.ClusterData(dists, nPts, 0.9, isDistData=1)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (2, ))
self.assertTrue(cs[1] == (1, ))
self.assertTrue(cs[2] == (0, ))
def test4(self):
" edge case: everything in one cluster "
dists = [1,
2,
1,
3,
2,
1, ]
nPts = 4
cs = Butina.ClusterData(dists, nPts, 2, isDistData=1)
self.assertTrue(len(cs) == 1)
self.assertTrue(cs[0] == (3, 0, 1, 2))
  def test5(self):
" edge case: one in the middle leaves the edges lonely "
dists = [1.5,
2.5,
1,
3.5,
2,
1,
5,
3.5,
2.5,
1.5, ]
nPts = 5
cs = Butina.ClusterData(dists, nPts, 1.1, isDistData=1)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (2, 1, 3))
self.assertTrue(cs[1] == (4, ))
self.assertTrue(cs[2] == (0, ))
def test6(self):
" edge case: zero distances: "
dists = [1,
2,
0,
2,
0,
0,
4,
2,
2,
2, ]
nPts = 5
cs = Butina.ClusterData(dists, nPts, 0.9, isDistData=1)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (3, 1, 2))
self.assertTrue(cs[1] == (4, ))
self.assertTrue(cs[2] == (0, ))
def test7(self):
" reordering: no changes "
dists = [1, 2, 1, 4, 3, 2, 6, 5, 4, 2, 7, 6, 5, 3, 1]
nPts = 6
cs = Butina.ClusterData(dists, nPts, 1.1, isDistData=1, reordering=True)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (1, 0, 2))
self.assertTrue(cs[1] == (5, 4))
self.assertTrue(cs[2] == (3, ))
def test8(self):
" reordering: changes"
dists = [2,
3.5,
1.5,
5,
3,
1.5,
7,
5,
3.5,
2,
8,
6,
4.5,
3,
1,
9,
7,
5.5,
4,
2,
1, ]
nPts = 7
# without reordering
cs = Butina.ClusterData(dists, nPts, 2.1, isDistData=1)
self.assertTrue(len(cs) == 3)
self.assertTrue(cs[0] == (4, 3, 5, 6))
self.assertTrue(cs[1] == (2, 1))
self.assertTrue(cs[2] == (0, ))
# with reordering
cs = Butina.ClusterData(dists, nPts, 2.1, isDistData=1, reordering=True)
self.assertTrue(len(cs) == 2)
self.assertTrue(cs[0] == (4, 3, 5, 6))
self.assertTrue(cs[1] == (1, 0, 2))
profileTest = 0
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/ML/Cluster/UnitTestButina.py",
"copies": "1",
"size": "4057",
"license": "bsd-3-clause",
"hash": 8340812294716463000,
"line_mean": 21.6648044693,
"line_max": 76,
"alpha_frac": 0.4269164407,
"autogenerated": false,
"ratio": 3.0230998509687033,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8936703213815784,
"avg_score": 0.002662615570583815,
"num_lines": 179
} |
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Lipinski,Descriptors,Crippen
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
import re
#set up the logger:
import rdkit.RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
def ProcessMol(mol,typeConversions,globalProps,nDone,nameProp='_Name',nameCol='compound_id',
redraw=False,keepHs=False,
skipProps=False,addComputedProps=False,
skipSmiles=False,
uniqNames=None,namesSeen=None):
if not mol:
raise ValueError('no molecule')
if keepHs:
Chem.SanitizeMol(mol)
try:
nm = mol.GetProp(nameProp)
except KeyError:
nm = None
if not nm:
nm = 'Mol_%d'%nDone
if uniqNames and nm in namesSeen:
logger.error('duplicate compound id (%s) encountered. second instance skipped.'%nm)
return None
namesSeen.add(nm)
row = [nm]
if not skipProps:
if addComputedProps:
nHD=Lipinski.NumHDonors(mol)
mol.SetProp('DonorCount',str(nHD))
nHA=Lipinski.NumHAcceptors(mol)
mol.SetProp('AcceptorCount',str(nHA))
nRot=Lipinski.NumRotatableBonds(mol)
mol.SetProp('RotatableBondCount',str(nRot))
MW=Descriptors.MolWt(mol)
mol.SetProp('AMW',str(MW))
logp=Crippen.MolLogP(mol)
mol.SetProp('MolLogP',str(logp))
pns = list(mol.GetPropNames())
pD={}
for pi,pn in enumerate(pns):
if pn.lower()==nameCol.lower(): continue
pv = mol.GetProp(pn).strip()
if pv.find('>')<0 and pv.find('<')<0:
colTyp = globalProps.get(pn,2)
while colTyp>0:
try:
tpi = typeConversions[colTyp][1](pv)
except Exception:
colTyp-=1
else:
break
globalProps[pn]=colTyp
pD[pn]=typeConversions[colTyp][1](pv)
else:
pD[pn]=pv
else:
pD={}
if redraw:
    AllChem.Compute2DCoords(mol)
if not skipSmiles:
row.append(Chem.MolToSmiles(mol,True))
row.append(DbModule.binaryHolder(mol.ToBinary()))
row.append(pD)
return row
def ConvertRows(rows,globalProps,defaultVal,skipSmiles):
for i,row in enumerate(rows):
newRow = [row[0],row[1]]
pD=row[-1]
for pn in globalProps:
pv = pD.get(pn,defaultVal)
newRow.append(pv)
newRow.append(row[2])
if not skipSmiles:
newRow.append(row[3])
rows[i] = newRow
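# A note on the row shapes flowing through this module: ProcessMol returns
# [name, smiles (unless skipSmiles), molpkl, propDict]; LoadDb prepends a
# guid, and ConvertRows flattens propDict into one column per property (in
# globalProps order) so each row lines up with the table schema
# (guid, name, property columns..., smiles, molpkl).  globalProps maps each
# property name to a type code (2=int, 1=float, 0=varchar) that ProcessMol
# demotes whenever a value fails to parse, so a column ends up with the most
# specific SQL type that accepts every value seen.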
def LoadDb(suppl,dbName,nameProp='_Name',nameCol='compound_id',silent=False,
redraw=False,errorsTo=None,keepHs=False,defaultVal='N/A',skipProps=False,
regName='molecules',skipSmiles=False,maxRowsCached=-1,
uniqNames=False,addComputedProps=False,lazySupplier=False,
startAnew=True):
if not lazySupplier:
nMols = len(suppl)
else:
nMols=-1
if not silent:
logger.info("Generating molecular database in file %s"%dbName)
if not lazySupplier:
logger.info(" Processing %d molecules"%nMols)
rows = []
globalProps = {}
namesSeen = set()
nDone = 0
typeConversions={0:('varchar',str),1:('float',float),2:('int',int)}
for m in suppl:
nDone +=1
if not m:
if errorsTo:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(nDone-1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
row=ProcessMol(m,typeConversions,globalProps,nDone,nameProp=nameProp,
nameCol=nameCol,redraw=redraw,
keepHs=keepHs,skipProps=skipProps,
addComputedProps=addComputedProps,skipSmiles=skipSmiles,
uniqNames=uniqNames,namesSeen=namesSeen)
if row is None: continue
rows.append([nDone]+row)
if not silent and not nDone%100:
logger.info(' done %d'%nDone)
if len(rows)==maxRowsCached:
break
nameDef='%s varchar not null'%nameCol
if uniqNames:
nameDef += ' unique'
typs = ['guid integer not null primary key',nameDef]
pns = []
for pn,v in globalProps.items():
addNm = re.sub(r'[\W]','_',pn)
typs.append('%s %s'%(addNm,typeConversions[v][0]))
pns.append(pn.lower())
if not skipSmiles:
if 'smiles' not in pns:
typs.append('smiles varchar')
else:
typs.append('cansmiles varchar')
typs.append('molpkl %s'%(DbModule.binaryTypeName))
conn = DbConnect(dbName)
curs = conn.GetCursor()
if startAnew:
try:
curs.execute('drop table %s'%regName)
except Exception:
pass
curs.execute('create table %s (%s)'%(regName,','.join(typs)))
else:
curs.execute('select * from %s limit 1'%(regName,))
ocolns = set([x[0] for x in curs.description])
ncolns = set([x.split()[0] for x in typs])
if ncolns != ocolns:
raise ValueError('Column names do not match: %s != %s'%(ocolns,ncolns))
curs.execute('select max(guid) from %s'%(regName,))
offset = curs.fetchone()[0]
for row in rows:
row[0] += offset
qs = ','.join([DbModule.placeHolder for x in typs])
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
rows = []
while 1:
nDone +=1
try:
m = next(suppl)
except StopIteration:
break
if not m:
if errorsTo:
if hasattr(suppl,'GetItemText'):
d = suppl.GetItemText(nDone-1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
tmpProps={}
row=ProcessMol(m,typeConversions,globalProps,nDone,nameProp=nameProp,
nameCol=nameCol,redraw=redraw,
keepHs=keepHs,skipProps=skipProps,
addComputedProps=addComputedProps,skipSmiles=skipSmiles,
uniqNames=uniqNames,namesSeen=namesSeen)
if not row: continue
rows.append([nDone]+row)
if not silent and not nDone%100:
logger.info(' done %d'%nDone)
if len(rows)==maxRowsCached:
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
rows = []
if len(rows):
ConvertRows(rows,globalProps,defaultVal,skipSmiles)
curs.executemany('insert into %s values (%s)'%(regName,qs),rows)
conn.Commit()
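# A minimal usage sketch; the 'mols.sdf' and 'mols.sqlt' names below are
# hypothetical placeholders, and any RDKit Mol supplier (SDMolSupplier,
# SmilesMolSupplier, ...) should work the same way.
if __name__ == '__main__':  # pragma: no cover
  suppl = Chem.SDMolSupplier('mols.sdf')
  LoadDb(suppl, 'mols.sqlt', nameProp='_Name', regName='molecules',
         uniqNames=True, addComputedProps=True)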
| {
"repo_name": "adalke/rdkit",
"path": "rdkit/Chem/MolDb/Loader_orig.py",
"copies": "1",
"size": "6608",
"license": "bsd-3-clause",
"hash": -8750633315164838000,
"line_mean": 30.317535545,
"line_max": 92,
"alpha_frac": 0.6372578692,
"autogenerated": false,
"ratio": 3.209324915007285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9165449428095416,
"avg_score": 0.0362266712223738,
"num_lines": 211
} |
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import Lipinski, Descriptors, Crippen
from rdkit.Dbase.DbConnection import DbConnect
from rdkit.Dbase import DbModule
import re
#set up the logger:
import rdkit.RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
def ProcessMol(mol, typeConversions, globalProps, nDone, nameProp='_Name', nameCol='compound_id',
redraw=False, keepHs=False, skipProps=False, addComputedProps=False,
skipSmiles=False, uniqNames=None, namesSeen=None):
if not mol:
raise ValueError('no molecule')
if keepHs:
Chem.SanitizeMol(mol)
try:
nm = mol.GetProp(nameProp)
except KeyError:
nm = None
if not nm:
nm = 'Mol_%d' % nDone
if uniqNames and nm in namesSeen:
logger.error('duplicate compound id (%s) encountered. second instance skipped.' % nm)
return None
namesSeen.add(nm)
row = [nm]
if not skipProps:
if addComputedProps:
nHD = Lipinski.NumHDonors(mol)
mol.SetProp('DonorCount', str(nHD))
nHA = Lipinski.NumHAcceptors(mol)
mol.SetProp('AcceptorCount', str(nHA))
nRot = Lipinski.NumRotatableBonds(mol)
mol.SetProp('RotatableBondCount', str(nRot))
MW = Descriptors.MolWt(mol)
mol.SetProp('AMW', str(MW))
logp = Crippen.MolLogP(mol)
mol.SetProp('MolLogP', str(logp))
pns = list(mol.GetPropNames())
pD = {}
for pi, pn in enumerate(pns):
if pn.lower() == nameCol.lower():
continue
pv = mol.GetProp(pn).strip()
if pv.find('>') < 0 and pv.find('<') < 0:
colTyp = globalProps.get(pn, 2)
while colTyp > 0:
try:
tpi = typeConversions[colTyp][1](pv)
except Exception:
colTyp -= 1
else:
break
globalProps[pn] = colTyp
pD[pn] = typeConversions[colTyp][1](pv)
else:
pD[pn] = pv
else:
pD = {}
if redraw:
    AllChem.Compute2DCoords(mol)
if not skipSmiles:
row.append(Chem.MolToSmiles(mol, True))
row.append(DbModule.binaryHolder(mol.ToBinary()))
row.append(pD)
return row
def ConvertRows(rows, globalProps, defaultVal, skipSmiles):
for i, row in enumerate(rows):
newRow = [row[0], row[1]]
pD = row[-1]
for pn in globalProps:
pv = pD.get(pn, defaultVal)
newRow.append(pv)
newRow.append(row[2])
if not skipSmiles:
newRow.append(row[3])
rows[i] = newRow
def LoadDb(suppl, dbName, nameProp='_Name', nameCol='compound_id', silent=False, redraw=False,
errorsTo=None, keepHs=False, defaultVal='N/A', skipProps=False, regName='molecules',
skipSmiles=False, maxRowsCached=-1, uniqNames=False, addComputedProps=False,
lazySupplier=False, startAnew=True):
if not lazySupplier:
nMols = len(suppl)
else:
nMols = -1
if not silent:
logger.info("Generating molecular database in file %s" % dbName)
if not lazySupplier:
logger.info(" Processing %d molecules" % nMols)
rows = []
globalProps = {}
namesSeen = set()
nDone = 0
typeConversions = {0: ('varchar', str), 1: ('float', float), 2: ('int', int)}
for m in suppl:
nDone += 1
if not m:
if errorsTo:
if hasattr(suppl, 'GetItemText'):
d = suppl.GetItemText(nDone - 1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
row = ProcessMol(m, typeConversions, globalProps, nDone, nameProp=nameProp, nameCol=nameCol,
redraw=redraw, keepHs=keepHs, skipProps=skipProps,
addComputedProps=addComputedProps, skipSmiles=skipSmiles, uniqNames=uniqNames,
namesSeen=namesSeen)
if row is None:
continue
rows.append([nDone] + row)
if not silent and not nDone % 100:
logger.info(' done %d' % nDone)
if len(rows) == maxRowsCached:
break
nameDef = '%s varchar not null' % nameCol
if uniqNames:
nameDef += ' unique'
typs = ['guid integer not null primary key', nameDef]
pns = []
for pn, v in globalProps.items():
addNm = re.sub(r'[\W]', '_', pn)
typs.append('%s %s' % (addNm, typeConversions[v][0]))
pns.append(pn.lower())
if not skipSmiles:
if 'smiles' not in pns:
typs.append('smiles varchar')
else:
typs.append('cansmiles varchar')
typs.append('molpkl %s' % (DbModule.binaryTypeName))
conn = DbConnect(dbName)
curs = conn.GetCursor()
if startAnew:
try:
curs.execute('drop table %s' % regName)
except Exception:
pass
curs.execute('create table %s (%s)' % (regName, ','.join(typs)))
else:
curs.execute('select * from %s limit 1' % (regName, ))
ocolns = set([x[0] for x in curs.description])
ncolns = set([x.split()[0] for x in typs])
if ncolns != ocolns:
raise ValueError('Column names do not match: %s != %s' % (ocolns, ncolns))
curs.execute('select max(guid) from %s' % (regName, ))
offset = curs.fetchone()[0]
for row in rows:
row[0] += offset
qs = ','.join([DbModule.placeHolder for x in typs])
ConvertRows(rows, globalProps, defaultVal, skipSmiles)
curs.executemany('insert into %s values (%s)' % (regName, qs), rows)
conn.Commit()
rows = []
while 1:
nDone += 1
try:
m = next(suppl)
except StopIteration:
break
if not m:
if errorsTo:
if hasattr(suppl, 'GetItemText'):
d = suppl.GetItemText(nDone - 1)
errorsTo.write(d)
else:
logger.warning('full error file support not complete')
continue
tmpProps = {}
row = ProcessMol(m, typeConversions, globalProps, nDone, nameProp=nameProp, nameCol=nameCol,
redraw=redraw, keepHs=keepHs, skipProps=skipProps,
addComputedProps=addComputedProps, skipSmiles=skipSmiles, uniqNames=uniqNames,
namesSeen=namesSeen)
if not row:
continue
rows.append([nDone] + row)
if not silent and not nDone % 100:
logger.info(' done %d' % nDone)
if len(rows) == maxRowsCached:
ConvertRows(rows, globalProps, defaultVal, skipSmiles)
curs.executemany('insert into %s values (%s)' % (regName, qs), rows)
conn.Commit()
rows = []
if len(rows):
ConvertRows(rows, globalProps, defaultVal, skipSmiles)
curs.executemany('insert into %s values (%s)' % (regName, qs), rows)
conn.Commit()
| {
"repo_name": "rvianello/rdkit",
"path": "rdkit/Chem/MolDb/Loader_orig.py",
"copies": "12",
"size": "6741",
"license": "bsd-3-clause",
"hash": 645975154123385900,
"line_mean": 30.9478672986,
"line_max": 99,
"alpha_frac": 0.6246847649,
"autogenerated": false,
"ratio": 3.220735785953177,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9845420550853177,
"avg_score": null,
"num_lines": null
} |
import os
import io
import sys
import unittest
from rdkit import RDConfig
#import pickle
from rdkit.six.moves import cPickle as pickle
from rdkit import DataStructs as ds
class TestCase(unittest.TestCase):
  def setUp(self):
pass
def test1Discrete(self):
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.ONEBITVALUE, 30)
for i in range(15):
      v1[2*i] = 1
self.assertTrue(len(v1) == 30)
self.assertTrue(v1.GetTotalVal() == 15)
for i in range(len(v1)):
self.assertTrue(v1[i] == (i+1)%2)
self.assertRaises(ValueError, lambda : v1.__setitem__(5, 2))
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.TWOBITVALUE, 30)
for i in range(len(v1)):
      v1[i] = i%4
self.assertTrue(len(v1) == 30)
for i in range(len(v1)):
self.assertTrue(v1[i] == i%4)
self.assertRaises(ValueError, lambda : v1.__setitem__(10, 6))
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.FOURBITVALUE, 30)
for i in range(len(v1)):
      v1[i] = i%16
self.assertTrue(len(v1) == 30)
self.assertTrue(v1.GetTotalVal() == 211)
for i in range(len(v1)):
self.assertTrue(v1[i] == i%16)
self.assertRaises(ValueError, lambda : v1.__setitem__(10, 16))
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.EIGHTBITVALUE, 32)
for i in range(len(v1)):
      v1[i] = i%256
self.assertTrue(len(v1) == 32)
self.assertTrue(v1.GetTotalVal() == 496)
for i in range(len(v1)):
self.assertTrue(v1[i] == i%256)
self.assertRaises(ValueError, lambda : v1.__setitem__(10, 256))
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.SIXTEENBITVALUE, 300)
for i in range(len(v1)):
      v1[i] = i%300
self.assertTrue(len(v1) == 300)
self.assertTrue(v1.GetTotalVal() == 44850)
self.assertRaises(ValueError, lambda : v1.__setitem__(10, 65536))
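    # Each DiscreteValueType packs unsigned values of the corresponding bit
    # width (1, 2, 4, 8 or 16 bits); assigning an out-of-range value raises
    # ValueError, and GetTotalVal() is simply the sum of the stored entries
    # (e.g. 211 == sum(i % 16 for i in range(30)) in the FOURBITVALUE case).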
def test2VectDistances(self):
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.ONEBITVALUE, 30)
v2 = ds.DiscreteValueVect(ds.DiscreteValueType.ONEBITVALUE, 30)
for i in range(15):
v1[2*i] = 1
v2[2*i] = 1
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
for i in range(30):
if (i%3 == 0):
v2[i] = 1
else:
v2[i] = 0
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 15)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.TWOBITVALUE, 30)
v2 = ds.DiscreteValueVect(ds.DiscreteValueType.TWOBITVALUE, 30)
for i in range(30):
v1[i] = i%4
v2[i] = (i+1)%4
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 44)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.FOURBITVALUE, 16)
v2 = ds.DiscreteValueVect(ds.DiscreteValueType.FOURBITVALUE, 16)
for i in range(16):
v1[i] = i%16
v2[i] = i%5
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 90)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.EIGHTBITVALUE, 5)
v2 = ds.DiscreteValueVect(ds.DiscreteValueType.EIGHTBITVALUE, 5)
v1[0] = 34
v1[1] = 167
v1[2] = 3
v1[3] = 56
v1[4] = 128
v2[0] = 14
v2[1] = 67
v2[2] = 103
v2[3] = 6
v2[4] = 228
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 370)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.SIXTEENBITVALUE, 3)
v2 = ds.DiscreteValueVect(ds.DiscreteValueType.SIXTEENBITVALUE, 3)
v1[0] = 2345
v1[1] = 64578
v1[2] = 34
v2[0] = 1345
v2[1] = 54578
v2[2] = 10034
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 21000)
def test3Pickles(self):
#outF = file('dvvs.pkl','wb+')
with open(
os.path.join(RDConfig.RDBaseDir,
'Code/DataStructs/Wrap/testData/dvvs.pkl'),
'r'
) as inTF:
buf = inTF.read().replace('\r\n', '\n').encode('utf-8')
inTF.close()
with io.BytesIO(buf) as inF:
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.ONEBITVALUE, 30)
for i in range(15):
v1[2*i] = 1
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
#cPickle.dump(v1,outF)
v2=pickle.load(inF, encoding='bytes')
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
self.assertTrue(v1.GetTotalVal()==v2.GetTotalVal())
self.assertTrue(v2.GetTotalVal()!=0)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.TWOBITVALUE, 30)
for i in range(30):
v1[i] = i%4
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
#pickle.dump(v1,outF)
v2=pickle.load(inF, encoding='bytes')
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
self.assertTrue(v1.GetTotalVal()==v2.GetTotalVal())
self.assertTrue(v2.GetTotalVal()!=0)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.FOURBITVALUE, 16)
for i in range(16):
v1[i] = i%16
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
#pickle.dump(v1,outF)
v2=pickle.load(inF, encoding='bytes')
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
self.assertTrue(v1.GetTotalVal()==v2.GetTotalVal())
self.assertTrue(v2.GetTotalVal()!=0)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.EIGHTBITVALUE, 5)
v1[0] = 34
v1[1] = 167
v1[2] = 3
v1[3] = 56
v1[4] = 128
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
#pickle.dump(v1,outF)
v2=pickle.load(inF, encoding='bytes')
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
self.assertTrue(v1.GetTotalVal()==v2.GetTotalVal())
self.assertTrue(v2.GetTotalVal()!=0)
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.SIXTEENBITVALUE, 3)
v1[0] = 2345
v1[1] = 64578
v1[2] = 34
v2 = pickle.loads(pickle.dumps(v1))
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
#pickle.dump(v1,outF)
v2=pickle.load(inF, encoding='bytes')
self.assertTrue(ds.ComputeL1Norm(v1, v2) == 0)
self.assertTrue(v1.GetTotalVal()==v2.GetTotalVal())
self.assertTrue(v2.GetTotalVal()!=0)
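    # The reference pickles in dvvs.pkl were written by an earlier run of this
    # test (see the commented-out dump calls); reading the file in text mode
    # and normalizing '\r\n' to '\n' is presumably there to keep the byte
    # stream identical across platforms before handing it to pickle.load.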
def test4DiscreteVectOps(self):
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.TWOBITVALUE, 8)
for i in range(4):
v1[2*i] = 2
self.assertTrue(v1.GetTotalVal()==8)
v2 = ds.DiscreteValueVect(ds.DiscreteValueType.TWOBITVALUE, 8)
for i in range(4):
v2[2*i+1] = 2
v2[2*i] = 1
self.assertTrue(v2.GetTotalVal()==12)
v3 = v1|v2
self.assertTrue(len(v3)==len(v2))
self.assertTrue(v3.GetTotalVal()==16)
v3 = v1&v2
self.assertTrue(len(v3)==len(v2))
self.assertTrue(v3.GetTotalVal()==4)
v4 = v1+v2
self.assertTrue(len(v4)==len(v2))
self.assertTrue(v4.GetTotalVal()==20)
v4 = v1-v2
self.assertTrue(v4.GetTotalVal()==4)
v4 = v2-v1
self.assertTrue(v4.GetTotalVal()==8)
v4 = v2
v4 -= v1
self.assertTrue(v4.GetTotalVal()==8)
v4 -= v4
self.assertTrue(v4.GetTotalVal()==0)
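    # The expected totals above are consistent with element-wise semantics:
    # | takes the per-position maximum, & the per-position minimum, + adds
    # within the vector's value range, and - subtracts with clamping at zero,
    # which is why v1 - v2 and v2 - v1 give different totals.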
def testIterator(self):
"""
connected to sf.net issue 1719831:
http://sourceforge.net/tracker/index.php?func=detail&aid=1719831&group_id=160139&atid=814650
"""
v1 = ds.DiscreteValueVect(ds.DiscreteValueType.ONEBITVALUE, 30)
for i in range(15):
      v1[2*i] = 1
l1 = list(v1)
self.assertTrue(len(l1)==len(v1))
for i,v in enumerate(v1):
self.assertTrue(l1[i]==v)
self.assertRaises(IndexError,lambda :v1[40])
def test9ToNumpy(self):
import numpy
bv = ds.DiscreteValueVect(ds.DiscreteValueType.FOURBITVALUE,32)
bv[0]=1
bv[1]=4
bv[17]=1
bv[23]=8
bv[31]=12
arr = numpy.zeros((3,),'i')
ds.ConvertToNumpyArray(bv,arr)
for i in range(len(bv)):
self.assertEqual(bv[i],arr[i])
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "soerendip42/rdkit",
"path": "Code/DataStructs/Wrap/testDiscreteValueVect.py",
"copies": "3",
"size": "7784",
"license": "bsd-3-clause",
"hash": -1170882445160915700,
"line_mean": 28.7099236641,
"line_max": 96,
"alpha_frac": 0.6202466598,
"autogenerated": false,
"ratio": 2.6548431105047747,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.964637917957194,
"avg_score": 0.02574211814656705,
"num_lines": 262
} |