'''
Created on May 1, 2012

@author: tel
'''
import bisect, re
from collections import OrderedDict

from datasql import MakeDataSQL, DataSQL, hasColumn
from rules import PKRule, FortRule, MCCECmpMixin
from levels_error import ConformerEmptyError, AtomMismatchError

import numpy
from scipy.sparse import coo_matrix, lil_matrix, vstack
from sqlalchemy import Column, PickleType
from sqlalchemy.schema import Index
from sqlalchemy.orm import mapper

# one pH unit expressed in kcal/mol (2.303*R*T at roughly 298 K)
PH_TO_KCAL = 1.364

#important info, taken from the mcce .tpl files
# solution pKa per residue / cofactor 3-letter code; 0.0 marks non-titratable
# types.  NOTE(review): the '_ZN' value of -91.248 looks like an Em/sentinel
# value rather than a real pKa -- confirm against the .tpl files.
sol_pk = {'HIS' : 6.98, 'ARG' : 12.5, 'ASP' : 4.75, 'GLU' : 4.75, 'LYS' : 10.4 , 'TYR' : 10.2, 'CYS' : 9.1, 'NTR' : 8.0, 'NTG' : 8.0, 'CTR' : 3.5, 'SER' : 0.0, 'LEU' : 0.0, '_ZN': -91.248, 'ALA': 0.0, 'ILE': 0.0, 'PRO': 0.0, 'GLY': 0.0, 'ASN':0.0, 'GLN':0.0, 'PHE':0.0, 'SO4': 0.0, 'VAL':0.0, 'THR':0.0, 'MET':0.0, '_CU':0.0, 'OCS':0.0, 'FE2':0.0, 'HEM':0.0, 'NH4':0.0 }
# extra residue types for the cytochrome c oxidase runs
oxidase_res_sol_pk = {'TYF' : 8.9,  'PEH' : 9.8, 'HEA' : 0, 'PAA' : 4.9, 'PDD' : 4.9, 'FAL' : 0, 'HE3' : 7.0, 'CUA' : 0, '_MG' : 0,'_CA' : 0, '_CL' : 0, '_NA' : 0}
# extra residue type for the bacteriorhodopsin runs (retinal Schiff base)
bacteriorhodopsin_res_sol_pk = {'RSB' : 7.0}
sol_pk.update(oxidase_res_sol_pk) 
sol_pk.update(bacteriorhodopsin_res_sol_pk)


def Stitcher(dict, bkey, okeys):
    '''
    Generator that walks dict[bkey] and, for each base element, collects the
    run of equal elements from each of the other lists dict[okey].

    Yields (base_element, {okey: [matching elements]}); an okey with no match
    is simply absent from the mapping.  Each okey list is scanned with its
    own advancing cursor, so the lists are assumed to be sorted the same way
    as dict[bkey].
    '''
    cursors = [0] * len(okeys)
    for bele in dict[bkey]:
        matched = {}
        for i, okey in enumerate(okeys):
            run = []
            for offset, oele in enumerate(dict[okey][cursors[i]:]):
                if bele == oele:
                    run.append(oele)
                elif run:
                    # the matching run just ended: remember where, stop scanning
                    cursors[i] += offset
                    break
            if run:
                matched[okey] = run
        yield (bele, matched)

def SortedListMatch(list1, list2):
    '''
    takes two lists and returns a list of tuples
    tup[0] will contain an element from list1
    tup[1] will contain all elements from list2 that match the given element from list1

    Both lists are assumed to be sorted alike; list2 is scanned with a single
    advancing cursor, so each of its elements is visited at most a few times.
    '''
    matches = []
    cursor = 0
    for target in list1:
        block = []
        for offset, candidate in enumerate(list2[cursor:]):
            if target == candidate:
                block.append(candidate)
            elif block:
                # end of the matching block: advance the cursor and move on
                cursor += offset
                break
        matches.append((target, block))
    return matches

def SortedMatch(list, start, mtok, mfunc):
    '''
    searches through list from start element
    checks every element of list against some mtok, using mfunc
    for a given element, if mfunc is true, the element is added to the list of matches
    expects to find a single block of matching elements in list, so if at least one element has already been found the search loop breaks if mfunc is then false
    returns (index, matches); when nothing matched the defaults (0, []) are
    returned instead
    '''
    matches = []
    found = False
    for offset, elem in enumerate(list[start:]):
        if mfunc(mtok, elem):
            found = True
            matches.append(elem)
        elif found:
            # first non-match after a matching run; start+offset is the
            # absolute index of elem in list (not in the slice)
            return start + offset, matches
    if found:
        # the matching run reached the end of the list; offset is the last
        # index scanned in the slice
        return start + offset, matches
    return (0, [])

def RemoveOne(list, mtok, mfunc):
    '''
    Remove, in place, the first element of list for which mfunc(mtok, elem)
    is true.  Returns True when something was removed, False otherwise.
    '''
    for idx, candidate in enumerate(list):
        if mfunc(mtok, candidate):
            del list[idx]
            return True
    return False

def ObjAvg(list, attr):
    '''
    Average of float(getattr(obj, attr)) over list, returned as a string.
    Note the asymmetric return: an empty list yields the integer 0, not '0'.
    '''
    values = [float(getattr(obj, attr)) for obj in list]
    try:
        return str(sum(values) / len(values))
    except ZeroDivisionError:
        return 0

def SliceCol(list, coln):
    '''Return column coln of a 2-D sequence as a new list.'''
    return [row[coln] for row in list]

class Level(DataSQL, MCCECmpMixin):
    '''Common base class for the Prot/Run/Res/Conf/Atom data levels.'''

    @classmethod
    def Org(cls, data):
        '''
        Hook for subclasses: build and store instances from data, a dict of
        Data objects keyed by MCCE filename.  Base implementation is a no-op.
        '''
        pass

    @classmethod
    def RAddChildren(cls, instance, child):
        '''
        Recursively attach child, and all of child's own descendants, to
        instance via the list attribute named '<dtype>s'.
        '''
        getattr(instance, child.dtype + 's').append(child)
        if child.ChildDClass is None:
            return
        for grandchild in getattr(child, child.ChildDClass.dtype + 's'):
            cls.RAddChildren(instance, grandchild)

    @classmethod
    def AddChildren(cls):
        '''Hook for subclasses: link stored instances to their children.'''
        pass

    @classmethod
    def BuildTable(cls, data_tuples, reloadt):
        '''Delegate table creation, then index the commonly-queried columns.'''
        super(Level, cls).BuildTable(data_tuples, reloadt)
        for attr in ('pdb', 'chainid', 'res', 'resn', 'confn', 'atomn', 'seqadv'):
            if hasColumn(cls.table, attr):
                Index('ix_%s.%s' % (cls.dtype, attr), cls.table.c.get(attr))

    @classmethod
    def Store(cls, data_tuples):
        '''Thin pass-through to the DataSQL Store implementation.'''
        super(Level, cls).Store(data_tuples)

class AtomData(Level):
    '''A single atom record (one line of step2_out.pdb).'''
    dtype = 'Atom'
    parent_dtype = 'Conf'
    super_parent_dtypes = ['Prot', 'Run', 'Res']
    ChildDClass = None
    SubChildDClasses = []

    @classmethod
    def Org(cls, data):
        '''Store one Atom record per step2_out.pdb datum.'''
        for datum in data['step2_out.pdb']:
            cls.Store(datum.GetDataTuples())

    def Dist(self, other):
        '''Euclidean distance between this atom and other.'''
        dx = self.x - other.x
        dy = self.y - other.y
        dz = self.z - other.z
        return numpy.sqrt(dx**2 + dy**2 + dz**2)

    def IsHeavy(self):
        '''True for non-hydrogens: the atom name does not start with an
        optional digit followed by H.'''
        return re.match('^[0-9]?H', self.atom) is None

    def IsSame(self, other):
        '''True when chainid, resn, res and atom name all agree with other.'''
        return all(getattr(self, attr) == getattr(other, attr)
                   for attr in ('chainid', 'resn', 'res', 'atom'))

    def __str__(self):
        fields = (self.Prot.pdb,
                  self.Run.EPSILON_PROT,
                  self.Res.res,
                  self.Res.chainid,
                  self.Res.resn,
                  self.Conf.confn,
                  self.atomn,
                  self.atom)
        return '%s/e%s\t%s\t%s\t%s\t%s\t%s\t%s\t' % fields

    @staticmethod
    def StrHeaders():
        '''Column headers matching the __str__ layout.'''
        return '%s/%s\t%s\t%s\t%s\t%s\t%s\t%s\t' % ('pdb','run_id','res_name','chainid','res_number','conf_number','atom_number','atom_name')
     
class ConfData(Level):
    '''
    One conformer of a residue (a head3.lst row), plus a synthetic '000'
    backbone conformer per residue created in Org().
    '''
    dtype = 'Conf'
    parent_dtype = 'Res'
    super_parent_dtypes = ['Prot', 'Run']
    ChildDClass = AtomData
    SubChildDClasses = []

    def __str__(self):
        return '%s/e%s\t%s\t%s\t%s\t%s\t' % (self.Prot.pdb,
                                             self.Run.EPSILON_PROT,
                                             self.Res.res,
                                             self.Res.chainid,
                                             self.Res.resn,
                                             self.confn)

    @classmethod
    def Org(cls, data):
        '''
        Build conformer records by stitching head3.lst rows with the matching
        fort.38 rows (occupancy at each titration point).
        '''
        for h3datum, match in Stitcher(data, 'head3.lst', (('fort.38'),)):
            # conformer types beginning with '0' are renamed 'n<x>' (neutral)
            if h3datum.__getattribute__('conft')[0]=='0':
                h3datum.__setattr__('conft', 'n'+h3datum.__getattribute__('conft')[1])
            if h3datum.__getattribute__('confn')=='001':    #this makes a data-lite conformer for storing the backbone atoms
                backbone = [('confn','000'),
                            ('conft','b0')]
                for attr in ('res','chainid','resn'):
                    backbone += [(attr,h3datum.__getattribute__(attr))]
                cls.Store(backbone)
            data_tuples = h3datum.GetDataTuples()
            # occupancy maps titration point (float) -> occupancy (float);
            # zip(*...)[0] relies on Python 2 zip returning a list
            data_tuples += [('occupancy',dict([(float(key),float(value)) for key, value in match['fort.38'][0].GetDataTuples(exclude=zip(*FortRule.nsub_tups)[0]+('typ',))]))]
            cls.Store(data_tuples)

    @classmethod
    def AddChildren(cls):
        '''Attach each AtomData instance to its matching conformer.'''
        for conf, atoms in SortedListMatch(cls.instances, AtomData.instances):
            for atom in atoms:
                cls.RAddChildren(conf, atom)

    @classmethod
    def BuildTable(cls, data_tuples, reloadt):
        '''
        Build the table from the first non-backbone ('000') instance, so the
        column set comes from a full conformer record.
        '''
        for i, instance in enumerate(cls.instances):
            if instance[zip(*instance)[0].index('confn')][1]!='000':
                break
        super(ConfData,cls).BuildTable(cls.instances[i], reloadt)

    @classmethod
    def ReloadTable(cls, standalone, override_columns=None):
        '''Reload the table; occupancy is a dict and must be stored pickled.'''
        override_columns=[Column('occupancy',PickleType())]
        super(ConfData,cls).ReloadTable(standalone=False, override_columns=override_columns)

    def RMSD(self, other, session):
        '''
        Heavy-atom RMSD against other, pairing atoms by name through a SQL
        query.  Returns -1 when no atoms could be paired.
        '''
        D = []
        for atom in self.Atoms:
            oatom = session.query(AtomData).with_parent(other, 'Atoms').filter(AtomData.atom==atom.atom).first()
            if oatom!=None:
                if not re.match('^[0-9]?H', atom.atom):
                    D.append(atom.Dist(oatom))
        if D==[]:
            return -1
        else:
            return numpy.sqrt(numpy.mean(numpy.power(D,2)))

    def GenHeavyAtoms(self):
        '''Yield this conformer's non-hydrogen atoms in order.'''
        for atom in self.Atoms:
            if atom.IsHeavy():
                yield atom

    def CRMSD(self):
        '''
        calculates heavy atom RMSD wrt crystal conformer

        Res.Confs[1] is taken to be the crystal conformer (Confs[0] is the
        synthetic backbone).  Atoms are paired positionally and must agree
        (AtomMismatchError otherwise).  Returns -1 when nothing paired.
        '''
        other = self.Res.Confs[1]
        D = []
        for atom, oatom in zip(self.GenHeavyAtoms(), other.GenHeavyAtoms()):
            if not atom.IsSame(oatom):
                raise AtomMismatchError(self, other)
            D.append(atom.Dist(oatom))
        if D==[]:
            return -1
        else:
            return numpy.sqrt(numpy.mean(numpy.power(D,2)))

    def GetOccCol(self):
        '''
        fetches the occmatrix column from Run that stores this conformer's occupancy at all titration points
        '''
        return self.Run.occmatrix[:, self.iConf]

    def MinDist(self, other):
        '''Minimum interatomic distance between this conformer and other.'''
        # renamed from 'min' so the builtin is no longer shadowed
        min_dist = self.Atoms[0].Dist(other.Atoms[0])
        for oatom in other.Atoms[1:]:
            dist = self.Atoms[0].Dist(oatom)
            if dist < min_dist:
                min_dist = dist
        for atom in self.Atoms[1:]:
            for oatom in other.Atoms:
                dist = atom.Dist(oatom)
                if dist < min_dist:
                    min_dist = dist
        return min_dist

    def Getdgxpw(self, titr, kind):
        '''Occupancy-weighted pairwise energy of the given kind felt by this
        conformer at titration point titr.'''
        #the 0 in the trailing index is due to the fact that WeightedSumEnergies are stored in a 1 X N sparse matrix, not a list
        return self.Run.GetWeightedSumEnergies(titr, kind)[0, self.iConf]

    def Getdgepw(self, titr):
        '''Occupancy-weighted pairwise electrostatic energy at titr.'''
        return self.Getdgxpw(titr, 'epw')

    def Getdgvpw(self, titr):
        '''Occupancy-weighted pairwise vdW energy at titr, scaled by the run's
        SCALE_VDW factor.'''
        return self.Getdgxpw(titr, 'vpw')*self.Run.SCALE_VDW

    @property
    def epw(self):
        '''Row of the run's pairwise electrostatic matrix for this conformer
        (cached on first access).'''
        try:
            return self._epw
        except AttributeError:
            self._epw = self.Run.epw[self.iConf,:]
            return self._epw

    @property
    def vpw(self):
        '''Row of the run's pairwise vdW matrix for this conformer (cached on
        first access).'''
        try:
            return self._vpw
        except AttributeError:
            # BUG FIX: this previously read self.Run.epw (copy/paste from the
            # epw property above), which made vpw identical to epw
            self._vpw = self.Run.vpw[self.iConf,:]
            return self._vpw

    @property
    def dgself(self):
        '''Conformer self-energy: scaled vdW terms plus torsion, polar,
        desolvation and extra terms.'''
        return self.vdw0*self.Run.SCALE_VDW0 + self.vdw1*self.Run.SCALE_VDW1 + self.tors + self.epol + self.dsolv + self.extra

    @staticmethod
    def StrHeaders():
        '''Column headers matching the __str__ layout.'''
        return '%s/%s\t%s\t%s\t%s\t%s\t' % ('pdb','run_id','res_name','chainid','res_number','conf_number')
        
class ResData(Level):
    '''
    One residue of a run, stitched together from head1.lst, pK.out,
    head2.lst and the pdb SEQADV records.
    '''
    dtype = 'Res'
    parent_dtype = 'Run'
    super_parent_dtypes = ['Prot']
    ChildDClass = ConfData
    SubChildDClasses = [AtomData]

    def __str__(self):
        try:
            return '%s/e%s\t%s\t%s\t%s\t' % (self.Prot.pdb,self.Run.RUN_ID,self.res,self.chainid,self.resn)
        except AttributeError:
            # runs without a RUN_ID fall back to the protein dielectric
            return '%s/e%s\t%s\t%s\t%s\t' % (self.Prot.pdb,self.Run.EPSILON_PROT,self.res,self.chainid,self.resn)

    @classmethod
    def Org(cls, data):
        '''
        Build residue records.  For each head1.lst row, attach the matching
        pK.out fit (flagging out-of-range and too-sharp titrations), the
        average head2.lst ASA, and any SEQADV conflict annotation.
        '''
        for h1datum, mdict in Stitcher(data, 'head1.lst', ('pK.out', 'head2.lst', 'seqadv')):
            data_tuples = h1datum.GetDataTuples()
            try:
                pkdatum = mdict['pK.out'][0]
                pka = getattr(pkdatum, 'pKa/Em')
                if '>' in pka:
                    # pKa above the titrated range: keep the bound, zero the fit stats
                    orh = 1
                    orl = 0
                    toosharp = 0
                    pka = pka.strip('>')
                    setattr(pkdatum, 'pKa/Em', float(pka))
                    setattr(pkdatum, 'n(slope)', 0)
                    setattr(pkdatum, '1000*chi2', 0)
                elif '<' in pka:
                    # pKa below the titrated range
                    orh = 0
                    orl = 1
                    toosharp = 0
                    pka = pka.strip('<')
                    setattr(pkdatum, 'pKa/Em', float(pka))
                    setattr(pkdatum, 'n(slope)', 0)
                    setattr(pkdatum, '1000*chi2', 0)
                elif 'titr' in pka:
                    # pK.out marks this titration curve as too sharp to fit
                    orh = 0
                    orl = 0
                    toosharp = 1
                    setattr(pkdatum, 'pKa/Em', 0)
                    setattr(pkdatum, 'n(slope)', 0)
                    setattr(pkdatum, '1000*chi2', 0)
                else:
                    orh = 0
                    orl = 0
                    toosharp = 0
                if getattr(pkdatum, 'charge')== '+':
                    charge = 1
                else:
                    charge = -1
                # pKa shift from the solution value, signed by acid/base type
                dpk = (sol_pk[pkdatum.res] - float(getattr(pkdatum, 'pKa/Em')))*charge
                ASA = ObjAvg(mdict['head2.lst'], 'ASA')

                # zip(*...)[0] relies on Python 2 zip returning a list
                data_tuples += pkdatum.GetDataTuples(exclude=zip(*PKRule.nsub_tups)[0])
                data_tuples += [('orh', orh),
                                ('orl', orl),
                                ('toosharp', toosharp),
                                ('charge', charge),
                                ('dpk', dpk),
                                ('ASA', ASA)]
            except KeyError:
                # residue absent from pK.out (not titrated): store bare record
                pass
            try:
                seqadvdatum = mdict['seqadv'][0]
                data_tuples += [('seqadv', seqadvdatum.conflict),
                                ('wt', seqadvdatum.dbres)]
            except KeyError:
                data_tuples += [('seqadv', ''),
                                ('wt', '')]

            cls.Store(data_tuples)

    @classmethod
    def AddChildren(cls):
        '''Attach each ConfData instance (and its atoms) to its residue.'''
        for res, confs in SortedListMatch(cls.instances, ConfData.instances):
            for conf in confs:
                cls.RAddChildren(res, conf)

    @classmethod
    def BuildTable(cls, data_tuples, reloadt):
        '''
        Build the table from the first instance that carries a 'dsol' field,
        i.e. a residue that actually appeared in pK.out.
        '''
        for i, instance in enumerate(cls.instances):
            try:
                zip(*instance)[0].index('dsol')
                break
            except ValueError:
                pass
        super(ResData,cls).BuildTable(cls.instances[i], reloadt)

    def MinDist(self, other):
        '''
        Minimum interatomic distance between the first two conformers of this
        residue and of other; raises ConformerEmptyError when no conformer
        pair exists on either side.
        '''
        mins = []
        for i,j in [(i,j) for i in [0,1] for j in [0,1]]:
            try:
                mins.append(self.Confs[i].MinDist(other.Confs[j]))
            except IndexError:
                pass
        if mins==[]:
            raise ConformerEmptyError(self, other)
        else:
            return min(mins)

    def SumCrgAt(self, titr):
        '''
        Net charge of this residue at titration point titr: summed occupancy
        of the charged ('+'/'-') conformers, signed by conformer type, with
        linear interpolation between stored titration points when needed.
        NOTE(review): the sign is read from the last conformer iterated,
        which assumes all charged conformers of a residue share one sign.
        '''
        if float(titr) in self.Confs[1].occupancy.keys():
            tmplist = []
            for conf in self.Confs:
                if conf.conft[0]=='+' or conf.conft[0]=='-':
                    tmplist.append(conf.occupancy[float(titr)])
            return int(conf.conft[0]+'1')*numpy.sum(tmplist)
        else:
            sortedocc = sorted(self.Confs[1].occupancy.keys())
            i = bisect.bisect_right(sortedocc, float(titr))
            lkey,rkey = sortedocc[i-1], sortedocc[i]
            rweight = (float(titr)-lkey)/(rkey-lkey)
            lweight = 1-rweight
            ltmplist,rtmplist=[],[]
            for conf in self.Confs:
                if conf.conft[0]=='+' or conf.conft[0]=='-':
                    ltmplist.append(conf.occupancy[lkey])
                    rtmplist.append(conf.occupancy[rkey])
            return int(conf.conft[0]+'1')*(numpy.sum(rtmplist)*rweight + numpy.sum(ltmplist)*lweight)

    def MostOccAt(self, titr):
        '''
        Return the conformer with the highest occupancy at titration point
        titr; the synthetic backbone conformer (Confs[0]) is excluded.
        '''
        occlist = []
        for conf in self.Confs[1:]:
            occlist.append(conf.occupancy[float(titr)])
        i = occlist.index(max(occlist))
        # BUG FIX: occlist index i maps to Confs[i+1] because occlist was
        # built from Confs[1:]; previously returned Confs[i], one too early.
        return self.Confs[i + 1]

    class MFE(object):
        '''Mean-field energy decomposition of each ionization of a residue.'''

        class Ionization(object):
            '''
            Energy-term breakdown (in pH units) for one ionization step at
            titration point titr: each mfe_attr is the occupancy-weighted
            difference of the corresponding conf_attr between the two charge
            states, and 'total' is their sum.
            '''
            conf_attrs = ('extra', 'tors', 'vdw0', 'vdw1', 'vpw', 'dsolv', 'epol', 'epw')
            mfe_attrs = ('offset', 'tors', 'v_s', 'v_b', 'v_r', 'dsol', 'e_b', 'e_r', 'total')

            def __init__(self, titr, pka, temp, charges, confss):
                self.titr = titr
                self.KCAL_TO_KT = float(1)/(temp * 0.0019872041)
                # cmp() is Python 2 only: sign of the final-state charge
                dgpk = cmp(charges[0], 0) * (titr - pka) * PH_TO_KCAL
                dg = lambda x: dgpk + x.dgself + (x.Getdgepw(titr) + x.Getdgvpw(titr))
                normoccss = []
                for confs in confss:
                    #use a lambda function + map in order to calculate the eref of entire lists of conformers at once
                    erefs = map(dg, confs)
                    mineref = min(erefs)
                    index = lambda x: (mineref - x) * self.KCAL_TO_KT
                    # Boltzmann weights within each charge state, normalized to 1
                    rawoccs = numpy.exp(map(index, erefs))
                    normoccss.append(numpy.divide(rawoccs, float(numpy.sum(rawoccs))))
                self.total = 0
                #uses a list of lists in order to account for naming differences between head3.lst and pK.out (temporary?)
                for conf_attr, mfe_attr in zip(self.conf_attrs, self.mfe_attrs[:-1]):
                    if conf_attr=='epw':
                        getter = lambda x: x.Getdgxpw(titr, conf_attr)
                    elif conf_attr=='vpw':
                        getter = lambda x: x.Getdgxpw(titr, conf_attr) * x.Run.SCALE_VDW
                    elif conf_attr=='vdw0':
                        getter = lambda x: x.__getattribute__(conf_attr) * x.Run.SCALE_VDW0
                    elif conf_attr=='vdw1':
                        getter = lambda x: x.__getattribute__(conf_attr) * x.Run.SCALE_VDW1
                    else:
                        getter = lambda x: x.__getattribute__(conf_attr)
                    dgstate = []
                    for confs, normoccs in zip(confss, normoccss):
                        dgstate.append(numpy.sum(numpy.multiply(map(getter, confs), normoccs)))
                    # convert the kcal/mol difference back to pH units
                    self.__setattr__(mfe_attr, (dgstate[0] - dgstate[1])/PH_TO_KCAL)
                    self.total += ((dgstate[0] - dgstate[1])/PH_TO_KCAL)

            def __str__(self):
                out = ''
                for attr in self.mfe_attrs:
                    out += '%.3f\t' % self.__getattribute__(attr)
                return out

        def __init__(self, titr, res):
            self.titr = titr
            self.ionizations = []
            # .items()[:-1] slicing requires Python 2 (dict.items is a list).
            # Adjacent charge states are paired; when both are non-negative
            # the pair is swapped, presumably to put the more ionized state in
            # the 'final' slot -- TODO confirm
            for (chargeFinal, confsFinal), (chargeInitial, confsInitial) in zip(res.confsByCharge.items()[:-1], res.confsByCharge.items()[1:]):
                if chargeFinal >=0 and chargeInitial >=0:
                    chargeFinal, confsFinal, chargeInitial, confsInitial = chargeInitial, confsInitial, chargeFinal, confsFinal
                self.ionizations.append(self.Ionization(titr, res.__getattribute__('pKa/Em'), res.Run.MONTE_T, (chargeFinal, chargeInitial), (confsFinal, confsInitial)))

        def __str__(self):
            out = ''
            for ionization in self.ionizations:
                out += ionization.__str__() + '\n'
            return out

        @classmethod
        def StrHeaders(cls):
            out = ''
            for attr in cls.Ionization.mfe_attrs:
                out += '%s\t' % attr
            return out

    def GetMFE(self, titr=None):
        '''Compute (and cache on self.mfe) the MFE breakdown at titr,
        defaulting to this residue's fitted pKa/Em.'''
        if titr==None:
            titr = self.__getattribute__('pKa/Em')
        self.mfe = self.__class__.MFE(titr, self)
        return self.mfe

    def GetJiggle(self, ionized=False, atpk=False):
        '''
        Occupancy-weighted mean CRMSD ("jiggle") over this residue's ionized
        or neutral conformers.  atpk=True weights by occupancy at the fitted
        pKa/Em, a numeric atpk weights at that titration point, and
        atpk=False weights by total occupancy over the whole titration.
        '''
        if self.res=='GLY':
            # glycine has no side chain to jiggle
            return 0
        if ionized:
            confs = self.ionizedConfs
        else:
            confs = self.neutralConfs
        numerator = 0
        denominator = 0
        for conf in confs:
            if atpk:
                if atpk==True:
                    total_occ = self.Run.GetOccRow(self.__getattribute__('pKa/Em'))[0,conf.iConf]
                else:
                    total_occ = self.Run.GetOccRow(atpk)[0,conf.iConf]
            else:
                total_occ = conf.GetOccCol().sum()
            numerator += total_occ*conf.CRMSD()
            denominator += total_occ
        return float(numerator)/denominator

    @property
    def sumcrg(self):
        '''dict mapping titration point -> residue net charge (cached).'''
        try:
            return self._sumcrg
        except AttributeError:
            self._sumcrg = {}
            for key in self.Confs[1].occupancy.keys():
                self._sumcrg[key] = self.SumCrgAt(key)
            return self._sumcrg

    @property
    def confsByCharge(self):
        '''OrderedDict mapping conf.nH (used as the charge-state key) -> list
        of conformers, in ascending key order (cached).'''
        try:
            return self._confsByCharge
        except AttributeError:
            confsByCharge = {}
            for conf in self.Confs[1:]:
                tlist = confsByCharge.get(conf.nH, [])
                tlist.append(conf)
                confsByCharge[conf.nH] = tlist
            self._confsByCharge = OrderedDict(sorted(confsByCharge.items(), key=lambda x: x[0]))
            return self._confsByCharge

    @property
    def ionizedConfs(self):
        '''Conformer list of the first nonzero charge group, or None.'''
        for key in self.confsByCharge.keys():
            if int(key)!=0:
                return self.confsByCharge[key]

    @property
    def neutralConfs(self):
        '''Conformer list of the zero-charge group, or None.'''
        for key in self.confsByCharge.keys():
            if int(key)==0:
                return self.confsByCharge[key]

    @staticmethod
    def StrHeaders():
        '''Column headers matching the __str__ layout.'''
        return '%s/%s\t%s\t%s\t%s\t' % ('pdb','run_id','res_name','chainid','res_number')
        
class RunData(Level):
    '''
    One MCCE run: run.prm and energies.opp parameters plus the sparse
    conformer-occupancy matrix assembled from fort.38.
    '''
    dtype = 'Run'
    parent_dtype = 'Prot'
    super_parent_dtypes = []
    ChildDClass = ResData
    SubChildDClasses = [ConfData, AtomData]

    @classmethod
    def Org(cls, data):
        '''
        Store the run record: all run.prm / energies.opp settings, the sorted
        list of titration points, and the occupancy matrix (titration point x
        conformer) in CSR form.
        '''
        titrs = sorted([float(titr[0]) for titr in data['fort.38'][0].GetDataTuples(exclude=zip(*FortRule.nsub_tups)[0] + ('typ',))])
        data_tuples = data['run.prm'][0].GetDataTuples()
        data_tuples += data['energies.opp'][0].GetDataTuples()
        #occmatrix making code
        #occmatrix, like epw and vpw, is padded with zeros in front of all rows, but unlike them occmatrix's columns are not zero padded
        oi,oj,ov = [],[],[]
        for i,occrow in enumerate(data['fort.38']):
            for j,occ in enumerate(occ[1] for occ in sorted([(float(occtup[0]),float(occtup[1])) for occtup in occrow.GetDataTuples(exclude=zip(*FortRule.nsub_tups)[0] + ('typ',))])):
                if occ!=0:
                    oi.append(i+1)
                    oj.append(j)
                    ov.append(occ)
        occmatrix = coo_matrix((ov,(oj,oi)),shape=(len(titrs) + 1,len(data['fort.38']) + 1))  #switching i and j here to transpose the matrix so all sparses can be in csr format
        occmatrix = occmatrix.tocsr()
        data_tuples += [('occmatrix',occmatrix),
                        ('titrs',titrs)]
        cls.Store(data_tuples)

    @classmethod
    def AddChildren(cls):
        '''Attach every residue (and everything below it) to this run.'''
        for res in cls.ChildDClass.instances:
            cls.RAddChildren(cls.instances[0], res)

    @classmethod
    def ReloadTable(cls, standalone, override_columns=None):
        '''Reload the table; occmatrix and titrs are Python objects and must
        be stored pickled.'''
        override_columns = []
        for attr in ('occmatrix', 'titrs'): #'epw', 'vpw', 
            override_columns.append(Column(attr, PickleType()))
        super(RunData,cls).ReloadTable(standalone=False, override_columns=override_columns)

    def GetOccRow(self, titr):
        '''
        Occupancy row (all conformers) at titration point titr, linearly
        interpolated between the two bracketing stored points when titr was
        not itself titrated.
        '''
        try:
            return self.occmatrix.getrow(self.titrs.index(float(titr)))
        except ValueError:
            lri = bisect.bisect_left(self.titrs, float(titr)) - 1
            rri = lri + 1
            ltitr,rtitr = self.titrs[lri], self.titrs[rri]
            rweight = (float(titr)-ltitr)/(rtitr-ltitr)
            lweight = 1-rweight
            return self.occmatrix.getrow(lri)*lweight + self.occmatrix.getrow(rri)*rweight

    def GetOccWeightedMatrix(self, titr, kind):
        '''
        The 'kind' pairwise-energy matrix ('epw'/'vpw') with every row
        multiplied elementwise by the occupancy row at titr; cached per
        (kind, titr).
        '''
        try:
            return self.__getattribute__('_weighted_'+kind)[float(titr)]
        except KeyError:
            pass
        except AttributeError:
            self.__setattr__('_weighted_'+kind, {})
        occrow = self.GetOccRow(titr)
        occmat = []
        # xrange and sparse .getshape() are Python 2 / old-scipy APIs
        for i in xrange(self.epw.getshape()[0]):
            occmat.append(occrow)
        occmat = vstack(occmat)
        self.__getattribute__('_weighted_'+kind)[float(titr)] = self.__getattribute__(kind).multiply(occmat)
        return self.__getattribute__('_weighted_'+kind)[float(titr)]

    def GetWeightedSumEnergies(self, titr, kind):
        '''
        1 x N sparse row: occupancy-weighted sum of the 'kind' pairwise
        energies felt by each conformer at titr; cached per (kind, titr).
        '''
        try:
            return self.__getattribute__('_weightedsum_'+kind)[float(titr)]
        except KeyError:
            pass
        except AttributeError:
            self.__setattr__('_weightedsum_'+kind, {})
        occrow = self.GetOccRow(titr)
        self.__getattribute__('_weightedsum_'+kind)[float(titr)] = occrow * self.__getattribute__(kind)
        return self.__getattribute__('_weightedsum_'+kind)[float(titr)]

    @staticmethod
    def InterpolateColumns(matrix, c1, c2, c1weight=.5):
        '''
        Linear blend of two matrix columns:
        getcol(c1)*c1weight + getcol(c2)*(1-c1weight).
        BUG FIX: the result was previously computed and discarded; it is now
        returned.
        '''
        return matrix.getcol(c1)*c1weight + matrix.getcol(c2)*(1-c1weight)
            
class ProtData(Level):
    '''The single protein record at the root of the data hierarchy.'''
    dtype = 'Prot'
    parent_dtype = None
    super_parent_dtypes = []
    ChildDClass = RunData
    SubChildDClasses = [ResData, ConfData, AtomData]

    @classmethod
    def Org(cls, data):
        '''Store the protein record, but only if none exists yet.'''
        if not cls.instances:
            cls.Store(data['Prot'])

    @classmethod
    def AddChildren(cls):
        '''Attach every run (and everything below it) to the protein.'''
        prot = cls.instances[0]
        for run in cls.ChildDClass.instances:
            cls.RAddChildren(prot, run)

    @classmethod
    def SQLize(cls, meta=None):
        '''Run the base-class SQLize, optionally installing a new metadata
        object first; skipped once instances are already cls objects.'''
        if meta is not None:
            cls.meta = meta
        if not isinstance(cls.instances[0], cls):
            super(ProtData, cls).SQLize()

#ancillary dbs

class CSAData(Level):
    '''Standalone table of csa.dat records (presumably the Catalytic Site
    Atlas -- confirm).'''
    dtype = 'CSA'
    parent_dtype = None
    super_parent_dtypes = []
    ChildDClass = None
    SubChildDClasses = []

    @classmethod
    def Org(cls, data):
        '''Store one record per csa.dat entry.'''
        for datum in data['csa.dat']:
            cls.Store(datum.GetDataTuples())
            
class ConsurfData(Level):
    '''Standalone table of consurf.grades records (presumably ConSurf
    conservation scores -- confirm).'''
    dtype = 'Consurf'
    parent_dtype = None
    super_parent_dtypes = []
    ChildDClass = None
    SubChildDClasses = []

    @classmethod
    def Org(cls, data):
        '''Store one record per consurf.grades entry.'''
        for datum in data['consurf.grades']:
            cls.Store(datum.GetDataTuples())
        

# top-down ordering of the data levels (parent before child); presumably
# iterated by callers to Org/AddChildren/SQLize each level in turn
hierarchy = [ProtData, RunData, ResData, ConfData, AtomData]
