"""
Predicts the features

>>> # dataloader test must be run before this test passes
>>> from home import testconf
>>> predictor( testconf )

$Rev: 64 $ 
$Author: ialbert $ 
$Date: 2008-01-07 17:44:43 -0500 (Mon, 07 Jan 2008) $
"""
import gc
import atlas
from atlas import peakfunc, sql, commify, util, hdf

@util.stop_on_error
def predictor( conf ):
    """
    Predicts nucleosomes based on the values listed in the configuration
    and inserts the predicted peaks into the sql database.

    conf -- configuration object; reads FIT_LABEL, PEAK_LABEL, DATA_SIZE,
            HDF_DATABASE, SQL_URI, EXCLUSION_ZONE, MINIMUM_PEAK_SIZE,
            CLOBBER, LEFT_SHIFT and RIGHT_SHIFT

    Side effects: opens (and always closes) the HDF database, drops and
    recreates the sql indices, and bulk inserts one row per detected peak.
    """
    atlas.info( "fit label=%s" % conf.FIT_LABEL )
    atlas.info( "peak label=%s, vector size=%s" % ( conf.PEAK_LABEL, commify(conf.DATA_SIZE)))
    atlas.info( "hdf_database='%s'" % conf.HDF_DATABASE )
    atlas.info( "sql_uri='%s'" % conf.SQL_URI )
    atlas.isfile( conf.HDF_DATABASE )
    atlas.info( "peak exclusion=%d, minimum=%s" % ( conf.EXCLUSION_ZONE, conf.MINIMUM_PEAK_SIZE))

    # get the data that will be used for peak detection
    db = hdf.hdf_open( conf.HDF_DATABASE, mode='a', title='HDF database')
    try:
        data = hdf.GroupData(db=db, name=conf.FIT_LABEL)

        # create the sql engine
        engine = sql.get_engine( conf.SQL_URI )

        # drop sql indices (speeds up bulk inserting)
        sql.drop_indices(engine)

        # inserts the label for the peak; if clobber is True this removes
        # all previous data stored under this label!
        label_id = sql.make_label( engine, name=conf.PEAK_LABEL, clobber=conf.CLOBBER )

        LEFT, RIGHT = conf.LEFT_SHIFT, conf.RIGHT_SHIFT

        # strand codes; column index i+1 of each table row holds the values
        # for STRANDS[i]
        STRANDS = '+-A'

        for chrom in data.labels:
            # the table is the same for every strand so fetch it once per
            # chromosome (assumes get_table is side-effect free -- TODO confirm)
            table = data.get_table( chrom )

            # operates on each strand separately
            for index, strand in enumerate( STRANDS ):

                def generator(index, table):
                    "Yields the position and per-strand value for each row"
                    for row in table:
                        yield row[0], row[index]

                values = generator( index=index+1, table=table )
                peaks  = peakfunc.detect_peaks( values )
                peaks  = peakfunc.select_peaks( peaks, width=conf.EXCLUSION_ZONE, level=conf.MINIMUM_PEAK_SIZE )

                # collects the rows to be bulk inserted; peak midpoints are
                # widened by the left/right shifts into [start, end] intervals
                bulk = [
                    dict(strand=strand, name='N%d' % mid, start=mid-LEFT,
                         end=mid+RIGHT, chrom=chrom, label_id=label_id,
                         freetext='', altname='', value=value)
                    for mid, value in peaks
                ]

                size = commify(len(bulk))
                atlas.info('label=%s, strand=%s, peaks=%s' % (chrom, strand, size) )
                sql.bulk_insert(engine=engine, values=bulk )

                # minimize memory use by invoking the garbage collector
                del bulk
                gc.collect()

        # get the indices back
        sql.create_indices(engine)
    finally:
        # always release the HDF database handle, even on error
        db.close()

def test(verbose=0):
    """
    Main test runner: executes this module's doctests.

    verbose -- passed through to doctest.testmod
    """
    import doctest
    # option flags are bit flags and must be combined with bitwise OR,
    # not arithmetic addition (which silently corrupts repeated flags)
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    doctest.testmod( optionflags=flags, verbose=verbose )

if __name__ == '__main__':
    # run the doctest suite when this module is executed as a script
    test(verbose=0)