"""
Data fitter utilities

>>> from home import testconf
>>> fitter( testconf )

$Rev: 56 $ 
$Author: ialbert $ 
$Date: 2008-01-01 07:33:58 -0500 (Tue, 01 Jan 2008) $
"""
import numpy
import atlas
from atlas import hdf, commify, util
from math import log, exp
from mod454.schema import Mod454Schema as SCHEMA

def normal_function( param ):
    """
    Default fitting function; samples a normal (Gaussian-shaped) curve
    centered at index zero.

    The curve peaks at 1.0 and is scaled so that it drops to one half
    at a distance of param.SIGMA from the center.  param.WIDTH sets the
    sampled half-width.

    Returns a (lo, hi, values) triple where values covers the index
    range -lo .. hi-1 (the pair can describe non-symmetrical ranges).
    """
    ln2      = log(2)
    sigma_sq = float(param.SIGMA) ** 2
    lo = param.WIDTH
    hi = param.WIDTH + 1

    # sample the curve over the symmetric index range
    samples = numpy.array(
        [ exp( -x * x / sigma_sq * ln2 ) for x in range(-lo, hi) ]
    )

    return lo, hi, samples

@util.stop_on_error
def fitter( conf ):
    """
    Fits data with a sum of normal curves.

    Reads the raw data stored under conf.DATA_LABEL in the HDF database
    conf.HDF_DATABASE, smooths it by adding a scaled normal curve
    (see normal_function) at every positive data point, and writes the
    smoothed values that rise above conf.MINIMUM_PEAK_SIZE into a new
    group named conf.FIT_LABEL.

    Data is processed in chunks of conf.DATA_SIZE rows per label;
    chunk edges overlap by the curve width and are stitched together.

    Required conf attributes: DATA_LABEL, FIT_LABEL, DATA_SIZE, SIGMA,
    WIDTH, MINIMUM_PEAK_SIZE, HDF_DATABASE, CLOBBER.
    """

    atlas.info( "data label=%s" % conf.DATA_LABEL )
    atlas.info( "fit label=%s, vector size=%s" % ( conf.FIT_LABEL, commify(conf.DATA_SIZE)))
    atlas.info( "fit sigma=%s, width=%s, minimum=%s" % ( conf.SIGMA, conf.WIDTH, conf.MINIMUM_PEAK_SIZE))
    atlas.info( "hdf_database='%s'" % conf.HDF_DATABASE )
    atlas.isfile( conf.HDF_DATABASE )

    # open the database and create the group that will hold the fit
    db = hdf.hdf_open( conf.HDF_DATABASE, mode='a', title='HDF database')
    data  = hdf.GroupData( db=db, name=conf.DATA_LABEL )
    group = hdf.create_group( db=db, name=conf.FIT_LABEL, desc='fit for %s' % conf.DATA_LABEL, clobber=conf.CLOBBER ) 

    # fitting curve ranges and sampled values
    lo, hi, normal = normal_function( conf )
    
    size, epsilon = conf.DATA_SIZE, conf.MINIMUM_PEAK_SIZE
    atlas.info('normal function fitting')

    # chunk buffers are padded so curves added near the right edge fit
    pad = hi + 1
    assert pad > hi, 'Padding must be larger than the fitting interval' # sanity check

    def safe_add(fit, ix, lo, hi, values):
        "Mutates fit data, adds values to the slice (clipped at the array end)"
        actual = len ( fit[ix-lo:ix+hi] )
        assert actual > 0, 'Out of bounds'
        fit[ix-lo:ix+hi] += values[:actual]

    # iterates over all chromosomes
    for label in data.labels:

        # create the table that will contain the fit
        fit_table  = hdf.create_table( db=db, name=label, where=group, schema=SCHEMA, clobber=False )
        data_table = data.get_table( label )

        atlas.debug('started fitting label %s' % label)
        # NOTE(review): `column` appears unused below; kept in case the
        # attribute access matters to the hdf layer — confirm and remove
        column = data_table.cols.ix

        # carry-over buffers used to stitch consecutive chunks together;
        # each chunk seeds its first `lo` entries from the previous tail
        # (independent arrays; the original aliased a single array here)
        wstich = numpy.zeros( lo )
        cstich = numpy.zeros( lo )
        astich = numpy.zeros( lo )

        # stepping through the data by chunks
        for index, start, end in data.intervals(label=label, size=size):
            atlas.debug('processing interval %s to %s' % (start, end))

            # accumulators for the smoothed values of the current chunk
            wxfit = numpy.zeros( size + pad )
            cxfit = numpy.zeros( size + pad )
            axfit = numpy.zeros( size + pad )
            
            # apply the stitch carried over from the previous chunk
            wxfit[:lo] = wstich
            cxfit[:lo] = cstich
            axfit[:lo] = astich
            
            # data indices used to stitch together overlapping data
            sl, eh = hdf.index_search( table=data_table, args=( start+lo, end+hi ) )

            # generates the smoothed curves
            for ix, wx, cx, ax in data_table[sl:eh]:
                ix = ix - start
                if wx > 0.0:
                    safe_add( fit=wxfit, ix=ix, lo=lo, hi=hi, values=normal*wx )
                if cx > 0.0:
                    safe_add( fit=cxfit, ix=ix, lo=lo, hi=hi, values=normal*cx )
                if ax > 0.0:
                    safe_add( fit=axfit, ix=ix, lo=lo, hi=hi, values=normal*ax )
            
            # clamp wxfit and cxfit to epsilon wherever they dip below it
            reset = [ wxfit, cxfit ]
            for target in reset:
                indices = ( target < epsilon ).nonzero()[0]
                # numpy.float (an alias of builtin float) was removed in
                # NumPy 1.24; use the builtin directly
                values  = numpy.ones( len(indices), float) * epsilon
                target.put(indices=indices, values=values)
            
            #
            # stitching is not entirely seamless, small artifacts may be
            # present if the data has continuous values at the stitch point
            #
            wstich = wxfit[-lo:] 
            cstich = cxfit[-lo:] 
            astich = axfit[-lo:] 

            # cut off the padded high end
            axfit = axfit[:-hi]

            # write out values that rise above the minimum peak size
            nonzero = ( axfit > epsilon ).nonzero()[0]
            nsize = commify(len(nonzero))
            atlas.info('label=%s, chunk=%s, writing %s points' % (label, index, nsize) )
            row = fit_table.row
            for ix in nonzero:
                row['ix'], row['wx'], row['cx'], row['ax'] = ( ix+start, wxfit[ix], cxfit[ix], axfit[ix] )
                row.append()
            fit_table.flush()

        oldsize, newsize = map(commify, map(len, [data_table, fit_table]))
        atlas.info('label=%s fitted %s -> %s datapoints' % (label, oldsize, newsize) )

    db.close()

def test(verbose=0):
    """
    Runs the module doctests.
    """
    import doctest
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    doctest.testmod( optionflags=flags, verbose=verbose )

# Script entry point: run the module doctests when executed directly.
if __name__ == '__main__':
    test(verbose=0)

    
