# this is just a collection of handy functions that I couldn't find in the standard set of modules.

from scipy import array
from scipy import interpolate,argsort
from scipy import exp
from scipy import optimize
from scipy import nan
from numpy import histogram,ones
import numpy
from pylab import find

def array_extend(base_array, tail_array):
    """Return a new array holding base_array followed by tail_array.

    Either argument may be an array (anything with a .tolist() method)
    or a plain list/iterable.  Unlike the old version, a list argument
    is never mutated in place.
    """
    # hasattr replaces the old dir()-scan idiom; list() copies so the
    # caller's list is not extended as a side effect
    if hasattr(base_array, 'tolist'):
        base_list = base_array.tolist()
    else:
        base_list = list(base_array)
    if hasattr(tail_array, 'tolist'):
        tail_list = tail_array.tolist()
    else:
        tail_list = list(tail_array)

    base_list.extend(tail_list)
    return array(base_list)

def array_insert(base_array, new_head, before_index):
    """Return a copy of base_array with new_head inserted before before_index."""
    base_list = base_array.tolist()
    # the old code inserted an undefined name 'head' (NameError on every
    # call); the parameter is new_head
    base_list.insert(before_index, new_head)
    return array(base_list)

def interp1d(x, y, kind='linear', copy=True, bounds_error=True, fill_value=nan):
    """Sort x (reordering y to match) and delegate to scipy.interpolate.interp1d.

    Accepts plain lists as well as arrays.  The old version accepted a
    'kind' argument but never forwarded it, so every interpolation came
    out linear regardless; it is now passed through.
    """
    if isinstance(x, list):
        x = numpy.array(x, dtype=numpy.float64)
    if isinstance(y, list):
        y = numpy.array(y, dtype=numpy.float64)
    order = argsort(x)
    return interpolate.interp1d(x[order], y[order], kind=kind, copy=copy,
                                bounds_error=bounds_error, fill_value=fill_value)

def linear_resample(x, y, newy):
    """Linearly interpolate the y data onto the sample points in ``newy``.

    NOTE(review): despite its name, ``newy`` holds the new *x* locations
    at which y is evaluated.  No extrapolation is performed.
    """
    resampler = interp1d(x, y)
    return resampler(newy)


# n-point running average of a list or array; the first n-1 outputs use the
# shorter leading window that is available at that point.
def running_average(l, n):
    averaged = []
    window = []
    for value in l:
        window.append(value)
        # keep at most the n most recent values in the window
        if len(window) > n:
            window.pop(0)
        averaged.append(sum(window) / len(window))
    return averaged

# a function that finds the best exponential fit to some data (BFGS method).
def exp_fit_bfgs(datax, datay, xmin, xmax):
    '''Returns [scale, xoffset, yoffset, tau] of the exponential
    scale*exp(-(x-xoffset)/tau)+yoffset that best fits the data between
    xmin and xmax (least squares, minimized with BFGS).'''

    # window indices; numpy.nonzero replaces the removed pylab.find
    imin = numpy.nonzero(datax < xmin)[0][-1]
    imax = numpy.nonzero(datax > xmax)[0][0]

    dxw = datax[imin:imax]
    dyw = datay[imin:imax]

    #-------------- INITIAL GUESS --------------------------------
    xoffset_guess = min(xmin, xmax)
    tau_guess = abs(xmin - xmax)
    scale_guess = 1
    # yoffset guess: the most represented y value in a 30-bin histogram
    histo = histogram(dyw, 30)
    max_index = histo[0].argsort()[-1]
    yoffset_guess = histo[1][max_index]

    exp_decay = lambda scale, xoffset, yoffset, tau, x: scale * exp(-(x - xoffset) / tau) + yoffset
    # objective: summed squared residual; X = [scale, xoffset, yoffset, tau]
    diff = lambda X: ((dyw - exp_decay(X[0], X[1], X[2], X[3], dxw)) ** 2).sum()

    def diff_prime(X):
        # Analytic gradient of the objective.  The old version summed the
        # *squares* of the model partials (and had a wrong xoffset partial),
        # which is not the gradient of the objective at all.
        e = exp(-(dxw - X[1]) / X[3])
        r = dyw - (X[0] * e + X[2])          # residual vector
        dS_dscale = (-2.0 * r * e).sum()
        dS_dxoffset = (-2.0 * r * X[0] * e / X[3]).sum()
        dS_dyoffset = (-2.0 * r).sum()
        dS_dtau = (-2.0 * r * X[0] * e * (dxw - X[1]) / X[3] ** 2).sum()
        return array([dS_dscale, dS_dxoffset, dS_dyoffset, dS_dtau])

    X_guess = [scale_guess, xoffset_guess, yoffset_guess, tau_guess]
    X1 = optimize.fmin_bfgs(diff, X_guess, fprime=diff_prime)
    # run it again with a negative scale, it might have trouble crossing zero.
    X_guess = [-scale_guess, xoffset_guess, yoffset_guess, tau_guess]
    X2 = optimize.fmin_bfgs(diff, X_guess, fprime=diff_prime)

    # return the better of the two trials
    if diff(X1) < diff(X2):
        return X1
    else:
        return X2



def exp_fit_cobyla(datax,datay,xmin,xmax):
    ''' Returns the scale,offset and tau of an exponential function that best fits the data
        -- uses the cobyla (Constrained Optimization BY Linear Approximation) method'''

    # determine the index of xmin and xmax
    imin = find(datax<xmin)[-1]
    imax = find(datax>xmax)[0]

    dxw = datax[imin:imax]
    dyw = datay[imin:imax]

    #-------------- INITIAL GUESS --------------------------------
    xoffset_guess = min(xmin,xmax)
    tau_guess = abs(xmin-xmax)
    scale_guess = 1
    # determine the yoffset by finding the most represented y value in a histogram
    histo = histogram(dyw,30)
    max_index = histo[0].argsort()[-1]
    yoffset_guess = histo[1][max_index]


    exp_decay = lambda scale,xoffset,yoffset,tau,x: scale * exp(-(x-xoffset)/tau) + yoffset
    # diff is just the difference between the data and the guessed function 
    #    if X is a list of parameters
    squared_residual = lambda X: ((dyw - exp_decay(X[0],X[1],X[2],X[3],dxw))**2).sum()
    constraint = lambda X: 1

    X_guess = [scale_guess,xoffset_guess,yoffset_guess,tau_guess]
    X1 = optimize.fmin_cobyla(squared_residual,X_guess,constraint,rhobeg=1.5,rhoend=0.00001,iprint=1,maxfun=2000)
    # run it again with a negative scale, it might have trouble crossing zero.
    X_guess = [-scale_guess,xoffset_guess,yoffset_guess,tau_guess]
    X2 = optimize.fmin_cobyla(squared_residual,X_guess,constraint,rhobeg=0.5,rhoend=0.00001,iprint=1,maxfun=1000)

    # return the better of the two trials
    X1_residual = squared_residual(X1)
    X2_residual = squared_residual(X2)
    if X1_residual < X2_residual:
        return X1
    else:
        return X2



# a function that finds the best exponential fit to some data (Levenberg-Marquardt).
def exp_fit(datax, datay, xmin, xmax):
    '''Returns [scale, xoffset, yoffset, tau] of the exponential
    scale*exp(-(x-xoffset)/tau)+yoffset that best fits the data between
    xmin and xmax, via scipy.optimize.leastsq.'''

    # window indices; numpy.nonzero replaces the removed pylab.find
    imin = numpy.nonzero(datax < xmin)[0][-1]
    imax = numpy.nonzero(datax > xmax)[0][0]

    dxw = datax[imin:imax]
    dyw = datay[imin:imax]

    #-------------- INITIAL GUESS --------------------------------
    xoffset_guess = min(xmin, xmax)
    tau_guess = abs(xmin - xmax)
    scale_guess = 1
    # yoffset guess: the most represented y value in a 30-bin histogram
    histo = histogram(dyw, 30)
    max_index = histo[0].argsort()[-1]
    yoffset_guess = histo[1][max_index]

    exp_decay = lambda scale, xoffset, yoffset, tau, x: scale * exp(-(x - xoffset) / tau) + yoffset
    # residual vector for leastsq; X = [scale, xoffset, yoffset, tau]
    diff = lambda X: dyw - exp_decay(X[0], X[1], X[2], X[3], dxw)

    def diff_prime(X):
        # Jacobian of the residual, one row per parameter (col_deriv=1).
        e = exp(-(dxw - X[1]) / X[3])
        ddiff_dX0 = -e
        # d/dxoffset of exp(-(x-xoffset)/tau) is exp(...)/tau; the old code
        # wrongly multiplied by xoffset (X[1]) instead of 1
        ddiff_dX1 = -X[0] * e / X[3]
        ddiff_dX2 = -ones(len(dxw))
        ddiff_dX3 = -X[0] * e * (dxw - X[1]) / X[3] ** 2
        return array([ddiff_dX0, ddiff_dX1, ddiff_dX2, ddiff_dX3])

    # NOTE: the 'warning' keyword was removed from optimize.leastsq
    X_guess = [scale_guess, xoffset_guess, yoffset_guess, tau_guess]
    X1, success = optimize.leastsq(diff, X_guess, Dfun=diff_prime, col_deriv=1)
    # run it again with a negative scale, it might have trouble crossing zero.
    X_guess = [-scale_guess, xoffset_guess, yoffset_guess, tau_guess]
    X2, success = optimize.leastsq(diff, X_guess, Dfun=diff_prime, col_deriv=1)

    # return the better of the two trials
    if (diff(X1) ** 2).sum() < (diff(X2) ** 2).sum():
        return X1
    else:
        return X2


# a function that finds the best exponential fit to some data, with a fixed
# (caller-supplied) y offset.
def exp_fit_w_offset(datax, datay, xmin, xmax, yoffset):
    '''Returns [scale, xoffset, tau] of the exponential
    scale*exp(-(x-xoffset)/tau)+yoffset that best fits the data between
    xmin and xmax; yoffset is held fixed at the given value.'''

    # window indices; numpy.nonzero replaces the removed pylab.find
    imin = numpy.nonzero(datax < xmin)[0][-1]
    imax = numpy.nonzero(datax > xmax)[0][0]

    dxw = datax[imin:imax]
    dyw = datay[imin:imax]

    exp_decay = lambda scale, xoffset, yoffset, tau, x: scale * exp(-(x - xoffset) / tau) + yoffset
    # residual vector for leastsq; X = [scale, xoffset, tau]
    diff = lambda X: dyw - exp_decay(X[0], X[1], yoffset, X[2], dxw)

    def diff_prime(X):
        # Jacobian of the residual, one row per parameter (col_deriv=1).
        e = exp(-(dxw - X[1]) / X[2])
        ddiff_dX0 = -e
        # d/dxoffset of exp(-(x-xoffset)/tau) is exp(...)/tau; the old code
        # wrongly multiplied by xoffset (X[1]) instead of 1
        ddiff_dX1 = -X[0] * e / X[2]
        ddiff_dX2 = -X[0] * e * (dxw - X[1]) / X[2] ** 2
        return array([ddiff_dX0, ddiff_dX1, ddiff_dX2])

    # NOTE: the 'warning' keyword was removed from optimize.leastsq
    X_guess = [1, xmin, 1]  # initial guesses for [scale, xoffset, tau]
    X1, success = optimize.leastsq(diff, X_guess, Dfun=diff_prime, col_deriv=1)
    # run it again with a negative scale, it might have trouble crossing zero.
    X_guess = [-1, xmin, 1]
    X2, success = optimize.leastsq(diff, X_guess, Dfun=diff_prime, col_deriv=1)

    # return the better of the two trials
    if (diff(X1) ** 2).sum() < (diff(X2) ** 2).sum():
        return X1
    else:
        return X2


