import numpy as np
import matplotlib.pyplot as pl
from math import sqrt
from astro.utilities import Bunch
import inspect

# the smallest representable difference using the default python float
EPS = np.finfo(float).eps
# sqrt(eps) is the usual fractional step size for a forward finite
# difference: it balances truncation against roundoff error
SQRT_EPS = sqrt(EPS)
# hard cap on the number of Levenberg-Marquardt iterations in minchi2
MAXSTEPS = 1000

def findderiv(f, x, a, i, sqrteps=None):
    """ Find the partial derivative of f(x, *a) with respect to a[i]
    for each value of x using forward finite differences.

    Inputs
    ------
    f : function
        Find the partial derivatives of this function. It takes x and
        a as parameters, f(x, *a), and returns one function value for
        each x value.
    x : array of shape (N,)
        The x values at which to find the derivatives.
    a : array of shape (M,)
        The parameter values at which to find the derivatives.
    i : int
        Find the partial derivative of f with respect to a[i].
    sqrteps : float, optional
        Fractional step size for the finite difference. Defaults to
        sqrt(machine epsilon), which is near-optimal for a forward
        difference.

    Returns
    -------
    dfda : array of shape (N,)
       The derivatives of f with respect to a[i] at each value of x.
    """
    if sqrteps is None:
        sqrteps = sqrt(np.finfo(float).eps)
    a_i = a[i]
    # Scale the step with the parameter magnitude. abs() is needed so
    # that a large *negative* parameter does not collapse the step to
    # sqrteps (the old max(a_i, 1.) bug); max() guards against a
    # parameter close to zero.
    h = sqrteps * max(abs(a_i), 1.)
    # see numerical recipes, section 5.7 for why temp is necessary:
    # it forces h to be an exactly representable difference
    temp = a_i + h
    h = temp - a_i
    a_da = a.copy()
    a_da[i] += h
    dfda = (f(x, *a_da) - f(x, *a)) / h
    return dfda

def findjacobian(x, a, func=None, funcderiv=None):
    """ Calculate the Jacobian, defined as the matrix of first partial
    derivatives of the model being fit to the data with respect to
    each parameter, at each fitting point.

    Inputs
    ------
    x : array of shape (N,)
        The x values at which to find the derivatives.
    a : array of shape (M,)
        The parameter values at which to find the derivatives.
    func : function, optional
        Find the derivatives of this function (by finite differences).
        It takes x and a as parameters, func(x, *a), and returns one
        value for each x value.
    funcderiv : function, optional
        A function that gives the derivatives analytically. It must
        take x, a and the index i of the parameter to differentiate
        with respect to as arguments, funcderiv(x, a, i), and return
        one derivative value for each x value. Only used when func is
        not given.

    Returns
    -------
    Jacobian : array of shape (M, N)
        Matrix of the derivatives with respect to each parameter at
        each value of x.

    Raises
    ------
    ValueError
        If neither func nor funcderiv is given.
    """
    if func is None and funcderiv is None:
        # fail loudly instead of the opaque "'NoneType' object is not
        # callable" the old code produced
        raise ValueError('One of func or funcderiv must be given')

    derivs = []
    for i in range(len(a)):
        if func is not None:
            dfda = findderiv(func, x, a, i)
        else:
            dfda = funcderiv(x, a, i)
        derivs.append(dfda)

    return np.array(derivs)

def minchi2(x, y, ysig, func, guess, fix=None, deriv=None,
            chistop=0.001, lamb=1.0, plot=True):
    """ Fit func to the data (x, y +/- ysig) by minimising chi^2 with
    the Levenberg-Marquardt algorithm (Numerical Recipes section 15.5).

    Inputs
    ------
    x, y : arrays of shape (N,)
        Data values.
    ysig : array of shape (N,)
        One sigma errors on y.
    func : function
        The function giving the data model. We are approximating y
        with func(x). It takes x and the parameter values as
        arguments, e.g. func(x, *guess), and returns one value for
        each x value.
    guess : array of shape (M,)
        The initial guesses for the parameter values.
    fix : string, optional
        Whitespace-separated parameters to fix at the guess values.
    deriv : function, optional
        NOTE(review): accepted but currently unused - analytic
        derivatives are never passed on to findjacobian. TODO: wire up.
    chistop : float, optional
        Stop once an accepted step improves chi^2 by less than this.
    lamb : float, optional
        Initial Levenberg-Marquardt damping parameter.
    plot : bool, optional
        If true, plot the model after each step and wait for input.

    Returns
    -------
    results : object
        The output object has attributes giving the best fitting
        parameters, their one sigma errors, the covariance matrix for
        the errors and the correlation matrix.

    Raises
    ------
    RuntimeError
        If no solution is found within MAXSTEPS iterations.
    """
    x, y, ysig, guess = (np.asarray(arr, float) for arr in (x, y, ysig, guess))
    invsig2 = 1 / ysig ** 2
    # model parameter names, skipping the first argument (x)
    parnames = inspect.getargs(func.__code__).args[1:]
    npar = len(guess)
    print('guesses', guess)

    # deal with fixed parameters: wrap func so the fixed values are
    # re-inserted, and drop them from the free-parameter vector
    if fix is not None:
        fix = fix.split()
        ifixed = sorted(parnames.index(p) for p in fix)
        fixed = dict((i, guess[i]) for i in ifixed)
        fix = dict((parnames[i], guess[i]) for i in ifixed)

        def f(x, *a):
            a = list(a)
            for i in ifixed:
                a.insert(i, fixed[i])
            return func(x, *a)

        guess = np.array([guess[i] for i in range(npar) if i not in ifixed])
        npar = len(guess)
        parnames = [p for p in parnames if p not in fix]
        print(fix, parnames)
    else:
        f = func

    a = guess.copy()

    def findchisq(a):
        dy = y - f(x, *a)
        chi2 = dy ** 2 * invsig2
        return chi2.sum()

    # indices to find diagonal elements of an npar x npar matrix
    i = np.arange(len(a))
    idiag = i, i

    chisq1 = findchisq(a)
    nsteps = 0
    while nsteps < MAXSTEPS:
        print('lambda', lamb)
        print('X^2', chisq1)
        # perturb parameters, hopefully towards solution.

        # estimate the first derivative of the function we are trying
        # to fit to the data in the direction of each parameter at
        # each x value
        jac = findjacobian(x, a, func=f)

        # find the gradient vector for the parameters. This is called
        # beta in Numerical Recipes section 15.5
        grad = np.dot(jac, invsig2 * (y - f(x, *a)))

        # find the curvature matrix (second derivative of chisq with
        # respect to each combination of parameter values). This is
        # called alpha in Numerical Recipes.
        hess = np.dot(jac, (jac * invsig2).T)

        # Levenberg-Marquardt: boost the diagonal elements and invert
        hess[idiag] *= 1 + lamb
        invhess = np.linalg.inv(hess)

        # find the trial parameter values
        da = np.dot(invhess, grad)
        print('da', da)
        a += da

        # evaluate chisq with the new parameter values
        chisq2 = findchisq(a)
        if plot:
            pl.plot(x, f(x, *a))
            pl.show()
            input('continue...')

        if chisq2 < chisq1:
            if chisq1 - chisq2 < chistop:
                # converged
                break
            # we got closer to the solution: trust the quadratic
            # approximation more (smaller lambda)
            lamb *= 0.1
            chisq1 = chisq2
        else:
            # chi^2 got worse: reject the trial step by restoring the
            # previous parameters (the old code kept the worse values)
            # and fall back towards steepest descent (larger lambda)
            a -= da
            lamb *= 10

        nsteps += 1
    else:
        # the loop condition failed without a break: no convergence
        # (the old `if nsteps > MAXSTEPS` test could never fire)
        raise RuntimeError('Exceeded %i iterations' % MAXSTEPS)

    # we've found a solution! return the best-fitting
    # parameters and the covariance matrix.

    # find covariance matrix at the solution (without the lambda boost)
    jac = findjacobian(x, a, func=f)
    hess = np.dot(jac, (jac * invsig2).T)
    covar = np.linalg.inv(hess)
    parsig = np.sqrt(covar[idiag])
    # correlation matrix: covariance normalised by the one sigma errors
    corr = covar / np.outer(parsig, parsig)

    results = Bunch(parnames=parnames, par=a, parsig=parsig,
                    covar=covar, corr=corr, func=f, x=x, y=y, ysig=ysig,
                    guess=guess, findchisq=findchisq, fixed=fix)
    return results
    

def test1():
    """ Check fitting straight line."""
    pl.figure()

    def func(x, a, b):
        return a * x + b

    # independent variable
    xvals = np.arange(100.)
    np.random.seed(99)
    # noisy realisation of the model y = x + 5
    yvals = func(xvals, 1.0, 5.) + np.random.randn(len(xvals)) * 20.
    # 1 sigma errors in y
    sig = np.full(len(xvals), 20.)
    pl.plot(xvals, yvals)
    # starting guesses for (a, b)
    start = 0.5, 7.
    return minchi2(xvals, yvals, sig, func, start)

def test2():
    """ Check fitting gaussian."""
    pl.figure()

    def func(x, a, b, c, d):
        return a * np.exp(-0.5 * ((x - b) / c) ** 2) + d

    xvals = np.arange(100.)
    np.random.seed(99)
    # noisy realisation of a gaussian (amp 20, centre 50, width 10)
    yvals = func(xvals, 20.0, 50., 10, 0) + np.random.randn(len(xvals)) * 5.
    # 1 sigma errors in y
    sig = np.full(len(xvals), 5.)
    pl.plot(xvals, yvals)
    # starting guesses for (a, b, c, d); d is held fixed at its guess
    start = 10, 43, 25, 3
    return minchi2(xvals, yvals, sig, func, start, plot=1, fix='d')
   
def test3():
    """ Check weighting by errors works."""
    pl.figure()

    def func(x, a, b):
        return a * x + b

    # independent variables
    x = np.arange(100.)
    # dependent variables: two line segments with different errors
    ymod = func(x[:50], 1.0, 5.)
    ymod2 = func(x[:50], 3, 5. + 50)

    # was a bare plot(...) call, a NameError; use the pyplot module
    pl.plot(x, list(ymod) + list(ymod2))
    y = np.empty_like(x)
    np.random.seed(99)
    y[:50] = ymod + np.random.randn(50) * 1.
    y[50:] = ymod2 + np.random.randn(50) * 10.
    # 1 sigma errors in y
    sig = np.empty_like(x)
    sig[:50] = 1.
    sig[50:] = 10.
    # initial guesses for parameters
    pars = 0.5, 7.
    pl.plot(x, y)
    # 'ysig' was undefined here (NameError); the errors array is sig
    return minchi2(x, y, sig, func, pars)
