"""
Tools for performing Probabilistic Collocation Method (PCM)

Functions
---------
pcm_cc          Clenshaw-Curtis quadrature
pcm_gq          Gaussian quadrature
pcm_lr          Linear Regression
"""

import numpy as np
from scipy import linalg as la
from scipy.optimize import fmin

try:
    from sklearn import linear_model as lm
except:
    print "sklearn not imported, advanced regression not available"

import poly as po
import quadrature as qu
from orthogonal import orth_select
from dist import sample_select

__version__ = "1.0"

def pcm(func, porder, dist, rule="C", sorder=None, proxy_dist=None,
        orth=None, orth_acc=100, sparse=False, composit=1,
        antithetic=None, lr="LS", lr_kws={}):
    """
Probabilistic Collocation Method

Parameters
----------
Required arguments

func : callable
    The model to be approximated.
    Must accept arguments on the form `func(z, *args, **kws)`
    where `z` is an 1-dimensional array with `len(z)==len(dist)`.
porder : int
    The order of the polynomial approximation
dist : Dist
    Distributions for models parameter
rule : str
    The rule for estimating the Fourier coefficients.
    For spectral projection/quadrature rules, see quadgen.
    For point collocation/numerical sampling, see samplegen.

Optional arguments

proxy_dist : Dist
    If included, the expansion will be created in proxy_dist and
    values will be mapped to dist using a double Rosenblatt
    transformation.
sorder : float
    The order of the sample scheme used.
    If omitted, default values will be used.
orth : int, str, callable, Poly
    Orthogonal polynomial generation.

    int, str :
        orth will be passed to orth_select
        for selection of orthogonalization.
        See orth_select doc for more details.

    callable :
        the return of orth(order, dist) will be used.

    Poly :
        it will be used directly.
        All polynomials must be orthogonal for method to work
        properly if spectral projection is used.
orth_acc : int
    Accuracy used in the estimation of polynomial expansion.

Spectral projection arguments

sparse : bool
    If True, Smolyak sparsegrid will be used instead of full
    tensorgrid.
composit : int
    Use composit rule. Note that the number of evaluations may grow
    quickly.

Point collocation arguments

antithetic : bool, array_like
    Use of antithetic variable
lr : str
    Linear regression method.
    See fitter_lr for more details.
lr_kws : dict
    Extra keyword arguments passed to fitter_lr.
    (Mutable default is safe here: this function only unpacks it,
    never mutates it.)

Returns
-------
q : Poly
    Polynomial approximation of a given a model.

Examples
--------

Define function and distribution:
>>> func = lambda z: -z[1]**2 + 0.1*z[0]
>>> dist = pc.J(pc.Uniform(), pc.Uniform())

Perform pcm:
>>> q = pc.pcm(func, 2, dist)
>>> print pc.around(q, 10)
-q1^2+0.1q0

See also
--------
quadgen         Generator for quadrature rules
samplegen       Generator for sampling schemes
    """

    # The polynomial expansion
    if orth is None:
        # NOTE(review): dependence is queried with a method call here,
        # while pcm_cc/pcm_gq/pcm_lr read `dist.dependent` as an
        # attribute -- confirm which form the Dist class provides.
        if dist.dependent():
            orth = "svd"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int, long)):
        # Resolve a named/numbered orthogonalization scheme to a callable
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(porder, dist)

    # Proxy variable
    if proxy_dist is None:
        trans = lambda x:x
    else:
        # Double Rosenblatt transformation: the expansion lives in
        # proxy_dist; nodes are mapped back into the original dist.
        dist, dist_ = proxy_dist, dist
        trans = lambda x: dist_.inv(dist.fwd(x))

    # Applying scheme
    rule = rule.upper()
    # NOTE(review): `rule in "GLC"` is a substring test -- it only
    # matches the single-character rules "G", "L", "C" (and "").
    if rule in "GLC":
        # Spectral projection through a quadrature scheme
        if sorder is None:
            sorder = porder+1
        z,w = qu.quadgen(sorder, dist, orth_acc, sparse=sparse,
                rule=rule, composit=composit)

        x = trans(z)
        # map() evaluates the model column-wise (one node per call)
        y = np.array(map(func, x.T))
        Q = fitter_quad(orth, x, w, y)

    else:
        # Point collocation: (quasi-)random samples + linear regression
        if sorder is None:
            # Rule of thumb: twice as many samples as unknowns
            sorder = 2*len(orth)
        z = dist.sample(sorder, rule=rule, antithetic=antithetic)

        x = trans(z)
        y = np.array(map(func, x.T))
        Q = fitter_lr(orth, x, y, rule=lr, **lr_kws)

    return Q


def pcm_cc(func, order, dist_out, dist_in=None, acc=None,
        orth=None, retall=False, sparse=False):
    """
Probabilistic Collocation Method using Clenshaw-Curtis quadrature

Parameters
----------
Required arguments

func : callable
    The model to be approximated.
    Must accept arguments on the form `func(z, *args, **kws)`
    where `z` is an 1-dimensional array with `len(z)==len(dist)`.
order : int
    The order of the polynomial approximation
dist_out : Dist
    Distributions for models parameter

Optional arguments

dist_in : Dist
    If included, space will be mapped using a Rosenblatt
    transformation from dist_out to dist_in before creating an
    expansion in terms of dist_in
acc : float
    The order of the sample scheme used
    If omitted order+1 will be used
orth : int, str, callable, Poly
    Orthogonal polynomial generation.

    int, str :
        orth will be passed to orth_select
        for selection of orthogonalization.
        See orth_select doc for more details.

    callable :
        the return of orth(M, dist) will be used.

    Poly :
        it will be used directly.
        All polynomials must be orthogonal for method to work
        properly.
retall : bool
    If True, return extra values.
sparse : bool
    If True, Smolyak sparsegrid will be used instead of full
    tensorgrid

Returns
-------
q[, x, w, y]

q : Poly
    Polynomial estimate of a given a model.
x : np.ndarray
    Nodes used in quadrature with `x.shape=(dim, K)` where K is the
    number of samples.
w : np.ndarray
    Weights used in quadrature with `w.shape=(K,)`.
y : np.ndarray
    Evaluations of func with `len(y)=K`.

Examples
--------

Define function and distribution:
>>> func = lambda z: -z[1]**2 + 0.1*z[0]
>>> dist = pc.J(pc.Uniform(), pc.Uniform())

Perform pcm:
>>> q, x, w, y = pc.pcm_cc(func, 2, dist, acc=2, retall=1)
>>> print pc.around(q, 10)
-q1^2+0.1q0
>>> print len(w)
9

With Smolyak sparsegrid
>>> q, x, w, y = pc.pcm_cc(func, 2, dist, acc=2, retall=1, sparse=1)
>>> print pc.around(q, 10)
-q1^2+0.1q0
>>> print len(w)
13

    """
    if acc is None:
        acc = order+1

    if dist_in is None:
        # Clenshaw-Curtis quadrature directly in the output space
        z,w = qu.quadgen(acc, dist_out, 100, sparse, False)
        x = z
        dist = dist_out
    else:
        # Quadrature in the input space, nodes mapped to the output
        # space with a Rosenblatt transformation dist_in -> dist_out
        z,w = qu.quadgen(acc, dist_in, 100, sparse, False)
        x = dist_out.ppf(dist_in.cdf(z))
        dist = dist_in

    # The orthogonal polynomial expansion
    if orth is None:
        # NOTE(review): `dist.dependent` is read as an attribute here but
        # called as a method in `pcm`; if it is a method this test is
        # always truthy -- confirm against the Dist class.
        if dist.dependent:
            orth = "chol"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int, long)):
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(order, dist)

    # Evaluate the model in each node (one column per node)
    y = np.array(map(func, x.T))
    # NOTE(review): orth is orthogonal w.r.t. `dist`, whose quadrature
    # nodes are `z`; pcm_gq evaluates its expansion at `z`, whereas the
    # transformed `x` is used here -- confirm which is intended when
    # dist_in is given.
    Q = fitter_quad(orth, x, w, y)

    if retall:
        return Q, x, w, y
    return Q

def fitter_quad(orth, X, W, Y):
    """
Fit Fourier coefficients by discretized spectral projection.

Parameters
----------
orth : Poly
    Orthogonal polynomial expansion.
X : array_like
    Quadrature nodes with one column per node.
W : array_like
    Quadrature weights, one per node.
Y : array_like
    Model evaluations; `len(Y)` equals the number of nodes.

Returns
-------
Q : Poly
    Expansion with coefficients <Y, orth_i>/<orth_i, orth_i>,
    reshaped to match the trailing dimensions of Y.
    """
    basis = po.Poly(orth)
    nodes = np.asfarray(X)
    weights = np.asfarray(W)
    evals = np.asfarray(Y)

    # Flatten trailing dimensions; restored on the coefficients below
    out_shape = evals.shape
    evals = evals.reshape(weights.size, evals.size/weights.size)

    # Basis polynomials evaluated in every node
    basis_vals = basis(*nodes)

    # Weighted inner products: numerator <Y, p> and denominator <p, p>
    numer = []
    denom = []
    for bval in basis_vals:
        numer.append((bval*evals.T*weights).T)
        denom.append((bval**2*weights).T)

    coef = (np.sum(numer, 1).T/np.sum(denom, 1)).T
    coef = coef.reshape(len(coef), *out_shape[1:])

    return po.transpose(po.sum(basis*coef.T, -1))



def pcm_gq(func, order, dist_out, dist_in=None, acc=None,
        orth=None, retall=False, sparse=False):
    """
Probabilistic Collocation Method using optimal Gaussian quadrature

Parameters
----------
Required arguments

func : callable
    The model to be approximated.
    Must accept arguments on the form `func(z, *args, **kws)`
    where `z` is an 1-dimensional array with `len(z)==len(dist)`.
order : int
    The order of the polynomial approximation
dist_out : Dist
    Distributions for models parameter

Optional arguments

dist_in : Dist
    If included, space will be mapped using a Rosenblatt
    transformation from dist_out to dist_in before creating an
    expansion in terms of dist_in
acc : float
    The order of the sample scheme used
    If omitted order+1 will be used
orth : int, str, callable, Poly
    Orthogonal polynomial generation.

    int, str :
        orth will be passed to orth_select
        for selection of orthogonalization.
        See orth_select doc for more details.

    callable :
        the return of orth(M, dist) will be used.

    Poly :
        it will be used directly.
        All polynomials must be orthogonal for method to work
        properly.
retall : bool
    If True, also return quadrature nodes, weights and model
    evaluations.
sparse : bool
    If True, Smolyak sparsegrid will be used instead of full
    tensorgrid

Returns
-------
Q[, x, w, y]

Q : Poly
    Polynomial estimate of a given a model.
x : np.ndarray
    Nodes used in quadrature with `x.shape=(dim, K)` where K is the
    number of samples.
w : np.ndarray
    Weights used in quadrature with `w.shape=(K,)`.
y : np.ndarray
    Evaluations of func with `len(y)=K`.

Examples
--------
Define function:
>>> func = lambda z: z[1]*z[0]

Define distribution:
>>> dist = pc.J(pc.Normal(), pc.Normal())

Perform pcm:
>>> p, x, w, y = pc.pcm_gq(func, 2, dist, acc=3, retall=True)
>>> print pc.around(p, 10)
q0q1
>>> print len(w)
16

    """
    if acc is None:
        acc = order+1

    if dist_in is None:
        # Gaussian quadrature directly in the output space
        z,w = qu.quadgen(acc, dist_out, 100, sparse, True)
        x = z
        dist = dist_out
    else:
        # BUGFIX: arguments were passed as quadgen(dist_in, acc, ...),
        # swapped relative to every other quadgen call in this module
        # (see pcm_cc and the dist_in-is-None branch above).
        z,w = qu.quadgen(acc, dist_in, 100, sparse, True)
        # Rosenblatt transformation dist_in -> dist_out
        x = dist_out.ppf(dist_in.cdf(z))
        dist = dist_in

    # Evaluate the model in each (transformed) node
    y = np.array(map(func, x.T))
    shape = y.shape
    y = y.reshape(w.size, y.size/w.size)

    # The orthogonal polynomial expansion
    if orth is None:
        # NOTE(review): `dist.dependent` is read as an attribute here but
        # called as a method in `pcm`; if it is a method, this branch is
        # always truthy -- confirm against the Dist class.
        if dist.dependent:
            orth = "chol"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int, long)):
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(order, dist)

    # Spectral projection: discretized Fourier coefficients
    # <y, orth_i> / <orth_i, orth_i> using the quadrature weights
    ovals = orth(*z)
    vals1 = [(val*y.T*w).T for val in ovals]
    vals2 = [(val**2*w).T for val in ovals]
    coef = (np.sum(vals1, 1).T/np.sum(vals2, 1)).T

    coef = coef.reshape(len(coef), *shape[1:])
    Q = po.transpose(po.sum(orth*coef.T, -1))

    if retall:
        return Q, x, w, y
    return Q


def pcm_lr(func, order, dist_out, sample=None,
        dist_in=None, scheme="HMIL",
        orth=3, regression="LS", retall=False):
    """
Probabilistic Collocation Method using Linear Least Squares fit

Parameters
----------
Required arguments

func : callable
    The model to be approximated.  Must accept arguments on the
    form `z` is an 1-dimensional array with `len(z)==len(dist)`.
order : int
    The order of chaos expansion.
dist_out : Dist
    Distributions for models parameter.

Optional arguments

sample : int
    The order of the sample scheme to be used.
    If omitted it defaults to order**dim.
dist_in : Dist
    If included, space will be mapped using a Rosenblatt
    transformation from dist_out to dist_in before creating an
    expansion in terms of dist_in
scheme: str, Sampler, array_like
    Method for generating nodes.

    str :
        method will be passed to sample_select for selection of
        collocation nodes.
        See sample_select doc for more details.

    Sampler :
        will use the Sampler object directly. See Sampler doc for
        more details.

    array_like :
        it will be used directly
        Must have `len(sample)==len(dist)`
orth : int, str, callable, Poly
    Orthogonal polynomial generation.

    int, str :
        orth will be passed to orth_select
        for selection of orthogonalization.
        See orth_select doc for more details.

    callable :
        the return of orth(M, dist) will be used.

    Poly :
        it will be used directly.
        It must be of length N+1=comb(M+D, M)
regression : str
    Linear regression method used.
    See pc.fitter_lr for more details.
retall : bool
    If True, return extra values.

Examples
--------

Define function:
>>> func = lambda z: -z[1]**2 + 0.1*z[0]

Define distribution:
>>> dist = pc.J(pc.Normal(), pc.Normal())

Perform pcm:
>>> q, x, y = pc.pcm_lr(func, 2, dist, sample=5, retall=True)
>>> print pc.around(q, 10)
-q1^2+0.1q0
>>> print len(x.T)
6
    """

    # The expansion is built in dist_in when given, else in dist_out
    if dist_in is None:
        dist = dist_out
    else:
        dist = dist_in
    dim = len(dist)

    if sample is None:
        # Default sample budget grows exponentially with dimension
        sample = order**dim

    # node generator
    if isinstance(scheme, str):
        # Resolve a named sampling scheme to a Sampler for `dist`
        scheme = sample_select(scheme, dist)
    if not isinstance(scheme, np.ndarray):
        # Sampler object: draw `sample` collocation nodes
        scheme = scheme(sample)
    if dim==1:
        # Collapse to a 1-D array in the univariate case
        scheme = scheme[0]

    # orthogonalization
    if orth is None:
        # NOTE(review): `dist.dependent` is read as an attribute here but
        # called as a method in `pcm` -- confirm which form is correct.
        if dist.dependent:
            orth = "chol"
        else:
            orth = "ttr"
    if isinstance(orth, (str, int, long)):
        orth = orth_select(orth)
    if not isinstance(orth, po.Poly):
        orth = orth(order, dist)

    # Rosenblatt
    if dist_in is None:
        x = scheme
    else:
        # Map nodes from the input space into the output space
        x = dist_out.ppf(dist_in.cdf(scheme))

    # evals
    y = np.array(map(func, x.T))
    shape = y.shape[1:]
    # Flatten trailing dimensions: one regression per output component
    y = y.reshape(len(y), y.size/len(y))
    if sample==0:
        # Degenerate no-sample case
        # NOTE(review): `orth * y` as the "fit" looks suspicious here;
        # confirm the intended semantics of sample==0.
        y_ = y[:]
        R = orth * y
    else:
        # fitter_lr returns (R, uhat) when retall=1
        R, y_ = fitter_lr(orth, x, y, regression, retall=1)

    R = po.reshape(R, shape)

    if retall:
        return R, x, y
    return R

def rlstsq(A, b, order=0, alpha=None, cross=False):
    """
Least Squares Minimization using Tikhonov regularization, and
robust generalized cross-validation.

Solves the regularized normal equations
``(A^T A + alpha L^T L) x = A^T b``.

Parameters
----------
A : array_like, shape (M,N)
    "Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
    Ordinate or "dependent variable" values. If `b` is
    two-dimensional, the least-squares solution is calculated for
    each of the `K` columns of `b`.
order : int, array_like, None
    If int, it is the order of Tikhonov regularization.
    If array_like, it will be used as regularization matrix.
    If None, no regularization term is applied.
alpha : float, optional
    Dampening parameter.
    If omitted, it will be calculated from robust generalized
    cross-validation (unless `order` is None, in which case no
    dampening is used).
cross : bool
    Use leave-one-out cross validation: solve once per deleted row
    and return the element-wise median of the solutions.

Returns
-------
x : np.ndarray
    Solution with shape (N,) or (N, K) matching `b`.
    """

    A = np.array(A)
    b = np.array(b)
    m,l = A.shape

    if cross:
        # Leave-one-out cross validation: drop one row at a time,
        # solve the reduced problem, and take the median solution.
        out = np.empty((m,l) + b.shape[1:])
        A_ = np.empty((m-1,l))
        b_ = np.empty((m-1,) + b.shape[1:])
        for i in range(m):
            A_[:i] = A[:i]
            A_[i:] = A[i+1:]
            b_[:i] = b[:i]
            b_[i:] = b[i+1:]
            out[i] = rlstsq(A_, b_, order, alpha, False)

        return np.median(out, 0)

    # Build the regularization matrix L
    if order==0:
        # Zeroth order: classical ridge regression
        L = np.eye(l)
    elif order==1:
        # First-order finite-difference operator
        L = np.zeros((l-1,l))
        L[:,:-1] -= np.eye(l-1)
        L[:,1:] += np.eye(l-1)
    elif order==2:
        # Second-order finite-difference operator
        L = np.zeros((l-2,l))
        L[:,:-2] += np.eye(l-2)
        L[:,1:-1] -= 2*np.eye(l-2)
        L[:,2:] += np.eye(l-2)
    elif order is None:
        # No regularization: L^T L contributes nothing below
        L = np.zeros(1)
    else:
        # User-supplied regularization matrix
        L = np.array(order)
        assert L.shape[-1]==l or L.shape in ((), (1,))

    if order is None:
        # BUGFIX: previously `alpha` could remain None here, making the
        # final `alpha*np.dot(L.T, L)` raise a TypeError.  Without a
        # regularization matrix the dampening is irrelevant, so use 0.
        if alpha is None:
            alpha = 0.
    elif alpha is None:
        # Robust generalized cross-validation (RGCV) to pick alpha
        gamma = 0.1

        def rgcv_error(alpha):
            # NOTE(review): the RGCV functional uses alpha**2 while the
            # final solve below uses alpha un-squared -- confirm this
            # asymmetry is intentional.
            A_ = np.dot(A.T,A)+alpha**2*(np.dot(L.T,L))
            A_ = np.dot(la.inv(A_), A.T)
            x = np.dot(A_, b)
            res2 = np.sum((np.dot(A,x)-b)**2)
            K = np.dot(A, A_)
            V = m*res2/np.trace(np.eye(m)-K)**2
            mu2 = np.trace(np.dot(K,K))/m

            return (gamma + (1-gamma)*mu2)*V

        alpha = np.abs(fmin(rgcv_error, .1, disp=0)[0])
        # Keep alpha strictly positive so the system stays invertible
        alpha = max(alpha, 1.e-5)

    # Solve the regularized normal equations
    out = la.inv(np.dot(A.T,A) + alpha*np.dot(L.T, L))
    out = np.dot(out, np.dot(A.T, b))
    return out


def fitter_lr(P, x, u, rule="LS", retall=False, **kws):
    """
Fit a polynomial chaos expansion using linear regression.

Parameters
----------
P : Poly
    Polynomial chaos expansion with `P.shape=(M,)` and `P.dim=D`.
x : array_like
    Collocation nodes with `x.shape=(D,K)`.
u : array_like
    Model evaluations with `len(u)=K`.
retall : int, bool
    0 : return R
    1 : return R, uhat
    2 : return R, uhat, res, rank, sing
rule : str
    Regression method used.

    The follwong methods uses scikits-learn as backend.
    See `sklearn.linear_model` for more details.

    Key     Scikit-learn    Description
    ---     ------------    -----------
    "BARD"  ARDRegression   Bayesian ARD Regression
    "BR"    BayesianRidge   Bayesian Ridge Regression
    "EN"    ElastiNet       Elastic Net
    "ENC"   ElasticNetCV    EN w/Cross Validation
    "LAR"   Lars            Least Angle Regression
    "LARC"  LarsCV          LAR w/Cross Validation
    "LAS"   Lasso           Least Absolute Shrinkage and
                            Selection Operator
    "LASC"  LassoCV         LAS w/Cross Validation
    "LL"    LassoLars       Lasso and Lars model
    "LLC"   LassoLarsCV     LL w/Cross Validation
    "LLIC"  LassoLarsIC     LL w/AIC or BIC
    "OMP"   OrthogonalMatchingPursuit

    Local methods

    Key     Description
    ---     -----------
    "LS"    Ordenary Least Squares
    "T"     Ridge Regression/Tikhonov Regularization
            kws : optional
                order : (0,1,2) order of regularization
                alpha : (None) Dampning parameter. Automatically
                        calculated if omited.
    "TC"    T w/Cross Validation
            kws : optional
                order : (0,1,2) order of regularization
                alpha : (None) Dampning parameter. Automatically
                        calculated if omited.

**kws : optional
    Keywords passed to scikits-model, if used.
    See sklearn.linear_model for local documentation


Returns
-------
R[, uhat]

R : Poly
    Fitted polynomial with `R.shape=u.shape[1:]` and `R.dim=D`.

Examples
--------
>>> P = pc.Poly([1, x, y])
>>> x = [[-1,-1,1,1], [-1,1,-1,1]]
>>> u = [0,1,1,2]
>>> print fitter_lr(P, x, u)
0.5q1+0.5q0+1.0

    """

    x = np.array(x)
    u = np.array(u)

    if P.dim==1:
        Q = P(x).T
    else:
        Q = P(*x).T
    shape = u.shape[1:]
    u = u.reshape(u.shape[0], np.prod(u.shape[1:]))


    rule = rule.upper()
    # Scikit-learn wrapper
    if rule=="BARD":
        solver = lm.ARDRegression(fit_intercept=False,
                copy_X=False, **kws)

    elif rule=="BR":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.BayesianRidge(**kws)

    elif rule=="EN":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.ElasticNet(**kws)

    elif rule=="ENC":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.ElasticNetCV(**kws)

    elif rule=="LAR":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.Lars(**kws)

    elif rule=="LARC":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.LarsCV(**kws)

    elif rule=="LAS":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.Lasso(**kws)

    elif rule=="LASC":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.LassoCV(**kws)

    elif rule=="LL":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.LassoLars(**kws)

    elif rule=="LLC":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.LassoLarsCV(**kws)

    elif rule=="LLIC":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.LassoLarsIC(**kws)

    elif rule=="OMP":
        kws["fit_intercept"] = kws.get("fit_intercept", False)
        solver = lm.OrthogonalMatchingPursuit(**kws)

    # Local rules
    if rule=="LS":
        uhat = la.lstsq(Q, u)[0]

    elif rule=="T":
        uhat = rlstsq(Q, u, kws.get("order",0),
                kws.get("alpha", None), False)

    elif rule=="TC":
        uhat = rlstsq(Q, u, kws.get("order",0),
                kws.get("alpha", None), True)

    else:
        uhat = [solver.fit(Q, _).coef_ for _ in u.T]
        uhat = np.array(uhat).T

    u = u.reshape(u.shape[0], *shape)

    R = po.sum((P*uhat.T), -1)
    R = po.reshape(R, shape)

    if retall==1:
        return R, uhat
    elif retall==2:
        return R, uhat, Q
    return R


if __name__=="__main__":
    import numpy as np
    import __init__ as pc
    import doctest
    x, y = pc.variable(2)

    doctest.testmod()

