""" Implementation and testing of some osc and osc derived functions."""


from scipy import *
import sys

sys.path.append("/home/flatberg/Repositories/pyblm")
import pyblm
from pyblm import pls, pca, pls_val


def sw_osc(X, y, nc, inc, tol=1e-10, max_iter=100):
    """S. Wold's first osc implementation.

    Extracts one Y-orthogonal component from X by iterating between
    orthogonalisation of a score vector against y and an inner PLS
    model predicting that score from X, then deflates X.

    Input:
    ======
        X : {ndarray}
            Data matrix (samples x variables); centered internally.
        y : {ndarray}
            Response; a 1d/row input is treated as a column vector.
        nc : {integer}
            Number of osc components for removal.
            NOTE(review): currently unused -- only a single component
            is extracted (the `for a in nc` loop was never enabled).
        inc : {integer}
            Number of components of `inner PLS`.
        tol : {float}
            Convergence tolerance on the squared change of the score.
        max_iter : {integer}
            Maximum number of iterations.

    Output:
        Xosc : corrected data, t : osc score, p : osc loading.
    """
    y = atleast_2d(y)
    if y.shape[0] == 1:
        y = y.T
    # work on column-centered data
    X = X - X.mean(0)
    y = y - y.mean(0)
    # initial score: first principal component of X
    u, s, vt = svd(X, 0)
    t0 = (u*s)[:,:1]
    err = 100
    niter = 0  # fix: counter was never initialised or incremented
    # fix: original used `or`, so convergence on `tol` alone could
    # never stop the loop
    while err > tol and niter < max_iter:
        niter += 1
        # orthogonalise the current score against y
        y2 = dot(y.T, y)
        t = t0 - dot(dot(y.T, t0), y)/y2
        # inner PLS model: predict the orthogonalised score from X
        res = pls(X, t, inc)
        # fix: W, P and q were undefined -- take them from the pls
        # result dict (same keys as used in mod_osc below);
        # TODO(review): confirm the 'Q' key name against pyblm.pls
        W, P, q = res['W'], res['P'], res['Q']
        wosc = dot(dot(W, linalg.inv(dot(P.T, W))), q)
        t0 = dot(X, wosc)
        err = ((t0 - t)**2).sum()
    # deflate X by the converged osc component
    p = dot(X.T, t0)/dot(t0.T, t0)
    Xosc = X - dot(t0, p.T)
    return Xosc, t, p

def dorth(X, Y):
    """Direct orthogonalisation (placeholder).

    Not yet implemented; accepts and ignores its arguments and
    returns None.
    """
    return None
    
def fearn_osc(X, Y, nc_osc):
    """Orthogonal signal correction after T. Fearn.

    Removes `nc_osc` components from X that carry no information
    about Y (Y-orthogonal variation).

    Input:
    ======
        X : {ndarray}
            Data matrix (samples x variables); centered internally.
        Y : {ndarray}
            Response matrix (samples x responses); centered internally.
        nc_osc : {integer}
            Number of osc components to remove.

    Output:
        Xosc : corrected data, tosc : osc scores, posc : osc loadings.

    Reference: `On orthogonal signal correction`, T. Fearn,
    Chemometrics and Intelligent Laboratory Systems, 50, 2000, 47-52.
    """
    X = X - X.mean(0)
    Y = Y - Y.mean(0)
    xy = dot(X.T, Y)
    I = eye(xy.shape[0])

    # M projects (in variable space) onto the complement of span(X'Y)
    # fix: the original elementwise division `xy/dot(xy.T, xy)` only
    # broadcasts correctly for a single response column; use an
    # explicit inverse in general (consistent with mod_osc below)
    if xy.shape[1] == 1:
        M = I - dot(xy, xy.T)/dot(xy.T, xy)
    else:
        M = I - dot(dot(xy, linalg.inv(dot(xy.T, xy))), xy.T)

    # osc weights: leading right singular vectors of the Y-orthogonal
    # part of X
    p = linalg.svd(dot(X, M), 0)[-1].T
    wosc = p[:, :nc_osc].copy()
    tosc = dot(X, wosc)
    # fix: plain division broke for nc_osc > 1; pinv handles the
    # general (and possibly rank-deficient) case, as in mod_osc
    posc = dot(dot(X.T, tosc), linalg.pinv(dot(tosc.T, tosc)))
    Xosc = X - dot(tosc, posc.T)

    return Xosc, tosc, posc


def mod_osc(X, Y, R, nc, nci, center=(0, 0)):
    """ A modified osc to remove Y-orthogonal (Co-)variance
    consistent across replicates.

    Parameters:
    ===========

        Input:
            X : {ndarray}
                data matrix
            Y : {ndarray}
                Predictors
            R : {ndarray}
                Replicate dummy matrix (may be None; a pca of the
                Y-orthogonal part is used instead)
            nc : {integer}
                Number of osc components to remove
            nci : {integer} or 'auto'
                Number of `internal` pls components used
                to predict replicate structure; 'auto' estimates it
                by cross-validation.
            center : {sequence of 2 ints}
                (center X, center Y) flags; 0 means center.

        Output:
            Xosc : {ndarray}
                Transformed data matrix
            Wosc : {ndarray}
                osc components used in transform
            Tosc : {ndarray}
                sample osc components used in transform
            Posc : {ndarray}
                osc loadings used in transform

    Notes:
    ======

        This algorithm is based on the osc algorithm of T. Fearn.
        However, an adjustment is made to ensure that the Y-orthogonal
        information has maximum covariance with replicate structure.

        Problem definition:
        Find w such that: $\\argmax{w} w^TX^T_jR_jR^T_jX_jw$, under the
        constraints of $w^Tw=1$ and $wX^T_jY=0$

        Motivation:
            Because gene expression data often contain major sources
            of variation that are not related to the phenomenon under
            investigation, it may be beneficial to apply a pre-treatment
            to the data before embarking on PLS regression. In cases
            where the design includes multiple (biological) replicates,
            these unwanted sources of variation may be reliably removed
            if we meet the following two criteria:

            * The extracted information must be orthogonal to Y
            (No predictive value)

            * The extracted information must be consistent within
            each biological replicate.
            (maximum covariance with replicate information)

    Reference:
    ==========

        `On orthogonal signal correction`, T. Fearn, Chemometrics and
        Intelligent Laboratory Systems, 50, 2000, 47-52

    """
    # fix: the assert compared an int with the string 'auto' in the
    # automatic case; only check when nci is an explicit rank
    if nci != 'auto':
        assert nc <= nci
    compress = False
    m, n = X.shape
    k, o = Y.shape
    if n > 1000:
        # many variables: work in the (smaller) sample space via svd
        compress = True
    if center[0] == 0:
        mnx = X.mean(0)
        X = X - mnx
    if center[1] == 0:
        Y = Y - Y.mean(0)
    if compress:
        u, s, vt = linalg.svd(X, 0)
        Xold = X.copy()
        X = u*s
        decompress = vt
    xy = dot(X.T, Y)
    I = eye(xy.shape[0])
    # Z: part of X orthogonal to Y
    if o == 1:
        inv_yxxy = 1./dot(xy.T, xy)
        M = I - inv_yxxy*dot(xy, xy.T)
    else:
        inv_yxxy = linalg.inv(dot(xy.T, xy))
        M = I - dot(dot(xy, inv_yxxy), xy.T)
    Z = dot(X, M)
    if compress:
        # map back to the original variable space
        Z = dot(Z, decompress)
        del X
        X = Xold

    # build a pls model to capture replicate information
    # fix: `R != None` is an elementwise comparison for ndarrays and
    # raises on truth testing; use an identity check
    if R is not None:
        if nci == 'auto':
            rmsep, yhat, error = pls_val(Z, R, 17, nsets=8)
            nci = rmsep.mean(0).argmin()
            # fix: original printed the undefined name `aopt_est`
            print("Estimated aopt : %d " % nci)
        res = pls(Z, R, nci)
        Wosc = res['W'][:,:nc]
    else:
        # do pca on Y-orth var/covariance
        res = pca(Z, nci)
        Wosc = res['P']
    Tosc = dot(X, Wosc)
    Posc = dot(dot(X.T, Tosc), linalg.pinv(dot(Tosc.T, Tosc)))
    Xosc = X - dot(Tosc, Posc.T)
    if center[0] == 0:
        # put the column means back so Xosc is on the original scale
        Xosc = Xosc + mnx

    return Xosc, Wosc, Tosc, Posc


def test_osc(X, Y, n):
    # NOTE(review): incomplete test stub -- the arguments X, Y, n are
    # accepted but ignored, and nothing is asserted.
    # NOTE(review): `_create_testdata` takes (datatype, Y, R), so the
    # keyword `type=1` would raise a TypeError; it also currently
    # returns None, so the 2-tuple unpacking would fail as well.
    X, Y = _create_testdata('osc', type=1)
    

def test_mod_odc(X, Y, R, n, ni):
    pass


def _create_testdata(datatype='1', Y=None, R=None):
    """
    Test data :

    large/small variance
    
    orthogonal/correlated to Y
    orthogonal/correlated to R
    overlapping/non-overlapping
    
    
    1.) 8 gaussians (non overlapping)
        a.) Correlated to Y, small variance
        b.) Correlated to Y, large variance
        c.) Orthogonal to Y, small variance
        d.) Orthogonal to Y, large variance

    2.) 
    
    """
    if datatype == '1':
        
        x = zeros((1000,))
        centers = [100, 200, 300, 400, 500, 600, 700, 800]
        width = 50
        heights = [1, 1, 1, 1, 1, 1, 1, 1]
        
    pass

def _gaussian_peak(center=50, rel_std=0.5, width=20, height=1, n=100,
                   sym=False, noise='normal', noise_weight=0.1, **kwd):
    # signal
    x = zeros((n,))
    x[center:center+10] = height
    m = signal.gaussian(width, 0.1*rel_std*width, sym=sym)
    x = signal.convolve(x, m, mode='same')
    # noise
    if noise == 'normal':
        x += noise_weight*randn(x.shape[0])
    elif noise == 'normal_hitch':
        err = noise_weight*randn(x.shape[0])
        err = signal.convolve(err, m, mode='same')
        x += err
    elif noise == 'unif':
        pass
    elif noise == 'experiment':
        pass
        
    elif (not noise) or noise in ['None', 'none', 'False', '0']:
        pass
    
    return x + noise

def _noise_from_data(X, Y=None):
    has_several_replicates = False
    if Y != None:
        has_several_replicates = True
        # repplicate info if needed
        assert(X.shape[0] == Y.shape[0])
        rep_means = []
        rep_stds = []

    if has_several_replicates:
        Y = asarray(Y).ravel()
        # Y is a replicate number array-like argument
        reps = unique(Y)
        for r in reps:
            xr = X[Y==r,:]
            rep_means.append(xr.mean(0))
            rep_stds.append(xr.std(0))
    else:
        rep_means.append(X.mean(0))
        rep_stds.append(X.std(0))
    rep_means = asarray(rep_means)
    rep_stds = asarray(rep_stds)
    x = rep_means.mean(0)
    y = rep_stds.mean(0)
    IP = interpolate.interp1d(x, y, kind='cubic')
    return IP
