from mdp.nodes import PCANode
import numpy as np

def percent_change(data):
    """
    Compute the fractional change between consecutive rows of data.

    data - n x m array
    return - (n-1) x m array; row i is (data[i+1] - data[i]) / data[i]
    """
    previous = data[:-1]
    return (data[1:] - previous) / previous

def calc_vector_normalize(data):
    """
    Normalize each column of data to unit Euclidean length.

    d'[:,j] = d[:,j] / |d[:,j]|

    data - n x m array (m column vectors)
    return - n x m array of unit-norm columns

    NOTE(review): a zero-norm column yields a divide-by-zero (inf/nan),
    exactly as in the original implementation.
    """
    # Broadcasting the reciprocal norms across rows replaces the original
    # per-column Python-level loop; multiplying by 1/norm (instead of
    # dividing) reproduces the original's arithmetic element-for-element.
    inv_norm = 1.0 / np.sqrt(np.sum(data**2, axis=0))
    return data * inv_norm

def calc_std_normal_variate(data):
    """
    Standardize each column of data to zero mean and unit variance.

    d'[i,j] = (d[i,j] - mean(d[:,j])) / std(d[:,j])

    data - n x m array
    return - n x m array of standard normal variates
    """
    mu = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    return (data - mu) / sigma

def linear_fit(b):
    """
    Least-squares fit of a straight line (versus sample index 0..n-1) to
    each column of b.

    b - n x p array
    return - list [x - 2 x p array: row 0 is the slope, row 1 the
                       intercept, one column per column of b,
                   e - n x p array of residuals b - A x]
    """
    pts = b.shape[0]
    # Design matrix: first column is the sample index (slope term),
    # second is all ones (intercept term).
    A = np.array([np.arange(pts), np.ones(pts)]).transpose()
    # Bug fix: `lstsq` was an unresolved name (NameError at call time);
    # use the numpy.linalg implementation explicitly. rcond=None selects
    # numpy's modern default cutoff for small singular values.
    x = np.linalg.lstsq(A, b, rcond=None)[0]
    # Residuals via plain array algebra (np.matrix is deprecated).
    e = b - A.dot(x)
    return [x, e]

def calc_regression(A, b):
    """
    Calculate the linear regression coefficients, x, from
    the linear equations Ax=b.

    A - n x m array (coefficient matrix)
    b - n x p array (fit vector)
    return - list [x - m x p array (fit parameters),
                   e - n x p array (residuals b - A x)]
    """
    # Bug fix: `lstsq` was an unresolved name (NameError at call time);
    # qualify it as np.linalg.lstsq. rcond=None selects numpy's modern
    # default cutoff for small singular values.
    x = np.linalg.lstsq(A, b, rcond=None)[0]
    # Residuals via plain array algebra (np.matrix is deprecated).
    e = b - A.dot(x)
    return [x, e]

def PCA(data, N=4):
    """
    Run principal component analysis on data using mdp's PCANode, keeping
    the first N components.

    data - [m x n] np array (rows are observations, per mdp convention —
           TODO confirm against callers)
    N - integer number of principal components to keep (default 4)
    return - list of two arrays:
             [0] - output of pca.get_projmatrix()
             [1] - output of pca.execute(data) (data projected onto the
                   retained components)

    NOTE(review): the local names look mislabeled — in mdp,
    get_projmatrix() returns the projection matrix (principal axes), not
    eigenvalues, and execute() returns the projected data, not
    eigenvectors. Confirm against the mdp PCANode documentation before
    relying on these names.
    """
    pca = PCANode(output_dim=N)
    pca.train(data)
    # stop_training() finalizes the node; results are queried only after it.
    pca.stop_training()
    eigenvalues = pca.get_projmatrix()
    eigenvectors = pca.execute(data)
    return [eigenvalues,eigenvectors]

def calc_correlation(data1, data2):
    """
    Circular cross-correlation of the columns of data1 with data2,
    computed via the FFT correlation theorem.

    The result is not normalized: it is exact at lag 0 and deviates from
    the true (linear) correlation with increasing lag, because the FFT
    wraps around.

    data1 - [m x n] np array
    data2 - [m x n] np array
    return - [m x n] complex np array of correlations per lag
    """
    spectrum1 = np.fft.fft(data1, axis=0)
    spectrum2 = np.fft.fft(data2, axis=0)
    return np.fft.ifft(spectrum1 * np.conj(spectrum2), axis=0)

def calc_cumulative(data):
    """
    Running (cumulative) sum of each column, from the first row down to
    the current row.

    data - n x m array
    return - n x m float64 array; row i is the sum of rows 0..i
    """
    # np.cumsum replaces the explicit Python accumulation loop;
    # dtype='f8' matches the original accumulator's precision. Unlike the
    # original, this also handles a 0-row input without an IndexError.
    return np.cumsum(data, axis=0, dtype='f8')

def calc_exp_moving_average(data, window_size, exponent):
    """
    Exponentially weighted moving average of the columns of data over a
    trailing window of window_size rows.

    data - n x m array of time series data (e.g. stock price)
    window_size - integer size of the moving window (rows/days)
    exponent - weighting exponent of the exponential average
    return - (n - window_size) x m float32 array of averaged data; output
             row k is the average of input rows k..k+window_size-1

    NOTE(review): weight index 0 pairs with the OLDEST row in the window
    and carries exp(e*window_size), so for exponent > 0 old samples are
    weighted most heavily — confirm that orientation is intended (a
    conventional EMA weights recent samples most).
    NOTE(review): window data[i-window_size:i] excludes row i, so the
    last input row never contributes to any output — confirm this lag is
    intended.
    """
    e = float(exponent)
    weight = np.exp(e * np.arange(window_size, 0, -1))
    ma = np.zeros(data.shape, dtype='f4')
    # Vectorized over columns: one dot product per time step instead of
    # a Python loop over every column (same sums, same f4 storage).
    for i in range(window_size, data.shape[0]):
        ma[i] = np.dot(weight, data[i - window_size:i])
    ma = ma / np.sum(weight)
    # Rows 0..window_size-1 were never filled (still zero); drop them.
    return ma[window_size:]
