import sys
# Make the shared ../utils modules importable before the local imports below.
sys.path.append("../utils")
# INI-style configuration file read by every analysis routine in this file.
config_path = "./config.cfg"

import ConfigParser
import time
import datetime as dt
import operator

import matplotlib.pyplot as plt
import numpy as np

import get_data
import pls_model

from metrics import pls_simple

from metrics import pca_svd as pca
from metrics import pca_nipals


def finding_order(num_of_samples):
    colors = ("#fce94f", "#fcaf3e", "#e9b96e", "#8ae234", "#729fcf", "#ad7fa8", "#ef2929")
    start_time = time.time()
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    turbines = config.get('options', 'turbines').split(',')

    bin_path = config.get("options", "bin_path")
    h5file = config.get("options", "h5file")
    f_h5 = bin_path + h5file
    data_sets = get_data.from_turbines(turbines, f_h5)

    key_list = data_sets[0].dtype.names
    y_para = "GearBearTempAvg"
    para_sets = pls_model.get_temp_set(key_list, y_para)

    zero_days = [dt.datetime(2011,1,1), dt.datetime(2011,2,1),
            dt.datetime(2011,3,1), dt.datetime(2011,4,1),
            dt.datetime(2011,5,1), dt.datetime(2011,6,1)]
    for k, day_zero in enumerate(zero_days):
        print day_zero
        rms_pts = []
        for para_set in para_sets:
            for j, data in enumerate(data_sets):
                out = pls_model.set_up(data, len(para_set['x_para']), para_set,
                        stop_day=day_zero, length_of_model_period=num_of_samples)
                rms_pts.append(out['rmsept'])
        rms_m = np.array(rms_pts[0])
        for rms in rms_pts[1:]:
            rms_m = np.vstack((rms_m, rms))
        plt.plot(range(1, len(para_set['x_para']) + 1), rms_m.mean(axis=0),
                'o', color=colors[k], label=str(day_zero)[:10])
    plt.xlabel('PCs')
    plt.ylabel('RMSEP')
    plt.title('Determining the number of principal components')
    plt.legend(loc=1, fancybox=True)
    print "Done in:",
    print (time.time() - start_time)
    plt.show()
    return

colors = ("#fce94f", "#fcaf3e", "#e9b96e", "#8ae234", "#729fcf", "#ad7fa8", "#ef2929")

def finding_model_samples():
    start_time = time.time()
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    turbines = config.get('options', 'turbines').split(',')
    bin_path = config.get("options", "bin_path")
    h5file = config.get("options", "h5file")
    f_h5 = bin_path + h5file
    data_sets = get_data.from_turbines(turbines, f_h5)

    key_list = data_sets[0].dtype.names
    y_para = "GearBearTempAvg"
    para_sets = pls_model.get_temp_set(key_list, y_para)

    zero_days = [dt.datetime(2011,1,1), dt.datetime(2011,2,1),
            dt.datetime(2011,3,1), dt.datetime(2011,4,1),
            dt.datetime(2011,5,1), dt.datetime(2011,6,1)]
    #zero_days = [dt.datetime(2010,2,1), dt.datetime(2010,3,1),
    #        dt.datetime(2010,4,1), dt.datetime(2010,5,1)]
    para_set = para_sets[0]
    model_periods = range(100,3000,100)
    for k, day_zero in enumerate(zero_days):
        to_plot = []
        print day_zero
        for model_period in model_periods:
            for para_set in para_sets:
                rmsepts = []
                for j, data in enumerate(data_sets):
                    out = pls_model.set_up(data, len(para_set['x_para']), para_set,
                            stop_day=day_zero, length_of_model_period=model_period)
                    rmsepts.append(out['RMSEPT'])
            to_plot.append((model_period, np.mean(rmsepts)))
        for i, pld in enumerate(to_plot):
            if i == 0:
                plt.plot(pld[0], pld[1], 'o', color=colors[k], label=str(day_zero)[:10])
            else:
                plt.plot(pld[0], pld[1], 'o', color=colors[k])

    plt.xlabel('Samples')
    plt.ylabel('RMSEP')
    plt.title('RMSEP depending on sample size with different starting days')
    plt.legend(loc=1, fancybox=True)
    print "Done in:",
    print (time.time() - start_time)
    plt.show()

def finding_prediction_period(num_of_model_samples, pls_order):
    start_time = time.time()
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    turbines = config.get('options', 'turbines').split(',')
    bin_path = config.get("options", "bin_path")
    h5file = config.get("options", "h5file")
    f_h5 = bin_path + h5file
    data_sets = get_data.from_turbines(turbines, f_h5)

    key_list = data_sets[0].dtype.names
    y_para = "GearBearTempAvg"
    para_sets = pls_model.get_temp_set(key_list, y_para)

    pred_range = range(10,100,5)
    zero_days = [dt.datetime(2011,1,1), dt.datetime(2011,2,1),
            dt.datetime(2011,3,1), dt.datetime(2011,4,1),
            dt.datetime(2011,5,1), dt.datetime(2011,6,1)]
    for k, day_zero in enumerate(zero_days):
        to_plot = []
        print day_zero
        for pred in pred_range:
            for para_set in para_sets:
                rmsepts = []
                for data in data_sets:
                    out = pls_model.set_up(data, pls_order, para_set, stop_day=day_zero, length_of_model_period=num_of_model_samples, pred_days=pred)
                    rmsepts.append(out['RMSEPT'])
            to_plot.append((pred, np.mean(rmsepts)))
        for i, pld in enumerate(to_plot):
            if i == 0:
                plt.plot(pld[0], pld[1], 'o', color=colors[k], label=str(day_zero)[:10])
            else:
                plt.plot(pld[0], pld[1], 'o', color=colors[k])
    plt.xlabel('Prediction length [days]')
    plt.ylabel('RMSEP')
    plt.title('RMSEP depending on the prediction length')
    plt.legend(loc=1, fancybox=True)
    print "Done in:",
    print (time.time() - start_time)
    plt.show()
    return

def reduce_model_order():
    """Experimental scratchpad for reducing the PLS model order.

    NOTE(review): the bare ``return`` right after the key-list printout
    makes everything below it dead code.  The dead remainder holds three
    abandoned experiments, kept for reference: a block-wise recursive PLS
    (RPLS) update, a two-model merge, and backward elimination of the
    x-parameter with the smallest |b| coefficient.
    """
    start_time = time.time()
    # Standard boilerplate: turbine names and HDF5 location from the config.
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    turbines = config.get('options', 'turbines').split(',')
    bin_path = config.get("options", "bin_path")
    h5file = config.get("options", "h5file")
    f_h5 = bin_path + h5file
    data_sets = get_data.from_turbines(turbines, f_h5)

    key_list = data_sets[0].dtype.names
    print key_list
    print len(key_list)
    # Early exit -- everything below is currently disabled dead code.
    return
    y_para = "GearBearTempAvg"
    para_sets = pls_model.get_temp_set(key_list, y_para)
    x_para = para_sets[0]['x_para']
    pred = 2
    num_of_model_samples = 3000
    # NOTE(review): this list is assigned twice with identical contents.
    zero_days = [dt.datetime(2011,1,1), dt.datetime(2010,2,1),
            dt.datetime(2010,3,1), dt.datetime(2010,4,1),
            dt.datetime(2010,5,1), dt.datetime(2010,6,1)]

    zero_days = [dt.datetime(2011,1,1), dt.datetime(2010,2,1),
            dt.datetime(2010,3,1), dt.datetime(2010,4,1),
            dt.datetime(2010,5,1), dt.datetime(2010,6,1)]

    pls_order = len(para_sets[0]['x_para'])
    data = data_sets[0]
    para_set = para_sets[0]
# Initialization of RPLS
    out = pls_model.set_up(data, pls_order, para_set, stop_day=zero_days[-1], length_of_model_period=num_of_model_samples, pred_days=pred)
# New data set available
    b =  out['b']
    print "b0: ",
    print b
    means = out['xmean'], out['ymean']
# New model structure to do RPLS
    # Restrict to the normal operating range and a fixed time window.
    over_filter = ["(ActivePowerAvg >= 200)", "(ActivePowerAvg < 2200)"]
    start_day = dt.datetime(2011,1,1)
    stop_day = dt.datetime(2011,6,1)
    time_range = ["(Time >= " + str(date2num(start_day)) + ")", "(Time < " + str(date2num(stop_day)) + ")"]
    # Block size in samples
    block_size = 500
    filt = over_filter + time_range
    dat = get_data.filter_h5data(data, filt)
    # Split the filtered data into consecutive, equally sized blocks.
    num_of_blocks = len(dat)/block_size
    low_idxs = range(0, block_size * (num_of_blocks), block_size)
    upper_idxs = [low_idx + block_size for low_idx in low_idxs]
    #print upper_idxs
    print len(dat)
    x_para = para_sets[0]['x_para']
    y_para = para_sets[0]['y_para']
    print x_para
    print y_para
    pls_order = 4
    # DO PLS on full set to see if it gives the same result
    from sklearn.preprocessing import scale
    samples = dat[low_idxs[0]:upper_idxs[-1]]
    print len(samples)
    X0 = util.get_specific_data(samples, x_para)
    Y0 = util.get_specific_data(samples, y_para)
    X0_m = np.mean(X0)
    Y0_m = np.mean(Y0)
    #X0 = X0 - X0_m
    #Y0 = Y0 - Y0_m
    X0 = scale(X0)
    Y0 = scale(Y0)
    # Reference model fitted on all blocks at once.
    out = pls_simple(X0, Y0, pls_order)
    b_all = out['b']
    # STEP 1
    # Fit the initial model on the first block only.
    samples = dat[low_idxs[0]:upper_idxs[0]] 
    X0 = util.get_specific_data(samples, x_para)
    Y0 = util.get_specific_data(samples, y_para)
    X0_m = np.mean(X0)
    Y0_m = np.mean(Y0)
    #X0 = X0 - X0_m
    #Y0 = Y0 - Y0_m
    X0 = scale(X0)
    Y0 = scale(Y0)
    out = pls_simple(X0, Y0, pls_order)
    P0 = out['P']
    b0 = out['b']
    B0 = np.diag(b0[0])
    Q0 = out['Q']

    B = B0
    P = P0
    Q = Q0

    # RPLS update: fold each following block into the model by stacking the
    # previous loadings (P, B*Q) on top of the new scaled block.
    for low, upper in zip(low_idxs[1:], upper_idxs[1:]):
        samples = dat[low:upper]
        X1 = util.get_specific_data(samples, x_para)
        Y1 = util.get_specific_data(samples, y_para)
        # Scale it as in step 1
        #X1 = X1 - X0_m
        #Y1 = Y1 - Y0_m
        X1 = scale(X1)
        Y1 = scale(Y1)
        # Construct new X and Y
        X = np.concatenate((P, X1))
        Y = np.concatenate((np.dot(B, Q), Y1))
        #X_m = np.mean(X)
        #Y_m = np.mean(Y)
        #X = X - X_m
        #Y = Y - Y_m
        out = pls_simple(X, Y, pls_order)

        P0 = out['P']
        b0 = out['b']
        B0 = np.diag(b0[0])
        Q0 = out['Q']

        B = B0
        P = P0
        Q = Q0
        print b0
    # Compare the recursively updated coefficients with the reference fit.
    print "full"
    print b_all
    # Second early exit -- the model-merge experiment below is unreachable.
    return
    # NOTE(review): `out1` is read before it is ever assigned; this loop
    # would raise NameError if the return above were removed.
    for days in zero_days[1:]:
        P = out['P']
        Q = out['Q']
        b = out['b'][0]
        B = np.diag(b)
        P1 = out1['P']
        Q1 = out1['Q']
        b1 = out1['b'][0]
        B1 = np.diag(b1)
        Y0 = np.dot(B, Q)
        Y1 = np.dot(B1, Q1)
        Y = np.concatenate((Y0, Y1))
        X = np.concatenate((P.T, P1.T))
        out2 = pls_simple(X, Y, pls_order)
        b1 = out1['b']
        b2 = out2['b']
        print "b1: ",
        print b1
        print "b2: ",
        print b2
        out = out1
        out1 = out2
    print para_set['x_para']
    #out1 = pls_model.set_up2(data, pls_order, para_set, means, stop_day=days, length_of_model_period=num_of_model_samples, pred_days=pred)
    # Third early exit -- the backward-elimination experiment is unreachable.
    return
    # Backward elimination: find the x-parameter whose |b| coefficient is
    # smallest in a 7-component model, then drop it from the parameter set.
    for day_zero in zero_days[0:1]:
        out = pls_model.set_up(data, 7, para_set, stop_day=day_zero, length_of_model_period=num_of_model_samples, pred_days=pred)
        #print abs(out['b'])
        #print min(abs(out['b'][0]))
        min_index, min_value = min(enumerate(abs(out['b'][0])), key=operator.itemgetter(1))
        #print min_index
        #print min_value
        #tot_var = sum(out['varx'])
        #plt.plot(np.cumsum(out['varx'] / tot_var * 100), 'o')
        #plt.plot(out['varx'], 'o')
    #plt.show()
    del(para_sets[0]['x_para'][min_index])
    #print para_sets[0]['x_para']
    print "Done in:",
    print (time.time() - start_time)
    return

from matplotlib.dates import date2num
import utilsabbo as util

def princomp(A, numpc=0):
    """Principal component analysis via the covariance eigendecomposition.

    Parameters
    ----------
    A : 2-D array, shape (n_samples, n_features)
        Data matrix with one observation per row.
    numpc : int
        Number of principal components to keep.  Values outside the
        range [0, n_features) keep all components.

    Returns
    -------
    coeff : eigenvectors (loadings), one column per kept component,
        ordered by decreasing eigenvalue.
    score : projection of the centred data onto the kept components,
        shape (numpc, n_samples).
    latent : all eigenvalues, sorted in decreasing order.
    mean : per-feature mean used for centring.
    """
    mean = np.mean(A.T, axis=1)
    M = (A - mean).T  # centred data, one feature per row
    [latent, coeff] = np.linalg.eig(np.cov(M))
    p = np.size(coeff, axis=1)
    # eig gives eigenpairs in no particular order; sort by decreasing eigenvalue.
    idx = np.argsort(latent)
    idx = idx[::-1]
    coeff = coeff[:, idx]
    latent = latent[idx]
    # BUG FIX: the original guard was `numpc < p or numpc >= 0`, which is
    # true for every non-negative numpc and raises IndexError when
    # numpc > p; the intended condition is the conjunction 0 <= numpc < p.
    if 0 <= numpc < p:
        coeff = coeff[:, :numpc]  # keep only the leading components
    score = np.dot(coeff.T, M)  # projection of the data in the new space
    return coeff, score, latent, mean


def find_num_of_pca():
    start_time = time.time()
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    turbines = config.get('options', 'turbines').split(',')
    bin_path = config.get("options", "bin_path")
    h5file = config.get("options", "h5file")
    f_h5 = bin_path + h5file
    data_sets = get_data.from_turbines(turbines, f_h5)
    zero_days = [dt.datetime(2011,1,1), dt.datetime(2010,2,1),
            dt.datetime(2010,3,1), dt.datetime(2010,4,1),
            dt.datetime(2010,5,1), dt.datetime(2010,6,1)]
    stop_day = zero_days[0]
    data_tab = data_sets[0]
    over_filter = ["(ActivePowerAvg >= 200)", "(ActivePowerAvg < 2200)"]
    filt = over_filter + ["(Time < " + str(date2num(stop_day)) + ")"]
    xdat = get_data.filter_h5data(data_tab, filt, startnum=0, steplen=2)
    key_list = data_sets[0].dtype.names
    temp_keys = pls_model.get_temp_keys(key_list)
    xdata = util.get_specific_data(xdat, temp_keys)
    num_of_model_samples = 500
    xdat = xdata[-num_of_model_samples:]
    #tt, pp, pr, eigs = pca(xdat)
    dist = []
    full_pc = np.size(xdat,axis=1)
    for numpc in range(0, full_pc + 1, 1):
        coeff, score, latent, mean = princomp(xdat, numpc)
        Ar = np.dot(coeff, score).T + np.mean(xdat, axis=0)
        dist.append(np.linalg.norm(xdat - Ar, 'fro'))
    perc = np.cumsum(latent) / sum(latent)
    print len(dist)
    scale = 1 / (float(num_of_model_samples * 20))
    dist = [di*scale for di in dist]
    print scale
    #dist
    #dist = dist / max(dist)
    plt.plot(range(len(perc)), perc, 'o', label="Sum eigenvalues")
    plt.plot(range(0,full_pc + 1, 1), dist,'ro', label="PRESS")
    plt.xlabel("Number of eigenvalues")
    plt.axis([0,len(perc),0,1.1])
    plt.legend()
    print "Done in:",
    print (time.time() - start_time)
    plt.show()
    return

def run_pca():
    start_time = time.time()
    config = ConfigParser.ConfigParser()
    config.read(config_path)
    turbines = config.get('options', 'turbines').split(',')
    bin_path = config.get("options", "bin_path")
    h5file = config.get("options", "h5file")
    f_h5 = bin_path + h5file
    data_sets = get_data.from_turbines(turbines, f_h5)
    zero_days = [dt.datetime(2011,1,1), dt.datetime(2011,2,1)]
    zero_days = [dt.datetime(2010,4,1), dt.datetime(2010,3,1), dt.datetime(2010,4,1), dt.datetime(2010,5,1), dt.datetime(2010,6,1), dt.datetime(2010,7,1)]
    stop_day = zero_days[0]
    data_tab = data_sets[0]
    num_of_model_samples = 500
    print stop_day
    over_filter = ["(ActivePowerAvg >= 1800)", "(ActivePowerAvg < 2200)"]
    for j, data_tab in enumerate(data_sets[0:6]):
        filt = over_filter + ["(Time < " + str(date2num(stop_day)) + ")"]
        xdat = get_data.filter_h5data(data_tab, filt, startnum=0, steplen=1)
        key_list = data_sets[0].dtype.names
        temp_keys = pls_model.get_temp_keys(key_list)
        temp_keys = tuple(['GearOilTempAvg', 'GeneratorBearTempAvg', 'HydraulicOilTempAvg', 'NacelleTempAvg'])
        xdata = util.get_specific_data(xdat, temp_keys)
        xdat = xdata[-num_of_model_samples:]
        coeff, score, latent, mean = princomp(xdat, 4)
        #plt.plot(score[0], score[1], 'o', label=turbines[i])
        # Start predicting the future to see if it fits the other
        f, axes = plt.subplots(1, sharex=True, sharey=True)
        ax = axes
        ax.set_title(turbines[j])
        for i, data_tab in enumerate(data_sets[0:6]):
            filt = over_filter + ["(Time < " + str(date2num(stop_day)) + ")"]
            xdat = get_data.filter_h5data(data_tab, filt, startnum=0, steplen=1)
            A = util.get_specific_data(xdat, temp_keys)
            A = A[-num_of_model_samples:]
            M = (A - mean).T
            pred = np.dot(coeff.T, M)
            ax.plot(pred[0], pred[1], 'o', label=turbines[i])
        ax.legend()
    plt.show()
    return

# Leftover reference snippets from princomp()/run_pca() development; this
# bare string literal is evaluated and discarded at import time.
"""
    Ar = np.dot(coeff, score).T + np.mean(xdat, axis=0)
    M = (A - mean).T 
    score = np.dot(coeff.T, M) # projection of the data in the new space
"""

def main():
    """Entry point: runs exactly one analysis routine.

    The commented-out calls are the alternative experiments; enable one
    at a time.
    """
    #finding_model_samples()
    #finding_order(1500)
    #finding_prediction_period(1500, 4)
    reduce_model_order()
    #find_num_of_pca()
    #run_pca()
    #test()
    return

# Script entry point.
if __name__=='__main__':
    main()
