import sys
sys.path.append("../reading_data")


import logging
from logging import debug
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)




import time
import datetime as dt
import re

from collections import defaultdict

import numpy as np

import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from matplotlib.dates import num2date
from metrics import pls
from numpy.lib import recfunctions


from read_from_h5 import load_data


def date_idx(data, start_date, stop_date):
    """Return indices of rows whose 'time' lies in (start_date, stop_date].

    Parameters
    ----------
    data : structured ndarray with a 'time' field (matplotlib date numbers).
    start_date, stop_date : values comparable to the 'time' entries.

    Returns
    -------
    tuple of ndarray, as produced by ``np.where`` -- usable directly as an
    index into ``data``.
    """
    times = data['time']
    # Vectorized comparison instead of the original Python-level
    # per-row loop; same boolean mask, evaluated in C.
    return np.where((times > start_date) & (times <= stop_date))


def get_specific_data(data, parameters):
    """Stack the named fields of *data* into a plain 2-D array.

    Parameters
    ----------
    data : structured ndarray.
    parameters : non-empty sequence of field names.

    Returns
    -------
    (len(data), len(parameters)) ndarray with columns in *parameters* order.
    """
    # One-pass column_stack replaces the original repeated np.hstack,
    # which re-copied the whole growing array for every added field.
    return np.column_stack([data[para] for para in parameters])


def get_clean_array(data, sel_tbs=None):
    """Convert selected fields of a structured array to a plain 2-D array.

    Parameters
    ----------
    data : structured ndarray.
    sel_tbs : optional sequence of field names; a falsy value selects all
        fields in declaration order.

    Returns
    -------
    (len(data), n_fields) ndarray with columns in selection order.
    """
    if not sel_tbs:
        sel_tbs = data.dtype.names
    # One-pass column_stack replaces the original np.append loop, which
    # re-copied the whole array for every added column.
    return np.column_stack([data[tb] for tb in sel_tbs])


def remove_nans(arr):
    """Return a copy of *arr* (2-D) with every row containing a NaN dropped."""
    row_has_nan = np.isnan(arr).any(1)
    return arr[~row_has_nan].copy()


def generate_residual(data_sets, pls_outs, turbines,
        para_set, start_res_day, num_of_res_days=dt.timedelta(days=15)):
    """Compute residuals for every (model turbine, data turbine) pair.

    Parameters
    ----------
    data_sets : list of structured arrays, one per turbine (aligned with
        *turbines*), each with a 'time' field plus measurement fields.
    pls_outs : list of PLS model dicts (aligned with *turbines*) holding
        at least 'b', 'xmean' and 'ymean' as used by ``predict_pls``.
    turbines : turbine names, aligned with *data_sets* and *pls_outs*.
    para_set : dict with 'x_para' (model inputs) and 'y_para' (outputs).
    start_res_day : datetime where the residual window starts.
    num_of_res_days : length of the residual window.

    Returns
    -------
    dict keyed by model turbine; each value maps '<turb>-Residual',
    '<turb>-time', '<turb>-Yreal' and '<turb>-Ypred' for every data turbine.
    """
    x_parameters = para_set['x_para']
    y_parameters = para_set['y_para']
    stop_res_day = start_res_day + num_of_res_days
    res_dict = {}
    for i, turbine in enumerate(turbines):
        res_dict2 = {}
        for j, data in enumerate(data_sets):
            # Rows of turbine j's data inside the residual window.
            res_data = data[date_idx(data, date2num(start_res_day), \
                    date2num(stop_res_day))]
            xres = get_specific_data(res_data, x_parameters)
            yres = get_specific_data(res_data, y_parameters)
            # Predict turbine j's output with turbine i's model.
            ypred = predict_pls(xres,
                    pls_outs[i]['b'], pls_outs[i]['xmean'],
                    pls_outs[i]['ymean'])
            res = yres - ypred
            if len(res) != 0:
                # NOTE(review): Residual/time drop rows where the residual
                # has NaNs, but Yreal/Ypred are filtered by their own NaN
                # patterns -- these masks can differ; confirm intended.
                res_dict2[turbines[j] + '-Residual'] = remove_nans(res)
                res_dict2[turbines[j] + '-time'] = \
                        np.copy(res_data['time'][~np.isnan(res).any(1)])
                res_dict2[turbines[j] + '-Yreal'] = remove_nans(yres)
                res_dict2[turbines[j] + '-Ypred'] = remove_nans(ypred)
            else:
                # Empty window: store the empty arrays unchanged.
                res_dict2[turbines[j] + '-Residual'] = res
                res_dict2[turbines[j] + '-time'] = res_data['time']
                res_dict2[turbines[j] + '-Yreal'] = yres
                res_dict2[turbines[j] + '-Ypred'] = ypred


        res_dict[turbine] = res_dict2
    return res_dict


def residual_evaluation(pls_outs, res_dict, turbines, threshold):
    """
    for pls_out in pls_outs:
        pls_out['vari'] = np.var(pls_out['predictions'] - pls_out['ymean'])
    """

    for i, turbine in enumerate(turbines):
        res = res_dict[turbine]
        #vari = pls_outs[i]['vari']
        res1 = res[turbine + '-Residual']
        mean = np.mean(res1)
        var = np.var(res1)
        std = np.std(res1)
        my0 = mean
        my1 = my0 + std * 6
        for turb in turbines:
            #print turb,
            z = res[turb + '-Residual']
            #print np.mean(z),
            #print np.var(z)
            cusum = (my1 - my0) / (var) * (z - (my0 + my1) / (2))
            res[turb + '-Cusum'] = cusum
            cumsum = np.cumsum(cusum)
            lastval = 0
            mindst = []

            for val in cumsum:
                mindst.append(min(val, lastval))
                lastval = mindst[-1]
            gumsum = cumsum - mindst
            res[turb + '-Gusum'] = gumsum

            bin_gu = []
            for gu in gumsum:
                if gu >= threshold:
                    bin_gu.append(1)
                else:
                    bin_gu.append(0)
            res[turb + '-Faultbin'] = bin_gu
    return res_dict


def residual_voting(res_dict, turbines, turbines_voting):
    """Combine the fault votes cast by each voting turbine.

    For every monitored turbine the '-Faultbin' series recorded under each
    voter's residual dict are summed element-wise into '<turbine>-Vote'.
    '<turbine>-time' is taken from the first voter.  '<turbine>-Faulty' is
    set to 1 when the peak vote count exceeds half the number of voters
    (0 otherwise, including the empty-series case).
    """
    voting_dict = {}
    for turbine in turbines:
        first_voter = turbines_voting[0]
        vote = res_dict[first_voter][turbine + '-Faultbin']
        tid = res_dict[first_voter][turbine + '-time']
        for voter in turbines_voting[1:]:
            vote = np.add(vote, res_dict[voter][turbine + '-Faultbin'])
        voting_dict[turbine + '-Vote'] = vote
        voting_dict[turbine + '-time'] = tid
        is_faulty = 0
        if len(vote) > 0 and max(vote) > 0.5 * len(turbines_voting):
            is_faulty = 1
        voting_dict[turbine + '-Faulty'] = is_faulty
    return voting_dict


def get_red_color(i):
    """Map a vote count *i* to a hex color from a 9-step red scale.

    Index 0 is the lightest red; indices past the end of the scale clamp
    to the darkest entry.
    """
    from matplotlib.colors import rgb2hex
    color_string = "255, 245, 240;\
            254, 224, 210;\
            252, 187, 161;\
            252, 146, 114;\
            251, 106, 74;\
            239, 59, 44;\
            203, 24, 29;\
            165, 15, 21;\
            103, 0, 13;"
    # Trailing ';' leaves one empty entry -- drop it.
    colors = color_string.split(";")[0:-1]
    # Clamp so large vote counts map to the darkest red.
    idx = min(i, len(colors) - 1)
    channels = [float(chan) / 255 for chan in colors[idx].split(",")]
    return rgb2hex(channels)


def evaluate_votes(voting_dicts):
    """Plot the combined vote history of every turbine on one time axis.

    Parameters
    ----------
    voting_dicts : dict mapping a period start (date number) to the voting
        dict produced by ``residual_voting`` for that period.

    Concatenates the per-period series for each turbine, then draws one
    marker at most every 5 days, colored by the vote count (darker red =
    more votes).  Python 2 only (``dict.iteritems``).  Creates a new
    figure as a side effect; returns None.
    """
    combined_voting_dict = defaultdict(lambda: defaultdict(list))
    for tid, voting_dict in sorted(voting_dicts.iteritems()):
        for key, val in sorted(voting_dict.iteritems()):
            # Keys look like '<turbine>-<measurement>'.
            turbine = key.split('-')[0]
            meas_type = key.split('-')[1]
            if meas_type == "Faulty":
                # NOTE(review): repeats the scalar 'Faulty' flag once per
                # already-collected "Vote" sample.  Because "Faulty" sorts
                # before "Vote", the repeated count comes from the
                # *previous* periods' samples -- verify this alignment is
                # intended.
                combined_voting_dict[turbine][meas_type].extend(\
                    [val for i in combined_voting_dict[turbine]["Vote"]])
                continue
            combined_voting_dict[turbine][meas_type].extend(val)

    #num_of_turbines = len(combined_voting_dict)
    turbines = []
    fig, axs = plt.subplots(1, 1)
    for turbine, vote_dict in sorted(combined_voting_dict.iteritems()):
        turbines.append(turbine)
        old_time = vote_dict["time"][0]
        # Draw at most one marker per 5-day stride to keep the plot sparse.
        next_time = date2num(num2date(old_time) + dt.timedelta(days=5))
        for i, tid in enumerate(vote_dict["time"]):
            if tid >= next_time:
                next_time = date2num(num2date(tid) + dt.timedelta(days=5))
                vote = vote_dict["Vote"][i]
                color = get_red_color(vote)
                axs.plot(num2date(tid),
                        len(turbines) - 1,
                        color=color,
                        ms=12,
                        marker='o')
            else:
                pass

    # One y row per turbine, labelled 1..N; hide top/right spines for a
    # cleaner look.
    axs.set_yticks(range(len(turbines)))
    axs.set_yticklabels(range(1, len(turbines) + 1))
    plt.ylabel('Turbine')
    axs.set_ymargin(0.5)
    axs.set_ylim((-0.5, len(turbines) - 0.5))
    axs.spines['top'].set_visible(False)
    axs.spines['right'].set_visible(False)
    axs.get_xaxis().tick_bottom()
    axs.get_yaxis().tick_left()
    fig.autofmt_xdate()
    return


def pls_set_up(data, pls_order, para_set,
        start_day=dt.datetime(2011, 02, 01), turbine=None, num_of_model_samples=1500):
    """Build a PLS model around *start_day*, searching backwards for data.

    Parameters
    ----------
    data : structured array for one turbine ('time' plus channel fields).
    pls_order : number of components passed to ``pls``.
    para_set : dict with 'x_para' / 'y_para' channel name lists.
    start_day : nominal anchor day; the whole window slides one day into
        the past per failed attempt.
    turbine : optional name, used only in the progress print.
    num_of_model_samples : cap on training rows (most recent kept).

    Returns
    -------
    The dict returned by ``pls`` with 'xmean' / 'ymean' (training means,
    later used by ``predict_pls``) added.  Python 2 only (octal date
    literals, print statements).
    """
    fake_start_day = start_day + dt.timedelta(days=1)
    num_of_steps = 0
    num_of_iter = 0
    x_parameters = para_set['x_para']
    y_parameters = para_set['y_para']
    # This keeps going back in time until a period of enough data is found
    while 1:
        mod_day = fake_start_day - dt.timedelta(days=1)
        #model_days = [mod_day, mod_day + dt.timedelta(days=10),
        #mod_day + dt.timedelta(days=10), mod_day + dt.timedelta(days=15)]
        model_days = [mod_day + dt.timedelta(days=5),   # Start model
                mod_day + dt.timedelta(days=15),        # Stop model
                mod_day,                                # Start xval model
                mod_day + dt.timedelta(days=5),         # Stop xval model
                mod_day + dt.timedelta(days=15),        # Start predicting
                mod_day + dt.timedelta(days=25)]        # Stop predicting

        xdat = data[date_idx(data,
            date2num(model_days[0]),
            date2num(model_days[1]))]
        xvaldat = data[date_idx(data,
            date2num(model_days[2]),
            date2num(model_days[3]))]
        xpreddat = data[date_idx(data,
            date2num(model_days[4]),
            date2num(model_days[5]))]

        model_data = [xdat, xvaldat, xpreddat]

        # Find the x and y data to use for the PLS
        xydata = get_specific_data(model_data[0],
                x_parameters + y_parameters)
        xyval = get_specific_data(model_data[1],
                x_parameters + y_parameters)
        xypred = get_specific_data(model_data[2],
                x_parameters + y_parameters)
        num_of_steps += 1
        # Require enough rows both before and after NaN removal; otherwise
        # slide the window one day further into the past and retry.
        if len(xydata) >= 200 and len(xyval) >= 100 and len(xypred) >= 1:
            xydata = remove_nans(xydata)
            xyval = remove_nans(xyval)
            xypred = remove_nans(xypred)
            if len(xydata) >= 200 and len(xyval) >= 100:
                # y occupies the last len(y_parameters) columns of the
                # combined arrays; x is everything before them.
                xdata = xydata[-num_of_model_samples:, 0:-len(y_parameters)]
                ydata = xydata[-num_of_model_samples:, -len(y_parameters):]
                xval = xyval[:, 0:-len(y_parameters)]
                yval = xyval[:, -len(y_parameters):]
                xpred = xypred[:, 0:-len(y_parameters)]
                ypred = xypred[:, -len(y_parameters):]
                if num_of_iter >= 2:
                    print turbine,
                    print " find good data in:",
                    print num_of_iter
                break
        num_of_iter += 1
        fake_start_day = mod_day
    # Row mask stacked alongside the data: 0 for training rows, 1 for
    # validation rows, 2 for prediction rows -- presumably how ``pls``
    # tells the partitions apart; confirm against metrics.pls.
    mskdata = np.zeros((len(xdata), 1))
    mskval = np.ones((len(xval), 1))
    mskcross = 2*np.ones((len(xpred), 1))
    msk = np.vstack((mskdata, mskval, mskcross))
    xmodel = np.vstack((xdata, xval, xpred))
    ymodel = np.vstack((ydata, yval, ypred))
    pls_out = pls(xmodel, ymodel, msk, pls_order)
    pls_out['xmean'] = np.mean(xmodel, axis=0)
    pls_out['ymean'] = np.mean(ymodel, axis=0)
    return pls_out


def predict_pls(x, b, x_mean, y_mean):
    """Predict y for *x* with PLS coefficients *b*, centering with the
    training means *x_mean* / *y_mean*.
    """
    projection = np.dot(x, b.T)
    shifted = projection + y_mean
    return shifted - np.dot(x_mean, b.T)


def get_temp_set(key_list, y_parameter='generatorbear2tempavg'):
    """Build parameter sets for fault detection from temperature channels.

    Every key in *key_list* whose name contains 'temp' is treated as a
    temperature channel; *y_parameter* becomes the predicted output and
    all remaining temperature channels become the model inputs.

    Parameters
    ----------
    key_list : iterable of field names (e.g. ``data.dtype.names``).
    y_parameter : name of the channel to predict.

    Returns
    -------
    (para_sets, temp_keys) : *para_sets* is a one-element list holding a
        dict with 'x_para' (input channels) and 'y_para' ([y_parameter]);
        *temp_keys* is the sorted list of all temperature channels.
    """
    # Plain substring test replaces the original
    # ``re.search(r"(.*temp)?", key)``: that optional group was set
    # exactly when 'temp' occurred in the key.
    temp_keys = [key for key in sorted(key_list) if 'temp' in key]
    para_set = {
        'y_para': [y_parameter],
        'x_para': [key for key in temp_keys if key != y_parameter],
    }
    return [para_set], temp_keys


def get_data_from_turbines(turbines):
    """Load filtered measurement data for *turbines* from the HDF5 store.

    Keeps rows with active power in (100, 2200) between 2009-01-01 and
    2011-04-01, restricted to the given turbine ids.  Python 2 only
    (octal literals, print statement).
    """
    filt_strings = ["(activepoweravg > 100)",
                    "(activepoweravg < 2200)",
                    "(time >= " + \
                    str(date2num(dt.datetime(2009, 01, 01))) + ")",
                    "(time <= " + \
                    str(date2num(dt.datetime(2011, 04, 01))) + ")"]
    h5_file = "../../bindata/wh1.h5"
    turb_string = []
    for turb in turbines:
        turb_string.append("(turbid == '" + turb + "')")
    # OR together the per-turbine id clauses into a single filter term.
    or_string = "( " + " | ".join(turb_string) + " )"
    print or_string
    filt_strings.append(or_string)
    data = load_data(h5_file, filt=filt_strings)
    return data


def plot_sap_data(turbines):
    """Annotate the current plot with SAP maintenance events for *turbines*.

    Reads the SAP adjustment CSV, restricts it to Oct 2009 - Jun 2010 and
    writes 'Gear' / 'Filter' text labels at (event time, turbine row) for
    the matching RDS-PP codes.  Returns None.
    """
    import sap_data
    filepath = ['/home/andbo/work/data_software/code/gui/sap-data-adjustment-rdspp-step-3.csv']

    clean_data, data_dict = sap_data.get_data_from_csv(filepath)
    start_date = dt.datetime(2009, 10, 1)
    stop_date = dt.datetime(2010, 6, 1)
    time_range = (start_date, stop_date)
    data = sap_data.find_specific_data(data_dict, turbines, time_range)
    # SAP records use lower-case turbine ids.
    turbines = [turb.lower() for turb in turbines]
    for i, turb in enumerate(turbines):
        for j, tid in enumerate(data["time"]):
            if turb == data["turbine"][j]:
                # MDK20* codes are labelled as gear events, MDK51 as
                # filter events.
                if (data["rdspp"][j] == "MDK20TL001") or (data["rdspp"][j] == "MDK20"):
                    plt.text(tid, i, "Gear", horizontalalignment='center', rotation=30, verticalalignment='center')
                if (data["rdspp"][j] == "MDK51"):
                    plt.text(tid, i, "Filter", horizontalalignment='center', rotation=30, verticalalignment='center')
                #else:
                #    plt.text(tid, i, "X", horizontalalignment='center', rotation=30, verticalalignment='center')
    return


def find_voting_turbines(voting_dict, turbines_voting):
    """Drop turbines flagged as faulty from the voter pool.

    Parameters
    ----------
    voting_dict : dict produced by ``residual_voting``; read through the
        '<turb>-Faulty' keys (0 = healthy, 1 = faulty).
    turbines_voting : current pool of voting turbines.

    Returns
    -------
    (voting_turbs, keep_looking) : the surviving voters, and True when at
        least one voter was removed so the vote should be re-run.

    Note
    ----
    The original code compared against 2, but ``residual_voting`` only
    ever stores 0 or 1 in '-Faulty', so no voter was ever excluded and
    the re-vote loop never triggered.  Comparing against 1 matches the
    stated intent ("if a voter is faulty look again").
    """
    voting_turbs = []
    keep_looking = False
    for turb in turbines_voting:
        # If a voter is faulty, exclude it and request another round.
        if voting_dict[turb + '-Faulty'] == 1:
            keep_looking = True
        else:
            voting_turbs.append(turb)
    return voting_turbs, keep_looking


def find_model_order():
    """Tuning helper: plot mean RMSEP versus number of PLS components.

    Builds models for three turbines and three start days, stacks the
    'rmsept' curves returned through ``pls_set_up`` and plots their mean.
    Python 2 only (print statements).  Blocks in ``plt.show()``.
    """
    start_time = time.time()
    turbines = "WH1227","WH1237","WH1115"
    y_parameter = "gearbeartempavg"
    data = get_data_from_turbines(turbines)
    zero_days = [dt.datetime(2010, 1, 10), dt.datetime(2010, 2, 10), dt.datetime(2010, 3, 10)]
            #dt.datetime(2010,3,1), dt.datetime(2010,4,1),
            #dt.datetime(2010,5,1), dt.datetime(2010,6, 1)]

    # Drop the generator bearing channel so it cannot enter the model.
    data = recfunctions.drop_fields(data, "generatorbeartempavg")
    para_sets, temp_keys = get_temp_set(data.dtype.names, y_parameter)
    data_sets = []
    for turb in turbines:
        data_sets.append(data[np.where(data["turbid"] == turb)])

    fig, axs = plt.subplots(1,1)
    ax = axs

    rms_pts = []
    for k, day_zero in enumerate(zero_days):
        start_day = day_zero
        print start_day
        for para_set in para_sets:
            for j, dat in enumerate(data_sets):
                # pls_order equals the full number of inputs -- presumably
                # so 'rmsept' covers every component count; confirm
                # against metrics.pls.
                out = pls_set_up(dat, len(para_set['x_para']), para_set, start_day=day_zero)
                rms_pts.append(out['rmsept'])
    # Stack all RMSEP curves row-wise, then average over models.
    rms_m = np.array(rms_pts[0])
    for rms in rms_pts[1:]:
        rms_m = np.vstack((rms_m, rms))
    print len(para_set['x_para'])
    print rms_m
    ax.plot(range(1, len(para_set['x_para']) + 1), rms_m.mean(axis=0), 'bo')#, label=str(day_zero)[:10])
    #plt.plot(rms_m.mean(axis=0), 'o', label=str(day_zero)[:10])
    ax.set_xlabel('Number of principal components')
    ax.set_ylabel('RMSEP')
    #ax.legend(loc=1, fancybox=True)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    print "Done in:",
    print (time.time() - start_time)
    plt.show()


def find_model_samples():
    """Tuning helper: plot mean RMSEP versus training-set size.

    For sample caps 50..2450 (step 50), builds models for three turbines
    and three start days and plots the mean RMSEP per cap.  Python 2 only
    (print statements).  Blocks in ``plt.show()``.
    """
    start_time = time.time()
    turbines = "WH1227","WH1237","WH1115"
    y_parameter = "gearbeartempavg"
    data = get_data_from_turbines(turbines)
    zero_days = [dt.datetime(2010, 1, 1), dt.datetime(2010, 2, 1), dt.datetime(2010, 3, 1)]

    # Drop the generator bearing channel so it cannot enter the model.
    data = recfunctions.drop_fields(data, "generatorbeartempavg")
    para_sets, temp_keys = get_temp_set(data.dtype.names, y_parameter)
    data_sets = []
    for turb in turbines:
        data_sets.append(data[np.where(data["turbid"] == turb)])

    fig, axs = plt.subplots(1,1)
    ax = axs

    to_plot = []
    model_periods = range(50,2500,50)
    for model_period in model_periods:
        rmsepts = []
        for k, day_zero in enumerate(zero_days):
            start_day = day_zero
            print start_day
            for para_set in para_sets:
                for j, dat in enumerate(data_sets):
                    out = pls_set_up(dat, len(para_set['x_para']), para_set, start_day=day_zero, num_of_model_samples=model_period)
                    # NOTE(review): reads out['RMSEPT'] while
                    # find_model_order reads out['rmsept'] -- one of the
                    # two key spellings is likely wrong; check metrics.pls.
                    rmsepts.append(out['RMSEPT'])
        to_plot.append((model_period, np.mean(rmsepts)))
    print to_plot
    for i, pld in enumerate(to_plot):
        if i == 0:
            ax.plot(pld[0], pld[1], 'bo')
        else:
            ax.plot(pld[0], pld[1], 'bo')
    #plt.plot(rms_m.mean(axis=0), 'o', label=str(day_zero)[:10])
    ax.set_xlabel('Number of samples in model')
    ax.set_ylabel('RMSEP')
    #ax.legend(loc=1, fancybox=True)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    print "Done in:",
    print (time.time() - start_time)
    plt.show()





def fdi_over_the_year():
    """Run the full fault-detection pipeline over a range of start days.

    For every 10-day period: build one PLS model per turbine, compute
    cross-turbine residuals, evaluate CUSUM fault flags, let the turbines
    vote, then plot the vote history together with SAP maintenance
    events.  Saves the figure as a PDF and blocks in ``plt.show()``.
    Python 2 only (print statements; ``iteritems`` in callees).
    """
    start_time = time.time()
    """
    This function should try to run through a specified time period
    and then do FDI
    """
    # NOTE(review): only the LAST 'turbines' assignment below is
    # effective; the earlier tuples and the triple-quoted blocks are
    # leftovers from experiments with other turbine groups.
    turbines = "WH1115", "WH1116", "WH1117",\
            "WH1225", "WH1226", "WH1227",\
            "WH1235", "WH1236", "WH1237"
    turbines = "WH1115", "WH1116", "WH1117",\
            "WH1225", "WH1226", "WH1227",\
            "WH1235", "WH1236", "WH1237",\
            "WH1345", "WH1346", "WH1347",\
            "WH1355", "WH1356",\
            "WH1465", "WH1466", "WH1467"
    turbines = "WH1101","WH1102","WH1103","WH1104","WH1105","WH1106","WH1107","WH1108",\
               "WH1111",
    """
    turbines = "WH1112", "WH1113", "WH1114",\
            "WH1222", "WH1223", "WH1224",\
            "WH1232", "WH1233", "WH1234"
    """
    """
    turbines = "WH1101", "WH1102", "WH1103",\
            "WH1111", "WH1112", "WH1113",\
            "WH122]", "WH1222", "WH1223"
    """
    #turbines = "WH1236",

    # The second assignment wins: 10-day model periods over 2009-01-01
    # onwards (60 periods).
    start_days = [dt.datetime(2009, 9, 5) + dt.timedelta(days=10 * i)\
            for i in range(26)]
    start_days = [dt.datetime(2009, 1, 1) + dt.timedelta(days=10 * i)\
            for i in range(60)]
    y_parameter = "gearbeartempavg"
    print start_days[0]
    print start_days[-1]
    data = get_data_from_turbines(turbines)

    data_sets = []
    # Remove empty temperature measurement
    data = recfunctions.drop_fields(data, "generatorbeartempavg")

    para_sets, temp_keys = get_temp_set(data.dtype.names, y_parameter)
    print temp_keys

    for turb in turbines:
        data_sets.append(data[np.where(data["turbid"] == turb)])

    para_set = para_sets[0]
    # start generating models for each period
    pls_dict = defaultdict(list)
    for start_day in start_days:
        for (data, turb) in zip(data_sets, turbines):
            print turb
            pls_dict[start_day].append(pls_set_up(data, 6,
                para_set, start_day, turb))
    print "Start predicting"
    # now there is a PLS model for each period for each turbine
    #for threshold in range(80,120,20):
    threshold = 80
    res_dicts = {}
    voting_dicts = {}
    list_of_looks = []
    for tid, pls_outs in pls_dict.items():
        print tid
        # model is based on the first 15 days thus the offset
        res_day = tid + dt.timedelta(days=15)
        res_dict = generate_residual(data_sets, pls_outs,
                turbines, para_set, res_day)

        residual_evaluation(pls_outs, res_dict, turbines, threshold)
        # Re-vote until the voter pool is stable (faulty voters removed).
        turbines_voting = turbines
        looking_for_voters = True
        num_of_looks = 0
        while (looking_for_voters):
            num_of_looks += 1
            voting_dict = residual_voting(res_dict, turbines, turbines_voting)
            turbines_voting, looking_for_voters = find_voting_turbines(voting_dict, turbines_voting)
        list_of_looks.append(num_of_looks)
        res_dicts[tid] = res_dict
        voting_dicts[tid] = voting_dict
    evaluate_votes(voting_dicts)
    plot_sap_data(turbines)
    print "Done in:",
    print (time.time() - start_time)
    #plt.title(y_parameter)
    plt.savefig('prediction_' + y_parameter + '.pdf', bbox_inches='tight')
    plt.show()
    return


if __name__ == '__main__':
    # Entry point: run the full fault-detection pipeline.  Swap the call
    # below for find_model_order() / find_model_samples() to re-tune the
    # model, or re-enable cProfile to profile a run.
    #import cProfile
    #cProfile.run('fdi_over_the_year()')
    fdi_over_the_year()
    #find_model_order()
    #find_model_samples()
