'''
Created on Aug 29, 2012

@author: Andre Dozier
'''

import numpy as np
import datapy as d
import matplotlib.pyplot as plt
import random
import string 
import math

def get_MetricValue(perf, metric):
    '''
    Returns the metric named *metric* (e.g. "RMSE") evaluated on a single
    Performance object.
    '''
    return getattr(Performance, metric)(perf)

def get_MetricValues(perfs, metric):
    '''
    Gets an array of performance values for a list of Performance
    objects generated from random scenarios. Each entry is the mean of
    the named metric over a single Performance's target columns.
    '''
    metric_fn = getattr(Performance, metric)
    return np.array([np.mean(values) for values in map(metric_fn, perfs)])

def get_MeanMetricValues(listOfPerfs, metric):
    '''
    Returns one mean metric value per group in *listOfPerfs*, where each
    group is itself a list of Performance objects.
    '''
    means = [get_MetricValues(group, metric).mean() for group in listOfPerfs]
    return np.array(means)

def get_AllMetricValues(listOfPerfs, metric):
    '''
    Returns a list with one metric-value array (see get_MetricValues)
    per group of Performance objects in *listOfPerfs*.
    '''
    results = []
    for group in listOfPerfs:
        results.append(get_MetricValues(group, metric))
    return results

def savePerformances(basename, listOfPerfs):
    '''
    Writes one CSV file per performance metric: for each metric name the
    per-scenario metric arrays are gathered via get_AllMetricValues and
    saved to "<basename><metric>.csv". Replaces five copy-pasted stanzas
    with a single loop over the metric names.
    '''
    for metric in ("RMSE", "RSquared", "NSCE", "BIAS", "PWRMSE"):
        perfs = get_AllMetricValues(listOfPerfs, metric)
        np.savetxt(basename + metric + ".csv", perfs, delimiter=',')
    
def makeLLS(Xtrain, Ttrain, lamb=None, stdizeOutputs=False):
    '''
    @summary:
    Approximates weights for a linear system based on the least squares
    approximation technique. A lambda can be specified that corresponds
    to a weight on variables themselves, which tends to make the model
    simpler (for regularized least squares).

    @param Xtrain: input data, observations along axis 0
    @param Ttrain: target data, observations along axis 0
    @param lamb: optional ridge penalty; the bias weight is not penalized
    @param stdizeOutputs: when True, fit against standardized targets

    @return:
    Returns a LinearModel holding the weights and the objects used to
    standardize and unstandardize the data (XStdizer and TStdizer).
    '''
    XStdizer = d.Standardizer(Xtrain)
    TStdizer = d.Standardizer(Ttrain)
    # standardized inputs with a leading column of ones for the bias term
    XStd1 = d.AddOnes(XStdizer.StandardizedData)
    XStd1_T = XStd1.T
    TStd = TStdizer.StandardizedData if stdizeOutputs else Ttrain
    # normal-equations matrix; add the ridge penalty when requested
    # (fixed: use "is None" instead of "== None", and solve once instead
    # of duplicating the lstsq call in both branches)
    A = np.dot(XStd1_T, XStd1)
    if lamb is not None:
        lambdaI = lamb * np.eye(XStd1.shape[1])
        lambdaI[0, 0] = 0  # do not penalize the bias weight
        A = A + lambdaI
    w = np.linalg.lstsq(A, np.dot(XStd1_T, TStd))[0]
    return LinearModel(w, XStdizer, TStdizer, stdizeOutputs)
    
def useLLS(Xtrain, linmodel):
    '''
    @summary:
    Using weights for a linear system Xtrain, returns the prediction of
    the target variable. Inputs are standardized and given a bias column
    before multiplying by the model weights.
    '''
    inputs = d.AddOnes(linmodel.XStdizer.Standardize(Xtrain))
    prediction = np.dot(inputs, linmodel.weights)
    if not linmodel.stdizeOutputs:
        return prediction
    # model was fit on standardized targets; map back to original units
    return linmodel.TStdizer.Unstandardize(prediction)

def PlotScenarios(scenarios, metric="RMSE", outFilePath="",
                  bounds=None, logScale=False):
    '''
    @summary:
    Plots root mean squared errors as a function of the training /
    testing fractions: a mean-value line plot plus box plots of the
    training and testing datasets.
    '''
    # split the output path; assumes a 4-character extension (e.g. ".png")
    split = len(outFilePath) - 4
    base, ext = outFilePath[:split], outFilePath[split:]

    # Mean values plot
    _PlotMeanValues(scenarios, metric, bounds, logScale=logScale,
                    outFilePath=outFilePath)

    # Box plot of training dataset
    _PlotBoxPlots(scenarios, metric, True,
                  outFilePath=base + "_boxTrain" + ext)

    # Box plot of testing dataset
    _PlotBoxPlots(scenarios, metric, False,
                  outFilePath=base + "_boxTest" + ext)

def _PlotMeanValues(scenarios, metric, bounds=None, logScale=False,
                    outFilePath=""):
    '''Forwards a scenario group's train/test performances to PlotMeanValues.'''
    PlotMeanValues(scenarios.xaxisVals,
                   scenarios.allTrainPerfs,
                   testPerfsList=scenarios.allTestPerfs,
                   validPerfsList=None,
                   metric=metric,
                   bounds=bounds,
                   xlabel=scenarios.xaxisLabel,
                   logScale=logScale,
                   outFilePath=outFilePath)
    
def PlotMeanValues(xaxisVals,
        trainPerfsList, testPerfsList=None, validPerfsList=None,
        metric="RMSE", bounds=None, xlabel="", logScale=False,
        outFilePath=""):
    '''
    Plots the mean value of *metric* versus *xaxisVals*: training as a
    solid line, validation dotted, testing dashed. Saves the figure when
    outFilePath is given. Fixed: identity checks against None instead of
    "!= None" equality comparisons.
    '''
    # set up the figure
    d.newFigure(xlabel, metric)

    # plot training
    vals = get_MeanMetricValues(trainPerfsList, metric)
    plt.plot(xaxisVals, vals, 'k-', label='Training')

    # plot validation
    if validPerfsList is not None:
        vals = get_MeanMetricValues(validPerfsList, metric)
        plt.plot(xaxisVals, vals, 'k:', label='Validation')

    # plot testing
    if testPerfsList is not None:
        vals = get_MeanMetricValues(testPerfsList, metric)
        plt.plot(xaxisVals, vals, 'k--', label='Testing')

    if bounds is not None:
        plt.gca().set_ybound(bounds[0], bounds[1])

    if logScale:
        plt.xscale('log')

    # show legend
    plt.legend()

    if outFilePath != "":
        d.saveFigure(outFilePath)
    
def _PlotBoxPlots(scenarios, metric="RMSE", plotTraining=False, bounds=None,
                  outFilePath=""):
    '''Forwards the chosen (training or testing) performances to PlotBoxPlots.'''
    if plotTraining:
        perfs = scenarios.allTrainPerfs
    else:
        perfs = scenarios.allTestPerfs
    PlotBoxPlots(scenarios.xaxisVals, perfs, metric,
                 scenarios.xaxisLabel, bounds, outFilePath)
    
def PlotBoxPlots(xaxisVals, perfsList, metric="RMSE", xlabel="",
                 bounds=None, outFilePath=""):
    '''
    Draws one box plot of *metric* per group in *perfsList*, with x-tick
    labels taken from *xaxisVals*. Fixed: "bounds != None" replaced with
    an identity check.
    '''
    d.newFigure(xlabel, metric)
    vals = get_AllMetricValues(perfsList, metric)
    # with 30+ scenarios per group, pass a bootstrap sample count
    # (only affects notch intervals when notches are enabled)
    if vals[0].shape[0] > 29:
        plt.boxplot(vals, notch=0, bootstrap=1000)
    else:
        plt.boxplot(vals, notch=0)
    if bounds is not None:
        plt.gca().set_ybound(bounds[0], bounds[1])

    # xtick labels (leading '' because boxplot positions start at 1)
    plotBins = xaxisVals
    plt.xticks(range(len(plotBins) + 1), [''] +
               [str(i) for i in plotBins],
               size='small', rotation=90)
    plt.gca().set_position([0.1, 0.15, 0.8, 0.7])

    # save to file
    if outFilePath != "":
        d.saveFigure(outFilePath)
    
def PlotRegression(Y, Ttrain, TNames, basename="", newdpi=None,
                   trainI=None, testI=None, validI=None):
    '''
    Plots modeled values (Y) against actual values (Ttrain): one figure
    per target variable, then one combined figure. When index arrays
    trainI / validI / testI are given, those subsets are plotted as
    separate labelled series.
    '''
    # min and max bounds for the one-to-one reference line
    a = max(Ttrain.min(), Y.min())
    b = min(Ttrain.max(), Y.max())

    #===============================================================
    # target variables separately
    #===============================================================
    for i in range(len(TNames)):
        d.newFigure("Predicted " + TNames[i], "Actual " + TNames[i])
        plt.plot([a, b], [a, b], 'k-', label='One-to-One')
        if trainI is None:
            plt.plot(Y[:, i], Ttrain[:, i], 'o')
        else:
            plt.plot(Y[trainI, i], Ttrain[trainI, i], 'o', label='Training')
            if validI is not None:
                plt.plot(Y[validI, i], Ttrain[validI, i], 'o', label='Validation')
            if testI is not None:
                plt.plot(Y[testI, i], Ttrain[testI, i], 'o', label='Testing')
            plt.legend(loc='best')

        # Save the figure
        if basename != "":
            currFile = basename + '_' + TNames[i] + '.png'
            d.saveFigure(currFile, newdpi)

    #===============================================================
    # All target variables
    #===============================================================
    d.newFigure("Predicted Target Values", "Actual Target Values")
    l1 = plt.plot([a, b], [a, b], 'k-', label='One-to-One')
    if trainI is None:
        l2 = plt.plot(Y, Ttrain, 'o')
        plt.legend(l1 + l2, ['One-to-One'] + TNames, loc='lower right')
    else:
        # Fixed: this branch previously indexed with the leftover loop
        # variable "i", plotting only the last target column (and raising
        # NameError when TNames was empty); plot all columns instead.
        plt.plot(Y[trainI, :], Ttrain[trainI, :], 'o', label='Training')
        if validI is not None:
            plt.plot(Y[validI, :], Ttrain[validI, :], 'o', label='Validation')
        if testI is not None:
            plt.plot(Y[testI, :], Ttrain[testI, :], 'o', label='Testing')
        plt.legend(loc='best')

    # Save the figure
    if basename != "":
        currFile = basename + '_All.png'
        d.saveFigure(currFile, newdpi)

class Performance:
    '''
    Gives the performance of a model.

    *observed* and *modeled* are array-likes with observations along
    axis 0; every metric reduces over that axis, yielding one value per
    target column.
    '''
    def __init__(self, observed, modeled):
        self.targetVals = np.array(observed)
        self.modeledVals = np.array(modeled)
        self.avg = self.targetVals.mean(0)     # per-column observed mean
        self.nvals = self.targetVals.shape[0]  # number of observations

    def BIAS(self):
        # Model bias: total of (observed - modeled)
        return (self.targetVals - self.modeledVals).sum(0)

    def DEV(self):
        # Model deviation: ratio of modeled total to observed total
        return self.modeledVals.sum(0) / self.targetVals.sum(0)

    def SAE(self):
        # Model sum of absolute error.
        # Fixed: absolute errors are now summed over observations as the
        # name requires (and as MAE / MCE, which build on this, assume);
        # previously the unsummed element-wise array was returned.
        return np.abs(self.targetVals - self.modeledVals).sum(0)

    def MAE(self):
        # Model mean absolute error
        return self.SAE() / self.nvals

    def MRE(self):
        # Model mean relative error
        return (np.abs(self.targetVals - self.modeledVals)
                / self.targetVals).sum(0) / self.nvals

    def SSE(self):
        # Model sum of squared errors
        return ((self.targetVals - self.modeledVals) ** 2).sum(0)

    def SSTotal(self):
        # Total sum of squares, sum(targetVals-avg)^2
        return ((self.targetVals - self.avg) ** 2).sum(0)

    def SSReg(self):
        # Explained / regression sum of squares, sum(modeledVals-avg)^2
        return ((self.modeledVals - self.avg) ** 2).sum(0)

    def MSE(self):
        # Model mean squared error
        return self.SSE() / self.nvals

    def RMSE(self):
        # Model root mean squared error
        return self.MSE() ** 0.5

    def PWRMSE(self):
        # Model peak-weighted root mean squared error: squared errors
        # weighted by (observed + mean) / (2 * mean), which emphasizes
        # above-average observations
        return ((((self.targetVals - self.modeledVals) ** 2)
                * (self.targetVals + self.avg) / (2 * self.avg)).sum(0)
                / self.nvals) ** 0.5

    def NSCE(self):
        # Model Nash-Sutcliffe coefficient of efficiency
        return 1 - self.SSE() / self.SSTotal()

    def RSquared(self):
        # R-squared, coefficient of determination
        return self.SSReg() / self.SSTotal()

    def MCE(self):
        # Model modified coefficient of efficiency (summed absolute error
        # over summed absolute deviation from the mean)
        return 1 - self.SAE() / np.abs(self.targetVals - self.avg).sum(0)

class LinearModel:
    '''
    @summary:
    Holds the fitted pieces of a linear model: the weight matrix plus
    the input/output standardizers, and whether predictions must be
    unstandardized on the way out.
    '''
    def __init__(self, w, XStdizer, TStdizer, stdizeOutputs=False):
        # store the fit results exactly as supplied by makeLLS
        self.weights = w
        self.XStdizer = XStdizer
        self.TStdizer = TStdizer
        self.stdizeOutputs = stdizeOutputs

    def Use(self, Xtrain):
        '''Predicts target values for Xtrain by delegating to useLLS.'''
        return useLLS(Xtrain, self)

class LinearScenario:
    '''
    @summary:
    Builds and solves a linear system over many randomly split (and
    optionally bootstrapped) train/test scenarios, recording each
    scenario's weights, datasets and Performance objects.
    '''
    bounds = None

    def __init__(self, Xtrain, Ttrain):
        self.X = Xtrain
        self.T = Ttrain

    def Run(self, numOfScenarios, TrainingFraction=0.75, lamb=None,
            rndSeed=None, bootstrap=False):
        '''
        Solves *numOfScenarios* scenarios: each splits the (optionally
        bootstrap-resampled) data, fits a model via makeLLS, and appends
        weights / performances to the scen* lists on self.
        Fixed: identity checks against None.
        '''
        if lamb is not None:
            s = 'lambda = ' + str(lamb)
        else:
            s = ''
        print('Solving ' + str(numOfScenarios) + ' scenarios. '
              + str(TrainingFraction * 100) + '% training. ' + s)

        # per-scenario result lists
        self.scenWeights = []
        self.scenXTrains = []
        self.scenXTests = []
        self.scenLinModels = []
        self.scenTrainPerfs = []
        self.scenTestPerfs = []
        self.NumOfScenarios = numOfScenarios
        self.TrainingFraction = TrainingFraction
        self.WasBootStrapped = bootstrap

        # set random seed for reproducible splits
        if rndSeed is not None:
            random.seed(rndSeed)

        indices = range(self.X.shape[0])
        origI = indices

        # Start scenario loop
        for _ in range(numOfScenarios):
            if bootstrap:
                # resample row indices with replacement
                indices = [random.choice(origI) for _ in origI]

            # split data
            [XTrain, TTrain, XTest, TTest, _, _, _, _, _] = d.SplitData(
                                                 self.X[indices, :],
                                                 self.T[indices, :],
                                                 TrainingFraction)

            # solve linear system and simulate
            linModel = makeLLS(XTrain, TTrain, lamb)
            YTrain = linModel.Use(XTrain)
            if self.TrainingFraction < 1:
                YTest = linModel.Use(XTest)

            # append data to lists
            self.scenWeights.append(linModel.weights)
            self.scenXTrains.append(XTrain)
            self.scenXTests.append(XTest)
            self.scenLinModels.append(linModel)
            self.scenTrainPerfs.append(Performance(TTrain, YTrain))
            if self.TrainingFraction < 1:
                self.scenTestPerfs.append(Performance(TTest, YTest))

    def _MetricArray(self, metric, avgMetrics):
        '''
        Shared helper for the Arg* selectors (previously triplicated):
        per-scenario metric values taken from the test set when a test
        split exists (TrainingFraction < 1), else the training set;
        averaged across target columns when avgMetrics is True.
        '''
        if self.TrainingFraction >= 1.0:
            a = get_MetricValues(self.scenTrainPerfs, metric)
        else:
            a = get_MetricValues(self.scenTestPerfs, metric)
        if a.ndim > 1 and avgMetrics and a.shape[1] > 1:
            a = a.mean(1)
        return a

    def ArgMin(self, metric="RMSE", avgMetrics=True):
        '''
        Gets the index of the scenario with the minimized value of the
        metric
        '''
        return np.argmin(self._MetricArray(metric, avgMetrics), 0)

    def ArgMedian(self, metric="RMSE", avgMetrics=True):
        '''
        Gets the index of the scenario with the median value of the
        metric
        '''
        a = self._MetricArray(metric, avgMetrics)
        indices = np.argsort(a, 0)
        midIndex = int(np.floor(len(a) / 2))
        return indices[midIndex]

    def ArgMax(self, metric="RMSE", avgMetrics=True):
        '''
        Gets the index of the scenario with the maximized value of the
        metric
        '''
        return np.argmax(self._MetricArray(metric, avgMetrics), 0)

class LinearScenarioPlotter:
    '''
    @summary:
    Contains various functions that plot a scenario or multiple
    scenarios
    '''

    def __init__(self, linScenario, XNames, TNames):
        # XNames / TNames: display names of input / target variables;
        # linScenario: a solved LinearScenario
        self.XNames = XNames
        self.TNames = TNames
        self.linScenario = linScenario

    def PlotRegression(self, scenarioI, outFilePath="", newdpi=None,
                       stdize=False):
        '''
        Plots predicted vs. actual values for the training and testing
        datasets of the scenario at index *scenarioI*: one figure per
        target variable, then combined training and testing figures.
        '''
        # quick exit for an out-of-range scenario index
        if scenarioI < 0 or len(self.linScenario.scenWeights) <= scenarioI:
            return

        # output path split into base name and (assumed 4-char) extension
        if outFilePath != "":
            i = len(outFilePath) - 4
            basename = outFilePath[:i]
            extension = outFilePath[i:]

        # retrieve datasets from the selected scenario
        modTrain = self.linScenario.scenTrainPerfs[scenarioI].modeledVals
        targTrain = self.linScenario.scenTrainPerfs[scenarioI].targetVals
        modTest = self.linScenario.scenTestPerfs[scenarioI].modeledVals
        targTest = self.linScenario.scenTestPerfs[scenarioI].targetVals
        if stdize:
            # NOTE(review): LinearModel defines no Standardize method and
            # the return values are discarded here -- stdize=True looks
            # broken; confirm the intended behavior before relying on it.
            self.linScenario.scenLinModels[scenarioI].Standardize(modTrain)
            self.linScenario.scenLinModels[scenarioI].Standardize(targTrain)
            self.linScenario.scenLinModels[scenarioI].Standardize(modTest)
            self.linScenario.scenLinModels[scenarioI].Standardize(targTest)

        # min and max bounds for the one-to-one reference line
        a = max(targTest.min(), modTest.min())
        b = min(targTest.max(), modTest.max())

        #===============================================================
        # target variables separately
        #===============================================================
        for i in range(len(self.TNames)):
            d.newFigure("Predicted " + self.TNames[i],
                        "Actual " + self.TNames[i])

            # plot one-to-one line
            plt.plot([a, b], [a, b], 'k-', label='One-to-One')

            # plot training data
            plt.plot(modTrain[:, i], targTrain[:, i], 'o',
                     label='Training')

            # plot testing data
            plt.plot(modTest[:, i], targTest[:, i], 'x',
                     label='Testing')

            # show legend
            plt.legend(loc='lower right')

            # Save the figure
            if outFilePath != "":
                currFile = basename + '_' + self.TNames[i] + extension
                print("Saving regression figure to " + currFile)
                d.saveFigure(currFile, newdpi)

        #===============================================================
        # training data with all target variables
        #===============================================================
        d.newFigure("Predicted Target Values",
                    "Actual Target Values")

        # plot one-to-one line
        l1 = plt.plot([a, b], [a, b], 'k-')

        # plot training data
        l2 = plt.plot(modTrain, targTrain, 'o')

        # show legend
        plt.legend(l1 + l2, ['One-to-One'] + self.TNames, loc='lower right')

        # Save the figure
        if outFilePath != "":
            currFile = basename + '_TrainAll' + extension
            print("Saving regression figure to " + currFile)
            d.saveFigure(currFile, newdpi)

        #===============================================================
        # testing data with all target variables
        #===============================================================
        d.newFigure("Predicted Target Values",
                    "Actual Target Values")

        # plot one-to-one line
        l1 = plt.plot([a, b], [a, b], 'k-', label='One-to-One')

        # plot testing data
        l2 = plt.plot(modTest, targTest, 'o')

        # show legend
        plt.legend(l1 + l2, ['One-to-One'] + self.TNames, loc='lower right')

        # Save the figure
        if outFilePath != "":
            currFile = basename + '_TestAll' + extension
            print("Saving regression figure to " + currFile)
            d.saveFigure(currFile, newdpi)

    def PlotWeights(self, outFilePath, CInterval, newdpi=None,
                    includeBias=True):
        '''
        Plots every scenario's weight for each input variable (one figure
        per target variable). When the scenarios were bootstrapped, red
        bars mark the CInterval quantiles of each weight's distribution.
        '''
        # output file base name / extension (assumes 4-char extension)
        if outFilePath != "":
            i = len(outFilePath) - 4
            basename = outFilePath[:i] + "_weights"
            extension = outFilePath[i:]

        # stack weights (scenario x weight x target); sorting along axis 0
        # orders each weight's values so quantile rows can be indexed
        weights = np.array(self.linScenario.scenWeights)
        weights.sort(0)
        if includeBias:
            nvar = len(self.XNames) + 1
            WeightNames = ['bias'] + self.XNames
        else:
            nvar = len(self.XNames)
            WeightNames = self.XNames
            weights = weights[:, 1:, :]  # drop the bias column
        WeightIndices = [i + 1 for i in range(nvar)]
        bootstrap = self.linScenario.WasBootStrapped

        # confidence interval row indices into the sorted weights
        n = weights.shape[0]
        loI = int(math.ceil(CInterval[0] * n))
        hiI = int(math.floor(CInterval[1] * n))

        # A separate figure for each target variable
        for i in range(len(self.TNames)):
            d.newFigure("Variables", "Weights")

            # plot all weights at labeled indices
            for j in WeightIndices:
                plt.plot([j] * n, weights[:, j - 1, i], 'ko',
                         alpha=0.5)
                xint = [j - 0.2, j + 0.2]
                if bootstrap:
                    # red bars at the lower / upper confidence bounds
                    ylo = weights[loI, j - 1, i]
                    yhi = weights[hiI, j - 1, i]
                    plt.plot(xint, [ylo, ylo], 'r-', linewidth=1)
                    plt.plot(xint, [yhi, yhi], 'r-', linewidth=1)
                # NOTE(review): no confidence bars are drawn for
                # non-bootstrapped scenarios (dead commented-out code
                # for that case was removed).

            # update x-axis
            xlims = [0, len(WeightIndices) + 1]
            plt.plot(xlims, [0, 0], 'k--')
            plt.xlim(xlims)
            plt.xticks(WeightIndices, WeightNames, size='small',
                       rotation=90)
            plt.gca().set_position([0.1, 0.2, 0.8, 0.7])

            # Save the figure
            if outFilePath != "":
                currFile = basename + '_' + self.TNames[i] + extension
                print("Saving regression figure to " + currFile)
                d.saveFigure(currFile, newdpi)

    def PrintWeights(self, scenarioI, logfile=""):
        '''
        Prints the target-standardization statistics, the model weights,
        and train (plus, when a test split exists, test) RMSE and
        R-squared for scenario *scenarioI*. Also writes everything to
        *logfile* when one is given.
        '''
        scen = self.linScenario
        XStdizer = scen.scenLinModels[scenarioI].XStdizer
        TStdizer = scen.scenLinModels[scenarioI].TStdizer
        Weights = scen.scenLinModels[scenarioI].weights

        # Fixed: the original called f.write for the trailing metric lines
        # unconditionally, raising NameError whenever logfile was "";
        # it also never closed the file.
        f = open(logfile, "w") if logfile != "" else None
        if f is not None:
            for n, m, std in zip(self.TNames,
                                 list(TStdizer.Means),
                                 list(TStdizer.Stdevs)):
                f.write("     targetvar,%14.7f,%14.7f,%s\n"
                        % (m, std, n))
        for n, m, std, w in zip(['bias'] + self.XNames,
                                [1] + list(XStdizer.Means),
                                [0] + list(XStdizer.Stdevs),
                                Weights):
            # Fixed: string.join() was removed in Python 3; use str.join.
            s = "%14s,%14.7f,%14.7f,%s" % (n, m, std,
                ",".join(["%14.7f" % w[i] for i in range(w.shape[0])]))
            print(s)
            if f is not None:
                f.write(s + "\n")

        if scen.TrainingFraction < 1:
            rmse = "Test RMSE = " + str(
                get_MetricValues(scen.scenTestPerfs, "RMSE")[scenarioI])
            rsq = "Test R-Squared= " + str(
                get_MetricValues(scen.scenTestPerfs, "RSquared")[scenarioI])
            print(rmse + "\n" + rsq)
            if f is not None:
                f.write(rmse + "\n" + rsq + "\n")

        rmse = "Train RMSE = " + str(
            get_MetricValues(scen.scenTrainPerfs, "RMSE")[scenarioI])
        rsq = "Train R-Squared= " + str(
            get_MetricValues(scen.scenTrainPerfs, "RSquared")[scenarioI])
        print(rmse + "\n" + rsq)
        if f is not None:
            f.write(rmse + "\n" + rsq + "\n")
            f.close()
    
class LinearScenarioGroup:
    '''
    @summary:
    Solves and simulates multiple linear modeling scenarios: one
    LinearScenario per (training fraction, lambda) combination.
    '''
    def __init__(self, Xtrain, Ttrain):
        self.X = Xtrain
        self.T = Ttrain

    def Run(self, numOfScenarios, TrainingFractions=[0.75], lambdas=[0],
            rndSeed=None):
        '''
        Runs a LinearScenario for every combination of training fraction
        and lambda, collecting their weights and performances. (The
        mutable list defaults are never modified here, so sharing them
        across calls is safe.) Fixed: raw strings for the "$\\lambda$"
        labels (\\l is an invalid escape sequence) and identity checks
        against None.
        '''
        # set instance variables and decide what varies on the x-axis
        self.trainingFractions = TrainingFractions
        self.lambdas = lambdas
        self.xaxisVals = []
        self.xaxisLabel = ''
        if len(lambdas) == 1:
            self.xaxisVals = TrainingFractions
            self.xaxisLabel = 'Training dataset fraction'
        elif len(TrainingFractions) == 1:
            self.xaxisVals = lambdas
            self.xaxisLabel = r"$\lambda$"
        else:
            # both vary: repeat the lambda values once per training fraction
            for _ in range(len(TrainingFractions)):
                self.xaxisVals = self.xaxisVals + lambdas
            self.xaxisLabel = r"$\lambda$"

        # set random seed
        if rndSeed is not None:
            random.seed(rndSeed)

        # run all scenarios
        self.allScenarios = []
        self.allWeights = []
        self.allTrainPerfs = []
        self.allTestPerfs = []
        for prct in TrainingFractions:
            for lamb in lambdas:
                linScen = LinearScenario(self.X, self.T)
                linScen.Run(numOfScenarios, prct, lamb)
                self.allScenarios.append(linScen)
                self.allWeights.append(linScen.scenWeights)
                self.allTrainPerfs.append(linScen.scenTrainPerfs)
                self.allTestPerfs.append(linScen.scenTestPerfs)


        
        
        
        
        
