# standard
import sys

# third party - numpy, matplotlib, sklearn
import numpy
import pylab as pl
import matplotlib.pyplot as mpl
from mpl_toolkits.mplot3d import Axes3D
from sklearn import manifold, svm, datasets, decomposition

# our imports
from loadData import DataPreprocessor
import lle

# Shared plotting settings: scatter-marker size and the hex color palette
# used to distinguish case vs. control points in the plots below.
pt_size = 50
darkRed = '#c11b17'    # case points (3d ridge plot)
lightRed = '#ff0000'   # case / control points (red family)
darkBlue = '#15317e'   # standard-LLE case points
lightBlue = '#2b60de'  # control points (blue family)
darkGreen = '#347c17'  # PCA case points
lightGreen = '#00ff00' # PCA control points


def main(filename):
    """Compare ridge LLE, standard LLE, and PCA embeddings of a dataset.

    Loads and normalizes the data, computes the three low-dimensional
    embeddings, then repeatedly draws random train/test splits, trains an
    SVM on each embedding, and reports summary statistics of the test
    classification accuracy.

    filename -- path to a data file readable by DataPreprocessor
    """
    print('---Preprocessing data---')
    dp = DataPreprocessor()
    try:
        dp.loadFromFile(filename)
    except Exception as e:
        # report the load failure and abort the whole run
        print(e)
        sys.exit(1)

    # grab data matrix and normalize
    rawData = dp.mat
    normData = dp.normalize(rawData)

    # LLE params
    num_neighbors = 20
    output_dim = 10
    distance_function = 'infinity'

    print('---Preparing LLE using {} neighbors output dimension '
          '{}---'.format(num_neighbors, output_dim))
    # LLE using new ridge method
    ridgeLLE = lle.LocallyLinearEmbedding(num_neighbors,
                                          output_dim,
                                          method='ridge',
                                          distance_function=distance_function)
    # LLE using standard method (sklearn reference implementation)
    stdLLE = manifold.LocallyLinearEmbedding(num_neighbors,
                                             output_dim,
                                             reg=0.0,
                                             method='standard')
    # PCA baseline with the same output dimensionality
    pca = decomposition.PCA(n_components=output_dim)

    # input to fit_transform should be [n_samples, n_features];
    # output of fit_transform is [n_samples, out_dim]
    # NOTE(review): fit_transform appears deterministic here --
    # repeated identical calls have yielded the same results
    print('---Performing Ridge LLE---')
    ridgeOutput = ridgeLLE.fit_transform(normData)
    print('---Performing Standard LLE---')
    stdOutput = stdLLE.fit_transform(normData)
    print('---Performing PCA---')
    pcaOutput = pca.fit_transform(normData)

    # TODO: not sure if we should renormalize the embeddings, e.g.:
    #   ridgeOutput = dp.normalize(ridgeOutput, method='scale')
    #   stdOutput = dp.normalize(stdOutput, method='scale')
    #   pcaOutput = dp.normalize(pcaOutput, method='scale')

    # SVM params, shared by all three classifiers
    svmParams = {'C': 64,
                 'coef0': 0.0,
                 'degree': 5,
                 'gamma': 40,
                 'kernel': 'rbf',
                 'probability': True,
                 'shrinking': True,
                 'tol': 0.000001}
    classWeight = 'auto'  # alternatively e.g. {0: 1.0, 1: 0.1}

    numIters = 1000
    ridgeResults = numpy.zeros((numIters))
    stdResults = numpy.zeros((numIters))
    pcaResults = numpy.zeros((numIters))

    def _trainAndScore(trainData, trainClasses, testData, testClasses):
        # Fit one SVM on an embedding's training rows and return its
        # prediction accuracy on the held-out rows.  Labels are binary
        # 0/1 ints, so equality matches the old xor-based match count.
        clf = svm.SVC(**svmParams)
        clf.fit(trainData, trainClasses, class_weight=classWeight)
        predicted = clf.predict(testData)
        return numpy.mean(predicted == testClasses)

    allInd = numpy.arange(normData.shape[0])
    for i in range(numIters):
        # pick indices for training data; testing set is the complement
        trainingInd = dp.selectTrainingSet(pct=0.75)
        # trainingInd = dp.selectBalancedTrainingSet(pct=0.75)
        trainingClasses = dp.case[trainingInd].astype('int')
        testingInd = numpy.setdiff1d(allInd, trainingInd).astype('int')
        testingClasses = dp.case[testingInd].astype('int')

        ridgeResults[i] = _trainAndScore(ridgeOutput[trainingInd, :],
                                         trainingClasses,
                                         ridgeOutput[testingInd, :],
                                         testingClasses)
        stdResults[i] = _trainAndScore(stdOutput[trainingInd, :],
                                       trainingClasses,
                                       stdOutput[testingInd, :],
                                       testingClasses)
        pcaResults[i] = _trainAndScore(pcaOutput[trainingInd, :],
                                       trainingClasses,
                                       pcaOutput[testingInd, :],
                                       testingClasses)

    def _printStats(header, stat):
        # print one summary statistic for each embedding method
        print(header)
        print('Ridge: {}'.format(stat(ridgeResults)))
        print('Std: {}'.format(stat(stdResults)))
        print('PCA: {}'.format(stat(pcaResults)))

    print('\n\n---Results after {} iterations---'.format(numIters))
    _printStats('Best', numpy.max)
    _printStats('\nMean', numpy.mean)
    _printStats('\nWorst', numpy.min)
    _printStats('\nStandard Deviation', numpy.std)
    _printStats('\nVariance', numpy.var)

    # first control index (dp.case is cases first, then controls,
    # judging by the scatter labels below)
    fci = numpy.where(dp.case == 0)[0][0]

    # create plots -- disabled; enable as needed
    # create2dScatter(fci, ridgeOutput)
    # create2dScatter(fci, ridgeOutput, pca=pcaOutput,
    #                 title=('First Two Components of Ridge LLE and PCA'
    #                        ': {} neighbors, {} output '
    #                        'dimensions').format(num_neighbors, output_dim))
    # create2dScatter(fci, ridgeOutput, std=stdOutput,
    #                 title=('First Two Components of Ridge and Standard LLE'
    #                        ': {} neighbors, {} output '
    #                        'dimensions').format(num_neighbors, output_dim))
    # create2dScatter(fci, ridgeOutput, std=stdOutput, pca=pcaOutput,
    #                 title=('First Two Components of Ridge, Standard, and '
    #                        'PCA: {} neighbors, {} output '
    #                        'dimensions').format(num_neighbors, output_dim))
    # create3dScatter(fci, ridgeOutput, std=stdOutput, pca=pcaOutput,
    #                 title=('First Three Components of Ridge, Standard, and '
    #                        'PCA: {} neighbors, {} output '
    #                        'dimensions').format(num_neighbors, output_dim))

    # NOTE: output_dim has to be 2 for this plot to work, and it needs a
    # trained classifier plus its training data from the loop above
    # fci = numpy.where(trainingClasses==0)[0][0]
    # create2dContour(clf, trainingData, fci, density=0.001)



def create2dContour(clf, data, fci, showTesting=True, density=0.01):
    '''Draw the SVM's predicted class over a dense 2-d grid covering the
    data, optionally overlaying the points themselves (rows before index
    fci in blue... wait, before fci in lightBlue, from fci on in lightRed).
    '''

    # grid bounds: data extent padded slightly on every side
    margin = 0.1
    x_lo = data[:, 0].min() - margin
    x_hi = data[:, 0].max() + margin
    y_lo = data[:, 1].min() - margin
    y_hi = data[:, 1].max() + margin
    grid_x, grid_y = numpy.meshgrid(numpy.arange(x_lo, x_hi, density),
                                    numpy.arange(y_lo, y_hi, density))

    # classify every grid point, then fold back into the mesh layout
    labels = clf.predict(numpy.c_[grid_x.ravel(), grid_y.ravel()])
    labels = labels.reshape(grid_x.shape)

    # paint the decision regions
    pl.set_cmap(pl.cm.Paired)
    pl.pcolormesh(grid_x, grid_y, labels)

    if showTesting:
        # overlay the actual points on top of the decision regions
        pl.scatter(data[:fci, 0], data[:fci, 1], c=lightBlue, s=pt_size)
        pl.scatter(data[fci:, 0], data[fci:, 1], c=lightRed, s=pt_size)

    pl.show()



def create2dScatter(fci, ridge, std=None, pca=None, title=None):
    '''Scatter the first two components of each supplied embedding.

    Rows before index fci are plotted as "Case", rows from fci on as
    "Control".  std and pca are optional overlays; only pca is
    l1-normalized before plotting.
    '''

    dp = DataPreprocessor()
    # earlier ridge/std l1-normalization experiments, left disabled:
    # ridge = dp.normalize(ridge, method='l1')
    # if std is not None: std = dp.normalize(std, method='l1')
    if pca is not None:
        pca = dp.normalize(pca, method='l1')

    pl.scatter(ridge[:fci, 0], ridge[:fci, 1],
               c=lightRed, s=pt_size, marker='o', label='Ridge Case')
    pl.scatter(ridge[fci:, 0], ridge[fci:, 1],
               c=lightBlue, s=pt_size, marker='o', label='Ridge Control')

    if std is not None:
        pl.scatter(std[:fci, 0], std[:fci, 1],
                   c=darkBlue, s=pt_size, marker='o', label='Standard Case')
        pl.scatter(std[fci:, 0], std[fci:, 1],
                   c=lightBlue, s=pt_size, marker='s',
                   label='Standard Control')

    if pca is not None:
        pl.scatter(pca[:fci, 0], pca[:fci, 1],
                   c=darkGreen, s=pt_size, marker='o', label='PCA Case')
        pl.scatter(pca[fci:, 0], pca[fci:, 1],
                   c=lightGreen, s=pt_size, marker='s', label='PCA Control')

    pl.xlabel('First Component')
    pl.ylabel('Second Component')
    if title is not None:
        pl.title(title)

    pl.legend()
    pl.show()


def create3dScatter(fci, ridge, std=None, pca=None, title=None):
    '''3-d scatter of the first three components of each embedding.

    Rows before index fci are plotted as "Case" (circle markers), rows
    from fci on as "Control" (square markers).  All supplied embeddings
    are l1-normalized before plotting.
    '''

    # normalize data
    dp = DataPreprocessor()
    ridge = dp.normalize(ridge, method='l1')
    if std is not None:
        std = dp.normalize(std, method='l1')
    if pca is not None:
        pca = dp.normalize(pca, method='l1')

    fig = pl.figure()
    ax = Axes3D(fig)

    # (points, case color, control color, legend name) per embedding
    series = [(ridge, darkRed, lightRed, 'Ridge')]
    if std is not None:
        series.append((std, darkBlue, lightBlue, 'Standard'))
    if pca is not None:
        series.append((pca, darkGreen, lightGreen, 'PCA'))

    for pts, caseColor, controlColor, name in series:
        ax.scatter(pts[:fci, 0], pts[:fci, 1], pts[:fci, 2],
                   c=caseColor, marker='o', s=pt_size,
                   label='{} Case'.format(name))
        ax.scatter(pts[fci:, 0], pts[fci:, 1], pts[fci:, 2],
                   c=controlColor, marker='s', s=pt_size,
                   label='{} Control'.format(name))

    ax.set_xlabel('First Component')
    ax.set_ylabel('Second Component')
    ax.set_zlabel('Third Component')

    # TODO: title and legend won't display for some reason
    # ax.set_title('title',visible=True)
    # ax.legend()

    pl.show()

    

# display the proper command usage and exit with a failure status
def commandFormat():
    """Print the expected command-line usage, then exit with status 1."""
    print('python analyze.py <path_to_data>')
    # sys.exit instead of the site-provided exit() builtin, which is not
    # guaranteed to be available in non-interactive runs
    sys.exit(1)


if __name__ == '__main__':
    # a data-file path argument is required; otherwise show usage and exit
    if len(sys.argv) >= 2:
        main(sys.argv[1])
    else:
        commandFormat()
