# standard
import sys

# third party - numpy, matplotlib, sklearn
import numpy
import pylab as pl
import matplotlib.pyplot as mpl
from sklearn import manifold, svm, datasets

# our imports
from loadData import DataPreprocessor
import lle


def main(filename):

    # load the data
    dp = DataPreprocessor()
    try:
        dp.loadFromFile(filename)
    except Exception,e:
        print e
        exit(1)

    # grab the data matrix and normalize it
    rawData = dp.mat

    #NOTE: The LLE and SVM params to be used for sweeps must be specified as
    # lists.  range() is a useful funtion for this purpose.  Otherwise, just
    # specify an exact list, such as [1,2,3].  cross_validation is the number
    # of times training and testing sets should be selected and run through
    # SVM.

    # LLE params
    num_neighbors = [20]
    output_dim = [10]
    distance_function = ['manhattan']
    norm_method = ['l1','l2','scale','old']

    # SVM params
    C = [4] # 2^C will be used so specify the exponent not the value
    gamma = [30]
    class_weights = ['auto']

    # number of times to run a given SVM configuration
    cross_validation = 5

    # best classification accuracy
    bestCaseRate = -1.0
    bestControlRate = -1.0
    bestAllRate = -1.0

    # best parameter configuration for case, control, and overall
    bestCaseConfig = None
    bestControlConfig = None
    bestAllConfig = None

    # iterate over normalization methods
    for n in norm_method:
        # normalize data
        normData = dp.normalize(rawData, method=n)
        
        # iterate over LLE params
        for k in num_neighbors:
            for d in output_dim:
                for f in distance_function:

                    # LLE using new ridge method
                    ridgeLLE = lle.LocallyLinearEmbedding(k, d, method='ridge',
                                                          distance_function=f)
                    ridgeOutput = ridgeLLE.fit_transform(normData)
                    
                    # iterate over SVM params
                    for c in C:
                        for g in gamma:
                            for w in class_weights:

                                # other SVM params
                                coef0 = 0.0
                                degree = 5
                                kernel = 'rbf'
                                probability = True
                                shrinking = True
                                tol = 0.000001
                                
                                # stats (num case/control)
                                caseCorrect = 0
                                caseTotal = 0
                                controlCorrect = 0
                                controlTotal = 0

                                # perform cross validation
                                for v in range(cross_validation):
                                    
                                    # pick indices for training data
                                    trInd = dp.selectTrainingSet(pct=0.75)
                                    trClasses = dp.case[trInd].astype('int')
                                    
                                    # calculate indices of testing cases
                                    teInd = numpy.zeros((normData.shape[0]))
                                    for i in range(len(teInd)): teInd[i] = i
                                    teInd =  numpy.setdiff1d(teInd,
                                                             trInd).astype('int')
                                    teClasses = dp.case[teInd].astype('int')
                                    
                                    # select training and testing data
                                    trData = ridgeOutput[trInd,:]
                                    teData = ridgeOutput[teInd,:]
                            
                                    # train SVM on ridge data
                                    ridgeClf = svm.SVC(C=2**c, 
                                                       coef0=coef0, 
                                                       degree=degree, 
                                                       gamma=g, 
                                                       kernel=kernel, 
                                                       probability=probability, 
                                                       shrinking=shrinking, 
                                                       tol=tol)
                                    ridgeClf.fit(trData,
                                                 trClasses,
                                                 class_weight=w)
                                    
                                    # perform predictions on training set
                                    predict = ridgeClf.predict(teData)
                                
                                    # calculate total case vs control
                                    caseTotal += numpy.sum(teClasses)
                                    controlTotal += len(teClasses)-numpy.sum(teClasses)

                                    # first index of control
                                    fci = numpy.where(teClasses==0)[0][0]

                                    # find matches
                                    caseMatch = numpy.logical_not(
                                        numpy.logical_xor(teClasses[:fci], 
                                                          predict[:fci]))
                                    controlMatch = numpy.logical_not(
                                        numpy.logical_xor(teClasses[fci:],
                                                          predict[fci:]))

                                    # accumulate stats
                                    caseCorrect += len(numpy.where(caseMatch==True)[0])
                                    controlCorrect += len(numpy.where(controlMatch==True)[0])
                            
                                    # calculate accuracy from previous runs
                                if caseTotal > 0:
                                    caseRate = caseCorrect/float(caseTotal)
                                else:
                                    caseRate = 0

                                if controlTotal > 0:
                                    controlRate = controlCorrect/float(controlTotal)
                                else:
                                    controlRate = 0
                                if caseTotal+controlTotal > 0:
                                    allRate = (caseCorrect+controlCorrect)/float(caseTotal+controlTotal)
                                else:
                                    allRate = 0
                            
                                if bestCaseRate < caseRate:
                                    bestCaseRate = caseRate
                                    bestCaseConfig = [n, k, d, f, 2**c, g, w,
                                                      caseRate,controlRate,
                                                      allRate]

                                if bestControlRate < controlRate:
                                    bestControlRate = controlRate
                                    bestControlConfig = [n,k, d, f, 2**c, g, w,
                                                         caseRate,controlRate,
                                                         allRate]
                                    
                                if bestAllRate < allRate:
                                    bestAllRate = allRate
                                    bestAllConfig = [n, k, d, f, 2**c, g, w,
                                                     caseRate, controlRate, 
                                                     allRate]


    print '\n\n---Parameter sweep finished---'
    print '---Normalization methods swept---'
    print('Normalization methods: {}').format(norm_method)

    print '\n---LLE parameters swept---'
    print('Number of neighbors: {}').format(num_neighbors)
    print('Output dimension: {}').format(output_dim)
    print('Distance function: {}').format(distance_function)

    print '\n---SVM parameters swept---'
    print('C: {}').format(C)
    print('Gamma: {}').format(gamma)
    print('Class weights: {}').format(class_weights)

    print('\nNumber of cross validations: {}').format(cross_validation)
        
    print '\n---Best configurations---'
    print('Best case config: {}').format(bestCaseConfig)
    print('Best control config: {}').format(bestControlConfig)
    print('Best combined config: {}').format(bestAllConfig)


# display the proper command usage and exit
def commandFormat():
    """Print this script's usage line and abort with exit status 1."""
    # single-argument print() parses identically under Python 2 and 3
    print('python parameterSweep.py <path_to_data>')
    # sys.exit instead of the bare exit() builtin: exit() is injected by
    # the interactive `site` module and is not guaranteed in scripts
    sys.exit(1)


if __name__ == '__main__':
    # require exactly one positional argument: the path to the data file
    args = sys.argv[1:]
    if not args:
        commandFormat()
    main(args[0])
