import sys
import pylab as pl
from sklearn import manifold, svm, datasets
import numpy
import matplotlib.pyplot as mpl

import loadData
import lle

def main(filename):
    """Run the LLE + SVM analysis pipeline on the data file at *filename*.

    Loads a train/test split, computes two 2-D LLE embeddings of the
    training data (a 'ridge' variant and the standard method), trains an
    RBF-kernel SVM on a hand-picked slice of the standard embedding, and
    plots the SVM decision surface over a mesh.
    Exits the process with status 1 if the data cannot be loaded.
    """
    # load data from file (fixed seed so the train/test split is reproducible)
    try:
        mats = loadData.load(filename, rseed=23453267)
        trainingMat = mats[0]
        testingMat = mats[1]        # currently unused below
        trainingClasses = mats[2]
        testingClasses = mats[3]    # currently unused below
        print('Data loaded')
    except Exception as e:
        print(e)
        exit(1)

    # perform LLE: embed into 2 dimensions using 15 nearest neighbors
    num_neighbors = 15
    output_dim = 2
    # LLE using new ridge method
    ridgeLLE = lle.LocallyLinearEmbedding(num_neighbors,
                                          output_dim,
                                          method='ridge')
    # LLE using standard method
    stdLLE = lle.LocallyLinearEmbedding(num_neighbors,
                                        output_dim,
                                        method='standard')

    # TODO: only transpose here if we transposed when data was loaded
    t = trainingMat.transpose()

    # input to fit_transform should be [n_samples, n_features];
    # output of fit_transform is [n_samples, output_dim]
    # NOTE: there seems to be a random component to fit_transform since
    # multiple identical calls do NOT yield the same results
    ridgeOutput = ridgeLLE.fit_transform(t)
    stdOutput = stdLLE.fit_transform(t)

    print('stdout.shape: {}; trainingClasses.shape: {}'.format(stdOutput.shape, trainingClasses.shape))
    print('trainingClasses: {}'.format(trainingClasses))

    # run SVM with RBF kernel
    # DE[11-29-11] the SVM seems to break if you give it too many of either
    #   Case/Control. It does better with 50/50
    # DE[11-29-11] Gamma being high (i.e >= 50) is critical for accuracy
    # DE[11-29-11] C being greater than 1 is helpful (5 and 9 seem
    #   equivalent with gamma 55 rbf)
    clf = svm.SVC(C=5.0, coef0=0.0, degree=5, gamma=55.5, kernel='rbf',
                  probability=True, shrinking=True, tol=0.000001)
    print(clf)

    # train/test on a hand-picked slice of the standard embedding
    # (roughly balanced Case/Control per the DE note above)
    toyOutput = stdOutput[118:172, :]
    toyClasses = trainingClasses[118:172]
    print(toyOutput, toyClasses)
    pl.scatter(toyOutput[:, 0], toyOutput[:, 1], c='g', label='toy set')

    trainSVM(clf, toyOutput, toyClasses, stdout=True)
    testSVM(clf, toyOutput, toyClasses, stdout=True)

    # predict() expects a 2-D [n_samples, n_features] array, so pass
    # single-row slices rather than 1-D vectors
    print('predict[1], {}, Ans: {}'.format(clf.predict(stdOutput[1:2]), trainingClasses[1]))
    print('predict[-1], {}, Ans: {}'.format(clf.predict(stdOutput[-1:]), trainingClasses[-1]))

    # index of the first Control (class 0) sample in the training labels
    firstControlIndex = numpy.where(trainingClasses == 0)[0][0]
    print(firstControlIndex)

    # generate a mesh over the toy embedding (plus a small margin) to
    # visualize the SVM decision surface
    h = 0.01       # mesh step size
    extra = 0.1    # margin added around the data extremes

    xmin = toyOutput[:, 0].min() - extra
    xmax = toyOutput[:, 0].max() + extra
    ymin = toyOutput[:, 1].min() - extra
    ymax = toyOutput[:, 1].max() + extra

    print('xmin: {}, xmax: {}, ymin: {}, ymax: {}'.format(xmin, xmax, ymin, ymax))
    xx, yy = numpy.meshgrid(numpy.arange(xmin, xmax, h),
                            numpy.arange(ymin, ymax, h))

    print('xx.shape: {}; yy.shape: {}'.format(xx.shape, yy.shape))

    # build the [n_points, 2] grid once and reuse it for prediction
    gridPoints = numpy.c_[xx.ravel(), yy.ravel()]
    print('predict.shape: {}; predict: {}'.format(gridPoints.shape, gridPoints))
    print(xx.ravel())
    Z = clf.predict(gridPoints)

    print('Z.shape: {}'.format(Z.shape))
    print(Z)
    # reshape predictions back onto the mesh and plot the decision surface
    Z = Z.reshape(xx.shape)

    print('Z.shape: {}'.format(Z.shape))
    print('Z: {}'.format(Z))
    pl.set_cmap(pl.cm.Paired)
    pl.pcolormesh(xx, yy, Z)
    pl.show()

    # TODO: train data, run test data through, analyze results

# Fit the SVM on Case/Control training data with known discrete labels Y;
# optionally echo the fitted estimator returned by fit().
def trainSVM(clf, trainingMat, Y, stdout=False):
    fitted = clf.fit(trainingMat, Y)
    if stdout:
        print(fitted)

# Test the SVM against data with known Case/Control values and compute accuracy
def testSVM(clf, testingMat, Y, stdout=False):
    numCorrect = 0
    for i in range(0,len(testingMat)):
        if(stdout):
            print('testingDiscreteMat[{}]: Y: {}; prediction: {}'.format(i, Y[i], clf.predict(testingMat[i])))
        if clf.predict(testingMat[i])[0] == Y[i]:
            numCorrect = numCorrect + 1
    print('numCorrect: {}  (out of {})'.format(numCorrect, len(testingMat)))
    

# display the proper command usage and exit
def commandFormat():
    """Print the expected command-line usage and exit with status 1.

    Uses sys.exit (always available) instead of the site-module exit()
    helper; both raise SystemExit(1), so callers see identical behavior.
    """
    print('python analyze.py <path_to_data>')
    sys.exit(1)


if __name__=='__main__':
    # check for valid arguments: a data-file path is required;
    # commandFormat() prints usage and exits(1) if it is missing
    if len(sys.argv)<2:
        commandFormat()
    # run analysis on given file
    main(sys.argv[1])
