'''
Created on Aug 7, 2012

@author: philipp
'''

# NOTE(review): leftover no-op guard (likely an IDE template artifact); the
# actual entry point is the unconditional main() call at the bottom of the file.
if __name__ == '__main__':
    pass

import math as m
import numpy as np
import ImagProcPhysTools as IPPT
import h5rw 
from time import time    
    
import pylab as pl
from matplotlib import offsetbox
from sklearn.utils.fixes import qr_economic
from sklearn import manifold, datasets, decomposition, lda
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition import PCA
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.svm import SVC
from sklearn import svm, grid_search, datasets
from sklearn.cluster import KMeans
from sklearn import metrics

from numpy import linalg as LA
from misc import *
import os
  
    
def main():
    """Entry point: run the currently selected experiment."""
    # Alternative drivers, kept for manual switching:
    # processFiles()
    # testClassifier()
    test()

def testClassifier():
    """Load the preprocessed data set and run every classifier benchmark."""
    X_train, y_train, X_test, y_test = loadData('data.h5')

    # Run each benchmark with the same train/test split.
    for benchmark in (testSVC, testSGD, testClustering):
        benchmark(X_train, y_train, X_test, y_test)
        

#----------------------------------------------------------------------
# Scale and visualize the embedding vectors

def plot_embedding(X, title=None, y=None):
    """Scale a 2-D embedding to the unit square and draw each sample as text.

    Parameters
    ----------
    X : array of shape (n_samples, 2)
        Embedding coordinates; each row is drawn as a coloured text label.
    title : str, optional
        Figure title.
    y : array of shape (n_samples,), optional
        Integer class labels used for the label text and colour.
        BUGFIX: the original referenced the undefined globals ``digits.target``
        and ``y`` (copied from the sklearn digits example) and raised
        NameError; the labels are now an explicit, optional parameter.
        When omitted, the sample index is used.
    """
    # Normalise coordinates to [0, 1] per axis.
    # NOTE(review): degenerate if an axis has zero range — confirm inputs vary.
    x_min, x_max = np.min(X, 0), np.max(X, 0)
    X = (X - x_min) / (x_max - x_min)

    if y is None:
        y = np.arange(X.shape[0])

    pl.figure()
    pl.subplot(111)
    for i in range(X.shape[0]):
        pl.text(X[i, 0], X[i, 1], str(y[i]),
                color=pl.cm.Set1(y[i] / 10.),
                fontdict={'weight': 'bold', 'size': 9})
    pl.xticks([]), pl.yticks([])
    if title is not None:
        pl.title(title)

def testManifold(X_train, y_train, X_test, y_test):
    """Project the stacked train+test data with a battery of 2-D embeddings.

    Runs random projection, PCA, LDA, Isomap, three LLE variants, LTSA and
    MDS, timing each and handing the result to plot_embedding().

    NOTE(review): adapted from the sklearn digits-manifold example — the image
    mosaic below reshapes samples to (8, 8), i.e. assumes 64 features, and
    `qr_economic` / `lda.LDA` are deprecated sklearn APIs; confirm both
    against the installed sklearn version and the actual feature count.
    """
    X = np.vstack((X_train, X_test))
    y = np.concatenate((y_train, y_test))
    
    n_samples, n_features = X.shape
    n_neighbors = 30 
    
    #----------------------------------------------------------------------
    # Plot images of the digits
    # Tile the first N*N samples into one big preview image (8x8 tiles on a
    # 10-pixel grid, leaving a 2-pixel border around each).
    N = 20
    img = np.zeros((10 * N, 10 * N))
    for i in range(N):
        ix = 10 * i + 1
        for j in range(N):
            iy = 10 * j + 1
            img[ix:ix + 8, iy:iy + 8] = X[i * N + j].reshape((8, 8))
    pl.imshow(img, cmap=pl.cm.binary)
    pl.xticks([])
    pl.yticks([])
    pl.title('A selection from the 64-dimensional digits dataset')
    
    
    #----------------------------------------------------------------------
    # Random 2D projection using a random unitary matrix
    print "Computing random projection"
    rng = np.random.RandomState(42)
    Q, _ = qr_economic(rng.normal(size=(n_features, 2)))
    X_projected = np.dot(Q.T, X.T).T
    plot_embedding(X_projected, "Random Projection of the digits")
    
    
    #----------------------------------------------------------------------
    # Projection on to the first 2 principal components
    
    print "Computing PCA projection"
    t0 = time()
    X_pca = decomposition.RandomizedPCA(n_components=2).fit_transform(X)
    plot_embedding(X_pca,
        "Principal Components projection of the digits (time %.2fs)" % 
        (time() - t0))
    
    #----------------------------------------------------------------------
    # Projection on to the first 2 linear discriminant components
    
    print "Computing LDA projection"
    X2 = X.copy()
    X2.flat[::X.shape[1] + 1] += 0.01  # Make X invertible
    t0 = time()
    X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
    plot_embedding(X_lda,
        "Linear Discriminant projection of the digits (time %.2fs)" % 
        (time() - t0))
    
    
    #----------------------------------------------------------------------
    # Isomap projection of the digits dataset
    print "Computing Isomap embedding"
    t0 = time()
    X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
    print "Done."
    plot_embedding(X_iso,
        "Isomap projection of the digits (time %.2fs)" % 
        (time() - t0))
    
    
    #----------------------------------------------------------------------
    # Locally linear embedding of the digits dataset
    print "Computing LLE embedding"
    clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                          method='standard')
    t0 = time()
    X_lle = clf.fit_transform(X)
    print "Done. Reconstruction error: %g" % clf.reconstruction_error_
    plot_embedding(X_lle,
        "Locally Linear Embedding of the digits (time %.2fs)" % 
        (time() - t0))
    
    
    #----------------------------------------------------------------------
    # Modified Locally linear embedding of the digits dataset
    print "Computing modified LLE embedding"
    clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                          method='modified')
    t0 = time()
    X_mlle = clf.fit_transform(X)
    print "Done. Reconstruction error: %g" % clf.reconstruction_error_
    plot_embedding(X_mlle,
        "Modified Locally Linear Embedding of the digits (time %.2fs)" % 
        (time() - t0))
    
    
    #----------------------------------------------------------------------
    # HLLE embedding of the digits dataset
    print "Computing Hessian LLE embedding"
    clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                          method='hessian')
    t0 = time()
    X_hlle = clf.fit_transform(X)
    print "Done. Reconstruction error: %g" % clf.reconstruction_error_
    plot_embedding(X_hlle,
        "Hessian Locally Linear Embedding of the digits (time %.2fs)" % 
        (time() - t0))
    
    
    #----------------------------------------------------------------------
    # LTSA embedding of the digits dataset
    print "Computing LTSA embedding"
    clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
                                          method='ltsa')
    t0 = time()
    X_ltsa = clf.fit_transform(X)
    print "Done. Reconstruction error: %g" % clf.reconstruction_error_
    plot_embedding(X_ltsa,
        "Local Tangent Space Alignment of the digits (time %.2fs)" % 
        (time() - t0))
    
    #----------------------------------------------------------------------
    # MDS  embedding of the digits dataset
    # MDS is fitted on the pairwise distance matrix, not the raw features.
    print "Computing MDS embedding"
    clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
    t0 = time()
    X_mds = clf.fit_transform(euclidean_distances(X))
    print "Done. Stress: %f" % clf.stress_
    plot_embedding(X_mds,
        "MDS embedding of the digits (time %.2fs)" % 
        (time() - t0))
    
    pl.show()
    
def testSVC(X_train, y_train, X_test, y_test):
    print "==================================="
    print "Testing SVC"
    print "..................................."
    parameters = {'kernel':('linear', 'rbf', 'sigmoid'), 'C':[2 ** i for i in range(-2, 6, 1)]}
    for i in range(-2, 6):
        svr = svm.SVC(C=2 ** i, kernel='linear')
    #    clf = grid_search.GridSearchCV(svr, parameters)    
        svr.fit(X_train, y_train)    
        correct = svr.score(X_test, y_test)  
    #    print clf.best_estimator_  
    #    print clf.grid_scores_
        print "Correctly predicted: %f, C=%f" % (correct, 2 ** i)

def testSGD(X_train, y_train, X_test, y_test):
    print "==================================="
    print "Testing SGD"
    print "..................................."
    clf = linear_model.SGDClassifier()             
    clf.fit(X_train, y_train)
    correct = clf.score(X_test, y_test)    
    print "Correctly predicted: %f" % correct
    
def testClustering(X_train, y_train, X_test, y_test):
    data = np.vstack((X_train, X_test))
    labels = np.concatenate((y_train, y_test))
    print 79 * '_'
    print ('% 9s' % 'init        time  inertia    homo   compl  v-meas     ARI     AMI  silhouette')
    means = KMeans(2, init='k-means++', n_init=10)
    bench_k_means(means, name="k-means++", data=data, labels=labels, sample_size=labels.shape[0])
    means = KMeans(2, init='random', n_init=10)
    bench_k_means(means, name="random", data=data, labels=labels, sample_size=labels.shape[0])

def bench_k_means(estimator, name, data, labels, sample_size):
    t0 = time()
    estimator.fit(data)
    print '% 9s   %.2fs    %i   %.3f   %.3f   %.3f   %.3f   %.3f    %.3f' % (
         name, (time() - t0), estimator.inertia_,
         metrics.homogeneity_score(labels, estimator.labels_),
         metrics.completeness_score(labels, estimator.labels_),
         metrics.v_measure_score(labels, estimator.labels_),
         metrics.adjusted_rand_score(labels, estimator.labels_),
         metrics.adjusted_mutual_info_score(labels, estimator.labels_),
         metrics.silhouette_score(data, estimator.labels_,
                                  metric='euclidean',
                                  sample_size=sample_size),
         )
    
def loadData(fname):
    """Load the train/test matrices and labels from the HDF5 file *fname*.

    Returns (X_train, y_train, X_test, y_test).
    """
    # Read the file once — the original called h5rw.h5read(fname) four times,
    # re-parsing the entire file for every key.
    contents = h5rw.h5read(fname)
    return (contents['train'], contents['train_labels'],
            contents['test'], contents['test_labels'])

def processFiles(thresh=0, beamstopCutoffradius=50, patternEndRadius=300, rstep=2 ** 1, phisteps=192, doPlot=True):
    train = []
    train_class = []
    train_freq = 20
    dim = 1024
    clf = linear_model.SGDClassifier()
    train_dirname = './test'
    test_dirname = './test/test'
    noise = 1e4
    gamma = 0.25
    
    for i, filename in enumerate(os.listdir(train_dirname)):
        if os.path.splitext(filename)[1] == '.h5':
            print "Reading %s" % filename
            
            ob = h5rw.h5read(train_dirname + '/' + filename)['data']['data']
            ob = np.power(ob, gamma)    
            ob = addNoise(ob, noise)                   
                  
            img = transformImage(ob, dim, horiz=True, thresh=thresh, beamstopCutoffradius=beamstopCutoffradius, patternEndRadius=patternEndRadius, rstep=rstep, phisteps=phisteps, doPlot=doPlot)
            
            train.append(img.flatten())
            
            if "icos1" in filename or "icos2" in filename or "icos3" in filename: train_class.append(1)
            else: train_class.append(0)
            
            
            img = transformImage(ob, dim, horiz=False, thresh=thresh, beamstopCutoffradius=beamstopCutoffradius, patternEndRadius=patternEndRadius, rstep=rstep, phisteps=phisteps, doPlot=doPlot)
            train.append(img.flatten())
            
            if "icos1" in filename or "icos2" in filename or "icos3" in filename: train_class.append(1)
            else: train_class.append(0)
            
    X = np.array(train)
    Y = np.array(train_class)      
              
    scaler = preprocessing.Scaler().fit(X)
    X_train = scaler.transform(X)
    
    X_train_raw = X_train
    
    print "Starting PCA"
    pca = RandomizedPCA(X_train.shape[0])
    X_scaled = pca.fit_transform(X_train)    
     
    print pca.explained_variance_ratio_
    print "Explained Variance ration: %f" % sum(pca.explained_variance_ratio_) 
    
    print "Training done - Start Testing"
    test = []
    test_label = []
    
    for i, filename in enumerate(os.listdir(test_dirname)):
        if os.path.splitext(filename)[1] == '.h5':
            print "Reading %s" % filename
            
            ob0 = h5rw.h5read(test_dirname + '/' + filename)['data']['data']
                
            img = transformImage(ob0, dim, True)
            test.append(img.flatten())
            
            if "icos1" in filename or "icos2" in filename or "icos3" in filename: test_label.append(1)
            else: test_label.append(0)
            
            img = transformImage(ob0, dim, False)  
            train.append(img.flatten())
            
            if "icos1" in filename or "icos2" in filename or "icos3" in filename: train_class.append(1)
            else: train_class.append(0)
    
    
    
    y_test = np.array(test_label)
    X_test = np.array(test)
    
    X_test = scaler.transform(X_test)
    X_test_raw = X_test
    
    X_test = pca.transform(X_test)
    
    print "Writing to file."
    h5rw.h5write("data.h5", train=X_scaled, train_labels=Y, test=X_test, test_labels=y_test)
    h5rw.h5write("data_raw.h5", train=X_train_raw, train_labels=Y, test=X_test_raw, test_labels=y_test)
    print "Done."
#    clf.fit(X_scaled, Y)
#    correct = clf.score(X_test, y_test)
#    
#    print "Correctly predicted: %f" % correct
                        
def transformImage(img, dim, horiz, thresh=0.4, beamstopCutoffradius=0, patternEndRadius=1024, rstep=2 ** 1, phisteps=256, doPlot=False):
    """Turn a raw diffraction pattern into a feature image.

    Pipeline: crop to dim x dim, centre by centre of mass, mask a detector
    stripe, compress the dynamic range (power 0.25), resample to polar
    coordinates, crop the beamstop region, threshold, FFT along the angular
    axis, DWT along the radial axis, and keep the first 40 columns.

    Parameters
    ----------
    img : 2-D array, the raw pattern (at least dim x dim).
    dim : crop size in pixels.
    horiz : mask a horizontal stripe when True, a vertical one otherwise.
    thresh : relative threshold as a fraction of the image maximum.
    beamstopCutoffradius, patternEndRadius : radial crop limits in pixels.
    rstep, phisteps : resolution of the polar resampling.
    doPlot : when True, dump each intermediate stage to a .png file.
    """
    img = img[0:dim, 0:dim]
    img = img.astype(float)

    img = moveCenterOfMassToCenter(img)
    if doPlot:
        plt.imsave('moveCenterOfMassToCenter.png', img)

    # BUGFIX: the vertical branch called introduceVStripe without assigning
    # its return value back, while the horizontal branch uses the return
    # value — so the vertical stripe was only applied if the helper happened
    # to mutate in place.  Assign in both branches.
    if horiz:
        img = introduceHStripe(img, 10)
    else:
        img = introduceVStripe(img, 10)

    img = np.power(img, 0.25)
    if doPlot:
        plt.imsave('introduceVHStripe.png', img)

    img = cartesianToPolar(img, r_step=rstep, r_max=patternEndRadius, phi_steps=phisteps)

    # Crop the beamstop shadow and everything beyond the usable pattern.
    # NOTE(review): relies on Python 2 integer division for the row indices.
    img = img[beamstopCutoffradius / rstep: img.shape[0], :]
    img = img[0: patternEndRadius / rstep, :]

    img = threshold(img, img.max() * thresh)
    if doPlot:
        plt.imsave('threshold.png', np.power(img, 0.25))

    img = FFTonAngles(img)
    if doPlot:
        plt.imsave('FFTonAngles.png', np.power(img, 0.25))

    img = DWTonRadius(img)
    if doPlot:
        plt.imsave('DWTonRadius.png', np.power(img, 0.25))

    # Keep only the first 40 wavelet coefficients per angle as features.
    img = img[:, 0:40]

    return img
    
def addNoise(img, noise):
    """Return a Poisson-sampled copy of *img*.

    The image is rescaled so its brightest pixel has an expected value of
    *noise* counts, then each pixel is replaced by a Poisson draw with that
    pixel's value as the mean.
    """
    expected_counts = img / img.max() * noise
    return np.random.poisson(expected_counts)
            
def threshold(img, threshold):
    """Zero out entries of *img* below *threshold* and any +/-inf entries.

    Mutates *img* in place and returns it.  NaNs are left untouched, matching
    the original element-wise comparisons (NaN < t is False, NaN != inf).
    """
    # Vectorised replacement of the original O(rows*cols) Python double loop.
    img[(img < threshold) | np.isinf(img)] = 0
    return img

def test():
    name = './test/icos-dimer-r1'
    name1 = 'r0156_181706_5505'
    noise = 1e4
    dim = 1024
    for nr in [1]:
        img = h5rw.h5read(name + '.h5')['data']['data']
        img = np.power(img, 0.25)
        plt.imsave("no_noise.png", img)     
        img = addNoise(img, noise)        
        #img = np.power(img, 0.25)
        plt.imsave("noise1e4.png", np.power(img, 0.25))          
              
        img = transformImage(img, dim, True, thresh=0.0, beamstopCutoffradius=50, patternEndRadius=300, rstep=2 ** 1, phisteps=192, doPlot=True)
        
        print "Done"
        return

        X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
        Y = np.array([1, 1, 2, 2])
        clf = linear_model.SGDClassifier()
        clf.fit(X, Y)        

        print clf.predict([[-0.8, -1]])
        
def test2():
    import numpy as np
    from sklearn.decomposition import PCA
    X = np.array([[-1, -1, -1, -1, -1, -1, -1], [-2, -1, -2, -2, -2, -2, -2], [-3, -2, 1, 1, 1, 1, 1], [1, 1, -2, -2, -2, -2, -2], [2, 1, 1, 1, 1, 1, 1], [3, 2, 1, 1, 1, 1, 1], [3, 2, 1, 4, 1, 1, 1], [3, 2, 1, 1, 1, 2, 1]])
    comp = 5
    pca = PCA(comp)
    pca2 = RandomizedPCA(n_components=comp)
    pca.fit(X)
    pca2.fit(X)
    
    h5rw.h5write("training.h5", x=X)
    
    print(pca.explained_variance_ratio_) 
    print sum(pca.explained_variance_ratio_)    
    print(pca2.explained_variance_ratio_) 
    print sum(pca2.explained_variance_ratio_)


#eps = .5
#eps_xy_list = eps * (np.indices((3, 3)) - 1).T.reshape((9, 2))
#
#fit_matrix = np.vstack([np.array([-4, 8, -4, 8, 20, 8, -4, 8, -4]),
#                        np.array([-6, 0, 6, -6, 0, 6, -6, 0, 6]) / eps,
#                        np.array([-6, -6, -6, 0, 0, 0, 6, 6, 6]) / eps,
#                        np.array([6, -12, 6, 6, -12, 6, 6, -12, 6]) / eps ** 2,
#                        np.array([9, 0, -9, 0, 0, 0, -9, 0, 9]) / eps ** 2,
#                        np.array([6, 6, 6, -12, -12, -12, 6, 6, 6]) / eps ** 2]) / 36.

# BUGFIX: the original called main() unconditionally (followed by a stray
# `pass`), so merely importing this module triggered a full run.  Guard the
# entry point — matching the author's own (empty) guard near the top of the
# file.
if __name__ == '__main__':
    main()




















