#!/usr/bin/python

# All analysis sections this script knows how to run.
sections = ['decision_tree_simple', 'decision_tree_Kfold', 'ANN', 'linear_regression', 'regularized_linear_regression', 'ANN_regression', 'K_out_of_N', 'comparison_classification', 'comparison_regression']
# Active subset for this run -- deliberately overrides the full list above.
sections = ['comparison_classification']
# Announce which sections will run. Built with join instead of repeated
# concatenation; print() form works under both Python 2 and 3 (the old
# bare `print` statement was py2-only). Trailing tab kept for message
# compatibility.
section_message = "The following sections will be considered: " + "".join(s + "\t" for s in sections)
print(section_message)
from pylab import *
import numpy as np
import pandas as pd
import scipy.linalg as linalg
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Small font used for crowded plot legends.
fontP = FontProperties()
fontP.set_size('small')
# `categorical` moved between statsmodels releases; support both layouts.
try:
    from scikits.statsmodels.tools.tools import categorical
except ImportError:
    from statsmodels.tools.tools import categorical
from scipy import stats
from sklearn import cross_validation, tree
import neurolab as nl
import sklearn.linear_model as lm
from sklearn import cross_validation  # NOTE(review): duplicate of the import above
from toolkit_libs.toolbox_02450 import feature_selector_lr, bmplot, rlr_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix

# disable all warnings
import warnings
warnings.filterwarnings("ignore")

# our snow imports
from snow_similarity import get_similarity2index

def undiscretize(data, attributenames, default):
    """Translate each one-hot encoded row back to a single label.

    For every row, the label is attributenames[j] where j is the position
    of the first '1.0' entry (string comparison, since the census matrix
    is loaded as text); rows with no '1.0' entry map to `default`.
    """
    def label_of(row):
        try:
            return attributenames[list(row).index('1.0')]
        except ValueError:
            return default
    return map(label_of, data)

def undiscretizeToInt(dataOrg):
    """Collapse a one-hot (rows x columns) indicator matrix to a vector of
    column indices.

    Entry i is the index of the LAST column with a positive value in row i
    (matching the original loop, which overwrote earlier hits), or 0 when
    the row has no positive entry.
    """
    # np.asarray lets this accept both ndarray and np.matrix inputs;
    # per-row flatnonzero replaces the py2-only xrange double loop.
    arr = np.asarray(dataOrg)
    rows, columns = arr.shape
    newRow = np.zeros(rows)
    for i in range(rows):
        positive = np.flatnonzero(arr[i, :] > 0.0)
        if positive.size:
            newRow[i] = positive[-1]
    return newRow

def deleteEmptyRowsInInput(dataX, dataY):
    """Drop every sample whose dataY row is entirely zero.

    Returns (dataX, dataY) restricted to rows where dataY has at least one
    non-zero entry; rows of dataX and dataY are removed in lockstep.
    """
    # Vectorized non-zero count per row (the original looped with the
    # py2-only xrange); np.asarray handles both ndarray and np.matrix.
    keep = (np.asarray(dataY) != 0).sum(axis=1) >= 1
    return dataX[keep], dataY[keep]

def combine2Rows(dataY):
    """Collapse a two-column indicator matrix into a binary label vector.

    Returns a length-rows array that is 1 where column 1 of dataY is
    positive and 0 elsewhere.
    """
    rows, columns = np.shape(dataY)
    newY = np.zeros(rows)
    # Vectorized replacement of the per-row loop. The py2-only debug
    # `print` statements were removed: they dumped the entire matrix to
    # stdout on every call and made the function a syntax error under py3.
    newY[np.asarray(dataY)[:, 1] > 0.0] = 1
    return newY

def removeZeroValues(dataXY):
    """Keep only the rows of dataXY with no value below 1 in any column.

    Filters column-by-column exactly like the original loop, but without
    the py2-only xrange and the unused row count.
    """
    _, columns = np.shape(dataXY)
    for col in range(columns):
        # ~(x < 1) rather than (x >= 1) preserves the original's handling
        # of NaN entries (NaN compares False either way round, so NaN rows
        # are kept).
        dataXY = dataXY[~(dataXY[:, col] < 1)]
    return dataXY

def removeValuesUnderZero(dataXY):
    """Drop every row of dataXY whose first-column value is negative."""
    # Vectorized replacement of the per-row flag loop (which used the
    # py2-only xrange). ~(x < 0) keeps NaN first-column rows, matching the
    # original comparison semantics.
    first_col = np.asarray(dataXY)[:, 0]
    return dataXY[~(first_col < 0.0)]
  
# Load the preprocessed census data: every cell is read as a string, so
# the one-hot columns hold '0.0'/'1.0' text values.
censusdata = np.loadtxt('./census-income/reducedDataMatrix', dtype=np.str,delimiter=',')
attributeNames = np.loadtxt('./census-income/attributeNamesReduced', dtype=np.str,delimiter=',')
# Column indices used as classification inputs / one-hot outputs.
classifInputCols = [3,4,7,8,17,18,19,20,21,22,23,24,25]
classifOutputCols = [5,6,27,28,29,30]
#for i,e in enumerate(attributeNames): print str(i) + " " + str(e)
classifInputAttributeNames = attributeNames[classifInputCols]
classifOutputAttributeNames = attributeNames[classifOutputCols]
classifInput = censusdata[:,classifInputCols]
# NOTE(review): classifOutputCols (a list of column indices) is passed
# where undiscretize expects attribute names, so class labels come out as
# column numbers rather than names -- confirm this is intended.
classifOutput = undiscretize(censusdata[:,classifOutputCols], classifOutputCols, -1)
# Regression task: predict column 16 from the numeric columns 0 and 2.
regrInputCols = [0,2]
regrOutputCols = [16]
regrInputAttributeNames = attributeNames[regrInputCols]
regrOutputAttributeNames = attributeNames[regrOutputCols]
regrInput = np.array(map(lambda row: map(float, row), censusdata[:,regrInputCols]))
regrOutput = np.array(map(lambda row: map(float, row), censusdata[:,regrOutputCols]))

# ------------ COMPARISON OF REGRESSION ------------ #
# Compares ANN regression against ordinary and regularized linear
# regression with K-fold crossvalidation, then t-tests the error samples.
if 'comparison_regression' in sections:

    # Design matrix: offset column + raw inputs + squared inputs.
    N, M = regrInput.shape
    X=np.concatenate((np.ones((N,1)),regrInput),1)
    X=np.concatenate((X,np.square(np.matrix(regrInput[:,0]).T),np.square(np.matrix(regrInput[:,1]).T)),1)
    regrInputAttributeNames = np.array([u'Offset']+list(regrInputAttributeNames)
                                   + map(lambda s: "SQ"+s, regrInputAttributeNames)
                                   )
    M += 1+2
    y=regrOutput
    meanY = np.mean(y)
    print "The mean is " + str(meanY)


    #params!
    K = 20              # outer crossvalidation folds
    internal_K = 4      # inner folds for feature selection / lambda search
    
    CV = cross_validation.KFold(N,K,shuffle=True)

    #Norm: plain linear regression (with and without feature selection)
    Features_norm = np.zeros((M,K))
    Error_test_norm = np.empty((K,1))
    Error_test_fs_norm = np.empty((K,1))
    Error_test_nofeatures_norm = np.empty((K,1))

    #Regu: regularized linear regression, lambda swept over 10^-5..10^8
    lambdas = np.power(10.,range(-5,9))
    Features_regu = np.zeros((M,K))
    Error_test_regu = np.empty((K,1))
    Error_test_rlr_regu = np.empty((K,1))
    Error_test_nofeatures_regu = np.empty((K,1))
    w_rlr = np.matrix(np.empty((M,K)))
    w_noreg = np.matrix(np.empty((M,K)))

    #ANN (trained on the raw, un-augmented inputs)
    N_ann, M_ann = regrInput.shape
    X_ann=regrInput
    y_ann=regrOutput
    # Parameters for neural network classifier
    n_hidden_units = 6    # number of hidden units
    n_train = 1             # number of networks trained in each k-fold
    n_output_units = 1
    learning_rate = 0.0005  # rate of weights adaptation
    learning_goal = 0.0001   # stop criterion 1 (train mse to be reached)
    max_epochs = 24         # stop criterion 2 (max epochs in training)
    Error_test_ann = np.empty((K,1))
    errors_ann = np.zeros(K)
    error_hist_ann = np.zeros((max_epochs,K))
    bestnet = list()

    #y = np.log(y)

    k=0
    for train_index, test_index in CV:
        print('Computing CV fold: {0}/{1}..'.format(k+1,K))
    
        # extract training and test set for current CV fold
        X_train, y_train = X[train_index,:], np.array(y)[train_index,:]
        X_test, y_test = X[test_index,:], np.array(y)[test_index,:]
        # Compute squared error without using the input data at all

        #Normal linear regression
        Error_test_nofeatures_norm[k] = np.square(y_test-y_test.mean()).sum()/y_test.shape[0]    

        # Compute squared error with all features selected (no feature selection)
        m = lm.LinearRegression().fit(X_train, y_train)
        Error_test_norm[k] = np.square(y_test-m.predict(X_test)).sum()/y_test.shape[0]

        # Compute squared error with feature subset selection
        selected_features, features_record, loss_record = feature_selector_lr(X_train, y_train, internal_K)
        Features_norm[selected_features,k]=1
        # .. alternatively you could use module sklearn.feature_selection
        m = lm.LinearRegression().fit(X_train[:,selected_features], y_train)
        Error_test_fs_norm[k] = np.square(y_test-m.predict(X_test[:,selected_features])).sum()/y_test.shape[0]

        #Reguralized linear regression
        opt_val_err, opt_lambda, mean_w_vs_lambda, train_err_vs_lambda, test_err_vs_lambda = \
            rlr_validate(X_train, y_train, lambdas, internal_K)
        # Normal-equation terms (X is an np.matrix here, so * is matmul).
        Xty = X_train.T*y_train
        XtX = X_train.T*X_train
    
        # Compute squared error without using the input data at all
        Error_test_nofeatures_regu[k] = np.square(y_test-y_test.mean()).sum()/y_test.shape[0]    

        # Estimate weights for the optimal value of lambda, on entire training set
        w_rlr[:,k] = linalg.lstsq(XtX+opt_lambda*np.eye(M),Xty)[0]
        # Compute mean squared error with regularization with optimal lambda
        Error_test_rlr_regu[k] = np.square(y_test-X_test*w_rlr[:,k]).sum()/y_test.shape[0]
    
        # Estimate weights for unregularized linear regression, on entire training set
        w_noreg[:,k] = linalg.lstsq(XtX,Xty)[0]
        # Compute mean squared error without regularization
        Error_test_regu[k] = np.square(y_test-X_test*w_noreg[:,k]).sum()/y_test.shape[0]

        #ANN
        X_train, y_train = X_ann[train_index,:], np.array(y_ann)[train_index,:]
        X_test, y_test = X_ann[test_index,:], np.array(y_ann)[test_index,:]

        # Create randomly initialized network with 2 layers
        # NOTE(review): the newff input ranges assume every feature lies in
        # [0, 1] -- confirm the regression inputs are normalized.
        ann = nl.net.newff([[0, 1]]*M_ann, [n_hidden_units, n_output_units], [nl.trans.LogSig(),nl.trans.LogSig()])
        # train network
        train_error = ann.train(X_train, y_train, goal=learning_goal, epochs=max_epochs, show=round(max_epochs/8))
        bestnet.append(ann)
        best_train_error = train_error[-1]
        error_hist_ann[range(len(train_error)),k] = train_error
        y_est = bestnet[k].sim(X_test)
        Error_test_ann[k] = np.square(y_test-y_est).sum()/y_test.shape[0]

        k+=1

    # Per-fold test error curves for the three models.
    figure(1)
    plot(Error_test_ann, 'r', label='ANN')
    plot(Error_test_regu,'b', label='Linear regression (regularization)')
    plot(Error_test_norm,'g', label='Linear regression')
    #plot(Error_test_fs_norm,'y', label='Linear regression feature selection')
    #plot(Error_test_rlr_regu,'m', label='rlr regularized')
    legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=gcf().transFigure)
    xlabel('Cross validation fold')
    ylabel('Mean square error')

    figure(2)
    boxplot([Error_test_ann, Error_test_regu, Error_test_norm], notch=0, sym='+', vert=1, whis=1)
    plt.xticks([1,2,3], ['ANN', 'Linear regression (regularization)', 'Linear regression'], size='small')
    plt.grid(which='major', axis='y')
    ylabel('Mean square error')

    # Use T-test to check if classifiers are significantly different
    [tstatistic, pvalue] = stats.ttest_ind(Error_test_ann, Error_test_regu)
    if pvalue<=0.05:
        print('Classifiers are significantly different. (p={0})'.format(pvalue))
    else:
        print('Classifiers are not significantly different (p={0})'.format(pvalue))        

    show()

# ------------ COMPARISON OF CLASSIFICATION ------------ #
# Compares a decision tree against K-nearest-neighbors with K-fold
# crossvalidation and t-tests the per-fold error samples. (An ANN
# classifier is parameterized below but its training code is commented out.)
if 'comparison_classification' in sections:
    X = classifInput
    y = np.matrix(censusdata[:,classifOutputCols]).astype(float)
    #X, y = deleteEmptyRowsInInput(X, y)
    y_ann = y.astype(int)   # one-hot targets for the (disabled) ANN
    y_tree = classifOutput  # labels for the decision tree
    y_kon = np.transpose(np.matrix(undiscretizeToInt(y))).astype(int)  # integer class indices for KNN

    #y_count = np.zeros(6)
    #for i in [0,1,2,3,4,5]:
    #    y_count[i] = np.count_nonzero(y_ann[:,i])

    #print y_count
    #print y_count[4]/y.shape[0]

    #exit(0)

    #Common parameters
    K = 30
    N, M = X.shape
    CV = cross_validation.KFold(N,K,shuffle=True)

    #Tree classifier
    splitting_criterion='entropy'; # one of {gini, entropy}
    t=7     # max tree depth
    
    #K_out_of_N
    l = 10  # number of neighbors for KNN
    errors_kon = np.zeros(K)

    #ANN
    # Parameters for neural network classifier
    # NOTE(review): these ANN parameters are unused here -- the training
    # loop below is commented out.
    n_hidden_units = 8     # number of hidden units
    n_train = 1             # number of networks trained in each k-fold
    n_output_units = 6
    learning_rate = 0.0005  # rate of weights adaptation
    learning_goal = 200.0   # stop criterion 1 (train mse to be reached)
    max_epochs = 24         # stop criterion 2 (max epochs in training)
    bestnet = list()
    errors_ANN = np.zeros(K)
    error_hist_ANN = np.zeros((max_epochs,K))
    bestnet = list()

    # Initialize variables
    Error_test_tree = np.empty(K)
    
    #Error_train_KN = np.empty((len(tc),K))
    #Error_test_KN = np.empty((len(tc),K))
    
    #Error_train_ANN = np.empty((len(tc),K))
    #Error_test_ANN = np.empty((len(tc),K))
    
    misclass_rate_test_tree = 0
    misclass_rate_train_tree = 0

    k=0
    for train_index, test_index in CV:
        print('Computing CV fold: {0}/{1}..'.format(k+1,K))
    
        # extract training and test set for current CV fold
        #Other data
        X_train = X[train_index,:]
        X_test = X[test_index,:]

        #KON data
        y_train_kon = y_kon[train_index,:]
        y_test_kon = y_kon[test_index,:]

        #ANN data
        y_train_ann = y_ann[train_index,:]
        y_test_ann = y_ann[test_index,:]

        #Tree data
        y_train_tree = np.matrix(y_tree).T[train_index,:]
        y_test_tree = np.matrix(y_tree).T[test_index,:]

        #Tree clasifiers
        # Fit decision tree classifier, Gini split criterion, different pruning levels
        dtc = tree.DecisionTreeClassifier(criterion=splitting_criterion, max_depth=t)
        dtc = dtc.fit(X_train,y_train_tree)
        y_est_test_tree = dtc.predict(X_test)
        y_est_train_tree = dtc.predict(X_train)
        # Evaluate misclassification rate over train/test data (in this CV fold)
        # NOTE(review): the 1-D predictions are compared against the
        # transposed label matrix; this relies on numpy broadcasting --
        # verify the shapes line up as intended.
        misclass_rate_test_tree = (np.not_equal(y_est_test_tree, np.array(y_test_tree).T).sum()) / float(len(y_est_test_tree))
        Error_test_tree[k] = misclass_rate_test_tree
    
        #ANN clasifiers
        #best_train_error_ANN = 1e100
        #for i in range(n_train):
            # Create randomly initialized network with 2 layers
            #ann = nl.net.newff([[0, 1]]*M, [n_hidden_units, n_output_units], [nl.trans.LogSig(),nl.trans.LogSig()])
            # train network
            #train_error_ANN = ann.train(X_train, y_train_ann, goal=learning_goal, epochs=max_epochs, show=round(max_epochs/8))
            #if train_error_ANN[-1]<best_train_error_ANN:
            #    bestnet.append(ann)
            #    best_train_error_ANN = train_error_ANN[-1]
            #    error_hist_ANN[range(len(train_error_ANN)),k] = train_error_ANN

        #KON clasifiers
        # NOTE(review): warn_on_equidistant was removed from scikit-learn
        # (0.16+); this call only works with old sklearn versions.
        knclassifier = KNeighborsClassifier(n_neighbors=l, warn_on_equidistant=False);
        knclassifier.fit(X_train, y_train_kon)
        y_est_kon = knclassifier.predict(X_test)
        errors_kon[k] = (np.not_equal(y_est_kon,y_test_kon.T)).sum()/float(len(y_est_kon))

        k+=1

    print 'Variance for k_nearest_neighbors' + str(np.var(errors_kon))

    print 'Tree' + str(np.var(Error_test_tree))


    # Per-fold error curves and boxplots for the two classifiers.
    x_axis = np.arange(K)
    figure(1).hold(True)
    plot(x_axis, 100*errors_kon, label='K nearest neighbors')
    plot(x_axis, 100*Error_test_tree, label='Tree')
    plt.grid(which='major', axis='y')
    legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=gcf().transFigure)
    xlabel('Validation fold')
    ylabel('Error (misclassification %)')

    figure(2)
    boxplot([100*errors_kon, 100*Error_test_tree], notch=0, sym='+', vert=1, whis=1)
    plt.xticks([1,2], ['K nearest neighbors', 'Tree clasifier'], size='small')
    plt.grid(which='major', axis='y')
    ylabel('Error (misclassification %)')

    # Use T-test to check if classifiers are significantly different
    [tstatistic, pvalue] = stats.ttest_ind(errors_kon, Error_test_tree)
    if pvalue<=0.05:
        print('Classifiers are significantly different. (p={0})'.format(pvalue))
    else:
        print('Classifiers are not significantly different (p={0})'.format(pvalue))        

    show()

# ------------ DECISION TREE CLASSIFIER ------------ #
# --------  (holdout-set cross validation) --------- #
# Sweeps max tree depth over a single train/test holdout split and plots
# train vs. test misclassification rate.
if 'decision_tree_simple' in sections:
    #params!
    test_proportion = 0.2
    tc = np.arange(2, 21, 1)    # pruning levels (max tree depth) to sweep
    splitting_criterion='gini'; # one of {gini, entropy}
    
    traindata, testdata, y_train, y_test = cross_validation.train_test_split(classifInput,classifOutput,test_size=test_proportion)
    
    # Initialize variables
    Error_train = np.empty((len(tc),1))
    Error_test = np.empty((len(tc),1))
    
    for i, t in enumerate(tc):
        # Fit decision tree classifier with different pruning levels.
        # CONSISTENCY FIX: use the splitting_criterion variable declared
        # above instead of the hard-coded 'gini' literal (same value today,
        # but now editing the parameter actually takes effect).
        dtc = tree.DecisionTreeClassifier(criterion=splitting_criterion, max_depth=t)
        dtc = dtc.fit(traindata,y_train)
    
        # Evaluate classifier's misclassification rate over train/test data
        y_est_test = dtc.predict(testdata)
        y_est_train = dtc.predict(traindata)
        misclass_rate_test = (np.not_equal(y_est_test, y_test).sum()) / float(len(y_est_test))
        misclass_rate_train = (np.not_equal(y_est_train, y_train).sum()) / float(len(y_est_train))
        Error_test[i], Error_train[i] = misclass_rate_test, misclass_rate_train
        
    f = figure(); f.hold(True)
    plot(tc, Error_train)
    plot(tc, Error_test)
    xlabel('Model complexity (max tree depth)')
    ylabel('Error (misclassification rate)')
    legend(['Error_train','Error_test'])
    show()

# ------------ DECISION TREE CLASSIFIER ------------ #
# --------  (K-fold cross validation) --------- #
# Globals shared with the section below.
X=classifInput
N=classifInput.shape[0]
y=classifOutput

if 'decision_tree_Kfold' in sections:
    #params!
    K = 10
    tc = np.arange(2, 21, 1)    # pruning levels (max tree depth) to sweep
    splitting_criterion='entropy'; # one of {gini, entropy}
    
    CV = cross_validation.KFold(N,K,shuffle=True)

    # Initialize variables
    Error_train = np.empty((len(tc),K))
    Error_test = np.empty((len(tc),K))
    
    k=0
    for train_index, test_index in CV:
        print('Computing CV fold: {0}/{1}..'.format(k+1,K))
    
        # extract training and test set for current CV fold
        X_train, y_train = X[train_index,:], np.matrix(y).T[train_index,:]
        X_test, y_test = X[test_index,:], np.matrix(y).T[test_index,:]
    
        for i, t in enumerate(tc):
            # Fit decision tree classifier, Gini split criterion, different pruning levels
            dtc = tree.DecisionTreeClassifier(criterion=splitting_criterion, max_depth=t)
            dtc = dtc.fit(X_train,y_train)
            y_est_test = dtc.predict(X_test)
            y_est_train = dtc.predict(X_train)
            # Evaluate misclassification rate over train/test data (in this CV fold)
            # NOTE(review): 1-D predictions compared against the transposed
            # label matrix -- relies on numpy broadcasting.
            misclass_rate_test = (np.not_equal(y_est_test, np.array(y_test).T).sum()) / float(len(y_est_test))
            misclass_rate_train = (np.not_equal(y_est_train, np.array(y_train).T).sum()) / float(len(y_est_train))
            Error_test[i,k], Error_train[i,k] = misclass_rate_test, misclass_rate_train
        k+=1
        
    # Boxplot of test error across folds per depth, then the mean curves.
    f = figure(); f.hold(True)
    boxplot(Error_test.T)
    xlabel('Model complexity (max tree depth)')
    ylabel('Test error across CV folds, K={0})'.format(K))
    
    f = figure(); f.hold(True)
    plot(tc, Error_train.mean(1))
    plot(tc, Error_test.mean(1))
    xlabel('Model complexity (max tree depth)')
    ylabel('Error (misclassification rate, CV K={0})'.format(K))
    legend(['Error_train','Error_test'])
        
    show()

# ------------ NEURAL NETWORK ------------ #
# Trains neurolab feed-forward networks over a sweep of hidden-layer sizes
# with K-fold crossvalidation; data/section toggles select the target task.
if 'ANN' in sections:
 #Select the data for clasification
    #dataSections = ['sex', 'occupation', 'pvcOn']
    dataSections = ['', '']

    if 'sex' in dataSections:
        #Input columns for sex
        # NOTE(review): reassigning classifInputCols here does not change X
        # below -- classifInput was already sliced from censusdata earlier.
        classifInputCols = [0,2,3,4,5,6,17,18,19,5,6,20,21,22,23,24,25,27,28,29,30]
        #Sex columns 0 = female 1 = male
        classifOutputCols = [7,8]

    X = classifInput
    y = np.matrix(censusdata[:,classifOutputCols]).astype(float)
    X, y = deleteEmptyRowsInInput(X, y)
    y = y.astype(int)
    #y = np.transpose(np.matrix(undiscretizeToInt(y))).astype(int)
    
    #X = np.matrix(censusdata[:,classifInputCols]).astype(float)
    #y = np.matrix(censusdata[:,classifOutputCols]).astype(float)
    #y = y.astype(int)
    #X, y = deleteEmptyRowsInInput(X, y)
    
    if 'sex' in dataSections:
        # Two-column sex indicator collapsed to a single binary label.
        y = combine2Rows(y)
        y = y.reshape(-1,1)
        
    N, M = X.shape
    C = 2

    # NOTE(review): the next two toggles are looked up in `sections`, not
    # in the dataSections list declared above -- confirm which is intended.
    if 'normalizeInputs' in sections:
        X[:,4] = np.log(X[:,4])
        for i in [0,1,2,4]:
            X[:,i] = X[:,i]/np.max(X[:,i])

    if 'pvcOn' in sections:
        # Normalize and compute Principal Components
        #Xy = stats.zscore(X,0);
        mu = np.mean(X, axis=0)
        Xy = X - np.ones((X.shape[0],1)) * mu

        U,S,V = linalg.svd(Xy,full_matrices=False)
        V = mat(V).T
        # Components to be included as features
        k_pca = 8
        X = X*V[:,0:k_pca]
        N, M = X.shape

    # Parameters for neural network classifier
    nbrOfHiddenUnits = [6,8,10,12,16,18,22]     # number of hidden units
    n_train = 1             # number of networks trained in each k-fold
    n_output_units = 6
    
    if 'sex' in dataSections:
        nbrOfHiddenUnits = [6,8,10,12,16,18,22,26]     # number of hidden units
        n_output_units = 1
        nbrOfHiddenUnits = [8]

    learning_rate = 0.0005  # rate of weights adaptation
    learning_goal = 200.0   # stop criterion 1 (train mse to be reached)
    max_epochs = 16         # stop criterion 2 (max epochs in training)

    # K-fold crossvalidation
    K = 5                   # only five folds to speed up this example
    CV = cross_validation.KFold(N,K,shuffle=True)

    # Variable for classification error
    errors = np.zeros(K)
    L = len(nbrOfHiddenUnits)
    error_hist = np.zeros((max_epochs,K*L))
    bestnet = list()
    bestNumOfHidden =list()
    k=0
    numOfNets=0
    # One crossvalidation pass: for each fold, train one ANN per candidate
    # hidden-layer size and keep every net that improves on the best
    # training error seen so far in this fold (bestnet / bestNumOfHidden /
    # error_hist grow in lockstep, indexed by numOfNets).
    for train_index, test_index in CV:
        print('\nCrossvalidation fold: {0}/{1}'.format(k+1,K))
        # extract training and test set for current CV fold
        X_train = X[train_index,:]
        y_train = y[train_index,:].astype(int)
        X_test = X[test_index,:]
        y_test = y[test_index,:].astype(int)

        best_train_error = 1e100
        for n_hidden_units in nbrOfHiddenUnits:
            for i in range(n_train):
                # Create randomly initialized network with 2 layers
                ann = nl.net.newff([[0, 1]]*M, [n_hidden_units, n_output_units], [nl.trans.LogSig(),nl.trans.LogSig()])
                # train network
                train_error = ann.train(X_train, y_train, goal=learning_goal, epochs=max_epochs, show=round(max_epochs/8))
                if train_error[-1]<best_train_error:
                    # Record this net: its learning curve and hidden-unit
                    # count, then bump the stored-network counter.
                    bestnet.append(ann)
                    best_train_error = train_error[-1]
                    error_hist[range(len(train_error)),numOfNets] = train_error
                    bestNumOfHidden.append(n_hidden_units)
                    numOfNets+=1

        # BUG FIX: the fold counter used to be incremented once per
        # candidate hidden-layer size (inside the n_hidden_units loop), so
        # the printed fold number ran ahead of the actual fold. Increment
        # exactly once per fold instead.
        k+=1

    # Display exemplary networks learning curve (best network of each fold)
    figure(1); figsize=(10, 9); hold(True)
    # NOTE(review): this bn_id is immediately overwritten by the loop below.
    bn_id = argmax(error_hist[-1,:])
    error_hist[error_hist==0] = learning_goal
    for bn_id in range(numOfNets):
        plot(error_hist[:,bn_id], label=str(bestNumOfHidden[bn_id])); xlabel('epoch');
        ylabel('train error (mse)'); title('Learning curve (best for each CV fold)');
        legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=gcf().transFigure)

    plot(range(max_epochs), [learning_goal]*max_epochs, '-.')

    # Classification error of every stored network over the full dataset.
    figure(2); figsize=(10, 9); hold(True)
    bn_id = argmax(error_hist[-1,:])
    errors = np.zeros(numOfNets)
    for bn_id in range(numOfNets):
        y_est = bestnet[bn_id].sim(X)
        #print undiscretizeToInt(y_est.astype(int))
        #print undiscretizeToInt(y)
        errors[bn_id] = 100*(np.not_equal(undiscretizeToInt(y_est.astype(int)).astype(int),undiscretizeToInt(y.astype(int)).astype(int).T)).sum()/N
        #errors[bn_id]=(np.matrix(y_est).astype(int)!=np.matrix(y).A.ravel().astype(int)).sum()
    
    ind = np.arange(numOfNets)  # the x locations for the groups
    width = 0.35       # the width of the bars
    plt.bar(ind+width, errors, width, label=str(bestNumOfHidden[:])); xlabel('ANN');
    ylabel('Classification error (%)'); title('');
    legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=gcf().transFigure)
       
    show()    

# K-nearest-neighbor analysis: either a KFold sweep over neighbor counts,
# or (default) a single precomputed neighbor matrix evaluated for 1..L.
if 'K_out_of_N' in sections:
    #Crossvalidation
    # NOTE(review): with [''] here the 'Crossvalidation' branch below never
    # runs; only the distance-matrix (else) branch is active.
    k_out_of_N_sections = ['']
    X = classifInput
    y = np.matrix(censusdata[:,classifOutputCols]).astype(float)
    y = np.transpose(np.matrix(undiscretizeToInt(y))).astype(int)
        
    N, M = X.shape
    C = 6   # number of classes
    
    if 'Crossvalidation' in k_out_of_N_sections:
        
        # K-nearest neighbors
        K=2
        # Maximum number of neighbors
        L=40
        CV = cross_validation.KFold(N,K,shuffle=True)
        errors = np.zeros((K,L))
        i=0
        for train_index, test_index in CV:
            print('Crossvalidation fold: {0}/{1}'.format(i+1,K))    
            
            # extract training and test set for current CV fold
            X_train = X[train_index,:]
            y_train = y[train_index,:]
            X_test = X[test_index,:]
            y_test = y[test_index,:]

            # Fit classifier and classify the test points (consider 1 to 40 neighbors)
            for l in range(1,L+1):
                # NOTE(review): warn_on_equidistant was removed from
                # scikit-learn (0.16+); requires an old sklearn.
                knclassifier = KNeighborsClassifier(n_neighbors=l, warn_on_equidistant=False);
                knclassifier.fit(X_train, y_train);
                y_est = knclassifier.predict(X_test);
                errors[i,l-1] = (np.not_equal(y_est,y_test.T)).sum()

            i+=1
            
        # Plot the classification error rate
        figure()
        plot(100*sum(errors,0)/N)
        xlabel('Number of neighbors')
        ylabel('Classification error rate (%)')
        show()

    else:
        # Maximum number of neighbors
        # Maximum number of neighbors
        
        L=60

        # Cross-validation not necessary. Instead, compute matrix of nearest neighbor
        # distances between each pair of data points ..
        knclassifier = KNeighborsClassifier(n_neighbors=L+1, warn_on_equidistant=False).fit(X, y)
        neighbors = knclassifier.kneighbors(X)
        # .. and extract matrix where each row contains class labels of subsequent neighbours
        # (sorted by distance)
        ndist, nid = neighbors[0], neighbors[1]
        nclass = y[nid].flatten().reshape(N,L+1)

        # Use the above matrix to compute the class labels of majority of neighbors
        # (for each number of neighbors l), and estimate the test errors.
        errors = np.zeros(L)

        nclass_count = np.zeros((N,C))
        errorsSQ = np.zeros(L)
        for l in range(1,L+1):
            # Column 0 is the point itself, so only neighbors 1..l vote.
            for c in range(C):
                nclass_count[:,c] = sum(nclass[:,1:l+1]==c,1).A.ravel()
            y_est = np.argmax(nclass_count,1)
            errors[l-1] = (y_est.astype(int)!=y.A.ravel().astype(int)).sum()
            y_est = np.mean(nclass[:,1:l+1],1)
            errorsSQ[l-1] = np.square(y_est-y).sum()/N

            
        # Plot the least-squares error as function of number of neighbors
        figure(0)
        plot(errorsSQ, label='Squares error')
        xlabel('Number of neighbors')
        #ylabel('Least-Squares error (%)')
        
        # Plot the classification error rate
        figure(1)
        plot(100*errors/N, label='Classification error rate (%)')
        xlabel('Number of neighbors')
        legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=gcf().transFigure)
        #ylabel('Classification error rate (%)')

        figure(2)
        imshow(nclass, cmap='binary', interpolation='None'); xlabel("k'th neighbor"); ylabel('data point'); title("Neighbors class matrix");
        show()

# ------------ LINEAR REGRESSION ------------ #
# ----------- (feature selection) ----------- #
# Design matrix: offset + raw inputs + squared inputs.
# NOTE(review): this re-extends regrInputAttributeNames and bumps M at
# module level; if the comparison_regression section ran earlier, the
# names get extended twice.
N, M = regrInput.shape
X=np.concatenate((np.ones((N,1)),regrInput),1)
X=np.concatenate((X,np.square(np.matrix(regrInput[:,0]).T),np.square(np.matrix(regrInput[:,1]).T)),1)
regrInputAttributeNames = np.array([u'Offset']+list(regrInputAttributeNames)
                                   + map(lambda s: "SQ"+s, regrInputAttributeNames)
                                   )
M += 1+2
y=regrOutput

# K-fold crossvalidated linear regression, with and without sequential
# feature selection; prints per-model error summaries afterwards.
if 'linear_regression' in sections:
    #params!
    K = 10              # outer crossvalidation folds
    internal_K = 5      # inner folds used by the feature selector

    CV = cross_validation.KFold(N,K,shuffle=True)

    # Initialize variables
    Features = np.zeros((M,K))
    Error_train = np.empty((K,1))
    Error_test = np.empty((K,1))
    Error_train_fs = np.empty((K,1))
    Error_test_fs = np.empty((K,1))
    Error_train_nofeatures = np.empty((K,1))
    Error_test_nofeatures = np.empty((K,1))
    
    k=0
    for train_index, test_index in CV:
        print('Computing CV fold: {0}/{1}..'.format(k+1,K))
    
        # extract training and test set for current CV fold
        X_train, y_train = X[train_index,:], np.array(y)[train_index,:]
        X_test, y_test = X[test_index,:], np.array(y)[test_index,:]
        # Compute squared error without using the input data at all
        Error_train_nofeatures[k] = np.square(y_train-y_train.mean()).sum()/y_train.shape[0]
        Error_test_nofeatures[k] = np.square(y_test-y_test.mean()).sum()/y_test.shape[0]    

        # Compute squared error with all features selected (no feature selection)
        m = lm.LinearRegression().fit(X_train, y_train)
        Error_train[k] = np.square(y_train-m.predict(X_train)).sum()/y_train.shape[0]
        Error_test[k] = np.square(y_test-m.predict(X_test)).sum()/y_test.shape[0]

        # Compute squared error with feature subset selection
        selected_features, features_record, loss_record = feature_selector_lr(X_train, y_train, internal_K)
        Features[selected_features,k]=1
        # .. alternatively you could use module sklearn.feature_selection
        m = lm.LinearRegression().fit(X_train[:,selected_features], y_train)
        Error_train_fs[k] = np.square(y_train-m.predict(X_train[:,selected_features])).sum()/y_train.shape[0]
        Error_test_fs[k] = np.square(y_test-m.predict(X_test[:,selected_features])).sum()/y_test.shape[0]

        # the following is not really interesting: a single point (maximum 2)
        figure(k)
        subplot(1,2,1)
        plot(range(1,len(loss_record)), loss_record[1:])
        xlabel('Iteration')
        ylabel('Squared error (crossvalidation)')    
        
        # NOTE(review): grid mismatch -- this subplot uses a 1x3 grid while
        # the one above uses 1x2; confirm the intended layout.
        subplot(1,3,3)
        bmplot(regrInputAttributeNames, range(1,features_record.shape[1]), -features_record[:,1:])
        clim(-1.5,0)
        xlabel('Iteration')

        print('Cross validation fold {0}/{1}'.format(k+1,K))
        #print('Train indices: {0}'.format(train_index))
        #print('Test indices: {0}'.format(test_index))
        print('Features no: {0}\n'.format(selected_features.size))

        k+=1

    # Display results
    print('\n')
    print('Linear regression without feature selection:\n')
    print('- Training error: {0}'.format(Error_train.mean()))
    print('- Test error:     {0}'.format(Error_test.mean()))
    print('- Training error standard error: {0}'.format(Error_train.std()))
    print('- Test error standard error:     {0}'.format(Error_test.std()))
    print('- R^2 train:     {0}'.format((Error_train_nofeatures.sum()-Error_train.sum())/Error_train_nofeatures.sum()))
    print('- R^2 test:     {0}'.format((Error_test_nofeatures.sum()-Error_test.sum())/Error_test_nofeatures.sum()))
    print('Linear regression with feature selection:\n')
    print('- Training error: {0}'.format(Error_train_fs.mean()))
    print('- Test error:     {0}'.format(Error_test_fs.mean()))
    print('- Training error standard error: {0}'.format(Error_train_fs.std()))
    print('- Test error standard error:     {0}'.format(Error_test_fs.std()))
    print('- R^2 train:     {0}'.format((Error_train_nofeatures.sum()-Error_train_fs.sum())/Error_train_nofeatures.sum()))
    print('- R^2 test:     {0}'.format((Error_test_nofeatures.sum()-Error_test_fs.sum())/Error_test_nofeatures.sum()))

    # Summary plot of which attributes were selected in each CV fold.
    figure(k)
    subplot(1,3,2)
    bmplot(regrInputAttributeNames, range(1,Features.shape[1]+1), -Features)
    clim(-1.5,0)
    xlabel('Crossvalidation fold')
    ylabel('Attribute')

    # Inspect selected feature coefficients effect on the entire dataset and
    # plot the fitted model residual error as function of each attribute to
    # inspect for systematic structure in the residual
    for fold in range(0,K): # cross-validation fold to inspect
        ff=Features[:,fold].nonzero()[0]
        m = lm.LinearRegression().fit(X[:,ff], y)
    
        y_est= m.predict(X[:,ff])
        residual=y-y_est
    
        figure(k+fold+1)
        # BUG FIX: the title previously formatted the undefined name `f`
        # (a NameError unless a stale figure handle happened to be in
        # scope); it should report the fold being inspected.
        title('Residual error vs. Attributes for features selected in cross-validation fold {0}'.format(fold))
        for i in range(0,len(ff)):
            subplot(len(ff),1,i+1)
            plot(X[:,ff[i]],residual,'.')
            xlabel(regrInputAttributeNames[ff[i]])
            ylabel('Residual error')

    show()

# ------------ LINEAR REGRESSION ------------ #
# -------------- (regularized) -------------- #
# Build the design matrix: an offset (bias) column, the raw attributes,
# and squared versions of the first two attributes to capture curvature.
N, M = regrInput.shape
bias_col = np.ones((N,1))
X = np.concatenate((bias_col,regrInput),1)
sq_first = np.square(np.matrix(regrInput[:,0]).T)
sq_second = np.square(np.matrix(regrInput[:,1]).T)
X = np.matrix(np.concatenate((X,sq_first,sq_second),1))
regrInputAttributeNames = np.array(
    [u'Offset'] + list(regrInputAttributeNames)
    + ["SQ" + s for s in regrInputAttributeNames]
)
M += 3  # offset column plus the two squared attributes
y = np.matrix(regrOutput)


if 'regularized_linear_regression' in sections:
    # Ridge (L2-regularized) linear regression with two-level cross-validation:
    # the outer K-fold estimates generalization error, while rlr_validate runs
    # an inner internal_K-fold on each training split to select lambda.
    #params!
    K = 10
    internal_K = 5
    
    CV = cross_validation.KFold(N,K)
    # Values of lambda
    lambdas = np.power(10.,range(-5,9))

    # Initialize variables
    Features = np.zeros((M,K))
    Error_train = np.empty((K,1))
    Error_test = np.empty((K,1))
    Error_train_rlr = np.empty((K,1))
    Error_test_rlr = np.empty((K,1))
    Error_train_nofeatures = np.empty((K,1))
    Error_test_nofeatures = np.empty((K,1))
    # One column of M fitted coefficients per outer fold.
    w_rlr = np.matrix(np.empty((M,K)))
    w_noreg = np.matrix(np.empty((M,K)))
    
    k=0
    for train_index, test_index in CV:
        print('Computing CV fold: {0}/{1}..'.format(k+1,K))

        # extract training and test set for current CV fold
        X_train, y_train = X[train_index,:], np.array(y)[train_index,:]
        X_test, y_test = X[test_index,:], np.array(y)[test_index,:]

        # Inner CV: pick the lambda with the lowest validation error.
        opt_val_err, opt_lambda, mean_w_vs_lambda, train_err_vs_lambda, test_err_vs_lambda = \
            rlr_validate(X_train, y_train, lambdas, internal_K)
        # Normal-equation terms (X is an np.matrix, so * is matrix product).
        Xty = X_train.T*y_train
        XtX = X_train.T*X_train
    
        # Compute squared error without using the input data at all
        # (baseline model: always predict the mean of y).
        Error_train_nofeatures[k] = np.square(y_train-y_train.mean()).sum()/y_train.shape[0]
        Error_test_nofeatures[k] = np.square(y_test-y_test.mean()).sum()/y_test.shape[0]    

        # Estimate weights for the optimal value of lambda, on entire training set
        # NOTE(review): opt_lambda*np.eye(M) also penalizes the offset (first)
        # coefficient; the bias is often left unpenalized — confirm intended.
        w_rlr[:,k] = linalg.lstsq(XtX+opt_lambda*np.eye(M),Xty)[0]
        # Compute mean squared error with regularization with optimal lambda
        Error_train_rlr[k] = np.square(y_train-X_train*w_rlr[:,k]).sum()/y_train.shape[0]
        Error_test_rlr[k] = np.square(y_test-X_test*w_rlr[:,k]).sum()/y_test.shape[0]
    
        # Estimate weights for unregularized linear regression, on entire training set
        w_noreg[:,k] = linalg.lstsq(XtX,Xty)[0]
        # Compute mean squared error without regularization
        Error_train[k] = np.square(y_train-X_train*w_noreg[:,k]).sum()/y_train.shape[0]
        Error_test[k] = np.square(y_test-X_test*w_noreg[:,k]).sum()/y_test.shape[0]
        # OR ALTERNATIVELY: you can use sklearn.linear_model module for linear regression:
        #m = lm.LinearRegression().fit(X_train, y_train)
        #Error_train[k] = np.square(y_train-m.predict(X_train)).sum()/y_train.shape[0]
        #Error_test[k] = np.square(y_test-m.predict(X_test)).sum()/y_test.shape[0]
    
        # Coefficient magnitudes versus lambda, for the first two folds only.
        if k<2: # we show just two of them, because all are similar
            figure(k)
            semilogx(lambdas,mean_w_vs_lambda.T,'.-')
            xlabel('Regularization factor')
            ylabel('Mean Coefficient Values')    

        # Train/validation error versus lambda, one subplot per fold (max 8 shown).
        if k+1<=8:
            figure(1000)
            plt.subplot(4,2,k+1)            
            if k+1>6: xlabel('Regularization factor')
            if k+1<=6: gca().xaxis.set_visible(False)
            semilogx(lambdas,train_err_vs_lambda.T,'b.-',lambdas,test_err_vs_lambda.T,'r.-')
            if k<2:
                title('Optimal lambda = {0}'.format(opt_lambda))
                legend(['Train error','Validation error'], bbox_to_anchor=(0., 1.02, 1., .102),
                       loc=3, ncol=2, mode="expand", borderaxespad=0.)

        print('Cross validation fold {0}/{1}:'.format(k+1,K))
        #print('Train indices: {0}'.format(train_index))
        #print('Test indices: {0}\n'.format(test_index))

        k+=1
    
    # Display results
    print('\n')
    print('Linear regression without feature selection:\n')
    print('- Training error: {0}'.format(Error_train.mean()))
    print('- Test error:     {0}'.format(Error_test.mean()))
    print('- Training error standard error: {0}'.format(Error_train.std()))
    print('- Test error standard error:     {0}'.format(Error_test.std()))
    print('- R^2 train:     {0}'.format((Error_train_nofeatures.sum()-Error_train.sum())/Error_train_nofeatures.sum()))
    print('- R^2 test:     {0}\n'.format((Error_test_nofeatures.sum()-Error_test.sum())/Error_test_nofeatures.sum()))
    print('Regularized Linear regression:')
    print('- Training error: {0}'.format(Error_train_rlr.mean()))
    print('- Test error:     {0}'.format(Error_test_rlr.mean()))
    print('- Training error standard error: {0}'.format(Error_train_rlr.std()))
    print('- Test error standard error:     {0}'.format(Error_test_rlr.std()))
    print('- R^2 train:     {0}'.format((Error_train_nofeatures.sum()-Error_train_rlr.sum())/Error_train_nofeatures.sum()))
    print('- R^2 test:     {0}\n'.format((Error_test_nofeatures.sum()-Error_test_rlr.sum())/Error_test_nofeatures.sum()))
    
    show()


# ------------ ARTIFICIAL NEURAL NETWORK ------------ #
if 'ANN_regression' in sections:
    # Feed-forward ANN regression: K-fold CV over several hidden-layer sizes;
    # every network that improves the best train mse within a fold is kept in
    # 'bestnet' and later inspected on the full dataset.

    normalizeInputs = True
    logarithmInputs = True

    N, M = regrInput.shape
    X=regrInput
    y=regrOutput
    dataXY = np.hstack((X,y)).astype(float)
    dataXY = removeZeroValues(dataXY)
    X = dataXY[:,[0,1]]
    N, M = X.shape
    y = dataXY[:,2].reshape(N,1)

    # Log-transform the target to reduce skew before training.
    if logarithmInputs:
        y = np.log(y)

    if normalizeInputs:
        # Scale inputs and target into [0,1] to match the networks'
        # declared input range (newff([[0, 1]]*M, ...)).
        X_norm = np.zeros((N,M)).astype(float)
        y_norm = np.zeros((N,1)).astype(float)
        X0_max = X[:,0].max()
        X1_max = X[:,1].max()
        y_max = y.max()

        X_norm[:,0] = X[:,0]/X0_max
        X_norm[:,1] = X[:,1]/X1_max
        y_norm = y/y_max

    #params!
    K = 5

    # Parameters for neural network classifier
    n_hidden_units = [2,4,6,8,10,12,14]     # number of hidden units
    n_train = 1             # number of networks trained in each k-fold
    n_output_units = 1
    learning_rate = 0.0005  # rate of weights adaptation
    learning_goal = 0.0001   # stop criterion 1 (train mse to be reached)
    max_epochs = 24         # stop criterion 2 (max epochs in training)

    CV = cross_validation.KFold(N,K,shuffle=True)

    # Initialize variables
    L = len(n_hidden_units)
    Features = np.zeros((M,K))
    Error_train = np.empty((K*L,1))
    Error_test = np.empty((K*L,1))
    Error_train_fs = np.empty((K*L,1))
    Error_test_fs = np.empty((K*L,1))
    Error_train_nofeatures = np.empty((K*L,1))
    Error_test_nofeatures = np.empty((K*L,1))

    errors = np.zeros(K)
    error_hist = np.zeros((max_epochs,K*L))
    bestnet = list()           # networks kept (each improved the fold's best train mse)
    bestNumOfHidden = list()   # hidden-unit count for each entry in bestnet

    k=0            # counts (fold, hidden-layer-size) configurations tried
    numOfNets=0    # number of networks stored in bestnet
    for train_index, test_index in CV:
        # BUG FIX: k advances once per hidden-layer size per fold, so the
        # total number of configurations is K*L, not K*4.
        print('Computing CV fold: {0}/{1}..'.format(k+1,K*L))
        # Select training data (normalized target for training; raw y kept for test).
        X_train, y_train = X_norm[train_index,:], np.array(y_norm)[train_index,:]
        X_test, y_test = X_norm[test_index,:], np.array(y)[test_index,:]

        best_train_error = 1e100
        for nbrOfHiddenUnits in n_hidden_units:
            for i in range(n_train):
                # Create randomly initialized network with 2 layers
                ann = nl.net.newff([[0, 1]]*M, [nbrOfHiddenUnits, n_output_units], [nl.trans.LogSig(),nl.trans.LogSig()])
                # train network
                train_error = ann.train(X_train, y_train, goal=learning_goal, epochs=max_epochs, show=round(max_epochs/8))
                if train_error[-1]<best_train_error:
                    # Keep any net that improves on the best train mse so far.
                    bestnet.append(ann)
                    best_train_error = train_error[-1]
                    error_hist[range(len(train_error)),numOfNets] = train_error
                    bestNumOfHidden.append(nbrOfHiddenUnits)
                    numOfNets+=1

            k+=1

    for bnd_id in range(numOfNets):
        # BUG FIX: simulate on the normalized inputs the networks were trained
        # on (they expect the [0,1] range), not raw X; rescale predictions back
        # to target units with y_max.
        y_est = bestnet[bnd_id].sim(X_norm)*y_max
        Error_test[bnd_id] = np.square(y-y_est).sum()/y.shape[0]
        figure(0, figsize=(16,9))
        # BUG FIX: subplot indices are 1-based, and with 4 columns the row
        # count must be ceil(numOfNets/4) (int(numOfNets/3) could be 0).
        subplot(int(np.ceil(numOfNets/4.0)),4,bnd_id+1)
        scatter(y_est, y, label=str(bestNumOfHidden[bnd_id])); ylabel('Real income'); xlabel('Predicted income'); legend(bbox_to_anchor=(0, 0, 1, 1))
        ylim([0,20])
        xlim([0,20])

    # Display exemplary networks learning curve (best network of each fold)
    figure(1); hold(True)
    # Epochs past early stopping kept the 0 fill value; clamp them to the
    # goal so the curves stay readable. (Removed a dead 'bn_id = argmax(...)'
    # assignment that was immediately overwritten by the loop below.)
    error_hist[error_hist==0] = learning_goal
    for bn_id in range(numOfNets):
        plot(error_hist[:,bn_id], label=str(bestNumOfHidden[bn_id])); xlabel('epoch'); ylabel('train error (mse)'); title('Learning curve (best for each CV fold)')
        legend(bbox_to_anchor=(0, 0, 1, 1), bbox_transform=gcf().transFigure)
    plot(range(max_epochs), [learning_goal]*max_epochs, '-.')

    show()