#!/usr/bin/python

from pylab import *
import numpy as np
import pandas as pd
import scipy.linalg as linalg
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Small font for crowded figure legends.
# NOTE(review): fontP is not referenced anywhere below -- confirm it is
# still needed before removing.
fontP = FontProperties()
fontP.set_size('small')
from scikits.statsmodels.tools.tools import categorical
from scipy.stats import zscore

# our snow imports
from snow_similarity import get_similarity2index


# Load the raw census extract; every field is read as a string and converted
# per-column later.  Only the first 40 columns are used by this analysis.
# (dtype=str replaces the deprecated np.str alias, removed in NumPy 1.24;
# behavior is identical.)
censusdata = np.loadtxt('./census-income/census-income_short.txt', dtype=str, delimiter=',')
censusdata = censusdata[:,:40]
# Attribute (column) names aligned with censusdata's columns.
attributeNames = np.loadtxt('./census-income/census_attributes.txt', dtype=str, delimiter=',')

# Original data visualization
### AGE ##########
# Histogram of ages, one bin per year (the data tops out around age 90).
ages = censusdata[:, 0].astype(float)
sample_weights = censusdata[:, 24].astype(float)
# Overridden with uniform weights -- remove this line to honour the census
# instance weights read above.
sample_weights = np.ones(censusdata.shape[0])
plt.hist(ages, bins=91, facecolor='green', weights=sample_weights)
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title("Fig.1 - Distribution of saplings' ages in the original data")
#plt.show()
#exit(0)


# enable this if you do not have 8GB RAM available and you are working with the original whole dataset...
#exit(0)

#Data that has to be split in to arrays:
selectedColumns = [0, 4, 5, 8, 9, 12, 14, 16, 17, 18, 20, 24, 30, 31, 34, 39]
censusdataSelected = censusdata[:, selectedColumns]
attributeNamesSelected = attributeNames[:, selectedColumns]

#Indices of columns that have to bee split into numerical
splitColumns = [1, 3, 4, 5, 6, 10, 12, 13, 14]
#Indices of columns that are continous
continousColumns = [0, 2, 7, 8, 9, 11, 15]

#Parameters for whole data
dataRows, dataColumns = np.shape(censusdata)
dataMatrix = censusdata.astype(str)

#Parameters for selectedData
dataRowsSel, dataColumnsSel = np.shape(censusdataSelected)
dataMatrixStrSel = censusdataSelected.astype(str)

##CALCULATIONS FOR THE WHOLE DATA
#Calculate number of zeros and missing values in the continous attributes for selected data
numOfDifferentValues = np.zeros((dataColumns, 3)).astype(float)
for i in xrange(0, dataColumns):
	numOfZeros = 0
	numOfmissingValues = 0
	numOfNotInUniverse = 0

	for j in xrange(0, dataRows):
		if dataMatrix[j,i].strip() == "0":
			numOfZeros = numOfZeros + 1
		if dataMatrix[j,i].find("Not in") != -1:
			numOfNotInUniverse = numOfNotInUniverse + 1
		if dataMatrix[j,i].find("United-States") != -1:
			numOfmissingValues = numOfmissingValues + 1
	
	numOfDifferentValues[i,0] = float(numOfZeros)/dataRows
	numOfDifferentValues[i,1] = float(numOfNotInUniverse)/dataRows
	numOfDifferentValues[i,2] = float(numOfmissingValues)/dataRows

numOfDifferentValuesStr = numOfDifferentValues.astype(str)
numOfDifferentValuesStr = np.column_stack((attributeNames, numOfDifferentValuesStr))
print numOfDifferentValuesStr
print ""
exit(0)
##CALCULATIONS FOR SELECTED DATA

#exit(0)

##CATEGORICAL VALUES TO BINARY
# Each nominal column listed in splitColumns is expanded into one binary
# indicator column per category (via statsmodels' categorical); indicator
# columns whose category name contains "Not in" (e.g. 'Not in universe') are
# dropped again.  class2index maps each class name to (start_index,
# n_indicator_columns) in the final attribute layout so later plots can slice
# out one class' indicators.
# NOTE(review): the original mixed tabs and 8-space indents here (a fatal
# error under Python 3); indentation has been normalized, logic unchanged.
n_of_nonSplitColumns = len(selectedColumns) - len(splitColumns)
class_idx = n_of_nonSplitColumns  # (originally n-1+1) where the first binarized category starts
class2index = {} # dictionary from classes to indexes at which they start in the new data matrix
for i in splitColumns:
    a = censusdataSelected[:, i]
    current_class = attributeNamesSelected[i].strip()
    b, dictNames = categorical(a, dictnames=True, drop=True)
    class2index[current_class] = (class_idx, len(dictNames))
    spanBeforeStacking = censusdataSelected.shape[1]
    censusdataSelected = np.hstack((censusdataSelected, b))

    for j in dictNames.keys():
        if dictNames[j].find("Not in") == -1:
            attributeNamesSelected = np.append(attributeNamesSelected, dictNames[j])
        else:
            # Drop the 'Not in ...' indicator column and shrink this class' span.
            # NOTE(review): assumes at most one 'Not in' category per class --
            # a second deletion would use a stale column index.
            censusdataSelected = np.delete(censusdataSelected, spanBeforeStacking + j, axis=1)
            class_idx -= 1
            class2index[current_class] = (class2index[current_class][0],
                                          class2index[current_class][1] - 1)
    class_idx += len(dictNames)

#Gets rid of the old (pre-binarization) nominal columns
censusdataSelected = np.delete(censusdataSelected, splitColumns, axis=1)
attributeNamesSelected = np.delete(attributeNamesSelected, splitColumns)

# debug class2index
#print attributeNamesSelected
#for c,v in zip(class2index.keys(), class2index.values()):
#        print "Class " + str(c) + " " + str(v) + ": " + str(attributeNamesSelected[v[0] : v[0]+v[1]])
#exit(0)

# CONVERT THE MATRIX TO NUMERICAL
# After binarization every remaining column holds a numeric string; cast once.
dataMatrix = censusdataSelected.astype(float)
# Column index of the census 'instance weight' attribute in dataMatrix.
instanceWeight_col=5

##STATISTICAL INFORMATION
# Compute statistical information
# // Note: for not meaningful for all attributes (see binarized ones...)
mu = np.mean(dataMatrix, axis=0)
#alternative definition of mean:
#average = np.ma.average(np.ma.array(dataMatrix,
#                                    mask=map(lambda col: map(bool, col),
#                                             np.zeros(dataMatrix.shape))),
#                        axis=0)
variance = np.var(dataMatrix, axis=0)
stdDev = np.std(dataMatrix, axis=0)
median = np.median(dataMatrix, axis=0)

masked_array_no0 = np.ma.array(dataMatrix, mask = (dataMatrix==0))
average_no0 = np.ma.average(masked_array_no0, axis=0)
variance_no0 = np.ma.var(masked_array_no0, axis=0)
stdDev_no0 = np.ma.std(masked_array_no0, axis=0)
median_no0 = np.ma.median(masked_array_no0, axis=0)

print "mu = " + str(mu)
print "average without zero values = " + str(average_no0)
print "variance = " + str(variance)
print "variance without zero values = " + str(variance_no0)
print "stardard deviation = " + str(stdDev)
print "stardard deviation without zero values = " + str(stdDev_no0)
print "median = " + str(median)
print "median without zero values = " + str(median_no0)

# compute correlation between 'capital gains' and 'capital losses'
#capitalGains_col = 2
#capitalLosses_col = 3
#print "Correlation between capital gains and losses = " + \
#    str(np.correlate(dataMatrix[:,capitalGains_col], dataMatrix[:,capitalLosses_col]))
#exit(0)

#SIMILARITY BETWEEN SAMPLES
similarity_measure = 'SMC' # one of {'SMC', 'Jaccard', 'ExtendedJaccard', 'Cosine', 'Correlation' }

# print the sample most similar to data[0][:]
target = 0
# Pass the configured measure instead of a hard-coded literal: the variable
# above was previously assigned (as 'smc') but never used, while the call
# site hard-coded 'SMC'.  The value actually passed is unchanged.
best_sim, best_i = get_similarity2index(target, dataMatrix, similarity_measure)[-1]
#print "SMC measure of similarity tells that 'target' is similar to 'best' with measure " + str(best_sim)
#print "...where target = " + str(dataMatrix[target])
#print "...  and   best = " + str(dataMatrix[best_i])


#COMPUTE PCA OF DATA AND PLOT IT
# Subtract mean value from data (optionally remove the 'instance weight' column)
REMOVE_INSTANCE_WEIGHT=True
if not REMOVE_INSTANCE_WEIGHT:
        cleanDataMatrix = dataMatrix - np.ones((dataMatrix.shape[0],1)) * mu
else:
        cleanDataMatrix = np.delete(dataMatrix, instanceWeight_col, axis=1) - \
            np.ones((dataMatrix.shape[0],1)) * np.delete(mu, instanceWeight_col)
        
# PCA by computing SVD of cleanDataMatrix
U,S,V = linalg.svd(cleanDataMatrix, full_matrices=False)
V = mat(V).T

# Compute variance explained by principal components
rho = (S*S) / (S*S).sum()

# Plot variance explained
figure()
plot(range(1,len(rho)+1), rho, 'o-')
title('Variance explained by principal components');
xlabel('Principal component');
ylabel('Variance explained');


################################################################################
# Project the centered data onto principal component space
Z = cleanDataMatrix * V

# Plot PC 'PC1' and 'PC2' against each other wrt age
# NOTE(review): PC1/PC2 are 0-based column indices into Z, so 1 and 2 select
# the SECOND and THIRD principal components (the axis labels PC2/PC3 below
# agree with that) -- confirm this is intended rather than PC1=0.
PC1 = 1; PC2 = 2 # principal components to be plotted
f = figure()
f.hold()
title('Census data: PCA wrt age')
# One colour per 20-year age band; ages reach ~90, so only bands 0-4 of the
# 10 iterated here are expected to be non-empty (matching the 5 legend entries).
for c in range(10):
    # select indices belonging to class c:
    class_mask = (np.array(map(lambda age: int(age)/20, dataMatrix[:,0])) == c)
    plot(Z[class_mask,PC1], Z[class_mask,PC2], 'o')
legend(['0-19', '20-39', '40-59', '60-79', '80-99'], loc=2)
xlabel('PC{0}'.format(PC1+1))
ylabel('PC{0}'.format(PC2+1))

# Plot PC 'PC1' and 'PC2' against each other wrt Major industry code (already binarized)
class_name = 'major industry code'
PC1 = 1; PC2 = 2 # principal components to be plotted
f = figure()
f.hold()
title('Census data: PCA wrt: ' + str(class_name))
# One colour per indicator column of the class, located via class2index.
for c in range(class2index[class_name][1]):
    # select indices belonging to class c:
    class_mask = (np.array(map(bool, dataMatrix[:,class2index[class_name][0] + c])))
    plot(Z[class_mask,PC1], Z[class_mask,PC2], 'o')
# Legend labels are the attribute names of the class' indicator columns.
legend(attributeNamesSelected[class2index[class_name][0] :
                                      class2index[class_name][0] + \
                                      class2index[class_name][1]
                              ]
       , loc=(3 if REMOVE_INSTANCE_WEIGHT else 2), ncol=2, fancybox=True)
xlabel('PC{0}'.format(PC1+1))
ylabel('PC{0}'.format(PC2+1))

################################################################################

number_of_PC = len(rho)
variance_explained_by_first_k_PC = lambda k: sum(rho[0:k])
print "Total number of Principal Components = " + str(number_of_PC)
print "The first component explain " + \
    str(variance_explained_by_first_k_PC(1)*100) + "% " + \
    "of the data variance"
print "The first 2 components explain " + \
    str(variance_explained_by_first_k_PC(2)*100) + "% " + \
    "of the data variance"
print "The first 3 components explain " + \
    str(variance_explained_by_first_k_PC(3)*100) + "% " + \
    "of the data variance"


################################################################################

#PLOT THE DATAMATRIX
# Standardize each column (z-score, sample std) before rendering heatmaps.
dataMatrix_standarized = zscore(dataMatrix, ddof=1)

def _plot_matrix(data, n_cols_ticked, aspect, plot_title):
        """Render one standardized-data heatmap with the shared axis labelling.

        n_cols_ticked: number of x ticks to draw, or None to skip them
        (the full matrix has far too many columns to tick)."""
        figure()
        # 'none' (the string) disables interpolation; the original passed the
        # keyword None, which falls back to the matplotlib rcParams default
        # interpolation instead of disabling it.
        imshow(data, interpolation='none', aspect=aspect, cmap=cm.jet)
        if n_cols_ticked is not None:
                xticks(range(n_cols_ticked), range(n_cols_ticked))
        xlabel('Attributes')
        ylabel('Data objects')
        title(plot_title)
        colorbar()

_plot_matrix(dataMatrix_standarized, None,
             128./dataMatrix.shape[0], 'Census data matrix (complete)')
_plot_matrix(dataMatrix_standarized[:,:8], 8,
             8./dataMatrix.shape[0], 'Census data matrix (only continuous)')
_plot_matrix(dataMatrix_standarized[:100,:8], 8,
             8./100, 'Census data matrix (zoom into the first 100)')
_plot_matrix(dataMatrix_standarized[:1000,:8], 8,
             8./1000, 'Census data matrix (zoom into the first 1000)')


#PLOT THE SCATTERPLOT
age_col=0
wage_col=1
cgains_col=2
closses_col=3
sdivs_col=4
weeks_col=6
class_names = ['major industry code', 'education', 'sex']
# define a coloured plor wrt class_name
def colorPlot(attr1_col, attr2_col, class_name, sign):
        for c in range(class2index[class_name][1]):
            # select indices belonging to class c:
            class_mask = (np.array(map(bool, dataMatrix[:,class2index[class_name][0] + c])))
            plot(dataMatrix[class_mask,attr1_col], dataMatrix[class_mask,attr2_col], sign)
        legend(attributeNamesSelected[class2index[class_name][0] :
                                              class2index[class_name][0] + \
                                              class2index[class_name][1]
                                      ]
               , loc=2, ncol=2, fancybox=True)

# One entry per scatter plot:
# (x column, y column, x label, y label, title prefix).
# This table replaces seven copy-pasted figure stanzas; every label and
# title string is preserved byte-for-byte.
_scatter_specs = [
        (age_col, wage_col, 'Age', 'Wage/hour (dollarcent)',
         'Ages against Wage per hour'),
        (age_col, cgains_col, 'Age', 'Capital Gains (dollar)',
         'Ages against Capital gains'),
        (age_col, closses_col, 'Age', 'Capital Losses (dollar)',
         'Ages against Capital losses'),
        (age_col, sdivs_col, 'Age', 'Stock dividends (dollar)',
         'Ages against Stock dividends'),
        (age_col, weeks_col, 'Age', 'Weeks worked in year',
         'Ages against Weeks worked in year'),
        (wage_col, cgains_col, 'Wage/hour (dollarcent)', 'Capital gains (dollars)',
         'Wages per hour against Capital gains'),
        (sdivs_col, cgains_col, 'Stock dividends (dollars)', 'Capital gains (dollars)',
         'Stock dividends against Capital gains'),
]
for class_name in class_names:
        for x_col, y_col, x_lab, y_lab, base_title in _scatter_specs:
                figure()
                hold(True)
                colorPlot(x_col, y_col, class_name, '.')
                xlabel(x_lab)
                ylabel(y_lab)
                title(base_title + ' (wrt ' + str(class_name) + ')')

#plot the similarity matrix
# Pairwise correlation between data objects (rows) -- a similarity matrix,
# NOT the standardized data matrix plotted above, so it gets its own name
# (the original rebound dataMatrix_standarized, which was misleading; the
# old binding is not used again afterwards).
similarityMatrix = corrcoef(dataMatrix)
figure()
# NOTE(review): only the first 8 columns of the NxN correlation matrix are
# shown, and the axis labels below still say 'Attributes'/'Data objects' --
# confirm this slice/labelling is intended.
imshow(similarityMatrix[:,:8], interpolation='none', aspect=(8./dataMatrix.shape[0]), cmap=cm.jet);
xticks(range(8), range(8))
xlabel('Attributes')
ylabel('Data objects')
# Title fixed: the original 'Census data matrix (only continuous)' was
# copy-pasted from the heatmap section and does not describe this plot.
title('Census data correlation (similarity) matrix')
colorbar()


#mng = plt.get_current_fig_manager()
#mng.resize(*mng.window.maxsize())
show()
##END OF SCRIPT
#print attributeNamesSelected
#print dataMatrix

