# All available pipeline stages; the second assignment immediately narrows
# the run to the listed subset (edit it to enable other sections).
sections = ['Clustering', 'Anomaly', 'Association']
sections = ['Clustering']
section_message = "The following sections will be considered: "
for s in sections:
    section_message += s + "\t"
# print(...) with a single argument behaves identically under Python 2 and 3.
print(section_message)

from pylab import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import loadmat
from sklearn.mixture import GMM
from sklearn import cross_validation
from toolkit_libs.toolbox_02450 import clusterplot, gausKernelDensity
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
from mpl_toolkits.mplot3d import Axes3D
from subprocess import call
import re
import os
from sklearn.neighbors import NearestNeighbors
from sklearn import metrics
from toolkit_libs.similarity import binarize

def undiscretizeToInt(dataOrg):
    """Collapse a one-hot/indicator matrix into a vector of column indices.

    For each row, returns the index of the LAST column holding a positive
    value, or 0.0 for rows with no positive entry, as a float vector
    (matching the dtype of the original loop-based implementation).

    Parameters:
        dataOrg: 2-D array-like of shape (rows, columns).

    Returns:
        1-D float ndarray of length rows.
    """
    data = np.asarray(dataOrg)
    rows, columns = data.shape
    positive = data > 0.0
    # Index of the last True per row: reverse the columns and take the
    # first hit, then map back to the original column index.
    last_idx = columns - 1 - np.argmax(positive[:, ::-1], axis=1)
    # Rows with no positive entry keep the original default of 0.
    return np.where(positive.any(axis=1), last_idx, 0).astype(float)

def compareClustersToOrig(cls, y):
    """Per-class purity of a clustering against true labels.

    For every true-label value i in 0..max(y), finds the size of the
    largest single-cluster group inside class i and divides it by the
    class size.

    Parameters:
        cls: array-like of integer cluster assignments (one per sample).
        y:   array-like of integer true labels (one per sample).

    Returns:
        1-D float ndarray of length max(y)+1; entry i is the purity of
        class i.  An empty class yields nan (0/0), as in the original.
    """
    cls = np.ravel(np.asarray(cls)).astype(int)
    y = np.ravel(np.asarray(y)).astype(int)
    nbrOfClusters = int(y.max())
    # Column 0 holds the class size; column c+1 counts members of class i
    # that were assigned to cluster c.
    counts = np.zeros((nbrOfClusters + 1, nbrOfClusters + 2))
    for label, cluster in zip(y, cls):
        counts[label, 0] += 1
        counts[label, cluster + 1] += 1

    errorRate = np.zeros(nbrOfClusters + 1)
    for i in range(nbrOfClusters + 1):
        errorRate[i] = counts[i, 1:].max() / counts[i, 0]

    return errorRate.astype(float)

def mask(X, mask):
    """Return the elements of X whose position is truthy in `mask`."""
    selected = []
    for idx, item in enumerate(X):
        if mask[idx]:
            selected.append(item)
    return selected
def mask2(X, mask, row):
    """Return the elements of X selected by row `row` of a 2-D mask."""
    selected = []
    for idx, item in enumerate(X):
        if mask[row, idx]:
            selected.append(item)
    return selected


# Load the reduced census data set; everything is read as strings first and
# cast per column group below.  NOTE: builtin `str` is used here — the old
# `np.str` alias was deprecated in NumPy 1.20 and removed in 1.24, and is
# identical in meaning.
censusdata = np.loadtxt('./census-income/reducedDataMatrix_03', dtype=str, delimiter=',')
attributeNames = np.loadtxt('./census-income/attributeNamesReduced_03', dtype=str, delimiter=',')
# Continuous (numeric) attributes.
continuousColumns = [0,1,2,3,4,6,20]
continuousData = censusdata[:,continuousColumns].astype(float)
continuousNames = attributeNames[continuousColumns]
# Discrete columns are those that are not continuous, apart from the
# 'instance weight' one (dropped by the [1:] slice).
# NOTE(review): this relies on set iteration order placing the instance
# weight column first — fragile; verify it really drops the intended column.
discreteColumns = list(set(range(0,censusdata.shape[1]))-set(continuousColumns))[1:]
discreteData = np.array(censusdata[:,discreteColumns]).astype(float).astype(bool)
discreteNames = attributeNames[discreteColumns]
# One-hot encoded attribute groups.
educationColumns = [7,8,21,22,23]
educationData = censusdata[:,educationColumns].astype(float)
educationNames = attributeNames[educationColumns]
occupationColumns = [9,10,31,32,33,34]
occupationData = censusdata[:,occupationColumns].astype(float)
occupationNames = attributeNames[occupationColumns]
industryColumns = [24,25,26,27,28,29,30]
industryData = censusdata[:,industryColumns].astype(float)
industryNames = attributeNames[industryColumns]
genderColumns = [11,12]
genderData = censusdata[:,genderColumns].astype(float)
genderNames = attributeNames[genderColumns]

# Collapse each one-hot group back into a single integer-coded column.
educationAsRow = undiscretizeToInt(educationData)
occupationAsRow = undiscretizeToInt(occupationData)
industryAsRow = undiscretizeToInt(industryData)
genderAsRow = undiscretizeToInt(genderData)

#figure(10)
#plt.scatter(np.array(continuousData[:,0]), np.array(continuousData[:,6]), c=np.array(educationAsRow))
#legend(educationNames[:], loc=2, ncol=2, fancybox=True)
#show()
#exit(0)

if 'Clustering' in sections:
	# Sub-steps to run.  As with `sections` at the top of the file, the
	# second assignment overrides the first, so only the subset on the
	# second line actually executes.
	glustering = ['GMM', 'Hierarchical', 'Comparison', 'Income', 'Occupation', 'ReducedMat']
	glustering = ['Comparison', 'Income', 'ReducedMat']

	if 'Occupation' in glustering:
		#X = continousData[:,[6,1]]
		# Binarize income (continuous column 6) into a 0/1 indicator.
		inc = continuousData[:,6]
		inc = binarize(inc,None,1)
		inc = np.ravel(inc.T).astype(int)

		# Features: industry/education/gender codes, binarized income and
		# occupation; the clustering target y is the occupation code.
		X = np.hstack((np.matrix(industryAsRow).T, np.matrix(educationAsRow).T, np.matrix(genderAsRow).T,np.matrix(inc).T, np.matrix(occupationAsRow).T))
		y = occupationAsRow

		if 'ReducedMat' in glustering:
			# Restrict to the first 5000 observations for tractability.
			X = X[:5000,:]
			y = y[:5000]


	if 'Income' in glustering:
		# Target: income (continuous column 6) binarized into 0/1; the
		# same indicator is also appended as the last feature column of X.
		y = continuousData[:,6]
		y = binarize(y,None,1)
		y = np.ravel(y.T).astype(int)
		X = np.hstack((np.matrix(industryAsRow).T, np.matrix(educationAsRow).T, np.matrix(genderAsRow).T, np.matrix(occupationAsRow).T, np.matrix(y).T))

		#X = np.hstack((np.matrix(industryAsRow).T, np.matrix(educationAsRow).T, np.matrix(genderAsRow).T))
		if 'ReducedMat' in glustering:
			X = X[:5000,:]
			y = y[:5000]

		#y = binarize(y,None,1)
		#y = np.ravel(y.T).astype(int)
		print np.count_nonzero(y)

	N, M = X.shape

	#Mean normalization
	mu = np.mean(X, axis=0)
	X = X - np.ones((X.shape[0],1))*mu

	# Scale every column by its maximum.  NOTE(review): after mean
	# centering the columns contain negative values, so this is not a
	# [0,1] rescaling — confirm this is the intended normalization.
	for i in xrange(M):
		X[:,i] = X[:,i]/max(X[:,i])

	#fig0 = figure(0)
	#ax0 = fig0.add_subplot(111, projection='3d')
	#ax0.scatter(np.array(X[:,0]), np.array(X[:,1]), np.array(X[:,2]), zdir='z', s=10)#c=educationColumns
	#legend(educationNames.T)

	#U,S,V = linalg.svd(X,full_matrices=False)
	#V = mat(V).T
	#rho = (S*S) / (S*S).sum()

	# Plot variance explained
	#figure(1)
	#plot(range(1,len(rho)+1), rho, 'o-')
	#title('Variance explained by principal components');
	#xlabel('Principal component');
	#ylabel('Variance explained');

	# Components to be included as features
	#fig2 = figure(2)
	#ax = fig2.add_subplot(111, projection='3d')
	#k_pca = 3
	#X = X*V[:,0:k_pca]
	#N, M = X.shape
	#ax.scatter(np.array(X[:,0]), np.array(X[:,1]), np.array(X[:,2]), zdir='z', s=20)#c=educationColumns
	#legend(educationNames.T)


	# Plot results
	if 'GMM' in glustering:
		# Model-order selection for the Gaussian mixture: compute BIC,
		# AIC and 5-fold crossvalidated negative log-likelihood for each
		# candidate number of components K.
		# Range of K's to try
		KRange = range(1,20)
		T = len(KRange)

		covar_type = 'diag'     # you can try out 'diag' as well
		reps = 1                # number of fits with different initalizations, best result will be kept

		# Allocate variables
		BIC = np.zeros((T,1))
		AIC = np.zeros((T,1))
		CVE = np.zeros((T,1))

		# K-fold crossvalidation
		CV = cross_validation.KFold(N,5,shuffle=True)

		for t,K in enumerate(KRange):
		        print('Fitting model for K={0}\n'.format(K))

		        if 'GMM' in glustering:
			        # Fit Gaussian mixture model
			        gmm = GMM(n_components=K, covariance_type=covar_type, n_init=reps, params='wmc').fit(X)

			        # Get BIC and AIC
			        BIC[t,0] = gmm.bic(X)
			        AIC[t,0] = gmm.aic(X)

			        # For each crossvalidation fold
			        for train_index, test_index in CV:

			            # extract training and test set for current CV fold
			            X_train = X[train_index]
			            X_test = X[test_index]

			            # Fit Gaussian mixture model to X_train
			            gmm = GMM(n_components=K, covariance_type=covar_type, n_init=reps, params='wmc').fit(X_train)

			            # compute negative log likelihood of X_test
			            CVE[t] += -gmm.score(X_test).sum()

		# Selection curves: BIC, AIC and 2x crossvalidated neg. log-lik.
		figure(4); hold(True)
		plot(KRange, BIC)
		plot(KRange, AIC)
		plot(KRange, 2*CVE)
		legend(['BIC', 'AIC', 'Crossvalidation'])
		xlabel('K')

		# Final fit with 6 components; extract labels/means/covariances.
		gmm = GMM(n_components=6, covariance_type=covar_type, n_init=reps, params='wmc').fit(X)
		cls = gmm.predict(X)    # extract cluster labels
		cds = gmm.means_        # extract cluster centroids (means of gaussians)
		covs = gmm.covars_      # extract cluster shapes (covariances of gaussians)

		# Expand the diagonal covariances to full MxM matrices.
		# NOTE(review): K here still holds the last value of the selection
		# loop (19), not the 6 components of the final fit, so new_covs is
		# over-allocated and only its first 6 slots are filled.
		if covar_type == 'diag':
		    new_covs = np.zeros([K,M,M])
		    count = 0

		    for elem in covs:
		        temp_m = np.zeros([M,M])

		        for i in range(len(elem)):
		            temp_m[i][i] = elem[i]

		        new_covs[count] = temp_m
		        count += 1

		    covs = new_covs

		#figure(3)
		#fig0 = figure(0)
		#ax0 = fig0.add_subplot(111, projection='3d')
		#ax0.plot(np.array(X[:,0]), np.array(X[:,1]), np.array(X[:,2]), zdir='z', s=10)

		#hold(True)
		#ncolors = np.size(np.unique(y))
		#colors = [0]*ncolors
		#cm = plt.get_cmap('Set1')

		#X-plot
		#Xplot = X
		#for i in xrange(X.shape[0]):
		#	Xplot[i,0] += (-0.5 - 0.5) * random_sample() + 0.5
		#	Xplot[i,1] += (-0.5 - 0.5) * random_sample() + 0.5
		#	Xplot[i,2] += (-0.5 - 0.5) * random_sample() + 0.5

		#for color in range(ncolors):
			#colors[color] = cm.jet.__call__(color*255/(ncolors-1))[:3]
		#	colors[color] = (color*255/(ncolors-1))

		#for i,cs in enumerate(np.unique(y.astype(int))):
			#x0.plot(np.array(X[:,0]), np.array(X[:,1]), np.array(X[:,2]), zdir='z', s=10)
			#ax0.scatter(np.array(X[(y==cs).ravel(),0]), np.array(X[(y==cs).ravel(),1]), np.array(X[(y==cs).ravel(),2]), 'o', markeredgecolor='k', markerfacecolor=colors[i],markersize=6, zorder=2)
		#	ax0.scatter(np.array(Xplot[(y==cs).ravel(),0]), np.array(Xplot[(y==cs).ravel(),1]), np.array(Xplot[(y==cs).ravel(),2]), 'o', c=colors[i], cmap=cm)
		#	hold(False)

		# create legend
		#legend_items = np.unique(y.astype(int)).tolist()
		#for i in range(len(legend_items)):
		#	if i<C: legend_items[i] = 'Class: {0}'.format(legend_items[i]);
		#	elif i<C+K: legend_items[i] = 'Cluster: {0}'.format(legend_items[i]);
		#	else: legend_items[i] = 'Centroid: {0}'.format(legend_items[i]);

		#legend(legend_items, numpoints=1, markerscale=.75, prop={'size': 9})

		# Plot results:
		#figure(3, figsize=(14,9))
		#clusterplot(X, clusterid=cls, centroids=cds, y=y, covars=covs)

	if 'Hierarchical' in glustering:

		# Perform hierarchical/agglomerative clustering on data matrix
		Method = 'single'
		Metric = 'euclidean'
		print 'Linkage'
		Z = linkage(X, method=Method, metric=Metric)

		# Compute and display clusters by thresholding the dendrogram
		Maxclust = 2
		cls = fcluster(Z, criterion='maxclust', t=Maxclust)
		#figure(3)
		#clusterplot(X, cls.reshape(cls.shape[0],1))

		# Display dendrogram
		max_display_levels=2
		figure(1)
		dendrogram(Z, truncate_mode='level', p=max_display_levels)
		show()

	if 'Comparison' in glustering:
		# Fit both a 2-component GMM and a 2-cluster hierarchical
		# clustering, then compare each against the true labels y with
		# external cluster-validity metrics.
		covar_type = 'diag'
		reps = 1

		#GMM
		gmm = GMM(n_components=2, covariance_type=covar_type, n_init=reps, params='wmc').fit(X)
		clsGMM = gmm.predict(X) # extract cluster labels
		cds = gmm.means_        # extract cluster centroids (means of gaussians)
		covs = gmm.covars_      # extract cluster shapes (covariances of gaussians)

		# Perform hierarchical/agglomerative clustering on data matrix
		Method = 'single'
		Metric = 'euclidean'
		Z = linkage(X, method=Method, metric=Metric)

		# Compute and display clusters by thresholding the dendrogram
		Maxclust = 2
		clsHier = fcluster(Z, criterion='maxclust', t=Maxclust)

		max_display_levels=2
		figure(1)
		dendrogram(Z, truncate_mode='level', p=max_display_levels)

		#errorRateGMM = compareClustersToOrig(clsGMM, y[:3000])
		#errorRateHier = compareClustersToOrig(clsHier, y[:3000])

		#Metrics
		# NOTE: GMM labels are 0-based while fcluster labels start at 1;
		# the scores below are invariant to label permutation, so the two
		# clusterings remain directly comparable.
		labels_true = y
		labels_predGMM = clsGMM.astype(int)
		labels_predHier = clsHier.astype(int)

		print np.count_nonzero(clsGMM)
		print np.count_nonzero(clsHier)

		adjRandScoreHier = metrics.adjusted_rand_score(labels_true, labels_predHier)
		adjRandScoreGMM = metrics.adjusted_rand_score(labels_true, labels_predGMM)

		adjMutualInfoHier = metrics.adjusted_mutual_info_score(labels_true, labels_predHier)
		adjMutualInfoGMM = metrics.adjusted_mutual_info_score(labels_true, labels_predGMM)

		homoHier = metrics.homogeneity_score(labels_true, labels_predHier)
		homoGMM = metrics.homogeneity_score(labels_true, labels_predGMM)

		compHier = metrics.completeness_score(labels_true, labels_predHier)
		compGMM = metrics.completeness_score(labels_true, labels_predGMM)

		vmesHier = metrics.v_measure_score(labels_true, labels_predHier)
		vmesGMM = metrics.v_measure_score(labels_true, labels_predGMM)

		print('Adjusted rand score Hier: {0}'.format(adjRandScoreHier))
		print 'Adjusted rand score GMM: {0}'.format(adjRandScoreGMM)

		print('Adjusted Mutual score Hier: {0}'.format(adjMutualInfoHier))
		print 'Adjusted Mutual score GMM: {0}'.format(adjMutualInfoGMM)

		print 'Homogenity score Hier: {0}'.format(homoHier)
		print 'Homogenity score GMM: {0}'.format(homoGMM)

		print 'Completeness score Hier: {0}'.format(compHier)
		print 'Completeness score GMM: {0}'.format(compGMM)

		print 'Vscore score Hier: {0}'.format(vmesHier)
		print 'Vscore score GMM: {0}'.format(vmesGMM)		

		#print errorRateGMM
		#print errorRateHier

	show()

### ANOMALY DETECTION #################################################################
if 'Anomaly' in sections:
	#X = censusdata.astype(float)

	# Feature matrix: the continuous attributes plus the integer-coded
	# categorical groups.  NOTE: `censusdata` is rebound to this matrix,
	# so the "first 5" rows printed below are scaled feature rows, not the
	# original raw records.
	X = np.hstack((np.matrix(continuousData), np.matrix(industryAsRow).T, np.matrix(educationAsRow).T, np.matrix(genderAsRow).T, np.matrix(occupationAsRow).T))
	censusdata = X

	# Scale every column by its maximum value.
	for i in xrange(np.shape(X)[1]):
		X[:,i] = X[:,i]/max(X[:,i])

	#y = 
	N, M = np.shape(X)
	nbrOfBars = 10000

	#Mean normalization
	#mu = np.mean(X, axis=0)
	#X = X - np.ones((X.shape[0],1))*mu

	#for i in xrange(M):
		#X[:,i] = X[:,i]/max(X[:,i])	

	### Gausian Kernel density estimator
	# cross-validate kernel width by leave-one-out-cross-validation
	# (efficient implementation in gausKernelDensity function)
	# evaluate for range of kernel widths
	widths = X.var(axis=0).max() * (2.0**np.arange(-10,3))
	logP = np.zeros(np.size(widths))
	for i,w in enumerate(widths):
	   density, log_density = gausKernelDensity(X,w)
	   logP[i] = log_density.sum()
	val = logP.max()
	ind = logP.argmax()

	# The width with the highest leave-one-out log-likelihood wins.
	width=widths[ind]
	print('Optimal estimated width is: {0}'.format(width))

	# evaluate density for estimated width
	density, log_density = gausKernelDensity(X,width)

	# Sort the densities (ascending: lowest-density observations — the
	# strongest outlier candidates — come first)
	i = (density.argsort(axis=0)).ravel()
	density = density[i]
	censusdataGKD = censusdata[i,:]

	print density
	print density.shape

	# Plot density estimate of outlier score
	figure(1)
	bar(range(nbrOfBars),density[:nbrOfBars])
	title('Gaussian Kernel Density Density estimate')

	print 'Gaussian Kernel Density first 5'
	print censusdataGKD[:5,:]

	### K-neighbors density estimator
	# Neighbor to use:
	K = 5

	# Find the k nearest neighbors
	# NOTE(review): each point is its own nearest neighbor here (distance
	# 0 in column 0), so the K-NN density effectively uses K-1 real
	# neighbors — confirm this matches the intended definition.
	knn = NearestNeighbors(n_neighbors=K).fit(X)
	D, i = knn.kneighbors(X)

	density = 1./(D.sum(axis=1)/K)

	# Sort the scores (ascending, outlier candidates first)
	i = density.argsort()
	density = density[i]
	censusdataKNN = censusdata[i,:]

	# Plot k-neighbor estimate of outlier score (distances)
	figure(2)
	bar(range(nbrOfBars),density[:nbrOfBars])
	title('KNN density: Outlier score')

	print 'KNN first 5'
	print censusdataKNN[:5,:]

	### K-nearest neigbor average relative density
	# Compute the average relative density: own density divided by the sum
	# of the neighbors' densities over K (i[:,1:] drops column 0, which is
	# the point itself).
	knn = NearestNeighbors(n_neighbors=K).fit(X)
	D, i = knn.kneighbors(X)
	density = 1./(D.sum(axis=1)/K)
	avg_rel_density = density/(density[i[:,1:]].sum(axis=1)/K)

	# Sort the avg.rel.densities
	i_avg_rel = avg_rel_density.argsort()
	avg_rel_density = avg_rel_density[i_avg_rel]
	censusdataAVG = censusdata[i_avg_rel,:]

	# Plot k-neighbor estimate of outlier score (distances)
	figure(3)
	bar(range(nbrOfBars),avg_rel_density[:nbrOfBars])
	title('KNN average relative density: Outlier score')

	print 'KNN ar first 5'
	print censusdataAVG[:5,:].astype(float)

	### Distance to 5'th nearest neighbor outlier score
	K = 5

	# Find the k nearest neighbors
	knn = NearestNeighbors(n_neighbors=K).fit(X)
	D, i = knn.kneighbors(X)

	# Outlier score
	score = D[:,K-1]
	# Sort the scores (descending here, unlike the estimators above)
	i = score.argsort()
	score = score[i[::-1]]
	# NOTE(review): this indexes with i_avg_rel (the ordering of the
	# PREVIOUS estimator) instead of the 5th-NN distance ordering —
	# looks like a copy-paste bug; verify against the intended output.
	censusdataKNN5 = censusdata[i_avg_rel,:]

	# Plot k-neighbor estimate of outlier score (distances)
	figure(4)
	bar(range(nbrOfBars),score[:nbrOfBars])
	title('5th neighbor distance: Outlier score')

	print '5th neighbor distance: first 5'
	print censusdataKNN5[:5,:].astype(float)

	show()

### ASSOCIATION MINING #################################################################
if 'Association' in sections:
    # Build a binary transaction matrix: the one-hot discrete attributes
    # followed by below-median and above-median indicators for every
    # continuous attribute.
    X = discreteData
    for i in range(0, continuousData.shape[1]):
        X = hstack((X, ~binarize(continuousData[:,i],None,1).T.astype(bool)))
    for i in range(0, continuousData.shape[1]):
        X = hstack((X, binarize(continuousData[:,i],None,1).T.astype(bool)))
    X = np.array(X)
    # Item names in the same column order as X.
    associationNames = discreteNames
    associationNames = np.append(associationNames, map(lambda name: name+" below the median", continuousNames))
    associationNames = np.append(associationNames, map(lambda name: name+" above the median", continuousNames))

    # Write one comma-separated transaction (the names of the active items
    # per row, via boolean indexing into associationNames) per line for
    # the external apriori binary.  map/reduce are Python 2 builtins here.
    apriori_filename = 'apriori_inputfile.txt'
    apriori_input = map(lambda line: associationNames[line], X)

    apriori_input_str = reduce(lambda acc,line: acc+"\n"+reduce(lambda acc,x: acc+","+x, line, "")[1:], apriori_input, "")[1:]
    f = open(apriori_filename,'w')
    f.write(apriori_input_str)
    f.close()

    # Mining parameters: minimum support (%), minimum confidence (%) and
    # maximum number of items per rule.
    minSup = 80
    minConf = 95
    maxRule = 4

    # Run Apriori Algorithm
    print('Mining for frequent itemsets by the Apriori algorithm')
    status1 = call('./toolkit_libs/apriori -f"," -s{0} -v"[Sup. %0S]" {1} apriori_temp1.txt'.format(minSup, apriori_filename), shell=True)
    if status1!=0:
        print('An error occured while calling apriori')
        print('A likely cause is that minSup was set too high and no frequent itemsets were generated')
        print('Another cause might be spaces included in the path to the apriori files.')
        exit(-1)
    if minConf>0:
        print('Mining for associations by the Apriori algorithm')
        status2 = os.system('./toolkit_libs/apriori -tr -f"," -n{0} -c{1} -s{2} -v"[Conf. %0C,Sup. %0S]" {3} apriori_temp2.txt'.format(maxRule, minConf, minSup, apriori_filename))
        if status2!=0:
            print('An error occured while calling apriori')
            exit(-1)
    print('Apriori analysis done, extracting results')
    
    
    # Extract information from stored files apriori_temp1.txt and apriori_temp2.txt
    f = open('apriori_temp1.txt','r')
    lines = f.readlines()
    f.close()
    # Extract Frequent Itemsets and their support values, parsed out of
    # the "[Sup. xx.x]" suffix apriori appended to each output line.
    FrequentItemsets = ['']*len(lines)
    sup = np.zeros((len(lines),1))
    for i,line in enumerate(lines):
        FrequentItemsets[i] = line[0:-1]
        sup[i] = re.findall(' \d..\d*]', line)[0][1:-1]
    os.remove('apriori_temp1.txt')
    
    # Read the file
    f = open('apriori_temp2.txt','r')
    lines = f.readlines()
    f.close()
    # Extract Association rules and their confidence values, parsed out
    # of the "[Conf. xx.x,Sup. xx.x]" suffix.
    AssocRules = ['']*len(lines)
    conf = np.zeros((len(lines),1))
    for i,line in enumerate(lines):
        AssocRules[i] = line[0:-1]
        conf[i] = re.findall(' \d..\d*,', line)[0][1:-1]
    os.remove('apriori_temp2.txt')    
    
    # sort (FrequentItemsets by support value, AssocRules by confidence value)
    AssocRulesSorted = [AssocRules[item] for item in np.argsort(conf,axis=0).ravel()]
    AssocRulesSorted.reverse()
    FrequentItemsetsSorted = [FrequentItemsets[item] for item in np.argsort(sup,axis=0).ravel()]
    FrequentItemsetsSorted.reverse()
    
    # Print the results (highest support / confidence first)
    import time; time.sleep(.5)    
    print('\n')
    print('RESULTS:\n')
    print('Frequent itemsets:')
    for i,item in enumerate(FrequentItemsetsSorted):
        print('Item: {0}'.format(item))
    print('\n')
    print('Association rules:')
    for i,item in enumerate(AssocRulesSorted):
        print('Rule: {0}'.format(item))



