#!/usr/bin/env python
#
# clustering notes
# (1) the columns of the data are features and the rows are observations
# (2) the rank may only be 1 or 2.
# (3) clusters are returned one per row

import unittest,sys
import numpy as np
from scipy.cluster.vq import vq, kmeans2, whiten
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab

## class to test that the kmeans algorithm is functioning
class KmeansTest(unittest.TestCase):
    """Sanity checks for SciPy's kmeans2 on two well-separated 2-D Gaussian clusters."""

    ## internal method to return a list of all permutations of a sequence
    def _permute_list(self, lst):
        """Return every permutation of ``lst`` as a list of lists.

        Accepts any sequence (it is copied to a list first), so callers may
        pass a ``range`` object under Python 3.
        """
        lst = list(lst)  # ranges don't support slice-concatenation in Python 3
        if len(lst) <= 1:
            return [lst]
        # insert the head element at every position of every permutation of the tail
        return [p[:i] + [lst[0]] + p[i:]
                for i in range(len(lst))  # was xrange: Python 2 only
                for p in self._permute_list(lst[1:])]

    ## internal method to permute the newLabels until the difference from true is minimized
    def _permute_labels(self, trueLabels, newLabels, k):
        """Return ``newLabels`` re-mapped under whichever permutation of the k
        cluster ids minimizes the number of disagreements with ``trueLabels``.

        kmeans assigns arbitrary cluster ids, so labels must be aligned to the
        ground truth before they can be compared.
        """
        permutations = self._permute_list(range(k))
        # mismatch count for the identity mapping; the original seeded this with
        # sum(|diff|), which disagrees with the count metric used in the loop
        minDiff = int(np.sum(trueLabels != newLabels))

        for perm in permutations:
            permLabels = np.array([perm[i] for i in newLabels])
            diff = int(np.sum(trueLabels != permLabels))

            if diff < minDiff:
                newLabels = permLabels
                minDiff = diff

        return newLabels

    ## the main test method to test if kmeans is working correctly
    def testKmeansByScipy(self):
        """kmeans2 should perfectly separate two distant Gaussian clusters."""
        ## declare variables
        n = 10                                                      # observations per cluster
        np.random.seed(42)                                          # deterministic data and kmeans init
        # cluster 1: two N(3,1) feature columns stacked side by side
        feature1 = np.random.normal(3, 1, n).reshape(-1, 1)
        feature2 = np.random.normal(3, 1, n).reshape(-1, 1)
        cluster1 = np.hstack((feature1, feature2))
        # cluster 2: two N(7,1) feature columns
        feature1 = np.random.normal(7, 1, n).reshape(-1, 1)
        feature2 = np.random.normal(7, 1, n).reshape(-1, 1)
        cluster2 = np.hstack((feature1, feature2))
        x = np.vstack((cluster1, cluster2))                         # (2n, 2) data matrix
        k = 2                                                       # number of clusters
        actualLabels = np.zeros(n * 2, dtype=int)                   # ground-truth labels
        actualLabels[-n:] = 1                                       # second half is cluster 2

        ## randomly permute the rows of the data; fancy indexing replaces the
        ## original O(n^2) row-by-row vstack loop, whose `newX == None` test
        ## also raises on numpy arrays (elementwise comparison) — use `is None`
        randInds = np.random.permutation(n * 2)
        x = x[randInds]
        actualLabels = actualLabels[randInds]

        ## perform kmeans on raw features
        centroids, labels = kmeans2(x, k)
        newLabels = self._permute_labels(actualLabels, labels, k)

        ## count disagreements: the original signed sum let +1 and -1 label
        ## errors cancel, silently passing on a half-wrong assignment
        misClassified = int(np.sum(newLabels != actualLabels))
        self.assertEqual(misClassified, 0)  # failUnless was removed in Python 3.12

### Run the tests when this module is executed as a script
if __name__ == "__main__":
    unittest.main()
