/**
   \file  kmeans.hh
   \brief Defines a generic API for multithreaded k-means clustering.

   This file provides an API for performing k-means clustering. It
   provides implementations of the k-means algorithms described in
   the following papers:

   Hartigan, J. A., Wong, M. A.
   Algorithm AS 136: A K-Means Clustering Algorithm
   Journal of the Royal Statistical Society, Series C (Applied
   Statistics), 1979, 28(1):100--108.

   Dhillon, I. S., Modha, D. S.
   A Data Clustering Algorithm on Distributed Memory Multiprocessors
   Large-Scale Parallel Data Mining, Lecture Notes in Artificial
   Intelligence, March 2000, 1759:245--260, Springer-Verlag.

   NOTE: This is a header-only interface. Thus, clients only need to
   include this file and do not have to link with an object module.
   However, since we use Boost.Thread for the multithreading
   functionality, you will need to link against libboost_thread
   eventually.

   NOTE 2: This algorithm does not really belong in libgist. Ideally, it
   should be part of a machine learning library (Boost.ML anyone?).
   However, at the time of writing (circa November 2011), since we were
   unable to find a suitable generic C++ implementation of multithreaded
   k-means, we had to implement it as part of libgist.
*/

/*
   This file is part of libgist.

   libgist is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2 of the License, or (at your
   option) any later version.

   libgist is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with libgist; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/

/*
   REVISION HISTORY

   $HeadURL: http://libgist.googlecode.com/svn/branches/lazebnik_dev/src/kmeans.hh $
   $Id: kmeans.hh 179 2012-04-12 03:18:05Z libgist@gmail.com $
*/

#ifndef GIST_KMEANS_DOT_HH
#define GIST_KMEANS_DOT_HH

//------------------------------ HEADERS --------------------------------

// libgist
#include "thread_pool.hh"
#include "dynamic_barrier.hh"

// Boost
#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>

#include <boost/random/variate_generator.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/mersenne_twister.hpp>

#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>

#include <boost/lambda/lambda.hpp>
#include <boost/function/function0.hpp>

// Standard C++
#include <algorithm>
#include <numeric>
#include <vector>
#include <stdexcept>
#include <memory>
#include <limits>
#include <utility>

// Standard C
#include <time.h>

//-------------------------- MAIN NAMESPACE -----------------------------

namespace gist {

//----------------------- FORWARD DECLARATIONS --------------------------

/**
   \namespace kmeans_init
   \brief Namespace encapsulating various k-means initialization policies
   provided by this library.

   libgist's implementations of k-means algorithms attempt to be as
   generic as possible. Clients may customize libgist's k-means
   implementations by providing function objects to implement different
   aspects of the k-means algorithm that they need done in some specific
   way. This particular namespace encapsulates various k-means
   initialization policies already provided by libgist.
*/
namespace kmeans_init {

// Hartigan and Wong's centroids initialization method
template<typename T>
class init_centroids_hw ;

// Initialize centroids by picking k data points at random
template<typename T>
class init_centroids_random ;

} // namespace kmeans_init

/**
   \namespace kmeans_impl
   \brief Namespace encapsulating different k-means algorithm
   implementations.

   libgist implements a couple of different parallel k-means algorithms.
   This namespace encapsulates all the k-means implementations provided
   by libgist.

   NOTE: Users may supply their own implementation if they so wish.
   Details are in the documentation of the gist::kmeans class.
*/
namespace kmeans_impl {

// Parallelized version of the Hartigan-Wong algorithm
template<typename T>
class hartigan_wong ;

// The Dhillon-Modha parallel k-means algorithm
template<typename T>
class dhillon_modha ;

// Helper returning the squared Euclidean distance between two points.
// Comparing squared distances orders points exactly as comparing the
// true distances would, so callers can avoid taking square roots.
template<typename T>
T dist2(const boost::numeric::ublas::vector<T>& a,
        const boost::numeric::ublas::vector<T>& b)
{
   namespace ublas = boost::numeric::ublas ;
   const ublas::vector<T> diff = a - b ;
   return ublas::inner_prod(diff, diff) ;
}

} // namespace kmeans_impl

//------------------- K-MEANS INTERFACE: EXCEPTIONS ---------------------

/**
   \namespace kmeans_errors
   \brief Namespace encapsulating different errors reported by libgist's
   k-means module.
*/
namespace kmeans_errors {

/**
   \brief Exception to indicate centroids initialization failure.

   Most k-means algorithms require an initial set of cluster centers
   before they can commence. By default, we select k data points using
   the method described in the "Additional Comments" section of the
   Hartigan-Wong paper.

   However, libgist's k-means class allows clients to specify an
   alternative centroids initialization policy by supplying an
   appropriate function object to the kmeans template.

   This function object must return a d-by-k matrix, where d is the
   input data dimensionality and k the number of clusters. If the
   matrix returned by the centroids initialization policy does not
   have these dimensions, the kmeans clustering methods will throw an
   instance of this class to indicate the error.
*/
struct bad_centroids_initialization: public std::runtime_error {
   /// Initialize the exception with a fixed diagnostic message.
   ///
   /// NOTE: The constructor is defined in-class so that it is
   /// implicitly inline. Since this is a header-only library, an
   /// out-of-line, non-inline definition would violate the One
   /// Definition Rule as soon as two translation units include this
   /// file, resulting in duplicate symbol errors at link time.
   bad_centroids_initialization()
      : std::runtime_error("centroids initialization policy "
                           "produced a matrix of the wrong size")
   {}
} ;

/**
   \brief Exception to indicate an empty cluster.

   Most k-means algorithms require an initial set of cluster centers
   before they can commence. If, however, these initial cluster centers
   are not chosen properly and result in an empty cluster during the
   initial clustering step, we will throw an instance of this class to
   indicate the problem.

   We also throw this exception when we encounter an empty clustering
   during the clustering process. Thus, the k-means algorithm should be
   implemented so as to prevent any empty clusters from forming.
*/
struct empty_cluster: public std::runtime_error {
   /// This member indicates the iteration at which we encountered an
   /// empty cluster. If we find an empty cluster before we ever begin
   /// the main k-means clustering loop, the value of this member will
   /// be -1.
   int iteration ;

   /// This member specifies the index of the first empty cluster we
   /// found.
   int cluster ;

   /**
      \brief  Initialize empty_cluster exception.
      \param  c Index of first empty cluster.

      The iteration member starts out as -1, i.e., "before the main
      clustering loop"; throwers that are inside the loop should set it
      before rethrowing or letting the exception propagate.

      NOTE: The constructor is defined in-class so that it is
      implicitly inline. Since this is a header-only library, an
      out-of-line, non-inline definition would violate the One
      Definition Rule when multiple translation units include this
      file.
   */
   empty_cluster(int c)
      : std::runtime_error("at least one cluster is empty"),
        iteration(-1), cluster(c)
   {}
} ;

/**
   \brief Exception to indicate fewer data points than clusters.

   If users supply n data points but request libgist's k-means to create
   more than n clusters, we will be very unhappy and let them know it
   instance of this class.
*/
struct not_enough_data: public std::runtime_error {
   /// Initialize the exception with a fixed diagnostic message.
   ///
   /// NOTE: The constructor is defined in-class so that it is
   /// implicitly inline. Since this is a header-only library, an
   /// out-of-line, non-inline definition would violate the One
   /// Definition Rule when multiple translation units include this
   /// file, resulting in duplicate symbol errors at link time.
   not_enough_data()
      : std::runtime_error("cannot create more clusters "
                           "than there are data points")
   {}
} ;

} // namespace kmeans_errors

//--------------------- K-MEANS INTERFACE: CLASS ------------------------

/**
   \brief Encapsulation of multithreaded k-means clustering.

   This class implements an interface for parallel k-means clustering.
   The actual clustering implementation is provided by a pair of policy
   classes, viz., the centroids initialization policy and the k-means
   algorithm policy. libgist provides a few different implementations for
   these two policies and allows clients to supply their own.

   Before we discuss the policy classes, here is a brief description of
   this class's interface: clients should create an instance (usually, a
   temporary will suffice) and then, depending on exactly what they want
   returned by the clustering operation, invoke the appropriate
   clustering method. The following snippet of code illustrates typical
   usage of this class:

   \code
       typedef boost::numeric::ublas::matrix<float> matrix ;
       matrix data = get_input_data_matrix_somehow() ;
       matrix centroids = gist::kmeans<>().centroids(k, data) ;
   \endcode

   As shown above, the input data to be clustered has to be supplied via
   a Boost.uBLAS matrix. The data points in this matrix are expected to
   be arranged as column vectors. Thus, the number of rows in the input
   matrix will correspond to the number of dimensions in the data and the
   number of columns will be the number of input data points to be
   clustered.

   The above code snippet relied on the defaults for the different
   template arguments accepted by this class. The following paragraphs
   describe these types in more detail.

   The first template parameter to this class should be a suitable
   numeric type specifying the contents of the input matrix described
   above. If clients do not supply this type explicitly, it will default
   to float.

   The second template parameter is an initialization policy for the
   cluster centers. If not supplied, it will default to a function object
   that initializes the cluster centers using the technique described in
   the Hartigan-Wong paper.

   However, if you need to supply a different centroids initialization
   policy, you should implement a suitable function object. The function
   call operator for this object will be passed two parameters, viz., k
   and data. The first parameter k is an int and specifies the number of
   clusters. The second parameter data is a const reference to the input
   matrix.

   The centroids initialization function object should return a matrix
   containing the initial cluster centers. Like the input data, these
   centroids should be arranged as column vectors. Thus, the matrix
   returned should have d rows, where d is the number of dimensions in
   the input data, i.e., the same number of rows as the input matrix. And
   it should have k columns, i.e., the number of clusters to be produced
   by k-means.

   If the client-supplied centroids initialization policy function object
   does not return a d-by-k matrix as described above, the clustering
   methods will throw a gist::kmeans_errors::bad_centroids_initialization
   exception (which is derived from std::runtime_error).

   The third template argument is a function object that implements
   k-means clustering. This function object will be passed the following
   arguments:

       -# k: number of clusters to create
       -# data: matrix containing the data points to be clustered
       -# max_iterations: an integer specifying the maximum number of
          iterations allowed for clustering
       -# num_threads: number of threads to use for parallelizing the
          clustering operation
       -# clusters: a reference to an std::vector<int>
       -# centroids: a reference to a boost::numeric::ublas::matrix<T>

   The kmeans interface object (an instance of this class) expects the
   implementation object to return the cluster assignments and centroids
   in the last two of the above parameters. The clusters parameter must
   be initialized by the implementation object. The centroids parameter,
   however, will be initialized by the interface object to a d-by-k
   matrix containing the initial cluster centroids.

   As it clusters the dataset, the implementation object is expected to
   not exceed the number of iterations specified by the third parameter.
   However, this is not enforced in any way by the interface object. If
   you implement your own k-means algorithm and supply it as a policy to
   this clustering interface class, please be good and follow this
   restriction.

   The interface object will specify how many threads the implementation
   object should use to parallelize its operations. Implementation
   objects should create these threads on their own.

   Finally, the implementation object's function call operator is
   expected to return the number of iterations to convergence.

   The default k-means algorithm used by libgist is the Hartigan-Wong
   algorithm extended to take advantage of multiple CPU's. Additionally,
   libgist provides an implementation of Dhillon and Modha's parallel
   k-means algorithm.

   Although we mentioned parallelization and multithreading, we have
   not illustrated them in any concrete way. Fear not, the constructor
   documentation will reveal all. Now, go forth and cluster to ye heart's
   content!
*/
template<typename T = float,
         typename centroids_init_policy = kmeans_init::init_centroids_hw<T>,
         typename clustering_algorithm  = kmeans_impl::hartigan_wong<T> >
class kmeans {
public:
   /// Convenient typedefs for the input matrix and its individual
   /// columns. The data points and centroids are stored column-wise in
   /// these Boost.uBLAS matrices.
   //@{
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;
   //@}

private:
   // The goal of k-means clustering is to compute the cluster indices of
   // the closest cluster for each data point. This data member is used
   // to store these indices.
   std::vector<int> m_clusters ;

   // In addition to the cluster indices, we also need to keep track of
   // the cluster centers. This data member does just that.
   matrix m_centroids ;

   // The overall within-cluster sum-of-squares.
   T m_wss ;

   // By default, we will iterate a maximum of 1000 times. However, we
   // allow users to specify a different value for this parameter. We
   // also keep track of the total number of iterations so that, at the
   // end of the clustering, users can tell exactly how many iterations
   // it took to cluster the input data.
   int m_max_iterations, m_iteration ;

   // The kmeans interface object simply requires implementation objects
   // to parallelize their operations whenever possible. It does not
   // maintain any infrastructure for this purpose apart from storing the
   // number of threads that clients want to use. It is up to the
   // implementation objects to create the desired number of threads and
   // parallelize as they see fit. Therefore, we only need to store the
   // number of threads specified by the client module and pass that to
   // the implementation object at the right time.
   int m_num_threads ;

public:
   /**
      \brief  K-means initialization.
      \param  num_threads The number of threads for parallelization.
      \param  max_iterations The maximum number of iterations.
      \return Initialized kmeans object ready to perform clustering.

      When this class is instantiated, it simply records the desired
      number of threads; it does not create any threads itself (that is
      left to the k-means implementation policy object). If the number
      of threads is not specified, the k-means algorithm will be
      single-threaded.

      Additionally, during instantiation, clients may specify the maximum
      number of iterations the clustering should take. The default
      maximum is 1000 iterations.
   */
   kmeans(int num_threads = 1, int max_iterations = 1000) ;

   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Cluster assignments for each data point plus the cluster centers.

      This function clusters the input data into k groups and returns the
      cluster assignments for each data point via an STL vector of
      integer cluster indices and the cluster centers via a Boost.uBLAS
      matrix. These two return values, i.e., cluster assignments and
      centroids, are bundled together in an STL pair.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      The first return value is an STL vector containing n elements,
      i.e., one for each input data point. Each element in this vector
      will be a number in the range [0, k), indicating the index of the
      cluster to which that data point belongs.

      The second return value is a Boost.uBLAS matrix of T (where T is a
      numeric type passed to the kmeans template class; defaults to
      float). The size of this returned matrix is d-by-k and, like the
      input data matrix, is also arranged column-wise. That is, each of
      the k cluster centroids makes up one column.
   */
   std::pair<std::vector<int>, matrix> cluster(int k, const matrix& data) ;

   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Cluster assignments for each data point.

      This function clusters the input data into k groups and returns the
      cluster assignments for each data point via an STL vector of
      integer cluster indices. Use this function when you want to perform
      clustering and are only interested in the cluster assignments and
      don't care for the cluster centroids.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      The return value is an STL vector containing n elements, i.e., one
      for each input data point. Each element in this vector will be a
      number in the range [0, k), indicating the index of the cluster to
      which that data point belongs.
   */
   std::vector<int> clusters(int k, const matrix& data) ;

   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Centroids of the data clusters.

      This function clusters the input data into k groups and returns the
      coordinates of the cluster centers via a matrix. Use this function
      when you want to perform clustering and are only interested in the
      cluster centroids and not the cluster assignments themselves.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      The return value is a Boost.uBLAS matrix of T (where T is a numeric
      type passed to the kmeans template class; defaults to float). The
      size of this returned matrix is d-by-k and, like the input data
      matrix, is also arranged column-wise. That is, each of the k
      cluster centroids makes up one column.
   */
   matrix centroids(int k, const matrix& data) ;

   /**
      \brief  Return the overall within-cluster sum-of-squares.
      \return Overall within-cluster sum-of-squares.

      The k-means algorithm creates clusters by trying to minimize the
      following objective function:

      \verbatim
          sum(1,n) sum(1,k) {I(Xi, Ck) * (Xi - Ck) * (Xi - Ck)}
      \endverbatim

      In the above expression, I(Xi, Ck) is an indicator function that
      returns one if point Xi belongs to cluster Ck and zero otherwise.
      The term (Xi - Ck) is the distance between point Xi and the center
      of cluster k. The sum() functions are the capital sigma summation
      functions. Thus, the inner summation is for each cluster and the
      outer summation for each point. The following paper explains the
      above function:

      Maitra, R., Peterson, A. D., Ghosh, A. P.
      A Systematic Evaluation of Different Methods for Initializing the
      K-means Clustering Algorithm.
      IEEE Transactions on Knowledge and Data Engineering, 2010.

      In effect, the above objective function computes the sum of the
      squared distances between each of the data points and their
      respective cluster centers.

      After clustering, this implementation of k-means will compute the
      value of the above function. Users can call this method to retrieve
      the WSS associated with the clustering.
   */
   T wss() const {return m_wss ;}

   /**
      \brief  Change or retrieve the maximum number of iterations.
      \param  m The new maximum number of iterations (clamped to >= 1).
      \return The maximum number of iterations.

      After initialization, most k-means algorithms enter a loop to
      iteratively converge toward a local minimum.

      If the initial cluster centers were chosen well, this loop will
      usually converge fairly soon. However, in some cases, it may not.
      To prevent a runaway loop, we limit the maximum number of
      iterations of this loop. The default maximum is 1000 iterations.
      However, clients may change this maximum to a different number and
      check the current maximum using these functions.

      After clustering is done, clients may also find it useful to see
      how many iterations were performed, which they can do by calling
      the iterations method.
   */
   //@{
   void max_iterations(int m)  {m_max_iterations = std::max(m, 1) ;}
   int  max_iterations() const {return m_max_iterations ;}
   int  iterations()     const {return m_iteration ;}
   //@}

private:
   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Nothing.

      This method is the top-level implementation of the k-means
      clustering algorithm. It is the one that actually performs the
      clustering. The public interface methods simply forward their calls
      to this internal function.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      When it is done, the cluster indices will be stored in the
      m_clusters data member and the cluster centers will be in
      m_centroids.

      NOTE: The clustering is actually performed by the centroids
      initialization and k-means implementation policy objects. This
      method simply instantiates and invokes these policies to get its
      work done.
   */
   void find_clusters(int k, const matrix& data) ;
} ;

// Initialization: simply record the clustering parameters. No threads
// or other infrastructure are created here; the k-means implementation
// policy object is responsible for spawning m_num_threads workers.
//
// Both parameters are clamped to a minimum of one: the original code
// clamped only max_iter, which allowed a zero or negative thread count
// to be handed to the implementation policy (leaving it with no
// workers at all). A single thread is the sensible lower bound.
template<typename T, typename P, typename I>
kmeans<T, P, I>::kmeans(int num_threads, int max_iter)
   : m_wss(0), m_max_iterations(std::max(max_iter, 1)), m_iteration(0),
     m_num_threads(std::max(num_threads, 1))
{}

// Cluster the input data and hand back both results at once: the
// per-point cluster assignments and the cluster centers, bundled in an
// STL pair (assignments first, centroids second).
template<typename T, typename P, typename I>
std::pair<std::vector<int>, typename kmeans<T, P, I>::matrix>
kmeans<T, P, I>::cluster(int k, const matrix& data)
{
   find_clusters(k, data) ;
   return std::pair<std::vector<int>, matrix>(m_clusters, m_centroids) ;
}

// Cluster the input data and hand back only the per-point cluster
// assignments, discarding the centroids computed along the way.
template<typename T, typename P, typename I>
std::vector<int>
kmeans<T, P, I>::clusters(int k, const matrix& data)
{
   find_clusters(k, data) ;
   return m_clusters ;
}

// Cluster the input data and hand back only the d-by-k matrix of
// cluster centers, discarding the per-point assignments.
template<typename T, typename P, typename I>
typename kmeans<T, P, I>::matrix
kmeans<T, P, I>::centroids(int k, const matrix& data)
{
   find_clusters(k, data) ;
   return m_centroids ;
}

// Top-level routine implementing the parallel k-means algorithm. All
// of the real work is delegated to the two policy objects; this method
// only validates their inputs/outputs and computes the final WSS.
template<typename T, typename init_centroids, typename cluster_data>
void
kmeans<T, init_centroids, cluster_data>::
find_clusters(int k, const matrix& data)
{
   // Cannot create more clusters than there are points
   const int num_points = data.size2() ;
   if (k > num_points)
      throw kmeans_errors::not_enough_data() ;

   // Seed the algorithm with initial cluster centers produced by the
   // centroids initialization policy and sanity check the result: the
   // policy is required to hand back a d-by-k matrix.
   m_centroids = init_centroids()(k, data) ;
   if (m_centroids.size1() != data.size1()
       || static_cast<int>(m_centroids.size2()) != k)
      throw kmeans_errors::bad_centroids_initialization() ;

   // Hand the actual clustering off to the k-means implementation
   // policy, remembering how many iterations it took to converge.
   m_iteration = cluster_data()(k, data,
                                m_max_iterations, m_num_threads,
                                m_clusters, m_centroids) ;

   // Finally, compute the overall within-cluster sum-of-squares, i.e.,
   // the sum of squared distances between each point and the center of
   // the cluster to which it was assigned.
   using boost::numeric::ublas::column ;
   m_wss = 0 ;
   for (int i = 0; i < num_points; ++i)
      m_wss += kmeans_impl::dist2<T>(column(data, i),
                                     column(m_centroids, m_clusters[i])) ;
}

//----------------- CENTROIDS INITIALIZATION POLICIES -------------------

namespace kmeans_init {

/**
   \brief Initialize k-means centroids using the method described by
   Hartigan and Wong.

   An important initialization step for Hartigan and Wong's k-means is
   selecting the cluster centers. This determines how good the clustering
   results will eventually be. libgist's k-means allows clients to
   specify a suitable initialization policy for this purpose.

   This particular function object initializes the centroids by sorting
   the input data points on the basis of their distance to the overall
   mean of the dataset and then picking k points offset from the first
   one by n/k (where n is the total number of data points).

   According to the Hartigan-Wong paper, this initialization method
   guarantees that no cluster will be empty when we seed the initial
   cluster assignments.

   NOTE: This is the default cluster centroid initialization policy.
*/
template<typename T>
class init_centroids_hw {
   // Convenient shorthands for the Boost.uBLAS types used throughout.
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;
public:
   // Return a d-by-k matrix whose columns are the initial cluster
   // centers picked from the input data as per the Hartigan-Wong paper.
   matrix operator()(int k, const matrix& data) ;
private:
   // Comparator that orders data point indices (columns of the input
   // matrix) by the distance of the corresponding points from the
   // overall dataset mean.
   class dist_to_mean {
      const matrix& data ; // input data matrix (points as columns)
      vector mean ;        // mean of all the input data points
   public:
      dist_to_mean(const matrix& data) ;
      bool operator()(int i, int j) const ;
   } ;
} ;

template<typename T>
typename init_centroids_hw<T>::matrix
init_centroids_hw<T>::
operator()(int k, const matrix& data)
{
   // Build a list of indices referring to the columns of the input
   // data matrix and order it by each point's distance to the dataset
   // mean (nearest point first).
   const int n = data.size2() ;
   std::vector<int> points(n) ;
   for (int p = 0; p < n; ++p)
      points[p] = p ;
   std::stable_sort(points.begin(), points.end(), dist_to_mean(data)) ;

   // Then pick every (n/k)-th point from the sorted sequence as an
   // initial centroid, which spreads the k seeds across the full range
   // of distances to the mean.
   using boost::numeric::ublas::column ;
   const int step = n/k ;
   matrix centroids(data.size1(), k) ;
   for (int c = 0; c < k; ++c)
      column(centroids, c) = column(data, points[c*step]) ;
   return centroids ;
}

// Helper function object for sorting the data points based on their
// respective distances to the dataset mean. The constructor computes
// that mean once up front so the comparator does not have to.
template<typename T>
init_centroids_hw<T>::dist_to_mean::dist_to_mean(const matrix& d)
   : data(d)
{
   using boost::numeric::ublas::zero_vector ;
   using boost::numeric::ublas::column ;

   // Sum all the data points (columns) and divide by the number of
   // points to obtain the dataset mean.
   const int num_points = data.size2() ;
   mean = zero_vector<T>(data.size1()) ;
   for (int j = 0; j < num_points; ++j)
      mean += column(data, j) ;
   mean /= num_points ;
}

// Returns true if data point i is closer to the dataset mean than
// point j.
template<typename T>
bool init_centroids_hw<T>::dist_to_mean::operator()(int i, int j) const
{
   using boost::numeric::ublas::column ;
   using boost::numeric::ublas::inner_prod ;

   // Displacements of points i and j from the dataset mean.
   const vector a = column(data, i) - mean ;
   const vector b = column(data, j) - mean ;

   // The dot product of a vector with itself is the square of its
   // magnitude. Since a and b are the displacements of points i and j
   // from the dataset mean, these dot products are the squared
   // distances of the two points from the mean.
   //
   // NOTE: Comparing squared distances yields the same ordering as
   // comparing the actual distances, so we can skip the square roots.
   const T da = inner_prod(a, a) ;
   const T db = inner_prod(b, b) ;
   return da < db ;
}

/**
   \brief Initialize k-means centroids randomly.

   An important initialization step for Hartigan and Wong's k-means is
   selecting the cluster centers. This determines how good the clustering
   results will eventually be. libgist's k-means allows clients to
   specify a suitable initialization policy for this purpose.

   This particular function object initializes the centroids by choosing
   k input data points at random to serve as the initial cluster centers.

   NOTE: Although we provide this random initialization policy, it is
   not recommended for general use as the initial cluster assignments
   based on a random selection of k data points can result in one or
   more empty clusters, which, in turn, will cause the Hartigan-Wong
   algorithm to fail.
*/
template<typename T>
class init_centroids_random {
   // Convenient shorthands for the Boost.uBLAS types used throughout.
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;
public:
   // Return a d-by-k matrix whose columns are k randomly chosen input
   // data points (columns of the data matrix).
   matrix operator()(int k, const matrix& data) ;
} ;

// Pick k *distinct* data points at random to serve as the initial
// cluster centers.
//
// NOTE: The previous implementation drew k independent random column
// indices, which could select the same data point more than once. A
// duplicated centroid practically guarantees the empty clusters this
// policy's documentation warns about. We now perform a partial
// Fisher-Yates shuffle of the column indices and use the first k, which
// yields k distinct points while remaining uniformly random.
template<typename T>
typename init_centroids_random<T>::matrix
init_centroids_random<T>::
operator()(int k, const matrix& data)
{
   typedef boost::mt19937 gen_t ;
   typedef boost::uniform_int<> dist_t ;

   // Seed the Mersenne Twister with the current time so that different
   // runs produce different random clusterings.
   gen_t gen(static_cast<unsigned int>(time(0))) ;

   // Indices of all the input data points (columns of the data matrix)
   const int n = data.size2() ;
   std::vector<int> points(n) ;
   for (int i = 0; i < n; ++i)
      points[i] = i ;

   using boost::numeric::ublas::column ;
   matrix centroids(data.size1(), k) ;
   for (int i = 0; i < k; ++i)
   {
      // Partial Fisher-Yates step: swap a randomly chosen index from
      // the not-yet-picked tail [i, n) into slot i, then use it. Slots
      // 0..i-1 already hold previously picked (distinct) indices.
      dist_t pick(i, n - 1) ;
      std::swap(points[i], points[pick(gen)]) ;
      column(centroids, i) = column(data, points[i]) ;
   }
   return centroids ;
}

} // namespace kmeans_init

//--------------- K-MEANS IMPLEMENTATION: HARTIGAN-WONG -----------------

namespace kmeans_impl {

/**
   \brief Implementation of Hartigan-Wong k-means algorithm.

   This class implements the k-means algorithm described in the following
   papers:

   Hartigan, J. A., Wong, M. A.
   Algorithm AS 136: A K-Means Clustering Algorithm.
   Journal of the Royal Statistical Society, Series C (Applied
   Statistics), 1979, 28(1):100--108.

   Maitra, R., Peterson, A. D., Ghosh, A. P.
   A Systematic Evaluation of Different Methods for Initializing the
   K-means Clustering Algorithm.
   IEEE Transactions on Knowledge and Data Engineering, 2010.

   This implementation of Hartigan-Wong parallelizes the optimal transfer
   stage in order to speed up convergence when multiple CPU's are
   available. It is meant to be used in conjunction with the gist::kmeans
   class as the third template argument, viz., the k-means algorithm
   implementation policy. Clients cannot and should not use it directly.

   NOTE: Ideally, k-means implementation objects should not encounter
   empty clusters and should throw a kmeans_error::empty_cluster
   exception in case they do. Unfortunately, this implementation of
   Hartigan-Wong does not guarantee non-empty clusterings. That is, it is
   possible for some clusters produced by this k-means implementation
   object to be empty. Client functions/modules should check the
   clustering results to determine whether or not this is the case.

   We had to allow empty clusters due to the way the parallelized
   optimal transfer stage works. If we were to use a lock in the
   optimal transfer stage, the benefits of parallelization would be
   lost as all but one of the threads would simply block in most
   situations.

   The least disruptive way of fixing this problem turned out to be to
   disable the empty cluster check in the internal update_centroids()
   method of this class. It looks like the Hartigan-Wong algorithm is
   ill-suited to parallelization and that we will have to reimplement
   parallel k-means using an algorithm expressly designed to exploit
   multiple cores...

   Therefore, for now (circa April 2012), until this issue is properly
   fixed, users should consider this implementation of multithreaded
   Hartigan-Wong k-means broken and buggy.

   Note, however, that for very large datasets, it may simply be that
   there are no empty clusters and the explicit check is unnecessary.
   Thus, this class may well work in those situations.
*/
template<typename T>
class hartigan_wong {
   // To ensure that client modules are not able to use this class
   // directly, we make everything in it private and allow only the
   // kmeans class to access it.
   template<typename U, typename P, typename I>
   friend class kmeans ;

   // Handy shortcuts
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;

   // The kmeans interface object will provide various parameters we need
   // to implement Hartigan-Wong. This structure holds these parameters
   // in one place.
   struct params {
      // This data member is where the implementation object has to store
      // the cluster assignments.
      std::vector<int>& clusters ;

      // This data member will contain the initial cluster centroids
      // computed by the interface object and will be expected to contain
      // the final cluster centroids as computed by the implementation
      // object.
      matrix& centroids ;

      // This is the maximum number of iterations the implementation
      // object can take to converge.
      int max_iterations ;

      // Internal thread pool for parallelizing Hartigan-Wong
      thread_pool<boost::function<void ()> > pool ;

      // Initialization.
      params(std::vector<int>& clusters, matrix& centroids,
             int max_iterations, int num_threads);
   } ;
   // NOTE(review): std::auto_ptr is deprecated in C++11 and removed in
   // C++17; if this code base ever moves past C++03, this should become
   // std::unique_ptr.
   std::auto_ptr<params> m_params ;

   // The Hartigan-Wong algorithm requires not only the closest cluster
   // indices but also the second-closest cluster indices for each data
   // point.
   std::vector<int> m_clusters2 ;

   // Various per-cluster intermediate computations.
   struct cluster_data {
      int  n ;    // current number of points in a cluster
      bool live ; // whether or not this cluster is part of the live set
      cluster_data() ;
   } ;

   // As clustering proceeds, we update various pieces of data about the
   // clusters with this member variable.
   std::vector<cluster_data> m_cluster_data ;

   // According to the original Hartigan-Wong paper, the quick transfer
   // stage continues as long as there are transfers from closest to
   // second-closest clusters. However, in some perverse situations, this
   // can result in an infinite loop because just one point or a small
   // number of points may keep oscillating back and forth between its or
   // their respective closest and second-closest clusters (which can
   // happen because the centroids update after a transfer can cause R1
   // and R2 to reverse their inequality).
   //
   // To prevent this infinite loop, in the quick transfer stage, this
   // implementation of the Hartigan-Wong k-means algorithm allows a
   // point to be transferred from its closest to its second-closest
   // cluster only once. To that end, we use this vector of bools to keep
   // track of whether or not a point has already been transferred.
   //
   // NOTE(review): qtran() appears to use a function-local vector for
   // this purpose instead of this member (which is only resized in
   // operator()); confirm whether m_swapped is still needed.
   std::vector<bool> m_swapped ;

   /**
      \brief  Hartigan-Wong implementation.
      \param  k Number of clusters to be created.
      \param  data Matrix containing data points to be clustered.
      \param  x Maximum number of iterations allowed.
      \param  t Number of threads to use for parallelizing Hartigan-Wong.
      \param  clusters STL vector of ints containing cluster assignments.
      \param  centroids Matrix containing cluster centroids.
      \return Number of iterations to convergence.

      This is the implementation object's "interface" method that
      actually performs the clustering. In this case, this function
      implements the top-level steps of the Hartigan-Wong algorithm.
   */
   int operator()(int k, const matrix& data, int x, int t,
                  std::vector<int>& clusters, matrix& centroids) ;

   /**
      \brief  Find the two closest clusters for each data point.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Initial cluster assignments in m_clusters and m_clusters2.

      After initializing the cluster centers, the Hartigan-Wong algorithm
      computes the two closest clusters for each data point. This
      function performs the closest and second-closest cluster
      computation.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      After it is done, the m_params->clusters data member will contain
      the cluster indices of the closest clusters and m_clusters2 the
      indices of the second-closest centers. Both these members will have
      n elements each, one for each of the input data points. Each
      element will be a number in the range [0, k).

      To find the closest and second-closest cluster for a data point, we
      have to find the Euclidean distance between it and each of the k
      cluster centers. For d-dimensional points, each distance
      computation involves d multiplications and d-1 additions. As we have
      n points and k clusters, we will have to perform these
      multiplications and additions n*k times.

      Needless to say, for large d, n, and k, this closest cluster
      computation thingy will involve a whole lotta number crunching...
      Luckily, we can do this for one point independent of all the other
      points. Therefore, this computation is a prime candidate for
      parallelization.

      Consequently, this function simply breaks up the task of finding
      the closest and second-closest clusters for n data points into n
      tasks and hands them off to the internal kmeans thread pool to get
      the job done as quickly as possible.
   */
   void find_two_closest_centroids(int k, const matrix& data) ;

   /**
      \brief Function object to find two closest centroids for each point.

      The thread pool needs parameterless functions that return nothing.
      Since we need to use the k-means input data and other parameters
      and store the results of the closest centroids computations in
      member variables of the k-means class, we have to use a properly
      initialized function object for each task.

      This inner class is the function object described above.
   */
   class two_closest_centroids_finder {
      // To find the two closest clusters for a data point, we need
      // access to the k-means input data matrix plus the current cluster
      // centers. These two data members store references to the
      // necessary data structures.
      const matrix& m_data ;
      const matrix& m_centroids;

      // We could determine the number of clusters K from the m_centroids
      // data member. However, this parameter is readily available
      // throughout the k-means API and is easily enough just passed in
      // to this class.
      const int m_num_clusters ;

      // Since finding the closest two clusters for one data point is
      // independent of the same computation for other points, we break
      // up the overall job into sub-tasks and execute them in parallel.
      // Each sub-task finds the closest two clusters for one data point.
      //
      // This variable specifies the index of the data point on which
      // this task must concentrate its efforts.
      const int m_index ;

      // The indices of the two closest clusters are stored in the
      // m_clusters and m_clusters2 member variables of the (outer)
      // k-means object. These two references simply point to those
      // variables.
      std::vector<int>& m_clusters ;
      std::vector<int>& m_clusters2;

   public:
      /**
         \brief  Initialize function object for finding two closest clusters.
         \param  k The number of clusters.
         \param  data Input data matrix containing the points to be clustered.
         \param  i Index of point for which to find two closest clusters.
         \param  centroids Matrix containing current cluster centers.
         \param  c1 Cluster indices for closest clusters.
         \param  c2 Cluster indices for second closest clusters.
         \return Properly initialized task for the thread pool.

         When a closest centroids finder task is instantiated, it has to
         be supplied the appropriate parameters so that the function
         object has all the state it needs to get its job done.
      */
      two_closest_centroids_finder(int k, const matrix& data, int i,
                                   const matrix& centroids,
                                   std::vector<int>& c1,
                                   std::vector<int>& c2) ;

      /**
         \brief  Find the two closest clusters for point indexed by m_index.
         \return Nothing.

         This function checks the Euclidean distance between the data
         point indexed by m_index and each of the K cluster centers to
         find the data point's closest and second closest clusters.
      */
      void operator()() const ;
   } ;

   /**
      \brief  Calculate the latest cluster centers.
      \param  k Number of clusters.
      \param  data The input matrix containing the data points to be clustered.
      \return Cluster centers via m_centroids member.

      This function computes the k cluster centers using the current
      cluster assignments stored in the m_clusters member variable.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      When it is done, the cluster centers will be stored in the
      m_centroids data member.

      NOTE: Before invoking this method, callers must ensure that the
      number of points for each cluster is set to zero. That is
      m_cluster_data[i].n must be zero for all i in the range [0, k).
   */
   void update_centroids(int k, const matrix& data) ;

   /**
      \brief  The optimal transfer stage.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return True if there were reallocations, false otherwise.

      This method implements the optimal transfer step described in the
      Hartigan-Wong paper. The goal of the optimal transfer stage is to
      reallocate points to clusters so as to induce the maximum reduction
      in the within-cluster sum-of-squares.

      Clustering will terminate when we perform n optimal transfer steps
      without any reallocations (n is the number of input data points).

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      In the optimal transfer stage, for each data point, we find the
      minimum R2 over all the clusters. R2 is computed using the formula
      dist2(i, c) * n(c)/(n(c) + 1), where c is the cluster to which
      point i belongs, n(c) is the number points in cluster c, and dist2
      is a function that returns the square of the Euclidean distance
      between point i and the center of cluster c.

      If the minimum R2 for a point is less than
      dist2(i, c) * n(c)/(n(c) - 1), we will reallocate the point to the
      cluster corresponding to the minimum R2.
   */
   bool optra(int k, const matrix& data) ;

   /**
      \brief Function object to help parallelize the optimal transfer stage.
   */
   class optra_task {
      // Each optra task operates on one of the data points. This member
      // specifies the column index of the input data matrix that this
      // task should handle.
      const int i ;

      // This member specifies the number of clusters into which we have
      // to partition the input data points.
      const int k ;

      // This member holds a reference to the input data matrix.
      const matrix& data ;

      // The optimal transfer stage has to reassign points to clusters
      // based on certain distance measures to the current cluster
      // centers. Instead of using references to the enclosing kmeans
      // object's members, we simply pass the optra task a pointer to the
      // kmeans object itself so it can access all the variables it
      // needs.
      hartigan_wong<T>* hw ;

      // When optra reassigns a point from one cluster to another, both
      // clusters become part of the live set. Instead of marking this
      // event in the enclosing kmeans object's m_cluster_data array, we
      // flag the condition in a local array and then copy results back
      // to kmeans's m_cluster_data.
      //
      // DEVNOTE: The original Hartigan-Wong implementation marked the
      // condition within the optra loop and also recomputed the new
      // cluster centers right away. However, we cannot do this in our
      // multithreaded implementation. Hence the need for a separate
      // array and for copying and calculating centroids at the end after
      // all the reassignments.
      //
      // NOTE(review): std::vector<bool> packs its elements into bits, so
      // concurrent writes to *different* slots from multiple optra tasks
      // touch shared bytes and are technically a data race even though
      // every writer stores true. Consider std::vector<char> or explicit
      // synchronization here.
      static std::vector<bool> live ;

      // The Hartigan-Wong algorithm terminates when the optimal transfer
      // stage stops reallocating points to different clusters. This flag
      // indicates whether there were any reallocations in one invocation
      // of optra.
      static bool realloc ;

   public:
      /**
         \brief  Initialization prior to launching parallel optra tasks.
         \param  k Total number of clusters.
         \return Nothing.

         Before the main kmeans thread can launch all the optra tasks to
         work on the entire dataset, it must first call this method to
         ensure that all the necessary setup is done properly.
      */
      // NOTE: We must use assign() rather than resize() here: resize()
      // value-initializes only NEWLY added slots, so live-set flags set
      // to true by a previous invocation of optra would otherwise leak
      // into this invocation's live set.
      static void reset(int k) {realloc = false; live.assign(k, false) ;}

      /**
         \brief  Setup a parallel optra task for point i.
         \param  i Index of the data point this task should process.
         \param  k Total number of clusters to be created.
         \param  data The input data matrix.
         \param  hw The enclosing hartigan_wong object.
         \return Properly initialized optra task.
      */
      optra_task(int i, int k, const matrix& data, hartigan_wong<T>* hw) ;

      /**
         \brief  Optimal transfer steps for i-th point.
         \return New cluster assignment for i-th point (if necessary).

         This method implements the body of the optimal transfer stage's
         main loop for one point. Multiple optra tasks executing in
         parallel will get the job done for the entire dataset.

         For each point i, the optimal transfer stage has to find the
         minimum R2 over all clusters. R2 for a cluster c is computed
         with the formula dist2(i, c) * n(c)/(n(c) + 1), where n(c)
         returns the number of points in cluster c, and dist2 calculates
         the square of the Euclidean distance between point i and cluster
         c's center.

         If the minimum R2 is less than dist2(i, L1) * n(L1)/(n(L1) - 1),
         where L1 is the cluster to which point i belongs prior to optra,
         then i will be moved from cluster L1 to the cluster
         corresponding to the minimum R2.
      */
      void operator()() const ;

   private:
      /**
         \brief  Find minimum R2 for the given data point.
         \param  i Index of the data point for which we want the minimum R2.
         \param  k The total number of clusters.
         \param  d The i-th data point.
         \param  a Whether to consider all clusters or live ones only.
         \return STL pair containing min R2 and corresponding cluster index.

         The optimal transfer stage has to find the minimum R2 for all the
         data points. This helper function implements the necessary
         computation for one data point, i.e., for point i, it finds the
         cluster that has the minimum R2, where R2 = dist2(i,c) * n(c)/(n(c)+1).

         In the above expression, c is the cluster index, n(c) is the
         number of points in cluster c, and dist2(i,c) is a function that
         returns the square of the Euclidean distance between point i and
         the center of cluster c.

         To reduce the amount of computation, the Hartigan-Wong algorithm
         uses the notion of a live set. The optimal transfer stage has to
         find the minimum R2 for a data point only over the clusters in
         the live set. Thus, if cluster c belongs to the live set, this
         function will compute min R2 over all the clusters. If, however,
         cluster c does not belong to the live set, then we compute min
         R2 only over clusters in the live set. The fourth parameter,
         viz., a, is used to determine whether we should iterate over all
         the clusters (step 4a in the Hartigan-Wong paper) or only the
         clusters in the live set (step 4b in the paper).

         Once it's done, this function will return the minimum R2 and the
         corresponding cluster index to its caller using an STL pair. The
         first element of this pair is the minimum R2; the second element is
         the corresponding cluster index.
      */
      std::pair<T, int>
      compute_min_r2(int i, int k, const vector& d, bool a) const ;

   public:
      /// \return True if optra reallocated points from one cluster to another.
      static bool reallocs() {return realloc ;}

      /// \return True if i-th cluster is in live set after optra.
      static bool live_set(int i) {return live[i] ;}
   } ;

   /**
      \brief  The quick transfer stage.
      \param  k The number of clusters to be created.
      \param  data The input data matrix.
      \return Nothing.

      This method implements the quick transfer stage, whose purpose is
      to reassign points, if necessary, to their respective
      second-closest clusters (i.e., the one to which they will most
      likely move in the next optra).

      In the quick transfer stage, we have to compute r1 and r2 for each
      point using the following formulae:

      \verbatim
          r1 = dist2(i, c1) * n(c1)/(n(c1) - 1)
          r2 = dist2(i, c2) * n(c2)/(n(c2) + 1)
      \endverbatim

      In the above expressions, i denotes the i-th point, c1 and c2 are
      its closest and second-closest clusters respectively, n(c) returns
      the number of points in cluster c, and dist2(i, c) returns the
      square of the Euclidean distance between point i and the center of
      cluster c.

      If r2 < r1, qtran will swap the closest and second-closest
      clusters and update the cluster centroids. It will also make these
      two clusters part of the live set.

      After it is done with optra and before calling qtran, the kmeans
      main loop must recompute the cluster centroids to ensure that qtran
      has the centroids associated with optra's latest reassignments.
   */
   void qtran(int k, const matrix& data) ;
} ;

// Capture the kmeans interface object's parameters in one place and
// spin up the worker threads used to parallelize Hartigan-Wong.
template<typename T>
hartigan_wong<T>::params::params(std::vector<int>& clusters, matrix& centroids,
                                 int max_iterations, int num_threads)
   : clusters(clusters), centroids(centroids),
     max_iterations(max_iterations), pool(num_threads)
{}

// A freshly created cluster starts out empty (zero member points) and
// as part of the live set.
template<typename T>
hartigan_wong<T>::cluster_data::cluster_data()
{
   n    = 0 ;
   live = true ;
}

// Hartigan-Wong main loop: initialize, then alternate optra and qtran
// until optra makes no more reallocations (or we hit the iteration
// cap). Returns the number of iterations taken.
template<typename T>
int hartigan_wong<T>::operator()(int k, const matrix& data, int x, int t,
                                 std::vector<int>& clusters, matrix& centroids)
{
   // Store parameters to avoid constantly passing them as function
   // arguments. This will also create the thread pool.
   m_params.reset(new params(clusters, centroids, x, t)) ;

   // After initial cluster centroids computation (done by kmeans
   // interface object), we have to find the closest and second-closest
   // clusters for each point.
   find_two_closest_centroids(k, data) ;

   // Initialize the intermediate clustering computations
   m_cluster_data.clear() ;
   m_cluster_data = std::vector<cluster_data>(k, cluster_data()) ;
   m_swapped.resize(data.size2()) ;

   // Recompute centroids using latest cluster assignments made above
   update_centroids(k, data) ;

   // Main loop: optra and qtran till no more reallocs in optra
   int i = 0 ;
   try
   {
      for (i = 0; i < m_params->max_iterations; ++i)
      {
         bool realloc = optra(k, data) ;
         update_centroids(k, data) ;
         if (!realloc) // no more reallocations in optra ==> we're done
            break ;
         qtran(k, data) ;
      }
   }
   catch (kmeans_errors::empty_cluster& e)
   {
      // Record the iteration at which clustering failed and rethrow.
      //
      // NOTE: We use a bare throw rather than "throw e" so that the
      // original in-flight exception object is propagated without
      // copying or slicing (e is a reference to that object, so the
      // iteration number recorded above is preserved in the rethrown
      // exception).
      e.iteration = i ;
      throw ;
   }
   return i ;
}

// Determine the closest and second-closest cluster centers for every
// data point, filling in m_params->clusters and m_clusters2.
template<typename T>
void hartigan_wong<T>::find_two_closest_centroids(int k, const matrix& data)
{
   const int n = data.size2() ;
   m_params->clusters.resize(n) ;
   m_clusters2.resize(n) ;

   // Farm out one task per data point: each task independently finds
   // the two nearest cluster centers for its point, so all n tasks can
   // run in parallel on the internal thread pool.
   for (int p = 0; p < n; ++p)
   {
      two_closest_centroids_finder task(k, data, p,
                                        m_params->centroids,
                                        m_params->clusters,
                                        m_clusters2) ;
      m_params->pool.add_task(task) ;
   }

   // Block until the pool has processed every point.
   m_params->pool.wait() ;
}

// Record everything a closest-clusters task needs: the input data, the
// current centroids, the point to work on, and where to put the results.
template<typename T>
hartigan_wong<T>::two_closest_centroids_finder::
two_closest_centroids_finder(int num_clusters, const matrix& input, int index,
                             const matrix& centroids,
                             std::vector<int>& closest,
                             std::vector<int>& second_closest)
   : m_data(input), m_centroids(centroids), m_num_clusters(num_clusters),
     m_index(index), m_clusters(closest), m_clusters2(second_closest)
{}

// Find the two cluster centers nearest to the data point this task is
// responsible for, storing their indices in m_clusters[m_index] and
// m_clusters2[m_index].
template<typename T>
void hartigan_wong<T>::two_closest_centroids_finder::operator()() const
{
   using boost::numeric::ublas::column ;

   // The point whose two nearest cluster centers we must determine.
   hartigan_wong<T>::vector p = column(m_data, m_index) ;

   // Track the best and second-best squared distances seen so far,
   // together with the corresponding cluster indices. (Squared
   // distances order the same as true distances, so no square roots.)
   T best_d   = std::numeric_limits<T>::max() ;
   T second_d = std::numeric_limits<T>::max() ;
   int best = -1, second = -1 ;

   for (int c = 0; c < m_num_clusters; ++c)
   {
      const T d = dist2<T>(p, column(m_centroids, c)) ;
      if (d < best_d) // cluster c beats the current closest
      {
         second   = best ;   // old closest becomes second closest
         second_d = best_d ;
         best     = c ;
         best_d   = d ;
      }
      else if (d < second_d) // cluster c beats only the second closest
      {
         second   = c ;
         second_d = d ;
      }
   }

   // Publish the results; each task owns its own slot, so no locking is
   // required.
   m_clusters [m_index] = best ;
   m_clusters2[m_index] = second ;
}

// Recompute the k cluster centers from the current assignments in
// m_params->clusters, also refreshing the per-cluster point counts.
// Callers must have zeroed m_cluster_data[*].n beforehand.
template<typename T>
void hartigan_wong<T>::update_centroids(int k, const matrix& data)
{
   using boost::numeric::ublas::column ;

   // Start from all-zero centroids; the loop below accumulates the
   // per-cluster sums into them.
   m_params->centroids =
      boost::numeric::ublas::zero_matrix<T>(m_params->centroids.size1(), k) ;

   // Accumulate each cluster's member points and count them.
   const int n = data.size2() ;
   for (int p = 0; p < n; ++p)
   {
      const int c = m_params->clusters[p] ;
      column(m_params->centroids, c) += column(data, p) ;
      ++m_cluster_data[c].n ;
   }

   // Divide each cluster's sum by its point count to get the mean,
   // i.e., the centroid. Empty clusters (most probably due to a bad
   // user-supplied centroid init policy) are left at zero instead of
   // triggering a kmeans_errors::empty_cluster exception; this hacky
   // concession is what keeps the parallel optra stage working (see
   // the class documentation).
   for (int c = 0; c < k; ++c)
   {
      const int count = m_cluster_data[c].n ;
      if (count > 0)
         column(m_params->centroids, c) /= count ;
   }
}

// The optimal transfer stage: reassign points to other clusters
// whenever doing so reduces the within-cluster sum-of-squares. Returns
// true if any point changed clusters.
template<typename T>
bool hartigan_wong<T>::optra(int k, const matrix& data)
{
   // Reset the per-invocation optra bookkeeping (live set and realloc
   // flag), then launch one parallel sub-task per eligible data point.
   optra_task::reset(k) ;
   const int n = data.size2() ;
   for (int p = 0; p < n; ++p)
   {
      // Points that are the sole members of their respective clusters
      // stay put: moving them would leave an empty cluster behind.
      // Only clusters holding more than one point participate.
      const int c = m_params->clusters[p] ;
      if (m_cluster_data[c].n > 1)
         m_params->pool.add_task(optra_task(p, k, data, this)) ;
   }
   m_params->pool.wait() ;

   // Copy the live set the tasks computed back into m_cluster_data and
   // zero the per-cluster counts so that the subsequent call to
   // update_centroids() starts from a clean slate.
   for (int c = 0; c < k; ++c)
   {
      cluster_data& cd = m_cluster_data[c] ;
      cd.n    = 0 ;
      cd.live = optra_task::live_set(c) ;
   }
   return optra_task::reallocs() ;
}

// New live set computed by optra tasks (shared across all tasks of one
// optra invocation; see optra_task::reset()).
template<typename T>
std::vector<bool> hartigan_wong<T>::optra_task::live ;

// Flag recording whether ANY optra task reallocated a point during the
// current optra invocation (it is a boolean, not a count).
template<typename T>
bool hartigan_wong<T>::optra_task::realloc ;

// Stash away the state one optra task needs: its point's column index,
// the cluster count, the input data, and the enclosing algorithm object.
template<typename T>
hartigan_wong<T>::optra_task::
optra_task(int index, int num_clusters, const matrix& input,
           hartigan_wong<T>* owner)
   : i(index), k(num_clusters), data(input), hw(owner)
{}

// Optra task execution: decide whether point i should move from its
// current cluster j to the cluster with the minimum R2, and record the
// decision in the enclosing hartigan_wong object.
template<typename T>
void hartigan_wong<T>::optra_task::operator()() const
{
   using boost::numeric::ublas::column ;

   const int j = hw->m_params->clusters[i] ;
   cluster_data& c = hw->m_cluster_data[j] ;

   // R1 = dist2(i, j) * n(j)/(n(j) - 1). The division cannot be by
   // zero: optra() only creates tasks for points whose cluster holds
   // more than one member, so c.n > 1 here.
   vector d = column(data, i) ;
   T r1 = dist2<T>(d, column(hw->m_params->centroids, j)) * c.n/(c.n - 1) ;
   const std::pair<T, int> min_r2 = compute_min_r2(i, k, d, c.live);
   if (min_r2.first < r1) {
      // DEVNOTE: Each optra task works on a different point.
      // Therefore, we do not need a mutex here. Also, the kmeans
      // object's live set flag is only read by the optra tasks. The
      // new state is recorded in optra_task::live. However, since all
      // the slots in optra_task::live always get assigned true by all
      // optra tasks, again, we don't need a mutex. The realloc flag is
      // also always set to true by all optra tasks and so does not
      // require a mutex.
      //
      // NOTE(review): the above reasoning is shaky for optra_task::live
      // because std::vector<bool> packs its elements into bits --
      // concurrent writes to *different* slots touch shared bytes and
      // are technically a data race even though every writer stores
      // true. Confirm, or switch to std::vector<char>/synchronization.
      hw->m_clusters2[i] = j ; // current closest cluster becomes 2nd closest
      hw->m_params->clusters[i] = min_r2.second;//min R2 cluster becomes closest
      live[j] = live[min_r2.second] = true ;    //both clusters are now live
      realloc = true ;
   }
   else
      hw->m_clusters2[i] = min_r2.second ;
}

// For the i-th data point, find the cluster with the minimum
// R2 = dist2(i,c) * n(c)/(n(c)+1), considering either every cluster
// (step 4a of Hartigan-Wong) or only the live set (step 4b), and
// skipping the point's own closest and second-closest clusters.
template<typename T>
std::pair<T, int>
hartigan_wong<T>::optra_task::
compute_min_r2(int i, int k, const vector& d, bool all_clusters) const
{
   using std::make_pair ;
   using boost::numeric::ublas::column ;

   const int closest = hw->m_params->clusters[i] ;
   const int second  = hw->m_clusters2[i] ;

   T   best_r2      = std::numeric_limits<T>::max() ;
   int best_cluster = -1 ;
   for (int c = 0; c < k; ++c)
   {
      // Never consider the point's own closest/second-closest clusters;
      // when restricted to the live set, skip non-live clusters too.
      const cluster_data& cd = hw->m_cluster_data[c] ;
      if (c == closest || c == second || (!all_clusters && !cd.live))
         continue ;

      // R2 for point i and cluster c; keep the running minimum.
      T r2 = dist2<T>(d, column(hw->m_params->centroids, c)) * cd.n/(cd.n + 1) ;
      if (r2 < best_r2)
      {
         best_r2      = r2 ;
         best_cluster = c ;
      }
   }
   return make_pair(best_r2, best_cluster) ;
}

/*
   A Note on Updating the Mean of a Set:

   Consider a set of three numbers {a, b, c}. The mean of this set is
   (a+b+c)/3. Now, let us say we remove c from this set. The new average
   will be (a+b)/2, which we can write as:

     new_average = (a+b)/2
                 = (a+b)/2 + c/2 - c/2
                 = (a+b+c)/2 - c/2
                 = (a+b+c)*3/(3*2) - c/2
                 = ((a+b+c)/3 * 3 - c)/2
                 = (old_average * old_count - removed_element)/new_count

   If we add a new number d to our original set, the new average can be
   computed as:

     new_average = (a+b+c+d)/4
                 = (a+b+c)/4 + d/4
                 = (a+b+c)*3/(3*4) + d/4
                 = ((a+b+c)/3 * 3 + d)/4
                 = (old_average * old_count + new_element)/new_count

   Although the above discussion is in terms of scalars, it also works
   for vectors.
*/
// Quick-transfer (qtran) stage of the Hartigan-Wong algorithm: sweep
// repeatedly over all points, swapping a point between its closest and
// second-closest clusters whenever the swap lowers the weighted squared
// distance (R2 < R1), and incrementally updating the two affected
// centroids after every swap (see the mean-update note above).
//
// \param k    number of clusters
// \param data input samples; each column is one data point
template<typename T>
void hartigan_wong<T>::qtran(int k, const matrix& data)
{
   using boost::numeric::ublas::column ;
   using boost::numeric::ublas::zero_vector ;

   // Mark all clusters as not live; they will become live if points move
   // from their respective closest to their second-closest clusters.
   for (int i = 0; i < k; ++i)
      m_cluster_data[i].live = false ;

   // According to the original Hartigan-Wong paper, qtran continues as
   // long as there are transfers from closest to second-closest
   // clusters. However, in some perverse situations, this can result in
   // an infinite loop because just one point or a small number of points
   // may keep oscillating back and forth between its or their respective
   // closest and second-closest clusters (which can happen because the
   // centroids update after a transfer can cause R1 and R2 to reverse
   // their inequality).
   //
   // To prevent this infinite loop, this implementation of the
   // Hartigan-Wong k-means algorithm allows a point to be transferred
   // from its closest to its second-closest cluster only once. We use
   // this vector to keep track of whether or not a point has already
   // been transferred.
   std::vector<bool> swapped(data.size2(), false) ;

   // The main qtran loop: keep going as long as there are transfers
   bool transfer ;
   do
   {
      transfer = false ;

      // qtran inner loop: move each point if its R2 < R1
      const int n = data.size2() ;
      for  (int i = 0; i < n; ++i)
      {
         // If point i has already been swapped, don't swap it again.
         if (swapped[i])
            continue ;

         // If point i is the only one in its cluster, skip ahead to the
         // next point (because we don't want any empty clusters).
         const int L1 = m_params->clusters[i] ;
         cluster_data& c1 = m_cluster_data[L1] ;
         if (c1.n < 2)
            continue ;

         // Calculate R1 and R2 for point i and transfer it from its
         // closest to its second-closest cluster if R2 < R1. The
         // n/(n-1) and n/(n+1) factors are the Hartigan-Wong weights:
         // R1 is the cost of keeping point i in its closest cluster,
         // R2 the cost of moving it to its second closest.
         vector d = column(data, i) ;
         const int L2 = m_clusters2[i] ;
         cluster_data& c2 = m_cluster_data[L2] ;
         T r1 = dist2<T>(d, column(m_params->centroids, L1)) * c1.n/(c1.n - 1) ;
         T r2 = dist2<T>(d, column(m_params->centroids, L2)) * c2.n/(c2.n + 1) ;
         if (r2 < r1)
         {
            m_clusters2[i] = L1 ; // current closest becomes 2nd closest
            m_params->clusters[i] = L2 ; // 2nd closest cluster becomes closest
            swapped[i] = transfer = c1.live = c2.live = true ;

            // Update cluster centers after the swap (see comment
            // preceding function to understand the logic applied here).
            --c1.n ; // new count of cluster c1 (source cluster)
            if (c1.n > 0) // always true here: the c1.n < 2 guard above
                          // guarantees the source held at least 2 points
               column(m_params->centroids, L1) =
                  (column(m_params->centroids, L1) * (c1.n + 1) - d)/c1.n ;
            else // defensive: source had just one point and is now empty
               column(m_params->centroids, L1) = zero_vector<T>(data.size1()) ;
            ++c2.n ; // new count of cluster c2 (destination cluster)
            if (c2.n > 1)
               column(m_params->centroids, L2) =
                  (column(m_params->centroids, L2) * (c2.n - 1) + d)/c2.n ;
            else // destination cluster was empty and now has one point
               column(m_params->centroids, L2) = d ;
         }
      }
   }
   while (transfer) ;
}

//--------------- K-MEANS IMPLEMENTATION: DHILLON-MODHA -----------------

/**

   Dhillon, I. S., Modha, D. S.
   A Data Clustering Algorithm on Distributed Memory Multiprocessors.
   Large-Scale Parallel Data Mining, Lecture Notes in Artificial
   Intelligence, March 2000, 1759:245--260, Springer-Verlag.
*/
template<typename T>
class dhillon_modha {
   // NOTE(review): placeholder only -- the Dhillon-Modha parallel
   // k-means algorithm cited above has not been implemented yet.
} ;

} // namespace kmeans_impl

//-----------------------------------------------------------------------

} // namespace gist

#endif

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */
