/**
   \file  kmeans.hh
   \brief Defines a generic API for multithreaded k-means clustering.

   This file implements the k-means clustering algorithm as described in
   the following papers:

   Hartigan, J. A., Wong, M. A.
   Algorithm AS 136: A K-Means Clustering Algorithm
   Journal of the Royal Statistical Society, Series C (Applied
   Statistics), 1979, 28(1):100--108.

   Maitra, R., Peterson, A. D., Ghosh, A. P.
   A Systematic Evaluation of Different Methods for Initializing the
   K-means Clustering Algorithm.
   IEEE Transactions on Knowledge and Data Engineering, 2010.

   Bradley, P. S., Fayyad, U. M.
   Refining Initial Points for K-Means Clustering.
   Proc. 15th Intl Conf. on Machine Learning, Morgan Kaufmann, San
   Francisco, CA, 1998, 91--99.

   To speed things up, we parallelize as many steps as possible in the
   basic Hartigan-Wong k-means.

   NOTE: This is a header-only interface. Thus, clients only need to
   include this file and do not have to link with an object module.
   However, since we use Boost.Thread for the multithreading
   functionality, you will need to link against libboost_thread
   eventually.

   NOTE 2: This algorithm does not really belong in libgist. Ideally, it
   should be part of a machine learning library (Boost.ML anyone?).
   However, at the time of writing (circa November 2011), since we were
   unable to find a suitable generic C++ implementation of multithreaded
   k-means, we had to implement it as part of libgist.
*/

/*
   This file is part of libgist.

   libgist is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 2 of the License, or (at your
   option) any later version.

   libgist is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with libgist; if not, write to the Free Software Foundation,
   Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/

/*
   REVISION HISTORY

   $HeadURL: http://libgist.googlecode.com/svn/branches/wu_dev/src/kmeans.hh $
   $Id: kmeans.hh 114 2012-01-22 09:20:24Z libgist@gmail.com $
*/

#ifndef GIST_KMEANS_DOT_HH
#define GIST_KMEANS_DOT_HH

//------------------------------ HEADERS --------------------------------

// Boost
#include <boost/thread/thread.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/thread/locks.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <boost/thread/mutex.hpp>

#include <boost/random/variate_generator.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/mersenne_twister.hpp>

#include <boost/numeric/ublas/matrix_proxy.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>

#include <boost/lambda/lambda.hpp>
#include <boost/function/function0.hpp>
#include <boost/timer.hpp>
#include <boost/tuple/tuple.hpp>

// Standard C++
#include <iostream>
#include <algorithm>
#include <numeric>
#include <queue>
#include <map>
#include <vector>
#include <stdexcept>
#include <limits>
#include <utility>

// Standard C
#include <time.h>

//---------------------------- NAMESPACE --------------------------------

namespace gist {

//------------------------- K-MEANS POLICIES ----------------------------

/**
   \namespace kmeans_policies
   \brief Namespace encapsulating various policies provided by this
   library.

   libgist's implementation of the k-means algorithm attempts to be as
   generic as possible. Clients may customize it by providing function
   objects to implement different aspects of the k-means algorithm that
   they need done in some specific way.

   However, to ease the burden on end-users, this library provides some
   such policies and designates suitable defaults. These policies are
   defined in this namespace so as to mark them clearly for what they
   are.
*/
namespace kmeans_policies {

/**
   \brief Initialize k-means centroids using the method described by
   Hartigan and Wong.

   An important initialization step for Hartigan and Wong's k-means is
   selecting the cluster centers. This determines how good the clustering
   results will eventually be. libgist's k-means allows clients to
   specify a suitable initialization policy for this purpose.

   This particular function object initializes the centroids by sorting
   the input data points on the basis of their distance to the overall
   mean of the dataset and then picking k points offset from the first
   one by n/k (where n is the total number of data points).

   According to the Hartigan-Wong paper, this initialization method
   guarantees that no cluster will be empty when we seed the initial
   cluster assignments.

   NOTE: This is the default cluster centroid initialization policy.
*/
template<typename T>
class init_centroids_hw {
   // Convenient shorthands for the Boost.uBLAS types used by this policy.
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;
public:
   // Returns a d-by-k matrix whose columns are the initial cluster
   // centers, picked from the d-by-n input matrix (data points are
   // arranged as column vectors).
   matrix operator()(int k, const matrix& data) ;
private:
   // Helper comparator: orders two data points (identified by their
   // column indices into the input matrix) by their distance to the
   // overall dataset mean, which is computed once in the constructor.
   class dist_to_mean {
      const matrix& data ; // the input data being clustered (not owned)
      vector mean ;        // column-wise average of all the data points
   public:
      dist_to_mean(const matrix& data) ;
      bool operator()(int i, int j) const ;
   } ;
} ;

// Pick the k initial cluster centers by sorting the data points on
// their distance to the dataset mean and then choosing every (n/k)-th
// point from the sorted order. Returns a d-by-k matrix of centroids
// arranged as column vectors.
//
// Throws std::invalid_argument unless 0 < k <= n. Without this check,
// k == 0 would trigger an integer division by zero below, and k > n
// would make the offset factor zero so the same data point gets
// duplicated k times -- silently breaking this policy's "no empty
// cluster" guarantee.
template<typename T>
typename init_centroids_hw<T>::matrix
init_centroids_hw<T>::
operator()(int k, const matrix& data)
{
   const int n = static_cast<int>(data.size2()) ;
   if (k <= 0 || n < k)
      throw std::invalid_argument(
         "init_centroids_hw: k must be in the range [1, n]") ;

   // First, we create a vector of indices referring to the columns of
   // the input data matrix and then sort these indices based on the
   // distances between the points they index and the dataset's mean.
   std::vector<int> points(n) ;
   for (int i = 0; i < n; ++i)
      points[i] = i ;
   std::sort(points.begin(), points.end(), dist_to_mean(data)) ;

   // Next, we pick k points from the sorted data so that each point is
   // at least n/k elements away from the previous one...
   matrix centroids(data.size1(), k) ;
   const int f = n/k ;
   for (int i = 0, j = 0; i < k; ++i, j += f)
   {
      using boost::numeric::ublas::column ;
      column(centroids, i) = column(data, points[j]) ;
   }
   return centroids ;
}

// Helper function object for sorting the data points based on their
// respective distances to the dataset mean.
template<typename T>
init_centroids_hw<T>::dist_to_mean::dist_to_mean(const matrix& d)
   : data(d)
{
   using boost::numeric::ublas::zero_vector ;
   using boost::numeric::ublas::column ;

   // Compute dataset mean
   mean = zero_vector<T>(data.size1()) ;
   const int n = data.size2() ;
   for  (int i = 0; i < n; ++i)
      mean += column(data, i) ;
   mean /= n ;
}

// Returns true if data point i is closer to dataset mean than point j
template<typename T>
bool init_centroids_hw<T>::dist_to_mean::operator()(int i, int j) const
{
   using boost::numeric::ublas::column ;
   using boost::numeric::ublas::inner_prod ;

   vector a = column(data, i) - mean ; // vector from dataset mean to point i
   vector b = column(data, j) - mean ; // vector from dataset mean to point j

   // To compare the distance between point i and the dataset mean to the
   // distance between point j and the dataset mean, we use the dot
   // products of the vectors a and b with themselves. The dot product of
   // a vector with itself yields the square of the magnitude of that
   // vector. In this case, since a and b are the vectors from points i
   // and j respectively to the dataset mean, these dot products will
   // result in the square of the distances between points i and j
   // respectively and the dataset mean.
   //
   // NOTE: For comparing distances, the square of the respective
   // distances will yield the same result as the actual distances
   // themselves. Thus, we can avoid taking square roots here.
   return inner_prod(a, a) < inner_prod(b, b) ;
}

/**
   \brief Initialize k-means centroids randomly.

   An important initialization step for Hartigan and Wong's k-means is
   selecting the cluster centers. This determines how good the clustering
   results will eventually be. libgist's k-means allows clients to
   specify a suitable initialization policy for this purpose.

   This particular function object initializes the centroids by choosing
   k input data points at random to serve as the initial cluster centers.

   NOTE: Although we provide this random initialization policy, it is
   not recommended for general use as the initial cluster assignments
   based on a random selection of k data points can result in one or
   more empty clusters, which, in turn, will cause the Hartigan-Wong
   algorithm to fail.
*/
template<typename T>
class init_centroids_random {
   // Convenient shorthands for the Boost.uBLAS types used by this policy.
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;
public:
   // Returns a d-by-k matrix whose columns are k randomly chosen data
   // points from the d-by-n input matrix (data points are arranged as
   // column vectors).
   matrix operator()(int k, const matrix& data) ;
} ;

// Randomly pick k *distinct* data points to serve as the initial
// cluster centers. Returns a d-by-k matrix of centroids arranged as
// column vectors.
//
// NOTE: A straightforward implementation that draws k independent
// random indices samples with replacement, i.e., it can pick the same
// data point more than once. Duplicate centroids guarantee at least
// one empty cluster, which is precisely the failure mode that makes
// the Hartigan-Wong algorithm blow up (see the class documentation).
// To avoid that, we perform a partial Fisher-Yates shuffle so the k
// chosen column indices are guaranteed to be distinct.
//
// Throws std::invalid_argument unless 0 < k <= n. Without this check,
// an empty data matrix would make data.size2() - 1 wrap around (size2
// returns an unsigned type), and asking for more clusters than there
// are points necessarily leaves some clusters empty.
template<typename T>
typename init_centroids_random<T>::matrix
init_centroids_random<T>::
operator()(int k, const matrix& data)
{
   const int n = static_cast<int>(data.size2()) ;
   if (k <= 0 || n < k)
      throw std::invalid_argument(
         "init_centroids_random: k must be in the range [1, n]") ;

   boost::mt19937 gen(static_cast<unsigned int>(time(0))) ;

   // Start with all the column indices [0, n)...
   std::vector<int> idx(n) ;
   for (int i = 0; i < n; ++i)
      idx[i] = i ;

   // ...and, in each iteration, swap a uniformly chosen index from the
   // not-yet-picked tail [i, n) into slot i. After iteration i, the
   // prefix idx[0..i] holds i+1 distinct indices chosen uniformly at
   // random (partial Fisher-Yates shuffle).
   matrix centroids(data.size1(), k) ;
   for (int i = 0; i < k; ++i)
   {
      boost::uniform_int<> dist(i, n - 1) ;
      std::swap(idx[i], idx[dist(gen)]) ;

      using boost::numeric::ublas::column ;
      column(centroids, i) = column(data, idx[i]) ;
   }
   return centroids ;
}

} // namespace kmeans_policies

//--------------------------- K-MEANS CLASS -----------------------------

/**
   \brief Encapsulation of multithreaded k-means clustering.

   This class implements Hartigan and Wong's version of the k-means
   clustering algorithm, taking advantage of parallelization
   opportunities whenever and wherever possible.

   Although the class's innards are quite complicated, its public
   interface is very simple. Clients should create an instance (usually,
   a temporary will suffice) and then, depending on exactly what they
   want returned by the clustering operation, invoke the appropriate
   clustering method. Here is some sample code illustrating typical usage
   of this class:

   \code
       typedef boost::numeric::ublas::matrix<float> matrix ;
       matrix data = get_input_data_matrix_somehow() ;
       matrix centroids = gist::kmeans<>().centroids(k, data) ;
   \endcode

   As shown above, the input data to be clustered has to be supplied via
   a Boost.uBLAS matrix. The data points in this matrix are expected to
   be arranged as column vectors. Thus, the number of rows in the input
   matrix will correspond to the number of dimensions in the data and the
   number of columns will be the number of input data points to be
   clustered.

   The first template parameter to this class should be a suitable
   numeric type specifying the contents of the input matrix described
   above. If clients do not supply this type explicitly, it will default
   to float.

   The second template parameter is an initialization policy for the
   cluster centers. If not supplied, it defaults to
   kmeans_policies::init_centroids_hw, which picks k well-separated
   data points by sorting the input on the basis of each point's
   distance to the overall dataset mean (see that policy's
   documentation for details).

   However, if you need to supply a different centroids initialization
   policy, you should implement a suitable function object. The function
   call operator for this object will be passed two parameters, viz., k
   and data. The first parameter k is an int and specifies the number of
   clusters. The second parameter data is a const reference to the input
   matrix.

   The centroids initialization function object should return a matrix
   containing the initial cluster centers. Like the input data, these
   centroids should be arranged as column vectors. Thus, the matrix
   returned should have d rows, where d is the number of dimensions in
   the input data, i.e., the same number of rows as the input matrix. And
   it should have k columns, i.e., the number of clusters to be produced
   by k-means.

   If the client-supplied centroids initialization policy function object
   does not return a d-by-k matrix as described above, the clustering
   methods will throw a gist::kmeans::bad_centroids_initialization
   exception (which is derived from std::runtime_error).
*/
template<typename T = float,
         typename centroids_init_policy =
                     kmeans_policies::init_centroids_hw<T> >
class kmeans {
public:
   /// Convenient typedefs for the input matrix and its individual
   /// columns.
   /// \return
   //@{
   typedef boost::numeric::ublas::matrix<T> matrix ;
   typedef boost::numeric::ublas::vector<T> vector ;
   //@}

private:
   /// For each data point, the Hartigan-Wong k-means algorithm computes
   /// the cluster indices of the closest and second-closest clusters.
   /// These two data members are used to store these indices.
   /// \return
   std::vector<int> m_clusters, m_clusters2 ;

   /// In addition to the cluster indices, we also need to keep track of
   /// the cluster centers. This data member does just that.
   /// \return
   matrix m_centroids ;

   /// Various intermediate computations. See the Hartigan and Wong paper
   /// to better understand these variables.
   struct cluster_data {
      /// This variable keeps track of the current number of points in a
      /// cluster.
      /// \return
      int num_points ;

      /// In the optimal transfer stage, this variable records the last
      /// step at which the cluster was updated. In the quick transfer
      /// stage, it records the step at which the cluster was last
      /// updated plus N, where N is the total number of data points. The
      /// Hartigan-Wong paper refers to this variable as NCP.
      /// \return
      int last_update_step ;

      /// This flag indicates whether the cluster was updated in the last
      /// quick transfer stage. The Hartigan-Wong paper denotes it as
      /// ITRAN.
      /// \return
      bool qtran_updated ;

      /// Initialize the intermediate cluster data variables to their
      /// default values prior to beginning the clustering operation.
      /// \return Nothing.
      cluster_data() ;
   } ;

   /// As clustering proceeds, we update various pieces of data about the
   /// clusters with this member variable.
   /// \return
   std::vector<cluster_data> m_cluster_data ;

   /// This data member records a measure of the distance between a data
   /// point and the center of the cluster to which it belongs.
   /// Specifically, for point i, the i-th element of this vector will be
   /// dist2(i, c) * n(c)/(n(c) - 1), where c is the index of the cluster
   /// to which point i belongs, n(c) is the number of points in cluster
   /// c, and dist2(i, c) returns the square of the Euclidean distance
   /// between the point i and the centroid of cluster c.
   ///
   /// This distance measure is used to decide whether or not a point
   /// should be reallocated to another cluster during the optimal and
   /// quick transfer stages (see the Hartigan-Wong paper for the gory
   /// details).
   /// \return
   std::vector<T> m_distances ;

   /// By default, we will iterate a maximum of 100 times. However, we
   /// allow users to specify a different value for this parameter. We
   /// also keep track of the total number of iterations so that, at the
   /// end of the clustering, users can tell exactly how many iterations
   /// it took to cluster the input data.
   /// \return
   int m_max_iterations, m_iteration ;

   /**
      \brief A quick (but not too dirty) thread pool.

      This inner class provides the k-means implementation a relatively
      straightforward abstraction for queueing multiple tasks to be
      executed in parallel.

      DEVNOTE: Ideally, we would like to use a thread pool implementation
      provided by a readily available library. Unfortunately, at this
      time (circa November 2011), Boost.Thread does not implement a
      thread pool and Boost.ThreadPool is not yet a part of Boost.
   */
   class thread_pool {
      /// The pool of threads is implemented using a boost::thread_group.
      /// \return
      boost::thread_group m_threads ;

      /// When a client has a large job that can be parallelized, it
      /// should break that job into smaller, independent steps or tasks
      /// and then queue each of those sub-tasks with the thread pool.
      /// Each thread in the pool will retrieve the next task from the
      /// queue and execute it independently of the other threads.
      ///
      /// A task is simply a function or function object that takes no
      /// parameters and returns nothing. If tasks need to maintain
      /// state, they should probably be implemented as function objects.
      /// If they need to share variables and need synchronization,
      /// clients should take care of the details of mutual exclusion,
      /// etc.
      ///
      /// This data member holds the queue of tasks described above.
      /// \return
      std::queue<boost::function<void ()> > m_tasks ;

      /// Since all the threads in the pool access the task queue, we
      /// have to synchronize access to it with a mutex. Moreover, to
      /// ensure that the threads don't uselessly spin when the task
      /// queue is empty, we use a condition variable to have each thread
      /// block when there are no tasks to execute.
      /// \return
      //@{
      boost::mutex m_mutex ;
      boost::condition_variable m_cond ;
      //@}

      /// Once k-means is done, the thread pool should wind down. We use
      /// this flag to inform all the threads in the pool to gracefully
      /// quit.
      /// \return
      bool m_shutdown ;

      /// The shutdown flag defined above is set in the thread pool's
      /// destructor. The pool's threads themselves only read this flag.
      /// Thus, we use a read-write mutex to synchronize accesses to this
      /// variable.
      /// \return
      boost::shared_mutex m_shutdown_mutex ;

   public:
      /**
         \brief  Thread pool initialization.
         \param  num_threads Number of threads in the pool.
         \return Nothing.

         On instantiation, clients should specify how many threads the
         pool should have. The constructor will take care of launching
         that many threads.
      */
      thread_pool(int num_threads) ;

      /**
         \brief  Add a task to the thread pool.
         \param  f A function or function object implementing the task.
         \return Nothing.

         This method adds the given function or function object to the
         thread pool's queue of tasks. It will be executed when it
         reaches the front of the queue and when one of the threads in
         the pool becomes available.

         The task function takes no parameters and returns nothing. If
         some tasks need to share resources, they should be implemented
         in a way that ensures proper synchronization. That is, clients
         are responsible for implementing tasks so that they are
         thread-safe.
      */
      void add_task(boost::function<void ()> f) ;

      /// \return The number of threads in the pool.
      int size() const {return m_threads.size() ;}

   private:
      /**
         \brief  Thread pool's thread function.
         \param pool Pointer to the thread_pool object.
         \return Nothing.

         Each thread in the pool does the same thing: wait for a task and
         then execute it. In each iteration of this loop, we also check
         to see if the shutdown flag has been set and, if so, quit the
         loop, thereby, terminating each thread.
      */
      static void thread_func(thread_pool* pool) ;

   public:
      /**
         \brief  Thread pool clean-up.
         \return Nothing.

         In the thread pool's destructor, we set the shutdown flag and
         then wait for all the threads to exit. This will usually happen
         when the kmeans object goes out of scope, thus, invoking its
         embedded thread pool object's destructor.
      */
      ~thread_pool() ;
   } ;

   /// This member variable holds an internal thread pool that is used to
   /// parallelize the k-means algorithm whenever and wherever possible.
   /// \return
   thread_pool m_thread_pool ;

public:
   /**
      \brief  K-means initialization.
      \param  num_threads The number of threads for parallelization.
      \param  max_iterations The maximum number of iterations.
      \return Nothing.

      When this class is instantiated, it will setup its internal thread
      pool. If the size of the thread pool is not specified, the k-means
      algorithm will be single-threaded.

      Additionally, during instantiation, clients may specify the maximum
      number of iterations the clustering should take. The default
      maximum is 100 iterations.
   */
   kmeans(int num_threads = 1, int max_iterations = 100) ;

   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Cluster assignments for each data point plus the cluster centers.

      This function clusters the input data into k groups and returns the
      cluster assignments for each data point via an STL vector of
      integer cluster indices and the cluster centers via a matrix. These
      two return values, i.e., cluster assignments and centroids, are
      bundled together in an STL pair.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      The first return value is an STL vector containing n elements,
      i.e., one for each input data point. Each element in this vector
      will be a number in the range [0, k), indicating the index of the
      cluster to which that data point belongs.

      The second return value is a Boost.uBLAS matrix of T (where T is a
      numeric type passed to the kmeans template class; defaults to
      float). The size of this returned matrix is d-by-k and, like the
      input data matrix, is also arranged column-wise. That is, each of
      the k cluster centroids makes up one column.
   */
   std::pair<std::vector<int>, matrix> cluster(int k, const matrix& data) ;

   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Cluster assignments for each data point.

      This function clusters the input data into k groups and returns the
      cluster assignments for each data point via an STL vector of
      integer cluster indices. Use this function when you want to perform
      clustering and are only interested in the cluster assignments and
      don't care for the cluster centroids.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      The return value is an STL vector containing n elements, i.e., one
      for each input data point. Each element in this vector will be a
      number in the range [0, k), indicating the index of the cluster to
      which that data point belongs.
   */
   std::vector<int> clusters(int k, const matrix& data) ;

   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Centroids of the data clusters.

      This function clusters the input data into k groups and returns the
      coordinates of the cluster centers via a matrix. Use this function
      when you want to perform clustering and are only interested in the
      cluster centroids and not the cluster assignments themselves.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      The return value is a Boost.uBLAS matrix of T (where T is a numeric
      type passed to the kmeans template class; defaults to float). The
      size of this returned matrix is d-by-k and, like the input data
      matrix, is also arranged column-wise. That is, each of the k
      cluster centroids makes up one column.
   */
   matrix centroids(int k, const matrix& data) ;

   /**
      \brief  Change or retrieve the maximum number of iterations.
      \param  m The new maximum number of iterations.
      \return The maximum number of iterations.

      After initialization, the Hartigan-Wong k-means algorithm enters a
      loop wherein it performs:

      -# An optimal transfer step so as to reallocate data points to
         clusters in order to reduce the within cluster sum-of-squares.
      -# A quick transfer step to check if points should be reallocated
         to their next closest cluster.

      This loop will terminate when the optimal transfer step makes no
      further cluster reallocations.

      If the initial cluster centers were chosen well, this loop will
      usually converge fairly soon. However, in some cases, it may not.
      To prevent a runaway loop, we limit the maximum number of
      iterations of this loop. The default maximum is 100 iterations.
      However, clients may change this maximum to a different number and
      check the current maximum using these functions.

      After clustering is done, clients may also find it useful to see
      how many iterations were performed, which they can do by calling
      the iterations method.
   */
   //@{
   void max_iterations(int m)  {m_max_iterations = std::max(m, 1) ;}
   int  max_iterations() const {return m_max_iterations ;}
   int  iterations()     const {return m_iteration ;}
   //@}

private:
   /**
      \brief  Perform k-means clustering.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Nothing.

      This method is the top-level implementation of the k-means
      clustering algorithm. It is the one that actually performs the
      clustering. The public interface methods simply forward their calls
      to this internal function.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      When it is done, the cluster indices will be stored in the
      m_clusters data member and the cluster centers will be in
      m_centroids.
   */
   void find_clusters(int k, const matrix& data) ;

   /**
      \brief  Initialize the cluster centers.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Nothing.

      This method initializes the cluster centers for the k-means
      clustering algorithm. This initialization is actually performed by
      the centroids initialization policy. This function simply invokes
      that policy and checks the results.

      The centroids initialization policy is specified as the second
      template parameter of the kmeans class. The default policy,
      kmeans_policies::init_centroids_hw, picks the k cluster centers
      by sorting the data points on their distance to the dataset mean
      and choosing k points offset from each other by n/k.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      This function passes its parameters, viz., k and data, to the
      centroids initialization policy. It expects the policy function
      object to return a d-by-k matrix containing the initial values for
      the k cluster centers. If the matrix returned by the policy is not
      the expected size, i.e., d-by-k, this function will throw a
      bad_centroids_initialization exception (which is derived from
      std::runtime_error).

      When it is done, the initial cluster centers will be stored in the
      m_centroids data member.
   */
   void init_centroids(int k, const matrix& data) ;

   /**
      \brief  Find the two closest clusters for each data point.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Nothing.

      After initializing the cluster centers, the Hartigan-Wong algorithm
      computes the two closest clusters for each data point. This
      function performs the closest and second-closest cluster
      computation.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      After it is done, the m_clusters data member will contain the
      cluster indices of the closest clusters and m_clusters2 the indices
      of the second-closest centers. Both these members will have n
      elements each, one for each of the input data points. Each element
      will be a number in the range [0, k).

      To find the closest and second-closest cluster for a data point, we
      have to find the Euclidean distance between it and each of the k
      cluster centers. For d-dimensional points, each distance
      computation involves d multiplications and d-1 additions. As we have
      n points and k clusters, we will have to perform these
      multiplications and additions n*k times.

      Needless to say, for large d, n, and k, this closest cluster
      computation thingy will involve a whole lotta number crunching...
      Luckily, we can do this for one point independent of all the other
      points. Therefore, this computation is a prime candidate for
      parallelization.

      Consequently, this function simply breaks up the task of finding
      the closest and second-closest clusters for n data points into n
      tasks and hands them off to the internal kmeans thread_pool to get
      the job done as quickly as possible.
   */
   void find_two_closest_centroids(int k, const matrix& data) ;

   /**
      \brief Function object to find two closest centroids for each point.

      The k-means thread pool needs parameterless functions that return
      nothing. Since we need to use the k-means input data and other
      parameters and store the results of the closest centroids
      computations in member variables of the k-means class, we have to
      use a properly initialized function object for each task.

      This inner class is the function object described above.
   */
   class two_closest_centroids_finder {
      /// To find the two closest clusters for a data point, we need
      /// access to the k-means input data matrix plus the current
      /// cluster centers. These two data members store references to the
      /// necessary data structures.
      //@{
      const matrix& m_data ;
      const matrix& m_centroids;
      //@}

      /// We could determine the number of clusters K from the
      /// m_centroids data member. However, this parameter is readily
      /// available throughout the k-means API and is easily enough just
      /// passed in to this class.
      const int m_num_clusters ;

      /// Since finding the closest two clusters for one data point is
      /// independent of the same computation for other points, we break
      /// up the overall job into sub-tasks and execute them in parallel.
      /// Each sub-task finds the closest two clusters for one data
      /// point.
      ///
      /// This variable specifies the index of the data point on which
      /// this task must concentrate its efforts.
      const int m_index ;

      /// The indices of the two closest clusters are stored in the
      /// m_clusters and m_clusters2 member variables of the (outer)
      /// k-means object. These two references simply point to those
      /// variables.
      //@{
      std::vector<int>& m_clusters ;
      std::vector<int>& m_clusters2;
      //@}

      /// After queuing all the sub-tasks for finding the two closest
      /// centroids for each data point, the main k-means thread will
      /// wait for all the tasks to complete before proceeding with the
      /// rest of the k-means computations. These variables take care of
      /// the details of this wait operation.
      ///
      /// As each task completes its job of finding the two closest
      /// clusters for the data point indexed by m_index, it will up the
      /// count of tasks that have completed. Meanwhile, the main k-means
      /// thread will wait for this count to reach n, where n is the
      /// number of data points to be clustered.
      ///
      /// NOTE: Being static, these members are shared by every finder
      /// instance. Consequently, reset() must be called before queuing
      /// each new batch of tasks; otherwise wait() will see a stale
      /// count.
      //@{
      static size_t m_count ;
      static boost::mutex m_mutex ;
      static boost::condition_variable m_cond ;
      //@}

   public:
      /**
         \brief  Initialize function object for finding two closest clusters.
         \param  k The number of clusters.
         \param  data Input data matrix containing the points to be clustered.
         \param  i Index of point for which to find two closest clusters.
         \param  centroids Matrix containing current cluster centers.
         \param  c1 Cluster indices for closest clusters.
         \param  c2 Cluster indices for second closest clusters.

         When a closest centroids finder task is instantiated, it has to
         be supplied the appropriate parameters so that the function
         object has all the state it needs to get its job done.
      */
      two_closest_centroids_finder(int k, const matrix& data, int i,
                                   const matrix& centroids,
                                   std::vector<int>& c1,
                                   std::vector<int>& c2) ;

      /**
         \brief  Find the two closest clusters for point indexed by m_index.
         \return Nothing.

         This function checks the Euclidean distance between the data
         point indexed by m_index and each of the K cluster centers to
         find the data point's closest and second closest clusters.
      */
      void operator()() const ;

      /**
         \brief  Wait for all the closest cluster finding tasks to complete.
         \param  n The total number of tasks launched.
         \return Nothing.

         After launching all the sub-tasks to compute, in parallel, the
         two closest clusters for each data point, the main k-means
         thread will have to wait for the tasks to complete before it can
         proceed with the rest of the k-means clustering steps.

         This function implements the necessary wait operation.
      */
      static void wait(size_t n) ;

      /**
         \brief  Reset the task count.
         \return Nothing.

         Before launching the multiple parallel tasks to find the two
         closest centroids for each data point, the main k-means thread
         must first reset the task counter to zero by calling this
         function. Otherwise, the wait operation will not work properly.
      */
      static void reset() {m_count = 0 ;}
   } ;

   /**
      \brief  Calculate the latest cluster centers.
      \param  k Number of clusters.
      \param  data The input matrix containing the data points to be clustered.
      \return Nothing.

      This function computes the k cluster centers using the current
      cluster assignments stored in the m_clusters member variable.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      When it is done, the cluster centers will be stored in the
      m_centroids data member. Additionally, m_cluster_data will be
      initialized as described in the Hartigan-Wong paper.
   */
   void update_centroids(int k, const matrix& data) ;

   /**
      \brief  Calculate initial distance between each point and its cluster.
      \param  data The input matrix containing the data points to be clustered.
      \return Nothing.

      This function computes a measure how far each data point is from
      its cluster's center. It is meant to be used after the initial
      cluster allocations have taken place but before the main k-means
      loop commences.

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      When it is done, the point-to-cluster distance measures will be
      stored in the m_distances data member.

      DEVNOTE: Each element of the m_distances vector stores a*b/(b-1),
      where a is the square of the distance between that point and its
      cluster's center and b is the number of points in the cluster to
      which that point belongs.

      This is not the actual distance between the point and its cluster's
      center, but rather a measure of how far the point is from the
      center of its cluster. This distance measure is used at
      different stages of the Hartigan-Wong k-means algorithm to
      determine when a point should be reallocated from one cluster to
      another that is closer.
   */
   void init_distances(const matrix& data) ;

   /**
      \brief  The optimal transfer stage.
      \param  k Number of clusters to find in the input data.
      \param  data The input data arranged in a matrix.
      \return Number of reallocations, live clusters, and "live" points.

      This method implements the optimal transfer step described in the
      Hartigan-Wong paper. The goal of the optimal transfer stage is to
      reallocate points to clusters so as to induce the maximum reduction
      in the within-cluster sum-of-squares.

      Clustering will terminate when we perform n optimal transfer steps
      without any reallocations (n is the number of input data points).

      The input data should be arranged as column vectors of a d-by-n
      matrix. Thus, d, the number of rows in this matrix, is the data
      dimensionality, and n, the number of columns, corresponds to the
      number of data points to be clustered.

      In the optimal transfer stage, for each data point, we find the
      minimum R2 over all the clusters. R2 is computed using the formula
      dist2(i, c) * n(c)/(n(c) + 1), where c is the cluster to which
      point i belongs, n(c) is the number of points in cluster c, and dist2
      is a function that returns the square of the Euclidean distance
      between point i and the center of cluster c.

      If the minimum R2 for a point is less than the distance measure
      computed previously for it, we will reallocate the point to the
      cluster corresponding to the minimum R2.

      To aid the main loop in printing some useful stats about clustering
      progress, this function returns the total number of reallocations
      performed by the optimal transfer, the total number of live
      clusters it encountered, and the total number of points that were
      in live clusters (i.e., the number of live points).

      These three numbers are returned in a 3-tuple whose first element
      is the number of reallocations, whose second element is the number
      of live clusters, and whose third element is the number of live
      points.
   */
   boost::tuple<int, int, int> optra(int k, const matrix& data) ;

   /**
      \brief  Check if a cluster is in the live set.
      \param  i Index of the cluster to be checked.
      \param  n Number of data points to be clustered.
      \return True if cluster i belongs to the live set, false otherwise.

      The Hartigan-Wong algorithm uses the notion of a "live set" to help
      reduce the amount of computation the k-means clustering algorithm
      has to perform. If a cluster does not belong to the live set, then
      the optimal transfer stage can ignore it.

      To determine whether or not a cluster belongs to the live set, we
      check if it was updated in the most recent quick transfer stage. If
      so, it belongs to the live set throughout the current optimal
      transfer.

      If, however, it was not affected by the most recent quick transfer,
      then we check if the current optimal transfer reallocated a point
      to or from it. If so, then, again, it belongs to the live set.

      DEVNOTE: Initially, the algorithm is set up so that all points
      belong to the live set. This is achieved by setting the
      qtran_updated flag to true for all the clusters.
   */
   bool is_in_live_set(int i, int n) const ;

   /**
      \brief  Find minimum R2 for the given data point.
      \param  i Index of the data point for which we want the minimum R2.
      \param  k The total number of clusters.
      \param  data The input data points to be clustered.
      \param  all_clusters Whether to consider all clusters or live ones only.
      \return STL pair containing min R2 and corresponding cluster index.

      The optimal transfer stage has to find the minimum R2 for all the
      data points. This helper function implements the necessary
      computation for one data point, i.e., for point i, it finds the
      cluster that has the minimum R2, where R2 = dist2(i,c) * n(c)/(n(c)+1).

      In the above expression, c is the cluster to which point i belongs,
      n(c) is the number of points in cluster c, and dist2(i,c) is a
      function that returns the square of the Euclidean distance between
      point i and the center of cluster c.

      To reduce the amount of computation, the Hartigan-Wong algorithm
      uses the notion of a live set. The optimal transfer stage has to
      find the minimum R2 for a data point only over the clusters in the
      live set. Thus, if cluster c belongs to the live set, this function
      will compute min R2 over all the clusters. If, however, cluster c
      does not belong to the live set, then we compute min R2 only over
      clusters in the live set. The all_clusters parameter is used to
      determine whether we should iterate over all the clusters (step 4a
      in the Hartigan-Wong paper) or only the clusters in the live set
      (step 4b in the paper).

      Once it's done, this function will return the minimum R2 and the
      corresponding cluster index to its caller using an STL pair. The
      first element of this pair is the minimum R2; the second element is
      the corresponding cluster index.

      NOTE: The input data should be arranged as column vectors of a
      d-by-n matrix. Thus, d, the number of rows in this matrix, is the
      data dimensionality, and n, the number of columns, corresponds to
      the number of data points to be clustered.
   */
   std::pair<T, int> compute_min_r2(int i, int k, const matrix& data,
                                    bool all_clusters) const ;

   boost::tuple<int, int, int> qtran(int k, const matrix& data) ;

   /**
      \brief  Calculate square of Euclidean distance between two points.
      \param  a The first point.
      \param  b The second point.
      \return Square of Euclidean distance between a and b.

      This is a helper function that takes two vectors a and b and
      returns the square of the Euclidean distance between them. The
      points a and b must have the same dimensionality.

      This function is useful because cluster assignments are based on
      Euclidean distance. However, this function stops short of computing
      the actual distance, returning the square, to avoid an unnecessary
      square root operation. We can work with distance squares because,
      for comparison purposes, the square and square root will return the
      same result.
   */
   static T dist2(const vector& a, const vector& b) ;

public:
   /**
      \brief Exception to indicate centroids initialization failure.

      The Hartigan-Wong k-means algorithm requires an initial set of
      cluster centers before it can commence. By default, we select k
      data points using the method described in the "Additional Comments"
      section of the Hartigan-Wong paper.

      However, libgist's k-means class allows clients to specify an
      alternative centroids initialization policy by supplying an
      appropriate function object to the kmeans template.

      This function object must return a d-by-k matrix, where d is the
      input data dimensionality and k the number of clusters. If the
      matrix returned by the centroids initialization policy does not
      have these dimensions, the kmeans clustering methods will throw an
      instance of this class to indicate the error.
   */
   struct bad_centroids_initialization: public std::runtime_error {
      bad_centroids_initialization() ;
   } ;

   /**
      \brief Exception to indicate poor initial cluster center selection.

      The Hartigan-Wong k-means algorithm requires an initial set of
      cluster centers before it can commence. If, however, these initial
      cluster centers are not chosen properly and result in an empty
      cluster during the initial clustering step, we will throw an
      instance of this class to indicate the problem.

      The Hartigan-Wong implementation reports this with an error code
      IFAULT = 1.
   */
   struct empty_initial_cluster: public std::runtime_error {
      empty_initial_cluster() ;
   } ;
} ;

//----------------------- CLUSTERING INTERFACE --------------------------

// Initialization
//
// The iteration cap is clamped to at least one so that find_clusters()
// always makes progress. The thread count is forwarded to the internal
// thread pool unchecked. NOTE(review): a non-positive num_threads is
// presumably rejected or defaulted by the thread_pool class -- confirm.
template<typename T, typename P>
kmeans<T, P>::kmeans(int num_threads, int max_iter)
   : m_max_iterations(std::max(max_iter, 1)), m_iteration(0),
     m_thread_pool(num_threads)
{}

// Run the full clustering procedure and hand back both results: the
// per-point cluster assignments and the final cluster centers.
template<typename T, typename P>
std::pair<std::vector<int>, typename kmeans<T, P>::matrix>
kmeans<T, P>::
cluster(int k, const matrix& data)
{
   find_clusters(k, data) ;
   // Copy both result members out in a single pair...
   std::pair<std::vector<int>, matrix> result(m_clusters, m_centroids) ;
   return result ;
}

// Run the full clustering procedure but hand back only the per-point
// cluster assignments, discarding the centers.
template<typename T, typename P>
std::vector<int>
kmeans<T, P>::
clusters(int k, const matrix& data)
{
   find_clusters(k, data) ;
   std::vector<int> assignments(m_clusters) ;
   return assignments ;
}

// Run the full clustering procedure but hand back only the cluster
// centers, discarding the per-point assignments.
template<typename T, typename P>
typename kmeans<T, P>::matrix
kmeans<T, P>::
centroids(int k, const matrix& data)
{
   find_clusters(k, data) ;
   matrix centers(m_centroids) ;
   return centers ;
}

//---------------------- K-MEANS IMPLEMENTATION -------------------------

// Top-level routine implementing Hartigan and Wong's k-means algorithm
//
// Overall flow: seed the centroids via the init policy, assign each
// point its two nearest centers (in parallel), recompute the centers,
// set up the initial point-to-cluster distance measures, and then
// alternate optimal and quick transfer stages until optra makes no
// reallocations or we hit the iteration cap.
//
// NOTE(review): the std::cout timing output below looks like debug
// instrumentation; consider routing it through a logger or compiling it
// out for production builds.
template<typename T, typename P>
void kmeans<T, P>::find_clusters(int k, const matrix& data)
{
   boost::timer t ;
   init_centroids(k, data) ;
   double s = t.elapsed() ;
   std::cout << "took " << s << " seconds to init centroids\n" ;

   t.restart() ;
   find_two_closest_centroids(k, data) ;
   // NOTE(review): dividing by the pool size presumably converts CPU
   // time accumulated across the worker threads into an approximate
   // wall-clock figure -- confirm that boost::timer measures CPU time
   // on this platform.
   s = t.elapsed()/m_thread_pool.size() ;
   std::cout << "took " << s << " seconds to find two closest centroids\n" ;

   t.restart() ;
   update_centroids(k, data) ;
   s = t.elapsed() ;
   std::cout << "took " << s << " seconds to update centroids\n" ;

   t.restart() ;
   init_distances(data) ;
   s = t.elapsed() ;
   std::cout << "took " << s << " seconds to initialize distances\n" ;

   // Main Hartigan-Wong loop: alternate optimal and quick transfers.
   for (m_iteration = 0; m_iteration < m_max_iterations; ++m_iteration)
   {
      t.restart() ;
      boost::tuple<int, int, int> p = optra(k, data) ;
      s = t.elapsed() ;
      std::cout << "iteration " << m_iteration << ": optra: "
                << s << "s, "
                << p.get<0>() << " reallocs, "
                << p.get<1>() << " live clusters, "
                << p.get<2>() << " live points\n" ;
      if (p.get<0>() <= 0) // no more reallocations in optra ==> we're done
         break ;

      t.restart() ;
      p = qtran(k, data) ;
      s = t.elapsed() ;
      std::cout << "iteration " << m_iteration << ": qtran: "
                << s << "s, "
                << p.get<0>() << " transfers, "
                << p.get<1>() << " iterations, "
                << p.get<2>() << " steps\n" ;
   }
}

//---------------------- K-MEANS INITIALIZATION -------------------------

// Calculate initial cluster centers
//
// Delegates the actual seeding to the client-supplied policy and then
// verifies that the policy produced a d-by-k matrix, where d is the
// input data dimensionality.
template<typename T, typename init_policy>
void kmeans<T, init_policy>::init_centroids(int k, const matrix& data)
{
   init_policy seeder ;
   m_centroids = seeder(k, data) ;

   // Bad dimensions from the policy are a client error...
   const bool bad_rows = (m_centroids.size1() != data.size1()) ;
   const bool bad_cols = (static_cast<int>(m_centroids.size2()) != k) ;
   if (bad_rows || bad_cols)
      throw bad_centroids_initialization() ;
}

// Find the two closest clusters for each data point
//
// Since each point's search is independent of every other point's, we
// queue one finder task per point on the internal thread pool and let
// them run concurrently, blocking until all of them have reported in.
template<typename T, typename P>
void
kmeans<T, P>::
find_two_closest_centroids(int k, const matrix& data)
{
   const int num_points = data.size2() ;
   m_clusters. resize(num_points) ;
   m_clusters2.resize(num_points) ;

   // Clear the shared completion counter before queuing a new batch...
   two_closest_centroids_finder::reset() ;

   // ...then hand one task per data point to the thread pool.
   for (int i = 0; i < num_points; ++i)
   {
      two_closest_centroids_finder task(k, data, i, m_centroids,
                                        m_clusters, m_clusters2) ;
      m_thread_pool.add_task(task) ;
   }

   // Block until every task has signaled completion.
   two_closest_centroids_finder::wait(num_points) ;
}

// The number of parallel tasks completed. Zero-initialized (static
// storage); must be reset() before each batch of tasks is queued.
template<typename T, typename P>
size_t kmeans<T, P>::two_closest_centroids_finder::m_count ;

// Mutex to synchronize access to above count
template<typename T, typename P>
boost::mutex kmeans<T, P>::two_closest_centroids_finder::m_mutex ;

// Condition variable for waiting on above count to reach total number of
// input data points.
template<typename T, typename P>
boost::condition_variable kmeans<T, P>::two_closest_centroids_finder::m_cond ;

// Setup state for closest clusters computation task
//
// Each task receives read-only references to the input data and the
// current centroids, the index of the single point it is responsible
// for, and writable references to the outer kmeans object's result
// vectors (closest and second-closest cluster indices).
template<typename T, typename P>
kmeans<T, P>::two_closest_centroids_finder::
two_closest_centroids_finder(int k, const matrix& data, int i, const matrix& c,
                             std::vector<int>& c1, std::vector<int>& c2)
   : m_data(data), m_centroids(c), m_num_clusters(k), m_index(i),
     m_clusters(c1), m_clusters2(c2)
{}

// Find the two closest centers for a data point
template<typename T, typename P>
void kmeans<T, P>::two_closest_centroids_finder::operator()() const
{
   // Running best and runner-up squared distances; starting them at
   // "infinity" guarantees the first cluster examined always wins.
   T best   = std::numeric_limits<T>::max() ;
   T second = std::numeric_limits<T>::max() ;
   m_clusters [m_index] = -1 ; // index of closest cluster
   m_clusters2[m_index] = -1 ; // index of second closest cluster

   // The point this particular task is responsible for...
   using boost::numeric::ublas::column ;
   kmeans<T, P>::vector point = column(m_data, m_index) ;

   // Compare the point against every cluster center, maintaining the
   // indices of the nearest and second-nearest centers as we go.
   for (int c = 0; c < m_num_clusters; ++c)
   {
      const T d = kmeans<T, P>::dist2(point, column(m_centroids, c)) ;
      if (d < best)
      {
         // New champion: the old champion drops to runner-up...
         m_clusters2[m_index] = m_clusters[m_index] ;
         second = best ;
         // ...and cluster c takes over as the closest.
         m_clusters [m_index] = c ;
         best = d ;
      }
      else if (d < second) // cluster c is second closest so far
      {
         m_clusters2[m_index] = c ;
         second = d ;
      }
   }

   // Signal completion: bump the shared task counter and, if this task
   // was the last one, wake the main thread blocked in wait().
   boost::lock_guard<boost::mutex> lock(m_mutex) ;
   if (++m_count >= m_data.size2())
      m_cond.notify_all() ;
}

// API to allow main k-means thread to wait for all closest centroids
// tasks to complete.
//
// NOTE: On a thread resource error or interruption, we log the problem
// and return; the caller then proceeds even though some tasks may still
// be pending. NOTE(review): this looks like a deliberate best-effort
// choice -- confirm before relying on the results being complete in
// that (rare) case.
template<typename T, typename P>
void kmeans<T, P>::two_closest_centroids_finder::wait(size_t n)
{
   try
   {
      // Sleep until the completed-task count reaches n; the loop guards
      // against spurious wakeups.
      boost::unique_lock<boost::mutex> lock(m_mutex) ;
      while (m_count < n)
         m_cond.wait(lock) ;
   }
   catch (boost::thread_resource_error&)
   {
      std::cerr << "kmeans: thread " << boost::this_thread::get_id()
                << ": encountered an error waiting for "
                   "two_closest_centroids_finder to complete\n" ;
   }
   catch (boost::thread_interrupted&)
   {
      std::cerr << "kmeans: thread " << boost::this_thread::get_id()
                << ": interrupted waiting for "
                   "two_closest_centroids_finder to complete\n" ;
   }
}

// Calculate new cluster centers using latest closest cluster assignments
//
// Sums the points assigned to each cluster, counts each cluster's
// cardinality, and divides to get the new centers. Throws
// empty_initial_cluster if any cluster ends up with no points (i.e.,
// the initial centers were chosen poorly).
template<typename T, typename P>
void kmeans<T, P>::update_centroids(int k, const matrix& data)
{
   using boost::numeric::ublas::column ;

   // Reset the intermediate clustering computations. assign() replaces
   // the old contents in one step; the previous clear() followed by
   // assignment of a freshly constructed vector did the same work with
   // a redundant call and an extra temporary.
   m_cluster_data.assign(k, cluster_data()) ;

   // Init all cluster centroids to zero
   m_centroids = boost::numeric::ublas::zero_matrix<T>(m_centroids.size1(), k) ;

   // Sum all the points in each cluster and find the number of points in
   // each cluster...
   const int n = data.size2() ;
   for (int i = 0; i < n; ++i)
   {
      const int c = m_clusters[i] ;
      column(m_centroids, c) += column(data, i) ;
      ++m_cluster_data[c].num_points ;
   }

   // Average above sums to get the cluster centroids and initialize the
   // cluster data...
   for (int i = 0; i < k; ++i)
   {
      cluster_data& c = m_cluster_data[i] ;
      if (c.num_points <= 0) // need a better set of initial cluster centers
         throw empty_initial_cluster() ;
      column(m_centroids, i) /= c.num_points ;
   }
}

// Initial distances between points and their respective cluster centers
template<typename T, typename P>
void kmeans<T, P>::init_distances(const matrix& data)
{
   using boost::numeric::ublas::column ;

   const int n = data.size2() ;
   m_distances.resize(n) ;

   for (int i = 0; i < n; ++i)
   {
      // Look up the cluster to which point i currently belongs...
      const int cluster_index = m_clusters[i] ;
      const cluster_data& cd  = m_cluster_data[cluster_index] ;

      // ...and record a*b/(b-1), where a is the squared distance from
      // point i to its cluster's center and b the cluster cardinality.
      // A singleton cluster gets an "infinite" measure so later stages
      // never try to empty it.
      if (cd.num_points > 1)
         m_distances[i] =
            dist2(column(data, i), column(m_centroids, cluster_index))
            * cd.num_points/(cd.num_points - 1) ;
      else
         m_distances[i] = std::numeric_limits<T>::max() ;
   }
}

// Intermediate cluster data initialization
//
// A fresh cluster starts empty (num_points = 0) and untouched by any
// optimal transfer step (last_update_step = -1). qtran_updated starts
// true so that, initially, every cluster belongs to the live set (see
// is_in_live_set()).
template<typename T, typename P>
kmeans<T, P>::cluster_data::cluster_data()
   : num_points(0), last_update_step(-1), qtran_updated(true)
{}

//---------------------- OPTIMAL TRANSFER STAGE -------------------------

// Reallocate points to closer clusters if that will reduce
// within-cluster sum-of-squares. Returns a 3-tuple: (number of
// reallocations, number of live clusters seen, number of live points).
template<typename T, typename P>
boost::tuple<int, int, int> kmeans<T, P>::optra(int k, const matrix& data)
{
   using boost::numeric::ublas::column ;
   using boost::numeric::ublas::zero_vector ;

   // The main k-means loop terminates when the optimal transfer stage
   // goes through all the data points and makes no more reallocations.
   // This counter keeps track of how many points were reallocated for
   // this iteration of optra.
   int realloc = 0 ;

   // Another useful optra metric is the total number of clusters it
   // encountered in the live set. This number should decrease over
   // successive optra invocations. Although not strictly necessary, we
   // return this as well to the caller, the k-means main loop, so it can
   // print it out.
   std::map<int, bool> live ;

   // Finally, we would also like to know how many points were in live
   // sets. This number should be >= the number of reallocations and
   // should decrease across successive iterations of optra.
   int num_live_points = 0 ;

   // Loop to go over all the data points and see if they should be
   // reallocated to some other cluster.
   const int n = data.size2() ;
   for  (int i = 0; i < n; ++i)
   {
      // We really don't want any empty clusters. So, if point i is the
      // only one in its cluster, skip ahead to the next data point.
      const int j = m_clusters[i] ;
      cluster_data&  c = m_cluster_data[j] ;
      if (c.num_points < 2)
         continue ;

      // If this optimal transfer stage moved a point into or out of
      // point i's cluster, the distance measure for point i will now be
      // outdated and we ought to recompute the measure using the new
      // cluster center and cardinality.
      vector d = column(data, i) ;
      if (c.last_update_step >= 0 && c.last_update_step < n)
         m_distances[i] = dist2(d, column(m_centroids, j))
                        * c.num_points/(c.num_points - 1) ;

      // Find cluster with minimum R2 and move data point if it is less
      // than the current distance. When point i's own cluster is live,
      // compute_min_r2 considers all clusters (H-W step 4a); otherwise
      // it is restricted to the live set (step 4b).
      bool is_live = is_in_live_set(j, n) ;
      if (is_live) {
         live[j] = true ;
         ++num_live_points ;
      }
      const std::pair<T, int> min_r2 = compute_min_r2(i, k, data, is_live) ;
      if (min_r2.first < m_distances[i])
      {
         m_clusters2[i] = j ; // current closest cluster becomes second closest
         m_clusters [i] = min_r2.second ; // min R2 cluster becomes closest
         ++realloc ;

         // Update cluster center for i-th point's old cluster
         //
         // NOTE: To understand how the centroid update works, consider a
         // set P = {1, 2, 3, 4, 5}. Let p be the mean of this set; thus,
         // p = (1 + 2 + 3 + 4 + 5)/5 = 3. Now, let's say we remove the
         // number 2 from P to get a new set P' = {1, 3, 4, 5}. The new
         // mean, p' = (1 + 3 + 4 + 5)/4 = 13/4 = 3.25.
         //
         // Now, rather than computing p' by summing the numbers and
         // dividing by the new cardinality (which is one less than the
         // old cardinality), we can write:
         //
         //    p' = (1 + 2 + 3 + 4 + 5 - 2)/(5 - 1)
         //       = (1 + 2 + 3 + 4 + 5)/(5 - 1) - 2/(5 - 1)
         //       = ((1 + 2 + 3 + 4 + 5) * 5)/((5 - 1) * 5) - 2/(5 - 1)
         //       = ((1 + 2 + 3 + 4 + 5)/5) * (5/(5 - 1)) - 2/(5 - 1)
         //       = (3 * 5/(5 - 1)) - 2/(5 - 1)
         //       = (3 * 5 - 2)/(5 - 1)
         //       = 13/4 = 3.25
         //
         // Essentially, what we've done above is multiply the old mean
         // by the old cardinality to yield the old sum. From that we
         // subtract the data item we are removing; this gives us the sum
         // of the elements of the new set P'. Finally, we divide by the
         // new cardinality to get the new mean.
         //
         // The following bit of code applies the same technique to the
         // j-th (i.e., old) centroid vector and i-th data point, which
         // is the item removed from cluster j.
         cluster_data& c_old = c ;
         --c_old.num_points ;
         if (c_old.num_points > 0)
            column(m_centroids, j) =
               (column(m_centroids, j) * (c_old.num_points + 1) - d)/
               c_old.num_points ;
         else // old cluster is now empty ==> zero the cluster's center
            column(m_centroids, j) = zero_vector<T>(data.size1()) ;

         // Update cluster center for i-th point's new cluster
         //
         // NOTE: The technique for computing the average is the same as
         // the one described above. The only difference here is that
         // instead of removing the i-th point from the target cluster,
         // we're adding it. But the idea is the same: multiply old
         // average by old cardinality to yield old sum; add new data
         // point to get new sum and then divide by new cardinality
         // (which is simply one more than the old cardinality).
         cluster_data& c_new = m_cluster_data[min_r2.second] ;
         ++c_new.num_points ;
         if (c_new.num_points > 1)
            column(m_centroids, min_r2.second) =
               (column(m_centroids, min_r2.second) * (c_new.num_points-1) + d)/
               c_new.num_points ;
         else
            column(m_centroids, min_r2.second) = d ;

         // Mark i-th point's old and new clusters as updated so we can
         // recompute distance for subsequent points in these two
         // clusters...
         c_old.last_update_step = c_new.last_update_step = i ;
      }
      else // no need to reallocate; point i is already in best cluster
         m_clusters2[i] = min_r2.second ; // min R2 cluster becomes 2nd closest
   }
   return boost::make_tuple(realloc,
                            static_cast<int>(live.size()), num_live_points) ;
}

// Check whether or not cluster i is in the live set
//
// A cluster is live if the most recent quick transfer stage touched it,
// or if the current optimal transfer pass has already moved a point
// into or out of it (i.e., its last_update_step lies in [0, n)).
template<typename T, typename P>
bool kmeans<T, P>::is_in_live_set(int i, int n) const
{
   const cluster_data& cd = m_cluster_data[i] ;
   if (cd.qtran_updated)
      return true ;
   return cd.last_update_step >= 0 && cd.last_update_step < n ;
}

// Find cluster with minimum R2 for i-th data point
//
// R2 for cluster j is dist2(i,j) * n(j)/(n(j) + 1). We return the
// smallest R2 and the index of the cluster that produced it (or an
// "infinite" R2 and index -1 if no candidate cluster was examined).
template<typename T, typename P>
std::pair<T, int>
kmeans<T, P>::
compute_min_r2(int i, int k, const matrix& data, bool all_clusters) const
{
   using boost::numeric::ublas::column ;

   const int    n      = data.size2() ;
   const int    first  = m_clusters [i] ; // point i's closest cluster
   const int    second = m_clusters2[i] ; // point i's 2nd closest cluster
   const vector point  = column(data, i) ;

   T   best_r2      = std::numeric_limits<T>::max() ;
   int best_cluster = -1 ;
   for (int j = 0; j < k; ++j)
   {
      // Never consider the point's current closest and second closest
      // clusters...
      if (j == first || j == second)
         continue ;

      // ...and when restricted to the live set, skip clusters outside it.
      if (!all_clusters && !is_in_live_set(j, n))
         continue ;

      // Compute R2 for the i-th point and j-th cluster, keeping the
      // smallest value (and its cluster) seen so far...
      const cluster_data& cd = m_cluster_data[j] ;
      const T r2 = dist2(point, column(m_centroids, j))
                 * cd.num_points/(cd.num_points + 1) ;
      if (r2 < best_r2)
      {
         best_r2      = r2 ;
         best_cluster = j ;
      }
   }
   return std::make_pair(best_r2, best_cluster) ;
}

//----------------------- QUICK TRANSFER STAGE --------------------------

// Quick transfer stage of the Hartigan-Wong k-means algorithm (the QTRAN
// step of AS 136): repeatedly sweep over all data points, swapping each
// point between its closest (L1) and second closest (L2) clusters when
// doing so lowers the weighted within-cluster distance, until a full
// sweep makes no transfer.
//
// k is the number of clusters; data holds the points one per column.
// Returns (number of transfers made, number of sweeps performed, total
// number of point visits across all sweeps).
template<typename T, typename P>
boost::tuple<int, int, int> kmeans<T, P>::qtran(int k, const matrix& data)
{
   using boost::numeric::ublas::column ;
   using boost::numeric::ublas::zero_vector ;

   // Clear the per-cluster "touched during quick transfer" flags; they
   // are set below whenever a cluster gains or loses a point.
   for (int i = 0; i < k; ++i)
      m_cluster_data[i].qtran_updated = false ;

   // step  = global index of the current point visit (never reset); it
   //         timestamps cluster changes via last_update_step.
   // begin = value of step at the start of the current sweep, so that
   //         (last_update_step - begin) measures how recently a cluster
   //         changed relative to this sweep.
   int  step = 0, begin = 0 ;
   bool transfer ;
   int num_transfers = 0, num_iterations = 0 ;
   do
   {
      transfer = false ;

      const int n = data.size2() ;
      for  (int i = 0; i < n; ++i, ++step)
      {
         // We really don't want any empty clusters. So, if point i is
         // the only one in its cluster, skip ahead to the next data
         // point.
         const int L1 = m_clusters[i] ;
         cluster_data&  c1 = m_cluster_data[L1] ;
         if (c1.num_points < 2)
            continue ;

         // If the most recent optimal transfer stage updated the cluster
         // for point i after it had moved beyond i in its loop, then the
         // distance measure for the i-th point will be outdated...
         // (last_update_step stores step + n at update time, so diff >= i
         // means L1 changed at or after position i in the previous sweep,
         // or anywhere so far in the current one.)
         vector d  = column(data, i) ;
         int diff  = c1.last_update_step - begin ;
         if (diff >= i)
            m_distances[i] = dist2(d, column(m_centroids, L1))
                           * c1.num_points/(c1.num_points - 1) ;

         // If neither L1 nor L2 has changed recently, the outcome of the
         // swap test below cannot have changed either; skip this point.
         const int L2 = m_clusters2[i] ;
         cluster_data& c2 = m_cluster_data[L2] ;
         if (diff < 0 && (c2.last_update_step - begin) < 0)
            continue ;

         // R2 = weighted cost of moving point i into L2 (M/(M+1));
         // m_distances[i] holds R1, the weighted cost of keeping it in
         // L1 (M/(M-1)). Transfer the point when R2 < R1.
         T r2 = dist2(d, column(m_centroids, L2))
              * c2.num_points/(c2.num_points + 1) ;
         if (r2 < m_distances[i])
         {
            m_clusters2[i] = L1 ; // current closest cluster becomes 2nd closest
            m_clusters [i] = L2 ; // second  closest cluster becomes closest
            transfer = c1.qtran_updated = c2.qtran_updated = true ;
            ++num_transfers ;

            // Incrementally recompute the losing cluster's centroid:
            // its mean with point i removed.
            --c1.num_points ;
            if (c1.num_points > 0)
               column(m_centroids, L1) =
                  (column(m_centroids, L1) * (c1.num_points + 1) - d)/
                  c1.num_points ;
            else // old cluster is now empty ==> zero the cluster's center
               column(m_centroids, L1) = zero_vector<T>(data.size1()) ;

            // Incrementally recompute the gaining cluster's centroid:
            // its mean with point i added.
            ++c2.num_points ;
            if (c2.num_points > 1)
               column(m_centroids, L2) =
                  (column(m_centroids, L2) * (c2.num_points - 1) + d)/
                  c2.num_points ;
            else
               column(m_centroids, L2) = d ;

            // Mark i-th point's old and new clusters as updated so we can
            // recompute distance for subsequent points in these two
            // clusters... (step + n keeps the timestamp "recent" for the
            // next n point visits, i.e., one full sweep.)
            c1.last_update_step = c2.last_update_step = step + n ;
         }
      }
      begin += n ;
      ++num_iterations ;
   }
   while (transfer) ;

   // Reset timestamps so stale values cannot leak into the next stage's
   // live-set computation (see is_in_live_set).
   for (int i = 0; i < k; ++i)
      m_cluster_data[i].last_update_step = -1 ;

   return boost::make_tuple(num_transfers, num_iterations, step) ;
}

//-------------------------- K-MEANS HELPERS ----------------------------

// Square of Euclidean distance between two points
template<typename T, typename P>
T kmeans<T, P>::dist2(const vector& a, const vector& b)
{
   // Accumulate squared per-coordinate differences directly rather than
   // materializing the difference vector and taking its inner product;
   // the summation order (left to right) is the same either way.
   T sum = T(0) ;
   const typename vector::size_type n = a.size() ;
   for (typename vector::size_type i = 0; i < n; ++i)
   {
      const T d = a(i) - b(i) ;
      sum += d * d ;
   }
   return sum ;
}

//---------------------- K-MEANS MULTITHREADING -------------------------

// Initialization: create all the threads in the pool
template<typename T, typename P>
kmeans<T, P>::thread_pool::thread_pool(int n)
   : m_shutdown(false)
{
   // Always spin up at least one worker thread, even if the caller asks
   // for zero or a negative count.
   const int num_threads = (n < 1) ? 1 : n ;
   for (int i = 0; i < num_threads; ++i)
      m_threads.add_thread(new boost::thread(thread_func, this)) ;
}

// Add a new task to pool's queue and notify one of the threads that
// there's a task waiting to be executed.
template<typename T, typename P>
void kmeans<T, P>::thread_pool::add_task(boost::function<void ()> f)
{
   {
      // Hold the queue mutex only while enqueueing; the lock_guard
      // releases it as soon as this scope ends.
      boost::lock_guard<boost::mutex> queue_lock(m_mutex) ;
      m_tasks.push(f) ;
   }
   // Notify outside the lock so the woken worker can acquire the mutex
   // immediately instead of blocking on it.
   m_cond.notify_one() ;
}

// Each thread in the pool basically waits for tasks to arrive and then
// executes them.
//
// DEVNOTE: The shutdown flag is guarded by m_shutdown_mutex while the
// task queue and condition variable use m_mutex. Because the two are not
// covered by a single mutex, the destructor's notify_all() can fire in
// the window between a worker's shutdown check and its call to wait();
// that notification would be lost, an unbounded wait() would then block
// forever, and join_all() in the destructor would deadlock. We therefore
// use a bounded wait: on timeout the worker simply loops back and
// re-examines the shutdown flag.
template<typename T, typename P>
void kmeans<T, P>::thread_pool::thread_func(thread_pool* pool)
{
   boost::thread::id id = boost::this_thread::get_id() ;
   boost::function<void ()> f ; // task to execute
   for(;;)
   {
      try
      {
         // First check if the thread pool has been shutdown
         // DEVNOTE: Block to automatically acquire and release mutex
         {
            typedef boost::shared_lock<boost::shared_mutex> read_lock ;
            read_lock r(pool->m_shutdown_mutex) ;
            if (pool->m_shutdown)
               break ;
         }

         boost::unique_lock<boost::mutex> lock(pool->m_mutex) ;
         if (pool->m_tasks.empty()) {
            // Wait (briefly) for a task to arrive; see DEVNOTE above for
            // why this wait must be bounded rather than indefinite.
            pool->m_cond.timed_wait(lock,
                                    boost::posix_time::milliseconds(100)) ;
            if (pool->m_tasks.empty()) // timeout, spurious wakeup, or task
               continue ;              // consumed by another thread
         }

         // Got a task to work on: take it off the queue
         f = pool->m_tasks.front() ;
         pool->m_tasks.pop() ;
      }
      catch (boost::thread_resource_error&)
      {
         std::cerr << "k-means thread pool thread " << id
                   << " encountered an error\n" ;
         break ; // fatal error; thread cannot continue
      }
      catch (boost::thread_interrupted&)
      {
         std::cerr << "k-means thread pool thread " << id
                   << " interrupted; ignoring...\n" ;
         continue ; // not fatal
      }

      // Execute the latest task retrieved from the queue
      f() ;
   }
}

// When the kmeans objects goes out of scope, its thread pool will also
// be destroyed.
template<typename T, typename P>
kmeans<T, P>::thread_pool::~thread_pool()
{
   {
      // Raise the shutdown flag under the write lock so that worker
      // threads, which read the flag under a shared lock, see a
      // consistent value.
      boost::lock_guard<boost::shared_mutex> write_lock(m_shutdown_mutex) ;
      m_shutdown = true ;
   }

   // Wake every worker so it notices the shutdown flag, then wait for
   // all of them to exit before tearing the pool down.
   m_cond.notify_all() ;
   m_threads.join_all() ;
}

//------------------------ K-MEANS EXCEPTIONS ---------------------------

// Constructor for the exception thrown when the user-supplied centroids
// initialization policy returns a matrix whose dimensions do not match
// what the algorithm expects (see the message text below).
template<typename T, typename P>
kmeans<T, P>::bad_centroids_initialization::
bad_centroids_initialization()
   : std::runtime_error("centroids initialization policy "
                        "produced a matrix of the wrong size")
{}

// Constructor for the exception thrown when the initial assignment of
// points to centers leaves some cluster with no members, i.e., the
// chosen initial centers were poor (see the message text below).
template<typename T, typename P>
kmeans<T, P>::empty_initial_cluster::
empty_initial_cluster()
   : std::runtime_error("at least one cluster empty after initial assignment; "
                        "need better initial cluster center selection")
{}

//-----------------------------------------------------------------------

} // namespace gist

#endif

/* So things look consistent in everyone's emacs... */
/* Local Variables: */
/* indent-tabs-mode: nil */
/* End: */
