/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*   File:         cuda_kmeans.cu  (CUDA version)                            */
/*   Description:  Implementation of simple k-means clustering algorithm     */
/*                 This program takes an array of N data objects, each with  */
/*                 M coordinates and performs a k-means clustering given a   */
/*                 user-provided value of the number of clusters (K). The    */
/*                 clustering results are saved in 2 arrays:                 */
/*                 1. a returned array of size [M][K] indicating the center  */
/*                    coordinates of K clusters                              */
/*                 2. membership[N] stores the cluster center ids, each      */
/*                    corresponding to the cluster a data object is assigned */
/*                                                                           */
/*   Author:  Wei-keng Liao                                                  */
/*            ECE Department, Northwestern University                        */
/*            email: wkliao@ece.northwestern.edu                             */
/*   Copyright, 2005, Wei-keng Liao                                          */
/*                                                                           */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#include "head.cuh"

/* Derive a 2D launch configuration covering an sz1 x sz2 work domain.
 * Block dims are clamped to nThreadXRatio/nThreadYRatio; when the domain is
 * larger than one block, each thread is assigned a power-of-two number of
 * rows/columns (rowPerThread/colPerThread) and the grid is sized by ceiling
 * division so that grid * block * perThread covers the whole domain. */
void get_kernel_config_given_ratios(int sz1, int sz2, dim3& szGrid, dim3& szBlock
                , int& rowPerThread, int& colPerThread
                , int nThreadXRatio, int nThreadYRatio)
{
    // Clamp block dims to the domain size (avoids idle threads on tiny inputs).
    szBlock.x = (sz1 > nThreadXRatio) ? nThreadXRatio : sz1;
    szBlock.y = (sz2 > nThreadYRatio) ? nThreadYRatio : sz2;
    szBlock.z = 1;
    szGrid.x = szGrid.y = szGrid.z = 1;
    rowPerThread = 1;
    colPerThread = 1;

    if (sz1 > nThreadXRatio || sz2 > nThreadYRatio)
    {
        // rowPerThread = 2 << (floor(log2(sz1/nThreadXRatio)) / 2),
        // computed iteratively to avoid floating-point log.
        int ratio = sz1 / nThreadXRatio;
        int k = 1;
        while ((1 << k) <= ratio)
        {
            rowPerThread = 2 << (k / 2);
            ++k;
        }
        szGrid.x = (sz1 + szBlock.x*rowPerThread - 1) / (szBlock.x*rowPerThread);

        // Same scheme for the second (column) dimension.
        ratio = sz2 / nThreadYRatio;
        k = 1;
        while ((1 << k) <= ratio)
        {
            colPerThread = 2 << (k / 2);
            ++k;
        }
        szGrid.y = (sz2 + szBlock.y*colPerThread - 1) / (szBlock.y*colPerThread);
    }
    // Sanity: the chosen configuration must tile the entire domain.
    assert(szGrid.x*szBlock.x*rowPerThread >= sz1);
    assert(szGrid.y*szBlock.y*colPerThread >= sz2);
}

/* Choose per-axis thread budgets based on the domain's aspect ratio, then
 * delegate to get_kernel_config_given_ratios to build the launch config.
 * A strongly rectangular domain (one side >= 2x the other) gets an
 * elongated 64x16 (or 16x64) block; otherwise a square 32x32 block.
 * Total threads/block stays at 1024 (the CUDA 2.x+ per-block maximum;
 * pre-2.x devices allowed only 512). */
void get_kernel_config(int sz1, int sz2, dim3& szGrid, dim3& szBlock
                    , int& rowPerThread, int& colPerThread)
{
    int nThreadX = 32;   // balanced default
    int nThreadY = 32;
    if (sz1 / sz2 >= 2)
    {
        nThreadX = 64;
        nThreadY = 16;
    }
    else if (sz2 / sz1 >= 2)
    {
        nThreadX = 16;
        nThreadY = 64;
    }
    get_kernel_config_given_ratios(sz1, sz2, szGrid, szBlock,
        rowPerThread, colPerThread, nThreadX, nThreadY);
}

/*----< euclid_dist_2() >----------------------------------------------------*/
/* square of Euclid distance between two multi-dimensional points            */
/*----< euclid_dist_2() >----------------------------------------------------*/
/* Squared Euclidean distance between object objectId and cluster center
 * clusterId. Both arrays are stored coordinate-major: objects is
 * [numCoords][numObjs] and clusters is [numCoords][numClusters], so
 * coordinate i of object j lives at objects[i*numObjs + j]. The square
 * root is omitted since callers only compare distances. */
__host__ __device__ inline static
float euclid_dist_2(int    numCoords,
                    int    numObjs,
                    int    numClusters,
                    float *objects,     // [numCoords][numObjs]
                    float *clusters,    // [numCoords][numClusters]
                    int    objectId,
                    int    clusterId)
{
    float ans = 0.0f;

    for (int i = 0; i < numCoords; i++) {
        // Difference along coordinate i, accumulated as a square.
        float d = objects[numObjs * i + objectId] - clusters[numClusters * i + clusterId];
        ans += d * d;
    }

    return ans;
}

/*----< find_nearest_cluster() >---------------------------------------------*/
/*----< find_nearest_cluster() >---------------------------------------------*/
/* One thread per object: scans all cluster centers and records the index of
 * the closest one in membership[objectId]. Expects a 1D launch with at least
 * numObjs total threads; excess threads exit via the bounds guard. */
__global__ static
void find_nearest_cluster(int numCoords,
                          int numObjs,
                          int numClusters,
                          float *objects,           //  [numCoords][numObjs]
                          float *deviceClusters,    //  [numCoords][numClusters]
                          int *membership          //  [numObjs]
                          )
{
    int objectId = blockDim.x * blockIdx.x + threadIdx.x;
    if (objectId >= numObjs)
        return;

    // Start with cluster 0 as the current best, then try the rest.
    int   best     = 0;
    float bestDist = euclid_dist_2(numCoords, numObjs, numClusters,
                                   objects, deviceClusters, objectId, 0);

    for (int c = 1; c < numClusters; c++) {
        float d = euclid_dist_2(numCoords, numObjs, numClusters,
                                objects, deviceClusters, objectId, c);
        // Squared distances preserve ordering, so no sqrt is needed.
        if (d < bestDist) {
            bestDist = d;
            best     = c;
        }
    }

    // Record the winning cluster for this object.
    membership[objectId] = best;
}



/* Recompute each cluster center coordinate as the mean of its members.
 * 2D launch: x covers coordinates (nCoords), y covers clusters (nClusters);
 * each thread handles rowPerThread x colPerThread elements via a grid-wide
 * stride. A center with no members keeps its previous value. */
__global__ static
void update_cluster(const float* objects, const int* membership, float* clusters
                    , const int nCoords, const int nObjs, const int nClusters
                    , const int rowPerThread, const int colPerThread)
{
    // Grid-wide strides and this thread's base position in each dimension.
    const int strideX = gridDim.x * blockDim.x;
    const int strideY = gridDim.y * blockDim.y;
    const int baseX   = blockIdx.x * blockDim.x + threadIdx.x;
    const int baseY   = blockIdx.y * blockDim.y + threadIdx.y;

    for (int j = 0; j < colPerThread; ++j)
    {
        const int cluster = baseY + j * strideY;
        if (cluster >= nClusters)
            break;  // indices only grow with j, so nothing further is in range

        for (int i = 0; i < rowPerThread; ++i)
        {
            const int coord = baseX + i * strideX;
            if (coord >= nCoords)
                break;

            // Sum this coordinate over every object assigned to this cluster.
            float acc = 0.0f;
            int   cnt = 0;
            for (int obj = 0; obj < nObjs; ++obj)
            {
                if (membership[obj] == cluster)
                {
                    acc += objects[coord*nObjs + obj];
                    cnt++;
                }
            }
            // Leave the center untouched when the cluster is empty.
            if (cnt > 0)
                clusters[coord*nClusters + cluster] = acc / cnt;
        }
    }
}

/* Copy the first copiedRows rows of a column-major sz1 x sz2 matrix src
 * (element (r,c) at src[c*sz1 + r]) into dest, a column-major
 * copiedRows x sz2 matrix. 2D launch: x covers rows, y covers columns,
 * with each thread handling rowPerThread x colPerThread elements via a
 * grid-wide stride. */
__global__ static
void copy_rows(const float* src, const int sz1, const int sz2
                , const int copiedRows, float* dest
                , const int rowPerThread, const int colPerThread)
{
    const int strideX = gridDim.x * blockDim.x;
    const int strideY = gridDim.y * blockDim.y;
    const int baseX   = blockIdx.x * blockDim.x + threadIdx.x;
    const int baseY   = blockIdx.y * blockDim.y + threadIdx.y;

    for (int i = 0; i < rowPerThread; ++i)
    {
        const int r = baseX + i * strideX;
        if (r >= copiedRows)
            break;  // strided indices only grow, so we can stop early

        for (int j = 0; j < colPerThread; ++j)
        {
            const int c = baseY + j * strideY;
            if (c >= sz2)
                break;
            dest[c*copiedRows + r] = src[c*sz1 + r];
        }
    }
}

/* Run k-means on device-resident data.
 *
 *   deviceObjects   in:  [numCoords][numObjs]   object coordinates (device)
 *   numCoords       no. features per object
 *   numObjs         no. objects
 *   numClusters     no. clusters (K)
 *   maxLoop         maximum number of iterations
 *   deviceClusters  out: [numCoords][numClusters] final centers (device)
 *
 * Centers are seeded from the first numClusters objects. Returns the
 * executed iteration count plus one (kept as-is for caller compatibility).
 * Note: the loop always runs maxLoop+1 iterations; there is no
 * convergence/delta early-exit in this implementation. */
int kMeans(float *deviceObjects,      /* in: [numObjs][numCoords] */
                   int     numCoords,    /* no. features */
                   int     numObjs,      /* no. objects */
                   int     numClusters,  /* no. clusters */
                   int     maxLoop,      /* maximum number of loops */
                   float  *deviceClusters)
{
    CHECK_PARAM(deviceClusters!=NULL, "deviceClusters cannot be NULL");
    int *deviceMembership; /* cluster id related to every obj*/
    CHECK_CUDA(cudaMalloc(&deviceMembership, numObjs*sizeof(int)));

    /* Initialize membership to -1 ("unassigned"). Every byte of the two's-
     * complement int pattern for -1 is 0xFF, so a byte-wise cudaMemset yields
     * exactly -1 per element -- no host staging buffer or H2D copy needed. */
    CHECK_CUDA(cudaMemset(deviceMembership, 0xFF, numObjs*sizeof(int)));

    dim3 szGrid, szBlock;
    int rowPerThread, colPerThread; /* num of rows/cols processed per thread */

    /* Initialize the cluster centers from the first numClusters objects. */
    get_kernel_config(numClusters, numCoords, szGrid, szBlock, rowPerThread, colPerThread);
    copy_rows<<<szGrid, szBlock>>>(deviceObjects, numObjs, numCoords,
        numClusters, deviceClusters, rowPerThread, colPerThread);
    CHECK_CUDA(cudaGetLastError());          /* launch-configuration errors */
    CHECK_CUDA(cudaDeviceSynchronize());     /* async execution errors */

    /* The update_cluster launch config depends only on the (fixed) problem
     * dimensions, so compute it once instead of every iteration. */
    get_kernel_config(numCoords, numClusters, szGrid, szBlock, rowPerThread, colPerThread);

    const int numThreadsPerClusterBlock = 128;
    const int numClusterBlocks =
        (numObjs + numThreadsPerClusterBlock - 1) / numThreadsPerClusterBlock;

    int loop(0);
    do {
        /* Step 1: assign every object to its nearest current center. */
        find_nearest_cluster<<< numClusterBlocks, numThreadsPerClusterBlock >>>
            (numCoords, numObjs, numClusters,
             deviceObjects, deviceClusters, deviceMembership);
        CHECK_CUDA(cudaGetLastError());
        CHECK_CUDA(cudaDeviceSynchronize());

        /* Step 2: recompute each center as the mean of its members. */
        update_cluster <<< szGrid, szBlock >>> (deviceObjects, deviceMembership
                    , deviceClusters, numCoords, numObjs, numClusters, rowPerThread, colPerThread);
        CHECK_CUDA(cudaGetLastError());
        CHECK_CUDA(cudaDeviceSynchronize());
    } while (loop++ < maxLoop);

    CHECK_CUDA(cudaFree(deviceMembership));

    /* After the loop, 'loop' equals the number of iterations executed;
     * the historical contract returns that value plus one. */
    return (loop + 1);
}