# vim: syntax=cuda
# K-Means Kernels

"""

Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

from __future__ import division

import cu_twister      as curand
import pycuda.driver   as cuda
import pycuda.gpuarray as gpuarray
import numpy           as N

from kernel_helpers import Krnl, full_grid

# Global vars

# Upper bound on distinct RNG seeds for the init kernel; divides through
# INIT_BLOCK_ROWS to cap the vertical grid size in init_km (see get_km_funcs).
MAX_RAND_SEED = 32768

# Thread-block shape (rows x cols) for the k-means assignment kernel.
KM_BLOCK_ROWS = 8
KM_BLOCK_COLS = 16

# Thread-block shape for the centroid-initialisation kernel.
INIT_BLOCK_ROWS = 8
INIT_BLOCK_COLS = 16

# Thread-block shape for the centroid-update kernel.
UPD_BLOCK_ROWS = 8
UPD_BLOCK_COLS = 16

# NOTE(review): the UPD_MAT_* and BSIZE constants are not referenced anywhere
# in this chunk — presumably used by kernels elsewhere in the project; verify
# before removing.
UPD_MAT_ROWS = 16
UPD_MAT_COLS = 16

BSIZE = 16


init_km_kernel = """

    // Generate initial node vectors

    // Expects in to be a matrix containing features on rows and samples on columns
    // This function should be vertically gridded according to the number of features, ie grid=(1, M / BLOCK_ROWS)
    // out should be kxM dims

    // FIXME: 24 registers is too many

    #define BLOCK_ROWS %(brows)s
    #define BLOCK_COLS %(bcols)s

    #define FLT_MAX __int_as_float(0x7f800000)
    #define FLT_MIN __int_as_float(-0x7f800000)

    #define NUM_CLUSTERS %(num_clusters)s

    #include "mt_rand.cu.h"

    __global__ void init_km (float *out, float *in, int rows, int cols, int numsamples) {
    	
        int tid = __mul24(threadIdx.y, BLOCK_COLS) + threadIdx.x;

    	int i, j, k, l, jEnd, blk_start, iters;
        float rowmin, rowmax, range;

        // Shared memory indices for coalesced stores
        k = tid - __mul24((tid / BLOCK_ROWS), BLOCK_ROWS);
        l = tid / BLOCK_ROWS;

        // Current twister state

        __shared__ MersenneTwisterState rstate[BLOCK_ROWS];

        // Temporary storage for in matrix values

        __shared__ float tmp[BLOCK_ROWS][BLOCK_COLS+1];    // Avoids some bank conflicts
        
        int ridx = __mul24(blockIdx.y, BLOCK_ROWS) + tid;
    
        if (tid < BLOCK_ROWS)
            MersenneTwisterInitialise(rstate[tid], ridx);

        // Unfortunately, this is quite a register-inefficient way to determine number of iterations
        iters = rows / BLOCK_ROWS;

        if (iters > gridDim.y) {
            iters = (iters / gridDim.y) + (blockIdx.y < iters %% gridDim.y);
        } else {
            iters = (blockIdx.y < iters);
        }

        while (iters--) {

            rowmin = FLT_MAX;
            rowmax = FLT_MIN;
        
            blk_start = ((gridDim.y * iters + blockIdx.y) * BLOCK_ROWS + threadIdx.y) * cols;
    
            for (j = threadIdx.x; j < cols; j += BLOCK_COLS) {
    
                tmp[threadIdx.y][threadIdx.x] = in[ blk_start + j ];
    
                __syncthreads();
    
                if (j < numsamples) {
                    rowmax = max(rowmax, tmp[threadIdx.y][threadIdx.x]);
                    rowmin = min(rowmin, tmp[threadIdx.y][threadIdx.x]);
                }
    
                __syncthreads();
            }
    
            tmp[threadIdx.y][threadIdx.x] = rowmin;
    
            __syncthreads();
            
            // I have tested the reduction here and it seems to be slower for these block sizes, mainly due to if-statements, I think

            rowmin = FLT_MAX;
    
            if (tid < BLOCK_ROWS) {
                for (j = 0; j < BLOCK_COLS; j++) {
                    rowmin = min(rowmin, tmp[tid][j]);
                }
            }
    
            __syncthreads();
    
            tmp[threadIdx.y][threadIdx.x] = rowmax;
    
            __syncthreads();
            
            rowmax = FLT_MIN;
    
            if (tid < BLOCK_ROWS) {
                for (j = 0; j < BLOCK_COLS; j++) {
                    rowmax = max(rowmax, tmp[tid][j]);
                }
            }
    
            __syncthreads();
    
            range     = rowmax - rowmin;

            blk_start = (gridDim.y * iters + blockIdx.y) * BLOCK_ROWS + k;

            for (i = 0; i < NUM_CLUSTERS; i += BLOCK_COLS) {
    
                jEnd = min((NUM_CLUSTERS - i), BLOCK_COLS);
    
                if (tid < BLOCK_ROWS) {
                    for (j = 0; j < jEnd; j++) {
                        tmp[tid][j] = mt_rand(rstate[tid], ridx) * range + rowmin;
                    }
                }
    
                __syncthreads();
    
                if (l < jEnd)
                    out[ blk_start + (l + i) * rows ] = tmp[k][l];
    
                __syncthreads();
            }
        }
    }
"""


# CUDA source template for the cluster-assignment step. Rendered with
# %(brows)s / %(bcols)s / %(num_clusters)s by get_km_funcs(); NUM_CLUSTERS is a
# compile-time constant, so the #if cascade below specialises the kernel to
# hold up to 6 per-cluster partial distances in registers at once, looping in
# chunks of 6 only when NUM_CLUSTERS > 5.
# NOTE(review): the header comment in the kernel states that N and M must be
# multiples of TOTAL_THREADS — callers are responsible for padding.
km_kernel = """

    // K-Means kernel

    // Data from in is expected to be column-ordered, MxN
    // This function expects to be gridded in the x direction, spawning at least N / TOTAL_THREADS blocks
    // Note that this function assumes N and M to be multiples of TOTAL_THREADS

    #define BLOCK_ROWS %(brows)s
    #define BLOCK_COLS %(bcols)s
    
    #define FLT_MAX __int_as_float(0x7f800000)

    #define NUM_CLUSTERS  %(num_clusters)s
    #define TOTAL_THREADS (BLOCK_ROWS * BLOCK_COLS)

    __global__ void kmeans (int *out, float *in, float *centroids, int rows, int cols) {

        __shared__ float sample_data[BLOCK_ROWS][BLOCK_COLS];

        // After extensive testing, preloading 6 centroids appears to be the sweet spot
        #if (NUM_CLUSTERS <= 5)
        __shared__ float centrd_data[TOTAL_THREADS*NUM_CLUSTERS];
        #else
        __shared__ float centrd_data[TOTAL_THREADS*6];
        #endif

        int tid       = __mul24(threadIdx.y, BLOCK_COLS) + threadIdx.x; // Thread location in the block
        int blk_start = __mul24(TOTAL_THREADS, blockIdx.x) + tid;

        int j, k, best_cntrd;

        float  dist;
        float  best_dist  = FLT_MAX;

        float  part_dist1 = 0.0;
        float  part_dist2 = 0.0;

        #if (NUM_CLUSTERS > 2)
        float  part_dist3 = 0.0;
        #endif
        #if (NUM_CLUSTERS > 3)
        float  part_dist4 = 0.0;
        #endif
        #if (NUM_CLUSTERS > 4)
        float  part_dist5 = 0.0;
        #endif
        #if (NUM_CLUSTERS > 5)
        float  part_dist6;
        #endif

        #if (NUM_CLUSTERS > 5)
        for (int i = 0; i < NUM_CLUSTERS; i += 6) {

            part_dist1 = part_dist2 = part_dist3 = part_dist4 = part_dist5 = part_dist6 = 0.0;
        #endif

            for (j = 0; j < rows; j += TOTAL_THREADS) {

                // Load as much of the centroid row as we can

                #if (NUM_CLUSTERS <= 5)
                centrd_data[tid]                   = centroids[ j + tid ];
                centrd_data[tid + TOTAL_THREADS]   = centroids[ rows + j + tid ];
                #if (NUM_CLUSTERS > 2)
                centrd_data[tid + (TOTAL_THREADS << 1)]      = centroids[ (rows << 1) + j + tid ];
                #endif
                #if (NUM_CLUSTERS > 3)
                centrd_data[tid + __mul24(TOTAL_THREADS, 3)] = centroids[ rows * 3 + j + tid ];
                #endif
                #if (NUM_CLUSTERS > 4)
                centrd_data[tid + (TOTAL_THREADS << 2)]      = centroids[ (rows << 2) + j + tid ];
                #endif
                #else
                centrd_data[tid]                                                       = centroids[ i * rows + j + tid ];
                if (i + 1 < NUM_CLUSTERS) centrd_data[tid + TOTAL_THREADS]             = centroids[ (i + 1) * rows + j + tid ];
                if (i + 2 < NUM_CLUSTERS) centrd_data[tid + (TOTAL_THREADS << 1)]      = centroids[ (i + 2) * rows + j + tid ];
                if (i + 3 < NUM_CLUSTERS) centrd_data[tid + __mul24(TOTAL_THREADS, 3)] = centroids[ (i + 3) * rows + j + tid ];
                if (i + 4 < NUM_CLUSTERS) centrd_data[tid + (TOTAL_THREADS << 2)]      = centroids[ (i + 4) * rows + j + tid ];
                if (i + 5 < NUM_CLUSTERS) centrd_data[tid + __mul24(TOTAL_THREADS, 5)] = centroids[ (i + 5) * rows + j + tid ];
                #endif
    
                __syncthreads();

                // Yes, I realise the loop below causes a step-function in terms of speed, where NUM_CLUSTER between
                // 7 and 12 (say) have the same performance, but adding the if statements slows the loop down even more
                // than the extra work does

                for (k = 0; k < TOTAL_THREADS; k++) {

                    sample_data[threadIdx.y][threadIdx.x] = in[ blk_start + (k + j) * cols ];

                    __syncthreads();

                    dist = sample_data[threadIdx.y][threadIdx.x] - centrd_data[k];
                    part_dist1 += dist * dist;
                    dist = sample_data[threadIdx.y][threadIdx.x] - centrd_data[k + TOTAL_THREADS];
                    part_dist2 += dist * dist;

                    #if (NUM_CLUSTERS > 2)
                    dist = sample_data[threadIdx.y][threadIdx.x] - centrd_data[k + TOTAL_THREADS*2];
                    part_dist3 += dist * dist;
                    #endif
                    #if (NUM_CLUSTERS > 3)
                    dist = sample_data[threadIdx.y][threadIdx.x] - centrd_data[k + TOTAL_THREADS*3];
                    part_dist4 += dist * dist;
                    #endif
                    #if (NUM_CLUSTERS > 4)
                    dist = sample_data[threadIdx.y][threadIdx.x] - centrd_data[k + TOTAL_THREADS*4];
                    part_dist5 += dist * dist;
                    #endif
                    #if (NUM_CLUSTERS > 5)
                    dist = sample_data[threadIdx.y][threadIdx.x] - centrd_data[k + TOTAL_THREADS*5];
                    part_dist6 += dist * dist;
                    #endif

                    __syncthreads();
                }
            }
                
            part_dist1 = sqrtf(part_dist1);

            if (part_dist1 < best_dist) {
                best_dist  = part_dist1;
                #if (NUM_CLUSTERS > 5)
                best_cntrd = i;
                #else
                best_cntrd = 0;
                #endif
            }

            part_dist2 = sqrtf(part_dist2);

            if (part_dist2 < best_dist) {
                #if (NUM_CLUSTERS > 5)
                if (i+1 < NUM_CLUSTERS) {
                    best_dist  = part_dist2;
                    best_cntrd = i+1;
                }
                #else
                best_dist  = part_dist2;
                best_cntrd = 1;
                #endif
            }

            #if (NUM_CLUSTERS > 2)
            part_dist3 = sqrtf(part_dist3);

            if (part_dist3 < best_dist) {
                #if (NUM_CLUSTERS > 5)
                if (i+2 < NUM_CLUSTERS) {
                    best_dist  = part_dist3;
                    best_cntrd = i+2;
                }
                #else
                best_dist  = part_dist3;
                best_cntrd = 2;
                #endif
            }
            #endif

            #if (NUM_CLUSTERS > 3)
            part_dist4 = sqrtf(part_dist4);

            if (part_dist4 < best_dist) {
                #if (NUM_CLUSTERS > 5)
                if (i+3 < NUM_CLUSTERS) {
                    best_dist  = part_dist4;
                    best_cntrd = i+3;
                }
                #else
                best_dist  = part_dist4;
                best_cntrd = 3;
                #endif
            }
            #endif

            #if (NUM_CLUSTERS > 4)
            part_dist5 = sqrtf(part_dist5);

            if (part_dist5 < best_dist) {
                #if (NUM_CLUSTERS > 5)
                if (i+4 < NUM_CLUSTERS) {
                    best_dist  = part_dist5;
                    best_cntrd = i+4;
                }
                #else
                best_dist  = part_dist5;
                best_cntrd = 4;
                #endif
            }
            #endif

            #if (NUM_CLUSTERS > 5)
            part_dist6 = sqrtf(part_dist6);

            if (part_dist6 < best_dist && (i+5 < NUM_CLUSTERS)) {
                best_dist  = part_dist6;
                best_cntrd = i+5;
            }
            #endif

            __syncthreads();

        #if (NUM_CLUSTERS > 5)
        }
        #endif

        out[ blk_start ] = best_cntrd;
        //out[blk_start + cols] = best_dist;
    }
"""
    

# CUDA source template for the centroid-update step. Rendered with
# %(brows)s / %(bcols)s / %(num_clusters)s / %(threshold)s by get_km_funcs().
# NOTE(review): an empty cluster yields newval = 0.0/0 = NaN, and NaN > THRESHOLD
# is false, so the old centroid is (accidentally but usefully) preserved.
upd_cntrds_kernel = """

    // Update centroids to the means

    // Data from in is expected to be column-ordered, MxN

    // This algorithm expects to be gridded vertically according to rows / BLOCK_ROWS,
    // and horizontally according to the number of centroids. I realise this puts a hard limit on the number of rows,
    // so we'd better fix that in the function call, or up the number of BLOCK_ROWS.

    // This function bunches sample reads for about a 4x speed gain, however this feature requires version 1.2 compute devices
    // If you're stuck with 1.1 devices I suggest you write a fully-coalesced function based on a transposed matrix,
    // assuming you can fit the matrix and its transpose on the device at the same time...

    #define BLOCK_ROWS %(brows)s
    #define BLOCK_COLS %(bcols)s
    
    #define NUM_CLUSTERS  %(num_clusters)s
    #define TOTAL_THREADS (BLOCK_ROWS * BLOCK_COLS)

    #define THRESHOLD %(threshold)s

    __global__ void upd_cntrds (float *out, float *in, int *kresults, int *moved, int rows, int cols, int numsamples) {

        // Input data storage
        __shared__ float sample_data[BLOCK_ROWS][BLOCK_COLS+1];

        // Holds sample indices for bunching sample reads
        __shared__ float sample_idx[TOTAL_THREADS]; // Float prevents a few collisions

        // Keeps threads from bumping into each other
        __shared__ int   counter; 

        // Total in our cluster
        __shared__ int   cl_total;

        // Moved?
        __shared__ int   moved_flag;

        int tid       = __mul24(threadIdx.y, BLOCK_COLS) + threadIdx.x;
        int blk_start = (__mul24(BLOCK_ROWS, blockIdx.y) + threadIdx.y) * cols;

    	int i, j, k;
        
        float oldval, newval, sum = 0.0;

        if (tid == 0) {
            cl_total   = 0;
            counter    = 0;
            moved_flag = 0;
        }

        __syncthreads();

        for (i = 0; i < numsamples; i += TOTAL_THREADS) {

            k = tid + i;

            // Bounds guard must come first so && short-circuits and we never
            // read kresults[] past the valid sample range
            if ( (k < numsamples) && kresults[k] == blockIdx.x )
                sample_idx[atomicAdd(&counter, 1)] = k;

            __syncthreads();

            //TODO: loop down the matrix...don't forget to reset the counters

            // We can't set j = threadIdx.x as it may be less than counter itself and we'd miss the __sync
            for (j = 0; j < counter; j += BLOCK_COLS) {

                k = threadIdx.x + j;

                if (k < counter)
                    sum += in[ blk_start + (int) sample_idx[k] ];

                __syncthreads();
                
            }

            if (tid == 0) {
                cl_total += counter;
                counter   = 0;
            }

            __syncthreads();
        }

        sample_data[threadIdx.y][threadIdx.x] = sum;

        __syncthreads();

        sum = 0.0;

        blk_start = tid + __mul24(blockIdx.y, BLOCK_ROWS) + blockIdx.x * rows;

        if (tid < BLOCK_ROWS) {
                
            for (j = 0; j < BLOCK_COLS; j++) {
                sum += sample_data[tid][j];
            }

            oldval = out[blk_start];
            newval = sum / cl_total;

            // FIXME: This is interesting...
            if (fabsf(oldval - newval) > THRESHOLD) {
                out[blk_start] = newval;
                moved_flag = 1;
            }

        }
        
        __syncthreads();

        if (moved_flag) {
            if (tid == 0)
                moved[0] = 1;
        }

    }

"""


########### Kernel Functions ###########

def get_km_funcs(num_clusters, threshold=0.001):
    """Compile the three k-means kernels and return host-side launch wrappers.

    num_clusters -- number of centroids k, substituted into the kernel source
                    templates at compile time.
    threshold    -- minimum per-component centroid change for the update kernel
                    to count a centroid as having moved.

    Returns a tuple ``(init_km, km, upd_cntrds)`` of callables operating on
    pycuda gpuarrays.
    """

    int32 = N.int32
    seed = curand.seed

    # The twister can only be seeded for a limited number of states, which
    # caps the vertical grid of the init kernel.
    max_init_blocks = MAX_RAND_SEED // INIT_BLOCK_ROWS

    # Build the init kernel module first; we keep the Krnl object around
    # because the RNG seeder needs access to its compiled module.
    init_builder = Krnl(init_km_kernel, (INIT_BLOCK_COLS, INIT_BLOCK_ROWS), '',
                        {'num_clusters': num_clusters, 'brows': INIT_BLOCK_ROWS,
                        'bcols': INIT_BLOCK_COLS}, include_dirs=[curand.get_include_dir()])

    kmeans_f, kmeans_block = Krnl(km_kernel, (KM_BLOCK_COLS, KM_BLOCK_ROWS), '',
                                  {'num_clusters': num_clusters, 'brows': KM_BLOCK_ROWS,
                                  'bcols': KM_BLOCK_COLS}).func('kmeans', 'PPPii')

    update_f, update_block = Krnl(upd_cntrds_kernel, (UPD_BLOCK_COLS, UPD_BLOCK_ROWS), '',
                                  {'num_clusters': num_clusters, 'brows': UPD_BLOCK_ROWS,
                                  'bcols': UPD_BLOCK_COLS, 'threshold': threshold}).func('upd_cntrds', 'PPPPiii')

    init_f, init_block = init_builder.func('init_km', 'PPiii')

    threads_per_km_block = KM_BLOCK_COLS * KM_BLOCK_ROWS

    def init_km(out, inp, numsamples):
        # Re-seed the twister on every call, then grid vertically over the
        # feature rows (capped by the number of available RNG states).
        nrows = inp.shape[0]
        nblocks = min(max_init_blocks, nrows // INIT_BLOCK_ROWS)

        seed(cuda, init_builder.mod)

        init_f((1, nblocks), init_block, out.gpudata, inp.gpudata, nrows, inp.shape[1], int32(numsamples))

    def km(out, inp, cntrds):
        # One x-block per TOTAL_THREADS samples (columns of inp).
        kmeans_f(((inp.shape[1] // threads_per_km_block), 1), kmeans_block,
                 out.gpudata, inp.gpudata, cntrds.gpudata, inp.shape[0], inp.shape[1])

    def upd_cntrds(out, inp, kresults, numsamples, moved):
        # Gridded horizontally per centroid and vertically over feature rows.
        update_f((len(out), inp.shape[0] // UPD_BLOCK_ROWS), update_block,
                 out.gpudata, inp.gpudata, kresults.gpudata, moved.gpudata,
                 inp.shape[0], inp.shape[1], int32(numsamples))

    return init_km, km, upd_cntrds

