# vim: syntax=cuda
# CUDA kernels for Self-Organising Map clustering

"""

Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

from __future__ import division

import cu_twister      as curand
import pycuda.driver   as cuda
import pycuda.gpuarray as gpuarray
import numpy           as N

from kernel_helpers import Krnl

# Global vars

# Cap on RNG seed values/streams — limits how many init_som blocks may run at
# once (see get_som_funcs: maxblks = MAX_RAND_SEED // INIT_BLOCK_COLS).
# Presumably matches the Mersenne Twister seed-table size — TODO confirm
# against mt_rand.cu.h / cu_twister.
MAX_RAND_SEED   = 32768

# Thread-block shape for the initialisation kernel (BLOCK_ROWS x BLOCK_COLS
# threads; substituted into init_som_kernel as %(brows)s / %(bcols)s).
INIT_BLOCK_ROWS = 8
INIT_BLOCK_COLS = 16

# Thread-block shape for the training/evaluation kernel.
TRAIN_BLOCK_ROWS = 8
TRAIN_BLOCK_COLS = 32

# Block size for a Euclidean-distance kernel — not referenced anywhere in
# this chunk; presumably used elsewhere in the project.
EUC_BSIZE = 64

init_som_kernel = """

    // Generate initial node vectors

    // Expects in to be a matrix containing features on columns and samples on rows
    // Writes to a three-dimensional array of size HxVxM, where H and V are the node dimensions and M is the feature dim

    // Grid this function in the x direction tiling horizontally across your features (M / BLOCK_SIZE)
    // Grid this function in the y direction per simultaneous SOM clustering

    #define BLOCK_ROWS %(brows)s
    #define BLOCK_COLS %(bcols)s

    #define FLT_MAX __int_as_float(0x7f800000)
    #define FLT_MIN __int_as_float(-0x7f800000)

    #define NWIDTH  %(nwidth)s
    #define NHEIGHT %(nheight)s

    #include "mt_rand.cu.h"

    __global__ void init_som (float *out, float *in, int rows, int cols, int numsamples) {
    	
        int tid        = __mul24(threadIdx.y, BLOCK_COLS) + threadIdx.x; // Thread location in the block
        int iter_strd  = __mul24(BLOCK_COLS, gridDim.x);
        int blk_start  = __mul24(__mul24(blockIdx.y, NWIDTH), NHEIGHT) * cols;
        int iter_start;

    	int i, j, iters;
        float colmin, colmax;

        // Current twister state

        __shared__ MersenneTwisterState rstate[BLOCK_COLS];

        // Temporary storage for in matrix values

        __shared__ float tmp[BLOCK_ROWS][BLOCK_COLS+1];    // Avoids some bank conflicts
        
        int ridx = __mul24(blockIdx.x, BLOCK_COLS) + threadIdx.x;
    
        if (tid < BLOCK_COLS)
            MersenneTwisterInitialise(rstate[tid], ridx);

        iters = cols / BLOCK_COLS;

        if (iters > gridDim.x) {
            iters = (iters / gridDim.x) + (blockIdx.x < iters %% gridDim.x);
        } else {
            iters = (blockIdx.x < iters);
        }

        while (iters--) {

            colmin = FLT_MAX;
            colmax = FLT_MIN;

            iter_start = iter_strd * iters;
    
            for (i = threadIdx.y; i < rows; i += BLOCK_ROWS) {
    
                tmp[threadIdx.y][threadIdx.x] = in[ ridx + i * cols + iter_start ];
    
                __syncthreads();
    
                if (i < numsamples) {
                    colmax = max(colmax, tmp[threadIdx.y][threadIdx.x]);
                    colmin = min(colmin, tmp[threadIdx.y][threadIdx.x]);
                }
    
                __syncthreads();
            }
    
            tmp[threadIdx.y][threadIdx.x] = colmin;
    
            __syncthreads();
            
            colmin = FLT_MAX;
    
            if (tid < BLOCK_COLS) {
                for (i = 0; i < BLOCK_ROWS; i++) {
                    colmin = min(colmin, tmp[i][tid]);
                }
            }
    
            __syncthreads();
    
            tmp[threadIdx.y][threadIdx.x] = colmax;
    
            __syncthreads();
            
            colmax = FLT_MIN;
    
            if (tid < BLOCK_COLS) {
                
                for (i = 0; i < BLOCK_ROWS; i++) {
                    colmax = max(colmax, tmp[i][tid]);
                }
                
                for (i = 0; i < NHEIGHT; i++) {
                    for (j = 0; j < NWIDTH; j++) {
                        out[ blk_start + ridx + (__mul24(i, NWIDTH) + j) * cols + iter_start ] = mt_rand(rstate[tid], ridx) * (colmax - colmin) + colmin;
                    }
                }
            }

            __syncthreads();
        }
    }
"""

# CUDA source (Python percent-format template) for SOM training + evaluation.
# One block trains one SOM repeat; after EPOCHS passes it writes each sample's
# best-matching node index to out.
som_training_kernel = """

    // SOM training and evaluation completed by a single block

    // Grid in the x direction according to number of SOM repeats

    #define BLOCK_ROWS %(brows)s
    #define BLOCK_COLS %(bcols)s

    #define FLT_MAX __int_as_float(0x7f800000)

    #define EPOCHS     %(epochs)s
    #define LEARN_RATE %(learn_rate)s

    #define NWIDTH  %(nwidth)s
    #define NHEIGHT %(nheight)s

    #define TOTAL_THREADS (BLOCK_ROWS * BLOCK_COLS)

    __global__ void train_som (int *out, float *nodes, float *in, float *samples, int rows, int cols, int numsamples) {

        __shared__ float sample_tmp[TOTAL_THREADS];
        __shared__ float node_tmp[BLOCK_ROWS][BLOCK_COLS+1];
        __shared__ float bestdist[BLOCK_ROWS];
        __shared__ int   bestidx[BLOCK_ROWS*2];   // indexed with stride 2 (tid << 1); odd slots unused
        __shared__ int   bmu;

        int i, j, k, l, t, curnode, cursample;

        const float RADIUS     = (NWIDTH + NHEIGHT) / 3.0;
        const float TIME_CONST = EPOCHS / logf(RADIUS);
        const int   NUM_NODES  = __mul24(NWIDTH, NHEIGHT);

        float rad, lr, r, inf, dist, psum;

        int tid        = __mul24(threadIdx.y, BLOCK_COLS) + threadIdx.x; // Thread location in the block
        int node_start = __mul24(blockIdx.x, NUM_NODES) * cols;
        int samp_start = blockIdx.x * rows;

        for (t = 1; t <= EPOCHS; t++) {

            rad     = RADIUS * expf(-t / TIME_CONST);
            lr      = LEARN_RATE * expf(-t / TIME_CONST);
            r       = (t << 1) * (rad * rad);   // NOTE(review): a Gaussian neighbourhood denominator is conventionally 2*rad*rad; the extra factor of t here is unusual -- confirm intended

            // Training
            for (i = 0; i < numsamples; i++) {

                cursample = (int) samples[ samp_start + i ]; // For random sampling

                if (tid < BLOCK_ROWS)
                    bestdist[tid] = FLT_MAX;

                __syncthreads();

                // Find BMU
                for (j = 0; j < NUM_NODES; j += BLOCK_ROWS) {

                    psum    = 0.0;
                    curnode = j + threadIdx.y;

                    for (k = 0; k < cols; k += TOTAL_THREADS) {

                        sample_tmp[tid] = in[ cursample * cols + k + tid ];

                        __syncthreads();

                        for (l = threadIdx.x; l < TOTAL_THREADS; l += BLOCK_COLS) {

                            if (curnode < NUM_NODES)
                                node_tmp[threadIdx.y][threadIdx.x] = nodes[ node_start + curnode * cols + k + l ];

                            __syncthreads();

                            if (curnode < NUM_NODES) {
                                dist  = node_tmp[threadIdx.y][threadIdx.x] - sample_tmp[l];
                                psum += dist * dist;
                            }

                            __syncthreads();
                        }
                    }

                    node_tmp[threadIdx.y][threadIdx.x] = psum;

                    __syncthreads();

                    psum = 0.0;

                    if (tid < BLOCK_ROWS && (j + tid < NUM_NODES)) {

                        for (k = 0; k < BLOCK_COLS; k++)
                            psum += node_tmp[tid][k];

                        dist = sqrtf(psum);

                        if (dist < bestdist[tid]) {
                            bestdist[tid] = dist;
                            bestidx[tid << 1] = j + tid;
                        }
                    }

                    __syncthreads();
                }

                dist = FLT_MAX;

                if (tid == 0) {

                    for (k = 0; k < BLOCK_ROWS; k++) {
                        if (bestdist[k] < dist) {
                            dist    = bestdist[k];
                            curnode = bestidx[k << 1];
                        }
                    }

                    bmu = curnode;
                }

                __syncthreads();

                k    = bmu / NWIDTH; // BMU y

                // Make adjustment to nearby nodes
                for (j = 0; j < NUM_NODES; j += BLOCK_ROWS) {

                    curnode = j + threadIdx.y;
                    l       = curnode / NWIDTH; // curnode y

                    // Chebyshev ("chess") distance is the max of the per-axis distances.
                    // The previous min(...) put every node sharing the BMU's row or
                    // column at distance 0, giving them full influence across the map.
                    dist = max(fabsf( (float) ((curnode - __mul24(l, NWIDTH)) - (bmu - __mul24(k, NWIDTH))) ), fabsf( (float) (l - k) )); // Chess distance
                    inf  = expf(-(dist * dist) / r) * lr;

                    for (k = 0; k < cols; k += TOTAL_THREADS) {

                        sample_tmp[tid] = in[ cursample * cols + k + tid ];

                        __syncthreads();

                        for (l = threadIdx.x; l < TOTAL_THREADS; l += BLOCK_COLS) {

                            if (curnode < NUM_NODES && dist < rad)
                                node_tmp[threadIdx.y][threadIdx.x] = nodes[ node_start + curnode * cols + k + l ];

                            __syncthreads();

                            if (curnode < NUM_NODES && dist < rad)
                                nodes[ node_start + curnode * cols + k + l ] = node_tmp[threadIdx.y][threadIdx.x] + inf * (sample_tmp[l] - node_tmp[threadIdx.y][threadIdx.x]);

                            __syncthreads();
                        }
                    }
                }
            }
        }

        for (i = 0; i < numsamples; i++) {

            if (tid < BLOCK_ROWS)
                bestdist[tid] = FLT_MAX;

            __syncthreads();

            // Find BMU
            for (j = 0; j < NUM_NODES; j += BLOCK_ROWS) {

                psum    = 0.0;
                curnode = j + threadIdx.y;

                for (k = 0; k < cols; k += TOTAL_THREADS) {

                    sample_tmp[tid] = in[ i * cols + k + tid ];

                    __syncthreads();

                    for (l = threadIdx.x; l < TOTAL_THREADS; l += BLOCK_COLS) {

                        if (curnode < NUM_NODES)
                            node_tmp[threadIdx.y][threadIdx.x] = nodes[ node_start + curnode * cols + k + l ];

                        __syncthreads();

                        if (curnode < NUM_NODES) {
                            dist  = node_tmp[threadIdx.y][threadIdx.x] - sample_tmp[l];
                            psum += dist * dist;
                        }

                        __syncthreads();
                    }
                }

                node_tmp[threadIdx.y][threadIdx.x] = psum;

                __syncthreads();

                psum = 0.0;

                if (tid < BLOCK_ROWS && (j + tid < NUM_NODES)) {

                    for (k = 0; k < BLOCK_COLS; k++)
                        psum += node_tmp[tid][k];

                    dist = sqrtf(psum);

                    if (dist < bestdist[tid]) {
                        bestdist[tid] = dist;
                        bestidx[tid << 1] = j + tid;
                    }
                }

                __syncthreads();
            }

            dist = FLT_MAX;

            if (tid == 0) {

                for (k = 0; k < BLOCK_ROWS; k++) {
                    if (bestdist[k] < dist) {
                        dist    = bestdist[k];
                        curnode = bestidx[k << 1];
                    }
                }

                out[ blockIdx.x * rows + i ] = curnode;
            }
        }
    }
"""

            
################# Kernel Access Methods ##################

def get_som_funcs(nwidth, nheight, epochs=1000, learn_rate = 0.01, repeats = 300):
    """
    Compile the SOM kernels and return a pair of launcher callables.

    nwidth, nheight -- SOM node-grid dimensions (baked into both kernels)
    epochs          -- number of training epochs (baked into the training kernel)
    learn_rate      -- initial learning rate (baked into the training kernel)
    repeats         -- simultaneous SOM clusterings (grid dimension)

    Returns (som_init, train_som):
      som_init(out, inp, numsamples)               -- fills out with random node vectors
      train_som(out, nodes, inp, samples, nsamp)   -- trains and writes BMU indices to out
    """

    # The RNG seed table limits how many init blocks may run simultaneously.
    maxblks = MAX_RAND_SEED // INIT_BLOCK_COLS

    # Block shape is (x, y) == (cols, rows), matching the training launch below.
    # The previous (INIT_BLOCK_COLS, INIT_BLOCK_COLS) launched 16x16 threads
    # against a kernel compiled with BLOCK_ROWS = 8, so threads with
    # threadIdx.y >= 8 indexed shared tmp[BLOCK_ROWS][BLOCK_COLS+1] out of bounds.
    _som   = Krnl(init_som_kernel, (INIT_BLOCK_COLS, INIT_BLOCK_ROWS),
                 frmt={'brows': INIT_BLOCK_ROWS, 'bcols': INIT_BLOCK_COLS, 'nwidth': nwidth, 'nheight': nheight},
                 include_dirs=[curand.get_include_dir()])

    _som_f, block_som = _som.func('init_som', 'PPiii')

    int32 = N.int32

    def som_init(out, inp, numsamples):
        """Launch init_som: out gets HxVxM random node vectors per repeat."""

        # One block per BLOCK_COLS-wide tile of columns, capped by the seed table.
        grid = (min(maxblks, inp.shape[1] // INIT_BLOCK_COLS), repeats)

        curand.seed(cuda, _som.mod)

        _som_f(grid, block_som, out.gpudata, inp.gpudata, inp.shape[0], inp.shape[1], int32(numsamples))

    _train_som, block_ts    = Krnl(som_training_kernel, (TRAIN_BLOCK_COLS, TRAIN_BLOCK_ROWS),
                            frmt={'brows': TRAIN_BLOCK_ROWS, 'bcols': TRAIN_BLOCK_COLS,
                                  'epochs': epochs, 'learn_rate': learn_rate,
                                  'nwidth': nwidth, 'nheight': nheight}).func('train_som', 'PPPPiii')

    # One block per SOM repeat; each block trains and evaluates its own map.
    train_som  = lambda out, nodes, inp, samples, numsamples: _train_som((repeats, 1), block_ts, out.gpudata, nodes.gpudata, inp.gpudata, samples.gpudata, inp.shape[0], inp.shape[1], int32(numsamples))

    return som_init, train_som