# vim: syntax=cuda
# Consensus matrix update kernels

"""

Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

from kernel_helpers import Krnl, full_grid


# Edge length of the square CUDA thread block: each block runs BSIZE x BSIZE
# threads and processes one BSIZE x BSIZE tile of the pairwise sample matrix.
# Must match the loader-row scheme in upd_mat_kernel_base (uses rows 0/2/4/6,
# so BSIZE must be > 6).
BSIZE = 16

# Shared preamble of the consensus-update CUDA kernel, as a %-format template
# (%(bsize)s is substituted with BSIZE when compiled via Krnl below).
#
# It stages, into shared memory, the cluster assignments (kr1/kr2) and the
# original sample indices (id1/id2) for one BLOCK_SIZE x BLOCK_SIZE tile of
# the pairwise matrix: four designated loader rows (threadIdx.y == 0/2/4/6)
# each fetch one BLOCK_SIZE-long strip, then all threads synchronize and
# compute `loc`, the flat output index for their (row, column) sample pair.
#
# NOTE(review): `tid` is computed but never used in either kernel tail —
# presumably dead code left from an earlier version; harmless.
# NOTE(review): there is no bounds check on blk_start_x/blk_start_y — this
# appears to assume full_grid pads to a multiple of BLOCK_SIZE AND that the
# input arrays are sized accordingly; confirm at the call sites.
#
# One of upd_mat_atomic / upd_mat_nonatomic below is appended to this string
# to close the function body before compilation.
upd_mat_kernel_base = """
    
    // Updates an NxN result matrix based on the results of a clustering iteration
    //
    // Because we cannot now guarantee row/column locations due to random sampling,
    // we are forced to update on a random-access basis, which is EXTREMELY slow on the gpu.

    // Keep in mind that this approach, sans repeats, has a limit of a bit more than 2mil samples
    // To make it infinite you would need to have blocks loop
    // Note that if your sample/feature size is greater than this you can't use mul24 on these values..

    // Grid this in the x and y directions according to x / BLOCK_COLS and y / BLOCK_COLS

    // TODO: Loop this and have each block do four comparisons to maximise data reuse

    #define BLOCK_SIZE %(bsize)s

    __global__ void upd_mat (int *clustcount, int *totalcount, int *kresults, int *samples, int cols) {

        __shared__ int kr1[BLOCK_SIZE];
        __shared__ int kr2[BLOCK_SIZE];
        __shared__ int id1[BLOCK_SIZE];
        __shared__ int id2[BLOCK_SIZE];

        int tid         = __mul24(threadIdx.y, BLOCK_SIZE) + threadIdx.x;
        int blk_start_x = __mul24(blockIdx.x,  BLOCK_SIZE) + threadIdx.x;
        int blk_start_y = __mul24(blockIdx.y,  BLOCK_SIZE) + threadIdx.x;
        int loc;

        if (threadIdx.y == 0)
            kr1[threadIdx.x] = kresults[blk_start_x];
        
        if (threadIdx.y == 2)
            kr2[threadIdx.x] = kresults[blk_start_y];

        if (threadIdx.y == 4)
            id1[threadIdx.x] = samples[blk_start_x];

        if (threadIdx.y == 6)
            id2[threadIdx.x] = samples[blk_start_y];

        __syncthreads();

        loc = id1[threadIdx.x] * cols + id2[threadIdx.y];
"""

# Kernel tail using atomicAdd: a pair in the same cluster bumps clustcount
# (skipping the self-pair where both sample ids are equal), and every pair
# bumps totalcount. Atomics make this safe when `loc` collides, i.e. when
# the sampled indices contain repeats.
upd_mat_atomic = """
        if (kr1[threadIdx.x] == kr2[threadIdx.y]) {
            if (id1[threadIdx.x] != id2[threadIdx.y])
                atomicAdd( &clustcount[loc], 1);
            //out[ (int) id1[threadIdx.x] * cols + (int) id2[threadIdx.y] ] += 1;
        }

        atomicAdd( &totalcount[loc], 1);
    }
"""

# Kernel tail using plain read-modify-write increments (faster than atomics).
# NOTE(review): these increments race whenever two threads compute the same
# `loc` — presumably this variant is only launched when the sample indices
# are all distinct (no repeats), making every loc unique per launch; confirm
# at the call sites. Otherwise use upd_mat_atom.
upd_mat_nonatomic = """
        if (kr1[threadIdx.x] == kr2[threadIdx.y]) {
            if (id1[threadIdx.x] != id2[threadIdx.y])
                clustcount[loc] += 1;
        }

        totalcount[loc] += 1;
    }
"""


########### Kernel Functions ###########

# Compile the atomic variant of the consensus-update kernel once at import
# time; Krnl returns the raw launcher and its block configuration.
_upd_mat_atom, block_uma = Krnl(upd_mat_kernel_base + upd_mat_atomic, BSIZE).func('upd_mat', 'PPPPi')

def upd_mat_atom(clustcount, totalcount, kresults, samples):
    """Launch the atomic consensus-update kernel over every sample pair.

    clustcount / totalcount are 2-D GPU arrays (pairwise counters);
    kresults holds per-sample cluster assignments and samples holds the
    original sample indices. Returns whatever the launcher returns.
    """
    n = kresults.shape[0]
    return _upd_mat_atom(full_grid(n, n, BSIZE), block_uma,
                         clustcount.gpudata, totalcount.gpudata,
                         kresults.gpudata, samples.gpudata,
                         clustcount.shape[1])

# Compile the non-atomic (racy-but-fast) variant of the kernel once at
# import time; Krnl returns the raw launcher and its block configuration.
_upd_mat, block_um = Krnl(upd_mat_kernel_base + upd_mat_nonatomic, BSIZE).func('upd_mat', 'PPPPi')

def upd_mat(clustcount, totalcount, kresults, samples):
    """Launch the non-atomic consensus-update kernel over every sample pair.

    Same arguments and launch geometry as upd_mat_atom; uses plain
    increments instead of atomicAdd. Returns whatever the launcher returns.
    """
    n = kresults.shape[0]
    return _upd_mat(full_grid(n, n, BSIZE), block_um,
                    clustcount.gpudata, totalcount.gpudata,
                    kresults.gpudata, samples.gpudata,
                    clustcount.shape[1])
