# vim: syntax=cuda
# Kernels for network analysis

"""

Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

from kernel_helpers import Krnl, unroll, FULL_GRID_IDX, full_grid

import pycuda.gpuarray as gpuarray
import pycuda.driver   as cuda
import numpy as N

# Global vars
BSIZE = 16

# CUDA source template for the Pearson-correlation kernel.  Two slots are
# filled before compilation by Krnl below: %(bsize)s (the tile width, BSIZE)
# and %(loop)s (the unrolled per-tile accumulation, unroll_gpuRho).
# Each BLOCK_SIZE x BLOCK_SIZE thread block stages one tile of rows X and Y
# into shared memory, accumulates the five running sums needed for the
# correlation, then writes one rho value per thread.
# NOTE(review): both the tile loop and the out-of-range 0.0 store index up
# to the grid-padded dimensions (cols/rows), so in/out buffers are presumably
# padded to multiples of BLOCK_SIZE -- confirm against callers of gpuRho.
gpuRho_kernel = """
// Compute pearson correlation matrix. Based on code obtained from the following paper:
// 
// Chang et al. Compute pairwise Manhattan distance and Pearson correlation coefficient of data points with GPU.
// Proceedings of the 10th ACIS International Conference on Software Engineering, Artificial Intelligence, Networking and Parallel/Distributed Computing (SNPD 2009)

// Compute pearson correlation matrix into out
// Expects full gridding of out dimensions (r/BSIZE, r/BSIZE)

#define BLOCK_SIZE %(bsize)s

__global__ void gpuRho (float *out, float *in, int cols, int rows, int numsamples, int numfeatures) {
	
	__shared__ float Xs[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float Ys[BLOCK_SIZE][BLOCK_SIZE];
	
    int xblockloc = __mul24(blockIdx.x, BLOCK_SIZE);
    int yblockloc = __mul24(blockIdx.y, BLOCK_SIZE);

	int xBegin = xblockloc * cols;
	int yBegin = yblockloc * cols;

	int yEnd = yBegin + cols - 1;
    
    int idx = threadIdx.x + threadIdx.y * cols;

	int x, y, out_xloc, out_yloc;
	float a1, a2, a3, a4, a5;
	float avgX, avgY, varX, varY, cov, rho;

	a1 = a2 = a3 = a4 = a5 = 0.0;

	for (y = yBegin, x = xBegin; y <= yEnd; y += BLOCK_SIZE, x += BLOCK_SIZE) {
		Ys[threadIdx.y][threadIdx.x] = in[idx + y];
		Xs[threadIdx.x][threadIdx.y] = in[idx + x];

		__syncthreads();

        %(loop)s

		__syncthreads();
	}

    out_xloc = xblockloc + threadIdx.x;
    out_yloc = yblockloc + threadIdx.y;

    if (out_xloc < numsamples && out_yloc < numsamples) {
        int norm = numfeatures - 1;
    
    	avgX = a1 / numfeatures;
    	avgY = a2 / numfeatures;
    
    	varX = (a3 - avgX * avgX * numfeatures) / (norm);
    	varY = (a4 - avgY * avgY * numfeatures) / (norm);
    
    	cov  = (a5 - avgX * avgY * numfeatures) / (norm);
    	rho  = cov / sqrtf(varX * varY);
    
	    out[ out_xloc + out_yloc * rows ] = rho;
    } else {
        out[ out_xloc + out_yloc * rows ] = 0.0;
    }
}"""

# Per-tile accumulation body for gpuRho, unrolled BSIZE times over index 'k'
# by the project unroll() helper: a1/a2 accumulate the X/Y sums, a3/a4 the
# sums of squares, and a5 the cross-product sum used for the covariance.
unroll_gpuRho = unroll("""
    a1 += Xs[k][threadIdx.x];
    a2 += Ys[threadIdx.y][k];
    a3 += Xs[k][threadIdx.x] * Xs[k][threadIdx.x];
    a4 += Ys[threadIdx.y][k] * Ys[threadIdx.y][k];
    a5 += Ys[threadIdx.y][k] * Xs[k][threadIdx.x];
    """, 'k', BSIZE)

# CUDA source template for the topological-overlap kernel.  %(bsize)s and
# %(loop)s are filled in by Krnl below, as for gpuRho_kernel.  For each
# off-diagonal element the result is
#   (sum_k sim(i,k)*sim(k,j) + sim(i,j)) / (min(row_i_sum, row_j_sum) + 1 - sim(i,j));
# the diagonal is forced to 1.0 (cf. the CPU reference test_overlay below).
# NOTE(review): no bounds guard on the final store -- presumably `in`/`out`
# are padded square matrices whose side is a multiple of BLOCK_SIZE; confirm
# against the callers of top_overlay.
top_overlay_kernel = """
    // Compute topological overlap of square distance matrix 
    // Expects to be fully gridded (r/BSIZE, r/BSIZE)
    
    #define BLOCK_SIZE %(bsize)s
    
    __global__ void top_overlay (float *out, float *in, int cols) {
    	
    	__shared__ float Xs[BLOCK_SIZE][BLOCK_SIZE];
    	__shared__ float Ys[BLOCK_SIZE][BLOCK_SIZE];

        int xblockloc = __mul24(blockIdx.x, BLOCK_SIZE);
        int yblockloc = __mul24(blockIdx.y, BLOCK_SIZE);

        int out_xloc = xblockloc + threadIdx.x;
        int out_yloc = yblockloc + threadIdx.y;

        int glob_idx = out_xloc + out_yloc * cols;
        
        int idx = threadIdx.x + cols * threadIdx.y;
    
    	int xBegin = xblockloc * cols;
    	int yBegin = yblockloc * cols;
    
    	int yEnd = yBegin + cols - 1;
    
    	int x, y;
    
        float xsum = 0.0, ysum = 0.0, xy_prod_sum = 0.0;
    
        float sim_xy = in[glob_idx];
    
    	for (y = yBegin, x = xBegin; y <= yEnd; y += BLOCK_SIZE, x += BLOCK_SIZE) {
    
    		Ys[threadIdx.y][threadIdx.x] = in[idx + y];
    		Xs[threadIdx.x][threadIdx.y] = in[idx + x];
    
    		__syncthreads();
    
            %(loop)s
    
    		__syncthreads();
    	}
    
        if (out_xloc != out_yloc) {
            out[glob_idx] = (xy_prod_sum + sim_xy) / (min(xsum, ysum) + 1 - sim_xy);
        } else {
            out[glob_idx] = 1.0;
        }
    }"""

# Per-tile accumulation body for top_overlay, unrolled BSIZE times over 'k':
# running row sums for X and Y plus the elementwise cross-product sum.
unroll_top_overlay = unroll("""
        xsum += Xs[k][threadIdx.x];
        ysum += Ys[threadIdx.y][k];
        xy_prod_sum += Xs[k][threadIdx.x] * Ys[threadIdx.y][k];
        """, 'k', BSIZE)


# Compile both kernels.  Krnl fills the %(bsize)s / %(loop)s template slots,
# and .func() returns a (prepared kernel, block dimensions) pair for the
# given argument signature ('P' = pointer, 'i' = int32).
# NOTE(review): the meaning of FULL_GRID_IDX vs. the empty string as the
# third Krnl argument is defined in kernel_helpers and not visible here.
_gpuRho, block_gr      = Krnl(gpuRho_kernel, BSIZE, '', {'loop': unroll_gpuRho}).func('gpuRho', 'PPiiii')
_top_overlay, block_to = Krnl(top_overlay_kernel, BSIZE, FULL_GRID_IDX, {'loop': unroll_top_overlay}).func('top_overlay', 'PPi')

def gpuRho(out, inp, numsamples, numfeatures):
    """Launch the Pearson-correlation kernel.

    out         -- square GPUArray receiving the correlation matrix; the
                   launch grid is sized from out.shape[0]
    inp         -- GPUArray of input data; shape[1] is passed as the kernel's
                   column count and shape[0] as the output row stride
    numsamples  -- number of valid samples (entries beyond this are zeroed
                   by the kernel)
    numfeatures -- number of valid features, used for normalisation
    """
    grid = full_grid(out.shape[0], out.shape[0], BSIZE)
    return _gpuRho(grid, block_gr, out.gpudata, inp.gpudata,
                   inp.shape[1], inp.shape[0],
                   N.int32(numsamples), N.int32(numfeatures))


def top_overlay(out, inp):
    """Launch the topological-overlap kernel over the square matrix `inp`,
    writing the result into `out` (same shape expected)."""
    grid = full_grid(inp.shape[0], inp.shape[1], BSIZE)
    return _top_overlay(grid, block_to, out.gpudata, inp.gpudata, inp.shape[1])



##########################################
# Test functions for the above kernels
def test_overlay(M):
    # Create topological overlay and return it
    from itertools import combinations as comb

    X = N.zeros_like(M)

    for i, j in tuple(comb(xrange(M.shape[0]), 2)) + tuple([ (x,x) for x in xrange(M.shape[0]) ]):
        sum_prods = sum(M[i] * M[:,j])
        
        numer = float(sum_prods + M[i][j])
        denom = min(sum(M[i]), sum(M[:,j])) + 1 - M[i][j]
        
        if i == j:
            numer /= 2.

        X[i][j] = numer / denom

    tmp = (X+X.T).astype(N.float32)

    for i in xrange(tmp.shape[0]):
        tmp[i][i] = 1.

    return tmp
