# Self-Organising Map CUDA Implementation

"""

Copyright 2009 Michael Seiler
Rutgers University
miseiler@gmail.com

This file is part of ConsensusCluster.

ConsensusCluster is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

ConsensusCluster is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with ConsensusCluster.  If not, see <http://www.gnu.org/licenses/>.


"""

import pycuda
import pycuda.driver   as cuda
import pycuda.gpuarray as gpuarray
import pycuda.curandom as curandom

import numpy as N

from som_kernels    import get_som_funcs
from consensus      import upd_mat, upd_mat_atom

import kernel_helpers as kh

# Element alignment used when padding the input matrix for the GPU
# (presumably matches a coalesced-access boundary -- see kh.aligntobsize)
ALIGNMENT = 128

# Invocation-type toggles: each enabled type gets an equal share of `repeats`
BOOTSTRAP     = 1
NON_BOOTSTRAP = 0    # XXX SOM more or less invariant without bstrap, useless at least at small node configs

SOM_BLOCK_PER_MP = 2 # 256 threads at half occupancy on GPU compute v1.2
                     # If using GPU compute v1.1, it's OK to set this to 1

def som(ary, nwidth, nheight = 2, epochs = 1000, learn_rate = 0.01, repeats = 300):
    """

    Runs the Self-Organising Map clustering method

    Input:

    ary        - Matrix to be clustered. Assumes NxM dimensions, where N contains samples and M contains features
    nwidth     - Node matrix width
    nheight    - Node matrix height
    epochs     - Number of iterations to learn the data
    learn_rate - Rate of learning. Lower values learn more accurately, but require more epochs to converge
    repeats    - Requested number of SOM runs, divided evenly among the enabled
                 invocation types (BOOTSTRAP / NON_BOOTSTRAP) and then rounded up
                 to a multiple of the device multiprocessor count

    Returns:

    (clustcount, totalcount) - Two numsamples x numsamples int32 numpy arrays:
                               clustcount[i,j] counts how often samples i and j were
                               assigned to the same node, totalcount[i,j] how often
                               both appeared in the same run

    """

    # Currently SOM can consist of 2 separate invocations of length repeats.
    # Guard against both invocation types being disabled (would divide by zero).
    repeats /= max(1, BOOTSTRAP + NON_BOOTSTRAP)

    # Round repeats up to a multiple of (multiprocessors * blocks-per-MP) so
    # every block launched does useful work
    mpc     = cuda.Device(0).get_attributes()[pycuda._driver.device_attribute.MULTIPROCESSOR_COUNT] * SOM_BLOCK_PER_MP
    repeats = int(N.ceil(float(repeats) / mpc) * mpc)

    #print "Running SOM with %s repeats per invocation..." % repeats

    # Get compiled kernels specialised for this node-grid size and schedule
    init_som, train_som = get_som_funcs(nwidth, nheight, epochs=epochs, learn_rate=learn_rate, repeats=repeats)

    assert len(ary.shape) == 2

    numsamples, numfeatures = ary.shape

    # Pad the matrix to the GPU alignment boundary before uploading
    ary     = kh.aligntobsize(ary, ALIGNMENT)
    ary_gpu = gpuarray.to_gpu(ary)

    n, m    = ary_gpu.shape

    # Matrices for standard SOM: one node grid and one assignment vector per repeat
    node_matrix = gpuarray.zeros((repeats, nheight, nwidth, m), dtype=N.float32)
    assignments = gpuarray.zeros((repeats * n,), dtype=N.int32) # PyCUDA doesn't support multislicing yet

    # Consensus matrices
    clustcount  = gpuarray.zeros((n,n), dtype=N.int32)
    totalcount  = gpuarray.zeros((n,n), dtype=N.int32)

    normal_samples = gpuarray.arange((n,), dtype=N.int32)

    if BOOTSTRAP:

        # Random sample indices in [0, numsamples) for each repeat
        samples = curandom.rand((repeats * n), dtype=N.float32) * numsamples

        init_som(node_matrix, ary_gpu, numsamples)
        train_som(assignments, node_matrix, ary_gpu, samples, numsamples)

        # Fold each repeat's assignments into the consensus counts
        for i in xrange(repeats):
            sl = i*n
            results = assignments[sl:sl+n]
            upd_mat_atom(clustcount, totalcount, results, normal_samples)

    if NON_BOOTSTRAP:

        # Every repeat sees each sample exactly once (identity sampling)
        samples = gpuarray.to_gpu(N.mgrid[:repeats, :n][1].reshape((repeats * n,)).astype(N.float32))

        init_som(node_matrix, ary_gpu, numsamples)
        train_som(assignments, node_matrix, ary_gpu, samples, numsamples)

        for i in xrange(repeats):
            sl = i*n
            results = assignments[sl:sl+n]
            upd_mat(clustcount, totalcount, results, normal_samples)

    # Trim the alignment padding rows/columns before returning to the host
    return clustcount.get()[:numsamples][:,:numsamples], totalcount.get()[:numsamples][:,:numsamples]
