
/*
todo
(1) asynchronous write/execute
(2) C++ interface
(3) speed up kernel
(4) support very large block sizes
    (5) support odd input sizes and sizes that are not a multiple of the block size
    (6) introduce multiple levels
Broad machine
(1) send nikos query source code, with compilation instruction


Notes
// NEEDS LOT OF ERROR CHECKING; use NDEBUG for speed-up
*/

// Includes
#include <stdio.h>
#include <cutil_inline.h>
#include <vector>
#include "assert.h"

#include "math.h"
//#include "cuda_device_query.hpp"




// Functions


// Forward declaration of the host-side accumulator (defined below).
// NOTE(review): the original declaration used UINT64 here, but the UINT64
// typedef is not introduced until later in this file, so the declaration did
// not compile on its own.  Spelling out the underlying type keeps the
// signature unchanged while making the declaration self-contained.
void cudaAccumulate(const float* input, const unsigned long long N, float &output);
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type.
//
// All "extern __shared__" declarations in a kernel alias the same
// dynamically-sized shared memory allocation (the size is supplied as the
// third kernel-launch parameter).  Declaring the array once, with a fixed
// name and a fixed element type (int), and reinterpreting the pointer to T
// avoids the duplicate-symbol problems that arise when the extern array's
// type depends on a template parameter.
template<class T>
struct SharedMemory
{
    // Implicit conversion: view the block's dynamic shared memory as T*.
    __device__ inline operator       T*()
    {
        extern __shared__ int __smem[];
        return (T*)__smem;
    }

    // Const overload of the same conversion.
    __device__ inline operator const T*() const
    {
        extern __shared__ int __smem[];
        return (T*)__smem;
    }
};

/* 
   
   
    Threads within a block can cooperate by sharing data through some shared memory 
    and by synchronizing their execution to coordinate memory accesses. More precisely, 
    one can specify synchronization points in the kernel by calling the __syncthreads() 
    intrinsic function; __syncthreads() acts as a barrier at which all threads in the
    block must wait before any is allowed to proceed. Section 3.2.2 gives an example of 
    using shared memory.   
   
    Each block within the grid can be identified by a one-dimensional or two-dimensional 
    index accessible within the kernel through the built-in blockIdx variable. The dimension 
    of the thread block is accessible within the kernel through the built-in blockDim variable.   
   
   This reduction interleaves which threads are active by using the modulo
   operator.  This operator is very expensive on GPUs, and the interleaved 
   inactivity means that no whole warps are active, which is also very 
   inefficient 
   
   
   
   */
// Device code (GPU).
//
// First-level reduction kernel: each thread block sums up to blockDim.x
// consecutive elements of g_idata and writes the partial sum to
// g_odata[blockIdx.x].
//
// Launch requirements:
//   - 1-D grid and 1-D block;
//   - dynamic shared memory of at least blockDim.x * sizeof(T) bytes;
//   - any n is valid: threads whose global index is past the end of the
//     input contribute the additive identity (0).
//
// This version interleaves the active threads via the modulo operator, which
// is expensive on GPUs and leaves no whole warps active; it is kept
// deliberately simple ("speed up kernel" is on the todo list above).
template <class T>
__global__ void
reduce0(T *g_idata, T *g_odata, const unsigned int n)
{
    // Dynamically-sized shared memory buffer, one element per thread.
    T *sdata = SharedMemory<T>();

    // Load one input element per thread into shared memory.
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;

    sdata[tid] = (i < n) ? g_idata[i] : 0;

    // All loads must complete before any thread starts reducing.
    __syncthreads();

    // Tree reduction in shared memory.
    // BUG FIX: the (tid + s) < blockDim.x guard prevents an out-of-bounds
    // shared-memory read that the original code performed when blockDim.x is
    // not a power of two.  For power-of-two block sizes (the only sizes the
    // host code in this file launches) behavior is unchanged.
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        // modulo arithmetic is slow!
        if ((tid % (2*s)) == 0 && (tid + s) < blockDim.x) {
            sdata[tid] += sdata[tid + s];
        }
        // Barrier kept outside the divergent branch, as required for a
        // block-wide __syncthreads().
        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum to global memory.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}



// FIX ME use cuda_device_query.hpp
// need to double check this
//
// Hard-coded device limits.  The values (512 threads/block, 65535 blocks per
// grid dimension) look like compute-capability 1.x limits -- TODO confirm
// against the actual device, as the FIX ME above suggests.
typedef unsigned long long UINT64;
typedef unsigned int UINT32;
const UINT32 MaxThreadsPerBlock = 512;
//! Max size of a block, dimension X (same as max threads per block).
const UINT32 MaxBlockDimX       = 512;
const UINT32 MaxBlockDimY       = 512;
const UINT32 MaxBlockDimZ       = 64;
const UINT32 MaxGridDimX        = 65535;
const UINT32 MaxGridDimY        = 65535;
const UINT32 MaxGridDimZ        = 1;
//! Threads per warp.  Block sizes should be a multiple of this so no warp is
//! partially populated.
const UINT32 WarpSize           = 32;
/*

    todo
        the interm2 buffer size can be reduced below M, where M = N/numberOfThreads
*/
/*! Cuda Accumulator
    Steps
    (1) Determine number of grids, based on global memory/input size
    (2) To handle an odd input size, reduce the count by 1, and then have the host
            add the last sample to the accumulated value
    (3) Determine number of levels (kernel executions). For example,
            if the input size is 4096, and the number of threads per
            block is 1024, then after one kernel call there will be 4 outputs,
            so a second call is used to reduce down to 1 output
    (4) Allocate memory on device input(N), output(1), interm1(M), interm2(M)
    (5) Copy input into device





*/
/*! Sum N floats on the GPU.
    \param input   host pointer to N floats
    \param N       number of input samples
    \param output  receives the accumulated sum

    Strategy: a first reduce0 launch produces one partial sum per block; if
    more than one block was needed, further reduce0 launches ping-pong between
    two intermediate buffers (d_C and d_E) until a single value remains.
    Odd-length inputs are handled by reducing N-1 samples on the device and
    adding the final sample on the host.

    Fixes relative to the original:
      - removed cudaMemcpy into an uninitialized host pointer (old debug path)
      - d_A/d_C/d_E/h_C are NULL-initialized so cleanup is always safe
        (d_E was previously freed while possibly uninitialized)
      - replaced invalid function-style casts `unsigned int(x)` (multi-word
        type names cannot be used that way) with C-style casts
      - dimGrid is now recomputed each level (previously every level launched
        the original, much larger, grid)
      - removed the no-op `inputSize = inputSize;` and unused h_E/levels
      - host buffer h_C is freed; N==0 and N==1 no longer launch a 0-block grid
*/
void cudaAccumulate(const float* input, const UINT64 N, float &output)
{
    // Trivial sizes: nothing for the device to do.
    if (N == 0) {
        output = 0.0f;
        return;
    }

    // (1) Odd input size: reduce the first N-1 samples on the device and add
    //     the last sample on the host afterwards.
    const bool odd = (N % 2 == 1);
    const UINT64 inputSize = odd ? (N - 1) : N;
    if (inputSize == 0) {   // N == 1
        output = input[0];
        return;
    }

    // NULL-initialize all pointers so the cleanup code below can test them
    // unconditionally.
    float *d_A = NULL, *d_C = NULL, *d_E = NULL;
    float *h_C = NULL;
    cudaError_t cudaError;

    // (2) Determine launch geometry.
    // FIX ME: what's up with warp size, and how to utilize it.
    const UINT32 samplesPerThread = 1;                   // samples per thread
    const UINT32 threadsPerBlock  = MaxThreadsPerBlock;  // threads per block
    const UINT32 samplesPerBlock  = threadsPerBlock * samplesPerThread;
    // blocks per grid (ceiling division)
    UINT32 blocksPerGrid =
        (UINT32)((inputSize + samplesPerBlock - 1) / samplesPerBlock);

    dim3 dimBlock(threadsPerBlock, 1, 1);
    dim3 dimGrid(blocksPerGrid, 1, 1);

    // byte sizes of the input and of one intermediate (partial-sum) buffer
    const size_t sizeIn   = (size_t)inputSize * sizeof(float);
    const size_t sizeIntm = (size_t)blocksPerGrid * sizeof(float);
    // dynamic shared memory per block: one float per thread
    const UINT32 smemSize = samplesPerBlock * sizeof(float);

    {// (3) Allocate device memory.
        cudaError = cudaMalloc((void**)&d_A, sizeIn);
        assert(cudaError == cudaSuccess);
        cudaError = cudaMalloc((void**)&d_C, sizeIntm);
        assert(cudaError == cudaSuccess);
        // second ping-pong buffer only needed for multi-level reductions
        if (blocksPerGrid != 1) {
            cudaError = cudaMalloc((void**)&d_E, sizeIntm);
            assert(cudaError == cudaSuccess);
        }
    }

    // FIX ME - introduce asynchronous write
    {// (4) Copy input into device.
        cudaError = cudaMemcpy(d_A, input, sizeIn, cudaMemcpyHostToDevice);
        assert(cudaError == cudaSuccess);
    }

    // direction == false: latest partials live in d_C; true: in d_E.
    bool direction = false;

    {// (5) Launch kernels, one level per loop iteration.
        reduce0<<<dimGrid, dimBlock, smemSize>>>(d_A, d_C, (unsigned int)inputSize);
        while (blocksPerGrid != 1) {
            const UINT32 isize = blocksPerGrid;        // elements at this level
            blocksPerGrid = (isize + samplesPerBlock - 1) / samplesPerBlock;
            // BUG FIX: the grid must shrink with each level; previously the
            // original (largest) grid was re-launched every time.
            dimGrid = dim3(blocksPerGrid, 1, 1);
            direction = !direction;
            if (direction) {
                reduce0<<<dimGrid, dimBlock, smemSize>>>(d_C, d_E, (unsigned int)isize);
            } else {
                reduce0<<<dimGrid, dimBlock, smemSize>>>(d_E, d_C, (unsigned int)isize);
            }
        }
        cutilCheckMsg("kernel launch failure");
    }

    {// (6) Wait for all kernels to complete.
        // Blocks until the device has completed all preceding requested tasks.
        cudaError = cudaThreadSynchronize();
        assert(cudaError == cudaSuccess);
    }

    {// (7) Copy the single-element result from device memory to host memory.
        h_C = (float*)malloc(sizeof(float));
        assert(h_C != NULL);
        cudaError = cudaMemcpy(h_C, direction ? d_E : d_C, sizeof(float),
                               cudaMemcpyDeviceToHost);
        assert(cudaError == cudaSuccess);
    }

    // Combine with the held-out last sample for odd-length inputs.
    output = odd ? (h_C[0] + input[N - 1]) : h_C[0];

    // Free host and device memory.
    free(h_C);
    if (d_A != NULL) {
        cudaError = cudaFree(d_A);
        assert(cudaError == cudaSuccess);
    }
    if (d_C != NULL) {
        cudaError = cudaFree(d_C);
        assert(cudaError == cudaSuccess);
    }
    if (d_E != NULL) {
        cudaError = cudaFree(d_E);
        assert(cudaError == cudaSuccess);
    }
    // NOTE(review): cudaThreadExit() tears down the whole CUDA context on
    // every call; kept to preserve the original behavior, but it probably
    // belongs in the caller, not in a reusable accumulator.
    cudaError = cudaThreadExit();
    assert(cudaError == cudaSuccess);
}
