/*
 * Copyright 1993-2006 NVIDIA Corporation.  All rights reserved.
 *
 * NOTICE TO USER:   
 *
 * This source code is subject to NVIDIA ownership rights under U.S. and 
 * international Copyright laws.  
 *
 * This software and the information contained herein is PROPRIETARY and 
 * CONFIDENTIAL to NVIDIA and is being provided under the terms and 
 * conditions of a Non-Disclosure Agreement.  Any reproduction or 
 * disclosure to any third party without the express written consent of 
 * NVIDIA is prohibited.     
 *
 * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE 
 * CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR 
 * IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH 
 * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF 
 * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.   
 * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, 
 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS 
 * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE 
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE 
 * OR PERFORMANCE OF THIS SOURCE CODE.  
 *
 * U.S. Government End Users.  This source code is a "commercial item" as 
 * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting  of 
 * "commercial computer software" and "commercial computer software 
 * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) 
 * and is provided to the U.S. Government only as a commercial end item.  
 * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 
 * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the 
 * source code with only those rights set forth herein.
 */

#ifndef _SCAN_BEST_KERNEL_CU_
#define _SCAN_BEST_KERNEL_CU_

#define NUM_BANKS 16
#define LOG_NUM_BANKS 4

#ifdef CHECK_BANK_CONFLICTS
#define TEMP(index)   CUT_BANK_CHECKER(temp, index)
#else
#define TEMP(index)   temp[index]
#endif

///////////////////////////////////////////////////////////////////////////////
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses 
//     n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) 
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using 
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets 
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm.  See Blelloch, 1990 "Prefix Sums 
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf
// 
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// Excellent paper "Prefix sums and their applications".
// http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
// @param g_odata  output data in global memory
// @param g_idata  input data in global memory
// @param g_blockSums output data -- the total sum of each block's scan is written to this block's index in g_blockSums
// @param n        input number of elements to scan from input data

//! Dynamically allocated shared memory for prescan kernels
extern  __shared__  int temp[];  

//! Stage one block's window of 2*blockDim.x elements into shared memory.
//! Each thread loads a pair of elements; the shared indices are padded so
//! the tree traversal that follows avoids most bank conflicts.
//! @param g_idata  input array in global memory
//! @param ai,bi    (out) padded shared-memory indices of this thread's pair
//! @param mem_ai,mem_bi (out) global-memory indices of the same pair
__device__ void loadSharedChunkFromMem(const int *g_idata, 
                                       int& ai, int& bi, 
                                       int& mem_ai, int& mem_bi)
{
    // Base of this block's chunk: each block consumes 2*blockDim.x inputs.
    const int chunkBase = __mul24(blockIdx.x, (blockDim.x << 1));

    // Global indices of the two elements handled by this thread.
    mem_ai = chunkBase + threadIdx.x;
    mem_bi = mem_ai + blockDim.x;

    // Unpadded shared-memory positions.
    ai = threadIdx.x;
    bi = threadIdx.x + blockDim.x;

    // Insert one pad slot every NUM_BANKS entries to reduce bank conflicts.
    ai += ai >> LOG_NUM_BANKS;
    bi += bi >> LOG_NUM_BANKS;

    // Stage both values; consumers synchronize before reading them back.
    TEMP(ai) = g_idata[mem_ai];
    TEMP(bi) = g_idata[mem_bi];
}

//! Stage a non-power-of-2 sized chunk into shared memory, zero-padding
//! positions at or beyond n so the scan tree still operates on a full
//! 2*blockDim.x window.
//! @param g_idata    input array in global memory
//! @param n          number of valid elements in this chunk
//! @param baseIndex  global index of the chunk's first element
//! @param ai,bi      (out) unpadded shared indices of this thread's pair
//! @param mem_ai,mem_bi (out) global indices of the same pair
//! @param bankOffsetA,bankOffsetB (out) bank-conflict padding offsets
__device__ void loadSharedChunkFromMemNP2(const int *g_idata, 
                                          int n, int baseIndex,
                                          int& ai, int& bi, 
                                          int& mem_ai, int& mem_bi, 
                                          int& bankOffsetA, int& bankOffsetB)
{
    int thid = threadIdx.x;
    mem_ai = baseIndex + thid;
    mem_bi = mem_ai + blockDim.x;

    ai = thid;
    bi = thid + blockDim.x;

    // compute spacing to avoid bank conflicts
    bankOffsetA = (ai >> LOG_NUM_BANKS);
    bankOffsetB = (bi >> LOG_NUM_BANKS);

    // Cache the computational window in shared memory, padding values at or
    // beyond n with zeros.  Fix: the first load was previously unguarded,
    // relying on the launch guaranteeing ai < n; guarding both loads keeps
    // identical behavior for valid launches and avoids an out-of-range read
    // when n <= blockDim.x.
    TEMP(ai + bankOffsetA) = (ai < n) ? g_idata[mem_ai] : 0;
    TEMP(bi + bankOffsetB) = (bi < n) ? g_idata[mem_bi] : 0;
}



//! Flush the scanned shared-memory window back to global memory.
//! Synchronizes first so every thread's scan results are visible.
//! @param g_odata        output array in global memory
//! @param ai,bi          padded shared indices of this thread's pair
//! @param mem_ai,mem_bi  global indices of the same pair
__device__ void storeSharedChunkToMem(int* g_odata, int ai, int bi, int mem_ai, int mem_bi)
{
    // Wait until all threads have finished writing shared memory before
    // any value is copied out.
    __syncthreads();

    // Each thread writes back the same pair of elements it loaded.
    g_odata[mem_ai] = TEMP(ai);
    g_odata[mem_bi] = TEMP(bi);
}

//! Flush a non-power-of-2 sized chunk back to global memory; only the
//! n valid elements are written (the zero padding is discarded).
//! @param g_odata        output array in global memory
//! @param n              number of valid elements in this chunk
//! @param ai,bi          unpadded shared indices of this thread's pair
//! @param mem_ai,mem_bi  global indices of the same pair
//! @param bankOffsetA,bankOffsetB  bank-conflict padding offsets from load
__device__ void storeSharedChunkToMemNP2(int* g_odata, int n, 
                                         int ai, int bi, int mem_ai, int mem_bi,
                                         int bankOffsetA, int bankOffsetB)
{
    // The scan in shared memory must be complete before reading it back.
    __syncthreads();

    // The second element may lie in the zero-padded tail, so it is only
    // written when it falls inside the n valid elements.
    g_odata[mem_ai] = TEMP(ai + bankOffsetA);
    if (bi < n)
    {
        g_odata[mem_bi] = TEMP(bi + bankOffsetB);
    }
}



//! Zero the (padded) last element of the shared window so the down-sweep
//! phase produces an exclusive scan. Only thread 0 does the write.
__device__ void clearLastElement()
{
    // A single thread is enough to clear the root of the reduction tree.
    if (threadIdx.x != 0)
        return;

    // Last element of the 2*blockDim.x window, adjusted by the same
    // bank-conflict padding applied when the data was loaded.
    int last = (blockDim.x << 1) - 1;
    TEMP(last + (last >> LOG_NUM_BANKS)) = 0;
}

//! Save this block's total sum (left in the last element by the up-sweep)
//! to g_blockSums, then zero that element so the down-sweep yields an
//! exclusive scan. Only thread 0 participates.
//! @param g_blockSums  per-block totals array in global memory
//! @param blockIndex   slot in g_blockSums to receive this block's total
__device__ void clearLastElementAndStoreSum(int *g_blockSums, int blockIndex)
{
    if (threadIdx.x == 0)
    {
        // Padded index of the final element, which holds the block total
        // after buildSum() has run.
        int last = (blockDim.x << 1) - 1;
        int idx  = last + (last >> LOG_NUM_BANKS);

        // Record the total first, then clear it -- order matters.
        g_blockSums[blockIndex] = TEMP(idx);
        TEMP(idx) = 0;
    }
}



//! Up-sweep (reduce) phase of the work-efficient scan: builds partial sums
//! in place in the shared array so that, after the loop, the (padded) last
//! element holds the sum of the whole 2*blockDim.x window.
//! Must be called by ALL threads of the block -- contains __syncthreads().
//! @return the final stride value (2*blockDim.x for power-of-two block
//!         sizes), to be passed to scanRootToLeaves()
__device__ unsigned int buildSum()
{
 	unsigned int thid = threadIdx.x;
    unsigned int offset = 1;
    
    // build the sum in place up the tree
    for (int d = blockDim.x; d > 0; d >>= 1)
    {
        // barrier is placed outside the divergent branch so that every
        // thread of the block reaches it at each tree level
        __syncthreads();

        if (thid < d)      
        {
            // indices of the two partial sums this thread combines
            int i  =  __mul24(__mul24(2, offset), thid);
            int ai =  i + offset - 1;
            int bi = ai + offset;

            // compute single-level offsets to avoid MOST bank conflicts
            ai += (ai >> LOG_NUM_BANKS);
            bi += (bi >> LOG_NUM_BANKS);

            TEMP(bi) += TEMP(ai);
        }

        // stride doubles at each level of the tree
        offset *= 2;
    }

    return offset;
}

//! Down-sweep phase: traverses the reduction tree from root to leaves,
//! converting the partial sums left by buildSum() into an exclusive scan.
//! The caller must first zero the (padded) last element via
//! clearLastElement() or clearLastElementAndStoreSum().
//! Must be called by ALL threads of the block -- contains __syncthreads().
//! @param offset  the stride value returned by buildSum()
__device__ void scanRootToLeaves(unsigned int offset)
{
 	unsigned int thid = threadIdx.x;

    // traverse down the tree building the scan in place
    for (int d = 1; d <= blockDim.x; d *= 2)
    {
        // halve the stride before each level (mirrors buildSum's doubling)
        offset >>= 1;

        // barrier outside the divergent branch: all threads must reach it
        __syncthreads();

        if (thid < d)
        {
            int i  =  __mul24(__mul24(2, offset), thid);
            int ai =  i + offset - 1;
            int bi = ai + offset;

            // compute single-level offsets to avoid MOST bank conflicts
            ai += (ai >> LOG_NUM_BANKS);
            bi += (bi >> LOG_NUM_BANKS);

            // down-sweep step: ai takes bi's value, bi accumulates both
            int t  = TEMP(ai);
            TEMP(ai) = TEMP(bi);
            TEMP(bi) += t;
        }
    }
}

//! Exclusive prefix scan of one power-of-2 sized block (2*blockDim.x
//! elements per block). Requires dynamic shared memory for temp[].
//! @param g_odata  output array in global memory
//! @param g_idata  input array in global memory
__global__ void prescan(int *g_odata, const int *g_idata)
{
    int ai, bi, mem_ai, mem_bi;

    // Stage this block's window of input into shared memory.
    loadSharedChunkFromMem(g_idata, ai, bi, mem_ai, mem_bi);

    // Up-sweep: reduce pairs in place up the tree.
    unsigned int stride = buildSum();

    // Zero the root, then down-sweep to form the exclusive scan.
    clearLastElement();
    scanRootToLeaves(stride);

    // Flush the scanned window back to global memory.
    storeSharedChunkToMem(g_odata, ai, bi, mem_ai, mem_bi);
}


//! Same as prescan(), but additionally records each block's total sum in
//! g_blockSums so a higher-level scan can stitch blocks together.
//! @param g_odata     output array in global memory
//! @param g_idata     input array in global memory
//! @param g_blockSums per-block totals, indexed by blockIdx.x
__global__ void prescanWithBlockSums(int *g_odata, 
                                     const int *g_idata, 
                                     int *g_blockSums)
{   
    int ai, bi, mem_ai, mem_bi;

    // Stage this block's window of input into shared memory.
    loadSharedChunkFromMem(g_idata, ai, bi, mem_ai, mem_bi);

    // Up-sweep, then save the block total before clearing the root.
    unsigned int stride = buildSum();
    clearLastElementAndStoreSum(g_blockSums, blockIdx.x);

    // Down-sweep to form the exclusive scan and write it out.
    scanRootToLeaves(stride);
    storeSharedChunkToMem(g_odata, ai, bi, mem_ai, mem_bi);
}

//! Exclusive prefix scan of a single non-power-of-2 sized chunk; the
//! shared window is zero-padded to 2*blockDim.x elements internally.
//! @param g_odata    output array in global memory
//! @param g_idata    input array in global memory
//! @param n          number of valid elements in the chunk
//! @param baseIndex  global index of the chunk's first element
__global__ void prescanNP2(int *g_odata, 
                           const int *g_idata, 
                           int n, 
                           int baseIndex)
{
    int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;

    // Stage the chunk into shared memory, zero-padding past n.
    loadSharedChunkFromMemNP2(g_idata, n, baseIndex,
                              ai, bi, mem_ai, mem_bi,
                              bankOffsetA, bankOffsetB);

    // Up-sweep, clear the root, then down-sweep.
    unsigned int stride = buildSum();
    clearLastElement();
    scanRootToLeaves(stride);

    // Write back only the n valid results.
    storeSharedChunkToMemNP2(g_odata, n, ai, bi, mem_ai, mem_bi,
                             bankOffsetA, bankOffsetB);
}

//! Same as prescanNP2(), but additionally records the chunk's total sum in
//! g_blockSums[blockIndex] for a higher-level scan.
//! @param g_odata     output array in global memory
//! @param g_idata     input array in global memory
//! @param g_blockSums per-block totals in global memory
//! @param n           number of valid elements in the chunk
//! @param blockIndex  slot in g_blockSums for this chunk's total
//! @param baseIndex   global index of the chunk's first element
__global__ void prescanNP2WithBlockSums(int *g_odata, 
                                        const int *g_idata, 
                                        int *g_blockSums, 
                                        int n, 
                                        int blockIndex, 
                                        int baseIndex)
{
    int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB;

    // Stage the chunk into shared memory, zero-padding past n.
    loadSharedChunkFromMemNP2(g_idata, n, baseIndex,
                              ai, bi, mem_ai, mem_bi,
                              bankOffsetA, bankOffsetB);

    // Up-sweep, then save the total before clearing the root.
    unsigned int stride = buildSum();
    clearLastElementAndStoreSum(g_blockSums, blockIndex);

    // Down-sweep and write back only the n valid results.
    scanRootToLeaves(stride);
    storeSharedChunkToMemNP2(g_odata, n, ai, bi, mem_ai, mem_bi,
                             bankOffsetA, bankOffsetB);
}


//! Adds a per-block uniform value (typically a scanned block sum) to each
//! element of this block's window of 2*blockDim.x elements.
//! @param g_data       data to update in global memory
//! @param uniforms     per-block increments; this block reads
//!                     uniforms[blockIdx.x + blockOffset]
//! @param n            number of valid elements in this block's window
//! @param blockOffset  offset into uniforms for this launch
//! @param baseIndex    global index of the window's first element
__global__ void uniformAdd(int *g_data, 
                           int *uniforms, 
                           int n, 
                           int blockOffset, 
                           int baseIndex)
{
    // The increment is identical for every thread, so one thread fetches it
    // into shared memory and the rest read it after the barrier.
    __shared__ int uni;
    if (threadIdx.x == 0)
        uni = uniforms[blockIdx.x + blockOffset];
    
    unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x; 

    __syncthreads();
    
    // Two adds per thread. The first element is assumed in range (the
    // launch sizes blockDim.x so that threadIdx.x < n -- TODO confirm at
    // call sites). The second may lie past the n valid elements, so it is
    // guarded explicitly: the original "(cond) * uni" trick still performed
    // an out-of-range read-modify-write (adding 0), which is only safe if
    // the buffer is padded.
    g_data[address] += uni;
    if (threadIdx.x + blockDim.x < n)
        g_data[address + blockDim.x] += uni;
}


#endif // #ifndef _SCAN_BEST_KERNEL_CU_

