#include "kuaicu.h"
#include <cassert>
#include <iostream>

// Barrier used inside the warp-synchronous tail of the shared-memory reduction
// (sumBlockF).  Under __DEVICE_EMULATION__ threads run serially, so a full
// block barrier is required.  On real hardware the tail runs inside the
// divergent `if (tid < 32)` branch, where a block-wide __syncthreads() would be
// executed in divergent control flow (undefined behavior / deadlock); the
// original defined EMUSYNC as __syncthreads() in both branches, which is
// exactly that bug.  Use a warp-level barrier there instead (requires CUDA 9+;
// also provides the memory ordering the old implicit-warp-sync idiom relied on).
#ifdef __DEVICE_EMULATION__
#define EMUSYNC __syncthreads()
#else
#define EMUSYNC __syncwarp()
#endif

// Element-wise addition: x[i*incx] = y[i*incy] + z[i*incz] for i in [0, n).
// One thread per element; launch with at least n threads in total.
__global__ void 
	add_vector_f(
		float x[], int incx, int n, 
		const float y[], int incy,
		const float z[], int incz
		) {
	const int i = THREAD_ID;
	if (i >= n) {
		return;  // surplus threads in the last block do nothing
	}
	x[MUL(i, incx)] = y[MUL(i, incy)] + z[MUL(i, incz)];
}

// Element-wise subtraction: x[i*incx] = y[i*incy] - z[i*incz] for i in [0, n).
// One thread per element; launch with at least n threads in total.
__global__ void 
	sub_vector_f(
		float x[], int incx, int n, 
		const float y[], int incy,
		const float z[], int incz
		) {
	const int i = THREAD_ID;
	if (i >= n) {
		return;  // surplus threads in the last block do nothing
	}
	x[MUL(i, incx)] = y[MUL(i, incy)] - z[MUL(i, incz)];
}

// Element-wise multiplication: x[i*incx] = y[i*incy] * z[i*incz] for i in [0, n).
// One thread per element; launch with at least n threads in total.
__global__ void 
	mul_vector_f(
		float x[], int incx, int n, 
		const float y[], int incy,
		const float z[], int incz
		) {
	const int i = THREAD_ID;
	if (i >= n) {
		return;  // surplus threads in the last block do nothing
	}
	x[MUL(i, incx)] = y[MUL(i, incy)] * z[MUL(i, incz)];
}

// Element-wise division: x[i*incx] = y[i*incy] / z[i*incz] for i in [0, n).
// One thread per element; no zero-divisor check (IEEE inf/nan semantics apply).
__global__ void 
	div_vector_f(
		float x[], int incx, int n, 
		const float y[], int incy,
		const float z[], int incz
		) {
	const int i = THREAD_ID;
	if (i >= n) {
		return;  // surplus threads in the last block do nothing
	}
	x[MUL(i, incx)] = y[MUL(i, incy)] / z[MUL(i, incz)];
}

// Element-wise reciprocal: x[i*incx] = 1 / y[i*incy] for i in [0, n).
// (The previous header comment said "x = y / z", which did not match this kernel.)
__global__ void 
	rev_vector_f(
		float x[], int incx, int n, 
		const float y[], int incy
		) {
	const int i = THREAD_ID;
	if (i >= n) {
		return;  // surplus threads in the last block do nothing
	}
	x[MUL(i, incx)] = 1 / y[MUL(i, incy)];
}

namespace kuai { namespace cuda {

	// self = x + y (element-wise).  All three arrays must have the same size.
	// Returns self to allow chaining.
	FloatArray& FloatArray::add(const FloatArray& x, const FloatArray& y) {
		assert (self.size() == x.size());
		assert (self.size() == y.size());
		int nblock = block_size(self.size());
		// Kernel signature is (x, incx, n, y, incy, z, incz).  The original
		// passed size() as the stride and skip() as the element count, so only
		// skip() elements (typically 1) were ever computed.
		add_vector_f<<<nblock, THREADS_IN_BLOCK>>>(
			self.c_pointer(), self.skip(), self.size(), 
			x.c_pointer(), x.skip(), y.c_pointer(), y.skip());
		return self;
	}
	// self = x - y (element-wise).  All three arrays must have the same size.
	// Returns self to allow chaining.
	FloatArray& FloatArray::sub(const FloatArray& x, const FloatArray& y) {
		assert (self.size() == x.size());
		assert (self.size() == y.size());
		int nblock = block_size(self.size());
		// Kernel signature is (x, incx, n, y, incy, z, incz).  The original
		// passed size() as the stride and skip() as the element count, so only
		// skip() elements (typically 1) were ever computed.
		sub_vector_f<<<nblock, THREADS_IN_BLOCK>>>(
			self.c_pointer(), self.skip(), self.size(), 
			x.c_pointer(), x.skip(), y.c_pointer(), y.skip());
		return self;
	}
	// self = x * y (element-wise).  All three arrays must have the same size.
	// Returns self to allow chaining.
	FloatArray& FloatArray::mul(const FloatArray& x, const FloatArray& y) {
		assert (self.size() == x.size());
		assert (self.size() == y.size());
		int nblock = block_size(self.size());
		// Kernel signature is (x, incx, n, y, incy, z, incz).  The original
		// passed size() as the stride and skip() as the element count, so only
		// skip() elements (typically 1) were ever computed.
		mul_vector_f<<<nblock, THREADS_IN_BLOCK>>>(
			self.c_pointer(), self.skip(), self.size(), 
			x.c_pointer(), x.skip(), y.c_pointer(), y.skip());
		return self;
	}
	// self = x / y (element-wise).  All three arrays must have the same size.
	// Returns self to allow chaining.
	FloatArray& FloatArray::div(const FloatArray& x, const FloatArray& y) {
		assert (self.size() == x.size());
		assert (self.size() == y.size());
		int nblock = block_size(self.size());
		// Kernel signature is (x, incx, n, y, incy, z, incz).  The original
		// passed size() as the stride and skip() as the element count, so only
		// skip() elements (typically 1) were ever computed.
		div_vector_f<<<nblock, THREADS_IN_BLOCK>>>(
			self.c_pointer(), self.skip(), self.size(), 
			x.c_pointer(), x.skip(), y.c_pointer(), y.skip());
		return self;
	}
	
	// In-place scalar multiply: self *= v0.
	// NOTE(review): the cuBLAS call is commented out, so this is currently a
	// no-op that only returns self — confirm whether that is intentional or
	// cuBLAS support is pending.  (`self` is presumably a macro for (*this)
	// from kuaicu.h — verify.)
	FloatArray& FloatArray::operator*=(const float v0) {
		// cublasSscal(size(), v0, f_pointer(), skip());
		return self;
	};
	
	// self = 1 / x (element-wise reciprocal).  Sizes must match.
	// Returns self to allow chaining.
	FloatArray& FloatArray::rev(const FloatArray& x) {
		assert (self.size() == x.size());
		int nblock = block_size(self.size());
		// Kernel signature is (x, incx, n, y, incy).  The original passed
		// size() as the stride and skip() as the element count, so only
		// skip() elements (typically 1) were ever computed.
		rev_vector_f<<<nblock, THREADS_IN_BLOCK>>>(
			self.c_pointer(), self.skip(), self.size(), x.c_pointer(), x.skip());
		return self;	
	}
	
	// self += a * x (AXPY).  Sizes must match.
	// NOTE(review): the cuBLAS call is commented out, so this currently only
	// checks sizes and returns self unchanged — confirm whether that is
	// intentional or cuBLAS support is pending.
	FloatArray& FloatArray::add_ax(float a, const FloatArray& x) {
		assert (self.size() == x.size());
		// cublasSaxpy(self.size(), a, x.f_pointer(), x.skip(), self.f_pointer(), self.skip());
		return self;
	}
	
	

} }

/*************************************************************************************/

// Global ticket counter used by sumSinglePassF to detect the last block to
// finish phase 1 (threadfence-reduction pattern); the last block resets it to
// 0 so the next launch starts clean.
__device__ unsigned int retirementCount = 0;

// Tree-reduce blockSize partial sums held in shared memory into sdata[0].
// Preconditions: sdata[0..blockSize-1] are initialized, blockSize == blockDim.x
// is a power of two, and the block has already issued __syncthreads() after
// writing sdata.
//
// Fix vs. original: the original executed EMUSYNC (defined as __syncthreads())
// inside the divergent `if (tid < 32)` branch — undefined behavior whenever
// blockSize >= 64 — and its warp phase read sdata[tid + 16] for tid up to 31
// (out of bounds when only blockSize floats of shared memory are allocated)
// while also lacking `volatile` for the implicit-warp-sync idiom.  Here every
// barrier is reached by all threads of the block and every read is in-bounds,
// which is correct on all architectures at the cost of a few extra barriers.
template <unsigned int blockSize>
__device__ void
sumBlockF(float *sdata, const unsigned int tid)
{
    // do reduction in shared mem; each step halves the number of active threads
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid <  64) { sdata[tid] += sdata[tid +  64]; } __syncthreads(); }
    if (blockSize >=  64) { if (tid <  32) { sdata[tid] += sdata[tid +  32]; } __syncthreads(); }
    if (blockSize >=  32) { if (tid <  16) { sdata[tid] += sdata[tid +  16]; } __syncthreads(); }
    if (blockSize >=  16) { if (tid <   8) { sdata[tid] += sdata[tid +   8]; } __syncthreads(); }
    if (blockSize >=   8) { if (tid <   4) { sdata[tid] += sdata[tid +   4]; } __syncthreads(); }
    if (blockSize >=   4) { if (tid <   2) { sdata[tid] += sdata[tid +   2]; } __syncthreads(); }
    if (blockSize >=   2) { if (tid <   1) { sdata[tid] += sdata[tid +   1]; } __syncthreads(); }
}

// First reduction pass: each block grid-stride-loops over g_idata (element
// stride `inc`), accumulating a per-thread partial sum in dynamic shared
// memory, then tree-reduces the block's partials and writes one value per
// block to g_odata[blockIdx.x].
// Requirements: blockSize == blockDim.x, and at least blockSize floats of
// dynamic shared memory.  nIsPow2 elides the tail bounds check when n is a
// power of two.
template <unsigned int blockSize, bool nIsPow2>
__device__ void
sumBlocksF(const float *g_idata, int inc, float *g_odata, unsigned int n)
{
    extern __shared__ float sdata[];

    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    // each thread starts at its global slot and handles two elements
    // (i and i + blockSize) per grid-stride iteration
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    sdata[tid] = 0;

    // we reduce multiple elements per thread.  The number is determined by the 
    // number of active thread blocks (via gridDim).  More blocks will result
    // in a larger gridSize and therefore fewer elements per thread
    while (i < n) {
        sdata[tid] += g_idata[i*inc];
        // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays
        if (nIsPow2 || i + blockSize < n) {
            sdata[tid] += g_idata[(i+blockSize)*inc];  
        }
        i += gridSize;
    } 

    // all partials must be visible in shared memory before the tree reduction
    __syncthreads();

    // do reduction in shared mem
    sumBlockF<blockSize>(sdata, tid);
   
    // write result for this block to global mem 
    if (tid == 0) {
		g_odata[blockIdx.x] = sdata[0];
	}
}

// Single-pass sum reduction (threadfence-reduction pattern): every block
// reduces its slice of g_idata into g_odata[blockIdx.x]; the last block to
// finish — detected via an atomic ticket on retirementCount — then re-reduces
// the per-block partials so that g_odata[0] holds the total.
// NOTE(review): the last block reloads only g_odata[tid] for tid < blockDim.x,
// so correctness requires gridDim.x <= blockDim.x — confirm the launch
// configuration guarantees this.
template <unsigned int blockSize, bool nIsPow2>
__global__ void sumSinglePassF(const float *g_idata, int inc, float *g_odata, unsigned int n)
{

    //
    // PHASE 1: Process all inputs assigned to this block
    //

    sumBlocksF<blockSize, nIsPow2>(g_idata, inc, g_odata, n);

    //
    // PHASE 2: Last block finished will process all partial sums
    //

    if (gridDim.x > 1)
    {
        const unsigned int tid = threadIdx.x;
        __shared__ bool amLast;
        // same dynamic shared-memory allocation as sdata in sumBlocksF
        extern float __shared__ smem[];

        // wait until all outstanding memory instructions in this thread are finished
        // (makes this block's g_odata write visible before taking a ticket)
        __threadfence();

        // Thread 0 takes a ticket
        if( tid==0 )
        {
            // atomicInc wraps at gridDim.x, so tickets run 0..gridDim.x-1
            unsigned int ticket = atomicInc(&retirementCount, gridDim.x);
            // If the ticket ID is equal to the number of blocks, we are the last block!
            amLast = (ticket == gridDim.x-1);
        }
        // broadcast amLast (written by thread 0) to the whole block
        __syncthreads();

        // The last block sums the results of all other blocks
        if( amLast )
        {
            // load block results back into shared memory
            smem[tid] = (tid < gridDim.x) ? g_odata[tid] : 0;
            
            __syncthreads();
            
            sumBlockF<blockSize>(smem, tid);
            
            if( tid==0 )  
            {
                g_odata[0] = smem[0];
                
                // reset retirement count so that next run succeeds
                retirementCount = 0; 
            }
        }
    }
}

// Host-side launcher for the single-pass sum reduction.
// Reduces `size` floats read from d_idata (element stride `inc`) into
// d_odata[0], using `blocks` x `threads`.  `threads` must be a power of two
// in [1, 512]; d_odata must hold at least `blocks` floats.
void sumSinglePassF(int size, int blocks, int threads, 
	const float d_idata[], int inc, float d_odata[])
{
    // The last surviving block reloads one partial sum per block from
    // d_odata[tid] with tid < threads, so every partial must be covered.
    assert (blocks <= threads);

    // When threads <= 32 the warp-synchronous tail of sumBlockF may read up to
    // sdata[threads + threads/2 - 1], so over-allocate shared memory in that
    // case (same workaround as NVIDIA's reduction sample); the original
    // allocated exactly threads floats, which is out of bounds there.
    int smemSize = (threads <= 32) ? (2 * threads * (int)sizeof(float))
                                   : (threads * (int)sizeof(float));

    // choose which of the optimized versions of reduction to launch
    if (kuai::isPow2(size))
    {
        switch (threads)
        {
        case 512:
            sumSinglePassF<512, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case 256:
            sumSinglePassF<256, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case 128:
            sumSinglePassF<128, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case 64:
            sumSinglePassF< 64, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case 32:
            sumSinglePassF< 32, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case 16:
            sumSinglePassF< 16, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case  8:
            sumSinglePassF<  8, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case  4:
            sumSinglePassF<  4, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case  2:
            sumSinglePassF<  2, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        case  1:
            sumSinglePassF<  1, true><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
        default:
			assert (false);
        }
    }
    else
    {
        switch (threads)
        {
            case 512:
                sumSinglePassF<512, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case 256:
                sumSinglePassF<256, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case 128:
                sumSinglePassF<128, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case 64:
                sumSinglePassF< 64, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case 32:
                sumSinglePassF< 32, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case 16:
                sumSinglePassF< 16, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case  8:
                sumSinglePassF<  8, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case  4:
                sumSinglePassF<  4, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case  2:
                sumSinglePassF<  2, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            case  1:
                sumSinglePassF<  1, false><<< blocks, threads, smemSize >>>(d_idata, inc, d_odata, size); break;
            // keep both switches consistent: reject non-power-of-two threads
            default:
                assert (false);
        }
	}
}


namespace kuai { namespace cuda {

	////////////////////////////////////////////////////////////////////////////////
	// Pick a launch configuration for the single-pass sum reduction.
	// Threads per block is the minimum of THREADS_IN_BLOCK and a power of two
	// derived from n/2 (each thread consumes two elements on the first pass);
	// the block count is then capped at MAX_BLOCKS.
	////////////////////////////////////////////////////////////////////////////////
	void getNumBlocksAndThreads(int n, int &blocks, int &threads)
	{
		if (n == 1) 
		{
			// degenerate input: a single thread suffices
			blocks = 1;
			threads = 1;
		}
		else
		{
			if (n < THREADS_IN_BLOCK*2) {
				threads = nextPow2(n / 2);
			} else {
				threads = THREADS_IN_BLOCK;
			}
			blocks = n / (threads * 2);
			if (blocks < 1) {
				blocks = 1;
			}
		}

		if (blocks > MAX_BLOCKS) {
			blocks = MAX_BLOCKS;
		}
	}
} }


/*************************************************************************************/