#include <cstdlib>
#include <iostream>

#include <cuda_runtime.h>
//////////////////////////////////////////////////////////////////////////
using namespace std;
//////////////////////////////////////////////////////////////////////////
#define N (512*512)
#define THREADS_PER_BLOCK       512
//////////////////////////////////////////////////////////////////////////
// Sum-reduce the input array into *c.
//
// Launch layout: 1-D grid, blockDim.x == THREADS_PER_BLOCK (must be a power
// of two); a[] must hold at least gridDim.x * blockDim.x ints.
// Precondition: *c must be zero-initialized by the host (cudaMemset) before
// launch, because each block atomically accumulates its partial sum into it.
__global__ void reduction(int *a, int *c)
{
	__shared__ int tmp[THREADS_PER_BLOCK];
	int idx = blockIdx.x * blockDim.x + threadIdx.x;

	//Stage one element per thread into shared memory
	tmp[threadIdx.x] = a[idx];
	__syncthreads();

	//Tree reduction in shared memory. Sequential addressing keeps the
	//active threads contiguous (no '%' and less warp divergence than the
	//interleaved 2*i scheme). Barrier is outside the branch so every
	//thread in the block reaches it.
	for(int stride = blockDim.x/2; stride > 0; stride >>= 1)
	{
		if(threadIdx.x < stride)
		{
			tmp[threadIdx.x] += tmp[threadIdx.x + stride];
		}
		__syncthreads();
	}

	//BUG FIX: previously every thread of every block did "*c = tmp[0]",
	//a cross-block race that left *c holding one arbitrary block's
	//partial sum. Instead, one thread per block adds its block's partial
	//sum into the (host-zeroed) accumulator.
	if(threadIdx.x == 0)
	{
		atomicAdd(c, tmp[0]);
	}
}
//////////////////////////////////////////////////////////////////////////
// Driver: fill a random input array, reduce it on the GPU, and verify the
// result against a CPU reference sum. Returns 0 on success, 1 on any
// CUDA/allocation failure or on a result mismatch.
int main(void)
{
	//Host copies of a, c
	int *a, *c;

	//Allocate Host copies of a, c (malloc can fail for N ints)
	a = (int*)malloc(N*sizeof(int));
	c = (int*)malloc(  sizeof(int));
	if(a == NULL || c == NULL)
	{
		cerr << "host malloc failed" << endl;
		return 1;
	}

	//Device copies of a, c
	int *dev_a, *dev_c;

	//Allocate Device copies of a, c. Every CUDA call is checked: errors
	//are sticky and otherwise surface mysteriously in later calls.
	if(cudaMalloc((void**)&dev_a, N*sizeof(int)) != cudaSuccess ||
	   cudaMalloc((void**)&dev_c,   sizeof(int)) != cudaSuccess)
	{
		cerr << "cudaMalloc failed" << endl;
		return 1;
	}

	//Random input, plus a CPU reference sum so the GPU result can be
	//verified below. (Max possible sum 512*512*999 fits in 32-bit int.)
	long long expected = 0;
	for(int i=0; i<N; i++)
	{
		a[i] = rand()%1000;
		expected += a[i];
	}

	//Copy input to device
	if(cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess)
	{
		cerr << "cudaMemcpy H2D failed" << endl;
		return 1;
	}

	//BUG FIX: dev_c was never initialized. The reduction accumulates into
	//*dev_c, so it must start at zero.
	if(cudaMemset(dev_c, 0, sizeof(int)) != cudaSuccess)
	{
		cerr << "cudaMemset failed" << endl;
		return 1;
	}

	//Launch reduction() kernel (N is an exact multiple of THREADS_PER_BLOCK,
	//so every thread has a valid element and no bounds guard is needed)
	reduction<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_a, dev_c);

	//Launch-configuration errors only surface via cudaGetLastError()
	cudaError_t err = cudaGetLastError();
	if(err != cudaSuccess)
	{
		cerr << "kernel launch failed: " << cudaGetErrorString(err) << endl;
		return 1;
	}

	//Copy Device result back to host copy of c (blocking cudaMemcpy also
	//synchronizes with the kernel, so no explicit cudaDeviceSynchronize)
	if(cudaMemcpy(c, dev_c, sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
	{
		cerr << "cudaMemcpy D2H failed" << endl;
		return 1;
	}

	//Report and verify against the CPU reference
	cout << "GPU sum = " << *c << ", CPU sum = " << expected << endl;
	int status = (*c == (int)expected) ? 0 : 1;

	//Deallocate Host copies
	free(a);
	free(c);

	//Deallocate Device copies
	cudaFree(dev_a);
	cudaFree(dev_c);

	return status;
}
//////////////////////////////////////////////////////////////////////////
