#include <cstdio>

#include "../../include/GlobalMemory.hpp"

/// Per-block sum reduction: each block sums 2*blockDim.x consecutive ints of
/// a (zero-padded past data_size) and writes one partial sum to dt[blockIdx.x].
///
/// Launch requirements:
///  - dynamic shared memory: sizeof(int) * 2 * blockDim.x
///  - blockDim.x must be a power of two and >= 32
///  - __shfl_down_sync requires CUDA 9+ (SM30+)
__global__ static void my_reduce_kernel(int* a, int data_size, int* dt)
{
	extern __shared__ int s[];

	int idx = threadIdx.x;
	int baseIdx = 2*blockDim.x*blockIdx.x;

	// Each thread loads two elements; out-of-range slots are filled with the
	// additive identity so partial tail blocks still reduce correctly.
	if ((baseIdx + idx) < data_size)
		s[idx] = a[baseIdx + idx];
	else
		s[idx] = 0;

	if ((baseIdx + idx + blockDim.x) < data_size)
		s[idx + blockDim.x] = a[baseIdx + idx + blockDim.x];
	else
		s[idx + blockDim.x] = 0;

	__syncthreads();

	// Tree reduction in shared memory down to 64 surviving partials.
	// __syncthreads() stays outside the divergent if so every thread in the
	// block reaches the barrier.
	for(int n = blockDim.x; n > 32; n /= 2)
	{
		if(idx < n)
			s[idx] = s[idx] + s[idx + n];
		__syncthreads();
	}

	///single warp
	///!!! very necessary IF condition !!! (idx < 32) means "in a single warp"
	/// BUGFIX: the previous unrolled shared-memory steps here had no
	/// __syncwarp()/volatile between dependent read-modify-write steps, which
	/// is a data race under Volta+ independent thread scheduling (and subject
	/// to register caching of non-volatile shared loads even before that).
	/// Finishing in registers with warp shuffles avoids shared-memory ordering
	/// entirely. All 32 lanes take this branch (blockDim.x >= 32), so the
	/// full mask 0xffffffff is the correct participant set.
	if(idx < 32)
	{
		// Fold 64 partials to 32; s[idx + 32] was published before the last
		// __syncthreads(), so this read is safe.
		int v = s[idx] + s[idx + 32];

		// 32 -> 1 via register shuffles; lane 0 accumulates the block sum.
		for (int offset = 16; offset > 0; offset >>= 1)
			v += __shfl_down_sync(0xffffffffu, v, offset);

		if(idx == 0)
			dt[blockIdx.x] = v;
	}
}

/// Reduces the data_size integers in a_d to a single sum, left in a_d[0].
///
/// Repeatedly launches my_reduce_kernel: each pass collapses 2*block_dim
/// elements per block into one partial sum per block, copies the partials
/// back into a_d, and reduces them again until a single value remains.
///
/// Preconditions (imposed by the kernel): block_dim is a power of two and
/// >= 32; block_num on entry covers data_size, i.e.
/// block_num == ceil(data_size / (2.0 * block_dim)).
///
/// @param a_d       device buffer holding the input; a_d[0] receives the result
/// @param block_dim threads per block for every launch
/// @param block_num number of blocks for the first launch
void gpu_op(CH::GlobalMemory<int>& a_d, const int& block_dim, int block_num)
{
	int data_size = a_d.size();

	while(block_num >= 1)
	{
		// One partial sum per block; reallocated each pass because the
		// partial count shrinks as the reduction converges.
		CH::GlobalMemory<int> dt(block_num);

		my_reduce_kernel <<<block_num, block_dim, sizeof(int)*2*block_dim>>>
			(a_d.get_memory_ptr(), data_size, dt.get_memory_ptr());

		// Kernel launches do not return a status directly; bad launch
		// configurations only surface through cudaGetLastError().
		cudaError_t err = cudaGetLastError();
		if (err != cudaSuccess)
		{
			std::fprintf(stderr, "gpu_op: kernel launch failed: %s\n",
				cudaGetErrorString(err));
			return;
		}

		// Feed this pass's partial sums back in as the next pass's input.
		err = cudaMemcpy(a_d.get_memory_ptr(), dt.get_memory_ptr(),
			sizeof(int)*block_num, cudaMemcpyDeviceToDevice);
		if (err != cudaSuccess)
		{
			std::fprintf(stderr, "gpu_op: cudaMemcpy failed: %s\n",
				cudaGetErrorString(err));
			return;
		}

		if(block_num == 1)
			break;

		// The block_num partials just produced become the next input.
		data_size = block_num;
		block_num = (block_num + 2*block_dim - 1)/(2*block_dim);
	}
}
