/*
 * Reduce.h
 *
 *  Created on: Mar 25, 2011
 *      Author: curt
 */

#ifndef REDUCE_H_
#define REDUCE_H_

#include <vector>
#include <cmath>
#include "CUDA_vec.h"
#include "utility.h"
//#include "cuPrintf.cu"


/*
 * One pass of a block-level tree reduction.
 *
 * Each block folds up to blockDim.x elements of x into a single partial
 * result written to result[blockIdx.x].  Launch with dynamic shared
 * memory of sizeof(T) * blockDim.x bytes (backs valcache below).
 *
 * Preconditions:
 *  - blockDim.x is a power of two (the s >>= 1 halving assumes it);
 *  - gridDim.x == ceil(level_length / blockDim.x), so every block owns
 *    at least one valid element (thread 0's write below relies on this).
 */
template<typename T, typename BinaryOp>
__global__
void
reduce(const T * x, T * result, int level_length, BinaryOp op)
{
	extern __shared__ T valcache[];
	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * blockDim.x + tid;

	// Load this thread's element.  Out-of-range threads leave their slot
	// unwritten; the i + s guard below never reads those slots.
	if(i < level_length)
	{
		valcache[tid] = x[i];
	}

	// BUGFIX: __syncthreads() must be reached by every thread in the
	// block.  The original placed the barriers inside the
	// i < level_length branch, which is undefined behavior whenever the
	// last block is only partially full.
	__syncthreads();

	for(unsigned int s = blockDim.x/2; s > 0; s >>= 1)
	{
		// i + s < level_length implies i < level_length too, so both
		// valcache[tid] and valcache[tid+s] were written above.
		if(tid < s && i + s < level_length)
		{
			valcache[tid] = op(valcache[tid], valcache[tid+s]);
		}

		__syncthreads();
	}

	// gridDim.x is sized from level_length, so thread 0 of every block
	// has i < level_length and valcache[0] is a valid partial result.
	if(tid == 0)
	{
		result[blockIdx.x] = valcache[0];
	}
}

/*
 * Copy `length` elements of device_x back to the host and fold them with
 * op, seeded with op.identity().  Used as the final (small) stage of
 * CUDA_reduce once the partials fit in one block.
 *
 * Returns op.identity() when length <= 0.  BinaryOp must provide an
 * identity() member whose value is neutral under op.
 */
template<typename T, typename BinaryOp>
T sequential_reduce(const T * device_x, int length, BinaryOp op)
{
	T result = op.identity();
	if(length <= 0)
	{
		// Nothing to copy; also keeps &host_x[0] below well-defined.
		return result;
	}

	// std::vector instead of new[]/delete[]: no leak if op() throws.
	std::vector<T> host_x(length);
	cudaMemcpy(&host_x[0], device_x, length * sizeof(T), cudaMemcpyDeviceToHost);
	// Surface a failed copy instead of silently reducing stale data
	// (matches the file's existing error-reporting convention).
	print_CUDA_err_msg("sequential_reduce: after cudaMemcpy");

	for(int i = 0; i < length; i++)
	{
		result = op(result, host_x[i]);
	}
	return result;
}

/*
 * Reduce every element of x with op on the GPU.
 *
 * Strategy: repeated passes of the `reduce` kernel, each collapsing
 * blocks of 2^block_levels elements into one partial per block, until at
 * most one block's worth of partials remains; those are finished on the
 * host by sequential_reduce.
 *
 * BinaryOp must be associative and provide identity(); floating-point
 * results may differ slightly from a serial fold due to reordering.
 */
template<typename T, typename BinaryOp>
T CUDA_reduce(const CUDA_vec<T> & x, BinaryOp op)
{
	int len = x.getSize();

	const int block_levels = 7;                // log2 of threads per block
	const int numThreads = 1 << block_levels;  // power of two, as `reduce` requires

	dim3 blockDim(numThreads);
	// Integer ceil-division: exact for all len, unlike the float
	// ceil((float)len / numThreads) it replaces, which can round wrong
	// once len exceeds float's 24-bit integer precision.
	dim3 gridDim((len + numThreads - 1) / numThreads);

	int resultLength = gridDim.x;
	T * result;
	print_dim(blockDim);
	print_dim(gridDim);

	print_CUDA_err_msg("Before first malloc");
	cudaMalloc(&result, sizeof(T) * gridDim.x);
	print_CUDA_err_msg("After first malloc");

	// First pass: one partial result per block of the input.
	reduce<<<gridDim, blockDim, sizeof(T) * numThreads>>>(x.raw_pointer(), result, len, op);
	print_CUDA_err_msg("After first reduce");

	// Keep collapsing the partials until a single block can hold them.
	T * prev_result;
	while(resultLength > numThreads)
	{
		prev_result = result;
		gridDim.x = (resultLength + numThreads - 1) / numThreads;

		cudaMalloc(&result, sizeof(T) * gridDim.x);
		reduce<<<gridDim, blockDim, sizeof(T) * numThreads>>>(prev_result, result, resultLength, op);
		resultLength = gridDim.x;
		cudaFree(prev_result);
	}

	// At most numThreads partials left: finish on the host.
	T result_value = sequential_reduce(result, resultLength, op);
	cudaFree(result);
	return result_value;
}

#endif /* REDUCE_H_ */
