#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// 2^20 = 1048576
#define ARRAY_SIZE (1 << 20)  
#define MAXTHREADPERBLOCK 1024

// In-place tree reduction over global memory.
// Each block folds its blockDim.x-wide slice of d_in down to one value and
// writes that partial sum to d_out[blockIdx.x].  d_in is DESTROYED (used as
// scratch).  Odd window lengths are handled by folding ceil(len/2) halves,
// so any blockDim.x works; tail blocks shorter than blockDim.x are handled
// by the idx + s < n guard (unpaired elements are folded in a later round).
__global__ void global_reduce_kernel(float *d_out, float *d_in, int n){
	// idx is the global index of the thread
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	// tid is the index of the thread in the current block
	int tid = threadIdx.x;
	// len = number of elements of this block's slice still in play
	for(unsigned int len = blockDim.x; len > 1; ){
		// fold the upper ceil(len/2) elements onto the lower ones; when
		// len is odd the middle element is simply carried to the next round
		unsigned int s = (len + 1) / 2;
		if(tid + s < len && idx + s < n){
			d_in[idx] += d_in[idx + s];
		}
		// barrier stays outside the if so every thread reaches it
		__syncthreads();
		len = s;
	}
	if(tid == 0 && idx < n){
		d_out[blockIdx.x] = d_in[idx];
	}
}

// Tree reduction staged through dynamic shared memory (one float per thread;
// the launch must pass blockDim.x * sizeof(float) as the third <<<>>> arg).
// Each block writes its partial sum to d_out[blockIdx.x]; d_in is read-only.
// Out-of-range threads contribute 0.0f and odd window lengths fold
// ceil(len/2) halves, so any blockDim.x / any n is summed correctly.
__global__ void sharemem_reduce_kernel(float *d_out, const float *d_in, int n){
	extern __shared__ float sdata[];

	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	int tid = threadIdx.x;

	// stage this block's slice into shared memory, zero-padding the tail
	sdata[tid] = (idx < n) ? d_in[idx] : 0.0f;
	__syncthreads();

	// do reduction in shared memory: halve the active window [0, len) each round
	for(unsigned int len = blockDim.x; len > 1; ){
		// ceil(len/2): when len is odd the middle element just carries over
		unsigned int s = (len + 1) / 2;
		if(tid + s < len){
			sdata[tid] += sdata[tid + s];
		}
		// barrier stays outside the if so every thread reaches it
		__syncthreads();
		len = s;
	}
	if(tid == 0){
		d_out[blockIdx.x] = sdata[0];
	}
}

// Smallest power of two >= x (x >= 1).  Follow-up launches are padded to a
// power-of-two thread count because the halving reduction kernels silently
// drop elements when blockDim.x is not a power of two.
static unsigned int next_pow2(unsigned int x){
	unsigned int p = 1;
	while(p < x) p <<= 1;
	return p;
}

// Reduce d_in[0..size) to a single sum left in d_intermediate[0].
// d_intermediate must hold at least ceil(size / MAXTHREADPERBLOCK) floats.
// useShareMemory selects sharemem_reduce_kernel (d_in preserved) or
// global_reduce_kernel (d_in clobbered as scratch).  Launches are
// asynchronous; the caller must synchronize before reading the result.
void reduce(float *d_intermediate, float *d_in, int size, bool useShareMemory){
	int threads = MAXTHREADPERBLOCK;
	// Round up to ensure that all elements are processed
	int blocks = (size + MAXTHREADPERBLOCK - 1) / MAXTHREADPERBLOCK;
	// first pass: one partial sum per block, written to d_intermediate
	if(useShareMemory){
		sharemem_reduce_kernel<<<blocks, threads, threads*sizeof(float)>>>(d_intermediate, d_in, size);
	}else{
		global_reduce_kernel<<<blocks, threads>>>(d_intermediate, d_in, size);
	}
	// follow-up passes: fold the partial sums in place until one remains
	int newsize = blocks;
	while(newsize > 1){
		if(newsize <= MAXTHREADPERBLOCK){
			// pad to a power of two: launching <<<1, newsize>>> with a
			// non-power-of-two newsize would lose elements in the kernels'
			// halving loop (extra threads are masked off by the n guard /
			// the zero-padded shared-memory load)
			int t = (int)next_pow2((unsigned int)newsize);
			if(useShareMemory){
				sharemem_reduce_kernel<<<1, t, t*sizeof(float)>>>(d_intermediate, d_intermediate, newsize);
			}else{
				global_reduce_kernel<<<1, t>>>(d_intermediate, d_intermediate, newsize);
			}
			newsize = 1;
		}else{
			// NOTE(review): this in-place multi-block pass has a
			// write-after-read hazard — block b writes d_intermediate[b]
			// while lower-numbered blocks may still be reading that slot.
			// It is only reachable when size > MAXTHREADPERBLOCK^2; a
			// ping-pong scratch buffer would be needed to make it race-free.
			int newBlocks = (newsize + MAXTHREADPERBLOCK - 1) / MAXTHREADPERBLOCK;
			if(useShareMemory){
				sharemem_reduce_kernel<<<newBlocks, threads, threads*sizeof(float)>>>(d_intermediate, d_intermediate, newsize);
			}else{
				global_reduce_kernel<<<newBlocks, threads>>>(d_intermediate, d_intermediate, newsize);
			}
			newsize = newBlocks;
		}
	}
}

// Driver: builds a random input array, runs the reduction on the GPU
// (argv[1] == 0 → global-memory kernel, 1 → shared-memory kernel), and
// compares the device result against a host reference sum.
int main(int argc, char *argv[]){
	int deviceCount = 0;
	cudaGetDeviceCount(&deviceCount);
	if(deviceCount == 0){
		fprintf(stderr, "error: no device supporting cuda.\n");
		exit(1);
	}
	int dev = 0;
	cudaSetDevice(dev);

	cudaDeviceProp devProp;
	// compare against cudaSuccess, not a magic 0
	if(cudaGetDeviceProperties(&devProp, dev) == cudaSuccess){
		printf("using device %d\n", dev);
		printf("%s: global mem: %zu MB; compute v%d.%d; clock: %d kHz\n",
		devProp.name, devProp.totalGlobalMem /(1024 * 1024), (int)devProp.major,
		(int)devProp.minor, (int)devProp.clockRate);
	}

	const size_t ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
	// generate the input array in host.  Heap-allocate: ARRAY_SIZE floats
	// (4 MB) on the stack risks overflowing the default stack limit.
	float *h_in = (float*)malloc(ARRAY_BYTES);
	if(h_in == NULL){
		fprintf(stderr, "error: host allocation failed\n");
		exit(1);
	}
	// accumulate the reference sum in double to limit rounding error over 2^20 terms
	double sum = 0.0;
	for(int i = 0; i < ARRAY_SIZE; i++){
		// generate random value in [-1.0, 1.0]; use rand() because RAND_MAX
		// is specified for rand() (random() may have a different range)
		h_in[i] = -1.0f + (float)rand() / ((float)RAND_MAX / 2.0f);
		sum += h_in[i];
	}

	float *d_in = NULL, *d_intermediate = NULL;
	// allocate gpu memory; check the calls so a failure is not silent
	if(cudaMalloc((void**)&d_in, ARRAY_BYTES) != cudaSuccess ||
	   cudaMalloc((void**)&d_intermediate, ARRAY_BYTES) != cudaSuccess){
		fprintf(stderr, "error: cudaMalloc failed\n");
		exit(1);
	}
	cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

	int whichKernel = 0;
	if(argc == 2) whichKernel = atoi(argv[1]);

	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	switch(whichKernel){
		case 0:
			// the global-memory kernel clobbers d_in, so it can only run once
			printf("running global reduce\n");
			cudaEventRecord(start, 0);
			reduce(d_intermediate, d_in, ARRAY_SIZE, false);
			cudaEventRecord(stop, 0);
			break;
		case 1:
			printf("running reduce with shared memory\n");
			cudaEventRecord(start, 0);
			// d_in is read-only for this kernel: average over 100 runs
			for(int i = 0; i < 100; i++){
				reduce(d_intermediate, d_in, ARRAY_SIZE, true);
			}
			cudaEventRecord(stop, 0);
			break;
		default:
			fprintf(stderr, "error: no kernel\n");
			exit(1);
	}

	cudaEventSynchronize(stop);
	float elapseTime = 0.0f;
	cudaEventElapsedTime(&elapseTime, start, stop);
	if(whichKernel) elapseTime /= 100.0f;  // per-run average for case 1
	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	// blocking memcpy also synchronizes with the reduction kernels
	float h_out = 0.0f;
	cudaMemcpy(&h_out, d_intermediate, sizeof(float), cudaMemcpyDeviceToHost);
	printf("reduction result: %f, correct answer: %f\n", h_out, sum);
	printf("average time elapsed: %f\n", elapseTime);

	free(h_in);
	cudaFree(d_in);
	cudaFree(d_intermediate);
	cudaDeviceReset();

	return 0;
}
