#ifndef __COMPACTION_CU__
#define __COMPACTION_CU__


#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "cuda_header.h"
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>


///////////////////////////////////////////// compaction function starts ////////////////////////////////

// Copies the valid prefix of each fixed-capacity partition of d_in into the
// densely packed d_out. One block handles one partition at a time (blocks
// stride over partitions), and the threads of a block cooperatively copy
// that partition's elements.
//
// Launch: any 1-D grid/block; correctness does not depend on the config.
// d_in  layout: partition p occupies [p*maxNumElementPerPart, ...), of which
//               only the first d_numElementPerPart[p] entries are valid.
// d_out layout: partition p's valid entries start at d_writePos[p]
//               (exclusive prefix sum of the counts, computed by the caller).
template<class T>
__global__
void compaction_kernel(T* d_out, const T* d_in, const int numPart, const int maxNumElementPerPart,
			const int* d_numElementPerPart, const int* d_writePos) {
	for (int part = blockIdx.x; part < numPart; part += gridDim.x) {
		const int count = d_numElementPerPart[part];
		if (count > 0) {
			const int dstBase = d_writePos[part];
			const int srcBase = part * maxNumElementPerPart;
			// Adjacent threads touch adjacent elements: coalesced on both sides.
			for (int j = threadIdx.x; j < count; j += blockDim.x) {
				d_out[dstBase + j] = d_in[srcBase + j];
			}
		}
	}
}

// Compacts a partitioned array: each of the numPart partitions occupies a
// fixed slot of maxNumElementPerPart elements in d_in, but only its first
// d_numElementPerPart[i] elements are valid. The valid elements are copied
// contiguously into d_out, preserving partition order.
//
// d_out must have room for the sum of all counts. All pointers are device
// pointers. Returns the total number of compacted elements.
template<class T>
int compaction(T* d_out, const T* d_in, const int numPart,
		const int maxNumElementPerPart, const int* d_numElementPerPart) {
	// Nothing to compact; avoids a zero-byte GPUMALLOC and empty thrust ranges.
	if (numPart <= 0) {
		return 0;
	}

	thrust::device_ptr<int> dptr_numElementPerPart(const_cast<int*>(d_numElementPerPart));

	// Per-partition write offsets = exclusive prefix sum of the counts.
	// exclusive_scan writes every one of the numPart entries, so the buffer
	// does not need to be zeroed first (the previous cudaMemset was dead work).
	int* d_writePos = NULL;
	GPUMALLOC((void**)&d_writePos, sizeof(int)*numPart);
	thrust::device_ptr<int> dptr_writePos(d_writePos);
	thrust::exclusive_scan(dptr_numElementPerPart, dptr_numElementPerPart + numPart, dptr_writePos);

	compaction_kernel<T><<<128, 256>>>(d_out, d_in, numPart, maxNumElementPerPart,
					d_numElementPerPart, d_writePos);
	cutilCheckMsg("compaction_kernel");

	// Total element count. thrust::reduce is synchronous and runs on the
	// default stream, i.e. after the kernel above has finished; computing it
	// BEFORE freeing d_writePos guarantees the kernel is no longer reading
	// that buffer when it is released. (The original freed it right after the
	// async launch, relying on cudaFree's implicit synchronization.)
	const int numResult = thrust::reduce(dptr_numElementPerPart,
					dptr_numElementPerPart + numPart);
	GPUFREE(d_writePos);

	return numResult;
}


/////////////////////////////////////////////// compaction function ends ///////////////////////////////
// below is the test function

/*
void testCompaction() {
	const int numPart = 1234*1000;
	const int maxNumElementPerPart = 100;
	int* h_numElementPerPart = (int*)malloc(sizeof(int)*numPart);
	for(int i = 0; i < numPart; i++) {
		h_numElementPerPart[i] = rand()%maxNumElementPerPart;
	}
	int* h_in = (int*)malloc(sizeof(int)*numPart*maxNumElementPerPart);
	for(int i = 0; i < numPart*maxNumElementPerPart; i++) {
		h_in[i] = rand();
	}
	printf("numPart = %d, maxNumElementPerPart = %d\n", numPart, maxNumElementPerPart);
	unsigned int timer = 0;

	//GPU
	int* d_in = NULL;
	GPUMALLOC((void**)&d_in, sizeof(int)*numPart*maxNumElementPerPart);
	TOGPU(d_in, h_in, sizeof(int)*numPart*maxNumElementPerPart);
	int* d_out = NULL;
	GPUMALLOC((void**)&d_out, sizeof(int)*numPart*maxNumElementPerPart);
	int* d_numElementPerPart = NULL;
	GPUMALLOC((void**)&d_numElementPerPart, sizeof(int)*numPart);
	TOGPU(d_numElementPerPart, h_numElementPerPart, sizeof(int)*numPart);
	timer = 0;
	startTimer(&timer);
	const int gpu_numResult = compaction(d_out, d_in, numPart, maxNumElementPerPart, d_numElementPerPart);
	cutilSafeCall(cudaThreadSynchronize());
	endTimer(&timer, "GPU compaction");
	int* gpu_out = (int*)malloc(sizeof(int)*gpu_numResult);
	FROMGPU(gpu_out, d_out, sizeof(int)*gpu_numResult);

	//CPU
	int gold_numResult = 0;
	for(int i = 0; i < numPart; i++) {
		gold_numResult += h_numElementPerPart[i];
	}
	assert(gpu_numResult == gold_numResult);
	int* gold_out = (int*)malloc(sizeof(int)*gold_numResult);
	int offset = 0;
	timer = 0;
	startTimer(&timer);
	for(int partId = 0; partId < numPart; partId++) {
		for(int i = 0; i < h_numElementPerPart[partId]; i++) {
			gold_out[offset] = h_in[partId*maxNumElementPerPart + i];
			offset ++;
		}
	}
	endTimer(&timer, "CPU compaction");

	//check result
	for(int i = 0; i < gold_numResult; i++) {
		if(gpu_out[i] != gold_out[i]) {
			printf("!!!ERROR.\n");
			exit(0);
		}
	}
	printf("Test passed.\n");
}

int main(int argc, char** argv) {

	testCompaction();

	return EXIT_SUCCESS;
}
*/
#endif /*__COMPACTION_CU__*/


