#include "compress_gpu.h"

// Mask for each bit position within a byte, MSB-first: bit offset 0 maps to
// 0x80.  The table is only ever read on the device (see d_putBit), so place
// it in constant memory, which broadcasts when all lanes read the same entry.
__constant__ uint8 bit_table[8] = {0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01};

// Set bit number `curPos` (counted MSB-first within each byte) of `out`.
// Zero bits are never written: the output buffer is expected to be
// pre-zeroed, so only set bits need the OR.  The byte-wide |= is NOT
// atomic; callers must guarantee that concurrent threads never touch the
// same output byte (the coding kernels stagger their launches for this).
__device__ void d_putBit(uint8* out, uint32 curPos, bool bit)
{
	if(!bit)
		return;
	const uint32 byteIdx = curPos / 8;
	const uint32 bitIdx  = curPos % 8;
	out[byteIdx] |= bit_table[bitIdx];
}

// Rice/Golomb-encode `data` with power-of-two parameter 2^r into the bit
// stream `out`, starting at bit offset *curPos (advanced past the code).
// Code layout: the quotient (data >> r) as that many 1 bits, one 0
// separator bit (skipped -- the buffer is pre-zeroed), then the low r
// remainder bits, least-significant bit first.
__device__ void d_golomb_coding_int(uint32 data, uint32 r, uint8* out, uint32* curPos)
{
	uint32 pos = *curPos;
	const uint32 quotient = data >> r;

	// Unary quotient: `quotient` set bits.
	for(uint32 k = 0; k < quotient; k++)
		d_putBit(out, pos++, true);
	// Separator 0 bit: the buffer already holds zero there.
	pos++;

	// Remainder: low r bits of `data`, LSB first.
	for(uint32 mask = 1, k = 0; k < r; k++, mask <<= 1)
		d_putBit(out, pos++, mask & data);

	*curPos = pos;
}

// Byte overload of the Rice/Golomb encoder: same bit layout as the uint32
// version (unary quotient of 1 bits, implicit 0 separator in the
// pre-zeroed buffer, then the low r remainder bits, LSB first).
// Counters deliberately stay uint8, matching the original arithmetic.
__device__ void d_golomb_coding_int(uint8 data, uint32 r, uint8* out, uint32* curPos)
{
	uint32 pos = *curPos;
	const uint8 quotient = data >> r;

	for(uint8 k = 0; k < quotient; k++)
		d_putBit(out, pos++, true);
	pos++;	// separator 0 bit: already zero in the buffer

	uint8 mask = 1;
	for(uint8 k = 0; k < r; k++) {
		d_putBit(out, pos++, mask & data);
		mask <<= 1;
	}

	*curPos = pos;
}

// Encode the four 32-bit lanes of *data back-to-back, starting at bit
// offset *curPos (advanced past all four codes).
__device__ void d_golomb_coding_int4(uint4* data, uint32 r, uint8* out, uint32* curPos)
{
	const uint4 v = *data;
	uint32 pos = *curPos;

	d_golomb_coding_int(v.x, r, out, &pos);
	d_golomb_coding_int(v.y, r, out, &pos);
	d_golomb_coding_int(v.z, r, out, &pos);
	d_golomb_coding_int(v.w, r, out, &pos);

	*curPos = pos;
}

// Encode the 16 bytes of *data back-to-back, starting at bit offset
// *curPos (advanced past all 16 codes).
__device__ void d_golomb_coding_char16(uint4* data, uint32 r, uint8* out, uint32* curPos)
{
	uint4 v = *data;
	const uint8* bytes = (const uint8*)(&v);
	uint32 pos = *curPos;

	for(int k = 0; k < 16; k++)
		d_golomb_coding_int(bytes[k], r, out, &pos);

	*curPos = pos;
}


// Encode the EVEN-numbered uint4 groups of `data` (thread i handles group
// i*2, i.e. elements 8i..8i+3).  Each group's start bit comes from the
// prefix-summed outDataPos table.  The odd groups are produced by a second
// launch with pointers shifted by one group, so that two codes written in
// the same launch never share an output byte (d_putBit's OR is not atomic).
__global__ void d_golomb_coding_kernel(uint32* data, uint32 dataSize, uint32 r , uint32* outDataPos, uint8* out)
{
	uint4* data4 = (uint4*)data;
	const uint32 groupCount = dataSize >> 2;	// number of uint4 groups
	const uint32 stride = blockDim.x * gridDim.x;

	for(uint32 i = blockIdx.x * blockDim.x + threadIdx.x; i * 2 < groupCount; i += stride) {
		// Group i*2 starts at element index i*8 in the position table.
		uint32 curPos = outDataPos[i * 8];
		d_golomb_coding_int4(data4 + i * 2, r, out, &curPos);
	}
}
// Encode the EVEN-numbered 16-byte groups of `data`: thread i handles the
// 16 bytes starting at byte offset i*32, reading the group's start bit
// from the prefix-summed per-byte table outDataPos.  Odd groups are
// covered by a second launch with pointers shifted by 16 so that codes
// written within one launch never share an output byte.
// Fix: the loop guard was `(i*32 + 16) < dataSize`, which silently dropped
// the final group whenever it ended exactly at dataSize (e.g. for
// dataSize == 32 neither launch encoded bytes 16..31); use <= instead.
__global__ void d_golomb_coding_kernel(uint8* data, uint32 dataSize, uint32 r , uint32* outDataPos, uint8* out)
{
	uint4* data4 = (uint4*)data;
	const uint32 stride = blockDim.x * gridDim.x;

	for(uint32 i = blockIdx.x * blockDim.x + threadIdx.x; i * 32 + 16 <= dataSize; i += stride) {
		// Start bit of this group (byte index i*32 in the position table).
		uint32 curPos = outDataPos[i << 5];
		d_golomb_coding_char16(data4 + i * 2, r, out, &curPos);
	}
}

// Write each value's Golomb code length (in bits) into pos[]:
// quotient (d >> r) unary bits + 1 separator bit + r remainder bits.
// Processes four values per thread via uint4 loads/stores; callers pad
// dataSize to a multiple of 4.
__global__ void golomb_get_num_len_kernel(uint32* data, uint32 dataSize, uint32* pos, uint32 r)
{
	const uint32 stride = blockDim.x * gridDim.x;
	uint4* in4  = (uint4*)data;
	uint4* out4 = (uint4*)pos;

	for(uint32 i = blockIdx.x * blockDim.x + threadIdx.x; i * 4 < dataSize; i += stride) {
		const uint4 d = in4[i];
		uint4 len;
		len.x = (d.x >> r) + 1 + r;
		len.y = (d.y >> r) + 1 + r;
		len.z = (d.z >> r) + 1 + r;
		len.w = (d.w >> r) + 1 + r;
		out4[i] = len;
	}
}

// Byte-input overload: write each byte's Golomb code length (in bits) into
// pos[], four bytes per thread (uchar4 load, uint4 store).  Bytes past the
// last full group of 4 are ignored.
__global__ void golomb_get_num_len_kernel(uint8* data, uint32 dataSize, uint32* pos, uint32 r)
{
	const uint32 quadCount = dataSize / 4;
	const uint32 stride = blockDim.x * gridDim.x;
	uchar4* in4 = (uchar4*)data;
	uint4* out4 = (uint4*)pos;

	for(uint32 i = blockIdx.x * blockDim.x + threadIdx.x; i < quadCount; i += stride) {
		const uchar4 d = in4[i];
		uint4 len;
		len.x = (d.x >> r) + 1 + r;
		len.y = (d.y >> r) + 1 + r;
		len.z = (d.z >> r) + 1 + r;
		len.w = (d.w >> r) + 1 + r;
		out4[i] = len;
	}
}
// Per-thread partial sums for the Golomb parameter estimate: each thread
// sums the uint4 groups it owns into intBuffer[tid] (one slot per launched
// thread, pre-zeroed by the host, reduced afterwards with thrust).
// Fix: the guard was `i*4 + 4 < dataSize`, which skipped the final group
// whenever dataSize is an exact multiple of 4 and biased the estimate;
// use <= so every full group is counted.
__global__ void get_sum_kernel(uint32* data, uint32 dataSize, uint64* intBuffer)
{
	const uint32 tid = blockDim.x * blockIdx.x + threadIdx.x;
	const uint32 stride = blockDim.x * gridDim.x;
	uint64 acc = 0;

	for(uint32 i = tid; i * 4 + 4 <= dataSize; i += stride) {
		const uint4 d = ((uint4*)data)[i];
		acc += (uint64)d.x + d.y + d.z + d.w;
	}
	// One global write per thread instead of one per iteration; the slot
	// is private to this thread, so += onto the zeroed buffer is safe.
	intBuffer[tid] += acc;
}

// Estimate the Golomb parameter M as the mean of the device-resident input
// values: launch get_sum_kernel to produce one 64-bit partial sum per
// thread, then reduce the partials on the device with thrust.
// Assumes dataSize > 0.
uint32 golomb_get_para(uint32* data, uint32 dataSize)
{
	uint64* partial;
	const int block_num = 512, thread_num = 128;
	const uint32 slotCount = block_num * thread_num;	// one slot per thread
	cutilSafeCall(cudaMalloc(&partial, slotCount * sizeof(uint64)));
	cutilSafeCall(cudaMemset(partial, 0, slotCount * sizeof(uint64)));

	get_sum_kernel<<<block_num, thread_num>>>(data, dataSize, partial);

	thrust::device_ptr<uint64> partial_ptr(partial);
	const uint64 sum = thrust::reduce(partial_ptr, partial_ptr + slotCount);
	// Fix: sum and sum/dataSize are 64-bit; the old "%lu"/"%u" specifiers
	// mismatched the argument types (undefined behavior in varargs).
	printf("sum: %llu, dataSize: %u, sum/dataSize: %llu\n",
			(unsigned long long)sum, dataSize, (unsigned long long)(sum / dataSize));

	cutilSafeCall(cudaFree(partial));
	return ((uint32)(sum / (uint64)dataSize));
}


// Per-group byte sums for the parameter estimate: thread i adds up the 16
// bytes of uint4 group i and stores the total in intBuffer[i] (one slot
// per group).  Bytes past the last full 16-byte group are ignored,
// matching the host-side reduction length.
__global__ void get_sum_kernel(uint8* data, uint32 dataSize, uint32* intBuffer)
{
	const uint32 groupCount = dataSize >> 4;	// 16 bytes per uint4 group
	const uint32 stride = blockDim.x * gridDim.x;
	uint4* data4 = (uint4*)data;

	for(uint32 i = blockDim.x * blockIdx.x + threadIdx.x; i < groupCount; i += stride) {
		const uint4 d = data4[i];
		const uint32 words[4] = {d.x, d.y, d.z, d.w};
		uint32 total = 0;
		// Sum the four bytes packed in each 32-bit word.
		for(int w = 0; w < 4; w++) {
			total += (words[w] >> 24) & 0xFF;
			total += (words[w] >> 16) & 0xFF;
			total += (words[w] >>  8) & 0xFF;
			total +=  words[w]        & 0xFF;
		}
		intBuffer[i] = total;
	}
}


// Estimate the Golomb parameter M as the mean byte value of the
// device-resident input: one partial sum per 16-byte group, reduced on
// the device with thrust.  Bytes beyond the last full group are ignored
// (consistent with get_sum_kernel).  Assumes dataSize > 0.
uint32 golomb_get_para(uint8* data, uint32 dataSize)
{
	uint32* intBuffer;
	const uint32 groupCount = dataSize / 16;	// full 16-byte groups only
	// Fix: a pointer must be printed with %p, not %u.
	printf("get para data_ptr: %p\n", (void*)data);
	cutilSafeCall(cudaMalloc(&intBuffer, ceil((double)dataSize / 16.0) * sizeof(int)));

	printf("dataSize = %d\n", dataSize);

	get_sum_kernel<<<128, 128>>>(data, dataSize, intBuffer);
	cutilCheckMsg("golomb_get_sum_kernel");

	thrust::device_ptr<uint32> data_ptr(intBuffer);
	const uint32 sum = thrust::reduce(data_ptr, data_ptr + groupCount);

	// Fix: sum is 32-bit; "%lu" mismatched the argument type.
	printf("sum: %u, dataSize: %u, sum/dataSize: %u\n", sum, dataSize, sum / dataSize);

	cutilSafeCall(cudaFree(intBuffer));
	return (sum / dataSize);
}

// In-place exclusive prefix sum of the per-element code lengths on the
// device.  Afterwards pos[i] is the start bit of element i's code; with
// the extra slot the callers allocate, the final entry becomes the total
// bit count of the stream.
void golomb_prefix_sum(uint32* pos, uint32 dataSize)
{
	thrust::device_ptr<uint32> first(pos);
	thrust::exclusive_scan(first, first + dataSize, first);
}

// Golomb-encode the device buffer d_data (dataSize padded to a multiple of
// 4 by the caller) into the pre-zeroed bit stream d_out.  d_dataPos
// receives the per-element start-bit table (needs dataSize+1 slots);
// *para returns the power-of-two parameter M.  outBitSize is currently
// not written (see the commented-out copy at the bottom).
void gpu_golomb_coding(uint32* d_data, uint32 dataSize, uint32* d_dataPos, uint8* d_out, uint32* outBitSize, uint32* para)
{
	uint32 M = golomb_get_para(d_data, dataSize);
	// r = floor(log2(M)); M is then rounded to that power of two so the
	// encoder can use shifts and masks (Rice coding).
	uint32 r = (log((double)M) / log(2.0));
	M = pow(2.0, r);
	printf("dataSize: %u, M: %u, r: %u\n",dataSize, M, r);

	uint32 thread_num = 128, block_num = 512;

	// Per-element code lengths, then exclusive scan -> start-bit table.
	golomb_get_num_len_kernel<<<block_num, thread_num>>>(d_data, dataSize, d_dataPos, r);

	golomb_prefix_sum(d_dataPos, dataSize + 1);

	uint32 outBit;
	cutilSafeCall(cudaMemcpy(&outBit, d_dataPos + dataSize, sizeof(int), cudaMemcpyDeviceToHost));
	// Fix: outBit is a 32-bit value; "%lu" mismatched the argument type.
	printf("outbitsize: %u\n", outBit);

	// Two staggered launches (even uint4 groups, then odd groups) so that
	// adjacent codes never share an output byte within one launch.
	d_golomb_coding_kernel<<<block_num, thread_num>>>(d_data, dataSize, r, d_dataPos, d_out);
	cutilCheckMsg("d_golomb_coding_kernel1");
	d_golomb_coding_kernel<<<block_num, thread_num>>>(d_data+4, dataSize-4, r , d_dataPos+4, d_out);
	cutilCheckMsg("d_golomb_coding_kernel2");
	cutilSafeCall(cudaThreadSynchronize());

	//	cutilSafeCall(cudaMemcpy(outBitSize, d_dataPos + dataSize, sizeof(int), cudaMemcpyDeviceToHost));
	(*para) = M;
}

// Golomb-encode the device byte buffer d_data into the pre-zeroed bit
// stream d_out.  d_dataPos receives the per-byte start-bit table
// (dataSize+1 slots); *para returns the power-of-two parameter M.
// outBitSize is currently not written (see the commented-out copy).
void gpu_golomb_coding(uint8* d_data, uint32 dataSize, uint32* d_dataPos, uint8* d_out, uint32* outBitSize, uint32* para)
{
	INIT_TIMER;
	START_TIMER;
	uint32 M = golomb_get_para(d_data, dataSize);

	// Round M down to a power of two (Rice coding parameter).
	uint32 r = log((double)M) / log(2.0);
	M = pow(2.0, r);
	printf("M: %u, r: %u\n", M, r);

	const uint32 threads = 128, blocks = 128;

	// Per-byte code lengths, then exclusive scan -> start-bit table.
	golomb_get_num_len_kernel<<<blocks, threads>>>(d_data, dataSize, d_dataPos, r);
	cutilCheckMsg("golomb_get_num_len_kernel");

	golomb_prefix_sum(d_dataPos, dataSize + 1);

	// Staggered launches (even 16-byte groups, then odd) so adjacent
	// codes never share an output byte within one launch.
	d_golomb_coding_kernel<<<blocks, threads>>>(d_data, dataSize, r , d_dataPos, d_out);
	cutilCheckMsg("d_golomb_coding_kernel1");

	d_golomb_coding_kernel<<<blocks, threads>>>(d_data+16, dataSize-16, r , d_dataPos+16, d_out);
	cutilCheckMsg("d_golomb_coding_kernel2");

	cutilSafeCall(cudaThreadSynchronize());
	//	cutilSafeCall(cudaMemcpy(outBitSize, d_dataPos + dataSize, sizeof(int), cudaMemcpyDeviceToHost));
	(*para) = M;
	END_TIMER;
	PRINT_TIMER_SEC("golomb compression on gpu ");
}

// Pack 16 two-bit symbols into 4 bytes: input group k (4 bytes, data[k])
// becomes output byte k, with the first input byte in the two most
// significant bits.  Any high bits in the inputs are shifted out, matching
// the truncating behaviour of the original shift-and-OR sequence.
// (Removed an unused local `i` and collapsed the 4x repeated packing
// pattern into a loop.)
__device__ void d_binary_encoding(uchar4* data, uchar4* out)
{
	uchar4 packed;
	uint8* bytes = (uint8*)(&packed);

	for(int k = 0; k < 4; k++) {
		const uchar4 d = data[k];
		bytes[k] = (uint8)((d.x << 6) | (d.y << 4) | (d.z << 2) | d.w);
	}

	out[0] = packed;
}

// Pack the low two bits of every input byte into dataSize/4 output bytes,
// 16 input bytes (4 uchar4 loads) per thread.
// Fix: the guard was `i*4 < dataSize4`, which let the last active thread
// read up to 12 bytes past the end of `data` when the uchar4 group count
// is not a multiple of 4; require all four loads to be in range instead.
// Also removed unused locals (outPosIndex, curPos).
__global__ void binary_encoding_kernel(uint8* data, uint32 dataSize, uint8* out)
{
	const uint32 groupCount = dataSize >> 2;	// uchar4 groups in `data`
	const uint32 stride = blockDim.x * gridDim.x;
	uchar4* data4 = (uchar4*)data;
	uchar4* out4 = (uchar4*)out;

	for(uint32 i = blockIdx.x * blockDim.x + threadIdx.x; i * 4 + 4 <= groupCount; i += stride) {
		d_binary_encoding(data4 + i * 4, out4 + i);
	}
}

// Host wrapper: launch binary_encoding_kernel on the device buffers, wait
// for completion, and report the elapsed time.
void gpu_binary_encoding(uint8* d_data, uint32 dataSize, uint8* d_out)
{
	INIT_TIMER;
	START_TIMER;

	const uint32 threads = 128, blocks = 128;

	binary_encoding_kernel<<<blocks, threads>>>(d_data,dataSize, d_out);
	cutilCheckMsg("binary_encoding_kernel");
	cutilSafeCall(cudaThreadSynchronize());

	END_TIMER;
	PRINT_TIMER_SEC("binary_encoding_kernel ");
}

// Run-length encode the device buffer `data`: each run of identical
// consecutive values collapses to one entry in `out` with its repeat
// count in `out_len`.  *outSize receives the number of runs.
void gpu_run_length_encoding(uint32* data, uint32 dataSize, uint32* out, uint32* out_len, uint32* outSize)
{
	thrust::device_ptr<uint32> in_first(data);
	thrust::device_ptr<uint32> keys_out(out);
	thrust::device_ptr<uint32> counts_out(out_len);

	uint32 num_runs;
	try {
		// reduce_by_key with a constant value sequence of 1s yields one
		// (value, count) pair per run; the returned key-end iterator
		// gives the compacted output size.
		num_runs = thrust::reduce_by_key(
				in_first, in_first + dataSize,
				thrust::constant_iterator<uint32>(1),
				keys_out,
				counts_out).first - keys_out;
	} catch(std::bad_alloc &e) {
		std::cerr<<"Couldn't allocate d_a: " << e.what() <<std::endl;
		exit(-1);
	}
	(*outSize) = num_runs;

	printf(" outsize: %u \n", (*outSize));
}

// Expand a run-length encoding on the device: data[i] is repeated
// data_len[i] times into `out`.  NOTE: data_len is scanned in place and
// therefore destroyed.  outSize is currently unused by this
// implementation.
void gpu_run_length_decoding(uint32* data, uint32* data_len, uint32 dataSize, uint32* out, uint32* outSize)
{
	thrust::device_ptr<uint32> values(data);
	thrust::device_ptr<uint32> out_first(out);
	thrust::device_ptr<uint32> ends(data_len);

	// Turn the lengths into cumulative run end positions (in place).
	thrust::inclusive_scan(ends, ends + dataSize, ends);

	// Total output length = last cumulative end.
	uint32 total = 0;
	cutilSafeCall(cudaMemcpy(&total, data_len + dataSize - 1, sizeof(int), cudaMemcpyDeviceToHost));

	// For every output slot, find the run it falls into...
	thrust::device_vector<int> run_index(total);
	thrust::lower_bound(ends, ends + dataSize,
			thrust::counting_iterator<int>(1),
			thrust::counting_iterator<int>(total + 1),
			run_index.begin());

	// ...and gather that run's value.
	thrust::gather(run_index.begin(), run_index.end(),
			values,
			out_first);
}

// Pass 1 of GPU delta encoding: for each full uint4 group, write the
// differences between consecutive elements WITHIN the group into `out`.
// o.x keeps the raw value; the cross-group difference at each group
// boundary is filled in later by delta_encoding_kernel2 (launched with
// data+4/out+4), and kernel3 copies the result back into `data`.
__global__ void delta_encoding_kernel1(uint32* data, uint32 dataSize, uint32* out)
{
	uint32 i = blockIdx.x * blockDim.x + threadIdx.x;

	uint4* data4 = (uint4*)data;
	uint4* out4 = (uint4*)out;
	uint4 d, o;
	// Grid-stride loop over groups whose 4 elements are all in range.
	while(i*4+4 < dataSize) {
		d = data4[i];
		o.x = d.x;	// boundary delta deferred to kernel2
		o.y = d.y - d.x;
		o.z = d.z - d.y;
		o.w = d.w - d.z;
		out4[i] = o;
		i += blockDim.x * gridDim.x;
	}
	// Tail: remaining elements handled one at a time, boundary delta
	// included.  Only the thread whose post-loop `i` still satisfies
	// i*4 < dataSize enters this loop.
	// NOTE(review): for i == 0 (i.e. dataSize < 4) this reads data[-1];
	// callers appear to pad dataSize to a multiple of 4 -- confirm.
	int j = 0;
	while( i*4 + j < dataSize){
		out[i*4 + j] = data[i*4 + j] - data[i*4 + j - 1];
		j++;
	}
}

// Pass 2 of GPU delta encoding: invoked with data+4/out+4 so every
// iteration fixes one group-boundary slot that pass 1 left raw, writing
// out[4k] = data[4k] - data[4k-1] (in the caller's original indexing).
__global__ void delta_encoding_kernel2(uint32* data, uint32 dataSize, uint32* out)
{
	const uint32 stride = blockDim.x * gridDim.x;
	for(uint32 i = blockIdx.x * blockDim.x + threadIdx.x; i*4 + 4 < dataSize; i += stride) {
		out[i*4] = data[i*4] - data[i*4 - 1];
	}
}
// Pass 3 of GPU delta encoding: copy the finished deltas from the scratch
// buffer `out` back into `data`, uint4-wide for full groups plus a scalar
// tail for the remainder.
__global__ void delta_encoding_kernel3(uint32* data, uint32 dataSize, uint32* out)
{
	uint32 i = blockIdx.x * blockDim.x + threadIdx.x;
	// Grid-stride copy of groups whose 4 elements are all in range.
	while(i*4 + 4 < dataSize) {
		((uint4*)data)[i] = ((uint4*)out)[i];
		i += blockDim.x * gridDim.x;
	}

	// Tail: copy the remaining elements one at a time.  Only the thread
	// whose post-loop `i` still satisfies i*4 < dataSize enters this loop.
	int j = 0;
	while( i*4 + j < dataSize){
		data[i*4 + j] = out[i*4 + j];
		j++;
	}
}

// Delta-encode the device buffer in place: data[0] is kept and data[i]
// becomes data[i] - data[i-1].  Runs three kernels over a scratch buffer:
// intra-group deltas, group-boundary deltas, then copy-back.
void gpu_delta_encoding(uint32* data, uint32 dataSize)
{
	const int blocks = 512;
	const int threads = 128;
	uint32* scratch;
	cutilSafeCall(cudaMalloc(&scratch, sizeof(int) * dataSize));
	cutilSafeCall(cudaMemset(scratch, 0, sizeof(int) * dataSize));

	delta_encoding_kernel1<<<blocks, threads>>>(data, dataSize, scratch);
	delta_encoding_kernel2<<<blocks, threads>>>(data+4, dataSize-4, scratch + 4);
	delta_encoding_kernel3<<<blocks, threads>>>(data, dataSize, scratch);
	cutilCheckMsg("delta_encoding_kernel");
	cutilSafeCall(cudaThreadSynchronize());

	cutilSafeCall(cudaFree(scratch));
}

// Invert the delta encoding: an in-place inclusive prefix sum on the
// device restores the original values.
void gpu_delta_decoding(uint32* data, uint32 dataSize)
{
	thrust::device_ptr<uint32> first(data);
	thrust::inclusive_scan(first, first + dataSize, first);
}

// Round-trip test for GPU delta encode/decode: uploads `data`, encodes
// then decodes on the device, copies the result back, and compares it
// against a saved host copy, reporting the first mismatch.
void gpu_delta_encoding_test(uint32* data, uint32 dataSize)
{
	uint32* d_data;
	uint32* data1 = (uint32*)malloc(dataSize * sizeof(int));
	memcpy(data1, data, dataSize * sizeof(int));

	// Pad the device buffer to a multiple of 4 for the uint4 kernels.
	uint32 dataSize1 = dataSize;
	if(dataSize % 4 > 0)
		dataSize1 = (dataSize / 4 + 1 ) * 4;

	INIT_TIMER;
	START_TIMER;
	cutilSafeCall(cudaMalloc(&d_data,  dataSize1 * sizeof(int)));
	cutilSafeCall(cudaMemset(d_data, 0, dataSize1 * sizeof(int)));
	cutilSafeCall(cudaMemcpy(d_data, data, dataSize * sizeof(int), cudaMemcpyHostToDevice));
	END_TIMER;
	PRINT_TIMER_SEC("malloc time ");

	START_TIMER;
	gpu_delta_encoding(d_data, dataSize1);
	END_TIMER;
	PRINT_TIMER_SEC("delta_encoding_kernel ");

	START_TIMER;
	gpu_delta_decoding(d_data, dataSize);
	END_TIMER;
	PRINT_TIMER_SEC("delta_decoding_kernel ");

	// Fix: the decoded values were never copied back to the host, so the
	// loop below compared two identical host buffers and could never
	// detect a mismatch.
	cutilSafeCall(cudaMemcpy(data, d_data, dataSize * sizeof(int), cudaMemcpyDeviceToHost));

	for(uint32 i =0; i<dataSize; i++) {
		if(data[i] != data1[i]) {
			printf("i: %u, data: %u, data1: %u\n",i , data[i], data1[i]);
			break;
		}
	}

	cutilSafeCall(cudaFree(d_data));
	free(data1);	// fix: was leaked
}

// End-to-end test: sort + delta-encode + Golomb-encode `data` on the GPU,
// copying the compressed stream into `out` and the bit count into
// *outBitSize, and printing the achieved compression ratio.
void gpu_golomb_coding_test(uint32* data, uint32 dataSize, uint8* out, uint32* outBitSize)
{
	uint32* d_data, *d_dataPos;
	uint8* d_out;
	// Pad to a multiple of 4 for the uint4 kernels; padding stays zero.
	uint32 dataSize1 = dataSize;
	if(dataSize % 4 > 0)
		dataSize1 = (dataSize / 4 + 1 ) * 4;
	cutilSafeCall(cudaSetDevice(0));
	INIT_TIMER;
	START_TIMER;
	cutilSafeCall(cudaMalloc(&d_data, sizeof(int) * dataSize1));
	cutilSafeCall(cudaMemset(d_data, 0, sizeof(int) * dataSize1));
	cutilSafeCall(cudaMemcpy(d_data, data, sizeof(int) * dataSize, cudaMemcpyHostToDevice));
	cutilSafeCall(cudaMalloc(&d_dataPos, sizeof(int) * (dataSize1+1)));
	cutilSafeCall(cudaMemset(d_dataPos, 0, sizeof(int) * (dataSize1+1)));
	cutilSafeCall(cudaMalloc(&d_out, sizeof(int) * dataSize1));
	cutilSafeCall(cudaMemset(d_out, 0, sizeof(int) * dataSize1));
	END_TIMER;
	PRINT_TIMER_SEC("malloc time ");

	thrust::device_ptr<uint32> data_ptr(d_data);

	// Sorting makes the deltas small, which Golomb coding compresses well.
	START_TIMER;
	thrust::sort(data_ptr, data_ptr + dataSize);
	END_TIMER;
	PRINT_TIMER_SEC("sorting time ");

	gpu_delta_encoding(d_data, dataSize);
	// Re-zero the padding clobbered by the encoder's scratch copy-back.
	cutilSafeCall(cudaMemset(d_data+dataSize, 0, (dataSize1 - dataSize)*sizeof(int)));

	uint32 M = golomb_get_para(d_data, dataSize1);
	uint32 r = ceil(log((double)M) / log(2.0));
	M = pow(2.0, r);
	printf("M: %u, r: %u\n", M, r);

	uint32 thread_num = 128, block_num = 512;

	golomb_get_num_len_kernel<<<block_num, thread_num>>>(d_data, dataSize1, d_dataPos, r);

	golomb_prefix_sum(d_dataPos, dataSize1 + 1);

	// Staggered launches (even uint4 groups, then odd) so adjacent codes
	// never share an output byte within one launch.
	START_TIMER;
	d_golomb_coding_kernel<<<block_num, thread_num>>>(d_data, dataSize1, r , d_dataPos, d_out);
	cutilCheckMsg("d_golomb_coding_kernel1");
	d_golomb_coding_kernel<<<block_num, thread_num>>>(d_data+4, dataSize1-4, r , d_dataPos+4, d_out);
	cutilCheckMsg("d_golomb_coding_kernel2");
	cutilSafeCall(cudaThreadSynchronize());
	END_TIMER;
	PRINT_TIMER_SEC("golomb coding kernel ");

	cutilSafeCall(cudaMemcpy(out, d_out, sizeof(int) * dataSize, cudaMemcpyDeviceToHost));
	// NOTE(review): this reads the start bit of element `dataSize`, not
	// the stream total at index dataSize1 -- bits of the padded tail are
	// excluded; confirm that is intended.
	cutilSafeCall(cudaMemcpy(outBitSize, d_dataPos + dataSize, sizeof(int), cudaMemcpyDeviceToHost));
	// Fix: byte size is bits/8, not bits/4.
	printf("compressed bit size: %u, byte size: %u, compress ratio: %.2f\n", (*outBitSize), (*outBitSize)/8,
			(double)(dataSize * sizeof(int) * 8) / (double)(*outBitSize));

	cutilSafeCall(cudaFree(d_data));
	cutilSafeCall(cudaFree(d_out));
	cutilSafeCall(cudaFree(d_dataPos));
}


// End-to-end test for the byte path: Golomb-encode the byte buffer `data`
// on the GPU, copy the compressed stream into `out` and the bit count into
// *outBitSize, and print the achieved compression ratio.
void gpu_golomb_coding_test(uint8* data, uint32 dataSize, uint8* out, uint32* outBitSize)
{
	uint32*d_dataPos;
	uint8* d_data, *d_out;
	// Pad to a multiple of 16 for the 16-byte coding groups; padding is 0.
	uint32 dataSize1 = dataSize;
	if(dataSize % 16 > 0)
		dataSize1 = (dataSize / 16 + 1 ) * 16;

	INIT_TIMER;
	START_TIMER;
	cutilSafeCall(cudaMalloc(&d_data, dataSize1));
	cutilSafeCall(cudaMemset(d_data, 0, dataSize1));
	cutilSafeCall(cudaMemcpy(d_data, data, dataSize, cudaMemcpyHostToDevice));
	cutilSafeCall(cudaMalloc(&d_dataPos, sizeof(int)*(dataSize1+1)));
	cutilSafeCall(cudaMemset(d_dataPos, 0, sizeof(int)*(dataSize1+1)));
	cutilSafeCall(cudaMalloc(&d_out, sizeof(int)*dataSize1));
	cutilSafeCall(cudaMemset(d_out, 0, sizeof(int)*dataSize1));
	END_TIMER;
	PRINT_TIMER_SEC("malloc time ");

	uint32 M = golomb_get_para(d_data, dataSize);
	uint32 r = ceil(log((double)M) / log(2.0));
	M = pow(2.0, r);
	printf("M: %u, r: %u\n", M, r);

	uint32 thread_num = 128, block_num = 512;

	golomb_get_num_len_kernel<<<block_num, thread_num>>>(d_data, dataSize1, d_dataPos, r);

	golomb_prefix_sum(d_dataPos, dataSize1 + 1);

	// Staggered launches (even 16-byte groups, then odd) so adjacent
	// codes never share an output byte within one launch.
	START_TIMER;
	d_golomb_coding_kernel<<<block_num, thread_num>>>(d_data, dataSize1, r , d_dataPos, d_out);
	cutilCheckMsg("d_golomb_coding_kernel1");
	d_golomb_coding_kernel<<<block_num, thread_num>>>(d_data+16, dataSize1-16, r , d_dataPos+16, d_out);
	cutilCheckMsg("d_golomb_coding_kernel2");
	cutilSafeCall(cudaThreadSynchronize());
	END_TIMER;
	PRINT_TIMER_SEC("golomb coding kernel ");

	cutilSafeCall(cudaMemcpy(out, d_out, sizeof(int) * dataSize, cudaMemcpyDeviceToHost));
	// NOTE(review): this reads the start bit of byte `dataSize`, not the
	// stream total at index dataSize1 -- bits of the padded tail are
	// excluded; confirm that is intended.
	cutilSafeCall(cudaMemcpy(outBitSize, d_dataPos + dataSize, sizeof(int), cudaMemcpyDeviceToHost));
	// Fix: byte size is bits/8, not bits/4.
	printf("compressed bit size: %u, byte size: %u, compress ratio: %.2f\n", (*outBitSize), (*outBitSize)/8,
			(double)(dataSize * sizeof(int) * 8) / (double)(*outBitSize));

	cutilSafeCall(cudaFree(d_data));
	cutilSafeCall(cudaFree(d_out));
	cutilSafeCall(cudaFree(d_dataPos));
}



/*
   int main(int argc, char* argv[])
   {
   char* inFileName = argv[1];
   uint32 dataSize = atoi(argv[2]);
   uint32* data = (uint32*)malloc(dataSize * sizeof(int));
   uint32* data1 = (uint32*)malloc(dataSize * sizeof(int));
   uint8* out = (uint8*)malloc(dataSize * sizeof(int));
   uint32 bitSize;

   FILE* infile = fopen(inFileName, "rb");
   fread(data, sizeof(int), dataSize, infile);

   INIT_TIMER;
   START_TIMER;
   gpu_golomb_coding_test(data, dataSize, out, &bitSize);
   END_TIMER;
   PRINT_TIMER_SEC("golomb coding gpu ");

//check result
golombDecode(out, data1, 32, bitSize);
for(uint32 i=0; i<dataSize; i++) {
if(data[i] != data1[i]) {
printf("data: %u, data1: %u\n", data[i], data1[i]);
}
}

//compare with cpu version
uint64 bitSize_cpu;
START_TIMER;
golombEncode(data, out, 32, dataSize, &bitSize_cpu);
printf("bit size cpu: %u\n", bitSize_cpu);
END_TIMER;
PRINT_TIMER_SEC("golomb coding cpu ");

free(data);
free(out);
fclose(infile);
return 0;
}
 */
