// CUDA runtime
#include <cuda_runtime.h>

// CUDA and CUBLAS functions
#include <cublas_v2.h>

// Thrust
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>

// C/C++ standard library
#include <assert.h>
#include <iostream>

// helpers for shared functions common to CUDA Samples
#include <helper_string.h>
#include <helper_functions.h>
#include <helper_cuda.h>


// Computes the dot product of two length-n float vectors.
//
// a, b: pointers to at least n floats each (not modified).
// n:    number of elements to accumulate; n <= 0 yields 0.
// Returns sum over i of a[i] * b[i], accumulated left to right.
float DotVector(float *a, float *b, int n)
{
	float acc = 0.0f;
	const float *pa = a;
	const float *pb = b;
	while (n-- > 0) {
		acc += *pa++ * *pb++;
	}
	return acc;
}

// Benchmarks m independent n-element dot products on the CPU and prints the
// achieved GFlop/s.
//
// Fixes vs. original: the input vectors are initialized before use (the
// original read freshly malloc'd memory, which is undefined behavior), the
// timer is stopped BEFORE its value is read, allocation failures are
// checked, and the buffers and timer are released.
void CPUTestDotVector()
{
	const int n = 128;   // elements per dot product
	const int m = 4000;  // number of dot products

	float *a = (float *)malloc((size_t)m * n * sizeof(float));
	float *b = (float *)malloc((size_t)m * n * sizeof(float));
	float *c = (float *)malloc(m * sizeof(float));
	if (a == NULL || b == NULL || c == NULL)
	{
		fprintf(stderr, "Failed to allocate host vectors!\n");
		exit(EXIT_FAILURE);
	}

	// Deterministic, non-trivial input so the measured work is well-defined.
	for (int i = 0; i < m * n; i++){
		a[i] = 1.0f + (float)(i % 7);
		b[i] = 1.0f - (float)(i % 5);
	}

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);

	for (int i = 0; i < m; i++){
		c[i] = DotVector(a + (i*n), b + (i*n), n);
	}

	sdkStopTimer(&timer);  // stop before reading the elapsed time

	float msTotalTime = sdkGetTimerValue(&timer);
	double flops = 2.0 * m * n;  // one multiply + one add per element
	double gigaFlops = (flops * 1.0e-9) / (msTotalTime / 1000.0);
	printf(
		"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
		gigaFlops,
		msTotalTime,
		flops / (float)1e+6);

	sdkDeleteTimer(&timer);
	free(a);
	free(b);
	free(c);
}

// Element-wise in-place multiply: A[i] = A[i] * B[i] for i in [0, s).
// Expects a 1-D launch.  The bounds guard makes any grid with
// gridDim.x * blockDim.x >= s safe (the original had no guard and wrote
// out of bounds whenever the grid over-covered the data, e.g. with a
// ceil-div launch configuration).
__global__ void
vectorMulKernel(float *A, const float *B, int s)
{
	int i = (blockDim.x * blockIdx.x + threadIdx.x);

	if (i < s)
	{
		A[i] = A[i] * B[i];
	}
}

// CPU reference for vectorMulKernel: multiplies A by B element-wise,
// in place, for numElements elements.
void 
vectorMulCPU(float *A, const float *B, int numElements)
{
	float *dst = A;
	const float *src = B;
	const float *end = A + numElements;
	while (dst < end)
	{
		*dst = *dst * *src;
		++dst;
		++src;
	}
}

// Benchmarks element-wise vector multiply on the GPU vs. the CPU for
// 17000 * 120 * sizeMultiplication elements and prints transfer time,
// kernel GFlop/s, and CPU time.
//
// Fixes vs. original:
//  - the kernel is launched with the ceil-div grid (the original launched
//    numElements/threadsPerBlock blocks and silently skipped the tail)
//  - the result is copied back from d_A (the kernel multiplies in place;
//    the original copied d_C, which was never written)
//  - the d_C allocation error check was clobbered by a later call; the
//    dead h_D/d_D/d_C buffers are removed entirely
//  - host/device buffers, the timer, and the events are released
//  - the messages now say "multiplication"/"vectorMul" instead of
//    "addition"/"vectorAdd"
void
vectorMulGPU(int sizeMultiplication)
{
	if (sizeMultiplication == 0)
		sizeMultiplication = 1;
	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);

	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	printf("\n----- [Indirected vector Mul using local kernel] - Starting...\n");

	int numElements = 17000 * 120 * sizeMultiplication;
	// Error code to check return values for CUDA calls
	cudaError_t err = cudaSuccess;
	size_t size = numElements * sizeof(float);
	printf("[Vector multiplication of 17K*120 * %d elements ]\n", sizeMultiplication);

	// Host buffers: A and B are inputs, C receives the GPU result.
	float *h_A = (float *)malloc(size);
	float *h_B = (float *)malloc(size);
	float *h_C = (float *)malloc(size);

	if (h_A == NULL || h_B == NULL || h_C == NULL)
	{
		fprintf(stderr, "Failed to allocate host vectors!\n");
		exit(EXIT_FAILURE);
	}

	// Initialize the host input vectors with values in [1, 2).
	for (int i = 0; i < numElements; ++i)
	{
		h_A[i] = rand() / (float)RAND_MAX + 1;
		h_B[i] = rand() / (float)RAND_MAX + 1;
	}

	// Allocate the device input vector A
	float *d_A = NULL;
	err = cudaMalloc((void **)&d_A, size);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Allocate the device input vector B
	float *d_B = NULL;
	err = cudaMalloc((void **)&d_B, size);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Copy the inputs to the device, timing the first transfer.
	sdkResetTimer(&timer);
	sdkStartTimer(&timer);

	printf("Copy input data from the host memory to the CUDA device\n");
	err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent copying by CPU : %.2f ms\n", sdkGetTimerValue(&timer));

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Launch with a ceil-div grid so the tail elements are covered; the
	// kernel itself bounds-checks against numElements.
	int threadsPerBlock = 256;
	int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
	printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);

	// Record the start event
	checkCudaErrors(cudaEventRecord(start, NULL));
	vectorMulKernel << <blocksPerGrid, threadsPerBlock >> >(d_A, d_B, numElements);
	checkCudaErrors(cudaEventRecord(stop, NULL));

	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to launch vectorMul kernel (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// The kernel multiplies in place into d_A, so that is the result buffer.
	printf("Copy output data from the CUDA device to the host memory\n");
	err = cudaMemcpy(h_C, d_A, size, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy the result from device to host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

	// Compute and print the performance (one multiply per element).
	double flopsTotal = numElements;
	double gigaFlops = (flopsTotal * 1.0e-9) / (msecTotal / 1000.0);
	printf(
		"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
		gigaFlops,
		msecTotal,
		flopsTotal / 1e+6);

	// CPU reference timing on the same inputs (h_A is overwritten here).
	sdkResetTimer(&timer);
	sdkStartTimer(&timer);
	vectorMulCPU(h_A, h_B, numElements);
	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));

	checkCudaErrors(cudaFree(d_A));
	checkCudaErrors(cudaFree(d_B));
	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
	sdkDeleteTimer(&timer);
	free(h_A);
	free(h_B);
	free(h_C);

	cudaDeviceReset();
}

// CPU reference: computes a per-line "personalized score" over 16 theme
// groups and writes it to s[line], timing the whole pass.
//
// v, e, p, r: column-major matrices of linesNo x 128 floats (indexed as
//             linesNo * column + line).
// s:          receives linesNo scores.
// w:          accepted for interface compatibility but not used here.
//
// NOTE(review): the two inner j-loops never use j in the index, so each
// group accumulates the same element 8 times and `column` only advances
// once per group (16 of the 128 columns are ever read).  The GPU kernel
// (companiesScoresKernel) mirrors this exactly, so host/device results
// still match — confirm whether `column` was meant to advance inside the
// j-loops before changing either side.
//
// Fixes vs. original: the unused themesIndexes allocation (which leaked)
// and other dead locals are removed, and the timer is deleted.
void
ComputePersonalizedScoresRates(int lineMultiplication, float *v, float *e, float *p,  float * r, float *s, float *w)
{
	if (lineMultiplication == 0)
		lineMultiplication = 1;
	int linesNo = 16896 * lineMultiplication;
	int indicesNo = 128;

	printf("executing companies = ~17K * %d , indicators = %d ... .\n", lineMultiplication, indicesNo);

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);

	float sum;

	for (int line = 0; line < linesNo; line++)
	{
		int column = 0;
		float score = 0;

		for (int i = 0; i < 16; i++)
		{
			// sum of enabled relative weights for this group
			sum = 0;
			for (int j = 0; j < 8; j++)
			{
				int index = linesNo * column + line;
				sum = sum + e[index] * r[index];
			}

			// weighted, normalized contribution of this group
			for (int j = 0; j < 8; j++)
			{
				int index = linesNo * column + line;
				score = score + v[index] * r[index] * p[index] / sum;
			}

			++column;
		}

		s[line] = score;
	}

	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));
	sdkDeleteTimer(&timer);
}

// Host-side driver: allocates and randomly initializes the indicator
// matrices, runs the CPU scoring reference, then releases everything.
//
// NOTE(review): the buffers are filled with row-major indexing
// (indicesNo * i + j) while the scoring routine reads them column-major
// (linesNo * column + line).  All accesses stay in bounds and the data is
// random, so the benchmark is unaffected — confirm the intended layout
// before feeding real data through this path.
//
// Fixes vs. original: e and s were leaked; all six allocations are freed.
void
ComputePersonalizedScoresRates(int lineMultiplication) 
{
	if (lineMultiplication == 0)
		lineMultiplication = 1;
	int linesNo = 16896 * lineMultiplication;
	int indicesNo = 128;
	int numElements = linesNo * indicesNo;

	size_t size = numElements * sizeof(float);
	float *e = (float *)malloc(size); // enable/disable matrix
	float *r = (float *)malloc(size); // relative weights matrix
	float *p = (float *)malloc(size); // absolute theme weights matrix
	float *v = (float *)malloc(size); // values matrix (multiplied with r and p in the score)
	float *w = (float *)malloc(size); // absolute weights vector (unused by the reference)

	float *s = (float *)malloc(linesNo * sizeof(float)); // companies scores vector

	// Random values in [1, 2) avoid zero divisors in the scoring loop.
	for (int i = 0; i < linesNo; ++i)
	{
		for (int j = 0; j < indicesNo; ++j)
		{
			int currentIndex = indicesNo * i + j;
			e[currentIndex] = 1;
			r[currentIndex] = rand() / (float)RAND_MAX + 1;
			p[currentIndex] = rand() / (float)RAND_MAX + 1;
			v[currentIndex] = rand() / (float)RAND_MAX + 1;
		}
	}

	ComputePersonalizedScoresRates(lineMultiplication, v, e, p, r, s, w);

	free(e);
	free(r);
	free(p);
	free(w);
	free(v);
	free(s);
}

// Device version of the CPU scoring reference: one thread computes the
// score of one line and writes it to s[line].
//
// Expects a 1-D launch; there is no bounds guard, so the launch must
// satisfy gridDim.x * blockDim.x == lines exactly (the caller uses
// linesNo / 128 blocks of 128 threads, and linesNo is a multiple of 128).
// An over-sized grid would write past s[lines - 1].
//
// NOTE(review): the two inner j-loops never use j in the index, so each
// group accumulates the same element 8 times and `column` advances only
// once per group.  This mirrors the CPU reference exactly (keeping the
// host/device comparison valid) — confirm whether `column` was meant to
// advance inside the j-loops before changing either side.
__global__  void
companiesScoresKernel(float *v, const float *e, const float *p, const float * r, float * s, int lines)
{
	// layout of v, e, r, p: column order (index = lines * column + line)
	int line = (blockDim.x * blockIdx.x + threadIdx.x);


	float sum;
	int column = 0;
	float score = 0;
	for (int i = 0; i < 16; i++)
	{
		// sum of enabled relative weights for this group
		sum = 0;
		for (int j = 0; j < 8; j++)
		{
			int index = lines * column + line;
			sum = sum + e[index] * r[index];
		}

		// weighted, normalized contribution of this group
		for (int j = 0; j < 8; j++)
		{
			int index = lines * column + line;
			score = score + v[index] * r[index] * p[index] / sum;
		}

		++column;
	}

	s[line] = score;
}

// GPU vs. CPU scoring benchmark: uploads the indicator matrices, runs
// companiesScoresKernel (one thread per line), runs the CPU reference on
// the same data and compares the two score vectors (L2 norm, 1e-6).
//
// Fixes vs. original: device buffers are released with cudaFree (the
// original used cudaFreeHost, which is only valid for pinned host memory),
// d_r is no longer leaked, the host allocations are freed, the unused d_w
// device buffer is removed, the kernel launch is checked with
// cudaGetLastError, and the events are destroyed.
void ComputePersonalizedScoresRatesGPU(int lineMultiplication)
{
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	if (lineMultiplication == 0)
		lineMultiplication = 1;

	int linesNo = 16896 * lineMultiplication;
	int indicesNo = 128;
	int numElements = linesNo * indicesNo;

	size_t size = numElements * sizeof(float);
	float *e = (float *)malloc(size); // enable/disable matrix
	float *r = (float *)malloc(size); // relative weights matrix
	float *p = (float *)malloc(size); // absolute theme weights matrix
	float *v = (float *)malloc(size); // values matrix
	float *w = (float *)malloc(size); // weights vector (unused by the CPU reference)

	float *s = (float *)malloc(linesNo * sizeof(float));   // device-computed scores
	float *s_h = (float *)malloc(linesNo * sizeof(float)); // host-computed scores

	printf("executing companies = 17K * %d , indicators = %d ... .\n", lineMultiplication, indicesNo);

	// Random values in [1, 2) avoid zero divisors in the scoring loops.
	for (int i = 0; i < linesNo; ++i)
	{
		for (int j = 0; j < indicesNo; ++j)
		{
			int currentIndex = indicesNo * i + j;
			e[currentIndex] = 1;
			r[currentIndex] = rand() / (float)RAND_MAX + 1;
			p[currentIndex] = rand() / (float)RAND_MAX + 1;
			v[currentIndex] = rand() / (float)RAND_MAX + 1;
		}
	}

	float *d_e = NULL; // enable/disable matrix
	checkCudaErrors(cudaMalloc((void **)&d_e, size));
	checkCudaErrors(cudaMemcpy(d_e, e, size, cudaMemcpyHostToDevice));

	float *d_r = NULL; // relative weights matrix
	checkCudaErrors(cudaMalloc((void **)&d_r, size));
	checkCudaErrors(cudaMemcpy(d_r, r, size, cudaMemcpyHostToDevice));

	float *d_p = NULL; // absolute theme weights matrix
	checkCudaErrors(cudaMalloc((void **)&d_p, size));
	checkCudaErrors(cudaMemcpy(d_p, p, size, cudaMemcpyHostToDevice));

	float *d_v = NULL; // values matrix
	checkCudaErrors(cudaMalloc((void **)&d_v, size));
	checkCudaErrors(cudaMemcpy(d_v, v, size, cudaMemcpyHostToDevice));

	float *d_s = NULL; // scores output
	checkCudaErrors(cudaMalloc((void **)&d_s, linesNo * sizeof(float)));

	// Record the start event and launch one thread per line; linesNo is a
	// multiple of 128, so the grid covers the data exactly (the kernel has
	// no bounds guard).
	checkCudaErrors(cudaEventRecord(start, NULL));
	int threadsPerBlock = 128;
	int blocksPerGrid = linesNo / threadsPerBlock;
	companiesScoresKernel << <blocksPerGrid, threadsPerBlock >> >(d_v, d_e, d_p, d_r, d_s, linesNo);
	checkCudaErrors(cudaGetLastError());

	checkCudaErrors(cudaEventRecord(stop, NULL));

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

	checkCudaErrors(cudaMemcpy(s, d_s, linesNo * sizeof(float), cudaMemcpyDeviceToHost));

	// CPU reference on identical inputs.
	ComputePersonalizedScoresRates(lineMultiplication, v, e, p, r, s_h, w);

	bool succes = sdkCompareL2fe(s, s_h, linesNo, 1.0e-6f);

	if (!succes)
	{
		printf("host computed scores != device computed scores !!!!!!!!!!!!!!");
	}
	else {
		// Compute and print the performance
		float msecPerMatrixMul = msecTotal;
		double flopsPerMatrixMul = 16 * 8 * 6 * linesNo;
		double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
		printf(
			"Results are equal. Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
			gigaFlops,
			msecPerMatrixMul,
			flopsPerMatrixMul / 1e+6);
	}

	checkCudaErrors(cudaFree(d_v));
	checkCudaErrors(cudaFree(d_e));
	checkCudaErrors(cudaFree(d_p));
	checkCudaErrors(cudaFree(d_r));
	checkCudaErrors(cudaFree(d_s));

	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));

	free(e);
	free(r);
	free(p);
	free(v);
	free(w);
	free(s);
	free(s_h);

	cudaDeviceReset();
}


// Benchmarks thrust::sort on the GPU vs. the CPU for ~17K * n random
// floats and verifies both produce the same sorted sequence.
//
// Fixes vs. original: the timer and the CUDA events are released.
void TestSortGpuVsCpu(int lineMultiplication) 
{
	if (lineMultiplication == 0)
		lineMultiplication = 1;
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	int n = 17e+3 * lineMultiplication;

	thrust::host_vector<float> h_A(n); // GPU path input / sorted output
	thrust::host_vector<float> A(n);   // CPU path, same data

	thrust::device_vector<float> d_A;

	// identical random data for both paths
	for (int i = 0; i < n; i++)
	{
		float val =  rand() % 1000 + 1;
		h_A[i] = val;
		A[i] = val;
	}

	//copy to device
	d_A = h_A;

	printf("\n\nexecuting sort companies = 17K * %d  ... .\n", lineMultiplication);

	checkCudaErrors(cudaEventRecord(start, NULL));

	thrust::sort(d_A.begin(), d_A.end());
	checkCudaErrors(cudaEventRecord(stop, NULL));

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

	// bring the sorted data back for the comparison below
	h_A = d_A;

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);

	thrust::sort(A.begin(), A.end());

	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));
	sdkDeleteTimer(&timer);

	bool succes = sdkCompareL2fe(thrust::raw_pointer_cast(&A[0]), thrust::raw_pointer_cast(&h_A[0]), n, 1.0e-6f);

	if (!succes)
	{
		printf("host computed scores != device computed scores !!!!!!!!!!!!!!\n");
	}
	else {
		//Compute and print the performance
		printf("time spent executing by GPU= %.3f msec\n", msecTotal);
	}

	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
}


// Benchmarks thrust::reduce_by_key on the GPU vs. the CPU for ~17K * n
// random (key, value) pairs and checks both produce the same reduced
// values (runs of consecutive equal keys are summed).
//
// Fixes vs. original: reduce_by_key takes the KEY range first and the
// value iterator second — the original passed the values as keys and the
// keys as values.  The device key vectors are now int (matching the host
// keys), the final comparison checks the reduced outputs rather than the
// untouched inputs (which compared equal trivially), and the timer and
// events are released.
void TestReduceByKey(int lineMultiplication) 
{
	if (lineMultiplication <= 0)
		lineMultiplication = 1;

	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	int n = 17e+3 * lineMultiplication;

	thrust::host_vector<float> h_A(n);  // values for the GPU path
	thrust::host_vector<int> h_k_A(n);  // keys (shared by both paths)
	thrust::host_vector<float> A(n);    // values for the CPU path
	thrust::host_vector<float> D(n);    // CPU reduced values
	thrust::host_vector<int> k_D(n);    // CPU unique keys

	thrust::device_vector<float> d_A;
	thrust::device_vector<int> d_k_A;
	thrust::device_vector<int> d_k_D(n);  // GPU unique keys
	thrust::device_vector<float> d_D(n);  // GPU reduced values

	int nKeys = 1000;
	for (int i = 0; i < n; i++)
	{
		float val = rand() % 1000 + 1;
		h_A[i] = val;
		h_k_A[i] = rand() % nKeys;
		A[i] = val;
	}

	//copy to device
	d_A = h_A;
	d_k_A = h_k_A;

	printf("\n\nexecuting reduce by key companies = 17K * %d  ... .\n", lineMultiplication);

	checkCudaErrors(cudaEventRecord(start, NULL));

	// keys first, then values; outputs are (unique keys, reduced values)
	thrust::reduce_by_key(d_k_A.begin(), d_k_A.end(), d_A.begin(), d_k_D.begin(), d_D.begin());
	checkCudaErrors(cudaEventRecord(stop, NULL));

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);

	// CPU reference on identical data; the returned pair marks the end of
	// the reduced output ranges.
	size_t reducedCount = thrust::reduce_by_key(h_k_A.begin(), h_k_A.end(), A.begin(), k_D.begin(), D.begin()).second - D.begin();

	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));
	sdkDeleteTimer(&timer);

	// bring the GPU reduction back and compare the reduced values
	thrust::host_vector<float> h_D = d_D;

	bool succes = sdkCompareL2fe(thrust::raw_pointer_cast(&D[0]), thrust::raw_pointer_cast(&h_D[0]), (unsigned int)reducedCount, 1.0e-6f);

	if (!succes)
	{
		printf("host computed scores != device computed scores !!!!!!!!!!!!!!\n");
	}
	else {
		//Compute and print the performance
		printf("time spent executing by GPU= %.3f msec\n", msecTotal);
	}

	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
}



// Benchmarks thrust::inclusive_scan_by_key on the GPU vs. the CPU for
// ~17K * n random (key, value) pairs and checks both produce the same
// scanned values (prefix sums restart at each run of a new key).
//
// Fixes vs. original: despite its name, the function ran reduce_by_key —
// and with the key/value arguments swapped at that.  It now performs an
// inclusive scan by key with the key range first, uses int device keys
// (matching the host keys), compares the scanned outputs rather than the
// untouched inputs (which compared equal trivially), and releases the
// timer and events.
void TestScanByKey(int lineMultiplication)
{
	if (lineMultiplication <= 0)
		lineMultiplication = 1;

	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	int n = 17e+3 * lineMultiplication;

	thrust::host_vector<float> h_A(n);  // values for the GPU path
	thrust::host_vector<int> h_k_A(n);  // keys (shared by both paths)
	thrust::host_vector<float> A(n);    // values for the CPU path
	thrust::host_vector<float> D(n);    // CPU scanned values

	thrust::device_vector<float> d_A;
	thrust::device_vector<int> d_k_A;
	thrust::device_vector<float> d_D(n); // GPU scanned values

	int nKeys = 1000;
	for (int i = 0; i < n; i++)
	{
		float val = rand() % 1000 + 1;
		h_A[i] = val;
		h_k_A[i] = rand() % nKeys;
		A[i] = val;
	}

	//copy to device
	d_A = h_A;
	d_k_A = h_k_A;

	printf("\n\nexecuting scan by key companies = 17K * %d  ... .\n", lineMultiplication);

	checkCudaErrors(cudaEventRecord(start, NULL));

	// keys first, then values; one scanned value per input element
	thrust::inclusive_scan_by_key(d_k_A.begin(), d_k_A.end(), d_A.begin(), d_D.begin());
	checkCudaErrors(cudaEventRecord(stop, NULL));

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);

	thrust::inclusive_scan_by_key(h_k_A.begin(), h_k_A.end(), A.begin(), D.begin());

	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));
	sdkDeleteTimer(&timer);

	// bring the GPU scan back and compare against the CPU scan
	thrust::host_vector<float> h_D = d_D;

	bool succes = sdkCompareL2fe(thrust::raw_pointer_cast(&D[0]), thrust::raw_pointer_cast(&h_D[0]), n, 1.0e-6f);

	if (!succes)
	{
		printf("host computed scores != device computed scores !!!!!!!!!!!!!!\n");
	}
	else {
		//Compute and print the performance
		printf("time spent executing by GPU= %.3f msec\n", msecTotal);
	}

	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
}




// Binary predicate for *_scan_by_key: returns true while `right` is zero,
// i.e. the two elements belong to the same segment; a nonzero `right` head
// flag starts a new segment.  `left` is intentionally ignored.
//
// Fix vs. original: the thrust::binary_function base class is deprecated
// and removed in recent Thrust releases; the equivalent typedefs are
// declared directly instead.
template <typename HeadFlagType>
struct head_flag_predicate
{
	typedef HeadFlagType first_argument_type;
	typedef HeadFlagType second_argument_type;
	typedef bool result_type;

	__host__ __device__
		bool operator()(HeadFlagType /*left*/, HeadFlagType right) const
	{
		return !right;
	}
};
// Writes the elements of v to std::cout, space-separated, followed by a
// trailing space and a newline.  Works with any container exposing
// size() and operator[].
template <typename Vector>
void print(const Vector& v)
{
	const size_t count = v.size();
	for (size_t idx = 0; idx != count; ++idx)
	{
		std::cout << v[idx] << " ";
	}
	std::cout << "\n";
}


// Benchmarks thrust::exclusive_scan_by_key on the GPU vs. the CPU using an
// alternating 1,0,1,0,... sequence as both the head-flag keys and the
// values; head_flag_predicate makes every nonzero flag start a new scan
// segment, so this stresses very frequent key switches.
//
// Fixes vs. original: CUDA calls are wrapped in checkCudaErrors (matching
// the rest of the file), the misleading "reduce by key" message now says
// "scan by key", the GPU and CPU outputs are actually compared, the
// pointless h_A = d_A copy-back (d_A is never modified) is removed, and
// the events and timer are released.
void TestScanByKeyFastKeySwitch(int lineMultiplication)
{
	if (lineMultiplication <= 0)
		lineMultiplication = 1;

	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	int n = 17e+3 * lineMultiplication;

	thrust::host_vector<int> h_A(n);
	thrust::host_vector<int> h_out(n);

	thrust::device_vector<int> d_A;
	thrust::device_vector<int> d_out(n);

	// alternating 1,0,1,0,... head flags used as both keys and values
	for (int i = 0; i < n; i++)
	{
		h_A[i] = (i + 1) % 2;
	}

	//copy to device
	d_A = h_A;

	printf("\n\nexecuting scan by key companies = 17K * %d  ... .\n", lineMultiplication);
	checkCudaErrors(cudaEventRecord(start, NULL));

	thrust::exclusive_scan_by_key
		(d_A.begin(), d_A.end(),
		d_A.begin(),
		d_out.begin(),
		0,
		head_flag_predicate<int>());

	checkCudaErrors(cudaEventRecord(stop, NULL));

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

	printf("time spent executing by GPU : %.2f ms\n", msecTotal);

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);

	thrust::exclusive_scan_by_key
		(h_A.begin(), h_A.end(),
		h_A.begin(),
		h_out.begin(),
		0,
		head_flag_predicate<int>());

	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));
	sdkDeleteTimer(&timer);

	// exact integer comparison of the two scans
	thrust::host_vector<int> gpuOut = d_out;
	bool same = true;
	for (int i = 0; i < n && same; i++)
	{
		same = (gpuOut[i] == h_out[i]);
	}
	if (!same)
		printf("host computed scan != device computed scan !!!!!!!!!!!!!!\n");

	checkCudaErrors(cudaEventDestroy(start));
	checkCudaErrors(cudaEventDestroy(stop));
}

