#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128


#include "DotProduct.h"

#include <iostream>
#include <iomanip>

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>

#include <assert.h>
#include <helper_string.h>  // helper for shared functions common to CUDA Samples

// CUDA runtime
#include <cuda_runtime.h>

// CUDA and CUBLAS functions
#include <cublas_v2.h>
#include <helper_functions.h>
#include <helper_cuda.h>


// Fallback min/max macros (used only when the platform headers do not
// already define them). Arguments and the whole expansion are fully
// parenthesized so operands containing lower-precedence operators
// (e.g. min(x & 3, y)) expand correctly. Note: each argument is still
// evaluated twice, so avoid side-effecting arguments.
#ifndef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef max
#define max(a,b) (((a) > (b)) ? (a) : (b))
#endif


// Shapes of the three matrices in the CUBLAS multiply test:
// C (rowsC x colsC) = A (rowsA x colsA) * B (rowsB x colsB), row major on
// the host. Filled in by initializeCUDA(), consumed by matrixMultiply().
typedef struct _matrixSize      // Optional Command-line multiplier for matrix sizes
{
	unsigned int rowsA, colsA, rowsB, colsB, rowsC, colsC;
} sMatrixSize;

// Row-major linear index of element (row, column) in a matrix that has
// nColumns columns.
int
ci(int row, int column, int nColumns) {
	int rowBase = row * nColumns;
	return rowBase + column;
}

// Pseudo-random integer in [0, modulo), drawn from rand().
int
myrand(int modulo)
{
	int raw = rand();
	return raw % modulo;
}

////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on CPU
//! C = A * B row major

////////////////////////////////////////////////////////////////////////////////
// CPU reference implementation of C = A * B (all buffers row major).
// A is rowsA x colsA, B is colsA x colsB, C is rowsA x colsB.
// Accumulates each dot product in double to reduce rounding error before
// the final cast back to float.
void
matrixMulCPU(float *C, const float *A, const float *B, unsigned int rowsA, unsigned int colsA, unsigned int colsB)
{
	for (unsigned int row = 0; row < rowsA; ++row)
	{
		const float *aRow = A + row * colsA;

		for (unsigned int col = 0; col < colsB; ++col)
		{
			double acc = 0.0;

			for (unsigned int k = 0; k < colsA; ++k)
				acc += (double)aRow[k] * (double)B[k * colsB + col];

			C[row * colsB + col] = (float)acc;
		}
	}
}

// Allocates a matrix with random float entries.
void
randomInit(float *data, int size)
{
	for (int i = 0; i < size; ++i)
		data[i] = rand() / (float)RAND_MAX;
}

// Prints up to iListLength element-wise differences between data1 (CPU) and
// data2 (GPU) that exceed fListTol, scanning the rowsNo x columnsNo row-major
// matrices column by column, then prints the total mismatch count.
void
printDiff(float *data1, float *data2, int rowsNo, int columnsNo, int iListLength, float fListTol)
{
	printf("Listing first %d Differences > %.6f...\n", iListLength, fListTol);
	int i, j, k;
	int error_count = 0;

	for (j = 0; j < columnsNo; j++)
	{
		if (error_count < iListLength)
		{
			// Bug fix: the outer loop walks columns, so the section header
			// must say "Column", not "Row".
			printf("\n  Column %d:\n", j);
		}

		for (i = 0; i < rowsNo; i++)
		{
			// Row-major index of element (row i, column j).
			k = i * columnsNo + j;
			float fDiff = fabs(data1[k] - data2[k]);

			if (fDiff > fListTol)
			{
				if (error_count < iListLength)
				{
					printf("    Loc(%d,%d)\tCPU=%.5f\tGPU=%.5f\tDiff=%.6f\n", i, j, data1[k], data2[k], fDiff);
				}

				error_count++;
			}
		}
	}

	printf(" \n  Total Errors = %d\n", error_count);
}

// Fills devID with the runtime's current CUDA device, prints its name and
// compute capability, and sets up the matrix shapes for the multiply test:
// A is (17000 * lineMultiplication) x 128, B is 128 x 1, C is A*B's shape.
// Exits the process on any CUDA error.
void
initializeCUDA(int &devID, sMatrixSize &matrix_size, int lineMultiplication)
{
	// devID is an output parameter: it starts at 0 and is then overwritten
	// with whichever device the runtime considers current. No command-line
	// override is implemented here.
	cudaError_t error;
	devID = 0;

	// Query the current device id (not the SM count).
	error = cudaGetDevice(&devID);

	if (error != cudaSuccess)
	{
		printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
		exit(EXIT_FAILURE);
	}
	cudaDeviceProp deviceProp;

	error = cudaGetDeviceProperties(&deviceProp, devID);

	if (error != cudaSuccess)
	{
		printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
		exit(EXIT_FAILURE);
	}

	printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);


	// Problem shape: one row per "company", one column per "indicator".
	// NOTE(review): lineMultiplication == 0 would yield empty matrices;
	// the visible caller (TestMatrixMul) maps 0 to 1 before calling this.
	int companyNo = 17000 *lineMultiplication;
	int indicatorsNo = 128;
	int sectorsNo = 1;// 46 * matrixNo

	matrix_size.rowsA = companyNo;
	matrix_size.colsA = indicatorsNo;

	matrix_size.rowsB = indicatorsNo;
	matrix_size.colsB = sectorsNo;

	matrix_size.rowsC = companyNo;
	matrix_size.colsC = sectorsNo;


	printf("MatrixA(%u,%u), MatrixB(%u,%u), MatrixC(%u,%u)\n",
		matrix_size.rowsA, matrix_size.colsA,
		matrix_size.rowsB, matrix_size.colsB,
		matrix_size.rowsC, matrix_size.colsC);
}

// Writes a rows x cols row-major matrix to stdout, one matrix row per output
// line, each entry right-aligned in a field of width 5, followed by a blank
// line.
void
printMatrix(float* matrix, int rows, int cols)
{
	for (int r = 0; r < rows; ++r)
	{
		for (int c = 0; c < cols; ++c)
			std::cout << std::setw(5) << matrix[r * cols + c] << " ";

		std::cout << "\n";
	}

	std::cout << "\n";
}

////////////////////////////////////////////////////////////////////////////////
//! Run a simple test matrix multiply using CUBLAS
////////////////////////////////////////////////////////////////////////////////

// Multiplies A (rowsA x colsA) by B (rowsB x colsB) once with cublasSgemm,
// times the GPU work with CUDA events, recomputes the product on the CPU
// with matrixMulCPU, and compares both results with an L2 metric.
// Returns EXIT_SUCCESS (0) when the results agree within 1e-6, otherwise
// EXIT_FAILURE (1).
int
matrixMultiply(int devID, sMatrixSize &matrix_size)
{
	cudaDeviceProp deviceProp;

	checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));

	// Host input buffers.
	// NOTE(review): malloc results are never checked for NULL in this function.
	unsigned int size_A = matrix_size.colsA * matrix_size.rowsA;
	unsigned int mem_size_A = sizeof(float) * size_A;
	float *h_A = (float *)malloc(mem_size_A);



	unsigned int size_B = matrix_size.colsB * matrix_size.rowsB;
	unsigned int mem_size_B = sizeof(float) * size_B;
	float *h_B = (float *)malloc(mem_size_B);

	// initialize inputs with small random integers in [0, 10)
	// NOTE(review): signed loop index compared against unsigned size_A/size_B.
	float result;
	for (int i = 0; i < size_A; i++)
	{
		result = myrand(10);
		h_A[i] = result;
		//std::cout << hA[i] << " ";
	}


	//std::cout << "\nA:\n";
	//printMatrix(thrust::raw_pointer_cast(&hA[0]), matrix_size.rowsA, matrix_size.colsA);

	for (int i = 0; i < size_B; i++)
	{
		result = myrand(10);
		h_B[i] = result;
	}

	// Device buffers; inputs are uploaded before C is allocated.
	float *d_A, *d_B, *d_C;
	unsigned int size_C = matrix_size.rowsC * matrix_size.colsC;
	unsigned int mem_size_C = sizeof(float) * size_C;

	checkCudaErrors(cudaMalloc((void **)&d_A, mem_size_A));
	checkCudaErrors(cudaMalloc((void **)&d_B, mem_size_B));
	checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice));
	checkCudaErrors(cudaMalloc((void **)&d_C, mem_size_C));

	// h_C receives the CPU reference result, h_CUBLAS the GPU result.
	float *h_C = (float *)malloc(mem_size_C);
	float *h_CUBLAS = (float *)malloc(mem_size_C);

	// create and start timer
	printf("Computing result using CUBLAS...");

	// execute the kernel
	int nIter = 1;

	// CUBLAS version 2.0
	{
		const float alpha = 1.0f;
		const float beta = 0.0f;
		cublasHandle_t handle;
		cudaEvent_t start, stop;
		cublasStatus_t status;

		checkCudaErrors(cublasCreate(&handle));

		//Perform warmup operation with cublas
		//status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.colsB, matrix_size.rowsA /*rowD*/, matrix_size.colsA /*colD*/,
		//	&alpha, thrust::raw_pointer_cast(&B[0]), matrix_size.colsB /*colE*/,
		//	thrust::raw_pointer_cast(&A[0]), matrix_size.colsA /*colD*/,
		//	&beta, thrust::raw_pointer_cast(&C[0]), matrix_size.colsB /*colE*/);

		//checkCudaErrors(status);

		// Allocate CUDA events that we'll use for timing
		checkCudaErrors(cudaEventCreate(&start));
		checkCudaErrors(cudaEventCreate(&stop));

		// Record the start event
		checkCudaErrors(cudaEventRecord(start, NULL));

		for (int j = 0; j < nIter; j++)
		{
			// cuBLAS is column major: computing B^T * A^T in column-major
			// terms yields the row-major product A * B in d_C.
			status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, matrix_size.colsB, matrix_size.rowsA /*rowD*/, matrix_size.colsA /*colD*/,
				&alpha, d_B, matrix_size.colsB /*colE*/,
				d_A, matrix_size.colsA /*colD*/,
				&beta, d_C, matrix_size.colsB /*colE*/);

			checkCudaErrors(status);
		}

		checkCudaErrors(cudaMemcpy(h_CUBLAS, d_C, mem_size_C, cudaMemcpyDeviceToHost));

		// Destroy the handle
		checkCudaErrors(cublasDestroy(handle));

		printf("done.\n");

		// Record the stop event.
		// NOTE(review): this is recorded after the device-to-host copy and
		// cublasDestroy, so the measured time includes both, not just the GEMM.
		checkCudaErrors(cudaEventRecord(stop, NULL));

		// Wait for the stop event to complete
		checkCudaErrors(cudaEventSynchronize(stop));

		float msecTotal = 0.0f;
		checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));

		// Compute and print the performance (2 flops per multiply-accumulate)
		float msecPerMatrixMul = msecTotal / nIter;
		double flopsPerMatrixMul = 2 * (double)matrix_size.rowsA * (double)matrix_size.colsA * (double)matrix_size.colsB;
		double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
		printf(
			"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
			gigaFlops,
			msecPerMatrixMul,
			flopsPerMatrixMul / (float)1e+6);
	}

	// compute reference solution
	printf("Computing result using host CPU...");

	/*std::cout << "\nA:\n";
	printMatrix(thrust::raw_pointer_cast(&hA[0]), matrix_size.rowsA, matrix_size.colsA);*/

	/*std::cout << "\nB:\n";
	printMatrix(thrust::raw_pointer_cast(&hB[0]), matrix_size.rowsB, matrix_size.colsB);*/

	/*std::cout << "\nC:\n";
	printMatrix(h_CUBLAS, matrix_size.rowsC, matrix_size.colsC);*/

	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);
	matrixMulCPU(h_C, h_A, h_B, matrix_size.rowsA, matrix_size.colsA, matrix_size.colsB);
	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f\n", sdkGetTimerValue(&timer));

	//printMatrix(reference, matrix_size.rowsC, matrix_size.colsC);

	// check result (CUBLAS): relative L2 error against the CPU reference
	bool resCUBLAS = sdkCompareL2fe(h_C, h_CUBLAS, size_C, 1.0e-6f);
	if (resCUBLAS != true)
	{
		//printDiff(reference, h_CUBLAS, matrix_size.uiWC, matrix_size.uiHC, 100, 1.0e-5f);
	}

	printf("Comparing CUBLAS Matrix Multiply with CPU results: %s\n", (true == resCUBLAS) ? "PASS" : "FAIL");

	free(h_A);
	free(h_B);
	free(h_C);
	free(h_CUBLAS);
	checkCudaErrors(cudaFree(d_A));
	checkCudaErrors(cudaFree(d_B));
	checkCudaErrors(cudaFree(d_C));

	
	// clean up memory

	// cudaDeviceReset causes the driver to clean up all state. While
	// not mandatory in normal operation, it is good practice.  It is also
	// needed to ensure correct operation when the application is being
	// profiled. Calling cudaDeviceReset causes all profile data to be
	// flushed before the application exits

	if (resCUBLAS == true)
	{
		return EXIT_SUCCESS;    // return value = 0
	}
	else
	{
		return EXIT_FAILURE;    // return value = 1
	}
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////

// Entry point for the CUBLAS matrix-multiply benchmark. lineMultiplication
// scales A's row count (0 is treated as 1). Returns matrixMultiply's result:
// EXIT_SUCCESS on PASS, EXIT_FAILURE on FAIL.
int
TestMatrixMul(int lineMultiplication)
{
	// A multiplier of 0 would produce empty matrices; treat it as 1.
	int multiplier = (lineMultiplication == 0) ? 1 : lineMultiplication;

	printf("\n-----[Matrix Multiply CUBLAS] - Starting...\n");

	int devID = 0;
	sMatrixSize matrix_size;

	initializeCUDA(devID, matrix_size, multiplier);

	//cudaDeviceReset();
	return matrixMultiply(devID, matrix_size);
}

// Element type of the thrust::counting_iterator used below.
typedef unsigned int uint;

// Thrust functor applied through a zip iterator. For each tuple
// t = (global index, value, enabled flag, output slot):
//   output = value / d[index % nLines]   when the flag is nonzero
//   output = 0                           otherwise
// d must point to device memory holding at least nLines divisors.
struct assign_functor
{
	float *d;       // device pointer to the per-line divisor array
	int nLines;     // indices wrap modulo this value

	// nLines defaults to 120 to preserve the original hard-coded behavior;
	// the unused, never-initialized `byte *enabled` member was removed.
	assign_functor(float *perLineValues, int lineCount = 120)
		: d(perLineValues), nLines(lineCount)
	{
	}

	template <typename Tuple>
	__device__
		void operator()(Tuple t)
	{
		int index = thrust::get<0>(t) % nLines;
		float value = thrust::get<1>(t);
		byte  enabled = thrust::get<2>(t);

		if (enabled)
		{
			thrust::get<3>(t) = value / d[index];
		}
		else
		{
			thrust::get<3>(t) = 0;
		}
	}
};

// Benchmarks a masked per-line divide on the GPU via thrust::for_each over a
// zip iterator: out[i] = v1[i] / perLine[i % nLines] where enabled[i] != 0.
// Prints event-timed throughput and two sample output elements.
void
TestThrustIndirectedVectorMul(void)
{

	int nLines = 120;
	int N = 17.2e+3 * nLines;
	printf("\n----- [Indirected vector Mul using local kernel] - Starting...\n");

	thrust::host_vector <float> h_float_vec1(N);
	thrust::host_vector <byte> h_vector_enabled(N);
	thrust::host_vector <float> h_float_vec2(nLines);

	thrust::counting_iterator<uint> first(0);
	thrust::counting_iterator<uint> last = first + N;

	// Every element enabled, every per-line divisor 1, values 0..N-1.
	for (int i = 0; i < N; i++)
	{
		h_vector_enabled[i] = 1;
		h_float_vec2[i%nLines] = 1;
		h_float_vec1[i] = i;
	}

	thrust::device_vector <float> d_float_vec1 = h_float_vec1;
	thrust::device_vector <float> d_float_vec3(N);
	thrust::device_vector <byte> d_vector_enabled = h_vector_enabled;
	thrust::device_vector <float> d_float_vec2 = h_float_vec2;

	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	checkCudaErrors(cudaEventRecord(start, NULL));

	float* perLineVector = thrust::raw_pointer_cast(&d_float_vec2[0]);

	// Bug fix: the end-of-range tuple previously paired d_float_vec3.begin()
	// with the other components' end() iterators, producing an inconsistent
	// zip iterator. Every component of the end tuple must point one past the
	// last element.
	thrust::for_each(
		thrust::make_zip_iterator(
		thrust::make_tuple(first, d_float_vec1.begin(), d_vector_enabled.begin(), d_float_vec3.begin())
		),
		thrust::make_zip_iterator(
		thrust::make_tuple(last, d_float_vec1.end(), d_vector_enabled.end(), d_float_vec3.end())
		),
		assign_functor(perLineVector)
		);

	checkCudaErrors(cudaEventRecord(stop, NULL));

	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));


	// Compute and print the performance (one divide per element)
	float msecPerMatrixMul = msecTotal;
	double flopsPerMatrixMul = N;
	double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
	printf(
		"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
		gigaFlops,
		msecPerMatrixMul,
		flopsPerMatrixMul / 1e+6);

	// Reading device_vector elements from the host triggers implicit copies.
	std::cout << d_float_vec3[10] << " " << d_float_vec3[N - 1] << std::endl;
}

// Device kernel, one thread per element: A[i] = A[i] * D[i] / B[i] for every
// i < numElements. C is accepted for signature compatibility only; the code
// path that wrote it is disabled (commented out) in the original and is not
// reproduced here.
__global__ void
vectorMul(float *A, const float *B, float *C, float* D, int numElements)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;

	// Guard the tail: the grid may span more threads than elements.
	if (idx >= numElements)
		return;

	A[idx] = A[idx] * D[idx] / B[idx];
}

// CPU reference for the indirect vector multiply:
// C[i] = A[i] / B[i % 120] when D[i] is nonzero, else 0.
// Note the divisor is indexed per "line" (i modulo 120), unlike the GPU
// kernel above, which divides element-wise by B[i].
void
vectorMulCPU(const float *A, const float *B, float *C, float* D, int numElements)
{
	for (int i = 0; i < numElements; i++)
	{
		if (!D[i])
		{
			C[i] = 0;
			continue;
		}

		int line = i % 120;
		C[i] = A[i] / B[line];
	}
}


// Benchmarks the vectorMul kernel on 17000*120 elements, then times the CPU
// reference. Prints event-timed GPU throughput and CPU wall time.
// NOTE(review): this routine has several apparent defects, flagged inline:
// h_D is never copied to d_D, d_C is copied back though the kernel writes A,
// and the CPU call's arguments look shifted relative to the signature.
void
TestIndirectVectorMul(void)
{
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	printf("\n----- [Indirected vector Mul using local kernel] - Starting...\n");

	int numElements = 17000 * 120;
	// Error code to check return values for CUDA calls
	cudaError_t err = cudaSuccess;
	size_t size = numElements * sizeof(float);
	printf("[Vector addition of %d elements]\n", numElements);

	// Allocate the host input vector A
	float *h_A = (float *)malloc(size);

	// Allocate the host input vector B
	float *h_B = (float *)malloc(size);

	// Allocate the host output vector C
	float *h_C = (float *)malloc(size);

	// Allocate the host input vector D (enable mask)
	float *h_D = (float *)malloc(size);


	// Verify that allocations succeeded
	// NOTE(review): h_D is not included in this check.
	if (h_A == NULL || h_B == NULL || h_C == NULL)
	{
		fprintf(stderr, "Failed to allocate host vectors!\n");
		exit(EXIT_FAILURE);
	}

	// Initialize the host input vectors: A, B in [1, 2), every D enabled.
	for (int i = 0; i < numElements; ++i)
	{
		h_A[i] = rand() / (float)RAND_MAX + 1;
		h_B[i] = rand() / (float)RAND_MAX + 1;
		h_D[i] = 1;
	}

	// Allocate the device input vector A
	float *d_A = NULL;
	err = cudaMalloc((void **)&d_A, size);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Allocate the device input vector B
	float *d_B = NULL;
	err = cudaMalloc((void **)&d_B, size);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Allocate the device output vector C
	// NOTE(review): the result of this cudaMalloc is overwritten by the d_D
	// allocation below before it is checked.
	float *d_C = NULL;
	err = cudaMalloc((void **)&d_C, size);

	float *d_D = NULL;
	err = cudaMalloc((void **)&d_D, numElements * sizeof(float));

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Copy the host input vectors A and B in host memory to the device input vectors in
	// device memory
	// NOTE(review): h_D is never copied to d_D, so the kernel reads
	// uninitialized device memory through its D argument.
	printf("Copy input data from the host memory to the CUDA device\n");
	err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Launch the Vector Add CUDA Kernel
	int threadsPerBlock = 96;
	int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
	printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);

	// Record the start event
	checkCudaErrors(cudaEventRecord(start, NULL));

	vectorMul << <blocksPerGrid, threadsPerBlock >> >(d_A, d_B, d_C, d_D, numElements);

	checkCudaErrors(cudaEventRecord(stop, NULL));
	err = cudaGetLastError();

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Copy the device result vector in device memory to the host result vector
	// in host memory.
	// NOTE(review): the kernel writes its result into d_A, not d_C; d_C is
	// never written, so h_C receives uninitialized device data here.
	printf("Copy output data from the CUDA device to the host memory\n");
	err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}


	checkCudaErrors(cudaEventSynchronize(stop));

	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));


	// Compute and print the performance (one op per element)
	float msecPerMatrixMul = msecTotal;
	double flopsPerMatrixMul = numElements;
	double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
	printf(
		"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
		gigaFlops,
		msecPerMatrixMul,
		flopsPerMatrixMul / 1e+6);

	// Frees all device allocations; host buffers h_A..h_D are leaked.
	cudaDeviceReset();


	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);
	// NOTE(review): argument order looks shifted — the signature is
	// (A, B, C, D, n), so this computes h_B[i] = h_C[i] / h_A[line],
	// overwriting input h_B. Expected call is (h_A, h_B, h_C, h_D, n);
	// confirm intent before changing (the result is only timed, not compared).
	vectorMulCPU(h_C, h_A, h_B, h_D, numElements);
	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));

}


// Benchmarks a cublasSaxpy (d_B = alpha*d_A + d_B) over 17000*120 elements,
// then times the CPU reference routine. Prints event-timed throughput.
// NOTE(review): several apparent defects are flagged inline: the SAXPY result
// lands in d_B but d_C (never written) is copied back; d_D is allocated and
// never used; the cublasSaxpy status is not checked.
void
TestcuBlasIndirectVectorMul(void)
{
	cudaEvent_t start, stop;
	checkCudaErrors(cudaEventCreate(&start));
	checkCudaErrors(cudaEventCreate(&stop));

	printf("\n----- [TestcuBlasIndirectVectorMul] - Starting...\n");

	int numElements = 17000 * 120;
	// Error code to check return values for CUDA calls
	cudaError_t err = cudaSuccess;
	size_t size = numElements * sizeof(float);
	printf("[Vector addition of %d elements]\n", numElements);

	// Allocate the host input vector A
	float *h_A = (float *)malloc(size);

	// Allocate the host input vector B
	float *h_B = (float *)malloc(size);

	// Allocate the host output vector C
	float *h_C = (float *)malloc(size);

	// Allocate the host input vector D (enable mask)
	float *h_D = (float *)malloc(numElements * sizeof(float));


	// Verify that allocations succeeded
	// NOTE(review): h_D is not included in this check.
	if (h_A == NULL || h_B == NULL || h_C == NULL)
	{
		fprintf(stderr, "Failed to allocate host vectors!\n");
		exit(EXIT_FAILURE);
	}

	// Initialize the host input vectors: A, B in [1, 2), every D enabled.
	for (int i = 0; i < numElements; ++i)
	{
		h_A[i] = rand() / (float)RAND_MAX + 1;
		h_B[i] = rand() / (float)RAND_MAX + 1;
		h_D[i] = 1;
	}

	// Allocate the device input vector A
	float *d_A = NULL;
	err = cudaMalloc((void **)&d_A, size);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Allocate the device input vector B
	float *d_B = NULL;
	err = cudaMalloc((void **)&d_B, size);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Allocate the device output vector C
	// NOTE(review): this cudaMalloc's result is overwritten by the d_D
	// allocation below before it is checked; d_D itself is never used.
	float *d_C = NULL;
	err = cudaMalloc((void **)&d_C, size);

	float *d_D = NULL;
	err = cudaMalloc((void **)&d_D, numElements * sizeof(float));

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Copy the host input vectors A and B in host memory to the device input vectors in
	// device memory
	printf("Copy input data from the host memory to the CUDA device\n");
	err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Launch configuration is printed for reference only; cuBLAS chooses its
	// own internally.
	int threadsPerBlock = 96;
	int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
	printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);

	// Time d_B = alpha * d_A + d_B with CUDA events.
	cublasHandle_t handle;
	checkCudaErrors(cublasCreate(&handle));

	checkCudaErrors(cudaEventRecord(start, NULL));
	float alpha = 1;
	// NOTE(review): the cublasStatus_t returned here is ignored.
	cublasSaxpy(handle, numElements, &alpha, d_A, 1, d_B, 1);
	checkCudaErrors(cudaEventRecord(stop, NULL));
	checkCudaErrors(cudaEventSynchronize(stop));
	err = cudaGetLastError();

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}

	// Copy the device result vector in device memory to the host result vector
	// in host memory.
	// NOTE(review): the SAXPY result is in d_B, but d_C (never written) is
	// copied back, so h_C receives uninitialized device data.
	printf("Copy output data from the CUDA device to the host memory\n");
	err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}




	float msecTotal = 0.0f;
	checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));


	// Compute and print the performance (one op per element)
	float msecPerMatrixMul = msecTotal;
	double flopsPerMatrixMul = numElements;
	double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
	printf(
		"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.2f MOps\n",
		gigaFlops,
		msecPerMatrixMul,
		flopsPerMatrixMul / 1e+6);

	// Frees all device allocations (the cuBLAS handle is not destroyed first;
	// host buffers h_A..h_D are leaked).
	cudaDeviceReset();


	StopWatchInterface *timer = NULL;
	sdkCreateTimer(&timer);
	sdkResetTimer(&timer);

	sdkStartTimer(&timer);
	// NOTE(review): argument order looks shifted — the signature is
	// (A, B, C, D, n), so this overwrites input h_B; confirm intent
	// (the result is only timed, never compared).
	vectorMulCPU(h_C, h_A, h_B, h_D, numElements);
	sdkStopTimer(&timer);

	printf("done.\n");
	printf("time spent executing by CPU : %.2f ms\n", sdkGetTimerValue(&timer));

}



// Test-harness entry point. test_switch selects which benchmark family runs;
// only case 3 (GPU-vs-CPU sort sweep) is active in this build. Each active
// case performs a warm-up call with argument 0 and then sweeps multiples of
// 10. Waits for a keypress before exiting.
int
main(void)
{
	const int test_switch = 3;

	switch (test_switch)
	{
	case 1:
		for (int step = 0; step <= 3; ++step)
			ComputePersonalizedScoresRatesGPU(step * 10);
		break;

	case 2:
		vectorMulGPU(1);
		for (int step = 0; step <= 3; ++step)
			vectorMulGPU(step * 10);
		break;

	case 3:
		// Note: the step == 0 iteration repeats the warm-up call.
		TestSortGpuVsCpu(0);
		cudaDeviceReset();
		for (int step = 0; step <= 10; ++step)
		{
			TestSortGpuVsCpu(step * 10);
			cudaDeviceReset();
		}
		break;

	case 4:
		TestMatrixMul(0);
		for (int step = 0; step <= 10; ++step)
			TestMatrixMul(step * 10);
		break;

	case 5:
		TestReduceByKey(0);
		for (int step = 0; step < 10; ++step)
			TestReduceByKey(step * 10);
		break;

	default:
		break;
	}


	//runTest();


	std::cout << "Test Completed. press any key";
	char c;
	std::cin >> c;

	return 0;
}
