/*
* Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/

/*
* This program generates random numbers using three different generators on the GPU, as well as the CPU rand() function.
* For each generated sequence, it also calculates the Monte Carlo Pi value and the mean of the sequence in order to test
* the randomness of the numbers generated.
*/

// Utilities and system includes
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>


// Utilities and system includes
#include <sdkHelper.h>  // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h>

/* Using updated (v2) interfaces to cublas and cusparse */
#include <cuda_runtime_api.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>

const int    DEFAULT_RAND_N = 2400000;
const unsigned int DEFAULT_SEED = 777;

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err)           __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
	if( cudaSuccess != err) {
		fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
			file, line, (int)err, cudaGetErrorString( err ) );
		exit(-1);
	}
}

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCurandErrors(err)           __checkCurandErrors (err, __FILE__, __LINE__)

inline void __checkCurandErrors( curandStatus_t err, const char *file, const int line )
{
	if( CURAND_STATUS_SUCCESS != err) {
		fprintf(stderr, "%s(%i) : checkCurandErrors() CURAND error %d: ", file, line, (int)err);
		switch (err) {
		case CURAND_STATUS_VERSION_MISMATCH:    fprintf(stderr, "CURAND_STATUS_VERSION_MISMATCH");
		case CURAND_STATUS_NOT_INITIALIZED:     fprintf(stderr, "CURAND_STATUS_NOT_INITIALIZED");
		case CURAND_STATUS_ALLOCATION_FAILED:   fprintf(stderr, "CURAND_STATUS_ALLOCATION_FAILED");
		case CURAND_STATUS_TYPE_ERROR:          fprintf(stderr, "CURAND_STATUS_TYPE_ERROR");
		case CURAND_STATUS_OUT_OF_RANGE:        fprintf(stderr, "CURAND_STATUS_OUT_OF_RANGE"); 
		case CURAND_STATUS_LENGTH_NOT_MULTIPLE: fprintf(stderr, "CURAND_STATUS_LENGTH_NOT_MULTIPLE");
		case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED: 
			fprintf(stderr, "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED");
		case CURAND_STATUS_LAUNCH_FAILURE:      fprintf(stderr, "CURAND_STATUS_LAUNCH_FAILURE"); 
		case CURAND_STATUS_PREEXISTING_FAILURE: fprintf(stderr, "CURAND_STATUS_PREEXISTING_FAILURE");
		case CURAND_STATUS_INITIALIZATION_FAILED:     
			fprintf(stderr, "CURAND_STATUS_INITIALIZATION_FAILED");
		case CURAND_STATUS_ARCH_MISMATCH:       fprintf(stderr, "CURAND_STATUS_ARCH_MISMATCH");
		case CURAND_STATUS_INTERNAL_ERROR:      fprintf(stderr, "CURAND_STATUS_INTERNAL_ERROR");
		default: fprintf(stderr, "CURAND Unknown error code\n");
		}
		exit(-1);
	}
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg)      __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
	cudaError_t err = cudaGetLastError();
	if( cudaSuccess != err) {
		fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
			file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
		exit(-1);
	}
}

// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
	int deviceCount;
	checkCudaErrors(cudaGetDeviceCount(&deviceCount));
	if (deviceCount == 0) {
		fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
		exit(-1);
	}
	if (devID < 0) 
		devID = 0;
	if (devID > deviceCount-1) {
		fprintf(stderr, "\n");
		fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
		fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
		fprintf(stderr, "\n");
		return -devID;
	}

	cudaDeviceProp deviceProp;
	checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
	if (deviceProp.major < 1) {
		fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
		exit(-1);                                                  \
	}

	checkCudaErrors( cudaSetDevice(devID) );
	printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
	return devID;
}

// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
	int current_device   = 0, sm_per_multiproc = 0;
	int max_compute_perf = 0, max_perf_device  = 0;
	int device_count     = 0, best_SM_arch     = 0;
	cudaDeviceProp deviceProp;

	cudaGetDeviceCount( &device_count );
	// Find the best major SM Architecture GPU device
	while ( current_device < device_count ) {
		cudaGetDeviceProperties( &deviceProp, current_device );
		if (deviceProp.major > 0 && deviceProp.major < 9999) {
			best_SM_arch = MAX(best_SM_arch, deviceProp.major);
		}
		current_device++;
	}

	// Find the best CUDA capable GPU device
	current_device = 0;
	while( current_device < device_count ) {
		cudaGetDeviceProperties( &deviceProp, current_device );
		if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
			sm_per_multiproc = 1;
		} else {
			sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
		}

		int compute_perf  = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
		if( compute_perf  > max_compute_perf ) {
			// If we find GPU with SM major > 2, search only these
			if ( best_SM_arch > 2 ) {
				// If our device==dest_SM_arch, choose this, or else pass
				if (deviceProp.major == best_SM_arch) {	
					max_compute_perf  = compute_perf;
					max_perf_device   = current_device;
				}
			} else {
				max_compute_perf  = compute_perf;
				max_perf_device   = current_device;
			}
		}
		++current_device;
	}
	return max_perf_device;
}

// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
	cudaDeviceProp deviceProp;
	int devID = 0;
	// If the command-line has a device number specified, use it
	if (checkCmdLineFlag(argc, argv, "device")) {
		devID = getCmdLineArgumentInt(argc, argv, "device=");
		if (devID < 0) {
			printf("Invalid command line parameters\n");
			exit(-1);
		} else {
			devID = gpuDeviceInit(devID);
			if (devID < 0) {
				printf("exiting...\n");
				shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
				exit(-1);
			}
		}
	} else {
		// Otherwise pick the device with highest Gflops/s
		devID = gpuGetMaxGflopsDeviceId();
		checkCudaErrors( cudaSetDevice( devID ) );
		checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
		printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
	}
	return devID;
}
// end of CUDA Helper Functions


///////////////////////////////////////////////////////////////////////////////
// Kernels
//////////////////////////////////////////////////////////////////////////////

//Distance formula
__device__ float distance(float x, float y, float a, float b){
	return sqrt(pow(x-a,2)+pow(y-b,2));
} 

//Calculate the Monte Carlo Pi value
__global__ void calcPi(float *d_rand, int rand_n, int *output)
{
	//declare shared memory for hit counter
	__shared__ int hit[512]; //one spot for each thread
	hit[threadIdx.x] = 0;
	__syncthreads();

	//initialize thread index and stride
	int idx = (threadIdx.x + blockIdx.x * blockDim.x)*2;
	int stride = blockDim.x *gridDim.x;

	//loop using a tuple of two d_rand numbers to calculate distance and check for a hit
	while(idx < rand_n){
		float x = d_rand[idx];
		float y = d_rand[idx+1];
		if(distance(x,y,0.5,0.5) <= 0.5){ //0.5 = radius of circle inscribed w/in 0-1, 0-1 square
			//hit
			hit[threadIdx.x]++; //increment hit counter
		}
		idx += stride;
	}

	__syncthreads();
	stride = 256;
	while(stride > 0){
		if(threadIdx.x < stride){
			hit[threadIdx.x] += hit[threadIdx.x+stride]; //combine all of the separate hit counters
		}
		stride/= 2;
		__syncthreads();
	}

	if(threadIdx.x == 0)
		atomicAdd(&output[0], hit[0]); //combine the hit counters for each block
}

__global__ void calcMean(float *d_rand, int rand_n, float *output)
{
	__shared__ float cache[512]; //one spot for each thread
	int idx = 2*threadIdx.x + blockIdx.x * blockDim.x; //initialize thread index and stride
	int cacheIndex = threadIdx.x;
	

	float temp = 0;
	while(idx < rand_n)
	{
		temp += d_rand[idx] + d_rand[idx+1]; //add two more random numbers to the temp accumulator;
		idx += (blockDim.x * gridDim.x);
	}
	cache[cacheIndex] = temp;

	__syncthreads(); 

	int stride = 256;
	while(stride > 0){
		if(cacheIndex < stride)
		{
			cache[cacheIndex] += cache[cacheIndex+stride]; //combine all of the accumulated sums for this block
		}
		__syncthreads();
		stride/= 2;
	}

	if(cacheIndex == 0)
		atomicAdd(&output[0], cache[0]); //combine all of the sums across blocks
}

///////////////////////////////////////////////////////////////////////////////
// Main program
///////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
	// Start logs
	shrQAStart(argc, argv);

	// initialize the GPU, either identified by --device
	// or by picking the device with highest flop rate.
	int devID = findCudaDevice(argc, (const char **)argv);

	// parsing the number of random numbers to generate
	int rand_n = DEFAULT_RAND_N;
	if( checkCmdLineFlag(argc, (const char**) argv, "count") )  
	{       
		rand_n = getCmdLineArgumentInt(argc, (const char**) argv, "count"); 
	}
	printf("Allocating data for %i samples...\n", rand_n);
	float L1norm;
	bool cont = true;
	
	//Loop to enable the user to do the tests more than once per run
	while(cont)
	{
		cont = false;
		srand ( time(NULL) ); //seeding the seed
		int seed = rand() % DEFAULT_SEED;
		if( checkCmdLineFlag(argc, (const char**) argv, "seed") ) 
		{       
			seed = getCmdLineArgumentInt(argc, (const char**) argv, "seed"); 
		}
		printf("Seeding with %i ...\n", seed);

		//Run each of the 3 random number generators
		for(int x = 0; x<3; x++)
		{
			float *d_Rand;
			checkCudaErrors( cudaMalloc((void **)&d_Rand, rand_n * sizeof(float)) );
			curandRngType prngType;

			switch(x)
			{
			case 0:
				prngType = CURAND_RNG_PSEUDO_MTGP32;
				printf("Using CURAND_RNG_PSEUDO_MTGP32\n\n");
				break;
			case 1:
				prngType = CURAND_RNG_PSEUDO_XORWOW;
				printf("Using CURAND_RNG_PSEUDO_XORWOW\n\n");
				break;
			case 2:
				prngType =  CURAND_RNG_PSEUDO_MRG32K3A;
				printf("Using CURAND_RNG_PSEUDO_MRG32K3A\n\n");
				break;
			default:
				break;
			}

			curandGenerator_t prngGPU;
			checkCurandErrors( curandCreateGenerator(&prngGPU, prngType) ); 
			checkCurandErrors( curandSetPseudoRandomGeneratorSeed(prngGPU, seed) );

			float *h_RandGPU  = (float *)malloc(rand_n * sizeof(float));

			printf("Generating random numbers on GPU...\n\n");
			checkCurandErrors( curandGenerateUniform(prngGPU, (float*) d_Rand, rand_n) ); //generate the numbers

			/////////////////////////////////////////////////////////////////////
			//Our Kernel Call
			////////////////////////////////////////////////////////////////////
			cudaSetDevice(0);
			float *h_output = (float *)malloc(sizeof(float));
			*h_output=0;
			printf("Testing random numbers on GPU...\n\n");
			float *d_output;
			//allocate space and copy to the gpu
			cudaMalloc((void**)&d_output, sizeof(float));
			cudaMemcpy(d_output, h_output, sizeof(float), cudaMemcpyHostToDevice);
			//call the Mean kernel
			calcMean<<<32,512>>>(d_Rand, rand_n, d_output);
			cudaMemcpy(h_output, d_output, sizeof(float), cudaMemcpyDeviceToHost); //copy d_output to h_output
			//calculate the mean
			float mean = (float)(*h_output/2) / (float)rand_n;
			printf("Our calculated mean is: %f\n\n", mean);
			////////////////////////////////////////////////////////////////////
			cudaSetDevice(0);
			int *h_output2 = (int *)malloc(sizeof(int));
			*h_output2=0;
			printf("Testing random numbers on GPU...\n\n");
			int *d_output2;
			//allocate space and copy to the gpu
			cudaMalloc((void**)&d_output2, sizeof(int));
			cudaMemcpy(d_output2, h_output2, sizeof(int), cudaMemcpyHostToDevice);
			//call the Pi kernel
			calcPi<<<32,512>>>(d_Rand, rand_n, d_output2);
			cudaMemcpy(h_output2, d_output2, sizeof(int), cudaMemcpyDeviceToHost);
			//copy d_output to h_output
			//get the pi value
			float pi = 4.0 * (float)(*h_output2) / (float)rand_n;
			printf("Our calculated pi is: %f\n\n", pi);

			checkCudaErrors( cudaMemcpy(h_RandGPU, d_Rand, rand_n * sizeof(float), cudaMemcpyDeviceToHost) );

			float *h_RandCPU  = (float *)malloc(rand_n * sizeof(float));

			//timing section
			const int numIterations = 10;
			int i;
			StopWatchInterface *hTimer;

			checkCudaErrors( cudaDeviceSynchronize() );
			sdkCreateTimer(&hTimer);
			sdkResetTimer(&hTimer);
			sdkStartTimer(&hTimer);

			for (i = 0; i < numIterations; i++)
			{
				checkCurandErrors( curandGenerateUniform(prngGPU, (float*) d_Rand, rand_n) );
			}

			checkCudaErrors( cudaDeviceSynchronize() );
			sdkStopTimer(&hTimer);

			double gpuTime = 1.0e-3 * sdkGetTimerValue(&hTimer)/(double)numIterations; //the time

			printf("Throughput = %.4f GNumbers/s, Time = %.5f s, Size = %u Numbers\n\n\n", 
				1.0e-9 * rand_n / gpuTime, gpuTime, rand_n); 

			checkCurandErrors( curandDestroyGenerator(prngGPU) );
			checkCudaErrors( cudaFree(d_Rand) );
			sdkDeleteTimer( &hTimer);
			free(h_RandGPU);
			free(h_RandCPU);

			cudaDeviceReset();
		}
		printf("Using rand() on CPU\n\n");

		float sum = 0;
		int hits = 0;
		float n1, n2;
		float temp;
		//do the mean and pi on the cpu
		for(int x = 0; x<rand_n; ++x)
		{
			temp = (float)rand()/(float)RAND_MAX; //generate rand_n random numbers
			sum += temp; // add each number to the sum
			
			if(x%2==0)	//on every other number, calculate the distance and check for a hit
			{
				n1 = temp;
			}
			else
			{
				n2 = temp;
				if(sqrt(pow(n1-0.5,2)+pow(n2-0.5,2)) <= 0.5)
					hits++;
			}
		}
		
		float mean = sum / (float)rand_n; // CPU mean calculation
		printf("Our calculated mean is: %f\n\n", mean);
		float pi = 4.0 * (float)(hits) / ((float)rand_n/2); //CPU pi calculation
		printf("Our calculated pi is: %f\n\n", pi);


		printf("Input a '1' to run the test again with a different seed, or \nany other character to exit.\n");
		int a = 0;
		scanf("%d", &a);

		if(a==1)
			cont = true;
		else
		{
			printf("Shutting down...\n");
			shrQAFinishExit(argc, (const char**)argv, (L1norm < 1e-6) ? QA_PASSED : QA_FAILED);
		}
	}


}

