/*
* Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.  Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE.  IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND.  NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS,  WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION,  ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users.   This source code is a "commercial item" as
* that term is defined at  48 C.F.R. 2.101 (OCT 1995), consisting  of
* "commercial computer  software"  and "commercial computer software
* documentation" as such terms are  used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/

/* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication and is exactly the same as
* Chapter 7 of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* CUBLAS provides high-performance matrix multiplication.
*/

// systems
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// windows
#include <windows.h>
#include <crtdbg.h>

// cudas
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// locals
#include "matrixMul_kernel.cu"
#include "../../../Common/check.h"

////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char** argv);
bool shouldGetUserInput(int argc, char** argv);
void randomInit(float*, int);
void printDiff(float*, float*, int, int);

extern "C"
	void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
// BUG FIX: this declaration previously took a single parameter and therefore
// did not match the two-parameter definition below — in C++ that produced a
// second, never-defined overload. Keep it in sync with the definition.
int PrintDevices(int deviceCount, int deviceSelected);

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point: run the matrix-multiplication test, then optionally wait for a
// key press (skipped when "noprompt" appears on the command line) so the
// console window stays open when launched outside a shell.
// Returns 0 on success; CheckConditionXR bails out with -1 on CUDA failure.
int main(int argc, char** argv)
{
	int err = runTest(argc, argv);
	CheckConditionXR(err == cudaSuccess, -1);

	bool getUserInput = shouldGetUserInput(argc, argv);

	if (getUserInput)
	{
		printf("Hit any key to terminate\n");
		getchar();
	}

	return 0;   // explicit success status (was an implicit fall-through)
}

// Print the properties of every visible CUDA device, marking the one the
// caller is about to select.
// @param deviceCount     number of devices reported by cudaGetDeviceCount
// @param deviceSelected  ordinal of the device the caller will use
// @return cudaSuccess, or (via CheckConditionXR_) the first
//         cudaGetDeviceProperties error code
int PrintDevices(int deviceCount, int deviceSelected)
{
	cudaError_t err = cudaSuccess;

	cudaDeviceProp deviceProperty;
	for (int currentDeviceId = 0; currentDeviceId < deviceCount; ++currentDeviceId)
	{
		memset(&deviceProperty, 0, sizeof(cudaDeviceProp));
		err = cudaGetDeviceProperties(&deviceProperty, currentDeviceId);
		CheckConditionXR_(err == cudaSuccess, err);

		printf("\ndevice name: %s", deviceProperty.name);
		if (currentDeviceId == deviceSelected)
		{
			printf("    <----- creating CUcontext on this");
		}
		printf("\n");

		// size_t fields use the MSVC-specific %Iu length specifier,
		// consistent with the Windows-only build of this sample.
		printf("device totalGlobalMem: %Iu \n", deviceProperty.totalGlobalMem);
		printf("device sharedMemPerBlock: %Iu \n", deviceProperty.sharedMemPerBlock);
		printf("device regsPerBlock: %d \n", deviceProperty.regsPerBlock);
		printf("device warpSize: %d \n", deviceProperty.warpSize);
		printf("device memPitch: %Iu \n", deviceProperty.memPitch);
		printf("device maxThreadsPerBlock: %d \n", deviceProperty.maxThreadsPerBlock);
		printf("device maxThreadsDim[0]: %d \n", deviceProperty.maxThreadsDim[0]);
		printf("device maxThreadsDim[1]: %d \n", deviceProperty.maxThreadsDim[1]);
		printf("device maxThreadsDim[2]: %d \n", deviceProperty.maxThreadsDim[2]);
		printf("device maxGridSize[0]: %d \n", deviceProperty.maxGridSize[0]);
		printf("device maxGridSize[1]: %d \n", deviceProperty.maxGridSize[1]);
		printf("device maxGridSize[2]: %d \n", deviceProperty.maxGridSize[2]);
		printf("device Processor clockRate: %d \n", deviceProperty.clockRate);
		printf("device totalConstMem: %Iu \n", deviceProperty.totalConstMem);
		printf("device compute capability: %d.%d \n", deviceProperty.major, deviceProperty.minor);
		printf("device textureAlignment: %Iu \n", deviceProperty.textureAlignment);
		printf("device deviceOverlap: %d \n", deviceProperty.deviceOverlap);
		printf("device multiProcessorCount: %d \n", deviceProperty.multiProcessorCount);
		printf("device kernelExecTimeoutEnabled: %d \n", deviceProperty.kernelExecTimeoutEnabled);
		printf("device integrated: %d \n", deviceProperty.integrated);
		printf("device canMapHostMemory: %d \n", deviceProperty.canMapHostMemory);
		printf("device computeMode: %d \n", deviceProperty.computeMode);
		printf("device maxTexture1D: %d \n", deviceProperty.maxTexture1D);
		printf("device maxTexture2D: %d %d\n", deviceProperty.maxTexture2D[0], deviceProperty.maxTexture2D[1]);
		// BUG FIX: format string had only two %d conversions for three
		// arguments, so the 3D-texture depth was never printed (and the
		// extra argument invoked undefined printf behavior).
		printf("device maxTexture3D: %d %d %d\n", deviceProperty.maxTexture3D[0], deviceProperty.maxTexture3D[1], deviceProperty.maxTexture3D[2]);
		printf("device maxTexture1DLayered: %d %d\n", deviceProperty.maxTexture1DLayered[0], deviceProperty.maxTexture1DLayered[1]);
		printf("device maxTexture2DLayered: %d %d %d\n", deviceProperty.maxTexture2DLayered[0], deviceProperty.maxTexture2DLayered[1], deviceProperty.maxTexture2DLayered[2]);
		// BUG FIX: surfaceAlignment is size_t; %d mis-reads it on 64-bit
		// builds. Use %Iu like every other size_t field above.
		printf("device surfaceAlignment: %Iu \n", deviceProperty.surfaceAlignment);
		printf("device concurrentKernels: %d \n", deviceProperty.concurrentKernels);
		printf("device ECCEnabled: %d \n", deviceProperty.ECCEnabled);
		printf("device pciBusID: %d \n", deviceProperty.pciBusID);
		printf("device pciDeviceID: %d \n", deviceProperty.pciDeviceID);
		printf("device pciDomainID: %d \n", deviceProperty.pciDomainID);
		printf("device tccDriver: %d \n", deviceProperty.tccDriver);
		printf("device asyncEngineCount: %d \n", deviceProperty.asyncEngineCount);
		printf("device unifiedAddressing: %d \n", deviceProperty.unifiedAddressing);
		printf("device memoryClockRate: %d KHZ\n", deviceProperty.memoryClockRate);
		printf("device memoryBusWidth: %d bits \n", deviceProperty.memoryBusWidth);
		printf("device l2CacheSize: %d Bytes\n", deviceProperty.l2CacheSize);
		printf("device maxThreadsPerMultiProcessor: %d \n", deviceProperty.maxThreadsPerMultiProcessor);

		printf("\n");
	}

	return cudaSuccess;
}

////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Run the matrix-multiplication test: pick a device (ordinal optionally given
// as argv[1]), multiply random WAxHA and WBxHB matrices on the GPU, compare
// against the CPU reference from computeGold, and report timings and PASS/FAIL.
// Returns cudaSuccess on success, or (via CheckConditionXR_) an error code.
int runTest(int argc, char** argv)
{
	cudaError_t err = cudaSuccess;

	// ---- device selection -------------------------------------------------
	int deviceCount = 0;
	err = cudaGetDeviceCount(&deviceCount);
	CheckConditionXR_(err == cudaSuccess, err);

	if (deviceCount == 0) {
		printf("error: no devices supporting CUDA.\n");
		return(-1);
	}

	int deviceOrdinal = 0;
	if (argc > 1)
	{
		deviceOrdinal = atoi(argv[1]);
	}

	// BUG FIX: valid ordinals are [0, deviceCount); the old "<=" comparison
	// let deviceOrdinal == deviceCount (and negatives) through to cudaSetDevice.
	CheckConditionXR_(deviceOrdinal >= 0 && deviceOrdinal < deviceCount, cudaErrorInvalidDevice);

	err = (cudaError_t)PrintDevices(deviceCount, deviceOrdinal);
	CheckConditionXR_(err == cudaSuccess, err);

	err = cudaSetDevice(deviceOrdinal);
	CheckConditionXR_(err == cudaSuccess, err);

	// fixed seed so the random matrices (and thus the check) are reproducible
	srand(2006);

	// ---- host matrices A and B --------------------------------------------
	unsigned int size_A = WA * HA;
	unsigned int mem_size_A = sizeof(float) * size_A;
	float* h_A = (float*) malloc(mem_size_A);
	unsigned int size_B = WB * HB;
	unsigned int mem_size_B = sizeof(float) * size_B;
	float* h_B = (float*) malloc(mem_size_B);
	// BUG FIX: malloc results were used unchecked
	CheckConditionXR_(h_A != NULL && h_B != NULL, cudaErrorMemoryAllocation);

	randomInit(h_A, size_A);
	randomInit(h_B, size_B);

	// ---- device buffers (BUG FIX: alloc/copy results now checked) ---------
	float* d_A;
	err = cudaMalloc((void**) &d_A, mem_size_A);
	CheckConditionXR_(err == cudaSuccess, err);
	float* d_B;
	err = cudaMalloc((void**) &d_B, mem_size_B);
	CheckConditionXR_(err == cudaSuccess, err);

	err = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
	CheckConditionXR_(err == cudaSuccess, err);
	err = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
	CheckConditionXR_(err == cudaSuccess, err);

	unsigned int size_C = WC * HC;
	unsigned int mem_size_C = sizeof(float) * size_C;
	float* d_C;
	err = cudaMalloc((void**) &d_C, mem_size_C);
	CheckConditionXR_(err == cudaSuccess, err);

	float* h_C = (float*) malloc(mem_size_C);
	CheckConditionXR_(h_C != NULL, cudaErrorMemoryAllocation);

	// ---- launch and time the kernel ---------------------------------------
	// NOTE: GetTickCount has ~10-16 ms resolution; adequate for a coarse demo timer.
	unsigned int startTick = ::GetTickCount();

	// one thread per C element; assumes WC and HC are multiples of BLOCK_SIZE
	dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
	dim3 grid(WC / threads.x, HC / threads.y);

	matrixMul<<< grid, threads >>>(d_C, d_A, d_B, WA, WB);
	CheckConditionXR_(cudaGetLastError() == cudaSuccess, -1);

	// the blocking copy also synchronizes, so the timer below covers the kernel
	err = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
	CheckConditionXR_(err == cudaSuccess, err);   // BUG FIX: semicolon was missing

	// BUG FIX: tick deltas are unsigned; print with %u, not %d
	printf("GPU Processing time: %u (ms) \n", ::GetTickCount() - startTick);

	// ---- CPU reference ----------------------------------------------------
	float* reference = (float*) malloc(mem_size_C);
	CheckConditionXR_(reference != NULL, cudaErrorMemoryAllocation);
	startTick = ::GetTickCount();
	computeGold(reference, h_A, h_B, HA, WA, WB);
	printf("CPU Processing time: %u (ms) \n", ::GetTickCount() - startTick);

	// sum of squared CPU-vs-GPU differences.
	// NOTE(review): 1e-6 is an absolute bound on the SUM over all WC*HC
	// elements — very strict; loosen if larger matrices start "failing"
	// on float rounding alone.
	float acc = 0;
	for (unsigned int i = 0; i < size_C; ++i)
	{
		float diff = reference[i] - h_C[i];
		acc += diff * diff;
	}
	bool res = (acc <= 1e-6f);

	printf("Test %s \n", res ? "PASSED" : "FAILED");
	if (!res) printDiff(reference, h_C, WC, HC);

	// ---- clean up ----------------------------------------------------------
	free(h_A);
	free(h_B);
	free(h_C);
	free(reference);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);

	cudaDeviceReset();   // cudaThreadExit() is deprecated in favor of this

	return cudaSuccess;
}

// Fill 'data' with 'size' pseudo-random floats drawn uniformly from [0, 1].
// Consumes one rand() call per element; seed with srand() beforehand for
// reproducible matrices.
void randomInit(float* data, int size)
{
	int idx = 0;
	while (idx < size)
	{
		data[idx] = (float)rand() / (float)RAND_MAX;
		++idx;
	}
}

// Print every element where the CPU reference (data1) and GPU result (data2)
// disagree, followed by a total mismatch count.
// Both matrices are row-major, width columns by height rows.
// NOTE(review): comparison is exact float equality — any rounding difference
// between CPU and GPU arithmetic counts as an error.
void printDiff(float *data1, float *data2, int width, int height)
{
	int error_count = 0;
	for (int j = 0; j < height; j++) {
		for (int i = 0; i < width; i++) {
			int k = j * width + i;
			if (data1[k] != data2[k]) {
				// BUG FIX: escapes were mangled — " n" instead of "\n"
				printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f\n", i, j, data1[k], data2[k]);
				error_count++;
			}
		}
	}
	printf("\nTotal Errors = %d\n", error_count);
}

// Scan the command line (argv[1..argc-1]) for the literal flag "noprompt".
// Returns false when it is present (suppress the exit prompt), true otherwise.
bool shouldGetUserInput(int argc, char** argv)
{
	bool prompt = true;
	for (int idx = argc - 1; idx >= 1; --idx)
	{
		if (0 == strcmp("noprompt", argv[idx]))
		{
			prompt = false;
			break;
		}
	}
	return prompt;
}
