/* Vector dot product: result = dot(A, B).
 * Host code.
 *
 * This sample implements a vector dot product using the CUDA driver API
 * (adapted from the SDK matrix-multiplication sample, hence some leftover
 * naming). It has been written for clarity of exposition to illustrate
 * various CUDA programming principles, not with the goal of providing the
 * most performant generic kernel.
 *
 * CUBLAS provides high-performance implementations of such BLAS routines.
 */

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fstream>
#include <math.h>
#include <math/m_MathFunctions.h>

// includes, CUDA
#include <cuda.h>

// includes, project
#include "common_header.h"

// Loads the CUDA module and resolves the kernel named EntryPointName;
// also reports the device's multiprocessor count. Defined below.
static CUresult initCUDA(CUfunction *pMatrixMul, char EntryPointName[], int *NumMProc);

// Device properties queried during initCUDA (SIMD width sanity check).
CUdevprop GPU_prop;

// Checks the result of a CUDA driver API call.
// do { } while (0) makes the macro a single statement, safe inside an
// unbraced if/else. The failure path logs the error code before asserting,
// so NDEBUG builds (where assert compiles away) still report the problem
// instead of ignoring it silently.
#define CUDA_SAFE_CALL(func)\
		do {\
			CUresult res = (func);\
			if (res != CUDA_SUCCESS)\
			{\
				fprintf(stderr, "CUDA error %d at %s:%d\n", (int)res, __FILE__, __LINE__);\
				assert(res == CUDA_SUCCESS);\
			}\
		} while (0)

////////////////////////////////////////////////////////////////////////////////
// Globals
////////////////////////////////////////////////////////////////////////////////
CUdevice cuDevice;
CUcontext cuContext;
CUmodule cuModule;

// Element-wise comparison of a reference ("gold") array against a test array.
// Returns true when every corresponding pair of values differs by no more
// than 'precision', false at the first pair that exceeds it.
bool Compare2Gold_f(float *gold, float *tst_array, unsigned int size, float precision)
{
	for (unsigned int idx = 0; idx < size; ++idx)
	{
		const float diff = gold[idx] - tst_array[idx];

		if (math::fastabs(diff) > precision)
			return false;
	}

	return true;
}

////////////////////////////////////////////////////////////////////////////////
// Run a simple test for CUDA ( Dot product )
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Run a simple test for CUDA ( Dot product )
//
// Generates two random vectors, computes their dot product on the GPU with
// the driver-API kernel "DotProduct", compares the result against a CPU
// reference, and shows the outcome in a message box.
////////////////////////////////////////////////////////////////////////////////
void runCUDATest()
{
	// Dot product
	unsigned int i, VecSize = 5000;

	float	*vA = new float [VecSize],
			*vB = new float [VecSize],
			dot_result;

	int NumMultiProc;

	CUfunction cufDotProduct = NULL;
	CUDA_SAFE_CALL( initCUDA(&cufDotProduct, "DotProduct", &NumMultiProc) );

	// Fixed seed: runs are reproducible
	srand(2010);

	// Initialize vectors with random values in [-1, 1)
	for (i = 0; i < VecSize; ++i)
	{
		// Normalized rand
		vA[i] = (rand()%2000) / 1000.0f - 1.0f;
		vB[i] = (rand()%2000) / 1000.0f - 1.0f;
	}

	// Calculate GOLD (CPU reference) dot product
	float gold_dot_result = 0.0f;

	for (i = 0; i < VecSize; ++i)
	{
		gold_dot_result += vA[i] * vB[i];
	}

	// One internal (per-block) partial result slot per multiprocessor; the
	// grid is launched with one block per multiprocessor below.
	unsigned int gpu_internal_results_size = NumMultiProc;

	CUdeviceptr gpu_vA, gpu_vB, gpu_dot_result, gpu_internal_results;

	// Allocate memory on GPU
	CUDA_SAFE_CALL( cuMemAlloc(&gpu_vA, VecSize * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemAlloc(&gpu_vB, VecSize * sizeof(float)) );

	CUDA_SAFE_CALL( cuMemAlloc(&gpu_dot_result, sizeof(float)) );

	CUDA_SAFE_CALL( cuMemAlloc(&gpu_internal_results, gpu_internal_results_size * sizeof(float)) );

	// Copy memory from host to GPU-device
	CUDA_SAFE_CALL( cuMemcpyHtoD(gpu_vA, vA, VecSize * sizeof(float)) );
	CUDA_SAFE_CALL( cuMemcpyHtoD(gpu_vB, vB, VecSize * sizeof(float)) );

// Rounds 'offset' up to the natural alignment of 'data', as required when
// packing kernel arguments into the driver-API parameter buffer.
#define GET_UPPER_ALIGNMENT_BOUND(offset, data) \
			((offset) + __alignof(data) - 1) & ~(__alignof(data) - 1)

	// Setup execution parameters: each argument is written at a properly
	// aligned byte offset, in the order the kernel declares them.
	int offset = 0;
	void *ptr;

	// dot_result (output scalar)
	ptr = (void *)gpu_dot_result;
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, ptr);
	CUDA_SAFE_CALL( cuParamSetv(cufDotProduct, offset, &ptr, sizeof(ptr)) );
	offset += sizeof(ptr);

	// vA
	ptr = (void *)gpu_vA;
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, ptr);
	CUDA_SAFE_CALL( cuParamSetv(cufDotProduct, offset, &ptr, sizeof(ptr)) );
	offset += sizeof(ptr);

	// vB
	ptr = (void *)gpu_vB;
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, ptr);
	CUDA_SAFE_CALL( cuParamSetv(cufDotProduct, offset, &ptr, sizeof(ptr)) );
	offset += sizeof(ptr);

	// Vector Size
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, VecSize);
	CUDA_SAFE_CALL( cuParamSeti(cufDotProduct, offset, VecSize) );
	offset += sizeof(VecSize);

	// Memory for internal (per-block) results
	ptr = (void *)gpu_internal_results;
	offset = GET_UPPER_ALIGNMENT_BOUND(offset, ptr);
	CUDA_SAFE_CALL( cuParamSetv(cufDotProduct, offset, &ptr, sizeof(ptr)) );
	offset += sizeof(ptr);

	// Zero the module-global block sync flags before launch.
	// NOTE(review): assumes the loaded module exports a "syncAllFlags"
	// symbol; both calls are now checked instead of silently ignored.
	unsigned int BSFMemSize;
	CUdeviceptr SyncAllFlags;
	CUDA_SAFE_CALL( cuModuleGetGlobal(&SyncAllFlags, &BSFMemSize, cuModule, "syncAllFlags") );
	CUDA_SAFE_CALL( cuMemsetD32(SyncAllFlags, 0, BSFMemSize >> 2) );


	// Set argument list size
	CUDA_SAFE_CALL( cuParamSetSize(cufDotProduct, offset) );

#undef GET_UPPER_ALIGNMENT_BOUND

	// Set number of threads in block (as grid XYZ)
	CUDA_SAFE_CALL( cuFuncSetBlockShape(cufDotProduct, BLOCK_SIZE, 1, 1) );
	// Set shared memory per block size
	CUDA_SAFE_CALL( cuFuncSetSharedSize(cufDotProduct, (BLOCK_SIZE + WARPS_PER_BLOCK) * sizeof(float)) );

	// Number of blocks is equal to number of multiprocessors
	int BlocksPerGrid = NumMultiProc;
	CUDA_SAFE_CALL( cuLaunchGrid(cufDotProduct, BlocksPerGrid, 1) );

	// Retrieve DOT result (cuMemcpyDtoH is synchronous, so it also waits
	// for the kernel launched above to finish)
	CUDA_SAFE_CALL( cuMemcpyDtoH((void *)&dot_result, gpu_dot_result, sizeof(float)) );

	// Free GPU-device memory
	CUDA_SAFE_CALL( cuMemFree(gpu_vA) );
	CUDA_SAFE_CALL( cuMemFree(gpu_vB) );
	CUDA_SAFE_CALL( cuMemFree(gpu_dot_result) );
	CUDA_SAFE_CALL( cuMemFree(gpu_internal_results) );

	delete [] vA;
	delete [] vB;

	CUDA_SAFE_CALL( cuCtxDetach(cuContext) );


	// COMPARE TO GOLD (absolute tolerance; float accumulation order differs
	// between CPU and GPU, so bit-exact equality is not expected)
	bool cmp_res = math::fastabs(gold_dot_result - dot_result) < 1e-4f;
	char MessageStr[512];

	sprintf(MessageStr, "%s :: %6.3f %s %6.3f", cmp_res?"WIN":"FAIL", dot_result, cmp_res?"==":"!=", gold_dot_result);

	MessageBox(0, MessageStr, "epic message!!11", 0);
}

////////////////////////////////////////////////////////////////////////////////
// Initialize the CUDA driver API: select device 0, create a context, load
// the PTX module and resolve the requested kernel.
//
// pMatrixMul     [out] receives the handle of the kernel EntryPointName
// EntryPointName [in]  name of the kernel inside the PTX module
// NumMProc       [out] receives the device's multiprocessor count
//
// Returns CUDA_SUCCESS, or the first failing driver status (the context is
// released before returning on failure). Exits the process when no
// CUDA-capable device is present.
////////////////////////////////////////////////////////////////////////////////
static CUresult initCUDA(CUfunction *pMatrixMul, char EntryPointName[], int *NumMProc)
{
	CUfunction cuFunction = 0;

	cuDevice = 0;

	int deviceCount = 0;
	CUDA_SAFE_CALL(cuInit(0));
	CUDA_SAFE_CALL(cuDeviceGetCount(&deviceCount));

	if (deviceCount == 0)
	{
		fprintf(stderr, "CU DeviceInitDrv error: no devices supporting CUDA\n");
		exit(-1);
	}

	int dev = 0;

	CUDA_SAFE_CALL(cuDeviceGet(&cuDevice, dev));
	char name[100];
	CUDA_SAFE_CALL(cuDeviceGetName(name, 100, cuDevice));

	fprintf(stderr, "Using device %d: %s\n", dev, name);

// On failure: release the context and propagate the driver status to the
// caller. do { } while (0) keeps the macro a single statement.
#define ErrorExit() \
		do {\
			if (CUDA_SUCCESS != status)\
			{\
				cuCtxDetach(cuContext);\
				return status;\
			}\
		} while (0)

	CUresult status = cuCtxCreate(&cuContext, 0, cuDevice );
	ErrorExit();

	status = cuModuleLoad(&cuModule, "resources\\solver_kernel.ptx");
	ErrorExit();

	status = cuModuleGetFunction(&cuFunction, cuModule, EntryPointName);
	ErrorExit();

#undef ErrorExit

	*pMatrixMul = cuFunction;

	// Sanity check: the kernels assume the warp size compiled into host code
	CUDA_SAFE_CALL(cuDeviceGetProperties(&GPU_prop, cuDevice));
	assert(GPU_prop.SIMDWidth == WARP_SIZE);

	CUDA_SAFE_CALL(cuDeviceGetAttribute(NumMProc, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, cuDevice));

	return CUDA_SUCCESS;
}


