// This example is modified from the NVIDIA official source code.
// Samples/0_Introduction/simpleZeroCopy/simpleZeroCopy.cu

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#include <cuda_runtime.h>

#if 0
#ifndef MAX
#define MAX(a, b) (a > b ? a : b)
#endif
#endif

// vector add
__global__ void vec_add(int *a, int *b, int *c, int N) {
	int idx = blockIdx.x * blockDim.x + threadIdx.x;

	if (idx < N) {
		c[idx] = a[idx] + b[idx];
	}
}

// Allocate generic memory with malloc() and pin it later instead of using
// cudaHostAlloc()
int bPinGenericMemory = 0;

// Round x (a pointer or integer) up to the next multiple of `size`.
// `size` must be a power of two. Both arguments are fully parenthesized and
// the arithmetic is done in size_t so expression arguments expand correctly
// (the previous version broke for e.g. ALIGN_UP(p + 1, A) or ALIGN_UP(p, A/2)).
#define MEMORY_ALIGNMENT 4096
#define ALIGN_UP(x, size) \
	(((size_t)(x) + ((size_t)(size) - 1)) & ~((size_t)(size) - 1))

// Abort with a readable message when a CUDA runtime call fails. Kernel
// launches themselves return nothing, so launch errors are picked up via
// cudaGetLastError() and execution errors via the checked synchronize below.
#define CHECK_CUDA(call)                                                   \
	do {                                                                   \
		cudaError_t err_ = (call);                                         \
		if (err_ != cudaSuccess) {                                         \
			fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,  \
					cudaGetErrorString(err_));                             \
			exit(EXIT_FAILURE);                                            \
		}                                                                  \
	} while (0)

// Zero-copy demo: allocate mapped (page-locked) host memory, hand the
// device-side aliases to a kernel, and verify the sum on the host.
// Returns 0 on success, nonzero on any mismatch.
int main() {
	int i, nelem;
	int idev = 0;  // use default device 0
	unsigned int flags;
	size_t bytes;
	int *a, *b, *c;           // Pinned memory allocated on the CPU
	int *a_UA, *b_UA, *c_UA;  // Non-4K Aligned Pinned memory on the CPU
	int *d_a, *d_b, *d_c;     // Device pointers for mapped memory
	cudaDeviceProp deviceProp;
	int errs = 0;             // count of result mismatches

	printf("CUDART_VERSION = %d\n", CUDART_VERSION);
	if (CUDART_VERSION < 4000) {
		printf("CUDART_VERSION < 4000, please upgrade your CUDA driver\n");
		return 1;
	}

	CHECK_CUDA(cudaSetDevice(idev));
	CHECK_CUDA(cudaGetDeviceProperties(&deviceProp, idev));

	// need: CUDART_VERSION >= 2020
	if (!deviceProp.canMapHostMemory) {
		fprintf(stderr, "Device %d does not support mapping CPU host memory!\n",
				idev);

		exit(EXIT_SUCCESS);
	}

	// Must be set before any mapped host allocations are made.
	CHECK_CUDA(cudaSetDeviceFlags(cudaDeviceMapHost));

	/* Allocate mapped CPU memory. */
	nelem = 10;
	bytes = nelem * sizeof(int);

	if (bPinGenericMemory) {
		// need: CUDART_VERSION >= 4000
		printf("using cudaHostRegister cudaHostRegisterMapped\n");
		// Over-allocate so the pointer can be rounded up to a 4K boundary.
		a_UA = (int *)malloc(bytes + MEMORY_ALIGNMENT);
		b_UA = (int *)malloc(bytes + MEMORY_ALIGNMENT);
		c_UA = (int *)malloc(bytes + MEMORY_ALIGNMENT);
		if (a_UA == NULL || b_UA == NULL || c_UA == NULL) {
			fprintf(stderr, "host malloc failed\n");
			exit(EXIT_FAILURE);
		}

		// We need to ensure memory is aligned to 4K (so we will need to pad
		// memory accordingly)
		a = (int *)ALIGN_UP(a_UA, MEMORY_ALIGNMENT);
		b = (int *)ALIGN_UP(b_UA, MEMORY_ALIGNMENT);
		c = (int *)ALIGN_UP(c_UA, MEMORY_ALIGNMENT);

		CHECK_CUDA(cudaHostRegister(a, bytes, cudaHostRegisterMapped));
		CHECK_CUDA(cudaHostRegister(b, bytes, cudaHostRegisterMapped));
		CHECK_CUDA(cudaHostRegister(c, bytes, cudaHostRegisterMapped));
	} else {
		// need: CUDART_VERSION >= 2020
		printf("using cudaHostAlloc cudaHostAllocMapped\n");
		flags = cudaHostAllocMapped;
		CHECK_CUDA(cudaHostAlloc((void **)&a, bytes, flags));
		CHECK_CUDA(cudaHostAlloc((void **)&b, bytes, flags));
		CHECK_CUDA(cudaHostAlloc((void **)&c, bytes, flags));
	}

	/* Initialize the vectors. */

	for (i = 0; i < nelem; i++) {
		a[i] = i;
		b[i] = 2*i;
	}

	/* Get the device pointers for the pinned CPU memory mapped into the GPU
	   memory space. */
	// need: CUDART_VERSION >= 2020
	CHECK_CUDA(cudaHostGetDevicePointer((void **)&d_a, (void *)a, 0));
	CHECK_CUDA(cudaHostGetDevicePointer((void **)&d_b, (void *)b, 0));
	CHECK_CUDA(cudaHostGetDevicePointer((void **)&d_c, (void *)c, 0));

	/* Call the GPU kernel using the device aliases of the mapped CPU memory.
	 */
	dim3 block(256);
	dim3 grid((nelem + block.x - 1) / block.x);  // exact integer ceil-div
	vec_add<<<grid, block>>>(d_a, d_b, d_c, nelem);
	CHECK_CUDA(cudaGetLastError());       // catch launch-configuration errors
	// Required before the host reads c: the kernel writes host-visible
	// memory asynchronously.
	CHECK_CUDA(cudaDeviceSynchronize());

	/* Compare the results */
	for (i = 0; i < nelem; i++) {
		printf("a[%d]=%d, b[%d]=%d, c[%d]=%d\n", i, a[i], i, b[i], i, c[i]);
		if (c[i] != a[i] + b[i]) {
			errs++;
		}
	}
	if (errs != 0) {
		fprintf(stderr, "FAILED: %d mismatch(es)\n", errs);
	}

	if (bPinGenericMemory) {
		// need: CUDART_VERSION >= 4000
		CHECK_CUDA(cudaHostUnregister(a));
		CHECK_CUDA(cudaHostUnregister(b));
		CHECK_CUDA(cudaHostUnregister(c));
		// Free the original (unaligned) malloc pointers, not the aligned
		// aliases.
		free(a_UA);
		free(b_UA);
		free(c_UA);
	} else {
		// need: CUDART_VERSION >= 2020
		CHECK_CUDA(cudaFreeHost(a));
		CHECK_CUDA(cudaFreeHost(b));
		CHECK_CUDA(cudaFreeHost(c));
	}
	return errs == 0 ? 0 : 1;
}
