#include <cuda_runtime.h>

#include <cstdlib>
#include <iostream>

// vector add
// Element-wise vector addition: c[j] = a[j] + b[j] for j in [0, n).
//
// Layout contract: each thread processes UNROLL_FACTOR elements spaced one
// full grid apart (index, index + stride, ...), so the launch must satisfy
// gridDim.x * blockDim.x * UNROLL_FACTOR >= n to cover the whole vector.
// The per-iteration bounds check keeps any ragged tail (or smaller n) safe.
template<const int UNROLL_FACTOR>
__global__ void vec_add(const int *a, const int *b, int *c, int n) {
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	int stride = blockDim.x * gridDim.x;

	// Fixed compile-time trip count: ask the compiler to fully unroll.
	#pragma unroll
	for (int i = 0; i < UNROLL_FACTOR; i++) {
		int j = index + i * stride; // compute the element index once per pass
		if (j < n) {
			c[j] = a[j] + b[j];
		}
	}
}

// Elements processed per thread; must match the kernel's template argument.
const int unroll_factor = 4;

// Abort with a readable message when a CUDA API call fails.
// 'what' names the failing operation for the diagnostic.
static void checkCuda(cudaError_t err, const char *what) {
	if (err != cudaSuccess) {
		std::cerr << "CUDA error (" << what << "): "
		          << cudaGetErrorString(err) << std::endl;
		std::exit(EXIT_FAILURE);
	}
}

int main() {
	int n = 1024 * unroll_factor; // vector size
	int threadsPerBlock = 256;
	// Each thread covers unroll_factor elements, so size the grid for
	// ceil(n / unroll_factor) threads. Deriving this from n (instead of a
	// hard-coded 1024) keeps the launch correct if n changes.
	int threadsNeeded = (n + unroll_factor - 1) / unroll_factor;
	int blocksPerGrid = (threadsNeeded + threadsPerBlock - 1) / threadsPerBlock;

	// allocate the device memory
	int *d_a, *d_b, *d_c;
	checkCuda(cudaMalloc(&d_a, n * sizeof(int)), "cudaMalloc d_a");
	checkCuda(cudaMalloc(&d_b, n * sizeof(int)), "cudaMalloc d_b");
	checkCuda(cudaMalloc(&d_c, n * sizeof(int)), "cudaMalloc d_c");

	// allocate the host memory
	int *h_a = new int[n];
	int *h_b = new int[n];
	int *h_c = new int[n];

	// init the host memory with a pattern the CPU check below can recompute
	for (int i = 0; i < n; ++i) {
		h_a[i] = i;
		h_b[i] = 2 * i;
	}

	// copy data from host to device
	checkCuda(cudaMemcpy(d_a, h_a, n * sizeof(int), cudaMemcpyHostToDevice),
	          "cudaMemcpy h_a -> d_a");
	checkCuda(cudaMemcpy(d_b, h_b, n * sizeof(int), cudaMemcpyHostToDevice),
	          "cudaMemcpy h_b -> d_b");

	// start kernel
	vec_add<unroll_factor><<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, n);
	// Launch-configuration errors only surface via cudaGetLastError();
	// in-kernel faults surface at the next synchronizing call.
	checkCuda(cudaGetLastError(), "vec_add launch");
	checkCuda(cudaDeviceSynchronize(), "vec_add execution");

	// copy result back to host
	checkCuda(cudaMemcpy(h_c, d_c, n * sizeof(int), cudaMemcpyDeviceToHost),
	          "cudaMemcpy d_c -> h_c");

	// check the result against the CPU reference
	int i;
	for (i = 0; i < n; ++i) {
		if (h_c[i] != h_a[i] + h_b[i]) {
			std::cerr << "Result error: invalid result at index " << i << std::endl;
			break;
		}
	}

	// i only reaches n when every element matched
	if (i >= n) {
		std::cout << "The result is correct" << std::endl;
	}

	// clean up the resource
	checkCuda(cudaFree(d_a), "cudaFree d_a");
	checkCuda(cudaFree(d_b), "cudaFree d_b");
	checkCuda(cudaFree(d_c), "cudaFree d_c");

	delete[] h_a;
	delete[] h_b;
	delete[] h_c;

	return 0;
}
