#include <cstdlib>
#include <iostream>
//////////////////////////////////////////////////////////////////////////
using namespace std;
//////////////////////////////////////////////////////////////////////////
#define N (512*512)
#define THREADS_PER_BLOCK       512
//////////////////////////////////////////////////////////////////////////
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
//
// Expected launch: 1-D grid of 1-D blocks providing at least N threads.
// The bounds guard makes the kernel safe for over-provisioned launches
// (e.g. a ceil-division grid when N is not an exact multiple of the
// block size); without it, excess threads would write out of bounds.
__global__ void add(int *a, int *b, int *c)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < N)
	{
		c[idx] = a[idx] + b[idx];
	}
}
//////////////////////////////////////////////////////////////////////////
// Abort with a diagnostic message if a CUDA runtime call reported an error.
// Kernel launches themselves return nothing: pass cudaGetLastError() here
// right after the launch to surface launch-configuration failures.
static void checkCuda(cudaError_t err, const char *what)
{
	if (err != cudaSuccess)
	{
		std::cerr << "CUDA error (" << what << "): "
		          << cudaGetErrorString(err) << std::endl;
		std::exit(EXIT_FAILURE);
	}
}

// Fills two N-element int vectors with pseudo-random values, adds them on
// the GPU, and verifies the device result against a CPU reference.
// Returns 0 on success, 1 on allocation failure or a verification mismatch.
int main(void)
{
	const size_t bytes = N * sizeof(int);

	// Host copies of a, b, c
	int *a = (int*)malloc(bytes);
	int *b = (int*)malloc(bytes);
	int *c = (int*)malloc(bytes);
	if (a == NULL || b == NULL || c == NULL)
	{
		std::cerr << "Host allocation failed" << std::endl;
		free(a);
		free(b);
		free(c);
		return 1;
	}

	// Device copies of a, b, c
	int *dev_a, *dev_b, *dev_c;
	checkCuda(cudaMalloc((void**)&dev_a, bytes), "cudaMalloc dev_a");
	checkCuda(cudaMalloc((void**)&dev_b, bytes), "cudaMalloc dev_b");
	checkCuda(cudaMalloc((void**)&dev_c, bytes), "cudaMalloc dev_c");

	// Pseudo-random inputs (rand() is deliberately left unseeded, so the
	// run is deterministic and reproducible)
	for (int i = 0; i < N; i++)
	{
		a[i] = rand() % 1000;
	}
	for (int i = 0; i < N; i++)
	{
		b[i] = rand() % 1000;
	}

	// Copy inputs to device
	checkCuda(cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice), "copy a to device");
	checkCuda(cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice), "copy b to device");

	// Launch add() kernel. Ceil-division keeps the grid correct even if N
	// stops being an exact multiple of THREADS_PER_BLOCK.
	const int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
	add<<<blocks, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c);
	checkCuda(cudaGetLastError(), "kernel launch");

	// Copy device result back to host copy of c. cudaMemcpy is blocking,
	// so this also surfaces any asynchronous kernel execution error.
	checkCuda(cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost), "copy c to host");

	// Verify the GPU result against a CPU reference
	int status = 0;
	for (int i = 0; i < N; i++)
	{
		if (c[i] != a[i] + b[i])
		{
			std::cerr << "Mismatch at index " << i << ": got " << c[i]
			          << ", expected " << (a[i] + b[i]) << std::endl;
			status = 1;
			break;
		}
	}

	// Deallocate Host copies
	free(a);
	free(b);
	free(c);

	// Deallocate Device copies
	checkCuda(cudaFree(dev_a), "cudaFree dev_a");
	checkCuda(cudaFree(dev_b), "cudaFree dev_b");
	checkCuda(cudaFree(dev_c), "cudaFree dev_c");

	return status;
}
//////////////////////////////////////////////////////////////////////////
