#include <stdio.h>
#include <stdlib.h>

#include <cuda_runtime.h>

// Problem size: number of elements in each vector.
#define N 32

// Launch configuration. NOTE: blocks_per_grid * threads_per_block (2 * 16 = 32)
// exactly equals N, so every element gets one thread. If N changes, these must
// be updated together (or the kernel launched with a ceil-div grid).
const int blocks_per_grid = 2;
const int threads_per_block = 16;

// vector add kernel function
__global__ void vec_add(float *a, float *b, float *c) {
	int tid = blockDim.x * blockIdx.x + threadIdx.x;

	if (tid < N) {
		c[tid] = a[tid] + b[tid];
	}
}

// Abort with a diagnostic if a CUDA runtime call failed.
// Checking every return code is mandatory: a sticky error from an earlier
// call otherwise makes every later call fail mysteriously.
static void check_cuda(cudaError_t err, const char *what) {
	if (err != cudaSuccess) {
		fprintf(stderr, "CUDA error in %s: %s\n", what, cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
}

// Host driver: allocate, initialize, run vec_add on the device, print the
// result, and clean up. Exits non-zero on any allocation or CUDA failure.
int main( void ) {
	float *h_a, *h_b, *h_c; // for cpu memory
	float *d_a, *d_b, *d_c; // for gpu memory
	const size_t bytes = N * sizeof(float);

	// allocate host memory (malloc can fail; don't pass NULL to the init loop)
	h_a = (float *)malloc( bytes );
	h_b = (float *)malloc( bytes );
	h_c = (float *)malloc( bytes );
	if (h_a == NULL || h_b == NULL || h_c == NULL) {
		fprintf(stderr, "host allocation failed\n");
		exit(EXIT_FAILURE);
	}

	// init host data (use float literals to avoid double math/conversion)
	for (int i = 0; i < N; i++) {
		h_a[i] = i * 1.0f;
		h_b[i] = i * 2.0f;
	}

	// allocate device memory
	check_cuda( cudaMalloc( (void**)&d_a, bytes ), "cudaMalloc d_a" );
	check_cuda( cudaMalloc( (void**)&d_b, bytes ), "cudaMalloc d_b" );
	check_cuda( cudaMalloc( (void**)&d_c, bytes ), "cudaMalloc d_c" );

	// copy data from host to device
	check_cuda( cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice ), "copy h_a -> d_a" );
	check_cuda( cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice ), "copy h_b -> d_b" );

	// call vector add kernel; a launch returns no status directly, so check
	// cudaGetLastError() for configuration errors (bad grid/block, etc.)
	vec_add<<<blocks_per_grid, threads_per_block>>>( d_a, d_b, d_c );
	check_cuda( cudaGetLastError(), "vec_add launch" );

	// copy data from gpu to cpu; this blocking copy also synchronizes with the
	// kernel and surfaces any asynchronous execution error
	check_cuda( cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost ), "copy d_c -> h_c" );

	// dump the result
	for (int i = 0; i < N; i++) {
		printf("%.2f = %.2f + %.2f\n", h_c[i], h_a[i], h_b[i]);
	}

	// free host and device memory
	free(h_a);
	free(h_b);
	free(h_c);
	check_cuda( cudaFree(d_a), "cudaFree d_a" );
	check_cuda( cudaFree(d_b), "cudaFree d_b" );
	check_cuda( cudaFree(d_c), "cudaFree d_c" );

	return 0;
}
