#include "dept150_common.h"
#include "cuda_runtime.h"


__global__ void add_matrix(double *A, double *B)
{
    // Dummy verification kernel: accumulates B into A twice (A += 2*B
    // element-wise), staging B through shared memory so the run exercises
    // global loads, shared stores, and global read-modify-writes.
    //
    // Assumptions (unchecked):
    //  - blockDim.x <= 512, the capacity of the staging buffer below;
    //    larger blocks would write `staged` out of bounds.
    //  - gridDim.x * blockDim.x does not exceed the element count of A/B
    //    (the launch configuration in main() truncates, so there is no
    //    tail guard here).
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;

    // Each thread writes and reads only its own slot, so no
    // __syncthreads() barrier is required between the store and the loads.
    __shared__ double staged[512];
    staged[threadIdx.x] = B[gid];

    // Two accumulation passes (the original's fixed 2-trip loop, unrolled).
    // Note: `staged` still holds B's pre-update value on the second pass,
    // which matters when A and B alias (main() launches add_matrix(g, g)).
    A[gid] += staged[threadIdx.x];
    A[gid] += staged[threadIdx.x];
}

// P2P/UVA micro-benchmark: times (1) a ping-pong of cudaMemcpy + kernel
// between two GPUs and (2) 100 kernel launches that read peer memory
// directly. Usage: ./prog [matrix_dim]  (default 1000, i.e. a 1000x1000
// double matrix). Requires two peer-capable GPUs (device ids 0 and 1).
int main(int argc, char** argv)
{
	double* host_matrix = NULL;
	double* g0_matrix = NULL;   // device 0 allocation
	double* g1_matrix = NULL;   // device 1 allocation

	// Matrix dimension (N for an NxN matrix), optionally from argv[1].
	int matrix_dim = 1000;
	if (argc > 1)
	{
		matrix_dim = atoi(argv[1]);
		if (matrix_dim <= 0)
		{
			fprintf(stderr, "Invalid matrix dimension: %s\n", argv[1]);
			return 1;
		}
	}
	// Cast BEFORE multiplying: int * int overflows for matrix_dim > 46340.
	const size_t matrix_size = (size_t)matrix_dim * (size_t)matrix_dim * sizeof(double);

	host_matrix = (double *)malloc(matrix_size);
	if (host_matrix == NULL)
	{
		fprintf(stderr, "Failed to allocate %zu bytes of host memory\n", matrix_size);
		return 1;
	}

	// Fill with a simple, position-dependent pattern.
	for(int i = 0; i < matrix_dim; i++){
		for(int j = 0; j < matrix_dim; j++)
		{
			host_matrix[(size_t)i*matrix_dim + j] = i+j;
		}
	}
	int gpuids[2];
	gpuids[0] = 0;
	gpuids[1] = 1;

	// Enable bidirectional peer access so each GPU can dereference the
	// other's pointers directly (checkCudaErrors aborts if unsupported).
	checkCudaErrors(cudaSetDevice(gpuids[0]));
	checkCudaErrors(cudaDeviceEnablePeerAccess(gpuids[1], 0));
	checkCudaErrors(cudaSetDevice(gpuids[1]));
	checkCudaErrors(cudaDeviceEnablePeerAccess(gpuids[0], 0));

	// One copy of the matrix on each GPU; cudaMemcpyDefault lets the
	// runtime infer direction from the (unified) pointer addresses.
	checkCudaErrors(cudaSetDevice(gpuids[0]));
	checkCudaErrors(cudaMalloc(&g0_matrix, matrix_size));
	checkCudaErrors(cudaMemcpy(g0_matrix, host_matrix, matrix_size, cudaMemcpyDefault));

	checkCudaErrors(cudaSetDevice(gpuids[1]));
	checkCudaErrors(cudaMalloc(&g1_matrix, matrix_size));
	checkCudaErrors(cudaMemcpy(g1_matrix, host_matrix, matrix_size, cudaMemcpyDefault));

	const dim3 threads(512, 1);
	// NOTE(review): this division truncates, so up to threads.x-1 trailing
	// elements are never touched by the kernel. A ceil-div would instead
	// read/write out of bounds because add_matrix has no tail guard —
	// acceptable for a dummy benchmark, but flagged here.
	const dim3 blocks((unsigned int)((matrix_size / sizeof(double)) / threads.x), 1);

	cudaEvent_t start_event, stop_event;
	float time_memcpy;
	int eventflags = cudaEventBlockingSync;
	checkCudaErrors(cudaEventCreateWithFlags(&start_event, eventflags));
	checkCudaErrors(cudaEventCreateWithFlags(&stop_event, eventflags));

	// Benchmark 1: ping-pong P2P memcpy + kernel between the two GPUs.
	checkCudaErrors(cudaEventRecord(start_event, 0));

	for (int i = 0; i < 100; i++)
	{
		// With UVA we don't need to specify source and target devices;
		// the runtime figures this out by itself from the pointers.
		if (i % 2 == 0)
		{
			checkCudaErrors(cudaMemcpy(g0_matrix, g1_matrix, matrix_size, cudaMemcpyDefault));
			add_matrix<<<blocks, threads>>>(g0_matrix, g0_matrix);
		} else
		{
			checkCudaErrors(cudaMemcpy(g1_matrix, g0_matrix, matrix_size, cudaMemcpyDefault));
			add_matrix<<<blocks, threads>>>(g1_matrix, g1_matrix);
		}
		// Kernel launches don't return errors directly; surface bad
		// launch configurations here instead of failing silently.
		checkCudaErrors(cudaGetLastError());
	}
	checkCudaErrors(cudaEventRecord(stop_event, 0));
	checkCudaErrors(cudaEventSynchronize(stop_event));
	checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event, stop_event));

	printf("Time1 - %f\n", time_memcpy);


	cudaEvent_t start_event1, stop_event1;

	checkCudaErrors(cudaEventCreateWithFlags(&start_event1, eventflags));
	checkCudaErrors(cudaEventCreateWithFlags(&stop_event1, eventflags));

	// Benchmark 2: kernels launched on GPU 1 reading GPU 0's memory
	// directly over the peer link (no explicit memcpy).
	checkCudaErrors(cudaEventRecord(start_event1, 0));
	for(int i = 0; i < 100; i++)
	{
		add_matrix<<<blocks, threads>>>(g0_matrix, g1_matrix);
		checkCudaErrors(cudaGetLastError());
	}

	checkCudaErrors(cudaEventRecord(stop_event1, 0));
	checkCudaErrors(cudaEventSynchronize(stop_event1));
	checkCudaErrors(cudaEventElapsedTime(&time_memcpy, start_event1, stop_event1));

	printf("Time2 - %f\n", time_memcpy);

	// Release all resources (the original leaked events, device and host
	// memory; harmless at exit, but noisy under compute-sanitizer).
	checkCudaErrors(cudaEventDestroy(start_event));
	checkCudaErrors(cudaEventDestroy(stop_event));
	checkCudaErrors(cudaEventDestroy(start_event1));
	checkCudaErrors(cudaEventDestroy(stop_event1));

	checkCudaErrors(cudaFree(g1_matrix));
	checkCudaErrors(cudaSetDevice(gpuids[0]));
	checkCudaErrors(cudaFree(g0_matrix));
	free(host_matrix);

	return 0;
}
