#include <unistd.h>   // getopt, optarg, gethostname (POSIX) — previously pulled in only by accident

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>

#include <chTimer.hpp>
#include <mpi.h>

extern "C" void do_saxpy_on_cpu(float *x, float *y, float alpha, int size);
extern "C" void do_saxpy_on_gpu(float *global_x, float *global_y, float alpha, int numThreads, int numBlocks, size_t N, double *results);

int main(int argc, char *argv[])
{
	/* MPI setup */
	MPI_Status status[32];
	MPI_Request request[32];
	char hostname[50];
	int nodes, rank;
	//double start, stop;
	//double cpu_runtime, gpu_runtime, mpi_runtime;

	ChTimer cpu_timer, gpu_timer, mpi_timer, mpi_sync_timer;
	double gpures[10];

	MPI_Init(&argc, &argv);
	MPI_Comm_rank (MPI_COMM_WORLD, &rank);  // Who am I?
	MPI_Comm_size (MPI_COMM_WORLD, &nodes); // How many processes?
	gethostname(hostname, 50);
	
	//printf("# I'm process %2d out of %2d (%s)\n", rank, nodes, hostname);
	MPI_Barrier(MPI_COMM_WORLD);

	/* parse arguments */
	int c;
	size_t optSize = 2<<20;
	size_t optNumBlocks = 64;
	size_t optNumThreads = 512;
	  bool optCmpCPU = false;
	  bool optCPUvsGPU = false;
	  bool optDbg = false;

	if (argc >= 1)
	{
		size_t argVal;
		while ((c = getopt(argc, argv, "s:b:t:cg?")) != -1)
		{
			switch (c)
			{
				case 's':
					argVal = atol(optarg);
					optSize = argVal >= 1 ? argVal : optSize;
					if (optSize <= 640)
					{
						optSize *= 1024*1024;
					}
					else
					{
						printf("-s too large!\n");
						exit(-1);
					}	
					break;
				case 'b':
					argVal = atol(optarg);
					optNumBlocks = argVal >= 1 ? argVal : optNumBlocks; break;
				case 't':
					argVal = atol(optarg);
					optNumThreads = argVal >= 1 ? argVal : optNumThreads; break;
				case 'c':
					optCmpCPU = true; break;
				case 'g':
					optCPUvsGPU = true; break;
				case '?':
					optDbg = true; break;
				default:
					break;
			}
		}
	}
	else
	{
		if (rank == 0)
		{
			printf("Syntax Error!\n");
			printf("Usage: %s -s size -b blocks -t threads [OPTIONS]\n", argv[0]);
			printf("Options:\n");
			printf("  -c          compare gpu result to cpu result (default: off)\n");
			printf("  -g          run on one gpu (default: off)\n");
			printf("  -?          show debug infos (default: off)\n");
		}
		MPI_Finalize();
		return 0;
	}
	
	size_t numElements = optSize/sizeof(float);

	if (rank == 0 && optDbg)
	{
		std::cout	<< "#"
					<< " numElem=" << numElements
					<< " size=" << optSize/1024/1024 << "M (" << optSize/1024 << "K)"
					<< " vram=" << optSize/1024/1024*2 << "M (" << optSize/1024*2 << "K)"
					<< " blocks=" << optNumBlocks
					<< " threads=" << optNumThreads
					<< " cmpCPU=" << optCmpCPU
					<< " gpuOnly=" << optCPUvsGPU
					<< " dbg=" << optDbg
					<< std::endl;
	}

	/* allocate memory */
	float *x = NULL;
	float *y = NULL;
	float *x_cpu = NULL;
	float *y_cpu = NULL;
	x = (float*)malloc(optSize);
	y = (float*)malloc(optSize);
	x_cpu = (float*)malloc(optSize);
	y_cpu = (float*)malloc(optSize);

	/* init data */
	srand(0);
	for (size_t i=0; i<numElements; i++)
	{
		*x = rand()%100;
		*y = rand()%100;
		*x_cpu = *x;
		*y_cpu = *y;
	}
	float alpha = 1.337f;

	/* run on CPU */
	if (rank == 0)
	{
		cpu_timer.start();
		do_saxpy_on_cpu(x_cpu, y_cpu, alpha, numElements);
		cpu_timer.stop();
	}

	if (rank == 0 && optCPUvsGPU)
	{
		/* run on one GPU */
		gpu_timer.start();
		do_saxpy_on_gpu(x, y, alpha, optNumThreads, optNumBlocks, numElements, gpures);
		gpu_timer.stop();
	}
	else
	{
		/* run on MPI */
		int nodeSize = numElements / nodes;
		int nodeStart = nodeSize * rank;
		if (optDbg) printf("# %2d: nodeSize=%d nodeStart=%d \n", rank, nodeSize, nodeStart);

		mpi_timer.start();
		do_saxpy_on_gpu(&x[nodeStart], &y[nodeStart], alpha, optNumThreads, optNumBlocks, nodeSize, gpures);
		if (rank == 0)
		{
			mpi_sync_timer.start();
			for (int i=1; i<nodes; i++)
			{
				MPI_Irecv(&y[nodeSize*i], nodeSize, MPI_FLOAT, i, 0, MPI_COMM_WORLD, &request[i-1]);
			}
			MPI_Waitall(nodes-1, &request[0], &status[0]);
			mpi_sync_timer.stop();
		}
		else
		{
			MPI_Send(&y[nodeStart], nodeSize, MPI_FLOAT, 0, 0, MPI_COMM_WORLD);
		}
		mpi_timer.stop();
	}

	/* compare results */
	if (rank == 0 && optCmpCPU)
	{
		size_t errors = 0;
		for (size_t i=0; i<numElements; i++)
		{
			//if (y[i] != y_cpu[i])
			if (fabs(y[i] - y_cpu[i]) > 0.000001)
			{
				printf("\x1B[31m");
				printf("# expected: %lf result: %lf at position %ld \n", y_cpu[i], y[i], i);
				printf("\x1B[0m");
				++errors;
			}
		}

		if (errors)
		{
			printf("\x1B[31m");
			printf("# errors: %ld of %ld \n", errors, numElements);
			printf("\x1B[0m");
		}
	}

	/* print results */
	if (rank == 0)
	{
		if (optDbg)
		{
			std::cout	<< "### Results ###" << std::endl
						<< "# cpu-time: "
						<< cpu_timer.getTime() << " s " << std::endl
						<< "# gpu-time: "
						<< gpu_timer.getTime() << " s " << std::endl
						<< "     # h2d: "
						<< gpures[0] * 1e3 << " ms "
						<< gpures[1] << " GB/s" << std::endl
						<< "     # d2h: "
						<< gpures[2] * 1e3 << " ms "
						<< gpures[3] << " GB/s" << std::endl
						<< "     # kernel: "
						<< gpures[4] << " ms " << std::endl
						<< "# mpi-time: "
						<< mpi_timer.getTime() << " s " << std::endl
						<< "     # mpi-sync-time: "
						<< mpi_sync_timer.getTime() << " s "
						<< std::endl;
		}
		else
		{
			char seperator[5] = ",";
			std::cout	<< nodes << seperator
						<< numElements << seperator
						<< optSize << seperator
						<< cpu_timer.getTime() << seperator
						<< gpu_timer.getTime() << seperator
						<< gpures[0] << seperator
						<< gpures[2] << seperator
						<< gpures[4] <<  seperator
						<< mpi_timer.getTime() << seperator
						<< mpi_sync_timer.getTime()
						<< std::endl;
		}
	}

	free(x);
	free(y);
	free(x_cpu);
	free(y_cpu);

	//printf("# %2d: done\n", rank);
	MPI_Finalize();
	return 0;
}

