#include "GlobalDefinitions.h"
#include "MpiSynchronization.h"
#include "ThreadFunc.h"
#include "CUDAFunctions.cuh"
#include <sstream>

int processes; //number of processes in the MPI communicator
int myrank; //rank of this process
int *elements; //vector containing the number of rows received from the master by the i-th process
int maxElem; //max elements given to each process
int rows; //rows of the matrix synched with the master process
int columns; //columns of the matrix synched with the master process
pthread_t* p_threads; //array of pthread ids
int thread_amount; //number of threads
MEMORYSTATUSEX statex; //struct for memory info, filled via GlobalMemoryStatusEx in main

SimpleMatrix<unsigned char> *currentUniverse; //current-generation universe used by tick() on every rank
SimpleMatrix<unsigned char> *nextUniverse; //buffer holding the next generation built by tick()
SimpleMatrix<unsigned char> *globalUniverse; //whole universe, allocated on the master process

enum spawnConfig { DEBUG, RANDOM, FROM_FILE, RANDOM_FILE }; //options for the initial seed
enum hpcOption { NORMAL,CUDA_NORMAL,CUDA_TEXTURE,MPI_PTHREAD,MPI_OPENMP }; //options for hpc computing
enum threadOption { STATIC,DYNAMIC }; //workload distribution policy for the pthread backend
enum allocationOption { RAM,DISK}; //whether the universe lives in RAM or is streamed from disk

//aggregates the figures written to the report file by writeReport()
struct ReportData
{
	MEMORYSTATUSEX* state_ptr; //system memory snapshot taken at startup
	double elapsed_time; //total run time in seconds
	int thread_used; //threads per process
	int matrix_memory_used; //matrix RAM usage in bytes, or -1 when disk allocation was used
	int processes_used; //MPI process count
	int ticks; //number of generations simulated
};

using namespace std;

//fill the start matrix according to the specified option
//config: RANDOM builds a random universe in memory, RANDOM_FILE writes a random
//        universe to disk, DEBUG fills every cell of row i with value i,
//        FROM_FILE loads a previously stored matrix
//save_input_matrix: when true the RANDOM universe is also saved to disk
void spawnSeed(spawnConfig config, bool save_input_matrix = false)
{
	switch (config)
	{
		case RANDOM:
		{
			globalUniverse = new SimpleMatrix<unsigned char>(ROWS,COLUMNS);

#ifdef DEBUG_SEED
			srand (6); //fixed seed for repeatable runs
#else
			srand (time(0));
#endif
			//FIX: the original opened a second "omp parallel" region (with its
			//own "omp for") inside the outer region's row loop; a nested region
			//does not share the outer team, so every row paid a fork/join for no
			//extra parallelism. A single parallel-for over the rows does the
			//intended work.
			//NOTE(review): rand() in a parallel region relies on the CRT keeping
			//per-thread generator state (true for MSVC); with DEBUG_SEED the
			//exact content is reproducible only in a serial run — confirm.
			#pragma omp parallel for num_threads(MAX_THREAD)
			for (int i = 0; i < globalUniverse -> height(); i++)
			{
				for (int j = 0; j < globalUniverse -> width(); j++)
				{
					int choice = rand() % 100;
					if (choice < DEAD_PROB)
						globalUniverse ->set(i, j, 0);
					else
						globalUniverse ->set(i, j, 1);
				}
			}

			//build a border of dead cells around the universe
			globalUniverse ->buildBorder(0);
			if (save_input_matrix)
			{
				CreateDirectory(L"D:\\HPCTest",NULL);
				globalUniverse ->storeMatrix("D:\\HPCTest\\matrix.in");
			}
			break;
		}

		case RANDOM_FILE:
		{
#ifdef DEBUG_SEED
			srand (6);
#else
			srand (time(0));
#endif
			CreateDirectory(L"D:\\HPCTest",NULL);
			ofstream output_file("D:\\HPCTest\\input.dat",std::ios::out);
			if(output_file.is_open())
			{
				for (int i = 0; i < ROWS; i++)
				{
					for (int j = 0; j < COLUMNS; j++)
					{
						//one cell per column, space separated, newline ends a row
						int choice = rand() % 100;
						int cell = (choice < DEAD_PROB) ? 0 : 1;
						if (j == COLUMNS - 1)
							output_file << cell << "\n";
						else
							output_file << cell << ' ';
					}
				}
				output_file.close();
			}
			else
			{
				cerr << "Unable to create file for seed generation" << endl;
				exit(1);
			}
			break;
		}

		case DEBUG:
		{
			//deterministic pattern: every cell of row i holds value i
			//(truncated to unsigned char for i > 255, as before)
			globalUniverse = new SimpleMatrix<unsigned char>(ROWS,COLUMNS);
			for (int i = 0; i < globalUniverse -> height(); i++)
				for (int j = 0; j < globalUniverse -> width(); j++)
					globalUniverse -> set(i, j, i);
			break;
		}

		case FROM_FILE:
		{
			globalUniverse = new SimpleMatrix<unsigned char>("D:\\HPCTest\\matrix.dat");
			//build a border of dead cells around the universe
			globalUniverse ->buildBorder(0);
			break;
		}

		default:
		{
			cerr << "ERROR: Invalid config option in spawnSeed" << endl;
			exit(1);
		}
	}
}

//advance the local universe by one generation
//option: backend used for the update (NORMAL serial, MPI_PTHREAD, MPI_OPENMP);
//        the CUDA backends are dispatched before tick() is ever reached
//to: thread scheduling policy for the pthread backend (only STATIC is handled)
void tick(hpcOption option, threadOption to)
{
	//seed the next generation with a copy of the current one, so the rules
	//below only need to write the cells that actually change state
	for (int i = 0; i < currentUniverse -> height(); i++)
		for (int j = 0; j < currentUniverse -> width(); j++)
			nextUniverse -> set(i, j, currentUniverse -> get(i,j));

#ifdef MPI_DEBUG
	//printable mapping for debug dumps: dead (0) -> 'X', alive (1) -> 'O'
	//(declared under MPI_DEBUG so release builds don't construct unused vectors,
	//and renamed so the global int* elements is no longer shadowed)
	vector<char> symbols(2);
	vector<unsigned char> cell_values(2);
	symbols[0] = 'X';
	symbols[1] = 'O';
	cell_values[0] = 0;
	cell_values[1] = 1;
	cout << "Process " << myrank << " Current Universe: \n" << endl;
	currentUniverse -> print(symbols, cell_values);
#endif

	switch(option)
	{
		case NORMAL:
		{
			//serial update of the interior cells (the one-cell border stays dead)
			for (int i = 1; i < currentUniverse -> height() - 1; i++)
			{
				for (int j = 1; j < currentUniverse -> width() - 1; j++)
				{
					int liveNeighbors =	currentUniverse -> get(i - 1, j - 1) +
										currentUniverse	-> get(i - 1, j) +
										currentUniverse -> get(i - 1, j + 1) +
										currentUniverse -> get(i, j - 1) +
										currentUniverse -> get(i, j + 1) +
										currentUniverse -> get(i + 1, j - 1) +
										currentUniverse -> get(i + 1, j) +
										currentUniverse -> get(i + 1, j + 1);

					if (currentUniverse -> get(i,j) == 1)
					{
						//a live cell dies of isolation (<2) or overcrowding (>3)
						if (liveNeighbors < 2 || liveNeighbors > 3)
							nextUniverse -> set(i, j, 0);
					}
					else
						//a dead cell with exactly 3 live neighbors is born
						if (liveNeighbors == 3)
							nextUniverse -> set(i, j, 1);
				}
			}

			//publish the new generation back into the current universe
			for (int i = 0; i < nextUniverse -> height(); i++)
				for (int j = 0; j < nextUniverse -> width(); j++)
					currentUniverse -> set(i, j, nextUniverse -> get(i,j));
			break;
		}
		case MPI_PTHREAD:
		{
			switch(to)
			{
				case STATIC:
				{
					p_threads = new pthread_t[MAX_THREAD];
					pthread_attr_t attr; //NOTE(review): passed uninitialized — presumably set up inside invokeThreads, confirm
					vector<int> thread_elements(MAX_THREAD);
					thread_amount = thread_elements.size();
					invokeThreads(&attr,thread_elements);

					//FIX: the original leaked the pthread_t array on every tick.
					//The copy below reads the workers' results, so invokeThreads
					//must have joined them and the ids can be released here.
					delete [] p_threads;
					p_threads = NULL;

					//update the current universe with the new status
					for (int i = 0; i < nextUniverse -> height(); i++)
						for (int j = 0; j < nextUniverse -> width(); j++)
							currentUniverse -> set(i, j, nextUniverse -> get(i,j));
					break;
				}
			}

			break;
		}
		case MPI_OPENMP:
		{
			//FIX: the original opened a second "omp parallel" region (with its
			//own "omp for") inside the outer region's loops; nested regions do
			//not share the outer team, adding a fork/join per row without extra
			//parallelism. One team now runs both phases; the implicit barrier at
			//the end of the first "omp for" guarantees every cell is computed
			//before the copy-back starts.
			#pragma omp parallel num_threads(MAX_THREAD)
			{
				#pragma omp for
				for (int i = 1; i < currentUniverse -> height() - 1; i++)
				{
					for (int j = 1; j < currentUniverse -> width() - 1; j++)
					{
						int liveNeighbors =	currentUniverse -> get(i - 1, j - 1) +
											currentUniverse	-> get(i - 1, j) +
											currentUniverse -> get(i - 1, j + 1) +
											currentUniverse -> get(i, j - 1) +
											currentUniverse -> get(i, j + 1) +
											currentUniverse -> get(i + 1, j - 1) +
											currentUniverse -> get(i + 1, j) +
											currentUniverse -> get(i + 1, j + 1);

						if (currentUniverse -> get(i,j) == 1)
						{
							if (liveNeighbors < 2 || liveNeighbors > 3)
								nextUniverse -> set(i, j, 0);
						}
						else
							if (liveNeighbors == 3)
								nextUniverse -> set(i, j, 1);
					}
				}

				#pragma omp for
				for (int i = 0; i < nextUniverse -> height(); i++)
					for (int j = 0; j < nextUniverse -> width(); j++)
						currentUniverse -> set(i, j, nextUniverse -> get(i,j));
			}
			break;
		}

		default:
			break; //CUDA options never reach tick(); nothing to do here
	}
#ifdef MPI_DEBUG
	cout << "\n\nProcess " << myrank << " Next Universe: \n" << endl;
	nextUniverse -> print(symbols, cell_values);
#endif

}

void writeReport(struct ReportData data)
{
	MEMORYSTATUSEX statex = *(data.state_ptr);
	CreateDirectory(L"D:\\HPCTest",NULL);
	ofstream report("D:\\HPCTest\\GameOfLife.rep");
	if (report.is_open())
	{
		report << "Total physical memory: " << fixed << (long double)statex.ullTotalPhys / 1024.0 << " KB\n";
		report << "Available physical memory: " << fixed << (long double)statex.ullAvailPhys / 1024.0 << " KB\n";
		report << "Total virtual memory: " << fixed << (long double)statex.ullTotalVirtual / 1024.0 << " KB\n";
		report << "Available virtual memory: " << fixed << (long double)statex.ullAvailVirtual / 1024.0 << " KB\n";
		report << "Elapsed time: " << data.elapsed_time << "\n";
		if (data.matrix_memory_used == -1)
			report << "Matrix RAM usage: Disk allocation used\n";
		else
			report << "Matrix RAM usage: " << (long double)data.matrix_memory_used / 1024.0 << " KB\n";
		report << "Threads per process used: " << data.thread_used << "\n";
		report << "Processes used: " << data.processes_used << "\n";
		report << "Iterations: " << data.ticks << "\n";
	}
	else
	{
		cerr << "ERROR: Cannot create report file" << endl;
		exit(1);
	}
}


void runWithNormalAllocation(bool save_to_file = false,hpcOption opt = NORMAL)
{
	if (opt == CUDA_NORMAL)
	{
		//run cuda only on one process
		if (myrank == 0)
		{
			cout << "Generating matrix..." << endl;
			spawnSeed(RANDOM,save_to_file);
			cout << "Done!" << endl;
			cudaRunWithNormalMemory(save_to_file);
			return;
		}
	}
	else if (opt == CUDA_TEXTURE)
	{
		if (myrank == 0)
		{
			cout << "Generating matrix..." << endl;
			spawnSeed(RANDOM,save_to_file);
			cout << "Done!" << endl;
			cudaRunWithTextureCache(save_to_file);
			return;
		}
	}

	if (processes < 2)
	{
		cerr << "The cluster must have at least 2 processes" <<endl;
		exit(1);
	}

	vector<char>symbols(2);
	vector<unsigned char>elements(2);
	if (myrank == 0)
	{
		cout << "Generating matrix..." << endl;
		spawnSeed(RANDOM,save_to_file);
		vector<char>symbols(2);
		vector<unsigned char>elements(2);
		symbols[0] = 'X';
		symbols[1] = 'O';
		elements[0] = 0;
		elements[1] = 1;
		cout << "Done!" << endl;
#ifdef GENERAL_DEBUG
		cout << "\n\nSTARTING UNIVERSE:\n" << endl;
		globalUniverse ->print(symbols,elements);
		globalUniverse ->storeMatrix("D:\\HPCTest\\debug_starting_universe.out");
#endif

	}
	synchMatrixDimensions();
	if (myrank == 0)
		cout << "Broadcasting matrix..." << endl;


	sendPartialMartrices();

	if (myrank == 0)
	{
		cout << "Done!" << endl;
		cout << "Running game of life..." << endl;
	}
	for (int i = 0; i < TICK_COUNT; i++)
	{
		tick(opt,STATIC);
		updatePartialMatrices();
#ifdef MPI_DEBUG
		Sleep(2.5);
#endif
	}

	if (myrank == 0)
	{
		cout << "Done!" << endl;
		cout << "Gathering matrixes..." << endl;
	}

	gatherPartialMatrices();

	if (myrank == 0)
	{
		cout << "Done!" << endl;
		if (save_to_file)
		{
			cout << "Writing output file..." << endl;
			CreateDirectory(L"D:\\HPCTest",NULL);
			globalUniverse ->storeMatrix("D:\\HPCTest\\matrix.out");
			cout << "Done!" << endl;
		}
	}
}

//run the simulation streaming the matrix through disk in fixed-size slices,
//for universes larger than MAX_MEMORY_USAGE bytes of RAM
//statex: system memory snapshot — NOTE(review): currently unused in this
//        function; confirm whether it was meant to size allocated_rows
//generate_file: when true the master generates a fresh random input file first
//overwrite_files: when false the per-tick part/output files keep the tick index
//                 in their names instead of being overwritten each tick
//opt: computing backend forwarded to tick()
void runWithDiskAllocation(MEMORYSTATUSEX* statex, bool generate_file = false, bool overwrite_files = true,hpcOption opt = NORMAL)
{
	if (processes < 2)
	{
		cerr << "The cluster must have at least 2 processes" <<endl;
		exit(1);
	}

	int total_rows = 0;
	int total_columns = 0;
	int matrix_params[2]; //filled by getMatrixDimensionFromFile: [0] rows, [1] columns
	int allocated_rows = 0; //rows each in-memory submatrix slice can hold
	const string part_extension = ".part";
	const string dat_extension = ".dat";
	stringstream temp_stream; //reused to build every file path below
	string matrix_file_path = "D:\\HPCTest\\input.dat";
	vector<char>symbols(2); //NOTE(review): unused in this function
	vector<unsigned char>elements(2); //NOTE(review): unused, and shadows the global int* elements

	if (myrank == 0)
	{
		if (generate_file)
		{
			cout << "Generating matrix file..." << endl;
			spawnSeed(RANDOM_FILE);
			cout << "Done!" << endl;
		}
		cout << "Scanning matrix file to retrieve matrix properties..." << endl;
		MatrixUtilities::getMatrixDimensionFromFile(matrix_file_path,' ',matrix_params);
		total_rows = matrix_params[0];
		total_columns = matrix_params[1];
		//one cell is one byte, so rows-per-slice = memory budget / row length
		allocated_rows = MAX_MEMORY_USAGE / total_columns;
		cout << "Done!" << endl;
		cout << "Matrix size: " << (long double)total_rows * (long double)total_columns / 1024.0 << " KB" << endl;
		cout << "Allocated rows per submatrix: " << allocated_rows << endl;

		//if the matrix can be stored completely then we don't need to run disk allocation
		//NOTE(review): only rank 0 takes this early exit; the other ranks carry
		//on with the disk path below — looks like a rank desynchronization, confirm
		if (allocated_rows >= total_rows)
		{
			cout << "Matrix can be stored directly into main memory..." << endl;
			runWithNormalAllocation(true,opt);
			return;
		}
	}

	//sending allocation parameters to the other processes
	if (myrank == 0)
	{
		for(int i = 1; i < processes; i++)
		{
			MPI_Send(&total_rows,1,MPI_INT,i,MSG_TOTAL_ROWS,MPI_COMM_WORLD);
			MPI_Send(&total_columns,1,MPI_INT,i,MSG_TOTAL_COLUMNS,MPI_COMM_WORLD);
			MPI_Send(&allocated_rows,1,MPI_INT,i,MSG_ALLOC_ROWS,MPI_COMM_WORLD);
		}
	}
	else
	{
		MPI_Status status;
		MPI_Recv(&total_rows,1,MPI_INT,0,MSG_TOTAL_ROWS,MPI_COMM_WORLD,&status);
		MPI_Recv(&total_columns,1,MPI_INT,0,MSG_TOTAL_COLUMNS,MPI_COMM_WORLD,&status);
		MPI_Recv(&allocated_rows,1,MPI_INT,0,MSG_ALLOC_ROWS,MPI_COMM_WORLD,&status);
	}

	//cout << "Hello from process " << myrank <<endl;

	//outer loop: one pass over the whole universe per tick.
	//submatrix_index is declared in the for-init so it stays in scope for the
	//merge step after the inner loop, where it equals the number of slices.
	for (int t = 0,submatrix_index = 0; t < TICK_COUNT; t++)
	{
		submatrix_index = 0;
		//inner loop: walk the universe slice by slice; i is the first row of the
		//current slice inside the full matrix
		for (int i = 0; submatrix_index <= total_rows / allocated_rows; i += allocated_rows,submatrix_index++)
		{
			if (myrank == 0)
			{
				//the last slice holds only the remainder rows
				int assigned_rows = i + allocated_rows > total_rows ? total_rows % allocated_rows : allocated_rows;
				cout << "Submatrix " << submatrix_index << " assigned rows: " << assigned_rows << endl;
				cout << "Loading submatrix " << submatrix_index << " from file..." << endl;
				globalUniverse = new SimpleMatrix<unsigned char>(matrix_file_path,assigned_rows,total_columns,i);
				//cout << "globalUniverse =" << globalUniverse ->height() << globalUniverse ->width() << endl;
				//globalUniverse ->print();

				//the first submatrix must store the first row of the next submatrix
				if (submatrix_index == 0)
				{
					SimpleMatrix<unsigned char> *next_line = new SimpleMatrix<unsigned char>(matrix_file_path,1,total_columns,allocated_rows);
					globalUniverse ->addRow(next_line ->buffer(),globalUniverse ->height());
					globalUniverse ->buildBorder(0);
					delete next_line;
				}
				//the last submatrix must store the last row of the previous submatrix
				else if (submatrix_index == total_rows / allocated_rows)
				{
					SimpleMatrix<unsigned char> *prev_line = new SimpleMatrix<unsigned char>(matrix_file_path,1,total_columns,total_rows - assigned_rows - 1);
					globalUniverse ->addRow(prev_line ->buffer(),0);
					globalUniverse ->buildBorder(0);
					//cout << "globalUniverse =" << endl;
					//globalUniverse ->print();
					delete prev_line;
				}
				//the remaining submatrices store the last row of the previous submatrix and the first of the next one
				else
				{
					SimpleMatrix<unsigned char> *next_line = new SimpleMatrix<unsigned char>(matrix_file_path,1,total_columns,i + allocated_rows);
					SimpleMatrix<unsigned char> *prev_line = new SimpleMatrix<unsigned char>(matrix_file_path,1,total_columns,i - 1);
					globalUniverse ->addRow(prev_line ->buffer(),0);
					globalUniverse ->addRow(next_line ->buffer(),globalUniverse ->height());
					globalUniverse ->buildBorder(0);
					//globalUniverse -> print();
					delete next_line;
					delete prev_line;
				}

				//globalUniverse ->print();
				
				//format the universe correctly: the file stores ASCII '0'/'1',
				//the simulation works on numeric 0/1
				globalUniverse ->ReplaceElement('0',0);
				globalUniverse ->ReplaceElement('1',1);

				cout << "Done!" << endl;
			}


			synchMatrixDimensions();
			
			if (myrank == 0)
				cout << "Broadcasting matrix..." << endl;

			sendPartialMartrices();

			if (myrank == 0)
			{
				cout << "Done!" << endl;
				cout << "Running game of life on submatrix " << submatrix_index << "..." << endl;
			}
			
			tick(opt,STATIC);
			updatePartialMatrices();

			if (myrank == 0)
			{
				cout << "Done!" << endl;
				cout << "Gathering matrixes..." << endl;
			}

			gatherPartialMatrices();

			//clean the partial matrix of additional rows used in computation
			//(the ghost rows and border added above must not reach the output file)
			if (myrank == 0)
			{
				if (submatrix_index == 0)
				{
					globalUniverse ->RemoveBorder();
					globalUniverse ->removeRow(globalUniverse ->height() - 1);
				}
				else if (submatrix_index == total_rows / allocated_rows)
				{
					globalUniverse ->RemoveBorder();
					globalUniverse ->removeRow(0);
				}
				else
				{
					globalUniverse ->RemoveBorder();
					globalUniverse ->removeRow(0);
					globalUniverse ->removeRow(globalUniverse ->height() - 1);
				}
			}

			//store the updated partial matrix file
			if (myrank == 0)
			{
				temp_stream.str(""); //clear the stringstream to build the next string

				if (!overwrite_files)
					temp_stream << "D:\\HPCTest\\output_part" << t << submatrix_index << part_extension;
				else
					temp_stream << "D:\\HPCTest\\output_part" << submatrix_index << part_extension;

				cout << "Done!" << endl;
				cout << "Storing partial matrix " << submatrix_index << " of tick " << t << endl;
				string part_path = temp_stream.str();
				globalUniverse ->storeMatrix(temp_stream.str());

				//free the memory for the next submatrix
				delete globalUniverse;
			}
		}

		//merge the part files into one file
		if (myrank == 0)
		{
			cout << "Merging universe output file..." << endl;
			temp_stream.str(""); //clear streamstring

			if (!overwrite_files)
				temp_stream << "D:\\HPCTest\\final_matrix" << t << ".dat";
			else
				temp_stream << "D:\\HPCTest\\final_matrix" << ".dat";

			//the merged file becomes the input of the next tick
			matrix_file_path = temp_stream.str();
			ofstream tick_output(matrix_file_path);
			ifstream input_part_file;
			if (tick_output.is_open())
			{
				//submatrix_index still holds the slice count from the inner loop
				for (int i = 0; i < submatrix_index; i++)
				{
					string current_line;
					temp_stream.str(""); //clear streamstring

					if (!overwrite_files)
						temp_stream << "D:\\HPCTest\\output_part" << t << i << part_extension;
					else
						temp_stream << "D:\\HPCTest\\output_part" << i << part_extension;

					string part_path = temp_stream.str();
					input_part_file.open(part_path);
					if (input_part_file.is_open())
					{
						while (!input_part_file.eof())
						{
							getline(input_part_file,current_line);
							//the empty-string check skips the spurious blank line
							//getline yields when eof is reached
							tick_output << current_line << (!current_line.empty() ? "\n" : "");
						}
						input_part_file.close();
					}
					else
					{
						cerr << "Can't read " + part_path << endl;
						exit(1);
					}
				}
				tick_output.close();
				cout << "Done!" << endl;
			}
			else
			{
				cerr << "Cannot create matrix output file in runWithDiskAllocation" << endl;
				exit(1);
			}
		}

		//keep all ranks in lockstep before starting the next tick
		MPI_Barrier(MPI_COMM_WORLD);

	}
}

//program entry point: initializes MPI, prints system memory information from
//the master rank, runs the selected game of life configuration, and writes a
//run report from the master rank
int main(int argc, char* argv[])
{
	allocationOption alopt = RAM; //where the universe lives
	hpcOption hopt = CUDA_TEXTURE; //computing backend
	clock_t t1 = 0,t2 = 0; //initialized: non-master ranks never set them
	double elapsed_time = 0.0;

	MPI_Init(&argc, &argv);
	MPI_Comm_size(MPI_COMM_WORLD, &processes);
	MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
	
	if (myrank == 0)
	{
		cout << "****** SYSTEM INFORMATION ******\n\n";
		statex.dwLength = sizeof (statex);

		GlobalMemoryStatusEx(&statex);
		cout << "Total physical memory: " << fixed << (long double)statex.ullTotalPhys / 1024.0 << " KB" << endl;
		cout << "Available physical memory: " << fixed << (long double)statex.ullAvailPhys / 1024.0 << " KB" << endl;
		cout << "Total virtual memory: " << fixed << (long double)statex.ullTotalVirtual / 1024.0 << " KB" << endl;
		cout << "Available virtual memory: " << fixed << (long double)statex.ullAvailVirtual / 1024.0 << " KB" << endl;
		cout << "\n" << endl;
	}


	//initial time (master only: it is the only rank that writes the report)
	if (myrank == 0)
		t1 = clock();
	
	switch (alopt)
	{
		case RAM:
			runWithNormalAllocation(false,hopt);
			break;

		case DISK:
			runWithDiskAllocation(&statex,true,true,hopt);
			break;
	}

	//final time
	if (myrank == 0)
	{
		t2 = clock();
		elapsed_time = ((double)t2 - (double)t1) / (double)CLOCKS_PER_SEC;
	}

	if (myrank == 0)
	{
		struct ReportData data =
		{
			&statex,
			elapsed_time,
			thread_amount,
			//FIX: guard against a never-allocated universe (globalUniverse is a
			//zero-initialized file-scope pointer) before asking for its memory
			//usage; -1 is the "no RAM figure" sentinel writeReport understands
			(alopt == RAM && globalUniverse != NULL) ? globalUniverse ->memoryUsage() : -1,
			processes,
			TICK_COUNT
		};
		writeReport(data);
	}

	Sleep(5000); //give every rank time to flush its console output
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Finalize();
	return 0;
}