#ifndef PARALLEL_TURBO
#define PARALLEL_TURBO

//If it's a local (single-process, non-MPI) build, skip the MPI/ParMETIS headers
#ifndef LOCAL	
	#include "parmetis.h"
	#include <mpi.h>
#endif


#include <vector>
#include <map>
#include <boost/algorithm/string.hpp>
#include "grid.h"
#include <stdlib.h>
#include <time.h>

#ifndef LOCAL
//Wrapper around one persistent MPI request and the data buffer it transfers.
//Owns the buffer, hence copying is forbidden (a shallow copy would cause a
//double delete of buf and alias the MPI request handle).
class Request
{
public:
	MPI_Request request;	//Persistent MPI request handle
	double *buf;	//Data buffer owned by this request
	int buf_size;	//Number of doubles in buf
	int cell_index;	//Index of cell to send or to receive
	int tag;		//Tag to distinguish different requests 
	//Allocate an uninitialized buffer of 'size' doubles
	Request(int size) {
		buf_size = size;
		buf = new double[size];
	};
	~Request()
	{
		delete[] buf;
	};
private:
	//Non-copyable (declared but not defined, C++03 idiom)
	Request(const Request&);
	Request& operator=(const Request&);
};

class RequestGroup {
public:
	std::string GroupName;		
	std::vector<Request*> requests_to_send;
	std::vector<Request*> requests_to_recv;
	RequestGroup(std::string name) {
		GroupName = name;
		requests_to_send.clear();
		requests_to_recv.clear();
	};
	//Add send request that sends cell data to destination process
	int AddSendRequest(int cell_index, int bufsize, int dest, int tag) {		
		Request* new_req = new Request(bufsize);
		new_req->cell_index = cell_index;
		new_req->tag = tag;
		int res = MPI_Send_init(new_req->buf, bufsize, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD, &(new_req->request));
		if (res != MPI_SUCCESS) throw 1;
		//printf("send req: %ld\ninfo: %d double %d", new_req->request, bufsize, dest);
		requests_to_send.push_back(new_req);		
		return 0;
	};
	//Add recive request
	int AddRecvRequest(int cell_index, int bufsize, int source, int tag) {
		Request* new_req = new Request(bufsize);
		new_req->cell_index = cell_index;		
		new_req->tag = tag;
		int res = MPI_Recv_init(new_req->buf, bufsize, MPI_DOUBLE, source, tag, MPI_COMM_WORLD, &(new_req->request));
		//printf("recv req %ld\ninfo: %d double %d", new_req->request, bufsize, source);
		if (res != MPI_SUCCESS) throw 1;
		requests_to_recv.push_back(new_req);
		return 0;
	};
	//Send all prepared data
	int SendAll() {
		MPI_Request *requests = new MPI_Request[requests_to_send.size()];
		for (int i = 0; i<requests_to_send.size(); i++) {
			requests[i] = requests_to_send[i]->request;			
		};
		int res = MPI_Startall(requests_to_send.size(), requests);
		if (res != MPI_SUCCESS) throw 1;
		delete[] requests;
		return 0;
	};
	//Recv all prepared data
	int RecvAll() {	
		//Recv all we need
		MPI_Request *requests = new MPI_Request[requests_to_recv.size()];
		MPI_Status *statuses = new MPI_Status[requests_to_recv.size()];
		for (int i = 0; i<requests_to_recv.size(); i++) {
			requests[i] = requests_to_recv[i]->request;
		};
		int res;
		res = MPI_Startall(requests_to_recv.size(), requests);
		if (res != MPI_SUCCESS) throw 1;		
		res = MPI_Waitall(requests_to_recv.size(), requests, statuses);
		if (res != MPI_SUCCESS) throw 1;
		//Wait for sends now
		delete[] statuses;
		delete[] requests;
		statuses = new MPI_Status[requests_to_send.size()];
		requests = new MPI_Request[requests_to_send.size()];
		for (int i = 0; i<requests_to_send.size(); i++) {
			requests[i] = requests_to_send[i]->request;
		};
		res = MPI_Waitall(requests_to_send.size(), requests, statuses);		
		if (res != MPI_SUCCESS) throw 1;
		delete[] statuses;
		delete[] requests;
		return 0;
	};
};

#else
//Local (non-MPI) variant of Request: just an owned data buffer plus the
//bookkeeping fields. Copying is forbidden because a shallow copy would
//cause a double delete of buf.
class Request
{
public:	
	double *buf;	//Data buffer owned by this request
	int buf_size;	//Number of doubles in buf
	int cell_index;	//Index of cell to send or to receive
	int tag;		//Tag to distinguish different requests 
	//Allocate an uninitialized buffer of 'size' doubles
	Request(int size) {
		buf_size = size;
		buf = new double[size];
	};
	~Request()
	{
		delete[] buf;
	};
private:
	//Non-copyable (declared but not defined, C++03 idiom)
	Request(const Request&);
	Request& operator=(const Request&);
};

class Request;

//Local (non-MPI) stub of RequestGroup: stores the group name and accepts
//request registrations, but performs no communication. All operations
//succeed trivially and return 0.
class RequestGroup {
public:
	std::string GroupName;		//Name identifying this exchange group
	std::vector<Request*> requests_to_send;	//Unused in the local build
	std::vector<Request*> requests_to_recv;	//Unused in the local build
	//Store the group name (the MPI version does the same; previously
	//the name argument was silently discarded here)
	RequestGroup(std::string name) {		
		GroupName = name;
	};
	//Add send request that sends cell data to destination process (no-op locally)
	int AddSendRequest(int cell_index, int bufsize, int dest, int tag) {				
		return 0;
	};
	//Add receive request (no-op locally)
	int AddRecvRequest(int cell_index, int bufsize, int source, int tag) {		
		return 0;
	};
	//Send all prepared data (no-op locally)
	int SendAll() {		
		return 0;
	};
	//Recv all prepared data (no-op locally)
	int RecvAll() {			
		return 0;
	};
};

#endif

class Parallel
{
public:
	//Variables
	int NP;
	int rank;
	std::map<int, int> index;	//Global index to local index in pnodes
	std::vector<int> pnodes;	//Parallel nodes and rank of process assigned to them
	
	//Vectors to store necessary exchange requests information
	std::map<std::string, RequestGroup*>	request_groups;	//Map group names
	int AddRequestGroup(std::string name) {
		RequestGroup *new_group = new RequestGroup(name);
		request_groups[name] = new_group;
		return 0;
	};

	//Initialize MPI
	int Init(int *argc, char **argv[])
	{
		#ifndef LOCAL
		int res = MPI_Init(argc, argv);
		if (res != MPI_SUCCESS) printf("MPI_Init error!\n");		
		res = MPI_Comm_size(MPI_COMM_WORLD, &NP);		
		res = MPI_Comm_rank(MPI_COMM_WORLD, &rank);		
		#else	
		NP = 1;
		rank = 0;
		#endif
		return 0;
	};

	Parallel() {
		SetStartTime();
	};

	//Initialize MPI
	Parallel(int *argc, char **argv[]) {
		Init(argc, argv);
		SetStartTime();
	};

	//Finalize MPI
	~Parallel()
	{
		#ifndef LOCAL
		MPI_Finalize();
		#endif
	};

	//Barrier
	void Barrier()
	{
		#ifndef LOCAL
		MPI_Barrier(MPI_COMM_WORLD);
		#endif
		return;
	};

	//Start timer
	#ifndef LOCAL
	double StartTime;
	#else
	time_t StartTime;
	#endif	
	void SetStartTime() {
		#ifndef LOCAL
		StartTime = MPI_Wtime();
		#endif
		StartTime = time( &StartTime );		
		return;
	};

	//Get current time in seconds
	double GetTime() {
		#ifndef LOCAL
		return MPI_Wtime() - StartTime;
		#endif		
		return difftime( time( NULL ) , StartTime);
	};

	double GlobalTime() {
		#ifndef LOCAL
		double min = GlobalMin(StartTime);
		double max = GlobalMax(MPI_Wtime());
		return max - min;			
		#endif
		return GetTime();		
	};
		

	//For a rocket grid
	int CartesianDecomposition(Grid &g) {
		int N = g.cells.size();
		pnodes.resize(N);
		for (int i = 0; i<g.cells.size(); i++) {
			   index[g.cells[i].GlobalIndex] = i;
		};
		#ifndef LOCAL		
		//Needed variables
		int dims[2]={0,0};
		int periods[2]= {0,0}; 
		int reorder = 0;
		int coords[2];
		int cartrank;
		int x, y;

		//Determine dimensions
		MPI_Dims_create(NP, 2, dims);
		
		//Determine x and y position coordinates
		MPI_Comm cartcomm;		
		if (MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, reorder, &cartcomm) != MPI_SUCCESS) {
			printf("MPI_Cart_create failed\n");
			exit(0);
		};
		MPI_Comm_rank(cartcomm, &cartrank);
		MPI_Cart_coords(cartcomm, cartrank, 2, coords);
		x = coords[0];
		y = coords[1];
		int dx = g.nx / dims[0];
		int dy = g.ny / dims[1];
		//Map cell coords to process coords		
		const int dim_max = 1000;
		int xdispl[dim_max];
		int ydispl[dim_max];
		int r = g.nx % dims[0];
		xdispl[0] = 0;
		for (int i = 1; i<=dims[0]; i++) {
			xdispl[i] = xdispl[i-1] + dx;
			if (r > 0) {
				xdispl[i]++;
				r--;
			};
		}
		r = g.ny % dims[1];
		ydispl[0] = 0;
		for (int i = 1; i<=dims[1]; i++) {
			ydispl[i] = ydispl[i-1] + dy;
			if (r > 0) {
				ydispl[i]++;
				r--;
			};
		}
	
		if (IsMaster()) {
			printf("dimensions nx = %ld, ny = %ld\n", dims[0], dims[1]);
			printf("g.nx = %ld, g.ny = %ld\n", g.nx, g.ny);
			printf("dx = %ld, dy = %ld\n", dx, dy);
		};

		for (int i = 0; i<pnodes.size(); i++) {
			Cell& c = g.cells[i];
			std::vector<int> c_coords = g.GetCoordsFromIndex(g.cells[i].GlobalIndex);
			//TO DO modify distribution like in decomposition
			for (int i = 1; i<=dims[0]; i++) {
				if (c_coords[0] < xdispl[i]) {
					coords[0] = i-1;
					break;
				};
			};
			for (int i = 1; i<=dims[1]; i++) {
				if (c_coords[1] < ydispl[i]) {
					coords[1] = i-1;
					break;
				};
			};
			//coords[0] = c_coords[0] / dx;
			//coords[1] = c_coords[1] / dy;
			//if (coords[0] >= dims[0]) coords[0] = dims[0]-1;
			//if (coords[1] >= dims[1]) coords[1] = dims[1]-1;
			int r;
			if (IsMaster()) {
				//printf("GlobalIndex = %ld, x = %ld, y = %ld, r = %ld\n", g.cells[i].GlobalIndex, coords[0], coords[1], r);			
			};			
			int MPI_Result = MPI_Cart_rank(cartcomm, coords, &r );
			if (IsMaster()) {
				//printf("GlobalIndex = %ld, x = %ld, y = %ld, r = %ld\n", g.cells[i].GlobalIndex, coords[0], coords[1], r);			
			};
			pnodes[i] = r;
			g.cells[i].CellHSize = (double)r;
		}		

		double edgecut = 0;
		Barrier();
		#else
		double edgecut = 0;
		for (int i = 0; i<pnodes.size(); i++) {
			pnodes[i] = 0;
		};
		#endif
		return edgecut;
	}

	//Domain decomposition
	int Decomposition(Grid &g)
	{
		int N = g.cells.size();
		pnodes.resize(N);
		for (int i = 0; i<g.cells.size(); i++) {
			   index[g.cells[i].GlobalIndex] = i;
		};
		#ifndef LOCAL
		//Distribute cells to processors
		int n = N/NP;
		int r = N%NP;
		idxtype* vtxdist = new idxtype[NP+1];
		vtxdist[0] = 0;
		for (int i = 1; i<=NP; i++) {
			   vtxdist[i] = vtxdist[i-1] + n;
			   if (r>0) {
					   vtxdist[i]++;
					   r--;
			   };
		};
		n = vtxdist[rank+1] - vtxdist[rank]; //Number of vertices on this processor
		//Build an adjacency list and fill arrays
		idxtype* xadj = new idxtype[n+1];
		std::vector<idxtype> Adjncy;
		int k = 1;
		xadj[0] = 0;
		for (int i = vtxdist[rank]; i<vtxdist[rank+1]; i++) {
			   Cell& c = g.cells[i];
			   int faces = 0;  //Number of faces adjacent to cell
			   for (int j = 0; j<c.Faces.size(); j++) {
					   Face& f = g.faces[g.face_index[c.Faces[j]]];
					   if (!f.isExternal) {
							   faces++;
							   if (f.FaceCell_1 == c.GlobalIndex) Adjncy.push_back(index[f.FaceCell_2]);
							   if (f.FaceCell_2 == c.GlobalIndex) Adjncy.push_back(index[f.FaceCell_1]);
					   };
			   };
			   xadj[k] = xadj[k-1] + faces;
			   k++;
		};
		idxtype* adjncy = new idxtype[Adjncy.size()];
		for (int i = 0; i<Adjncy.size(); i++) adjncy[i] = Adjncy[i];
		float* tpwgts = NULL;
		float* ubvec;
		int wgtflag = 0;
		int numflag = 0;
		int ncon = 0;
		int nparts = NP;
		int options[10]; options[0] = 0;
		int edgecut;
		MPI_Comm Comm;
		MPI_Comm_dup(MPI_COMM_WORLD, &Comm);
		idxtype* part = new idxtype[n];
		//Decompose mesh
		ParMETIS_V3_PartKway(vtxdist,
			   xadj,
			   adjncy,
			   NULL,
			   NULL,
			   &wgtflag,
			   &numflag,
			   &ncon,
			   &nparts,
			   tpwgts,
			   ubvec,
			   options,
			   &edgecut,
			   part,
			   &Comm);
		//Gather results
		int *rbuf = new int[pnodes.size()];
		int *sbuf = new int[n];
		for (int i = 0; i<n; i++) sbuf[i] = part[i];
		int *rcounts = new int[NP];
		int *displs = new int[NP];
		for (int i = 0; i<NP; i++) {
			rcounts[i] = vtxdist[i+1] - vtxdist[i];
			displs[i] = vtxdist[i]; 
		};
		MPI_Allgatherv(sbuf, rcounts[rank], MPI_INT, rbuf, rcounts, displs, MPI_INT, Comm);
		for (int i = 0; i<pnodes.size(); i++) {
			pnodes[i] = rbuf[i];
		};
		delete[] rbuf;
		delete[] sbuf;
		delete[] rcounts;
		delete[] displs;
		#else
		double edgecut = 0;
		for (int i = 0; i<pnodes.size(); i++) {
			pnodes[i] = 0;
		};
		#endif
		return edgecut;
	};	

	//Getting minimum value from all nodes
	double GlobalMin(double value) {
		#ifndef LOCAL
		double result;
		MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
		return result;
		#else
		return value;
		#endif
	};

	//Getting maximum value from all nodes
	double GlobalMax(double value) {
		#ifndef LOCAL
		double result;
		MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
		return result;
		#else
		return value;
		#endif
	};

	//Getting sum of values from all nodes
	double GlobalSum(double value) {
		#ifndef LOCAL
		double result;
		MPI_Allreduce(&value, &result, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
		return result;
		#else
		return value;
		#endif
	};

	//If this is the master node
	bool IsMaster() {
		return rank == 0;
	};

	//If this node belong to this process
	bool IsLocal(int Global_index) {		
		return pnodes[index[Global_index]] == rank;
	};	

	//Get node's host rank
	int GetRank(int Global_index) {
		return pnodes[index[Global_index]];
	};

	//Return amount of nodes that belong to this process
	int NodesCount() {
		int res = 0;
		for (int i = 0; i<pnodes.size(); i++) if (pnodes[i] == rank) res++;
		return res;
	};

	int Gather(int varsize, double *rbuf, double *sbuf)
	{
		#ifndef LOCAL
		int *rcounts = new int[NP];
		int *displs = new int[NP];
		for (int i = 0; i<NP; i++) rcounts[i] = 0;
		for (int i = 0; i<pnodes.size(); i++) rcounts[pnodes[i]]++;
		displs[0] = 0;
		for (int i = 0; i<NP; i++) {
			rcounts[i] *= varsize;
			if (i>0) displs[i] = rcounts[i-1] + displs[i-1];
		};
		MPI_Gatherv(sbuf, rcounts[rank], MPI_DOUBLE, rbuf, rcounts, displs, MPI_DOUBLE, 0, MPI_COMM_WORLD);
		delete[] rcounts;
		delete[] displs;
		#else
		for (int i = 0; i<pnodes.size()*varsize; i++) rbuf[i] = sbuf[i];
		#endif
		return 0;
	};

};

#endif
