#include "structMeshPart.hpp"
#include "structMeshFunctions.hpp"
#include "util.hpp"

namespace hms {

/** Distributes a global array held on rank `root` onto all ranks of `comm`
 * using one-sided communication (fence-synchronised MPI_Get).
 *
 * The local array is either a column vector with one value per cell of the
 * local (sub-)array, or a matrix with `nComponents` rows and one column per
 * cell; the global array is laid out analogously for the global array size.
 * Each rank fetches its rows one by one from the root's window. */
template<typename Derived1, typename Derived2>
void decomposeArray(
	const Eigen::DenseBase<Derived1>& globalArray,
	Eigen::DenseBase<Derived2>& localArray,
	Index nComponents,
	const Ref<const Array2i>& globalArraySize,
	const Ref<const Array2i>& localArraySize,
	const Ref<const Array2i>& localOffset,
	const MPI_Comm_H& comm,
	int root
){
	/* destination must match one of the two supported layouts */
	assert(
		(nComponents == 1 && localArray.rows() == localArraySize.prod() ) ||
		(nComponents == localArray.rows() &&
		localArray.cols() == localArraySize.prod() )
	);
	/* only the root actually owns meaningful global data */
	if ( comm.rank() == root )
		assert(
			(globalArray.cols() == 1 &&
			globalArray.rows() == globalArraySize.prod() ) ||
			(globalArray.rows() == nComponents &&
			globalArray.cols() == globalArraySize.prod() )
		);
	
	/* when creating a window, it is not clear, whether MPI_Put or MPI_Get is
	 * going to be used. Therefore, MPI_Win_create() takes a non-const void
	 * pointer. However, within this function, only MPI_Get is used and thus the
	 * data is not going to be modified. Therefore, the const_cast is safe */
	MPI_Win_H win { const_cast<Derived1&>( globalArray.derived() ), comm };
	
	/* number of scalar entries per row of the local/global array */
	const Index localRowLen  { localArraySize.x()  * nComponents };
	const Index globalRowLen { globalArraySize.x() * nComponents };
	
	win.fence();
	for ( int row{0}; row<localArraySize.y(); ++row ){
		win.get(
			localArray,
			/* local displacement: beginning of this row */
			row * localRowLen,
			/* fetch one full local row */
			localRowLen,
			root,
			/* displacement of the corresponding row inside the root's
			 * global array */
			localOffset.x() * nComponents
				+ ( localOffset.y() + row ) * globalRowLen
		);
	}
	win.fence();
}

/** Convenience overload of decomposeArray() that allocates and returns the
 * local array for this rank.
 *
 * `array` needs to be valid only on rank `root`; the resulting local array
 * is sized `localArraySize.prod()` (column vector input) or
 * `nComponents x localArraySize.prod()` (matrix input). */
template<typename Derived>
auto decomposeArray(
	const Eigen::DenseBase<Derived>& array,
	const Ref<const Array2i>& globalArraySize,
	const Ref<const Array2i>& localArraySize,
	const Ref<const Array2i>& localOffset,
	const MPI_Comm_H& comm,
	int root
) -> typename Derived::PlainObject
{
	/* one value per cell for column vectors, otherwise one column of
	 * nComponents values per cell */
	Index nComponents {
		Derived::ColsAtCompileTime == 1 ? 1 : array.rows()
	};

	/* nComponents is derived from `array`, which is only guaranteed to be
	 * valid on rank `root`. It therefore must be broadcast from `root` —
	 * broadcasting from the communicator's master (as previously done) is
	 * wrong whenever root != master. */
	MPI_Bcast(&nComponents, 1, MPI_INDEX, root, comm);
	
	typename Derived::PlainObject localArray;
	
	/* the matrix is either a column vector with one value for each mesh cell
	 * or a matrix whose column count is equal to the mesh's cell count */
	if constexpr( Derived::ColsAtCompileTime == 1 )
		localArray.resize( localArraySize.prod() );
	else
		localArray.resize(nComponents, localArraySize.prod() );
	
	decomposeArray( array, localArray, nComponents,
		globalArraySize, localArraySize, localOffset, comm, root
	);
	
	return localArray;
}

/** Distributes a global array onto the ranks of a structured-mesh
 * partitioning. The sizes, offsets and communicator are taken from `part`;
 * with `withGhosts` the array is assumed to include the mesh's ghost layers
 * on every side (both globally and locally). */
template<typename Derived, typename MeshType>
auto decomposeArray(
	const Eigen::DenseBase<Derived>& array,
	const StructMeshPart<MeshType>& part,
	bool withGhosts,
	int root
) -> typename Derived::PlainObject
{
	/* number of ghost layers contained in the arrays (0 if none) */
	Index nGhost {0};
	if (withGhosts)
		nGhost = part.mesh().nGhostLayers();
	
	/* array extents including any ghost layers */
	const Array2i globalSize ( part.meshSizeGlobal() + 2*nGhost );
	const Array2i localSize  ( part.meshSize()       + 2*nGhost );
	
	if ( part.comm().rank() == root )
		assert((
		(
			array.cols() == 1 &&
			array.size() == globalSize.prod()
		) ||
		(
			array.size() == globalSize.x()*array.rows()*globalSize.y()
		)) &&
			"Array on root has incorrect size!"
		);
	
	return decomposeArray(
		array, globalSize, localSize, part.offset(), part.comm(), root
	);
}



/** Counterpart of decomposeArray(): gathers the distributed local arrays
 * back into the global array on rank `root` using one-sided communication
 * (fence-synchronised MPI_Put).
 *
 * `localGhostOffBeg`/`localGhostOffEnd` give the number of columns/rows at
 * the lower/upper edges of the local array that must NOT be written to the
 * global array (e.g. ghost layers that overlap a neighbouring part). Only
 * the interior region in between is transferred.
 *
 * `globalArray` needs to be allocated (to globalArraySize) only on `root`;
 * on all other ranks it may be empty — their window simply exposes no data. */
template<typename Derived1, typename Derived2>
void recomposeArray(
	Eigen::DenseBase<Derived1>& globalArray,
	const Eigen::DenseBase<Derived2>& localArray,
	Index nComponents,
	const Ref<const Array2i>& globalArraySize,
	const Ref<const Array2i>& localArraySize,
	const Ref<const Array2i>& localOffset,
	const Ref<const Array2i>& localGhostOffBeg,
	const Ref<const Array2i>& localGhostOffEnd,
	const MPI_Comm_H& comm,
	int root
){
	/* the local array is either a column vector with one value per cell or
	 * a matrix with nComponents rows and one column per cell */
	assert(
		(localArray.rows() == localArraySize.prod() && nComponents == 1 ) ||
		(localArray.cols() == localArraySize.prod() &&
		nComponents == localArray.rows() )
	);

	/* the global array layout only matters on the root, which receives it */
	if ( comm.rank() == root )
		assert(
			(globalArray.cols() == 1 &&
			globalArray.rows() == globalArraySize.prod() ) ||
			(globalArray.cols() == globalArraySize.prod() &&
			globalArray.rows() == nComponents )
		);

	/* window over the (root's) global array; MPI_Put is used here, so no
	 * const_cast is needed */
	MPI_Win_H win { globalArray, comm };
	
	win.fence();
	/* iterate over the non-ghost rows of the local array */
	for (
		Index i{localGhostOffBeg.y()};
		i<localArraySize.y()-localGhostOffEnd.y();
		++i
	){
		win.put( localArray,
			/* source displacement: start of the interior part of row i */
			nComponents * ( localArraySize.x() * i + localGhostOffBeg.x() ),
			/* number of scalar entries in the interior part of one row */
			nComponents * ( localArraySize.x()
				- localGhostOffBeg.x()
				- localGhostOffEnd.x()
			),
			root,
			/* target displacement inside the root's global array */
			nComponents *
			( localOffset.x() + localGhostOffBeg.x() + 
			( localOffset.y() + i )*globalArraySize.x() )
		);
	}
	win.fence();
}


/** Convenience overload of recomposeArray() that allocates and returns the
 * global array. The returned array is only meaningful on rank `root`; all
 * other ranks receive an empty array.
 *
 * Note: unlike the decomposeArray() counterpart, no broadcast of the
 * component count is necessary, because every rank derives it from its own
 * (valid) local array. */
template<typename Derived>
auto recomposeArray(
	const Eigen::DenseBase<Derived>& localArray,
	const Ref<const Array2i>& globalArraySize,
	const Ref<const Array2i>& localArraySize,
	const Ref<const Array2i>& localOffset,
	const Ref<const Array2i>& localGhostOffBeg,
	const Ref<const Array2i>& localGhostOffEnd,
	const MPI_Comm_H& comm,
	int root
) -> typename Derived::PlainObject
{
	/* column vectors carry one value per cell; matrices carry one column of
	 * components per cell */
	constexpr bool isColumnVector { Derived::ColsAtCompileTime == 1 };
	const Index nComponents { isColumnVector ? 1 : localArray.rows() };

	typename Derived::PlainObject globalArray;
	
	/* only the root receives the recomposed data, so only there storage is
	 * required; the shape follows the input layout */
	if ( comm.rank() == root ){
		if constexpr( isColumnVector )
			globalArray.resize( globalArraySize.prod() );
		else
			globalArray.resize( nComponents, globalArraySize.prod() );
	}

	recomposeArray(
		globalArray, localArray, nComponents, globalArraySize, localArraySize,
		localOffset, localGhostOffBeg, localGhostOffEnd, comm, root
	);
	
	return globalArray;
}

/** Gathers the distributed local arrays of a structured-mesh partitioning
 * back into a global array on rank `root`.
 *
 * With `withGhosts`, both the local and global arrays are assumed to include
 * the mesh's ghost layers on every side. Ghost layers at interior part
 * boundaries overlap neighbouring parts and are skipped; ghost layers at the
 * physical domain boundary belong to the global array and are written.
 *
 * `useOldSizes` uses part.oldSizes() instead of the current sizes/offsets —
 * presumably the partition sizes from before a repartitioning step, so that
 * data laid out for the old partitioning can still be recomposed;
 * TODO(review): confirm against StructMeshPart::oldSizes(). */
template<typename Derived, typename MeshType>
auto recomposeArray(
	const Eigen::DenseBase<Derived>& localArray,
	const StructMeshPart<MeshType>& part,
	bool withGhosts,
	bool useOldSizes,
	int root
) -> typename Derived::PlainObject
{
	/* number of ghost layers contained in the arrays (0 if none) */
	Index includedGhostLayers {0};
	if (withGhosts)
		includedGhostLayers = part.mesh().nGhostLayers();
	
	/* local mesh size of this rank, either the current one or the one from
	 * before repartitioning */
	Array2i meshSize {
		useOldSizes ?
			part.oldSizes().col( part.comm().rank() ) :
			part.meshSize()
	};

	/* offset of this part within the global mesh, matching the size choice */
	Array2i offset {
		useOldSizes ?
			meshPartSizesToOffsets( part.nParts(), part.oldSizes() ).col(
				part.comm().rank()
			) :
			part.offset()
	};
	
	/* the local array is either one value per (possibly ghosted) cell or
	 * one column of components per cell */
	assert((
	(
		localArray.cols() == 1 &&
		localArray.size() == (meshSize+2*includedGhostLayers).prod()
	) ||
	(
		localArray.size() ==
		(meshSize.x() + 2*includedGhostLayers)*localArray.rows() *
		(meshSize.y() + 2*includedGhostLayers)
	)) &&
		"Local array has incorrect size!"
	);
	
	return recomposeArray(
		localArray,
		part.meshSizeGlobal() + 2*includedGhostLayers,
		meshSize + 2*includedGhostLayers,
		offset,
		/* ghost layers are only skipped where a neighbouring part exists;
		 * at the domain boundary they are part of the recomposed data */
		Array2i{
			part.atBoundary(BoundaryLocation::left  ) ? 0 : includedGhostLayers,
			part.atBoundary(BoundaryLocation::bottom) ? 0 : includedGhostLayers
		},
		Array2i{
			part.atBoundary(BoundaryLocation::right ) ? 0 : includedGhostLayers,
			part.atBoundary(BoundaryLocation::top   ) ? 0 : includedGhostLayers
		},
		part.comm(), root
	);
}



/** Computes load-balanced part sizes for the structured-mesh partitioning
 * described by `part`, based on per-cell load measurements.
 *
 * The mesh is first split into horizontal bands of (approximately) equal
 * load, then each band is split into parts of equal load. When the new bands
 * no longer overlap the old ones on every rank, the required data cannot be
 * exchanged between neighbours and the computation falls back to gathering
 * all load data on one rank.
 *
 * @param localLoadData one load value per cell of the local mesh partition
 * @return on the global master: a 2 x nRanks array of new part sizes;
 *         meaningless (empty or unfilled) on all other ranks */
template<typename MeshType>
Array2Xi balancedStructMeshParts( 
	const StructMeshPart<MeshType>& part,
	const ArrayXs& localLoadData
){
	assert(
		localLoadData.size() == part.mesh().nDomainCells() &&
		"Local load data must have exactly as many entries as the number of "
		"cells of the local mesh partition"
	);

	/* shape the load like the mesh */
	auto loadReshaped { localLoadData.reshaped(
		part.meshSize().x(),
		part.meshSize().y()
	) };

	/**********************************************************************/
	/* calculate load sums in first dimension (mesh x-direction) */
	/**********************************************************************/

	/* The first step is to divide the mesh into bands with similar loads.
	 * When reshaping the local loads like the mesh, then these bands have
	 * the same direction as the reshaped load's columns. Therefore, the
	 * column-wise sums must be calculated on each node */
	ArrayXs localSums ( loadReshaped.colwise().sum() );

	/* Then, these local sums must be summed up into band-wise sums. */
	/* Creating band communicators: */
	MPI_Comm_H band { part.comm().split( part.band(), part.comm().rank() ) };
	
	ArrayXs bandSums;
	if ( band.isMaster() ) bandSums.resizeLike(localSums);

	MPI_Reduce(
		localSums.data(), bandSums.data(), localSums.size(),
		MPI_SCALAR, MPI_SUM, band.master(), band
	);

	/* the masters of each band hold the bandwise sums, which now have to be
	 * gathered onto one node for determining balanced band sizes */
	MPI_Comm_H bandMasters { part.comm().split(
		band.isMaster() ? 0 : MPI_UNDEFINED, part.comm().rank()
	) };

	ArrayXs xSums;
	if ( bandMasters )
		xSums = MPI_Gatherv_H(bandSums, bandMasters, bandMasters.master());

	/**********************************************************************/
	/* calculate balanced band sizes */
	/**********************************************************************/
	/* now the band sizes can be determined */
	ArrayXi bandSizes ( part.nParts().y() );
	if ( bandMasters.isMaster() )
		bandSizes = equalPartialSums( xSums, part.nParts().y() );

	/* every node needs to know about them */
	MPI_Bcast(
		bandSizes.data(), bandSizes.size(),
		MPI_INDEX, part.comm().master(), part.comm()
	);

	/* exclusive prefix sum of the band sizes; a running sum replaces the
	 * former quadratic bandSizes.head(i).sum() per entry */
	ArrayXi bandOffsets;
	bandOffsets.resizeLike(bandSizes);
	Index runningOffset {0};
	for (Index i{0}; i<bandOffsets.size(); ++i){
		bandOffsets(i) = runningOffset;
		runningOffset += bandSizes(i);
	}

	/**********************************************************************/
	/* check whether the new bands have overlap with the previous ones */
	/**********************************************************************/
	int hasOverlap { static_cast<bool>(overlap_usingSizes(
		bandOffsets( part.band() ), bandSizes( part.band() ),
		part.offset().y(), part.meshSize().y()
	))};

	int allHaveOverlap;
	/* "MPI_LAND" means MPI Logical AND */
	MPI_Allreduce(
		&hasOverlap, &allHaveOverlap,
		1, MPI_INT, MPI_LAND, part.comm()
	);
	
	/* fallback option is to gather all load data on one node and calculate
	 * the new sizes there locally */
	if ( !allHaveOverlap ){
		ArrayXs loadGlobal { recomposeArray(localLoadData,part) };
		Array2Xi newMeshSizes (2, part.comm().size() );
		if ( part.comm().isMaster() )
			newMeshSizes = balancedStructMeshParts(
				part.meshSizeGlobal(),
				part.nParts(),
				loadGlobal
			);
		return newMeshSizes;
	}

	/**********************************************************************/
	/* calculate load sums in second dimension (mesh y-direction) */
	/**********************************************************************/

	/* start with locally available data, then add neighbours' data if
		* necessary */
	ArrayXs ySumsLocal;
	
	/* the new bands' lower and upper boundary will have shifted up or down
	 * in comparison to the previous position */
	/* number of mesh rows shifted up- (positive) or downwards (negative)
	 * relative to previous y-offset */
	Index diffBot { bandOffsets( part.band() ) - part.offset().y() };
	Index diffTop {
		( bandOffsets( part.band() ) + bandSizes(part.band() ) ) -
		( part.offset().y() + part.meshSize().y() )
	};

	/* for local sums in the mesh's y-direction, only use the data that is
	 * within the overlap between old and new band */
	ySumsLocal = loadReshaped.middleCols(
		std::max(Index{0}, diffBot),
		loadReshaped.cols() + std::min(Index{0}, diffTop) -
		/*             this ^ '+'-sign is misleading. std::min(0,diffTop)
		 * is either 0 or negative, so a subtraction will be executed. */
		std::max(Index{0}, diffBot)
	).rowwise().sum();

	ArrayXs sumsForNeighbours;
	ArrayXs sumsFromNeighbours;

	/* if top or bottom is moved inwards, data must be provided. */
	if ( diffBot > 0 || diffTop < 0 )
		sumsForNeighbours.resizeLike(ySumsLocal);
	/*if either of them is moving outwards, data must be fetched. */
	if ( diffBot < 0 || diffTop > 0 ){
		sumsFromNeighbours.resizeLike(ySumsLocal);
		/* Eigen's resize leaves the coefficients indeterminate; zero them so
		 * that entries not covered by any neighbour's MPI_Get cannot inject
		 * garbage into the accumulation below */
		sumsFromNeighbours.setZero();
	}

	MPI_Win_H win { sumsForNeighbours, part.comm(), MPI_Info_no_locks() };

	/* the bottom of the band shifted upwards, so the current node holds
	 * load data required by the lower band */
	if (diffBot > 0){
		sumsForNeighbours =
			loadReshaped.leftCols(diffBot).rowwise().sum();
	}

	win.fence();
	/* if band size and position is exactly the same as before, no 
	 * exchange is necessary */
	if ( diffTop > 0 ){
		for ( const auto& nbr : part.neighbours(BoundaryLocation::top) )
			win.get(sumsFromNeighbours, nbr.ownBeg, nbr.size, nbr.rank, nbr.beg);
	}
	win.fence();
	/* MPI_Get targets are only guaranteed to contain the fetched data after
	 * the fence closing the epoch, so the accumulation must happen here and
	 * not inside the epoch (previously it ran before the fence, which is
	 * undefined behaviour under MPI RMA semantics) */
	if ( diffTop > 0 )
		ySumsLocal += sumsFromNeighbours;

	if (diffTop < 0){
		sumsForNeighbours =
			loadReshaped.rightCols(-diffTop).rowwise().sum();
	}

	win.fence();
	if ( diffBot < 0 ){
		for ( const auto& nbr : part.neighbours(BoundaryLocation::bottom) )
			win.get(sumsFromNeighbours, nbr.ownBeg, nbr.size, nbr.rank, nbr.beg);
	}
	win.fence();
	/* same as above: only read the fetched data after the closing fence */
	if ( diffBot < 0 )
		ySumsLocal += sumsFromNeighbours;

	/**********************************************************************/
	/* determine part sizes per band */
	/**********************************************************************/

	ArrayXs ySums { MPI_Gatherv_H( ySumsLocal, band, band.master() ) };

	Array2Xi partSizes;
	if ( band.isMaster() ){
		partSizes.resize( 2, part.nParts().x() );
		partSizes.row(0) = equalPartialSums( ySums, part.nParts().x() );
		partSizes.row(1) = bandSizes( part.band() );

		Array2Xi meshSizes;
		if ( part.comm().isMaster() )
			meshSizes.resize( 2, part.comm().size() );
		MPI_Gather(
			partSizes.data(), partSizes.size(), MPI_INDEX,
			meshSizes.data(), partSizes.size(), MPI_INDEX,
			bandMasters.master(), bandMasters
		);
		
		return meshSizes;
	}
	return Array2Xi{};
}



}