#include "structMeshPart.hpp"
#include "structMeshPartitioning.hpp"
#include "structMeshDistribution.hpp"
#include "util.hpp"
#include <iomanip>

#include "magic_enum.hpp"

namespace hms {

/**
 * @brief Construct an uninitialised mesh partition.
 * 
 * @param comm The optional MPI communicator. Default is MPI_COMM_NULL,
 * which can be set to a valid one using StructMeshPart::comm( MPI_Comm_H ).
 */
template<typename MeshType>
StructMeshPart<MeshType>::StructMeshPart( MPI_Comm_H comm ) :
	/* move the by-value handle into the member, consistent with the other
	 * constructors */
	m_comm { std::move(comm) }
{}

/**
 * @brief Constructs a mesh partition by distributing a full mesh.
 * 
 * @param mesh The complete, undistributed mesh; has to be supplied on the
 * master rank of @p comm.
 * @param comm Communicator across whose members the mesh is distributed in
 * close to equally sized parts. Must be identical on all partitions.
 */
template<typename MeshType>
StructMeshPart<MeshType>::StructMeshPart(
	StructMeshBase<MeshType>&& mesh,
	MPI_Comm_H comm
) :
	m_comm { std::move(comm) }
{
	/* hand the concrete mesh type over to the distribution routine */
	assignAndDistribute( std::move( mesh.derived() ) );
}

/**
 * @brief Constructs a mesh partition from an already distributed local mesh.
 * 
 * @param mesh The local (per-rank) mesh.
 * @param offset Distance of the local mesh's bottom left domain cell to the
 * global bottom left domain cell, in cells, for x- and y-direction.
 * @param comm Communicator. Must be identical on all partitions.
 */
template<typename MeshType>
StructMeshPart<MeshType>::StructMeshPart(
	StructMeshBase<MeshType>&& mesh,
	Array2i offset,
	MPI_Comm_H comm
) :
	m_comm { std::move(comm) }
{
	/* derive global size, partition grid and neighbours from local data */
	assignLocal( std::move( mesh.derived() ), std::move(offset) );
}

/**
 * @brief Returns the domain size of the global (undistributed) mesh, ghost
 * cells excluded.
 * 
 * @return const Array2i& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::meshSizeGlobal() const -> const Array2i& {
	assert(
		(m_meshSizeGlobal != 0).all() &&
		"You are trying to access the global mesh size of an uninitialised mesh partition!"
	);
	return m_meshSizeGlobal;
}


/**
 * @brief Returns the number of partitions in topological x- and y-direction.
 * 
 * @return const Array2i& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::nParts() const -> const Array2i& {
	return m_nParts;
}

/**
 * @brief Returns the offset of the managed mesh.
 * 
 * @return const Array2i& 
 * 
 * The offset is the cell count in x- and y-direction between the bottom left
 * domain cell of the global mesh and that of the local mesh.
 * @see meshSize()
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::offset() const -> const Array2i& {
	return m_offset;
}


/**
 * @brief Returns the per-rank mesh sizes recorded before the last call to
 * redistributeMesh().
 * 
 * @return const Array2Xi& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::oldSizes() const -> const Array2Xi& {
	assert(
		m_oldSizes.size() != 0 &&
		"No previous mesh sizes available before calling redistributeMesh()!"
	);
	return m_oldSizes;
}


/**
 * @brief Returns the local mesh domain size in x- and y-direction.
 * 
 * @return SizeType
 * 
 * @see offset()
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::meshSize() const -> SizeType {
	return { mesh().size() };
}

/**
 * @brief Returns the band index of the mesh partition.
 * 
 * @return Index 
 * 
 * A partition together with all of its left and right neighbours forms a
 * band. Rebalancing can shift partitions laterally, i.e. relative to their
 * top and bottom neighbours, but never relative to their left and right
 * neighbours, so band membership is stable. Bands are numbered from zero
 * at the bottom, increasing in y-direction.
 * 
 * @see indexInBand()
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::band() const -> Index {
	assert( m_comm && "Mesh partition has invalid communicator!" );
	/* ranks are laid out band by band, so dividing by the band width
	 * yields the band index */
	return m_comm.rank() / m_nParts.x();
}

/**
 * @brief Returns the position of the partition within its band, counted
 * from left to right.
 * 
 * @return Index 
 * @see band()
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::indexInBand() const -> Index {
	assert( m_comm && "Mesh partition has invalid communicator!" );
	/* the remainder w.r.t. the band width is the lateral position */
	return m_comm.rank() % m_nParts.x();
}

/**
 * @brief Read access to the communicator handle.
 * 
 * @return const MPI_Comm_H& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::comm() const -> const MPI_Comm_H& {
	return m_comm;
}

/**
 * @brief Assigns a communicator to the partition.
 * 
 * @param comm The new communicator. Pass an rvalue to trigger move
 * semantics; otherwise it is copied.
 * 
 * Only permitted while the partition still holds a null communicator,
 * e.g. right after default-initialisation.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::comm( MPI_Comm_H comm ){
	assert( !m_comm &&
		"Mesh partition already has a valid communicator! "
		"Assigning a new communicator is only allowed when the mesh "
		"partition holds a null communicator!"
	);
	m_comm = std::move(comm);
}


/**
 * @brief Returns a const reference to the mesh managed by the mesh partition.
 * 
 * @return const MeshType& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::mesh() const -> const MeshType& {
	assert( m_mesh.nDomainCells() != 0 &&
		"You are trying to access an uninitialised mesh! Use "
		"assignAndDistribute() or assignLocal() first!"
	);
	return m_mesh;
}

/**
 * @brief Returns a mutable reference to the mesh managed by the mesh partition.
 * 
 * @return MeshType& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::mesh() -> MeshType& {
	/* delegate to the const overload so the initialisation check lives in
	 * one place, then cast the constness away again */
	const auto& constSelf { static_cast<const StructMeshPart<MeshType>&>(*this) };
	return const_cast<MeshType&>( constSelf.mesh() );
}


/**
 * @brief Returns the topological data of all neighbours at the specified
 * boundary.
 * 
 * @return const std::vector<OverlapSegment>&
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::neighbours( BoundaryLocation loc ) const
	-> const std::vector<OverlapSegment>&
{
	/* the boundary location enumerator doubles as the array index */
	using IdxT = typename decltype(m_neighbours)::size_type;
	return m_neighbours[ static_cast<IdxT>(loc) ];
}

template<typename MeshType>
auto StructMeshPart<MeshType>::neighbours( BoundaryLocation loc )
	-> std::vector<OverlapSegment>&
{
	/* mutable counterpart of the const overload above */
	using IdxT = typename decltype(m_neighbours)::size_type;
	return m_neighbours[ static_cast<IdxT>(loc) ];
}


/**
 * @brief Tells whether the partition touches the specified global boundary.
 * 
 * @param loc The boundary location to be queried.
 * @return true if the partition lies at that boundary.
 * @return false otherwise.
 * 
 * @see BoundaryLocation
 */
template<typename MeshType>
bool StructMeshPart<MeshType>::atBoundary(BoundaryLocation loc) const {
	switch (loc){
		case BoundaryLocation::bottom: return band() == 0;
		case BoundaryLocation::top:    return band() == nParts().y()-1;
		case BoundaryLocation::left:   return indexInBand() == 0;
		case BoundaryLocation::right:  return indexInBand() == nParts().x()-1;
		default:
			UNREACHABLE;
	}
}

/**
 * @brief Computes the offsets corresponding to oldSizes().
 * 
 * @return Array2Xi 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::getOldOffsets() const -> Array2Xi {
	return meshPartSizesToOffsets( m_nParts, m_oldSizes );
}

/**
 * @brief Sets the neighbour overlaps to correct initial values, depending
 * on the location of the partition within the global mesh.
 * 
 * @tparam addTopBottom If true, sets top and bottom neighbours. Do this, if
 * the mesh is distributed for the first time. For redistribution, set to false.
 * @param neighbours The m_neighbours data member of the partition.
 * @param meshSize Local mesh domain size in x- and y-direction.
 * @param nParts Number of partitions in topological x- and y-direction.
 * @param comm Communicator shared by all partitions.
 */
template<bool addTopBottom>
void setEqualSizeNeighbours(
	std::array<std::vector<OverlapSegment>, 4>& neighbours,
	const Ref<const Array2i>& meshSize,
	const Ref<const Array2i>& nParts,
	const MPI_Comm_H& comm
){
	/* replaces whatever neighbours are stored at loc with the single given
	 * segment; returns void because no caller uses a result (a deduced
	 * return of the vector would copy it needlessly) */
	auto overwriteNeighbours = [&](
		BoundaryLocation loc, OverlapSegment&& neighbour
	) -> void {
		std::vector<OverlapSegment>& vec {neighbours[static_cast<int>(loc)]};
		vec.clear();
		vec.push_back( std::move(neighbour) );
	};

	/* everyone but the leftmost column has a left neighbour of full height */
	if (comm.rank() % nParts.x() != 0)
		overwriteNeighbours(
			BoundaryLocation::left,
			{ static_cast<int>( comm.rank() - 1 ), meshSize.y(), 0, 0 }
		);
	/* everyone but the rightmost column has a right neighbour */
	if (comm.rank() % nParts.x() != nParts.x() - 1)
		overwriteNeighbours(
			BoundaryLocation::right,
			{ static_cast<int>( comm.rank() + 1 ), meshSize.y(), 0, 0 }
		);
	if constexpr (addTopBottom){
		/* everyone but the bottom band has a bottom neighbour of full width */
		if (comm.rank() / nParts.x() != 0)
			overwriteNeighbours(
				BoundaryLocation::bottom,
				{ static_cast<int>( comm.rank()-nParts.x() ), meshSize.x(),0,0 }
			);
		/* everyone but the top band has a top neighbour */
		if (comm.rank() / nParts.x() != nParts.y() - 1)
			overwriteNeighbours(
				BoundaryLocation::top,
				{ static_cast<int>( comm.rank()+nParts.x() ), meshSize.x(),0,0 }
			);
	}
}

/**
 * @brief Distributes the passed mesh from the communicator's master to all
 * partitions using close to equal sizes.
 * 
 * @param mesh Full mesh; only meaningful on the master rank.
 * 
 * Broadcasts the global mesh size and ghost-layer count from master, computes
 * the partition grid and this rank's offset, distributes the mesh data and
 * the boundary patches, and finally initialises the neighbour topology.
 * All ranks must call this function (it performs collective communication).
 * 
 * @see assignLocal()
 */
template<typename MeshType>
void StructMeshPart<MeshType>::assignAndDistribute(
	MeshType&& mesh
){
	/* only master holds the full mesh; its size is broadcast to the rest */
	if ( m_comm.isMaster() ){
		m_meshSizeGlobal = { mesh.nCellsX(), mesh.nCellsY() };
	}
	
	MPI_Bcast_H(m_meshSizeGlobal, m_comm, m_comm.master(), false);

	/* all mesh types need the number of ghost cell layers from master */
	Index ng { m_comm.isMaster() ? mesh.nGhostLayers() : 0 };
	MPI_Bcast_H( ng, m_comm, m_comm.master() );

	/* partition grid and this rank's position in it */
	m_nParts = structMeshNParts( m_meshSizeGlobal, m_comm.size() );
	m_offset = structMeshInitOffset( m_meshSizeGlobal, m_nParts, m_comm );

	m_mesh = distributeMesh( mesh, m_comm, m_meshSizeGlobal,
		structMeshPartSize(m_meshSizeGlobal, m_nParts, m_comm), m_offset, ng
	);

	/* now the boundary patches can get added to the mesh, but they still refer
	 * to the dimensions of the global mesh, so they must be distributed */
	m_mesh.addBoundaryPatches( mesh.moveBoundaryPatches() );
	distributeBoundaryPatches( *this, m_mesh );

	/* set left and right neighbours to equal sizes with executing partition */
	setEqualSizeNeighbours<true>( m_neighbours, meshSize(), nParts(), comm() );
}


/**
 * @brief Takes an existing local mesh and its offset, e.g. from a previous run.
 * 
 * @param mesh Local (per-rank) mesh.
 * @param offset Offset of the local mesh in cells (x, y) relative to the
 * global bottom left domain cell.
 * 
 * Uses the mesh size and the provided offset to calculate all necessary data,
 * such as the number of partitions in both dimensions, the global mesh size
 * and the neighbours. All ranks must call this function (collective
 * communication). Aborts if the partitions do not form a regular grid or
 * their sizes are inconsistent with the deduced global mesh size.
 * 
 * @see assignAndDistribute()
 */
template<typename MeshType>
void StructMeshPart<MeshType>::assignLocal(
	MeshType&& mesh,
	Array2i offset
){
	m_mesh = std::move(mesh);
	m_offset = std::move(offset);

	Array2Xi allSizes { 2, m_comm.size() };
	Array2Xi allOffsets;
	allOffsets.resizeLike(allSizes);

	/* gather every partition's local size and offset on all ranks */
	MPI_Allgather(
		meshSize().data(), meshSize().size(), MPI_INDEX,
		allSizes.data(),   meshSize().size(), MPI_INDEX, m_comm
	);

	MPI_Allgather(
		m_offset.data()  , m_offset.size(), MPI_INDEX,
		allOffsets.data(), m_offset.size(), MPI_INDEX, m_comm
	);

	/* the next rank after rank 0 whose x-offset is zero is equal to the number
	 * of ranks per band. If no such rank exists, all partitions lie in a
	 * single band, so default to the communicator size instead of leaving
	 * m_nParts.x() unset. */
	m_nParts.x() = m_comm.size();
	for ( Index i{1}; i<m_comm.size(); ++i ){
		if ( allOffsets(0,i) == 0 ){
			m_nParts.x() = i;
			break;
		}
	}

	if ( m_comm.size() % m_nParts.x() != 0 ){
		std::cerr <<
		"Number of mesh partitions in x- and y-direction does not form grid: "
		<< m_comm.size() << " partitions in bands of " << m_nParts.x()
		<< "\n";
		MPI_Abort(m_comm, MPI_ERR_OTHER);
	}

	m_nParts.y() = m_comm.size() / m_nParts.x();

	/* global size in x: sum over the first band; in y: sum over the first
	 * column of bands */
	m_meshSizeGlobal.x() = allSizes.row(0).head( m_nParts.x() ).sum();
	m_meshSizeGlobal.y() = allSizes.row(1).reshaped(
			m_nParts.x(), m_nParts.y()
		).row(0).sum();
	
	/* these tests should be executed by redistributeMesh as well */
	if ( (allSizes.row(0).reshaped( m_nParts.x(), m_nParts.y() )
			.colwise().sum() != m_meshSizeGlobal.x() ).any() ){
		std::cerr <<
		"Sum of mesh sizes in x-direction does not equal global mesh size "
		"in x-direction!\n";
		MPI_Abort(m_comm, MPI_ERR_OTHER);
	}

	{
		/* all partitions in one band must share the same y-size */
		auto ySizes = allSizes.row(1).reshaped( m_nParts.x(), m_nParts.y() );
		for ( auto col : ySizes.colwise() ){
			if ( (col != col.head(1)[0]).any() ){
				std::cerr <<
				"Mesh partitions within the same band have different sizes "
				"in y-direction!\n";
				MPI_Abort(m_comm, MPI_ERR_OTHER);
			}
		}
	}

	findNeighbours( allSizes, allOffsets );

	m_boundaryPatchLimits = allgatherBoundaryPatchLimits( *this );
}



/* Assertion helpers shared by rebalanceMeshSizes() and printLoad();
 * they are #undef'ed again right after those two functions. */

/* load holds one entry per rank; only checked on master */
#define ASSERT_LOAD_SIZE_PER_NODE \
	assert( ( !m_comm.isMaster() || load.size() == m_comm.size() ) && \
		"Per-node load data length must equal number of nodes!" \
	);

/* load holds one entry per local domain cell */
#define ASSERT_LOAD_SIZE_PER_CELL \
	assert( load.size() == mesh().nDomainCells() && \
		"Per-cell local load data length must equal local number of \
		domain cells!" \
	);

/* load holds one entry per global domain cell (recomposed on master) */
#define ASSERT_LOAD_SIZE_PER_CELL_RECOMP \
	assert( load.size() == m_meshSizeGlobal.prod() && \
		"Recomposed per-cell load data length must equal global \
		number of domain cells!" \
	);

/* catch-all for PartLoad enumerators without an implementation */
#define ASSERT_FALSE_PART_LOAD_TYPE \
	assert( false && "Supplied PartLoad value not implemented!" );

/**
 * @brief Rebalances mesh sizes according to the provided load.
 * 
 * @param loadType Determines how the entries of @p load are interpreted.
 * @param load Load data; required length depends on @p loadType (see the
 * ASSERT_LOAD_SIZE_* checks).
 * 
 * The new sizes are computed on master (for PartLoad::perCell the call is
 * made on every rank), broadcast to all ranks and then applied via
 * redistributeMesh(). All ranks must call this function (collective
 * communication).
 */
template<typename MeshType>
void StructMeshPart<MeshType>::rebalanceMeshSizes(
	PartLoad loadType,
	const Ref<const ArrayXs>& load
){
	Array2Xi newSizes;
	switch (loadType){
		case PartLoad::perNode: {
			ASSERT_LOAD_SIZE_PER_NODE
			Array2Xi allSizes { gatherMeshSizes() };
			/* only master computes the new sizes; the broadcast below
			 * propagates them */
			if ( m_comm.isMaster() )
				newSizes = balancedStructMeshParts(
					m_meshSizeGlobal, m_nParts, allSizes, load
				);
			break;
		}
		case PartLoad::perCell: {
			ASSERT_LOAD_SIZE_PER_CELL
			newSizes = balancedStructMeshParts( *this, load );
			break;
		}
		case PartLoad::perCellRecomp:
			if ( m_comm.isMaster() ){
				ASSERT_LOAD_SIZE_PER_CELL_RECOMP
				newSizes = balancedStructMeshParts(
					m_meshSizeGlobal, m_nParts, load
				);
			}
			break;
		default:
			ASSERT_FALSE_PART_LOAD_TYPE
			break;
	}
	/* non-master ranks may still hold an empty array; size it so the
	 * broadcast can write into it */
	if ( !m_comm.isMaster() ) newSizes.resize( 2, m_comm.size() );
	MPI_Bcast(
		newSizes.data(), newSizes.size(), MPI_INDEX, m_comm.master(), m_comm
	);
	redistributeMesh(newSizes);
}


/**
 * @brief Prints the load and mean deficit per node. Careful, for anything but
 * per-node values, this function is expensive!
 * 
 * @param loadType Determines how the entries of @p load are interpreted.
 * @param load Load data; required length depends on @p loadType (see the
 * ASSERT_LOAD_SIZE_* checks).
 * 
 * Loads are normalised so that the busiest rank reads 100 %. Only master
 * prints; all ranks must still call this function for the load types that
 * involve collective communication (perCell, perCellRecomp).
 */
template<typename MeshType>
void StructMeshPart<MeshType>::printLoad(
	PartLoad loadType,
	const Ref<const ArrayXs>& load
) const {
	ArrayXs relativeLoads;
	switch(loadType){
		case PartLoad::perNode: {
			/* already one value per rank; just use it on master */
			if ( m_comm.isMaster() ){
				ASSERT_LOAD_SIZE_PER_NODE
				relativeLoads = load;
			}
			break;
		}
		case PartLoad::perCell: {
			ASSERT_LOAD_SIZE_PER_CELL
			/* reduce local per-cell loads to one scalar per rank on master */
			relativeLoads =
				MPI_Gather_H<ArrayXs>( load.sum(), m_comm );
			break;
		}
		case PartLoad::perCellRecomp: {
			Array2Xi meshSizes { gatherMeshSizes() };
			if ( m_comm.isMaster() ){
				ASSERT_LOAD_SIZE_PER_CELL_RECOMP
				relativeLoads.resize( m_comm.size() );
				Array2Xi offsets { meshPartSizesToOffsets(m_nParts,meshSizes) };
				/* sum each rank's block out of the recomposed global array */
				for ( int i{0}; i<m_comm.size(); ++i ){
					relativeLoads(i) = load.reshaped(
						m_meshSizeGlobal.x(),
						m_meshSizeGlobal.y()
					).block(
						offsets.col(i).x(), offsets.col(i).y(),
						meshSizes.col(i).x(), meshSizes.col(i).y()
					).sum();
				}
			}
			break;
		}
		default:
			ASSERT_FALSE_PART_LOAD_TYPE
			break;
	}
	if ( m_comm.isMaster() ){
		/* normalise: maximum load becomes 100 % */
		relativeLoads *= 100 / relativeLoads.maxCoeff();
		std::stringstream ss;
		ss << std::fixed << std::setprecision(0);
		/* prints a left-aligned title followed by right-aligned values */
		auto printRow = [&]( const auto& title, const auto& content ){
			int titleWidth {9};
			int contentWidth {4};
			ss.width(titleWidth);
			ss << std::left << title;
			for ( int i{0}; i<content.size(); ++i ){
				ss.width(contentWidth);
				ss << std::right << content(i);
			}
			ss << "\n";
		};
		printRow( "Rank [-]:", ArrayXi::LinSpaced(m_comm.size(),0,m_comm.size()-1) );
		printRow( "Load [%]:", relativeLoads );
		printRow( "Mean deficit [%]:", ArrayNs<1>{ (-relativeLoads + 100).mean() } );
		std::cout << ss.str();
	}
}

#undef ASSERT_LOAD_SIZE_PER_NODE
#undef ASSERT_LOAD_SIZE_PER_CELL
#undef ASSERT_LOAD_SIZE_PER_CELL_RECOMP
#undef ASSERT_FALSE_PART_LOAD_TYPE


/**
 * @brief Takes new mesh sizes and redistributes the meshes accordingly.
 * 
 * @param newSizes One size column (x, y) per rank; must be consistent with
 * the global mesh size on both axes (checked by assertions).
 * 
 * Relies on hms::redistributeMesh() for the mesh type to redistribute the
 * internal mesh data. Stores the current sizes in m_oldSizes (available via
 * oldSizes()) before redistributing. If the new sizes equal the old ones,
 * warns and returns early without touching the mesh.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::redistributeMesh(
	const Ref<const Array2Xi>& newSizes
){
	assert( (
		newSizes.row(0).reshaped( m_nParts.x(), m_nParts.y() ).colwise().sum() 
		== meshSizeGlobal().x() ).all() &&
		"New mesh sizes in x do not add up to global mesh size in x!"
	);
	assert( (
		newSizes.row(1).reshaped( m_nParts.x(), m_nParts.y() ).rowwise().sum() 
		== meshSizeGlobal().y() ).all() &&
		"New mesh sizes in y do not add up to global mesh size in y!"
	);

	Array2Xi oldSizes = gatherMeshSizes();

	m_oldSizes = std::move(oldSizes);

	/* no need to redistribute meshes that have not changed in size */
	if ( (m_oldSizes == newSizes).all() ){
		if ( m_comm.isMaster() )
			std::cerr << "WARNING: New mesh sizes are equal to old sizes! "
				"Check condition for redistribution! "
				"Returning early...\n";
		return;
	}

	Array2Xi newOffsets { meshPartSizesToOffsets(m_nParts, newSizes) };
	m_offset = newOffsets.col( m_comm.rank() );

	/* store old boundary patches, because the mesh they belong to may get
	 * destroyed by the call to hms::redistributeMesh below. */
	std::vector<BoundaryPatch> patches {
		m_mesh.moveBoundaryPatches()
	};

	hms::redistributeMesh(
		m_mesh, m_comm,
		Array2i{ indexInBand(), band() }, nParts(),
		newSizes.col( m_comm.rank() ), m_offset,
		m_oldSizes, getOldOffsets()
	);
	/* now the boundary patches can get added back to the mesh, but they still
	 * refer to the dimensions of the old mesh, so they must be redistributed */
	m_mesh.addBoundaryPatches( std::move(patches) );
	redistributeBoundaryPatches( *this, m_mesh );

	findNeighbours( newSizes, newOffsets );
}


/**
 * @brief Distributes a field whose values are available on the master node
 * to all nodes according to their mesh sizes.
 * 
 * @param field The field to be distributed; receives its local values and
 * the partition's mesh.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::distributeField( Field<MeshType>& field ) const {
	/* cut the master's full value array into per-rank pieces */
	auto localValues { decomposeArray( field.values(), *this, true ) };
	field.values() = std::move(localValues);

	/* the new mesh must be assigned */
	field.mesh(m_mesh);
}


/**
 * @brief Redistributes a field according to the partition's old and new mesh
 * sizes.
 * 
 * @param field The field to be redistributed.
 * @param recomposeFirst Recomposing the field first has higher memory
 * requirements and preliminary testing shows it to be slower. Test on your own
 * cluster to be sure.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::redistributeField(
	Field<MeshType>& field,
	bool recomposeFirst
) const {
	if (recomposeFirst)
		redistributeField_centr(field);
	else
		redistributeField_distr(field);
}

/**
 * @brief Redistributes a field without gathering it first.
 * 
 * @param field The field to be redistributed; must still be sized for the
 * old mesh (checked by assertion).
 * 
 * Each rank determines which parts of its new mesh overlap with the old
 * partitioning and fetches those values directly from the owning ranks.
 * Requires that redistributeMesh() ran before, so that oldSizes() is
 * available.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::redistributeField_distr(
	Field<MeshType>& field
) const {
	assert(m_oldSizes.size() != 0 &&
		"Cannot redistribute field without knowing previous mesh sizes!");
	
	Array2Xi oldOffsets { getOldOffsets() };
	
	/* one layer of ghost cells assumed for the field layout */
	static constexpr Index ng {1};
	
	assert( field.values().cols() ==
		(m_oldSizes.col( comm().rank() ) + 2*ng).prod() &&
		"Field size does not match old mesh size!"
	);

	/* find out from which partitions which data must be retrieved */
	std::vector<OverlapBlock> blocksToGet {
		overlappingBlocks(
			Array2i{ indexInBand(), band() }, nParts(),
			offset(), meshSize(), oldOffsets, oldSizes(), ng
		)
	};

	/* an example for how to use the bulk redistribution function instead */
	/*
	std::vector<Field<MeshType>> fields;
	fields.emplace_back( std::move(field) );
	redistributeFields( fields, oldOffsets, blocksToGet );
	field = std::move( fields[0] );
	*/

	field.values() = redistributeValues(
		field.values(), meshSize(), offset(), ng, oldSizes(), oldOffsets,
		blocksToGet, comm()
	);

	/* finally, the new mesh must be assigned */
	field.mesh(m_mesh);
}


/**
 * @brief Redistributes a field by gathering the data on the master node and
 * then having each node access that data according to their new mesh sizes.
 * 
 * @param field The field to be redistributed.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::redistributeField_centr(
	Field<MeshType>& field
) const {
	/* gather the whole field onto master, then split it according to the
	 * new partitioning */
	ArrayXXs gathered { recomposeArray( field.values(), *this, true, true ) };
	field.values() = decomposeArray( gathered, *this, true );
}

/**
 * @brief Given that old offsets and overlapping blocks only need to be
 * calculated once, this version may be slightly faster.
 * 
 * @tparam MeshType 
 * @param fields a std::vector of fields
 * @param oldOffsets Offsets of all partitions before redistribution,
 * e.g. from getOldOffsets().
 * @param overlappingBlocks Pre-computed overlaps between the old and new
 * partitioning (see redistributeField_distr() for how to obtain them).
 */
template<typename MeshType>
void StructMeshPart<MeshType>::redistributeFields(
	std::vector<Field<MeshType>>& fields,
	const Ref<const Array2Xi>& oldOffsets,
	const std::vector<OverlapBlock>& overlappingBlocks
) const {
	assert(m_oldSizes.size() != 0 &&
		"Cannot redistribute field without knowing previous mesh sizes!");
	
	/* one layer of ghost cells assumed for the field layout */
	static constexpr Index ng {1};
	
	for ( Field<MeshType>& field : fields ){
		assert( field.values().cols() ==
			(m_oldSizes.col( comm().rank() ) + 2*ng).prod() &&
			"Field size does not match old mesh size!"
		);
		field.values() = redistributeValues(
			field.values(), meshSize(), offset(), ng, oldSizes(), oldOffsets,
			overlappingBlocks, comm()
		);
		/* finally, the new mesh must be assigned */
		field.mesh(m_mesh);
	}
}


/**
 * @brief Retrieves domain values from all neighbouring fields to update
 * ghost cell values.
 * 
 * @param field The field whose ghost cells are to be filled; must be
 * assigned to the mesh owned by this partition (checked by assertion).
 * 
 * Uses MPI one-sided communication: each rank exposes a copy of its
 * boundary-adjacent domain values in an MPI window, and neighbours fetch
 * from it between fences. Left/right exchanges happen first, then
 * top/bottom. All ranks must call this function.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::getNeighbourValues( Field<MeshType>& field ) const {
	assert( &( mesh() ) == &( field.mesh() ) &&
		"Mesh assigned to field not the same as mesh owned by partition!" );
	
	/* a couple of convenience definitions; one ghost layer is assumed */
	static constexpr Index ng {1};
	Index nx { meshSize().x() };
	Index ny { meshSize().y() };
	Index nComponents { field.values().rows() };
	/* view of the values as a 2D layout including ghost cells ... */
	auto reshapedField {
		field.values().reshaped(nComponents*(nx+2*ng), ny+2*ng)
	};
	/* ... and the interior (domain-only) block of that view */
	auto reshapedDomainField {
		reshapedField.block(nComponents*ng, ng, nComponents*nx, ny)
		// field.domainValues().reshaped(nComponents*nx,ny)
	};
	
	/* ownBoundary is to hold a copy of a rank's own domain values at the 
	 * boundary and neighbourBoundary is to receive that data from a 
	 * neighbouring partition. */
	ArrayXXs ownBoundary, neighbourBoundary;

	/* these communications are always strictly 2-way */
	MPI_Info info { MPI_Info_no_locks() };

	MPI_Win_H win;

	/* performs one fence-bounded exchange for the given boundary */
	auto getNeighbour = [&]( BoundaryLocation loc ){
		bool isSource {true}, isTopBottom {false};
		Index begDomain {0}, begGhost {0};
		/* those who have a neighbour at the opposite of the current position
		 * make the values of the respective domain cells at the boundary
		 * available */
		switch(loc){
			case BoundaryLocation::bottom:
				isSource = !neighbours(BoundaryLocation::top).empty();
				isTopBottom = true;
				begDomain = ny-ng;
				break;
			case BoundaryLocation::left:
				isSource = !neighbours(BoundaryLocation::right).empty();
				begDomain = nComponents*(nx-ng);
				break;
			case BoundaryLocation::right:
				isSource = !neighbours(BoundaryLocation::left).empty();
				begGhost = nComponents*(ng+nx);
				break;
			case BoundaryLocation::top:
				isSource = !neighbours(BoundaryLocation::bottom).empty();
				isTopBottom = true;
				begGhost = ng+ny;
				break;
			default:
				UNREACHABLE;
		}

		if ( isSource ){
			if ( isTopBottom )
				ownBoundary = reshapedDomainField.block(
					0, begDomain, nComponents*nx, ng
				).transpose().eval();
				/* transposing it allows all neighbours to access a contiguous
				 * memory segment */
			else
				ownBoundary = reshapedDomainField.block(
					begDomain, 0, nComponents*ng, ny
				).eval();
		}

		win.fence();
		for ( const OverlapSegment& nbr : neighbours(loc) ){
			win.get(
				neighbourBoundary,
				nComponents*ng * nbr.ownBeg,
				nComponents*ng * nbr.size,
				nbr.rank,
				nComponents*ng * nbr.beg
			);
		}
		/* fence at the end because values in the target windows are going to
		 * be overwritten */
		win.fence();

		/* copy the fetched values into this rank's ghost cells */
		if ( !neighbours(loc).empty() ){
			if (loc==BoundaryLocation::bottom || loc==BoundaryLocation::top){
				reshapedField.block(
					nComponents*ng, begGhost, nComponents*nx, ng
				) = neighbourBoundary.transpose();
			}
			else
				reshapedField.block( begGhost, ng, nComponents*ng, ny )
					= neighbourBoundary;
		}
	};

	/* it's important to define these sizes here for all ranks, even if they
	 * might not get values assigned to them yet. That is because resizing can
	 * change their memory address, which must stay the same when reusing an
	 * MPI_Win */
	ownBoundary.resize(nComponents*ng, ny);
	neighbourBoundary.resizeLike(ownBoundary);
	win.create(ownBoundary, comm(), info);

	getNeighbour(BoundaryLocation::right);
	getNeighbour(BoundaryLocation::left);
	
	/* top/bottom boundaries have a different extent, so a new window with
	 * matching buffer sizes is created */
	ownBoundary.resize(ng, nComponents*nx);
	neighbourBoundary.resizeLike(ownBoundary);
	win.create(ownBoundary, comm(), info);

	getNeighbour(BoundaryLocation::top);
	getNeighbour(BoundaryLocation::bottom);
}


/**
 * @brief Sets the boundary patch limits (i.e. first and last edge of each
 * patch, indexed based on the global mesh size) to the provided values.
 * 
 * @param limits The limits to take ownership of.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::boundaryPatchLimits( Array2Xi&& limits ){
	assert( m_boundaryPatchLimits.size() == 0 &&
		"Global boundary patch limits already set! If you have a good reason to set "
		"them again, use StructMeshPart::clearBoundaryPatchLimits() first." );

	m_boundaryPatchLimits = std::move( limits );
}

/**
 * @brief Read access to the boundary patch limits, i.e. the first and last
 * edge index of each patch, indexed based on the global mesh size.
 * 
 * @return const Array2Xi& 
 */
template<typename MeshType>
auto StructMeshPart<MeshType>::boundaryPatchLimits() const -> const Array2Xi& {
	return m_boundaryPatchLimits;
}

/**
 * @brief Resets the boundary patch limits to a size of zero.
 * 
 * Afterwards boundaryPatchLimits( Array2Xi&& ) may be called again.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::clearBoundaryPatchLimits(){
	/* keep the two rows, drop all columns */
	m_boundaryPatchLimits.resize(NoChange, 0);
}

/**
 * @brief Finds the partition's neighbours and stores the relevant topological
 * data in the data members.
 * 
 * @param sizes One size column (x, y) per rank.
 * @param offsets One offset column (x, y) per rank.
 * 
 * Left and right neighbours are reset to the equal-size defaults; top and
 * bottom neighbours are found by overlap tests against the adjacent bands.
 */
template<typename MeshType>
void StructMeshPart<MeshType>::findNeighbours(
	const Ref<const Array2Xi>& sizes,
	const Ref<const Array2Xi>& offsets
){
	auto findNeighboursH = [&](
		int startRank,
		int rank,
		std::vector<OverlapSegment>& neighbours
	){
		/* all previous neighbours must be removed, because neighbours may
		 * change with each mesh redistribution */
		neighbours.clear();
		/* for two mesh partitions to be neighbours, their range in
		 * x-direction has to overlap. So if the OverlapSegment's mesh begins
		 * to the left of the current mesh, then its right end has to be
		 * to the right of the current mesh's left end:
		 *  _ _ _              _ _ _
		 * |_|_|_|_ _         |_|_|_|_ _ _
		 *     |_|_|_|              |_|_|_|
		 * -> neighbours!    ->not neighbours!
		 * 
		 * Or, if it begins to the right of the current mesh's left
		 * end, then it mustn't begin too far to the right, i.e. further
		 * than the current mesh's right end. (same as above, but 
		 * mirrored left to right) */
		for (int i{startRank}; i<startRank + m_nParts.x(); ++i){
			if ( auto limits = overlap_usingSizes(
				offsets(0,rank), sizes(0,rank),
				offsets(0,i), sizes(0,i)
			) ){
				neighbours.push_back( {
					i,
					limits.value()[1] - limits.value()[0] + 1,
					limits.value()[0] - offsets(0,i),
					limits.value()[0] - offsets(0,rank)
				});
			}
		}
	};
	/* set left and right neighbours */
	setEqualSizeNeighbours<false>( m_neighbours,meshSize(),nParts(),comm() );

	/* no bottom neighbours for bottom row */
	if ( m_comm.rank() >= m_nParts.x() ){
		/* startRank is the lowest rank of the next lower row of nodes */
		Index startRank { (( m_comm.rank() / m_nParts.x() ) - 1)*m_nParts.x() };
		findNeighboursH(
			startRank, m_comm.rank(), neighbours(BoundaryLocation::bottom)
		);
	}
	/* no top neighbours for top row */
	if ( m_comm.rank() < m_nParts.prod() - m_nParts.x() ){
		/* startRank is the lowest rank of the next higher row of nodes */
		Index startRank { (( m_comm.rank() / m_nParts.x() ) + 1)*m_nParts.x() };
		findNeighboursH(
			startRank, m_comm.rank(), neighbours(BoundaryLocation::top)
		);
	}
}


/**
 * @brief For internal use.
 * 
 * Gathers the current local mesh sizes of every rank onto all nodes.
 */
template<typename MeshType>
Array2Xi StructMeshPart<MeshType>::gatherMeshSizes() const {
	/* two entries (x, y) per rank */
	Array2Xi allSizes { 2, m_comm.size() };
	MPI_Allgather(
		meshSize().data(), 2, MPI_INDEX,
		allSizes.data(),   2, MPI_INDEX,
		m_comm
	);
	return allSizes;
}

/* explicit instantiations for all supported mesh types */
template class StructMeshPart<UniMesh>;
template class StructMeshPart<RectMesh>;
template class StructMeshPart<StructMesh>;

}

#undef ASSERT_MESHTYPE