#include "structMeshPartitioning.hpp"
#include "util.hpp"
#include <algorithm>
#include <array>
#include <cassert>
#include <cmath>
#include <iomanip>
#include <iostream>
#include <stdexcept>
#include <vector>

namespace hms {

/**
 * @brief Computes all positive factors of a number in ascending order.
 *
 * @param num Number to factorise; must be positive.
 * @return Sorted vector of every divisor of num, always containing 1 and num.
 */
std::vector<int> factors(int num){
	assert( num > 0 && "Not intended for negative numbers!" );
	/* collect the divisors up to (and including) sqrt(num), ascending;
	 * 1 is always a divisor */
	std::vector<int> divisors {1};
	for (int d {2}; num / d >= d; ++d)
		if (num % d == 0)
			divisors.push_back(d);
	/* append the complementary divisors num/d in ascending order; a perfect
	 * square root would duplicate itself and is therefore skipped */
	for (std::size_t k {divisors.size()}; k-- > 0; ){
		const int cofactor { num / divisors[k] };
		if (cofactor != divisors[k])
			divisors.push_back(cofactor);
	}

	return divisors;
}

/**
 * @brief Rounds a real number to the nearest entry of a sorted factor list.
 *
 * @param num Value to round.
 * @param factors Ascending, non-empty list of candidate integers.
 * @return The factor closest to num; ties and out-of-range values resolve
 *         towards the smaller/nearer end of the list.
 */
int roundToFactor(double num, const std::vector<int>& factors){
	/* first factor that is not smaller than num */
	const auto pos { std::lower_bound(factors.cbegin(), factors.cend(), num) };
	if ( pos == factors.cend() )
		return factors.back();   // num exceeds every factor
	if ( pos == factors.cbegin() )
		return factors.front();  // num is at or below the smallest factor
	/* pick the closer of the two neighbours; on a tie the smaller one wins */
	const int above { *pos };
	const int below { pos[-1] };
	return (above - num) < (num - below) ? above : below;
}

/**
 * @brief Chooses a 2D partition grid a x b (a*b == nodes) whose aspect
 * ratio matches the global mesh as closely as possible.
 *
 * @param nCellsX Global number of cells in x-direction.
 * @param nCellsY Global number of cells in y-direction.
 * @param nodes Number of nodes/ranks to partition for.
 * @return Number of parts per direction, oriented like the mesh.
 * @throws std::runtime_error if any direction would get more parts than cells.
 */
Array2i structMeshNParts(Index nCellsX, Index nCellsY, int nodes)
{
	/* the "aspect ratio" n/m of the m x n global mesh */
	const double aspect { static_cast<double>(nCellsY)/nCellsX };
	/* For a partitioning p = a*b, the most compact parts of an m x n mesh
	 * occur when m/n = a/b, hence b = sqrt(p*n/m). b (not a) is derived so
	 * that the x-direction ends up with more parts, which is preferable for
	 * the load balancing mechanism. */
	const double bIdeal { std::sqrt(nodes*aspect) };
	/* a and b must be integers with a*b == nodes, so b is snapped to the
	 * nearest integer factor of nodes */
	const int b { roundToFactor( bIdeal, factors(nodes) ) };
	const int a { nodes / b };

	/* every node must receive at the very least one cell per direction */
	if ( a > nCellsX || b > nCellsY )
		throw std::runtime_error( "Too many nodes/ranks for global mesh size!");

	/* orient the partition grid like the mesh: the longer mesh direction
	 * gets the larger part count */
	const bool aligned {
		(nCellsX >= nCellsY && a >= b) ||
		(nCellsY >  nCellsX && b >  a)
	};
	return aligned ? Array2i{a,b} : Array2i{b,a};
}

/**
 * @brief Convenience overload taking the packed global mesh size.
 * @overload
 */
Array2i structMeshNParts( const Ref<const Array2i>& meshSizeGlobal, int nodes ){
	/* unpack and forward to the per-dimension overload */
	const auto nCellsX { meshSizeGlobal[0] };
	const auto nCellsY { meshSizeGlobal[1] };
	return structMeshNParts( nCellsX, nCellsY, nodes );
}


/**
 * @brief Computes the mesh part size (cells per direction) of the executing
 * rank for a uniform block partitioning.
 *
 * @param meshSizeGlobal Global mesh size in cells.
 * @param nParts Number of parts per direction; product must equal comm size.
 * @param comm Communicator handle providing rank and size.
 * @return Local part size; the last part per direction absorbs the
 *         division remainder.
 */
Array2i structMeshPartSize(
	const Ref<const Array2i>& meshSizeGlobal,
	const Ref<const Array2i>& nParts,
	const MPI_Comm_H& comm
){
	assert( nParts.prod() != 0 && nParts.prod() == comm.size() );
	/* cartesian position of this rank in the partition grid */
	const auto col { comm.rank() % nParts.x() };
	const auto row { comm.rank() / nParts.x() };
	/* every part gets the integer quotient; the last part in each direction
	 * additionally takes the remainder of the division */
	auto sizeX { meshSizeGlobal.x() / nParts.x() };
	auto sizeY { meshSizeGlobal.y() / nParts.y() };
	if ( col == nParts.x()-1 )
		sizeX += meshSizeGlobal.x() % nParts.x();
	if ( row == nParts.y()-1 )
		sizeY += meshSizeGlobal.y() % nParts.y();
	return { sizeX, sizeY };
}

/**
 * @brief Computes the global cell offset of the executing rank's part for a
 * uniform block partitioning.
 *
 * @param meshSizeGlobal Global mesh size in cells.
 * @param nParts Number of parts per direction; product must equal comm size.
 * @param comm Communicator handle providing rank and size.
 * @return Global (x,y) offset of the first cell owned by this rank.
 */
Array2i structMeshInitOffset(
	const Ref<const Array2i>& meshSizeGlobal,
	const Ref<const Array2i>& nParts,
	const MPI_Comm_H& comm
){
	assert( nParts.prod() == comm.size() );
	/* cartesian position of this rank in the partition grid */
	const auto col { comm.rank() % nParts.x() };
	const auto row { comm.rank() / nParts.x() };
	/* offsets use the quotient part size; the remainder lives only in the
	 * last part and never shifts earlier offsets */
	return {
		col * ( meshSizeGlobal.x() / nParts.x() ),
		row * ( meshSizeGlobal.y() / nParts.y() )
	};
}


/**
 * @brief Splits a 1D load array into nParts contiguous segments whose
 * partial sums approximate the average load per segment.
 *
 * @param loads Per-index load values (e.g. summed loads per mesh row/column).
 * @param nParts Number of contiguous segments to create.
 * @return Array of nParts segment lengths; the entries add up to loads.size().
 * @throws std::runtime_error if loads.size() < 2*nParts, or if no valid
 *         split point can be found for a segment.
 */
ArrayXi equalPartialSums(
	const Ref<const ArrayXs>& loads,
	Index nParts
){
	if ( loads.size() < 2*nParts )
		throw std::runtime_error(
			"Load array length is smaller than two times the number of parts -> "
			"equalPartialSums may produce zero-length segments!"
		);

	/* the target load is the total load divided by the available parts */
	scalar target { loads.sum()/nParts };
	ArrayXi parts (nParts);
	
	/* the first part starts at 0, so the virtual previous one ends at -1 */
	Index previousLastIndex {-1};
	for (Index part {0}; part < nParts-1; ++part){
		/* running sums just below and at/above the target, respectively */
		scalar partialSumLower {0};
		scalar partialSumUpper {0};
		/* start at first index that was not allocated to the previous part */
		Index i {previousLastIndex+1};
		/* grow the segment until its sum reaches the target; the very last
		 * array index is never consumed here, so the final part cannot end
		 * up empty */
		for (; i<loads.size()-1; ++i){
			partialSumUpper += loads(i);
			if ( partialSumUpper >= target )
				break;
			else
				partialSumLower = partialSumUpper;
		}
		if (i>0){
			/* segment end: keep index i if the overshooting sum is closer to
			 * the target, otherwise step one index back; approximate ties
			 * also favour the shorter segment */
			Index currentLastIndex {
				( hms::approxEqual(
					fabs(partialSumLower-target), fabs(partialSumUpper-target)
				) ||
				/* choose the one closer to the target */
				fabs(partialSumLower-target) < fabs(partialSumUpper-target) ) ?
				--i : i
			};
			parts(part) = currentLastIndex - previousLastIndex;
			previousLastIndex = currentLastIndex;
		}
		else
			/* i == 0 only happens when the very first load alone already
			 * reaches the target, i.e. no non-empty split is possible */
			throw std::runtime_error(
				"Could not build partial sums! "
				"Check load input and requested number of partitions!"
			);
	}
	/* the parts must add up to loads.size() */
	parts(nParts-1) = loads.size()-1 - previousLastIndex;

	return parts;
}


/**
 * @brief Converts per-part mesh sizes into per-part global cell offsets.
 *
 * @param nParts Number of parts per direction.
 * @param allSizes 2 x nParts.prod() array of part sizes, one column per rank,
 *        ordered x-major within each y-band (column-major partition grid).
 * @return Array shaped like allSizes holding each part's (x,y) cell offset.
 */
Array2Xi meshPartSizesToOffsets(
	const Ref<const Array2i>& nParts,
	const Ref<const Array2Xi>& allSizes
){
	assert( nParts.prod() == allSizes.cols() && "Size mismatch!" );

	Array2Xi offsets;
	offsets.resizeLike(allSizes);
	offsets = 0;

	/* reshaped views onto the partition grid: rows index x-parts within a
	 * band, columns index the y-bands */
	auto offsetsX { offsets.row(0).reshaped( nParts.x(), nParts.y() ) };
	auto offsetsY { offsets.row(1).reshaped( nParts.x(), nParts.y() ) };

	auto sizesX { allSizes.row(0).reshaped( nParts.x(), nParts.y() ) };
	auto sizesY { allSizes.row(1).reshaped( nParts.x(), nParts.y() ) };

	assert( ( sizesX.col(0).sum() == sizesX.colwise().sum() ).all() &&
		"The sizes in x per band must add up to the same global mesh size!");
	#ifndef NDEBUG
	for ( int i{0}; i<nParts.x(); ++i )
		assert( (sizesY.row(0) == sizesY.row(i) ).all() &&
		"The sizes in y must be constant within a band!" );
	#endif

	/* y-offset of band i is the cumulative height of the bands before it;
	 * it is constant for all x-parts of the band */
	for (Index i{1}; i<offsetsY.cols(); ++i){
		offsetsY.col(i) = sizesY.row(0).head(i).sum();
	}
	/* x-offset of the i-th part of each band is the cumulative width of the
	 * i preceding parts within that same band */
	for (Index i{1}; i<offsetsX.rows(); ++i){
		offsetsX.row(i) = sizesX.block( 0,0,i,sizesX.cols() )
			.colwise().sum();
	}
	return offsets;
}


/**
 * @brief For internal use. Is called by
 * balancedStructMeshParts(
 * 	const Ref<const Array2i>& meshSizeGlobal,
 * 	const Ref<const Array2i>& nParts,
 * 	const Ref<const ArrayXs>& load
 * ).
 * @overload
 * 
 * @param nParts The number of partitions in x- and y-dimension.
 * @param load The per-cell load data as an array shaped like the mesh.
 * @return 2 x nParts.prod() array of new part sizes, one column per rank.
 * 
 * Used for per-cell load data collected on one node, where the new part sizes
 * are calculated locally.
 */
Array2Xi balancedStructMeshParts(
	const Ref<const Array2i>& nParts,
	const Ref<const ArrayXXs>& load
){
	/* the first partitioning step is to divide the mesh vertically into bands
	 * with similar loads.
	 * In a column-major array, the (topological) x- and y-directions are
	 * represented by columns and rows, respectively. Therefore, the directions
	 * are somewhat flipped. Dividing the mesh vertically means dividing the
	 * load array into blocks of columns, not rows. */
	ArrayXs sums1 { load.colwise().sum() };
	ArrayXi bands { equalPartialSums( sums1, nParts.y() ) };

	/* the second partitioning step is to subdivide the bands into blocks with
	 * similar loads. The same orientation flip as above applies. */
	Array2Xi parts ( 2, nParts.prod() );

	/* walk over the bands, each spanning bands(i) columns starting at
	 * `begin`, and split each band's row-wise load sums into nParts.x()
	 * blocks */
	Index begin {0};
	for ( Index i{0}; i<nParts.y(); ++i ){
		ArrayXs sums2 {
			load.block( 0, begin, load.rows(), bands(i) ).rowwise().sum()
		};
		begin += bands(i);
		/* store the block widths of band i into the i-th grid column of the
		 * x-size row */
		parts.row(0).reshaped( nParts.x(), nParts.y() ).col(i) = 
			equalPartialSums( sums2, nParts.x() );
	}

	/* all parts of a band share the same height, so the band heights are
	 * broadcast across each grid row of the y-size row */
	parts.row(1).reshaped( nParts.x(), nParts.y() ).rowwise() =
		bands.transpose();
	
	return parts;
}

/**
 * @brief Computes balanced part sizes from a flat per-cell load vector.
 * @overload
 *
 * @param meshSizeGlobal Global mesh size in cells.
 * @param nParts Number of parts per direction.
 * @param load Flat (column-major) per-cell load data of the whole mesh.
 * @return 2 x nParts.prod() array of new part sizes.
 */
Array2Xi balancedStructMeshParts(
	const Ref<const Array2i>& meshSizeGlobal,
	const Ref<const Array2i>& nParts,
	const Ref<const ArrayXs>& load
){
	assert( meshSizeGlobal.prod() == load.size() &&
		"Global mesh size and load data size mismatch!"
	);

	/* view the flat load vector as a mesh-shaped 2D array without copying,
	 * then delegate to the mesh-shaped overload */
	Eigen::Map<const ArrayXXs> loadAsMesh (
		load.data(), meshSizeGlobal.x(), meshSizeGlobal.y()
	);
	return balancedStructMeshParts( nParts, loadAsMesh );
}


/**
 * @brief Computes new balanced part sizes from one aggregate load value per
 * partition, reconstructing per-row/per-column loads from the old layout.
 * @overload
 *
 * @param meshSizeGlobal Global mesh size in cells.
 * @param nParts Number of parts per direction.
 * @param meshSizes Current part sizes, one 2D size per column/rank.
 * @param loads One (uniform) load value per partition.
 * @return 2 x nParts.prod() array of new part sizes.
 */
Array2Xi balancedStructMeshParts(
	const Ref<const Array2i>& meshSizeGlobal,
	const Ref<const Array2i>& nParts,
	const Ref<const Array2Xi>& meshSizes,
	const Ref<const ArrayXs>& loads
){
	assert(
		meshSizes.cols() == nParts.prod() &&
		loads.size() == nParts.prod() &&
		"This function is only for uniform loads per partition!"
	);

	/* one load value per global mesh row, filled band by band below */
	ArrayXs rowLoads ( meshSizeGlobal.y() );

	/* partition-grid shaped views: part widths and part loads */
	auto sizeMatrRef {
		meshSizes.cast<scalar>().row(0).reshaped( nParts.x(), nParts.y() ).matrix()
	};
	auto loadMatrRef { loads.reshaped( nParts.x(), nParts.y() ).matrix() };
	/* diagonal entry j of S^T*L is sum_i width(i,j)*load(i,j), i.e. the
	 * total load of one row of cells in band j */
	ArrayXs loadPerBand {
		(sizeMatrRef.transpose() * loadMatrRef).diagonal()
	};
	
	/* band heights (constant across each band) taken from the first x-part */
	ArrayXi meshSizesY {
		meshSizes.row(1).reshaped( nParts.x(), nParts.y() ).row(0)
	};

	/* assign each mesh row the load of the band it belongs to */
	for (Index i{0}; i<nParts.y(); ++i){
		rowLoads.segment(
			meshSizesY.head(i).sum(),
			meshSizesY(i)
		) = loadPerBand(i);
	}
	/* new band heights balancing the per-row loads */
	ArrayXi newSizesY { equalPartialSums( rowLoads, nParts.y() ) };

	/* this table holds multipliers for load numbers. Each col represents a 
	 * new band and holds the number of rows of each old band which now lie
	 * inside the new band as coefficients, e.g.
	 * 
	 * old:           new:
	 * ____________   ____________
	 *    band 1      ___band_1___
	 * ____________      band 0
	 * ___band_0___   ____________
	 * 
	 * So the new band 0 has one row from the old band 0 and one from the old 
	 * band 1, and the new band 1 has zero rows from band 0 and one row from
	 * band 1. Therefore, bandwiseMult is now a 2x2 matrix with the entries
	 * 
	 *            |       new
	 * ___________|_band_0_|_band_1_
	 *     band 0 |  1     |  0
	 * old -------|--------|--------
	 *     band 1 |  1     |  1
	 *
	 * Because these bands span the whole width of the mesh, this table only
	 * needs to be created once.
	 * This forms a sparse matrix, but it's likely unnecessary to optimise here.
	 * */
	MatrixXXi bandwiseMult ( nParts.y(), nParts.y() );
	bandwiseMult.array() = 0;

	/* filling bandwiseMult */
	for (Index i{0}; i<nParts.y(); ++i){
		/* first mesh row of new band i */
		Index vOffNew = newSizesY.head(i).sum();
		for (Index j{0}; j<nParts.y(); ++j){
			/* first mesh row / one-past-last mesh row of old band j */
			Index vOff { meshSizesY.head(j).sum() };
			Index nextOff { vOff + meshSizesY(j) };
			if ( nextOff > vOffNew ){
				/* number of rows shared by new band i and old band j:
				 * min(ends) - max(starts) */
				bandwiseMult(j,i) += (
						std::min(vOffNew+newSizesY(i), nextOff) - 
						std::max(vOffNew, vOff)
					);
				/* old band j already covers the rest of new band i */
				if (vOffNew+newSizesY(i) <= nextOff ) break;
			}
		}
	}


	/* for each column of cells, we need to figure out to which partition/rank
	 * the cells of each band belong */
	ArrayXXi rankOffX ( nParts.x(), nParts.y() );

	/* x-offset of part i within band j: cumulative widths of its
	 * predecessors in that band */
	for (Index j{0}; j<nParts.y(); ++j){
		for (Index i{0}; i<nParts.x(); ++i){
			rankOffX(i,j) =
				meshSizes.row(0).reshaped( nParts.x(), nParts.y() )
					.col(j).head(i).sum();
		}
	}


	
	/* per-cell-column load contribution of each new band */
	ArrayXXs colLoads ( meshSizeGlobal.x(), nParts.y() );

	/* current x-part index per band while sweeping columns left to right */
	ArrayXi rankInBand ( ArrayXi::Zero( nParts.y() ) );

	for (Index i{0}; i<meshSizeGlobal.x(); ++i){
		/* per-old-band load of this cell column */
		VectorXs colLoad ( nParts.y() );

		for (Index j{0}; j<nParts.y(); ++j){
			/* adjust rank index per band */
			if (
				rankInBand(j)+1 < nParts.x() &&
				i == rankOffX(rankInBand(j)+1, j)
			)
				++rankInBand(j);

			/* get loads of one mesh cell column, while respecting the fact that
			 * mesh partitions of different bands can be shifted left/right */
			colLoad(j) = loadMatrRef(
				rankInBand(j),
				j
			);
		}
		/* redistribute the old per-band loads onto the new bands via the
		 * row-count multipliers */
		colLoads.row(i) = colLoad.transpose() * bandwiseMult.cast<scalar>();
	}

	Array2Xi newSizes { 2, nParts.prod() };

	/* balance each new band's column loads into nParts.x() part widths */
	Index i{-1};
	for ( auto col : newSizes.row(0).reshaped( nParts.x(), nParts.y() ).colwise() ){
		col = equalPartialSums( colLoads.col(++i), nParts.x() );
	}

	/* broadcast the new band heights across each grid row */
	newSizes.row(1).reshaped( nParts.x(), nParts.y() ).rowwise() =
		newSizesY.transpose();

	return newSizes;
}



/**
 * @brief Finds the index of the part whose offset range contains a cell.
 *
 * @param cell Global 1D cell index to locate (may lie before the first
 *        offset, e.g. for ghost cells).
 * @param offsets Ascending array of part start offsets.
 * @return Index of the last part whose offset is <= cell, clamped to 0 for
 *         cells lying before the first offset.
 */
int findPart( Index cell, const Ref<const ArrayXi>& offsets ){
	/* upper_bound yields the first offset > cell, so the containing part
	 * starts one position earlier; the max() clamps cells below offsets[0] */
	return std::max(
		Index{0},
		std::upper_bound(
			offsets.cbegin(), offsets.cend(), cell
		) - offsets.cbegin() - 1
	);
}


/**
 * @brief Computes where this rank's ghost-extended block overlaps the domain
 * area of one target rank.
 *
 * @param part 2D index of the executing rank in the partition grid.
 * @param targetPart 2D index of the target rank in the partition grid.
 * @param nParts Number of parts per direction.
 * @param ownOffset Global cell offset of the executing rank's part.
 * @param ownSize Size of the executing rank's part (without ghosts).
 * @param targetOffset Global cell offset of the target rank's part.
 * @param targetSize Size of the target rank's part (without ghosts).
 * @param ng Number of ghost cell layers.
 * @return One row per dimension holding the overlap interval (first and last
 *         index — see the size computation in overlappingBlocks), or an empty
 *         optional if there is no overlap in some dimension.
 */
std::optional<ArrayNNi<2,2>> overlappingBlock(
	const Ref<const Array2i>& part,
	const Ref<const Array2i>& targetPart,
	const Ref<const Array2i>& nParts,
	const Ref<const Array2i>& ownOffset,
	const Ref<const Array2i>& ownSize,
	const Ref<const Array2i>& targetOffset,
	const Ref<const Array2i>& targetSize,
	int ng
){
	ArrayNNi<2,2> overlaps;
	for ( int dim{0}; dim<2; ++dim ){
		/* we want the overlap with the domain area of the target rank, i.e.
		 * the values the target rank is actually responsible for. If both the
		 * executing and the target rank lie at a global boundary, then include
		 * boundary values, as then and only then, the target rank is
		 * responsible for those values as well. */
		Index addAtLBound {
			(part[dim] == 0 && targetPart[dim] == 0) ? ng : 0
		};
		Index addAtUBound {
			( nParts[dim] - 1 == part[dim] &&
			  nParts[dim] - 1 == targetPart[dim] ) ? ng : 0
		};
		/* intersect this rank's ghost-extended range with the target's
		 * (possibly boundary-extended) domain range */
		if ( std::optional<Array2i> overlap = hms::overlap_usingSizes(
			ownOffset[dim]-ng, ownSize[dim]+2*ng,
			targetOffset[dim] - addAtLBound,
			targetSize[dim] + addAtLBound + addAtUBound
		))
			overlaps.row(dim) = overlap.value().transpose();
		else {
			/* no overlap in this dimension means no overlap at all */
			return {};
		}
	}
	return overlaps;
}

/**
 * @brief Collects all target parts whose domain area overlaps this rank's
 * ghost-extended block, together with the overlap geometry.
 *
 * @param part 2D index of the executing rank in the partition grid.
 * @param nParts Number of parts per direction.
 * @param ownOffset Global cell offset of the executing rank's part.
 * @param ownSize Size of the executing rank's part (without ghosts).
 * @param targetOffsets Offsets of all parts, one column per rank.
 * @param targetSizes Sizes of all parts, one column per rank.
 * @param ng Number of ghost cell layers.
 * @return One OverlapBlock (rank, begin, size) per overlapping target part.
 */
std::vector<OverlapBlock> overlappingBlocks(
	const Ref<const Array2i>& part,
	const Ref<const Array2i>& nParts,
	const Ref<const Array2i>& ownOffset,
	const Ref<const Array2i>& ownSize,
	const Ref<const Array2Xi>& targetOffsets,
	const Ref<const Array2Xi>& targetSizes,
	int ng
){
	std::vector<OverlapBlock> blocks;

	/* band start offsets in y (constant within each band) */
	auto yOffsets {
		targetOffsets.row(1).reshaped( nParts.x(), nParts.y() ).row(0)
	};
	/* first band that can contain this rank's lower ghost rows */
	int bandBeg { findPart(ownOffset.y()-ng, yOffsets ) };
	
	for ( int j{bandBeg}; j<nParts.y(); ++j ){
		/* bands starting beyond the upper ghost rows cannot overlap */
		if (yOffsets(j) >= ownOffset.y() + ownSize.y() + ng) break;

		/* x-offsets of the parts within band j */
		auto xOffsets {
			targetOffsets.row(0).reshaped( nParts.x(), nParts.y() ).col(j)
		};
		/* first part of the band that can contain the left ghost columns */
		int partBeg { findPart(ownOffset.x()-ng, xOffsets) };

		for ( int i{partBeg}; i<nParts.x(); ++i ){
			/* parts starting beyond the right ghost columns cannot overlap */
			if (xOffsets(i) >= ownOffset.x() + ownSize.x() + ng) break;

			/* linear rank of part (i,j) in the x-major partition grid */
			Index rank { nParts.x()*j + i };
			if ( auto block = overlappingBlock(
				part, Array2i{i,j}, nParts, ownOffset, ownSize,
				targetOffsets.col(rank),
				targetSizes.col(rank),
				ng
			)){
				/* convert [first,last] interval to begin + size */
				Array2i beg { block.value().col(0).eval() };
				Array2i size { block.value().col(1) - beg + 1 };
				blocks.push_back( {
					rank,
					std::move(beg), std::move(size)
				} );
			} else {
				/* the scan bounds above guarantee an overlap exists */
				assert( false && "Criteria for overlap are faulty!" );
			}
		}
	}
	return blocks;
}



/**
 * @brief Fills newValues (new partitioning) from the values of the old
 * partitioning using one-sided MPI communication. Collective: all ranks of
 * comm must call this (the window creation and fences synchronize them).
 *
 * @param newValues Output array, components x cells (incl. ghosts, new size),
 *        column-major.
 * @param values This rank's old values, components x cells (incl. ghosts,
 *        old size), column-major.
 * @param meshSize New local mesh size (without ghosts).
 * @param offset New global cell offset of this rank's part.
 * @param ng Number of ghost cell layers.
 * @param oldSizes Old part sizes of all ranks, one column per rank.
 * @param oldOffsets Old part offsets of all ranks, one column per rank.
 * @param overlappingBlocks Blocks of the new local mesh overlapping old
 *        parts, as produced by overlappingBlocks().
 * @param comm Communicator handle.
 */
void redistributeValues(
	Ref<ArrayXXs> newValues,
	const Ref<const ArrayXXs>& values,
	const Ref<const Array2i>& meshSize,
	const Ref<const Array2i>& offset,
	Index ng,
	const Ref<const Array2Xi>& oldSizes,
	const Ref<const Array2Xi>& oldOffsets,
	const std::vector<OverlapBlock>& overlappingBlocks,
	const MPI_Comm_H& comm
){
	/* the linear index arithmetic below assumes column-major storage */
	assert( !values.IsRowMajor );
	assert( oldSizes.cols()==comm.size() && oldOffsets.cols()==comm.size() );
	assert( newValues.rows() == values.rows() );
	
	assert( values.cols() == (oldSizes.col( comm.rank() ) + 2*ng).prod() &&
"Number of columns in parameter \"values\" must match number of cells \
including ghost cells of old size." );
	assert( newValues.cols() == (meshSize + 2*ng).prod() &&
"Number of columns in parameter \"newValues\" must match number of cells \
including ghost cells of new size." );

	Index nx { meshSize.x() };
	Index ny { meshSize.y() };
	Index nComponents { values.rows() };

	/* provide old values for all ranks to access. The const_cast is safe,
	 * because only functions that do not change the values are used,
	 * i.e. MPI_Win_H::get(), not MPI_Win_H::put(). It is also necessary,
	 * because MPI_Win_create requires a non-const pointer. */
	MPI_Win_H win {
		const_cast<Ref<const ArrayXXs>&>(values), comm, MPI_INFO_NULL
	};

	win.fence();
	for ( const OverlapBlock& block : overlappingBlocks ){
		/* block start in local (ghost-including) coordinates, for the new
		 * mesh of this rank and the old mesh of the owning rank */
		Array2i ownBeg { block.begin - offset + ng };
		Array2i targetBeg { block.begin - oldOffsets.col(block.rank) + ng };
		/* the distinction between communication with self or others does 
		 * not seem to have a appreciable performance impact on local
		 * machine, but may be beneficial for actual distributed computation
		 **/
		if ( block.rank != comm.rank() )
			/* fetch the block row by row; the linear offsets address the
			 * first component of the first cell of row i in the respective
			 * ghost-extended column-major layout */
			for (Index i{0}; i<block.size.y(); ++i)
				win.get(
					newValues,
					nComponents * (
						(ownBeg.y() + i) * (nx+2*ng) + ownBeg.x()
					),
					block.size.x()*nComponents,
					block.rank,
					nComponents * (
						(targetBeg.y() + i) * (oldSizes(0, block.rank) + 2*ng)
						+ targetBeg.x()
					)
				);
		else
			/* local copy: view both arrays as (cells-per-row x rows) and
			 * copy the rectangular sub-block directly */
			newValues.reshaped( (nx+2*ng)*nComponents, ny+2*ng ).block(
				ownBeg.x() * nComponents,
				ownBeg.y(),
				block.size.x() * nComponents,
				block.size.y()
			) = values.reshaped(
				(oldSizes(0, block.rank) + 2*ng)*nComponents,
				oldSizes(1, block.rank) + 2*ng
			).block(
				targetBeg.x() * nComponents,
				targetBeg.y(),
				block.size.x() * nComponents,
				block.size.y()
			);
	}
	win.fence();
}


/**
 * @brief Allocating overload: returns a freshly sized array filled with the
 * redistributed values. Collective like the in-place overload it wraps.
 *
 * @return New value array of shape components x (new cells incl. ghosts).
 */
ArrayXXs redistributeValues(
	const Ref<const ArrayXXs>& values,
	const Ref<const Array2i>& meshSize,
	const Ref<const Array2i>& offset,
	Index ng,
	const Ref<const Array2Xi>& oldSizes,
	const Ref<const Array2Xi>& oldOffsets,
	const std::vector<OverlapBlock>& overlappingBlocks,
	const MPI_Comm_H& comm
){
	/* allocate the ghost-extended target array, then delegate to the
	 * in-place overload */
	const auto nColsNew { (meshSize + 2*ng).prod() };
	ArrayXXs redistributed ( values.rows(), nColsNew );
	redistributeValues(
		redistributed, values, meshSize, offset, ng,
		oldSizes, oldOffsets, overlappingBlocks, comm
	);
	return redistributed;
}


}