#include "customReductions.hpp"
#include "bi_traversal.hpp"
#include "bi_timing.hpp"

namespace hms::swe
{

/* Block-wise "integrated" time loop for the shallow-water solver.
 *
 * A single OpenMP parallel region spans the entire simulation: each thread
 * keeps working on the same mesh blocks across all time steps instead of the
 * team being forked and joined once per step. Synchronization between steps
 * is done with worksharing constructs and explicit barriers inside the region.
 *
 * @tparam MeshType  mesh type used by the solver
 * @tparam Exporter  callable, invoked with no arguments to write output
 * @param solver   solver holding mesh, fields, settings and the time object
 * @param time     time controller; must be the very same object as
 *                 solver.time() (see assert below)
 * @param exporter output functor, called at write times and once at the end
 *
 * NOTE(review): OMP_PRAGMA presumably expands to `#pragma omp ...` (or to
 * nothing in serial builds) — confirm against the project's OpenMP wrapper.
 */
template<typename MeshType, typename Exporter>
void integratedTimeLoop(
	SWESolver<MeshType>& solver,
	Time& time,
	Exporter&& exporter
){
	/* the loop advances `time` directly; a different Time object than the
	 * solver's would silently desynchronize solver and loop state */
	assert( &( solver.time() ) == &time );
	const MeshType& mesh { solver.mesh() };
	log::logger& logger { mesh.logger() };

	/* Given that each thread executes the complete time loop for each block,
	 * the end results (new state for d and v) must be stored globally in
	 * new variables. The values will later be swapped with the old ones.
	 * Because we want to maintain boundary conditions, we don't swap fields,
	 * but only field values. */
	Index nFieldCols { hms::nFieldCols(mesh) };
	ArrayXXs
		dNew (1, nFieldCols ),  /* water depth, 1 row per cell column */
		vNew (2, nFieldCols );  /* velocity, 2 components per cell column */

	/* corner ghost values are never touched by the loop below, so they are
	 * initialized once in the new-state arrays as well */
	setCorners(dNew, mesh);
	setCorners(vNew, mesh);

	Field<MeshType>
		& d { solver.d() },
		& v { solver.v() };

	/* Some boundary conditions don't require updating, so the values are only
	 * set once. Because of that, we copy them once to the new state, so that
	 * both contain those values and swapping old and new state doesn't alter
	 * them. */
	auto copyGhosts = [](
		const ArrayXXs& from, ArrayXXs& to, const ArrayXXi& cells
	){
		/* copy all rows (components) for the listed ghost-cell columns */
		to(hms::all, cells.reshaped()) = from(hms::all, cells.reshaped());
	};
	/* NOTE(review): a worksharing loop over a range-based for needs OpenMP
	 * 5.0 random-access iterator support — confirm the toolchain/spec level */
	OMP_PRAGMA( parallel for )
	for ( const BoundaryPatch& p : mesh.boundaryPatches() ){
		copyGhosts( d.values(), dNew, p.ghostCells() );
		copyGhosts( v.values(), vNew, p.ghostCells() );
	}

	/* To keep thread team and blocks intact, the time step calculation must
	 * be done last. However, we need a time step for the first loop iteration,
	 * so we calculate that one the old way. */
	if ( !time.useExplicitFirstStep() ){
		time.step( solver.calculateTimeStep() );
		logger.debug( "Time step length: {:.5f}s", time.step() );
	}

	/* the time step to use omp reduction on at the end; shared by the whole
	 * thread team and min-reduced once per loop iteration */
	scalar step { std::numeric_limits<scalar>::max() };

	/* precalculated indices for blockwise execution */
	IndexHelper ih { indexHelper(
		solver.settings().blocksizeX,
		solver.settings().blocksizeY,
		mesh
	) };

	/* check whether block size is 1x1,
	 * because outer layers are then treated differently
	 */
	bool singleCell { ih.blocks.sizeX * ih.blocks.sizeY == 1 };

	/* Total number of flux computations per step: every mesh edge once, plus
	 * the halo edges that neighboring blocks recompute redundantly — one
	 * extra edge row/column per block boundary in each direction.
	 * NOTE(review): formula assumes one redundant layer per block seam;
	 * confirm against indexHelper()'s block layout. */
	Index nFluxesComputed {
	  mesh.nEdges()
	+ ih.mesh.nx*(ih.blocks.nFullY + static_cast<bool>(ih.blocks.remSizeY) + 1)
	+ ih.mesh.ny*(ih.blocks.nFullX + static_cast<bool>(ih.blocks.remSizeX) + 1)
	};

	/* with 1x1 blocks every inner edge is shared (and thus computed) by
	 * exactly two cells; boundary edges are computed once */
	if ( singleCell )
		nFluxesComputed = 2 * mesh.nInnerEdges() + mesh.nBoundaryEdges();

	/* ratio of computed fluxes to actually needed fluxes (>= 1.0);
	 * quantifies the overhead of the blockwise scheme */
	double fluxRedundancy {
		static_cast<double>(nFluxesComputed) / mesh.nEdges()
	};

	// Index width { 1 + 2 * maxDecimalDigits(
	// 	ih.mesh.nx, ih.mesh.ny,
	// 	ih.blocks.sizeX, ih.blocks.sizeY
	// ) };

	/* fixed column width for the aligned description labels below */
	int width_desc {18};

	logger.info(
		"Executing block-wise integrated time loop with:"
		"\n{0}{1}"
		"\n{0}{2}"
		"\n{0}{3}"
		"\n{0}{4}"
		"\n{0}{5}"
		"\n{0}{6}",
		indent(),
		fmt::format("{:<{}}{}x{}", "mesh size:",
			width_desc, ih.mesh.nx, ih.mesh.ny),
		fmt::format("{:<{}}{}x{}", "block size:",
			width_desc, ih.blocks.sizeX, ih.blocks.sizeY),
		fmt::format("{:<{}}{}x{}", "full size blocks:",
			width_desc, ih.blocks.nFullX, ih.blocks.nFullY),
		fmt::format("{:<{}}{}x{}", "remainder size:",
			width_desc, ih.blocks.remSizeX, ih.blocks.remSizeY),
		fmt::format("cell layers with lower order scheme: {}",
			ih.mesh.nOuterLayers),
		fmt::format("redundancy factor: {:.3f}", fluxRedundancy)
	);

	/* for steps involving all threads simultaneously, like reductions */
	Timer t;

	// Index nEdgesTotal {0};
	
	/* average timings */
	ArrayXs execTimings { ExecTimings::Zero() };
	#ifdef BLOCKWISE_INTEGRATED_TIME_FLUXES
	ArrayXs fluxTimings { FluxTimings::Zero() };
	#endif

	/* one parallel region for the whole simulation; everything below runs
	 * once per thread unless restricted by a worksharing construct */
	OMP_PRAGMA( parallel )
	{
		/* per-thread working state (private new-state views, timings,
		 * per-thread time step candidate) */
		SolverThread threadData {solver, dNew, vNew, ih};

		/* Debug-log helpers, only defined inside the parallel region.
		 * CAUTION: BI_LOG_DEBUG expands to TWO statements (log + timing),
		 * not wrapped in do{}while(0) — it must only be used inside braces,
		 * which all call sites below honor. */
		#ifdef BI_NO_DEBUG_LOG
			#define BI_LOG_DEBUG(...)
		#else
			#define BI_LOG_DEBUG(...) \
				logger.debug( __VA_ARGS__ ); \
				setTiming(threadData.execTimings(), LoopStep::output, t);
		#endif

		/* same as BI_LOG_DEBUG, but executed by one thread only (no barrier) */
		#ifdef BI_NO_DEBUG_LOG
			#define BI_LOG_DEBUG_SINGLE(...)
		#else
			#define BI_LOG_DEBUG_SINGLE(...) \
			OMP_PRAGMA( single nowait ) \
			{ \
				BI_LOG_DEBUG(__VA_ARGS__) \
			}
		#endif

		/* time loop */
		while ( !time.atEnd() ){

			t.reset();
			/* output: one thread prints progress while another exports;
			 * implicit barrier at the end of the sections construct */
			if( time.atWriteTime() ){
				OMP_PRAGMA( sections ) // nowait
				{
					OMP_PRAGMA( section )
					time.print( logger );
					OMP_PRAGMA( section )
					exporter();
				}
			}

			/* if these two messages aren't executed by the same thread,
			 * their order is always reversed */
			#ifndef BI_NO_DEBUG_LOG
			OMP_PRAGMA(single nowait){
				logger.debug(
					"At time step #{}, time: {:.5f}s, time step length: {:.5f}s",
					time.nSteps(), time.current(), time.step() );
				logger.debug("Updating boundary fields...");
			}
			#endif
			setTiming(threadData.execTimings(), LoopStep::output, t);

			/* boundary update: collapse(2) flattens the fields x patches
			 * iteration space so it distributes evenly over the team */
			OMP_PRAGMA( for schedule(static) collapse(2) )
			for ( unsigned j = 0; j<solver.fields().size(); ++j ){
			for ( unsigned i = 0; i<mesh.boundaryPatches().size(); ++i ){
				Field<MeshType>& field { solver.fields()[j] };
				solver.updateBoundary(field, i);
			}}
			setTiming(threadData.execTimings(), LoopStep::boundary, t);

			/* source field update */
			BI_LOG_DEBUG_SINGLE("Updating source terms...");
			solver.updateSourceFields();
			setTiming(threadData.execTimings(), LoopStep::updateSources, t);

			/* block execution — barrier ensures boundary and source updates
			 * from all threads are visible before fluxes are computed */
			OMP_PRAGMA(barrier)
			if (singleCell){
				BI_LOG_DEBUG_SINGLE("Computing cells...");
				computeCells(threadData, ih);
			} else {
				BI_LOG_DEBUG_SINGLE("Computing blocks...");
				computeBlocks( threadData, ih );
			}
			/* block-computation timing is recorded inside the compute calls;
			 * restart the shared-step timer here */
			t.reset();

			/* implicit barrier at the end of the sections construct keeps the
			 * next iteration from starting before swap/advance are done */
			OMP_PRAGMA( sections )
			{
				/* swap new and old state variables */
				OMP_PRAGMA(section){
					BI_LOG_DEBUG("Swapping old and new state variables...")
					d.values().swap(dNew);
					v.values().swap(vNew);
				}
				OMP_PRAGMA(section){
					BI_LOG_DEBUG("Advancing time...");
					time.advance();
					BI_LOG_DEBUG("New time: {:.5f}s", time.current() );
				}
			}
			setTiming(threadData.execTimings(), LoopStep::other, t);

			/* no further step needed once the end time is reached */
			if ( time.atEnd() )
				break;

			/* time step reduction: each thread folds its own per-thread
			 * candidate into the shared `step` via min-reduction.
			 * NOTE(review): this idiom assumes every thread executes at least
			 * one of the num_threads iterations (true for the default static
			 * schedule with one iteration per thread) — TODO confirm */
			BI_LOG_DEBUG_SINGLE("Parallel reduction of time step...");
			OMP_PRAGMA( for reduction (min:step) )
			for (int i=0; i<omp_get_num_threads(); ++i)
				step = std::min( step, threadData.timestep() );

			OMP_PRAGMA( single ) // has implicit barrier at the end
			{
				time.step(step);
				BI_LOG_DEBUG( "New time step length: {:.5f}s", time.step() );
				/* reset the shared accumulator for the next reduction */
				step = std::numeric_limits<scalar>::max();
			}

			threadData.resetTimestep();
			setTiming(threadData.execTimings(), LoopStep::other, t);
		}
		// for (int i=0; i<omp_get_num_threads(); ++i){
		// 	if ( i==omp_get_thread_num() ){
		// 		std::cout
		// 			<< "Thread #" << i <<
		// 			", nEdges: " << threadData.nEdges << std::endl;
		// 	}
		// 	OMP_PRAGMA( barrier )
		// }
		/* sum timings up, then average.
		 * NOTE(review): reduction over an Eigen array needs a user-declared
		 * reduction — presumably provided by customReductions.hpp; verify */
		OMP_PRAGMA(for reduction(+:execTimings) )
		for (int i=0; i<omp_get_num_threads(); ++i){
			execTimings += threadData.execTimings();
		}

		#ifdef BLOCKWISE_INTEGRATED_TIME_FLUXES
		OMP_PRAGMA( for reduction(+:fluxTimings) )
		for (int i=0; i<omp_get_num_threads(); ++i){
			fluxTimings += threadData.fluxTimings;
		}
		#endif

		/* final output and timing report, master thread only.
		 * NOTE(review): `master` has no implicit barrier, so in debug builds
		 * the ghost-copy loop below may run concurrently with this export —
		 * verify the exporter does not read ghost cells, or add a barrier */
		OMP_PRAGMA( master )
		{
			time.print( logger );
			exporter();
			execTimings /= omp_get_num_threads();
			hms::printExecTimings( execTimings );
			
			// std::cout << "nEdgesTotal: " << nEdgesTotal << "\n";

			#ifdef BLOCKWISE_INTEGRATED_TIME_FLUXES
				fluxTimings /= omp_get_num_threads();
				hms::printFluxTimings( fluxTimings );
			#endif
		}

		/* after switch, ghost values are one step behind.
		 * Copy it to allow for testing */
		#ifndef NDEBUG
		/* note: this BI_LOG_DEBUG runs on every thread (not inside single) */
		BI_LOG_DEBUG("Copying ghost values to current time step...")
		OMP_PRAGMA(for)
		for ( const BoundaryPatch& p : mesh.boundaryPatches() ){
			copyGhosts( dNew, d.values(), p.ghostCells() );
			copyGhosts( vNew, v.values(), p.ghostCells() );
		}
		#endif

		/* the debug macros are scoped to this parallel region only */
		#undef BI_LOG_DEBUG
		#undef BI_LOG_DEBUG_SINGLE
	}
}

} // namespace hms::swe
