#pragma once

#include "stdafx.h"

#include <algorithm>
#include <tuple>
#include <vector>

#pragma region Reduce

//
// Reduction contains client and server codes to perform reduction of vectors
// distributed across a number of processes, whether remote or local.
//
template<typename T>
class Reduction : public Process
{
public:
	static Process *Factory() { return new Reduction; }

	//
	// The client interface. One of the segments will be processed locally, the
	// rest by the processes in the specified group.
	//
	// Parameters:
	//     code   - the reduction operator
	//     group  - the identifiers of the processes that are to do the work.
	//     data   - the input vector
	//     result - the output scalar
	//
	static bool Reduce(ReductionCode code, const PID::Group &group, std::vector<T> data, T &result)
	{
		__int32 procCount = (__int32)group.size()+1;
		__int32 elemCount = (__int32)data.size();

		if (procCount < 1) return false;

		__int32 perProcElements = elemCount / procCount;
		if (procCount * perProcElements != elemCount)
			perProcElements += 1;

		size_t elementsLeft = elemCount;
		auto begin = data.begin();
		auto pid = group.begin();

		Process::Collector<T> results;

		// Capture the segment we'll be processing locally.

		auto end = begin;
		__int32 j = 0;
		for (; j < perProcElements && elementsLeft > 0; j++, end++) elementsLeft--;

		std::vector<T> segment(begin, end);
		
		begin = end;

		for (__int32 i = 1; i < procCount; i++, pid++)
		{
			auto end = begin;
			__int32 j = 0;
			for (; j < perProcElements && elementsLeft > 0; j++, end++) elementsLeft--;

			std::vector<T> tmp(begin, end);

			(*pid).Send(std::make_tuple((PID)results, code, i*perProcElements, tmp));

			begin = end;
		}

		std::vector<T> resvec;

		resvec.push_back(_reduce(code, segment, 0));

		for (__int32 i = 1; i < procCount; i++)
		{
			resvec.push_back(results);
		}

		result = _reduce(code, resvec, 0);

		return true;
	}

protected:
	//
	// The server code driving the individual segment reductions.
	//
	void Execute()
	{
		Forward<std::tuple<PID, ReductionCode, __int32, std::vector<T>>>([=](std::tuple<PID, ReductionCode, __int32, std::vector<T>> data)
		{ 
			PID returnTo = std::get<0>(data);
			ReductionCode code = std::get<1>(data);
			__int32 offset = std::get<2>(data);
			std::vector<T> vector = std::get<3>(data);

			returnTo.Send(_reduce(code, vector, offset));
		});
	}

private:
	//
	// The actual reduction logic, invoked by both the client and server code
	//
	static T _reduce(ReductionCode code, std::vector<T> vector, __int32 offset)
	{
		T result =
			(code == ::Sum) ? (T) 0 :
			(code == ::Product) ? (T) 1 : vector[0];

		for (auto pos = vector.begin(); pos != vector.end(); pos++)
		{
			switch (code)
			{
			case ::Sum:  result += *pos; break;
			case ::Product: result *= *pos; break;
			case ::Max:  if (*pos > result) result = *pos; break;
			case ::Min:  if (*pos < result) result = *pos; break;
			}
		}

		return result;
	}
};
#pragma endregion

#pragma region Prefix Scan
//
// PrefixScan contains client and server codes to perform prefix scan of vectors
// distributed across a number of processes, whether remote or local.
//
// Note: for right now, the scan operator is understood to be '+'
//
template<typename T>
class PrefixScan : public Process
{
public:
	static Process *Factory() { return new PrefixScan; }

	//
	// The client interface. One of the segments will be processed locally, the
	// rest by the processes in the specified group.
	//
	// Parameters:
	//     group  - the identifiers of the processes that are to do the work.
	//     data   - the input vector
	//     result - the output vector: the running sums of 'data', appended
	//              in segment (i.e. original element) order
	//
	// NOTE(review): assumes 'data' is non-empty -- an empty input reads
	// segment[0] below. Likewise, if data.size() < group.size()+1 some workers
	// receive empty segments and will read segment[0] in Execute(). Confirm
	// that callers guarantee data.size() >= group.size()+1.
	//
	static bool Scan(PID::Group &group, std::vector<T> data, std::vector<T> &result)
	{
		// One segment per worker, plus the segment this process keeps.
		__int32 procCount = (__int32)group.size() + 1;
		__int32 elemCount = (__int32)data.size();

		if (procCount < 1) return false;

		// Ceiling of elemCount/procCount: the per-segment element budget.
		__int32 perProcElements = elemCount / procCount;
		if (procCount * perProcElements != elemCount)
			perProcElements += 1;

		size_t elementsLeft = elemCount;
		auto begin = data.begin();
		auto pid = group.begin();

		// Correlator slot i receives worker i's scanned segment back; slot 0
		// goes unused because segment 0 is processed locally.
		// NOTE(review): the raw new[] leaks if anything below throws -- no
		// RAII owner for the array until the delete[] at the end.
		Process::Correlator<std::vector<T>> *results = new Process::Correlator<std::vector<T>>[procCount];

		// Capture the segment we'll be processing locally.

		auto end = begin;
		__int32 j = 0;
		for (; j < perProcElements && elementsLeft > 0; j++, end++) elementsLeft--;

		std::vector<T> segment(begin, end);
		
		begin = end;

		// Hand the remaining segments to the workers. Each worker i gets its
		// reply-to correlator, the whole group (so it can exchange running
		// totals with its peers), its segment index, and its slice of data.
		for (__int32 i = 1; i < procCount; i++, pid++)
		{
			auto end = begin;
			__int32 j = 0;
			for (; j < perProcElements && elementsLeft > 0; j++, end++) elementsLeft--;

			std::vector<T> tmp(begin, end);

			(*pid).Send(std::make_tuple((PID)results[i], group, i, tmp));

			begin = end;
		}

		// Now, do some of the work ourselves.

		// Running-sum scan of the local (first) segment straight into 'result'.
		T total = segment[0];
		result.push_back(total);
		for(size_t i = 1; i < segment.size(); i++)
		{
			total += segment[i];
			result.push_back(total);
		}

		// Forward the local results to all workers.

		// Every worker's segment comes after ours, so each of them needs our
		// segment total as part of its scan base.
		for (auto pos = group.begin(); pos != group.end(); pos++)
		{
			pos->Send(total);
		}

		// Get back to collecting results from the other workers

		// Scanned segments are appended in index order, so 'result' ends up
		// parallel to the input vector.
		for (__int32 i = 1; i < procCount; i++)
		{
			// NOTE(review): 'replyTo' is never used afterwards; if the (PID)
			// conversion on the correlator is side-effect free this line is
			// dead code -- verify against Process::Correlator before removing.
			PID replyTo = (PID) results[i];
			std::vector<T> segment = (std::vector<T>) results[i];

			for (size_t j = 0; j < segment.size(); j++)
			{
				result.push_back(segment[j]);
			}
		}

		delete [] results;

		return true;
	}

protected:
	//
	// The server code driving the individual segment scans.
	//
	// Protocol, as read from the code below:
	//   1. route loose T messages (peer totals) into the 'totals' buffer;
	//   2. accept one work item: (reply-to, group, segment index, segment);
	//   3. send this segment's sum to every later worker in the group;
	//   4. receive exactly 'index' totals from the earlier processes (the
	//      client plus workers 1..index-1) and accumulate them as the base;
	//   5. scan the segment on top of that base and send it back.
	//
	void Execute()
	{
		// Divert incoming plain-T messages (peer totals) into 'totals'.
		Forward<T>((Concurrency::ITarget<T> *)&totals);

		std::tuple<PID, PID::Group, __int32, std::vector<T>> data;
		Accept(data);

		{ 
		    PID returnTo = std::get<0>(data);
			PID::Group procs = std::get<1>(data);
			__int32 index = std::get<2>(data);
			std::vector<T> segment = std::get<3>(data);

			// Sum of this worker's segment, before any base is applied.
			// NOTE(review): assumes a non-empty segment (reads segment[0]).
			T total = segment[0];
			for(size_t i = 1; i < segment.size(); i++)
				total += segment[i];

			// procs[index..] handle the later segments; they all need this
			// segment's sum folded into their base.
			for (size_t i = index; i < procs.size(); i++)
			{
				procs[i].Send(total);
			}
		
			// Accumulate the 'index' totals owed to us by earlier processes.
			total = 0;
			for (int i = 0; i < index; i++)
			{
				T t =	Concurrency::receive(totals);
				total += t;
			}

			// In-place prefix scan of the segment, offset by the base total.
			for(size_t i = 0; i < segment.size(); i++)
			{
				total += segment[i];
				segment[i] = total;
			}
			
			returnTo.Send(segment);
		}
	}

private:
	// Buffer through which peer workers deliver their segment totals.
	Concurrency::unbounded_buffer<T> totals;
};
#pragma endregion
