/*
 * ParallelWorker.h
 *
 *  Created on: Jan 9, 2013
 *      Author: fs
 */

#ifndef PARALLELWORKER_H_
#define PARALLELWORKER_H_

#include <cmath>
#include <vector>

#include <mpi.h>

#include "DataTypes.h"
#include "GenCalc.h"
#include "MPIDataTypes.h"

class ParallelWorker
{
	Configuration* conf;
	GenCalc* calc;
	data* datas;


public:
	ParallelWorker(
			Configuration* conf,
			GenCalc* calc,
			data* datas):	conf(conf), calc(calc), datas(datas) {};

	virtual ~ParallelWorker();

	void work(int threadCount)
	{
		MPIDataTypes mpiTypes;
		int sendSize = conf->populationCount / threadCount;
		mpiIndividuum * subPopulation = new mpiIndividuum[sendSize];
		double * subFitness = new double[sendSize];
		bool stop = false;


		while (!stop)
		{

			// přijmout práci

			MPI_Scatter(NULL, sendSize, mpiTypes.mpiIndiviuum, subPopulation, sendSize, mpiTypes.mpiIndiviuum, 0, MPI_COMM_WORLD );
			for (int i = 0; i < sendSize; i++)
			{
				subFitness[i] = fitness(subPopulation[i]);
			}
			// poslat zpět do rootu
			MPI_Gather(subFitness, sendSize, MPI_DOUBLE, NULL, sendSize, MPI_DOUBLE, 0,	MPI_COMM_WORLD );
			MPI_Bcast(&stop, 1, MPI_LOGICAL, 0, MPI_COMM_WORLD );
		}

		delete[] subPopulation;
		delete[] subFitness;
	}


private:

	double fitness(mpiIndividuum ind) const
	{
			double res = 0;

			double p = ind.p;
			double cg = ind.cg;
			double c = ind.c;
			double dt = ind.dt;
			double k = ind.k;

			for (int i = 0; i < datas->length; i++)
			{
				double ecg = datas->items[i].ecg;
				if (calc->checkTimeCondition(k, dt, ecg))
				{
					res = WORSE_FITTNES;
					return res;
				}

				double left_side = p * datas->items[i].bg+ cg * datas->items[i].bg * ecg + c;
				double index_time = datas->items[i].time + dt + k * dt * ecg;
				double ig;

				if (calc->getIg(index_time * (24 * 60 * 60), &ig) == 1)
				{
					double right_side = ig;
					res += fabs(right_side - left_side);
				}
			}

		return res;
	}
};

#endif /* PARALLELWORKER_H_ */
