#ifndef DYNAMIC_PROGRAMMING_HEADER
#define DYNAMIC_PROGRAMMING_HEADER

#include "animalwrapper.h"

// Forward declaration: this header only uses AnimalWrapper through pointers.
class AnimalWrapper;

// Tabular dynamic-programming solver for a predator/prey grid-world MDP.
// States are indexed by the four coordinates (predX, predY, preyX, preyY),
// each in [0, GRID_SIZE); policies additionally index one of NUM_ACTIONS
// actions per state.
class DynamicProgramming {
	public:
		// Side length of the (square) grid; each coordinate is 0..GRID_SIZE-1.
		static constexpr int GRID_SIZE = 11;
		// Number of actions available per state (presumably N/S/E/W/stay —
		// TODO confirm against the .cpp implementation).
		static constexpr int NUM_ACTIONS = 5;

		DynamicProgramming();
		// Policy evaluator, returns number of iterations on the states' values
		int policyEvaluation( double discount, bool iteration );
		// Value Iteration, returns number of iterations on the states' values
		int valueIteration( double discount );
		// Policy iterator, returns best policy for the environment
		int policyIteration( double discount );
		// Used to fetch particular state values
		double getStateValue(int a, int b, int c, int d);
		// Used to fetch particular state-action policy values
		double getStatePolicy(int a, int b, int c, int d, int e);
	private:
		// Resets stateValues and stateValuesBackup to zero matrices
		void resetStateValues();
		// Reverts to random policy
		void resetStatePolicy();
		// Copies the content of stateValues into stateValuesBackup
		void updateStateValuesBackup();
		// Computes the sum of the values of the states that the predator can reach if it follows predAction
		double getNextStatesSum( int predX, int predY, int preyX, int preyY, int predAction, double discount );
		// Evaluator of a particular state value following a random policy
		double stateValuePE(int predX, int predY, int preyX, int preyY, double discount );
		// Evaluator of a particular state value following a greedy policy
		double stateValueVI(int predX, int predY, int preyX, int preyY, double discount );
		// Reports the probability that the prey moves in a given way from the agents' position
		double getPreyActionProbability( AnimalWrapper * pred, AnimalWrapper * prey, int preyMove );

		// Keeps track of all the state values, indexed [predX][predY][preyX][preyY]
		double stateValues[GRID_SIZE][GRID_SIZE][GRID_SIZE][GRID_SIZE];
		// State values backup, used for computations
		double stateValuesBackup[GRID_SIZE][GRID_SIZE][GRID_SIZE][GRID_SIZE];
		// Policy values, indexed [predX][predY][preyX][preyY][action]
		double statePolicy[GRID_SIZE][GRID_SIZE][GRID_SIZE][GRID_SIZE][NUM_ACTIONS];
};

#endif
