#ifndef ANIMAL_HEADER
#define ANIMAL_HEADER

#include <vector>

using namespace std;

class Environment;

// A learning agent living inside an Environment. Holds the per-animal
// learning state (Q table, minimax policy/value tables, last state/action/
// reward) plus class-wide (static) algorithm selection and hyperparameters
// shared by every Animal instance.
//
// NOTE(review): this class user-declares a destructor while owning raw
// arrays (q, pi, v, c — presumably allocated by init()), but declares no
// copy constructor / copy assignment. Copying an Animal would therefore
// share the raw pointers and double-free on destruction (Rule of Three).
// Deleting the copy operations here could break existing callers, so it is
// only flagged — confirm no caller copies, then delete them in a follow-up.
//
// NOTE(review): std::vector is qualified explicitly below so this header no
// longer depends on the file-scope `using namespace std;` above, which
// should eventually be removed from this header entirely.
class Animal {
	public:
		// Constructs an animal inside the given environment.
		// The pointer is stored non-owning; the environment must outlive
		// the animal.
		Animal( Environment * newEnv );
		// Destructor — presumably releases the tables allocated by init();
		// TODO confirm in the .cpp.
		~Animal();
		
		// Prints the class-wide learning parameters (static — shared by
		// all animals).
		static void printParameters();
		// Prints Q-related information for this animal; the meaning of
		// `a` is not visible from this header — confirm against the .cpp.
		void printQ( int a );
	
		// Bookkeeping for how many agents share the environment
		// (affects state encoding — see stateConverter).
		static void increaseNumberOfAgents();
		static void setNumberOfAgents( int n );
		// Encodes a multi-agent state vector into a single integer index
		// (used to address the flat q/pi/v arrays).
		static int stateConverter( std::vector<int> state );

		// Selects the algorithm used by every animal for moving/learning
		// (e.g. random / Q-learning / minimax-Q / hill climbing, judging
		// by the private setObservation* helpers — confirm the encoding
		// of `a` against the .cpp).
		static void setAlgorithm( int a );
		static int getAlgorithm();
				
		// Overrides the animal's remembered last state.
		// ( Useful for resetting the animals in the map )
		void setLastState( int state );
				
		// Class-wide policy/learning hyperparameter setters.
		static void setParameters ();
		static void setAlpha( double a );
		
		// Allocates the Q and PI matrices in memory
		void init();

		// Resets the learning tables and the accumulated reward;
		// `input` is presumably the initial Q value — TODO confirm.
		void resetLearning( double input );

		// Learning-rate schedule: decay and inspection.
		static void decayAlpha();
		static double getAlpha();
		
		// Returns the desired move of the animal under the currently
		// selected algorithm/policy.
		int move();

		// Feeds the animal its observation after a step: the observed
		// state and reward, plus (for minimax-style algorithms) the
		// opponent's move.
		void setObservation(int obsState, int obsReward, int opponentMove = 0);
		
		// Returns whether the last action taken by the animal was
		// exploratory (as opposed to greedy).
		bool isActionExploratory();
				
		// Marks the last state as terminal; `opponentMove` is only
		// relevant for minimax-style algorithms.
		void setTerminalState( int opponentMove = 0 );

		// Returns the total reward collected so far.
		int getReward();
	private:
		// FUNCTIONS

		// Action-selection policies (one is chosen by selectedAlgorithm).
		int randomPolicy();
		int eGreedyQPolicy();
		int ePolicy();
		
		// Per-algorithm observation/update handlers dispatched from
		// setObservation().
		void setObservationQLearning( int obsState, int obsReward );
		void setObservationMinimaxQLearning( int obsState, int obsReward, int opponentMove );
		void setObservationHillClimbing( int obsState, int obsReward );
		// Solves the linear program for the minimax policy — TODO confirm
		// in the .cpp.
		std::vector<double> linearSolve();
		

		// Reinitializes the Q table to qValue.
		void resetQ( double qValue );
		// Reinitializes the policy (pi) and value (v) tables.
		void resetPiVC( double pv, double vv );
		
		// VARIABLES

		// The environment containing the animal (non-owning).
		Environment * ownEnv;

		// The number of agents inside the environment, and the number of
		// encoded states (class-wide).
		static int agentsNum;
		static int maxStates;

		// The currently used algorithm for moving/learning; the integer
		// encoding matches setAlgorithm()/getAlgorithm().
		static int selectedAlgorithm;

		// Whether the last action was exploratory (reported by
		// isActionExploratory(), also used for printing).
		bool exploratory;
		
		// Q matrix (flat array, indexed via stateConverter) and the
		// default Q value.
		double * q;
		double qValue;

		// Policy (pi), value (v) and count (c) tables for minimax.
		double * pi, * v;
		int * c;

		// Class-wide learning hyperparameters: learning rate (alpha, with
		// its decay and backup), discount (gamma), exploration (epsilon),
		// and dw/dl — meaning not visible from this header, presumably
		// win/lose step sizes for hill climbing (WoLF-style) — TODO confirm.
		static double alpha, gamma, epsilon, decay, alpha_backup, dw, dl;
		
		// Per-animal memory of the last transition.
		int lastState, actionTaken, reward;
};

#endif


