#ifndef PREDATOR_HEADER
#define PREDATOR_HEADER

#include "animal.h"
#include <vector>
#include <array>

class Environment;

// A learning predator agent that moves through an Environment and learns a
// policy over a 4-component discrete state using Q-learning, Sarsa or
// Monte Carlo methods (selected at runtime via setAlgorithm()).
//
// NOTE(review): the embedded tables below (q, offlineNumDen, timesSeen) make
// sizeof(Predator) well over a megabyte — allocate instances on the heap,
// never on the stack.
class Predator : public Animal {
        public:
                Predator( Environment * newEnv );

                // Policy/learning parameter setters:
                //   alpha   — learning rate (step size)
                //   gamma   — discount factor
                //   epsilon — exploration probability for the e-greedy policy
                //   temperature — softmax temperature
                void setAlpha( double a );
                void setGamma( double g );
                void setEpsilon( double e );
                void setTemperature( double t );

                // Debug helpers: dump the current parameters / the Q values
                // for action a.
                void printParameters();
                void printQ( int a );

                // Function to set the last state of the predator
                // ( Useful for resetting the predator in the map )
                void setLastState( int state1, int state2, int state3, int state4);

                // Resets the learning (Q entries re-initialized to qValue)
                // and the reward the predator got
                void resetQ( double qValue );

                // Gives the predator the observation about the world
                void setObservation( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward );

                // Returns the action chosen by the predator
                int move();
                // True when the most recent action was exploratory
                // (used for printing/diagnostics, see `exploratory` below)
                bool isActionExploratory();
                // Selects which learning algorithm drives the updates
                // (stored in `selectedAlgorithm`)
                void setAlgorithm(int p);

                // Sets the predator's last state as the terminal state
                void setTerminalState();

                // Returns the total reward collected by the predator
                int getReward();
        private:
                // Named table dimensions (previously duplicated magic numbers).
                //   STATE_DIM   — number of values each of the 4 state
                //                 components can take (presumably an 11x11
                //                 relative-position grid — confirm in the .cpp)
                //   NUM_ACTIONS — number of actions available to the predator
                //   MAX_EPISODE_LENGTH — count of precomputed powers of gamma
                static constexpr int STATE_DIM = 11;
                static constexpr int NUM_ACTIONS = 5;
                static constexpr int MAX_EPISODE_LENGTH = 250;

                // Implemented policies of the predator
                int randomPolicy();
                int eGreedyQPolicy();
                int softmaxQPolicy();

                // Per-algorithm observation/learning updates.
                // NOTE(review): an earlier comment said the environment calls
                // these, but they are private and there is no friend
                // declaration — presumably dispatched from setObservation();
                // confirm against the implementation file.
                void setObservationQLearning( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward );
                void setObservationSarsa( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward );
                void setObservationMonteCarlo( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward );

                // Final calculations for certain algorithm
                void terminalOnPolicyMonteCarlo();
                void terminalOffPolicyMonteCarlo();

                // The currently used algorithm for moving/learning
                int selectedAlgorithm;

                // Variable used for printing
                bool exploratory;

                // Q matrix: value estimate per (state1..state4, action)
                double q[STATE_DIM][STATE_DIM][STATE_DIM][STATE_DIM][NUM_ACTIONS];

                // E-greedy
                double alpha, gamma, epsilon;
                // Softmax
                double temperature;

                // Memory variables
                int lastState[4], actionTaken, reward;
                // Sarsa memory variable
                int sarsaAction;

                // Monte Carlo useful values
                std::vector<std::array<double, 7>> returns;
                // Offline Monte Carlo useful values
                std::vector<std::array<int, 5>> history;
                // Per (state, action): numerator/denominator pair for the
                // off-policy weighted-importance-sampling estimate
                double offlineNumDen[STATE_DIM][STATE_DIM][STATE_DIM][STATE_DIM][NUM_ACTIONS][2];
                // This counts the times we see a state-action pair over numerous episodes
                double timesSeen[STATE_DIM][STATE_DIM][STATE_DIM][STATE_DIM][NUM_ACTIONS];
                // This holds often used precomputed powers of gamma
                double gammas[MAX_EPISODE_LENGTH];
};

#endif

