#include "predator.h"
#include "environment.h"
#include <stdlib.h>
#include <math.h>
#include <iostream>

using namespace std;

// Constructor: sets the default learning parameters, initializes the
// Q matrix and precomputes the powers of gamma used by Monte Carlo
Predator::Predator( Environment * newEnv ) : Animal(newEnv) {
	// Default learning parameters
	alpha       = 0.5;
	epsilon     = 0.1;
	gamma       = 0.9;
	temperature = 0.5;

	// Default algorithm: E greedy - QLearning
	selectedAlgorithm = 1;

	// Optimistic initialization of the Q matrix
	resetQ(15);

	// Bookkeeping used by SARSA and the exploration flag
	exploratory = false;
	sarsaAction = -1;

	// Precomputing gamma^i for the Monte Carlo return updates
	for ( int exponent=0; exponent<250; exponent++ )
		gammas[exponent] = pow(gamma, exponent);
}

// Sets the learning rate Alpha (suggested range 0.1 - 0.5)
void Predator::setAlpha( double a ) {
	alpha = a;
}

// Sets the discount factor Gamma (suggested range 0.1 - 0.9) and
// refreshes the table of precomputed gamma powers accordingly
void Predator::setGamma( double g ) {
	gamma = g;

	for ( int exponent=0; exponent<250; exponent++ )
		gammas[exponent] = pow(gamma, exponent);
}

// Sets the exploration rate Epsilon (suggested range 0.1 - 0.9)
void Predator::setEpsilon( double e ) {
	epsilon = e;
}

// Sets the Softmax temperature (suggested range 0.1 - 0.9).
// Values below 0.1 are clamped: a tiny temperature makes the
// exponentials used by the Softmax policy overflow.
void Predator::setTemperature( double t ) {
	temperature = ( t < 0.1 ) ? 0.1 : t;
}

// Prints the predator's parameters and the selected algorithm on stdout
void Predator::printParameters() {
	// Names indexed by the algorithm code (see setAlgorithm)
	static const char * algorithmNames[] = {
		"Random",
		"E greedy, Q learning",
		"Softmax, Q learning",
		"E greedy, SARSA",
		"Softmax, SARSA",
		"E greedy, On Policy Monte Carlo",
		"Softmax, On Policy Monte Carlo",
		"E greedy, Off Policy Monte Carlo",
		"Softmax, Off Policy Monte Carlo"
	};

	cout << "Alpha value is:\t\t" << alpha << "\n";
	cout << "Epsilon value is:\t" << epsilon << "\n";
	cout << "Gamma value is:\t\t" << gamma << "\n";
	cout << "Temperature value is:\t" << temperature << "\n";
	cout << "Algorithm is:\t\t";

	if ( selectedAlgorithm >= 0 && selectedAlgorithm <= 8 )
		cout << algorithmNames[selectedAlgorithm];
	else
		cout << "Algorithm Unknown";

	cout << "\n";
}

// Overwrites the stored last state; useful for resetting the
// predator's position at the start of an episode
void Predator::setLastState( int state1, int state2, int state3, int state4) {
	int states[4] = { state1, state2, state3, state4 };

	for ( int i=0; i<4; i++ )
		lastState[i] = states[i];
}

// Resets every Q value to qValue, clears the visit counters and the
// off-policy numerator/denominator accumulators, empties the episode
// vectors and resets the general bookkeeping variables
void Predator::resetQ( double qValue ) {
	for ( int x=0; x<11; x++ )
		for ( int y=0; y<11; y++ )
			for ( int z=0; z<11; z++ )
				for ( int w=0; w<11; w++ )
					for ( int action=0; action<5; action++ ) {
						q[x][y][z][w][action] = qValue;
						timesSeen[x][y][z][w][action] = 0;
						offlineNumDen[x][y][z][w][action][0] = 0;
						offlineNumDen[x][y][z][w][action][1] = 0;
					}

	returns.clear();
	history.clear();

	sarsaAction = -1;
	exploratory = false;
	reward = 0;
}

// Prints a 2D slice of the Q matrix for action a, with the last two
// state coordinates fixed at (5,5); rows are printed top to bottom
void Predator::printQ( int a ) {
	for ( int row=10; row>=0; row-- ) {
		for ( int col=0; col<11; col++ )
			cout << q[col][row][5][5][a] << "\t";

		cout << "\n";
	}
}

// Returns an action chosen uniformly at random among the 5 available (0-4);
// a random move is never flagged as exploratory
int Predator::randomPolicy() {
	exploratory = false;
	return rand() % 5;
}

// Chooses the next action with an epsilon-greedy policy over the Q matrix:
// with probability ~(1 - epsilon) the highest-valued action is returned,
// otherwise one of the 5 actions is picked uniformly at random
int Predator::eGreedyQPolicy() {
	// A single roll in [0, 999] decides both exploit/explore and,
	// when exploring, which action gets picked
	int roll = rand() % 1000;

	// Arg-max over the Q values of the current state (ties keep the lowest index)
	int bestAction = 0;
	for ( int action=1; action<5; action++ )
		if ( q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][action] >
			 q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][bestAction] )
			bestAction = action;

	// Exploit: the roll fell outside the exploration window
	if ( roll > epsilon * 1000 ) {
		exploratory = false;
		return bestAction;
	}

	// Explore: the window [0, epsilon*1000] is split into 5 equal bins,
	// one per action
	exploratory = true;
	for ( int action=0; action<5; action++ )
		if ( roll <= epsilon * 200 * (action+1) )
			return action;

	// Safeguard catch, it shouldn't really come to this
	cout << "Warning, E Greedy went wrong!";
	return 4;
}

// Chooses the next action with a Softmax (Boltzmann) policy over the Q
// matrix: each action is drawn with probability proportional to
// exp(Q/temperature)
int Predator::softmaxQPolicy() {
	double roll = (double)(rand() % 1000);

	// Boltzmann numerators and their sum (the normalization constant)
	double weights[5];
	double totalWeight = 0;
	for ( int action=0; action<5; action++ ) {
		weights[action] = exp(q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][action]/temperature);
		totalWeight += weights[action];
	}

	// The greedy action is only needed to flag whether the draw was exploratory
	int bestAction = 0;
	for ( int action=1; action<5; action++ )
		if ( q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][action] >
			 q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][bestAction] )
			bestAction = action;

	// Walk the cumulative distribution (scaled to [0, 1000]) until the
	// roll falls inside an action's probability slice
	double cumulative = 0;
	for ( int action=0; action<5; action++ ) {
		weights[action] /= totalWeight;
		cumulative += weights[action] * 1000;

		if ( roll <= cumulative ) {
			exploratory = ( action != bestAction );
			return action;
		}
	}

	// Safeguard catch, it shouldn't really come to this
	cout << "Warning, Softmax went wrong!";
	return 4;
}

// Observation for Q learning (off-policy TD control).
// Applies the standard update
//   Q(s,a) += alpha * ( r + gamma * max_a' Q(s',a') - Q(s,a) )
// then advances the internal state to the observed one and accumulates
// the reward.
//
// Fix: the running maximum used to start from the sentinel -1.0, which is
// wrong whenever every Q value of the next state is below -1 (possible
// after negative rewards or a resetQ with a negative value). It now starts
// from the first action's Q value.
void Predator::setObservationQLearning( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward ) {
    // Maximum Q value over the actions of the observed (next) state
    double maxQValue = q[obsState1][obsState2][obsState3][obsState4][0];
    for ( int i=1; i<5; i++ )
        if ( q[obsState1][obsState2][obsState3][obsState4][i] > maxQValue )
            maxQValue = q[obsState1][obsState2][obsState3][obsState4][i];

    // Updating the Q value of the state-action pair we just left
    q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][actionTaken]
        = q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][actionTaken]
        + alpha * ( obsReward + gamma * maxQValue  - q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][actionTaken] );

	// Iterating to next state
	lastState[0] = obsState1;
	lastState[1] = obsState2;
	lastState[2] = obsState3;
	lastState[3] = obsState4;

	reward += obsReward;
}

// Observation for SARSA (on-policy TD control).
// Update rule: Q(s,a) += alpha * ( r + gamma * Q(s',a') - Q(s,a) ),
// where a' is drawn from the current policy in the observed state s'.
// The chosen a' is cached in sarsaAction so that move() will actually
// perform it on the next step.
void Predator::setObservationSarsa ( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward ) {
	// Backup of s: lastState is overwritten below but the update targets s
	int lastStateBackup[4];

	for ( int i=0; i<4; i++ )
		lastStateBackup[i] = lastState[i];

	// Iterating to next state s' (the policies below read lastState)
	lastState[0] = obsState1;
	lastState[1] = obsState2;
	lastState[2] = obsState3;
	lastState[3] = obsState4;

	// We chose our future action a' based on our chosen policy
	// (algorithm 3 = E greedy SARSA, otherwise Softmax SARSA)
	if ( selectedAlgorithm == 3 )
		sarsaAction = eGreedyQPolicy();
	else
		sarsaAction = softmaxQPolicy();

    // Updating the Q value with SARSA's algorithm
    q[lastStateBackup[0]][lastStateBackup[1]]
	 [lastStateBackup[2]][lastStateBackup[3]][actionTaken]
        =  q[lastStateBackup[0]][lastStateBackup[1]]
			[lastStateBackup[2]][lastStateBackup[3]][actionTaken] +
			alpha * ( obsReward + gamma * q[obsState1][obsState2][obsState3][obsState4][sarsaAction]
            - q[lastStateBackup[0]][lastStateBackup[1]][lastStateBackup[2]][lastStateBackup[3]][actionTaken] );

	// Total reward collected so far
	reward += obsReward;
}

// Observation for On/Off Policy Monte Carlo.
// Accumulates the discounted return of every state-action pair visited in
// the current episode; the Q matrix itself is only updated at episode end
// by terminalOnPolicyMonteCarlo() / terminalOffPolicyMonteCarlo().
// Layout of a `returns` entry (array<double,7>):
//   [0..3] state, [4] action, [5] discounted return collected since the
//   first visit, [6] steps elapsed since that visit (exponent of gamma).
void Predator::setObservationMonteCarlo ( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward ) {
	// If we are off policy and it was an exploratory move we reset the states we've seen:
	// the off-policy estimator only uses the tail of the episode after the
	// last exploratory action
	if ( ( selectedAlgorithm == 7 || selectedAlgorithm == 8 ) && ( exploratory ) ) {
		returns.clear();
		history.clear();
	}

	bool newState = true;
	int x,y,z,k;

	// Credit the new reward, discounted by the time elapsed, to every
	// state-action pair already recorded this episode
	for( vector<array<double,7>>::iterator it = returns.begin(); it != returns.end(); it++ ) {
		x = (int)(*it)[0];
		y = (int)(*it)[1];
		z = (int)(*it)[2];
		k = (int)(*it)[3];

		// First-visit check: don't record the same state twice
		if ( lastState[0] == x && lastState[1] == y && lastState[2] == z && lastState[3] == k )
			newState = false;

		// If we have the gamma power already stored
		// Note: This is why the arrays in the vector have to contain doubles
		if ( (*it)[6] < 250)
			// We don't have to compute it
			(*it)[5] += obsReward * gammas[(int)((*it)[6])];
		else
			// Otherwise we calculate the power of gamma needed
			(*it)[5] += obsReward * pow(gamma,(*it)[6]);

		// Increase the time passed for each combination we have seen
		(*it)[6]++;
	}

	// If we never seen this state before ( it has to be non-exploratory if we are off policy )
	if ( newState ) {
		// Insert it in the returns vector ( returns saves a copy of it )
		array<double,7> temp = { { (double)lastState[0], (double)lastState[1], (double)lastState[2], (double)lastState[3], (double)actionTaken, (double)obsReward, 1.0 } };
		returns.push_back( temp );
	}

	// If needed add new state into history ( non-exploratory and off policy )
	// history records every step taken, used to rebuild the importance
	// sampling weights at episode end
	if ( selectedAlgorithm == 7 || selectedAlgorithm == 8 ) {
		array<int,5> temp = { { lastState[0], lastState[1], lastState[2], lastState[3], actionTaken} };
		history.push_back( temp );
	}

	// Iterating to next state
	lastState[0] = obsState1;
	lastState[1] = obsState2;
	lastState[2] = obsState3;
	lastState[3] = obsState4;
	
	reward += obsReward;
}

// Chooses the predator's next action according to the selected algorithm,
// stores it in actionTaken and returns it
int Predator::move() {
	switch ( selectedAlgorithm ) {
		// E greedy - Q learning / On policy Monte Carlo / Off policy Monte Carlo
		case 1: case 5: case 7:
			actionTaken = eGreedyQPolicy();
			break;
		// Softmax - Q learning / On policy Monte Carlo / Off policy Monte Carlo
		case 2: case 6: case 8:
			actionTaken = softmaxQPolicy();
			break;
		// E greedy - SARSA: replay the action picked by the last observation,
		// unless this is the first move of the episode
		case 3:
			actionTaken = ( sarsaAction == -1 ) ? eGreedyQPolicy() : sarsaAction;
			break;
		// Softmax - SARSA
		case 4:
			actionTaken = ( sarsaAction == -1 ) ? softmaxQPolicy() : sarsaAction;
			break;
		// Random policy (case 0) and any unknown algorithm
		default:
			actionTaken = randomPolicy();
	}

	return actionTaken;
}

// General observation entry point: dispatches the observed next state and
// reward to the handler matching the algorithm in use
void Predator::setObservation( int obsState1, int obsState2, int obsState3, int obsState4, int obsReward ) {
	// E greedy / Softmax - Q learning
	if ( selectedAlgorithm == 1 || selectedAlgorithm == 2 )
		setObservationQLearning(obsState1, obsState2, obsState3, obsState4, obsReward);
	// E greedy / Softmax - SARSA
	else if ( selectedAlgorithm == 3 || selectedAlgorithm == 4 )
		setObservationSarsa(obsState1, obsState2, obsState3, obsState4, obsReward);
	// E greedy / Softmax - On/Off policy Monte Carlo
	else if ( selectedAlgorithm >= 5 && selectedAlgorithm <= 8 )
		setObservationMonteCarlo(obsState1, obsState2, obsState3, obsState4, obsReward);
	// Random policy only accumulates the reward
	else if ( selectedAlgorithm == 0 )
		reward += obsReward;
}

// Selects the algorithm/policy used by the predator
// (see printParameters for the mapping between codes and algorithms)
void Predator::setAlgorithm( int p ) {
	selectedAlgorithm = p;
}

// Zeroes the Q values of the terminal state and, for the Monte Carlo
// algorithms, flushes the episode data into the Q matrix
void Predator::setTerminalState() {
	for ( int action=0; action<5; action++ )
		q[lastState[0]][lastState[1]][lastState[2]][lastState[3]][action] = 0;

	switch ( selectedAlgorithm ) {
		// E-greedy / Softmax On Policy Monte Carlo
		case 5: case 6:
			terminalOnPolicyMonteCarlo();
			break;
		// E-greedy / Softmax Off Policy Monte Carlo
		case 7: case 8:
			terminalOffPolicyMonteCarlo();
			break;
		default:;
	}
}

// Final operations for On Policy Monte Carlo
void Predator::terminalOnPolicyMonteCarlo() {
	int x,y,z,k,a;
	
	// For each unique state-action pair we visited
	for( vector<array<double,7>>::iterator it = returns.begin(); it != returns.end(); it++ ) {
		x = (int)(*it)[0];
		y = (int)(*it)[1];
		z = (int)(*it)[2];
		k = (int)(*it)[3];
		a = (int)(*it)[4];

		// We update the Q matrix by averaging all results
		timesSeen[x][y][z][k][a]++;

		q[x][y][z][k][a] =
			q[x][y][z][k][a] +
			( ( (*it)[5] - q[x][y][z][k][a] ) /
				timesSeen[x][y][z][k][a] );
	}
	// We reset the arrays for the next episode
	returns.clear();
}

// Final operations for Off Policy Monte Carlo (weighted importance sampling).
// `returns` holds one entry per unique state-action pair of the episode tail
// ([0..3] state, [4] action, [5] discounted return, [6] steps since first
// visit); `history` holds every step actually taken. For each pair the
// importance weight 1/prod(pi(a|s)) is built over the behaviour-policy
// probabilities of the moves that followed it; weight*return and weight are
// accumulated into offlineNumDen and finally Q = numerator/denominator.
void Predator::terminalOffPolicyMonteCarlo() {
	// Variables to store the current state-action pair
	int x,y,z,k,a;
	// Variables to store the temporary examined state-action pairs
	int xt,yt,zt,kt,at;
    double weight = 1;
	double probAction = 0;

	int historyPosOwn, historyPosNext;

	// For each unique state-action pair, from the most recent one to the most old one
	// ( iterating backwards lets `weight` grow incrementally: each older pair
	// only divides by the probabilities not yet accounted for )
	for( vector<array<double,7>>::reverse_iterator it = returns.rbegin(); it != returns.rend(); it++ ) {
		// We get our parameters for the state-action pair we are analyzing
		x = (int)(*it)[0]; y = (int)(*it)[1];
		z = (int)(*it)[2]; k = (int)(*it)[3];
		a = (int)(*it)[4];
		
		// We get the time passed between the end of the episode and the current element ( not included )
		historyPosOwn = (int)((*it)[6])-1;
		// We get the time passed between the end of the episode the next most recent element, if it exists
		if ( it == returns.rbegin() )
			historyPosNext = 1;
		else
			historyPosNext = (int)((*(it-1))[6]);

		// We update the weight over the probabilities of every move between our own and the last one, using the last known weight value ( we divide only by probabilities we still haven't computed )
		for ( vector<array<int,5>>::iterator ith = history.end()-historyPosOwn; 
											 ith != history.end()-historyPosNext+1; ith++ ) {
			
			xt = (int)(*ith)[0]; yt = (int)(*ith)[1];
			zt = (int)(*ith)[2]; kt = (int)(*ith)[3];
			at = (int)(*ith)[4];
			//cout << "Hown: -"<<historyPosOwn<<" --> Hnext: -"<<historyPosNext-1<<"\n";

			// Now compute the probability of the action taken in this particular state
			
			// E greedy: probability is (1 - epsilon) for a greedy action,
			// epsilon/5 otherwise (an action is treated as greedy when no
			// other action has a strictly larger Q value)
			if ( selectedAlgorithm == 7 ) {
				probAction = 1 - epsilon;
				for ( int i=0; i<5; i++ ) {
					if ( i != at && q[xt][yt][zt][kt][i] > q[xt][yt][zt][kt][at] ) {
						probAction = epsilon/5;
						break;
					}
				}
			}
			// Softmax: Boltzmann probability exp(Q/temperature) / sum
			else {
				double sumActionProb = 0;
                for( int i=0; i<5; i++ )
					sumActionProb += exp(q[xt][yt][zt][kt][i]/temperature);
				probAction = exp(q[xt][yt][zt][kt][at]/temperature) / sumActionProb;
			}

			// Update the weight with it
			weight /= probAction;

		}	// End of for-history
	
		// We update the numerator with the weight of the state action pair times the weight
		offlineNumDen[x][y][z][k][a][0] += weight * ((*it)[5]);
		// We update the denominator with only the weight
		offlineNumDen[x][y][z][k][a][1] += weight;
 
	}	// End of for-returns

	// For each unique state-action pair
	for( vector<array<double,7>>::iterator it = returns.begin(); it != returns.end(); it++ ) {
		// We get our parameters for the state-action pair we are analyzing
		x = (int)(*it)[0]; y = (int)(*it)[1];
		z = (int)(*it)[2]; k = (int)(*it)[3];
		a = (int)(*it)[4];

		// We update the Q matrix with the weighted average accumulated
		// across all episodes so far
		q[x][y][z][k][a] = offlineNumDen[x][y][z][k][a][0]/offlineNumDen[x][y][z][k][a][1];
	}

	// We reset the vectors for the next episode
	returns.clear();
	history.clear();
}


// Returns the total reward accumulated so far
int Predator::getReward() {
	return reward;
}

// Tells whether the last action chosen by a policy was exploratory
bool Predator::isActionExploratory() {
	return exploratory;
}
