#include "dynamicprogramming.h"
#include "animalwrapper.h"
#include "environment.h"
#include <math.h>

// Default constructor: intentionally empty. The state-value and policy
// arrays are initialized explicitly via resetStateValues()/resetStatePolicy()
// before any evaluation or iteration is run.
DynamicProgramming::DynamicProgramming() {}

// Resets both stateValues and stateValuesBackup
void DynamicProgramming::resetStateValues() {
	for ( int i = 0; i < 11; i++ )
		for ( int j = 0; j < 11; j++ )
			for ( int m = 0; m < 11; m++ )
				for ( int n = 0; n < 11; n++ ) {
					stateValues[i][j][m][n] = 0;
					stateValuesBackup[i][j][m][n] = 0;
				}
}

// Resets policy to random actions
void DynamicProgramming::resetStatePolicy() {
	for ( int i = 0; i < 11; i++ )
		for ( int j = 0; j < 11; j++ )
			for ( int m = 0; m < 11; m++ )
				for ( int n = 0; n < 11; n++ )
					for ( int a = 0; a < 5; a++ )
						statePolicy[i][j][m][n][a] = 1.0/5.0;
}

// Copies the content of stateValues into stateValuesBackup
void DynamicProgramming::updateStateValuesBackup() {
	for ( int i = 0; i < 11; i++ )
		for ( int j = 0; j < 11; j++ )
			for ( int m = 0; m < 11; m++ )
				for ( int n = 0; n < 11; n++ )
					stateValuesBackup[i][j][m][n] = stateValues[i][j][m][n];
}

// Evaluates the random policy
int DynamicProgramming::policyEvaluation( double discount, bool iteration ) {
	int steps = 0;
	double delta, maxDelta;
	
	// Initialization of the state values
	resetStateValues();
	// Policy Reset
	if ( ! iteration )
		resetStatePolicy();

	do {
	maxDelta = 0;
	// For each possible state value
	for ( int i = 0; i < 11; i++ )
		for ( int j = 0; j < 11; j++ )
			for ( int m = 0; m < 11; m++ )
				for ( int n = 0; n < 11; n++ ) {
					// If this is not one if the final states
					if ( i != m || j != n ) {
						// Update state value and compute its delta
						stateValues[i][j][m][n] = stateValuePE(i,j,m,n,discount);
						delta = fabs( stateValuesBackup[i][j][m][n] - stateValues[i][j][m][n] );
							
						maxDelta = delta > maxDelta? delta : maxDelta;
					}	
				}
	steps++;
	updateStateValuesBackup();
	} while ( maxDelta > 0.0001 );

	return steps;
}

// Performs Value Iteration with a greedy policy
int DynamicProgramming::valueIteration( double discount ) {
	int steps = 0;
	double delta, maxDelta;
	
	// Initialization of the state values
	resetStateValues();

	do {
	maxDelta = 0;
	// For each possible state value
	for ( int i = 0; i < 11; i++ )
		for ( int j = 0; j < 11; j++ )
			for ( int m = 0; m < 11; m++ )
				for ( int n = 0; n < 11; n++ ) {
					// If this is not one if the final states
					if ( i != m || j != n ) {
						// Update state value and compute its delta
						stateValues[i][j][m][n] = stateValueVI(i,j,m,n,discount);
						delta = fabs( stateValuesBackup[i][j][m][n] - stateValues[i][j][m][n] );
							
						maxDelta = delta > maxDelta? delta : maxDelta;
					}	
				}
	steps++;
	updateStateValuesBackup();
	} while ( maxDelta > 0.0001 );

	return steps;
}

// Performs Policy Iteration
int DynamicProgramming::policyIteration( double discount ) {
	int maxes[5], maxesNumber, steps = 0;
	double value, maxValue;
	double policyBackup[5];
	bool stable;

	resetStatePolicy();
	
	do {
		stable = true;
		policyEvaluation(discount, true);

		for ( int i = 0; i < 11; i++ )
			for ( int j = 0; j < 11; j++ )
				for ( int m = 0; m < 11; m++ )
					for ( int n = 0; n < 11; n++ ) {
						maxValue = -1.0;
						for ( int a = 0; a < 5; a++ ) {
							// Backup policy value for this state and action
							policyBackup[a] = statePolicy[i][j][m][n][a];
							
							// Set it to zero so we don't have to afterwards
							statePolicy[i][j][m][n][a] = 0.0;
							value = getNextStatesSum( i, j, m, n, a, discount);
					
							// If this is the new max
							if ( value > maxValue ) {
								maxes[0] = a;
								maxesNumber = 1;
								maxValue = value;
							}
							// Otherwise add it to the max list
							else if ( value == maxValue ) {
								maxes[maxesNumber] = a;
								maxesNumber++;
							}
						}
						for ( int a = 0; a < 5; a++ ) {
							// Assign new policy values
							if ( a < maxesNumber )
								statePolicy[i][j][m][n][ maxes[a] ] = 1.0/maxesNumber;
							// Check differences with the previous policy 
							if ( statePolicy[i][j][m][n][a] != policyBackup[a] ) 
								stable = false;
						}	// End updating for cycle
					}	// End four for cycles 
		steps++;
	} while ( ! stable );

	return steps;
}

// Computes the value of a single state based on current policy
double DynamicProgramming::stateValuePE(int predX, int predY, int preyX, int preyY, double discount ) {
	double finalValue = 0.0;

	for ( int i = 0; i < 5; i++ )
		finalValue += statePolicy[predX][predY][preyX][preyY][i] * 
						getNextStatesSum(predX, predY, preyX, preyY, i, discount); 	
	
	return finalValue;
}

// Computes the value of a single state, given a greedy policy
double DynamicProgramming::stateValueVI(int predX, int predY, int preyX, int preyY, double discount ) {
	double finalValue = 0.0;
	double nextStateValue;

	for ( int i = 0; i < 5; i++ ) {
		nextStateValue = getNextStatesSum(predX, predY, preyX, preyY, i, discount);
		finalValue = finalValue < nextStateValue ? nextStateValue : finalValue;
	}

	return finalValue;
}
	
// Computes the value relative to predator's move from a state
double DynamicProgramming::getNextStatesSum(int predX, int predY, int preyX, int preyY, int predAction, double discount ) {
	AnimalWrapper * pred = new AnimalWrapper();
	AnimalWrapper * prey = new AnimalWrapper();
	double preyActionProbability;
	double sumStateValues = 0.0, rewardInThisState;

	// Set predator position
	pred->position[0] = predX;
	pred->position[1] = predY;

	// Calculate the position of the predator if it were to take this action
	Environment::updatePosition( pred, predAction );

	// For each possible action that the prey may take
	for ( int preyAction = 0; preyAction < 5; preyAction++ ) {
		// Reset prey position
		prey->position[0] = preyX;
		prey->position[1] = preyY;

		// Calculate the probability that the prey does this action
		preyActionProbability = 
				getPreyActionProbability( pred, prey, preyAction );

		if ( preyActionProbability == 0 ) continue;

		// Calculate the position of the prey if it takes this action
		Environment::updatePosition( prey, preyAction );
		
		// Calculate the reward in this state
		if ( pred->position[0] == prey->position[0] && 
			 pred->position[1] == prey->position[1] )
			rewardInThisState = 10.0;
		else
			rewardInThisState = 0.0;

		sumStateValues += preyActionProbability * ( rewardInThisState + discount * stateValuesBackup[pred->position[0]][pred->position[1]][prey->position[0]][prey->position[1]]); 
	}
	
	delete pred;
	delete prey;

	return sumStateValues;
}

// Computes the probability that the prey performs an action
double DynamicProgramming::getPreyActionProbability ( AnimalWrapper * pred, AnimalWrapper * prey, int preyMove) {
    // Checks if the predator has already caught the prey
    if (pred->position[0] == prey->position[0] && 
		pred->position[1] == prey->position[1] ) {

        // If the game ended, the only possible move for the prey is to stay still
		if ( preyMove == 4 ) return 1.0;
        
		return 0.0; 
	}

    // If prey doesn't want to move we don't have to take into account pred's position
	if ( preyMove == 4 ) return 0.8;

    // Checks if the predator is close to the prey
	int predDirection = Environment::checkNearAnimals( prey, pred );
	// If the two animals are near
	if ( predDirection != -1 ) {
		// The prey can't take this action
        if ( preyMove == predDirection ) return 0.0;
        
		return (0.2/3.0);
    }
	
	return (0.2/4.0);
}

// Returns the specified state value
double DynamicProgramming::getStateValue( int a, int b, int c, int d ) {
	return stateValues[a][b][c][d];
}

// Returns the specified policy value
double DynamicProgramming::getStatePolicy( int a, int b, int c, int d, int e ) {
	return statePolicy[a][b][c][d][e];
}
