#include "dynamicprogrammingsmart.h"
#include "animalwrapper.h"
#include "environment.h"
#include <stdlib.h>
#include <math.h>
using namespace std;

// Default constructor: performs no initialization; state values and the
// policy are set up by the reset methods / the algorithm entry points.
DynamicProgrammingSmart::DynamicProgrammingSmart() {}

// Resets both stateValues and stateValuesBackup
void DynamicProgrammingSmart::resetStateValues() {
	for ( int i = 0; i < 21; i++ ) {
		stateValues[i] = 0;
		stateValuesBackup[i] = 0;
	}
}

// Resets policy to random actions
void DynamicProgrammingSmart::resetStatePolicy() {
	for ( int i = 0; i < 21; i++ )
		for ( int a = 0; a < 5; a++ )
			statePolicy[i][a] = 1.0/5.0;
}

// Copies the content of stateValues into stateValuesBackup
void DynamicProgrammingSmart::updateStateValuesBackup() {
	for ( int i = 0; i < 21; i++ )
		stateValuesBackup[i] = stateValues[i];
}

// Evaluates the random policy
int DynamicProgrammingSmart::policyEvaluation( double discount, bool iteration ) {
	int steps = 0;
	double delta, maxDelta;
	
	// Initialization of the state values
	resetStateValues();
	// Policy Reset
	if ( ! iteration )
		resetStatePolicy();

	do {
	maxDelta = 0;
	// For each possible state value ( Not counting final state )
	for ( int y = 0; y < 5; y++ ) {
		for ( int x = 0; x < 6-y; x++ ) {
			// Update state value and compute its delta
			stateValues[ ((13-y)*y)/2 + x ] = stateValuePE(x,y,discount);
			delta = fabs( stateValuesBackup[((13-y)*y)/2 + x] - stateValues[((13-y)*y)/2 + x] );
				
			maxDelta = delta > maxDelta? delta : maxDelta;
		}	
	}
	steps++;
	updateStateValuesBackup();
	} while ( maxDelta > 0.0001 );

	return steps;
}

// Performs Value Iteration with a greedy policy
int DynamicProgrammingSmart::valueIteration( double discount ) {
	int steps = 0;
	double delta, maxDelta;
	
	// Initialization of the state values
	resetStateValues();

	do {
	maxDelta = 0;
	// For each possible state value ( Not counting final state )
	for ( int y = 0; y < 5; y++ ) {
		for ( int x = 0; x < 6-y; x++ ) {
			// Update state value and compute its delta
			stateValues[ ((13-y)*y)/2 + x ] = stateValueVI(x,y,discount);
			delta = fabs( stateValuesBackup[((13-y)*y)/2 + x] - stateValues[((13-y)*y)/2 + x] );
				
			maxDelta = delta > maxDelta? delta : maxDelta;
		}	
	}
	steps++;
	updateStateValuesBackup();
	} while ( maxDelta > 0.0001 );

	return steps;
}

// Performs Policy Iteration
int DynamicProgrammingSmart::policyIteration( double discount ) {
	int maxes[5], maxesNumber, steps = 0;
	double value, maxValue;
	double policyBackup[5];
	bool stable;

	resetStatePolicy();
	
	do {
		stable = true;
		policyEvaluation(discount, true);

		for ( int y = 0; y < 6; y++ ) {
			for ( int x = 0; x < 6-y; x++ ) {
				maxValue = -1.0;
				for ( int a = 0; a < 5; a++ ) {
							// Backup policy value for this state and action
					policyBackup[a] = statePolicy[((13-y)*y)/2 + x][a];
					
					// Set it to zero so we don't have to afterwards
					statePolicy[((13-y)*y)/2 + x][a] = 0.0;

					value = getNextStatesSum( x, y, a, discount);
					
					// If this is the new max
					if ( value > maxValue ) {
						maxes[0] = a;
						maxesNumber = 1;
						maxValue = value;
					}
					// Otherwise add it to the max list
					else if ( value == maxValue ) {
						maxes[maxesNumber] = a;
						maxesNumber++;
					}
				}
				for ( int a = 0; a < 5; a++ ) {
					// Assign new policy values
					if ( a < maxesNumber )
						statePolicy[((13-y)*y)/2 + x][ maxes[a] ] = 1.0/maxesNumber;
					// Check differences with the previous policy
					if ( statePolicy[((13-y)*y)/2 + x][a] != policyBackup[a] ) 
						stable = false;
				}	// End updating for cycle
			}	// End of x cycle 
		}	// End of y cycle
		steps++;
	} while ( ! stable );

	return steps;
}

// Computes the value of a single state based on current policy
double DynamicProgrammingSmart::stateValuePE(int predX, int predY, double discount ) {
	double finalValue = 0.0;

	for ( int i = 0; i < 5; i++ )
		finalValue += statePolicy[((13-predY)*predY)/2 + predX][i] * 
						getNextStatesSum(predX, predY, i, discount); 	
	
	return finalValue;
}

// Computes the value of a single state, given a greedy policy
double DynamicProgrammingSmart::stateValueVI(int predX, int predY, double discount ) {
	double finalValue = 0.0;
	double nextStateValue;

	for ( int i = 0; i < 5; i++ ) {
		nextStateValue = getNextStatesSum(predX, predY, i, discount);
		finalValue = finalValue < nextStateValue ? nextStateValue : finalValue;
	}

	return finalValue;
}

// Computes the value relative to predator's move from a state
double DynamicProgrammingSmart::getNextStatesSum(int predX, int predY, int predAction, double discount ) {
	double preyActionProbability;
	double sumStateValues = 0.0, rewardInThisState;
	int pposX, pposY;

	// Get predator position after predAction
	getNextCell( predX, predY, predAction, &predX, &predY );

	// For each possible action that the prey may take
	for ( int preyAction = 0; preyAction < 5; preyAction++ ) {
		// Reset predator position ( we move the predator, not the prey! )
		pposX = predX;
		pposY = predY;

		// Calculate the probability that the prey does this action
		preyActionProbability = 
				getPreyActionProbability( pposX, pposY, preyAction );

		if ( preyActionProbability == 0 ) continue;

		// Calculate the position of the predator if the prey takes this action
		getNextCell( pposX, pposY, (preyAction/4)*2 + (preyAction+2)%4, &pposX, &pposY );
		
		// Calculate the reward in this state
		if ( pposX == 0 && pposY == 5 )
			rewardInThisState = 10.0;
		else
			rewardInThisState = 0.0;

		sumStateValues += preyActionProbability * ( rewardInThisState + discount * stateValuesBackup[((13-pposY)*pposY)/2 + pposX]); 
	}
	
	return sumStateValues;
}

// Computes the probability that the prey performs an action
double DynamicProgrammingSmart::getPreyActionProbability ( int predX, int predY, int preyMove) {
    // Checks if the predator has already caught the prey
    if ( predX == 0 && predY == 5 ) {
        // If the game ended, the only possible move for the prey is to stay still
		if ( preyMove == 4 ) return 1.0;
        
		return 0.0; 
	}

    // If prey doesn't want to move we don't have to take into account pred's position
	if ( preyMove == 4 ) return 0.8;

    // Checks if the predator is close to the prey
	if ( predX == 0 && predY == 4 ) {
		// The prey can't take this action
        if ( preyMove == 2 ) return 0.0;
        
		return (0.2/3.0);
    }
	
	return (0.2/4.0);
}

// Returns the specified state value
double DynamicProgrammingSmart::getStateValue( int a, int b, int c, int d ) {
	int predX, predY;

	// We translate the predator as if the prey was in 5,5
	predX = (abs(a - c) + 5)%11;
	predY = (abs(b - d) + 5)%11;

	// We fold the map to find the predator's position inside our system
	normalizePredatorPosition(predX, predY, &predX, &predY);

	return stateValues[((13-predY)*predY)/2 + predX];
}

// Returns the specified policy value
double DynamicProgrammingSmart::getStatePolicy( int a, int b, int c, int d, int e ) {
	int predX, predY, preyX, preyY;
	int action;

	// We translate the predator as if the prey was in 5,5
	predX = (abs(a - c) + 5)%11;
	predY = (abs(b - d) + 5)%11;
	
	// We calculate the orientation of the predator in respect to our representation
	// of the system
	// All conditions are explicitly written simply because they are easier to read and
	// understand, not because of efficiency.
	if 		( predX >= 5 && predY >= 5 && predX <= predY ) orientation = 0;
	else if ( predX >= 5 && predY >  5 && predX > predY  ) orientation = 1;
	else if ( predX >= 5 && predY <= 5 && predX + predY >= 10 ) orientation = 2;
	else if ( predX >  5 && predY <= 5 && predX + predY < 10  ) orientation = 3;
	else if ( predX <= 5 && predY <= 5 && predX >= predY ) orientation = 4;
	else if ( predX <  5 && predY <  5 && predX <  predY ) orientation = 5;
	else if ( predX <  5 && predY >= 5 && predX + predY < 10  ) orientation = 6;
	else orientation = 7;

	// We translate the predator's action to the equivalent action in our system
	action = getOrientedAction(e);

	// We fold the map to find the predator's position inside our system
	normalizePredatorPosition( predX, predY, &predX, &predY );

	return statePolicy[((13-predY)*predY)/2 + predX][action];
}

// Sets the position relative to x,y given a
void DynamicProgrammingSmart::getNextCell( int x, int y, int a, int* x2, int *y2 ) {
	// Looking left here is the same as looking to the right
	if ( a == 3 && x == 0 ) return getNextCell( x, y, 1, x2, y2 );
	// Looking down here is the same as looking ourselves 
	if ( a == 2 && y == 0 ) {
		*y2 = y;
		*x2 = x;
		return;
	}
	// If we are on the diagonal
	if ( x == 5-y ) {
		// Looking up takes us to the left
		if ( a == 0 )
			return getNextCell( x, y, 3, x2, y2 );
		// While looking right takes us down
		if ( a == 1 )
			return getNextCell( x, y, 2, x2, y2 );
	}
	// Standard cases	
	switch ( a ) {
		case 0:
			*y2 = y+1;
			*x2 = x;
			break;
		case 1:
			*y2 = y;
			*x2 = x+1;
			break;
		case 2:
			*y2 = y-1;
			*x2 = x;
			break;
		case 3:
			*y2 = y;
			*x2 = x-1;
			break;
		default:
			*y2 = y;
			*x2 = x;
	}
}

// Returns the equivalent action given an orientation
int DynamicProgrammingSmart::getOrientedAction( int a ) {
	if ( a == 4 ) return a;

	switch ( orientation ) {
		// Upper-Top-Right: Up-Down inverted ( 0->2, 2->0 )
		case 0: return (( ( a%2 + 1 )%2 * 2 ) + a ) % 4;
		// Lower-Top-Right: Up => Right, Right => Down, Down => Left, Left => Up
		// ( 0->1, 1->2, 2->3, 3->0 )
		case 1: return ( a + 1 ) % 4;
		// Upper-Bottom-Right: Up => Left, Right => Down, Down => Right, Left=> Up
		// ( 0->3, 1->2, 2->1, 3->0 )
		case 2: return (( (a%2) + (a%2 + 1)%2 * 3 ) + a ) % 4;
		// Lower-Bottom-Right: Normal
		case 3: return a;
		// Lower-Bottom-Left: Left-Right inverted ( 1->3, 3->1 )
		case 4: return (( a%2 * 2 ) + a ) % 4;
		// Upper-Bottom-Left: Up => Left, Right => Up, Down => Right, Left => Down
		// ( 0->3, 1->0, 2->1, 3->2 )
		case 5: return ( a + 3 ) % 4;
		// Lower-Top-Left: Up => Right, Right => Up, Down => Left, Left => Down
		// ( 0->1, 1->0, 2->3, 3->2 )
		case 6: return (a%2 + 1)%2 - (a%2) + a;
		// Upper-Top-Left: Up-Down inverted, Left-Right inverted
		// ( 0->2, 1->3, 2->0, 3->1 )
		default:
			return ( a + 2 ) % 4;
	}
}

// Returns the position the predator would have on our representation of the system
// Predator position must be already set relative to prey position 5,5!
void DynamicProgrammingSmart::normalizePredatorPosition(int x, int y, int * x2, int *y2 ) {
	// Horizontal folding of the map ( left to right )
	x = x < 5 ? 10 - x : x;
	// Vertical folding of the map ( up to down )
	y = y > 5 ? 10 - y : y;
	// Diagonal folding of the map ( upper right to lower left )
	if ( x + y > 10 ) {
		x += 10 - y;		// x + 10 - y
		y = 20 - y - x;		// 10 - x
		x += y - 10;		// 10 - y  
	}
	// Translation of the position to the bottom left corner
	*x2 = x - 5;
	*y2 = y;
}
