#include "animal.h"
#include "environment.h"
#include <stdlib.h>
#include <math.h>
#include <vector>
#include <iostream>
#include "lp_solve/lp_lib.h"

using namespace std;

// Shared (static) configuration: one set of agents/parameters for all animals
int Animal::agentsNum = 0;			// number of agents in the environment
int Animal::maxStates = 0;			// 11^(2*agentsNum) discrete states (see setNumberOfAgents)
int Animal::selectedAlgorithm = 0;	// 0 random, 1 independent Q, 2 minimax Q, 3 WoLF hill climbing
double Animal::alpha = 1.0;			// learning rate
double Animal::gamma = 0.9;			// discount factor
double Animal::epsilon = 0.1;		// exploration probability
double Animal::decay = 0.9999954;	// multiplicative alpha decay applied by decayAlpha()
double Animal::alpha_backup = 1.0;	// alpha value restored by resetQ()
double Animal::dl = 0.002;			// WoLF policy step when losing ("delta lose")
double Animal::dw = 0.001;			// WoLF policy step when winning ("delta win")

// Constructor
// Binds the animal to its environment and leaves every learning
// structure unallocated; init() must be called before learning.
Animal::Animal( Environment * newEnv ) {
	ownEnv = newEnv;

	// Initialization of addictional variables
	exploratory = false;

	lastState = 0;
	reward = 0;
	qValue = 1;

	// Set pointers as empty.
	// c must be nulled as well: the destructor and init() both call
	// delete[] on it, and it was previously left indeterminate, which
	// is undefined behaviour on the first delete.
	q = 0;
	pi = 0;
	v = 0;
	c = 0;
}

// Destructor
// Releases every learning structure owned by the animal
// (delete[] on a null pointer is a safe no-op).
Animal::~Animal() {
	delete[] q;
	delete[] pi;
	delete[] v;
	delete[] c;
}

// Dumps the shared learning parameters and the name of the active algorithm
void Animal::printParameters() {
	cout << "Alpha value is:\t\t" << alpha << "\n";
	cout << "Epsilon value is:\t" << epsilon << "\n";
	cout << "Gamma value is:\t\t" << gamma << "\n";
	cout << "Delta lose value is:\t" << dl << "\n";
	cout << "Delta win value is:\t\t" << dw << "\n";
	cout << "Algorithm is:\t\t";

	// Resolve the algorithm id to its display name, then print once
	const char * algorithmName;
	switch ( selectedAlgorithm ) {
		case 0:  algorithmName = "Random"; break;
		case 1:  algorithmName = "E greedy, Independent Q learning"; break;
		case 2:  algorithmName = "Minimax Q learning"; break;
		case 3:  algorithmName = "WoLF Hill Climbing"; break;
		default: algorithmName = "Algorithm Unknown";
	}
	cout << algorithmName << "\n";
}

// Prints a subset of the Q matrix
// NOTE(review): apart from the missing-matrix guard this is a stub —
// the parameter `a` is unused and nothing is printed when q exists.
void Animal::printQ( int a ) {
	if ( !q ) {
		cout << "A Q matrix for this animal has not been defined yet!";
		return;
	}
}

// Registers one more agent in the environment
// (delegates so the state-space size is recomputed as well)
void Animal::increaseNumberOfAgents() {
	setNumberOfAgents(agentsNum + 1);
}

// Sets the total number of agents in the environment
void Animal::setNumberOfAgents( int n ) {
	maxStates = pow(11,2*n);

	agentsNum = n;	
}

// Converts a series of coordinates into a single index
// Treats the 2*agentsNum coordinates as digits of a base-11 number
// (least significant digit first) and returns the resulting index.
int Animal::stateConverter( vector<int> state ) {
	int finalState = 0;
	// Running base-11 multiplier replaces pow(11, i): integer arithmetic
	// avoids floating-point truncation and the repeated pow calls.
	int base = 1;

	for ( int i = 0; i < 2*agentsNum; i++ ) {
		finalState += state[i] * base;
		base *= 11;
	}

	return finalState;
}

// Sets the used policy for every animal (the algorithm id is static,
// so all animals switch at once).
void Animal::setAlgorithm( int p ) {
	selectedAlgorithm = p;
	// MUST CALL INIT() FOR EACH ANIMAL AFTER CALLING THIS!
}

// Returns the id of the currently selected algorithm
int Animal::getAlgorithm() {
	return selectedAlgorithm;
}

// Function to set the Last State
// Useful for resetting the position of the predator
// (e.g. at the start of a new episode).
void Animal::setLastState( int state ) {
	lastState = state;
}

// Sets the parameters for the selected algorithm only
// Interactively prompts on stdin for the parameters the current
// algorithm actually uses. The alpha/epsilon/gamma prompts were
// duplicated verbatim for cases 1/2 and 3; they now share one path.
void Animal::setParameters() {
	switch ( selectedAlgorithm ) {
		// Random policy
		case 0:
			cout << "Random policy has no parameter that can be changed!";
			break;
		// Independent Q learning, Minimax and WoLF all share
		// the alpha / epsilon / gamma parameters
		case 1:
		case 2:
		case 3:
			cout << "Insert new value for alpha ( old value = "<< alpha <<" ):\n";
			cin >> alpha;
			alpha_backup = alpha;
			cout << "Insert new value for epsilon ( old value = "<< epsilon <<" ):\n";
			cin >> epsilon;
			cout << "Insert new value for gamma ( old value = "<< gamma <<" ):\n";
			cin >> gamma;

			// WoLF Hill Climbing additionally uses the two policy deltas
			if ( selectedAlgorithm == 3 ) {
				cout << "Insert new value for delta lose ( old value = "<< dl <<" ):\n";
				cin >> dl;
				cout << "Insert new value for delta win ( old value = "<< dw <<" ):\n";
				cin >> dw;
			}
			break;
	}
}

// Function to set a value for Alpha (0.1 - 0.5)
// Also records the value as the backup that resetQ() restores
// after alpha has been decayed.
void Animal::setAlpha( double a ) {
	alpha = a;
	alpha_backup = a;
}

// Here we clean/initialize all the matrices we need
// Frees any structures from a previous configuration, then allocates
// and resets exactly what the selected algorithm requires.
void Animal::init() {
	// Drop everything from a previous run (delete[] of null is a no-op)
	delete[] q;
	delete[] pi;
	delete[] v;
	delete[] c;

	q = 0;
	pi = 0;
	v = 0;
	c = 0;

	switch ( selectedAlgorithm ) {
		// Independent Q learning: a 5-action Q table only
		case 1:
			q = new double[maxStates*5]();
			resetQ( qValue );
			break;

		// Minimax: joint-action (5x5) Q table, policy and value function
		case 2:
			q = new double[maxStates*5*5]();
			resetQ( qValue );
			pi = new double[maxStates*5]();
			v = new double[maxStates]();
			resetPiVC( 0.2, qValue );
			break;

		// WoLF: 5-action Q table, policy, average policy (stored in v)
		// and the per-state visit counter c
		case 3:
			q = new double[maxStates*5]();
			resetQ( qValue );
			pi = new double[maxStates*5]();
			// We are using V as average Policy
			v = new double[maxStates*5]();
			resetPiVC( 0.2, 0.2 );
			c = new int[maxStates]();
			break;
	}
}

// Clears everything the current algorithm has learned.
// `input` becomes the new uniform Q value (and, for Minimax, the new V).
void Animal::resetLearning( double input ) {
	// Independent Q learning: only the Q table
	if ( selectedAlgorithm == 1 ) {
		resetQ( input );
	}
	// Minimax: Q table plus policy / value function
	else if ( selectedAlgorithm == 2 ) {
		resetQ( input );
		resetPiVC( 0.2, input );
	}
	// WoLF Hill Climbing: Q table plus policy / average policy
	// (v holds the average policy here, so both restart uniform)
	else if ( selectedAlgorithm == 3 ) {
		resetQ( input );
		resetPiVC( 0.2, 0.2 );
	}
	// Random policy (0) and unknown ids keep no learned state
}

// Resets every Q entry to qv, forgets the accumulated reward and
// restores the general per-run bookkeeping variables.
void Animal::resetQ( double qv ) {
	if ( q ) {
		// Minimax keeps a 5x5 joint-action table per state,
		// the other algorithms just 5 entries per state
		int entries = maxStates * ( selectedAlgorithm == 2 ? 25 : 5 );
		for ( int idx = 0; idx < entries; idx++ )
			q[idx] = qv;
	}

	exploratory = false;
	reward = 0;
	qValue = qv;
	// Learning-rate decay starts over from the stored backup
	alpha = alpha_backup;
}

// Resets the policy (pi) to pv, the value function / average policy (v)
// to vv, and clears the visit counters (c). Null structures are skipped.
void Animal::resetPiVC( double pv, double vv ) {
	int idx;

	if ( pi )
		for ( idx = 0; idx < maxStates*5; idx++ )
			pi[idx] = pv;

	if ( v ) {
		// WoLF (3) stores a full average policy: 5 entries per state;
		// otherwise v holds a single value per state
		int size = ( selectedAlgorithm == 3 ? maxStates*5 : maxStates );
		for ( idx = 0; idx < size; idx++ )
			v[idx] = vv;
	}

	if ( c )
		for ( idx = 0; idx < maxStates; idx++ )
			c[idx] = 0;
}

// Multiplicatively decays the learning rate by the shared decay factor
void Animal::decayAlpha() {
	alpha *= decay;
}

// Returns the current (possibly decayed) learning rate
double Animal::getAlpha() {
	return alpha;
}

// Function choosing the action
// Dispatches to the policy of the selected algorithm, remembers the
// chosen action in actionTaken and returns it (0-4).
int Animal::move() {
	switch ( selectedAlgorithm ) {
		// E greedy - Q learning
		case 1:
			actionTaken = eGreedyQPolicy();
			break;

		// Minimax and WoLF both sample from the stored policy pi
		case 2:
		case 3:
			actionTaken = ePolicy();
			break;

		// Random policy; also the fallback for unknown algorithm ids
		case 0:
		default:
			actionTaken = randomPolicy();
	}

	return actionTaken;
}

// General observation function, selects the correct one depending on the policy in use
// Feeds the observed next state, reward and (for Minimax) the opponent's
// move into the learning update of the selected algorithm.
void Animal::setObservation( int obsState, int obsReward, int opponentMove ) {
	if ( selectedAlgorithm == 0 ) {
		// Random policy learns nothing, it only accumulates the reward
		reward += obsReward;
	}
	else if ( selectedAlgorithm == 1 ) {
		setObservationQLearning( obsState, obsReward );
	}
	else if ( selectedAlgorithm == 2 ) {
		setObservationMinimaxQLearning( obsState, obsReward, opponentMove );
	}
	else if ( selectedAlgorithm == 3 ) {
		setObservationHillClimbing( obsState, obsReward );
	}
	// Unknown algorithm ids observe nothing
}

// Returns if the last action chosen was exploratory
// (set by the policy functions; random moves count as non-exploratory)
bool Animal::isActionExploratory() {
	return exploratory;
}

// Function to set the Q value of the terminal state to zero
// Called when the episode ends in lastState: a terminal state has no
// future value, so its Q entries (and V, for Minimax) are cleared.
void Animal::setTerminalState( int opponentMove ) {
	switch ( selectedAlgorithm ) {
		// E greedy - Q learning
		case 1: 
		// WoLF Hill Climbing
		case 3:	{ 
				// Zero Q(lastState, a) for every own action a
				for ( int i=0; i<5; i++ )
					q[maxStates*i + lastState] = 0;
				break;
			}
		// Minimax Q learning
		case 2: {
				// Zero Q(lastState, a, opponentMove) for every own action a
				// (only the observed opponent move's column is cleared)
				for ( int i=0; i<5; i++ ) {
					q[maxStates*(5*i + opponentMove ) + lastState] = 0;
				}
				v[lastState] = 0;
				break;
			}
	}
}

// Function to get the reward
// Returns the reward accumulated since the last resetQ()
int Animal::getReward() {
	return reward;
}

/*** PRIVATE FUNCTIONS ***/

// Returns a uniformly random move in [0, 4].
// A purely random move is never flagged as exploratory.
int Animal::randomPolicy() {
	exploratory = false;

	int action = rand() % 5;
	return action;
}

// Epsilon-greedy action selection on the Q matrix: with probability
// epsilon pick uniformly among the 5 actions, otherwise take the action
// with the highest Q value in the current state.
int Animal::eGreedyQPolicy() {
	// A single dice roll in [0, 999] decides exploration vs exploitation
	int dice = rand() % 1000;

	// Exploration branch: the roll already falls uniformly in
	// [0, epsilon*1000], so slice it into 5 equal action buckets
	if ( dice <= epsilon * 1000 ) {
		exploratory = true;

		for ( int a = 0; a < 5; a++ )
			if ( dice <= epsilon * 200 * (a+1) )
				return a;

		// Safeguard catch, it shouldn't really come to this
		cout << "Warning, E Greedy went wrong!";
		return 4;
	}

	// Exploitation branch: argmax over the 5 Q values of lastState
	exploratory = false;

	int best = 0;
	for ( int a = 1; a < 5; a++ )
		if ( q[maxStates*a + lastState] > q[maxStates*best + lastState] )
			best = a;

	return best;
}

// Samples the next action from the stored policy pi with probability
// 1 - epsilon; otherwise explores uniformly at random.
// Returns the chosen action (0-4) and flags whether it was exploratory.
int Animal::ePolicy() {
	// Rolling the dice
	int prob = rand() % 1000;

	// Taking the action
	// Policy Action
	if ( prob > epsilon * 1000 ) {
		exploratory = false;

		// Sample from the categorical distribution pi(lastState, .)
		// via a second roll compared against the scaled running CDF
		int random = rand() % 1000;
		double sum = 0;

		for ( int i=0; i<5; i++) {
			sum += pi[maxStates*i + lastState] * 1000;
			if ( random < sum ) 
				return i;
		}
		// Only reachable if pi's probabilities sum to less than 1
		cout << "ERROR: FINAL SUM = "<<sum<<"\n";
		// Safeguard catch, it shouldn't really come to this
		cout << "Warning, E policy went wrong!";
		return 4;
	}
	// Random Action
	else {
		exploratory = true;

		// prob falls uniformly in [0, epsilon*1000]: slice it into
		// 5 equal buckets, one per action
		for ( int i=0; i<5; i++)
			if ( prob <= epsilon * 200 * (i+1) ) 
				return i;

		// Safeguard catch, it shouldn't really come to this
		cout << "Warning, E policy exploratory went wrong!";
		return 4;
	}
}

// Observation for Q learning
// Applies the standard Q-learning update to (lastState, actionTaken)
// using obsState's best Q value as the bootstrap target, then advances
// to the observed state and accumulates the reward.
void Animal::setObservationQLearning( int obsState, int obsReward ) {
	// Calculating the action with maximum Q value in the new state.
	// Seed the max with action 0's value: the previous fixed sentinel
	// of -1.0 returned a wrong maximum whenever every Q value was
	// below -1 (possible once negative rewards have been learned).
	double maxQValue = q[obsState];

	for ( int i=1; i<5; i++ )
		if ( q[maxStates*i + obsState] > maxQValue )
			maxQValue = q[maxStates*i + obsState];

	// Q(s,a) += alpha * ( r + gamma * max_a' Q(s',a') - Q(s,a) )
	q[maxStates*actionTaken + lastState] = q[maxStates*actionTaken + lastState]
		+ alpha * ( obsReward + gamma * maxQValue  - q[maxStates*actionTaken + lastState] );

	// Iterating to next state
	lastState = obsState;

	reward += obsReward;
}

// Observation for Minimax Q learning
// Updates Q(lastState, actionTaken, opponentMove), then re-solves the
// matrix game at lastState by linear programming to refresh the policy
// pi and the state value V, and finally advances to the observed state.
void Animal::setObservationMinimaxQLearning( int obsState, int obsReward, int opponentMove ) {
	// Q(s, a, o ) = ( 1 - alpha ) * Q(s,a,o) + alpha * ( rew + gamma * V(s') )
	q[maxStates*(5*actionTaken + opponentMove) + lastState] =
		( 1 - alpha ) * q[maxStates*(5*actionTaken + opponentMove) + lastState] +
		alpha * ( obsReward + gamma * v[obsState] );

	// LINEAR PROGRAMMING
	// solutions[0..4] hold the maximin policy for lastState,
	// solutions[5] holds the value of the matrix game
	vector<double> solutions = linearSolve();

	// Update policy values
	for ( int i = 0; i < 5; i++ )
		pi[maxStates*i + lastState] = solutions[i];
	
	// Update V value
	v[lastState] = solutions[5];

	// Iterating to next state
	lastState = obsState;

	reward += obsReward;
}

// Observation for WoLF policy hill climbing.
// Performs the Q update, maintains the average policy, and moves pi a
// small step (dw when winning, dl when losing) towards the greedy action.
// Fixes over the previous version: greedy action of lastState was
// computed from obsState; the average-policy step used integer division
// (1/c == 0 for c > 1); the win/lose test compared per-action products
// instead of expected values; and "reward =+" assigned instead of adding.
void Animal::setObservationHillClimbing(int obsState, int obsReward) {
	// Choosing the greedy action of lastState (drives the pi update below)
	int greedyAction = 0;
	for(int i=1; i<5; i++ )
		if ( q[maxStates*i + lastState] > q[maxStates*greedyAction + lastState] )
			greedyAction = i;

	// Choosing the greedy action of obsState (the bootstrap target)
	int nextGreedyAction = 0;
	for(int i=1; i<5; i++ )
		if ( q[maxStates*i + obsState] > q[maxStates*nextGreedyAction + obsState] )
			nextGreedyAction = i;

	// Q(s, a ) = ( 1 - alpha ) * Q(s,a) + alpha * ( rew + gamma * max{a'} Q(s', a') )
	q[maxStates*actionTaken + lastState] =
		( 1 - alpha ) * q[maxStates*actionTaken + lastState] +
		alpha * ( obsReward + gamma * q[maxStates*nextGreedyAction + obsState] );

	// Visit counter drives the average-policy step size
	c[lastState]++;

	// Average policy (stored in v) drifts towards the current policy;
	// 1.0/c keeps this in floating point (1/c truncated to 0 for c > 1)
	for(int i=0; i<5; i++ )
		v[maxStates*i + lastState] = v[maxStates*i + lastState] + (1.0/c[lastState]) * ( pi[maxStates*i + lastState] - v[maxStates*i + lastState] );

	// WoLF condition: we are "winning" iff the expected value of the
	// current policy exceeds that of the average policy, summed over
	// all actions of lastState
	double piValue = 0, avgValue = 0;
	for(int i=0; i<5; i++ ) {
		piValue  += pi[maxStates*i + lastState] * q[maxStates*i + lastState];
		avgValue += v[maxStates*i + lastState] * q[maxStates*i + lastState];
	}
	bool winning = piValue > avgValue;

	double diff, sum = 0;
	for(int i=0; i<5; i++ ) {
		// Win -> cautious step dw, lose -> larger step dl
		diff = ( winning ? dw : dl );
		// Clamp so probabilities stay in [0, 1] after the update
		if ( i == greedyAction )
			diff = ( diff > 1 - pi[maxStates*i + lastState] ? 1 - pi[maxStates*i + lastState] : diff );
		else
			diff = ( diff > 4 * pi[maxStates*i + lastState] ? 4 * pi[maxStates*i + lastState] : diff );

		// Greedy action gains diff, the other 4 share the loss equally
		pi[maxStates*i + lastState] = pi[maxStates*i + lastState] + ( i == greedyAction ? diff : -( diff / 4 ) );
		sum += pi[maxStates*i + lastState];
	}
	// Renormalize so pi remains a probability distribution
	for(int i=0; i<5; i++ )
		pi[maxStates*i + lastState] /= sum;

	lastState = obsState;

	reward += obsReward;
}

// Solves the matrix game at lastState with lp_solve:
//   maximise V  subject to  sum_a pi(a) * Q(lastState, a, o) >= V
//   for every opponent action o, and sum_a pi(a) = 1.
// Returns the 5 policy probabilities followed by the game value V.
// The model-building code that was duplicated between the first attempt
// and the negative-V retry now runs in a single loop, and the lp model
// is freed on every failure path instead of leaking.
vector<double> Animal::linearSolve() {
	// Columns 1..5 are the policy probabilities, column 6 is V
	// (lp_solve row arrays are 1-based; element 0 is unused)
	double row_values[7][7];

	// Constraint rows, one per opponent action o:
	// row_values[o][a+1] = Q(lastState, a, o)
	for ( int o = 0; o < 5; o++ )
		for ( int a = 0; a < 5; a++ )
			row_values[o][a+1] = q[maxStates*(5*a + o) + lastState];

	// Set policy probability constraint ( sum of pi = 1, V not involved )
	for ( int j = 1; j <= 5; j++ )
		row_values[5][j] = 1.0;
	row_values[5][6] = 0.0;

	// Objective row: optimize V only
	for ( int j = 1; j <= 5; j++ )
		row_values[6][j] = 0.0;
	row_values[6][6] = 1.0;

	lprec * lp = NULL;
	bool negative = false;
	int result = -1;

	// First attempt encodes V with coefficient -1 and maximises. If the
	// solver finds no solution ( V may need to be negative, and the
	// library has a bug that requires rebuilding the model ), retry with
	// coefficient +1 and minimise; the reported V is then sign-flipped.
	for ( int attempt = 0; attempt < 2; attempt++ ) {
		negative = ( attempt == 1 );

		for ( int i = 0; i < 5; i++ )
			row_values[i][6] = negative ? 1.0 : -1.0;

		// Create linear programming model with 6 columns
		lp = make_lp(0,6);
		if ( lp == NULL ) {
			cout << "NO LP";
			throw 0;
		}

		set_verbose(lp, IMPORTANT);
		set_add_rowmode(lp, TRUE);

		// Creating constraints
		for ( int i = 0; i < 5; i++ )
			if ( ! add_constraint(lp, row_values[i], GE, 0.0) ) {
				cout << "NO "<<i+1<<" ROW";
				delete_lp(lp);
				throw 0;
			}

		if ( ! add_constraint(lp, row_values[5], EQ, 1.0) ) {
			cout << "NO SIXTH ROW ( EQ )";
			delete_lp(lp);
			throw 0;
		}

		set_add_rowmode(lp, FALSE);

		// Setting objective ( V )
		if ( ! set_obj_fn(lp, row_values[6]) ) {
			cout << "NO SETOBJ FUN";
			delete_lp(lp);
			throw 0;
		}

		// Maximise V normally; minimise in the reversed formulation
		if ( negative )
			set_minim(lp);
		else
			set_maxim(lp);

		result = solve(lp);
		if ( result == OPTIMAL || result == SUBOPTIMAL )
			break;

		// No solution with this formulation: free the model and retry
		delete_lp(lp);
		lp = NULL;
	}

	if ( lp == NULL ) {
		cout << "No solutions at all!";
		throw result;
	}

	// get_variables fills the 6 solution values 0-based
	get_variables(lp, row_values[0]);

	vector<double> results(6,0);

	// Save results
	for ( int i = 0; i < 5; i++ )
		results[i] = row_values[0][i];
	// Save correct value for V ( flip the sign if the reversed model was used )
	results[5] = negative ? -(row_values[0][5]) : row_values[0][5];

	delete_lp(lp);

	return results;
}
