
#include "power_method.h"

/* Safety cap on power-method iterations; guards against an infinite loop
 * when convergence to the requested precision is slow or never reached. */
const int MaxPowerMethodIterations = 100;

/*
 * Computes the leading eigenpair of the modularity matrix B[g] using the
 * power method with a norm shift: iterating on (B[g] + ||B[g]||1 * I)
 * guarantees the dominant eigenvalue of the shifted matrix is the
 * algebraically largest one, and the shift is subtracted at the end.
 *
 * Bg           - modularity matrix of the subgroup (read only).
 * precision    - relative-error threshold for convergence.
 * eigen_vector - out: normalized leading eigenvector (caller-allocated).
 * eigen_value  - out: leading eigenvalue of B[g].
 */
void fast_power_method(ModularityMatrix* Bg, elem precision,
					   Vector* eigen_vector, elem* eigen_value)
{
	Vector* x;
	int n,bRes,i;
	int iterations_count;
	elem norm1;

	n = Bg->Ag->n;
	x = allocate_Vector(n);
	/* Initial guess: first unit vector. NOTE(review): assumes
	 * allocate_Vector zero-initializes the remaining entries — confirm. */
	x->data[0] = 1;

	iterations_count = 0;
	norm1 = Bg->Norm1;

	do 
	{
		/*  eigen_vector = Bg * X */
		modularity_mat_mlpl_by_vector(Bg, x, eigen_vector, 1);

		/*  Apply the shift: eigen_vector += ||B[g]||1 * X */
		for (i = 0; i < n; i++)
			eigen_vector->data[i] += norm1 * x->data[i];

		/*  Use last eigenvector approximation and the current one to */
		/*  approximate the (shifted) eigenvalue */
		*eigen_value = approximate_eigenvalue(eigen_vector, x);

		/*  Convergence is tested BEFORE normalization: the eigenvalue */
		/*  estimate relates the unnormalized iterate to the previous one */
		bRes = is_precise_enough(eigen_vector, x, *eigen_value, precision);

		normalize_vector(eigen_vector);

		/*  X = eigen_vector (becomes the previous iterate) */
		vector_copy(x, eigen_vector);

		iterations_count++;

		/*  Exit when enough precision has been reached, and prevent an */
		/*  infinite loop */
	} while (bRes != 1 && iterations_count < MaxPowerMethodIterations);

	free_vector(x);
	/*  Undo the norm shift to recover the eigenvalue of B[g] itself */
	*eigen_value -= Bg->Norm1;
}

/*
 * Rayleigh-quotient style eigenvalue estimate from two successive power
 * method iterates: returns (next_x . x) / (x . x).
 */
elem approximate_eigenvalue(Vector* next_x, Vector* x)
{
	return scalar_mlpl(next_x, x) / scalar_mlpl(x, x);
}

/*
 * One unrolled power-method iteration computed term by term instead of
 * through a ModularityMatrix:
 *   result = A[g]*x - ((K[g].x)/M)*K[g] - D[g]*x + ||B[g]||1*x
 * where D[g] is the diagonal correction held in f.
 * NOTE(review): currently unreferenced — fast_power_method uses
 * modularity_mat_mlpl_by_vector instead; verify before removing.
 *
 * result   - out: next iterate (caller-allocated, size x->size).
 * x        - current iterate (read only).
 * Ag       - sparse adjacency matrix of the subgroup.
 * Kg       - degree vector of the subgroup.
 * f        - diagonal entries of the D[g] correction matrix.
 * M        - total degree sum (2 * edge count of the whole graph).
 * Bg_norm1 - ||B[g]||1, the shift that keeps the dominant eigenvalue
 *            non-negative.
 */
void fast_power_method_iteration(Vector* result, Vector* x,
								 sparse_matrix_arr* Ag, Vector* Kg,
								 Vector* f, elem M, elem Bg_norm1)
{
	elem scalar;
	Vector* helper;
	int n;

	n = x->size;
	/* scratch vector reused for each subtracted/added term */
	helper = allocate_Vector(n);
	
	sparse_matrix__mlpl_matrix_times_vector(Ag, x->data, result->data);/* result=A[g]*x */
	
	/*  helper = (K[g] * x * K[g]) / M */
	scalar = scalar_mlpl(Kg, x) / M ; /* (K[g]*x))/M */
	scalar_vector_mlpl(scalar, Kg, helper); /* scalar*K[g] */

	/*  result -= helper */
	add_vectors(result, helper, result, SUBTRACT_VECTORS); /* result=A[g]*-((K[g]*x)/m)*K[g] */
	
	/*  helper = D[g] * x */
	diagonal_matrix_vector_mlpl(f, x, helper); /* D[g]*x */

	/*  result -= helper */
	add_vectors(result, helper, result, SUBTRACT_VECTORS); /* result=A[g]*x-((K[g]*x)/m)*K[g]-D[g]*x */

	/*  helper = ||B[g]||1 * x */
	scalar_vector_mlpl(Bg_norm1, x, helper); /* ||B[g]||1*x */

	/*  result += helper */
	add_vectors(result, helper, result, ADD_VECTORS); /* result=A[g]*x-((K[g]*x)/m)*K[g]-D[g]*x+||B[g]||1*x */
	
	free_vector(helper);
}

/*
 * Convergence test for the power method: decides whether the current
 * eigenpair approximation satisfies
 *   ||next_x - lambda * x|| / ||x|| <= precision.
 * Returns 1 when the approximation is good enough, 0 when more
 * iterations are needed.
 */
int is_precise_enough(Vector* next_x,Vector* x,elem lambda,elem precision)
{
	Vector* residual;
	elem relative_error;

	residual = allocate_Vector(x->size);

	/*  residual = next_x - lambda * x */
	scalar_vector_mlpl(lambda, x, residual);
	add_vectors(next_x, residual, residual, SUBTRACT_VECTORS);

	/*  residual norm relative to the iterate's norm */
	relative_error = vector_norm(residual) / vector_norm(x);

	free_vector(residual);

	return relative_error <= precision ? 1 : 0;
}/* is_precise_enough */

