/****************************************************************************
*
*					Functions of class SimpleParameterLearning
*
****************************************************************************/

#include <string>
#include <utility>
#include <stdlib.h>
#include "SimpleParameterLearning.h"


/****************************************************************************
*					SimpleParameterLearning::SimpleParameterLearning
*
*	Description:	Standard constructor with initialization of internal parameters
*	Parameters:		-
*	Returns:		-
*
****************************************************************************/
SimpleParameterLearning::SimpleParameterLearning () {
	// Nothing to initialize here: all configuration is done later via setParameters()
}


/****************************************************************************
*					SimpleParameterLearning::~SimpleParameterLearning
*
*	Description:	Standard destructor with removing all dynamic internal variables
*	Parameters:		-
*	Returns:		-
*
****************************************************************************/
SimpleParameterLearning::~SimpleParameterLearning () {
	// No dynamically allocated members to release
}


/****************************************************************************
*					SimpleParameterLearning::learn
*
*	Description:	Main Learning Procedure
*	Parameters:		dataSet - a set of samples to learn from
*					pgmStruct - description of the pgm
*					inference - an algorithm for running inference
*	Returns:		float - last value of objective function
*
****************************************************************************/
float SimpleParameterLearning::learn (DataSet& dataSet, PGMStruct& pgmStruct, Inference* inference) {
	unsigned int j, n = 0;
	int ret = 0;

	// Number of weights to optimize
	n = pgmStruct.weights.size();

	// Represent the weights as an lbfgsfloatval_t* array, as required by liblbfgs
	lbfgsfloatval_t *x = lbfgs_malloc(n);
	// Check that we've got the memory
	if (x == NULL) {
		printf("ERROR in SimpleParameterLearning::learn: Failed to allocate a memory block for weight variables.\n");
		return -1;
	}
	// Initialize the optimization variables from the current weights
	for (j = 0; j < n; j++)
		x[j] = (lbfgsfloatval_t) pgmStruct.weights[j];

	// Optimal objective value, filled in by lbfgs(); initialized so a failed
	// run cannot return garbage
	lbfgsfloatval_t fx = 0;
	// Bundle everything the callbacks need into a single 'instance' pointer
	PGMandDataSet pgmAndDataSet;
	pgmAndDataSet.dataSet = &dataSet;
	pgmAndDataSet.pgmStruct = &pgmStruct;
	pgmAndDataSet.inference = inference;

	// Start the L-BFGS optimization; this will invoke the callback functions
	// calcValueAndGradient() and printoutProgress() when necessary.
	// This function finds minimum values of fx as a function from x
	ret = lbfgs(n, x, &fx, calcValueAndGradient, printoutProgress, &pgmAndDataSet, &lbfgsParams);
	// Previously the status code was silently discarded; negative codes are
	// liblbfgs errors (0 = success, positive = user-requested stop), so make
	// failures visible instead of hiding them
	if (ret < 0)
		printf("WARNING in SimpleParameterLearning::learn: L-BFGS optimization terminated with error code = %d\n", ret);

	// Copy the optimized parameters back to the matrix 'weights'
	for (j = 0; j < n; j++) {
		pgmStruct.weights[j] = (float) x[j];
	}

	// Free the memory of x
	lbfgs_free(x);

	return (float) fx;
}


/****************************************************************************
*					SimpleParameterLearning::calcValueAndGradient
*
*	Description:	Function calculates values of objective function and its 
*					gradient at a given set of weights values
*	Parameters:		instance - pointer to the PGMandDataSet bundle from learn()
*					x - current vector of feature weights
*					g - gradient vector to be filled in
*					n - number of weight parameters
*					step - current step of the line-search routine
*	Returns:		lbfgsfloatval_t - objective function value at x
*
****************************************************************************/
lbfgsfloatval_t SimpleParameterLearning::calcValueAndGradient (
    void *instance, // pointer to the PGMandDataSet bundle created in learn()
    const lbfgsfloatval_t *x, // current vector of feature weights
    lbfgsfloatval_t *g, // gradient (filled in by this function)
    const int n, // number of weight parameters
    const lbfgsfloatval_t step ) // current step of the line-search routine
{
	unsigned int i, j;
	int l;
	// Cast 'instance' back to PGMandDataSet*, as it was packed in the 'learn' function
	PGMandDataSet* pgmAndDataSet;
	pgmAndDataSet = (PGMandDataSet*) instance;
	
	// Copy the optimizer's current point into the model's weight vector,
	// so the inference below evaluates the PGM at exactly these weights
	for (l = 0; l < n; l++)
		pgmAndDataSet->pgmStruct->weights[l] = (float) x[l];

	// Running inference in order to get logZ (log partition function)
	float logZ;
	pgmAndDataSet->inference->run(logZ, *(pgmAndDataSet->pgmStruct));
	
	// Start the gradient from zero; per-feature contributions accumulate below
	for (l = 0; l < n; l++)
		g[l] = 0;
	// Objective starts at logZ; weighted empirical feature expectations are
	// subtracted in the loop, yielding the negative log-likelihood
	float objFunctionVal = logZ;
	float curEmpProb, curExpValue;
	// Go through all pgm features, updating the objective and the corresponding gradients
	for (i = 0; i < pgmAndDataSet->pgmStruct->features.size(); i++) {
		for (j = 0; j < pgmAndDataSet->pgmStruct->features[i].featsIDs.size(); j++) {
			// Empirical probability of this feature measured on the data set
			curEmpProb = pgmAndDataSet->dataSet->calculateEmpiricalProb (*(pgmAndDataSet->pgmStruct->featureTypes[pgmAndDataSet->pgmStruct->features[i].featsIDs[j]]), pgmAndDataSet->pgmStruct->features[i].varsIDs);
			// A negative weight ID marks a weight corresponding to 1 in the factor
			// table that must not be changed: update only the objective value,
			// no gradient entry exists for it
			if (pgmAndDataSet->pgmStruct->features[i].weightsIDs[j] < 0) {
				objFunctionVal -= curEmpProb;
				continue;
			}
			// Objective: subtract weight * empirical expectation of the feature
			objFunctionVal -= curEmpProb * pgmAndDataSet->pgmStruct->weights[ pgmAndDataSet->pgmStruct->features[i].weightsIDs[j] ];
			// Gradient: model expectation (obtained via inference) minus empirical one
			pgmAndDataSet->inference->run (curExpValue, *(pgmAndDataSet->pgmStruct->featureTypes[pgmAndDataSet->pgmStruct->features[i].featsIDs[j]]), (pgmAndDataSet->pgmStruct->features[i]).varsIDs, *(pgmAndDataSet->pgmStruct));
			g[pgmAndDataSet->pgmStruct->features[i].weightsIDs[j]] += curExpValue - curEmpProb; 
		}
	}
	// testing printout
	/*std::cout << "\n\tGradient calculation: " << objFunctionVal;
	for (l = 0; l < n; l++)
		std::cout << "\tg_" << l << "=" << g[l];
	std::cout << "."; */
	// Return objective function value (the value to be minimized is the negative log-likelihood)
    return (lbfgsfloatval_t) objFunctionVal;
}


/****************************************************************************
*					SimpleParameterLearning::printoutProgress
*
*	Description:	Function prints out the current state of the optimization.
*					Specifically, it prints the current values of the objective
*					function, the weights and the convergence parameters
*					(debug builds only)
*	Parameters:		standard liblbfgs progress-callback arguments (current
*					point x, gradient g, objective fx, norms, step, iteration)
*	Returns:		int - 0 to continue the optimization
*
****************************************************************************/
int SimpleParameterLearning::printoutProgress (
    void *instance,
    const lbfgsfloatval_t *x,
    const lbfgsfloatval_t *g,
    const lbfgsfloatval_t fx,
    const lbfgsfloatval_t xnorm,
    const lbfgsfloatval_t gnorm,
    const lbfgsfloatval_t step,
    int n,
    int k,
    int ls )
{
	// Progress reporting is compiled in for debug builds only
	#ifdef _DEBUG
		printf("\n");
		printf("Iteration %d:\n", k);
		printf("  fx = %f\n", fx);
		for (int idx = 0; idx < n; idx++)
			printf("  x[%d]=%f", idx, x[idx]);
		printf("\n");
		printf("  xnorm = %f, gnorm = %f, step = %f\n", xnorm, gnorm, step);
	#endif
	// A zero return value tells lbfgs() to continue the optimization
	return 0;
}


/****************************************************************************
*					SimpleParameterLearning::setParameters
*
*	Description:	Sets object parameters from the environment
*	Parameters:		environment - a set of parameters from config-file
*	Returns:		0
*
****************************************************************************/
int SimpleParameterLearning::setParameters (Environment &environment) {
	// Initialize the parameters for the L-BFGS optimization algorithm with
	// liblbfgs defaults, then override each one that the config-file provides.
	// (The unused 'valStr' local of the previous version has been removed.)
	double dblVal;
	int intVal;
	lbfgs_parameter_init(&lbfgsParams);
	if (environment.getDoubleParamValue(dblVal, "lbfgs_Params_gtol") == 0)
		lbfgsParams.gtol = dblVal;
	if (environment.getDoubleParamValue(dblVal, "lbfgs_Params_delta") == 0)
		lbfgsParams.delta = dblVal;
	if (environment.getDoubleParamValue(dblVal, "lbfgs_Params_epsilon") == 0)
		lbfgsParams.epsilon = dblVal;
	if (environment.getIntParamValue(intVal, "lbfgs_Params_m") == 0)
		lbfgsParams.m = intVal;
	if (environment.getDoubleParamValue(dblVal, "lbfgs_Params_ftol") == 0)
		lbfgsParams.ftol = dblVal;
	if (environment.getIntParamValue(intVal, "lbfgs_Params_max_iterations") == 0)
		lbfgsParams.max_iterations = intVal;
	if (environment.getIntParamValue(intVal, "lbfgs_Params_max_linesearch") == 0)
		lbfgsParams.max_linesearch = intVal;
	if (environment.getIntParamValue(intVal, "lbfgs_Params_past") == 0)
		lbfgsParams.past = intVal;
	if (environment.getDoubleParamValue(dblVal, "lbfgs_Params_xtol") == 0)
		lbfgsParams.xtol = dblVal;
	if (environment.getIntParamValue(intVal, "lbfgs_Params_linesearch") == 0)
		lbfgsParams.linesearch = intVal;
	if (environment.getDoubleParamValue(dblVal, "lbfgs_Params_orthantwise_c") == 0)
		lbfgsParams.orthantwise_c = dblVal;
	if (environment.getIntParamValue(intVal, "lbfgs_Params_orthantwise_end") == 0) {
		// When an OWL-QN end index is configured, start regularization at weight 0
		lbfgsParams.orthantwise_end = intVal;
		lbfgsParams.orthantwise_start = 0;
	}
	// Always reports success; absent config keys simply keep the defaults
	return 0;
}
