/**
 * @author: mattwang@tencent.com
 * @date: 2012-10-16
 */

#include <cassert>
#include <cfloat>
#include <cmath>
#include <limits>
#include <algorithm>
#include <tr1/unordered_map>

#include "lbfgs.h"
#include "mpi_def.h"
#include "lbfgs_trainer.h"

using namespace std;

// Allocate per-rank buffers and compute the (negated) observed feature
// expectations of the local training shard, then all-reduce them so every
// MPI rank sees the global expectations and the global total sample weight.
void LBFGSTrainer::init_trainer() {
	local_observed_expects = new double[n_theta];
	global_observed_expects = new double[n_theta];
	std::fill(local_observed_expects, local_observed_expects + n_theta, 0.0);

	// BUG FIX: data_weight was accumulated with += without ever being
	// initialized — undefined behavior that corrupts global_data_weight.
	double data_weight = 0.0;

	// feat_sum[(feature id, class id)] = sum over samples of weight * value.
	typedef std::tr1::unordered_map<pair<size_t, size_t>, float, featid_hasher> FeatSumMap;
	FeatSumMap feat_sum;
	for (int i = 0; i < n_train_data; i++) {
		SampleVector &sample = train_data->at(i);
		double sample_weight = train_data_weight->at(i);
		int class_id = train_data_class->at(i);
		size_t len = sample.size();
		// Inner index renamed from 'i' to 'j' — it shadowed the outer
		// sample index, which is an accident waiting to happen.
		for (size_t j = 0; j < len; ++j) {
			feat_sum[make_pair(sample[j].id, class_id)] += sample_weight * sample[j].value;
		}
		data_weight += sample_weight;
	}

	// Scatter the summed expectations into the dense parameter layout.
	// Negated here so calc_function can seed the gradient of the negative
	// log-likelihood directly with these values.
	FeatSumMap::iterator it;
	for (size_t fid = 0; fid < param_map->size(); ++fid) {
		vector<pair<size_t, size_t> >& param = param_map->at(fid);
		for (size_t j = 0; j < param.size(); ++j) {
			it = feat_sum.find(make_pair(fid, param[j].first));
			// NOTE(review): this assumes every (feature, class) parameter is
			// observed in the local shard. With partitioned training data a
			// miss is possible and would dereference end() in NDEBUG builds —
			// confirm the data-distribution guarantees this.
			assert(it != feat_sum.end());

			local_observed_expects[param[j].second] = -(it->second);
		}
	}

	local_gradient = new double[n_theta];

	// Sum the per-rank observed expectations across all processors.
	int info;
	MPI_Barrier (MPI_COMM_WORLD);
	info = MPI_Allreduce(local_observed_expects, global_observed_expects, n_theta, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	CHKERRQ(info);

	// Sum the per-rank total sample weight.
	MPI_Barrier(MPI_COMM_WORLD);
	info = MPI_Allreduce(&data_weight, &global_data_weight, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	CHKERRQ(info);

	return;
}

// test accuracy of current model on heldout sample
// Weighted classification accuracy of the current model on the heldout set.
// Returns correct_weight / total_weight, or 0.0 when the heldout set is
// empty (or has zero total weight) to avoid a divide-by-zero NaN.
double LBFGSTrainer::heldout_accuracy() const {
	double correct = 0;
	double total = 0;
	vector<double> q(n_class); // scratch buffer for q(y|x), reused per sample
	for (int i = 0; i < n_test_data; i++) {
		SampleVector &sample = test_data->at(i);
		double sample_weight = test_data_weight->at(i);
		total += sample_weight;
		size_t best_class = eval(sample, q);
		// cast avoids a signed/unsigned comparison warning
		if (best_class == (size_t) test_data_class->at(i))
			correct += sample_weight;
	}
	// BUG FIX: guard against 0/0 when no heldout data (or zero weight).
	return total > 0 ? correct / total : 0.0;
}

// Compute the negative log-likelihood f and its gradient for parameters x,
// summed over all MPI ranks (results land in f and gradient on every rank).
// The caller is expected to zero f before the call; rank 0 seeds the
// gradient with the negated observed expectations so the all-reduce adds
// them exactly once.
int LBFGSTrainer::calc_function(int dim, double *x, double &f, double *gradient, int correct) {
	static const double LOG_ZERO = log(DBL_MIN);
	vector<double> probs(n_class);
	// NOTE(review): 'correct' is taken BY VALUE, so the count accumulated
	// below is never visible to the caller (train() always reads 0).
	// Fixing this needs the header declaration changed to 'int &correct'.
	correct = 0;
	memset(local_gradient, 0, sizeof(double) * dim);

	if (g_mpi_rank == 0) {
		std::copy(global_observed_expects, global_observed_expects + dim, local_gradient);
	}

	for (int i = 0; i < n_train_data; i++) {
		SampleVector &sample = train_data->at(i);
		double sample_weight = train_data_weight->at(i);
		size_t predict_class = eval(sample, probs);
		int corrent_class = train_data_class->at(i);
		if (predict_class == (size_t) corrent_class)
			correct += sample_weight;

		for (size_t j = 0; j < sample.size(); ++j) {
			// BUG FIX: the original indexed sample[i] — the OUTER training-
			// sample index — instead of sample[j], reading the wrong feature
			// and potentially running past the end of the vector.
			int fid = sample[j].id;
			double fval = sample[j].value;

			vector<pair<size_t, size_t> >& param = param_map->at(fid);
			for (size_t k = 0; k < param.size(); ++k) {
				size_t class_id = param[k].first;
				size_t dim_id = param[k].second;
				// model-expectation contribution: w * q(y|x) * f(x,y)
				local_gradient[dim_id] += sample_weight * probs[class_id] * fval;
			}
		}

		// Accumulate -w * log q(y|x); clamp to LOG_ZERO when the probability
		// underflows so f stays finite.
		double t = log(probs[corrent_class]);
		if (finite(t))
			f -= sample_weight * t;
		else
			f -= sample_weight * LOG_ZERO;
	}

	if (sigma2) { // applying Gaussian (L2) prior penalty
		// loop index made int to match 'dim' (was a signed/unsigned mix)
		for (int i = 0; i < dim; ++i) {
			double penality = x[i] / sigma2[i];
			local_gradient[i] += penality;
			f += (penality * x[i]) / 2;
		}
	}

	double local_func_value = f;
	//all reduce all processor's function value
	MPI_Barrier (MPI_COMM_WORLD);
	int info = MPI_Allreduce(&local_func_value, &f, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	CHKERRQ(info);

	//all reduce all processor's gradient
	MPI_Barrier(MPI_COMM_WORLD);
	info = MPI_Allreduce(local_gradient, gradient, dim, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
	CHKERRQ(info);

	return 0;
}

// Run up to max_iter L-BFGS iterations, logging per-iteration likelihood,
// training accuracy, and (when heldout data exists) heldout accuracy.
// 'eps' is the convergence tolerance reported to the log; theta_all holds
// the resulting parameters.
void LBFGSTrainer::train(int max_iter, double eps) {
	init_trainer();

	int n = n_theta;
	int m = 5; // number of L-BFGS correction pairs kept
	double* g = new double[n];
	double* x = theta_all; // optimize theta_all in place, starting from 0
	fill(x, x + n, 0.0);

	double f = 0.0;
	int correct;
	double heldout_acc = -1.0;

	LBFGSOptimizer optimizer;
	optimizer.init(n, m);

	info("Starting L-BFGS iterations...");
//	info("Number of Predicates:  %d", m_params->size());
	info("Number of Class:    %ld", n_class);
	info("Number of Parameters:  %d", n);
	info("Number of Corrections: %d", m);
	info("Tolerance:             %E", eps);
//	info("Gaussian Penalty:      %s", (m_sigma2 ? "on" : "off"));

	info("iter  eval     loglikelihood  training accuracy   heldout accuracy");
	info("==================================================================");

	while (optimizer.iter < max_iter) {
		// calculate loglikelihood and gradient
		correct = 0;
		f = 0.0;

		// NOTE(review): calc_function takes 'correct' by value, so the
		// training accuracy printed below is always 0 — the parameter
		// should be a reference in the header declaration.
		calc_function(n_theta, x, f, g, correct);

		if (n_test_data > 0)
			heldout_acc = heldout_accuracy();

		int ret = optimizer.optimize(n, x, f, g, false, 1.0);

		if (ret < 0) {
			optimizer.clear();
			info("lbfgs routine stops with an error");
			break;
		} else if (ret == 0) {
			// BUG FIX: corrected typos in the log message
			// ("terminats succesfully" -> "terminates successfully")
			info("Training terminates successfully ");
			break;
		} else {
			// continue evaluations
			double acc = correct / double(n_train_data);
			if (n_test_data > 0) {
				info("%3d\t%E\t  %.3f%%\t     %.3f%%",
						optimizer.iter, (-f / n_train_data), (acc * 100), (heldout_acc * 100));
			} else {
				info("%3d\t%E\t  %.3f%%\t     %s", optimizer.iter, (-f / n_train_data), (acc * 100), "N/A");
			}
		}
	}

	if (optimizer.iter >= max_iter)
		info("Maximum numbers of %d iterations reached ", optimizer.iter);
	info("Highest log-likelihood: %E", (-f / n_train_data));

	// BUG FIX: 'g' was leaked on every call.
	delete[] g;
}

// return the oid of best outcome
// Compute the class distribution q(y|x) for 'sample' into 'probs' via a
// softmax over per-class linear scores, and return the id of the most
// probable class.
// NOTE(review): 'sample' is passed by value — one copy per call. Changing
// it to 'const SampleVector&' requires touching the header declaration too.
size_t LBFGSTrainer::eval(SampleVector sample, vector<double>& probs) const {
	if (probs.size() != n_class)
		probs.resize(n_class);

	fill(probs.begin(), probs.end(), 0.0);

	// Linear scores: probs[y] = sum_i theta[y, f_i] * value_i
	size_t sampe_len = sample.size();
	for (size_t i = 0; i < sampe_len; ++i) {
		int fid = sample[i].id;
		float fval = sample[i].value;

		vector<pair<size_t, size_t> >& param = param_map->at(fid);
		for (size_t k = 0; k < param.size(); ++k) {
			size_t class_id = param[k].first;
			size_t dim_id = param[k].second;
			probs[class_id] += theta_all[dim_id] * fval;
		}
	}

	/* For the rationale behind subtracting max_prob from the log-probabilities
	 see maxentmodel.cpp:maxent::MaxentModel::eval_all */

	// Find the maximum log-prob.
	// BUG FIX: numeric_limits<double>::min() is the smallest POSITIVE
	// normalized double (~2.2e-308), not the most negative value. When every
	// class score was negative the old code never updated best_oid and always
	// returned class 0. -max() is the true lower bound.
	double max_prob = -numeric_limits<double>::max();
	size_t best_oid = 0;
	for (size_t oid = 0; oid < n_class; ++oid) {
		if (probs[oid] >= max_prob) {
			max_prob = probs[oid];
			best_oid = oid;
		}
	}

	// Softmax normalization; subtracting the max keeps the exponents in
	// (-inf, 0] so exp() cannot overflow.
	double sum = 0.0;
	for (size_t oid = 0; oid < n_class; ++oid) {
		probs[oid] = exp(probs[oid] - max_prob);
		sum += probs[oid];
	}

	for (size_t oid = 0; oid < n_class; ++oid) {
		probs[oid] /= sum;
	}

	return best_oid;
}

