// third party c includes
#include <mpi.h>
#include <math.h>
#include <time.h>
#include <float.h>

// third party c++ includes
#include <iostream>
#include <iomanip>
#include <sstream>
#include <assert.h>
#include <strings.h>

// local includes
#include "crf_minable.h"
#include "param.h"
#include "data.h"
#include "sparse_array.h"
#include "lbfgs.h"
#include "util.h"
#include "bp.h"

inline double log_sum(double a, double b);

//////////////////
//
// CRFMinable
//
//////////////////
// NOTE(review): X (= e) appears unused anywhere in this file -- TODO confirm
// against other translation units before removing.
const double X=exp(1); // a replacement for exp in probabilities to avoid
						// overflow

// Report progress for one optimisation iteration.  In debug mode the report
// goes straight to stdout; otherwise a condensed one-line summary is emitted
// through pp() and the per-iteration bookkeeping in m_it_data is updated.
void CRFMinable::set_iteration(int iteration, double objective, double acc)
{
	if (m_debug)
	{
		std::cout << std::endl << std::setprecision(6) 
			<< "ITERATION: " << iteration << " OBJECTIVE: " 
			<< objective << " ACCURACY: " << acc << std::endl;
		return;
	}

	// remember the previous objective so deltas could be reported later
	m_it_data.last_objective = m_it_data.objective;
	m_it_data.objective = objective;

	std::stringstream summary;
	summary << std::setprecision(3) 
		<< "ITERATION: " << iteration  << " EVALS: " 
		<< m_it_data.function_evals 
		<< " OBJECTIVE: " << objective
		<< " ACCURACY: " << acc*100 << "%";
	pp(summary.str());

	// reset the per-iteration timer for the next round
	m_it_data.iteration_secs = 0;
}

// Run distributed L-BFGS training.  Every rank computes the objective and
// gradient over its share of the data; the per-rank contributions are summed
// onto rank 0 with MPI_Reduce, rank 0 takes one L-BFGS step, and the updated
// weights plus the optimiser status flag are broadcast back to all ranks.
// Loops until the optimiser converges (iflag == 0), errors (iflag < 0, which
// aborts), or m_iterations iterations have been taken.
void CRFMinable::minimise()
{
	// precompute empirical expectations for the gradient calculations
	_set_emperical_kernel_count();

	int len_weights = m_param->n;
	lbfgs_t *opt = lbfgs_create(len_weights,m_param->lbfgs_vecs,m_param->eps);
	if (!opt)
	{
		std::cerr << "Unable to initialise L-BFGS module" << std::endl;
		exit(1);
	}

	double	f=0; // objective
	boost::shared_array<double> g(new double[len_weights]); // gradient
	bzero(g.get(), sizeof(double)*len_weights);
    int curr_iteration = 0;
    for(;m_iterations > 0 && curr_iteration < m_iterations;)
	{
		double acc = objective_and_gradient(m_weights, f, g);
        // L-BFGS status: set by rank 0 below, then broadcast to all ranks.
        // Initialised so non-root ranks never hold an indeterminate value.
        int iflag = 0;

        // reduce the gradient and objective onto the root process
        double *root_gradient=0;
        if (m_rank==0) 
        {
            root_gradient = new double[len_weights];
            bzero(root_gradient, sizeof(double)*len_weights);
        }
        double root_f=0;
        if (MPI_SUCCESS != MPI_Reduce(g.get(), root_gradient, len_weights, 
                                      MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD))
            MPI_Abort(MPI_COMM_WORLD,1);
        if (MPI_SUCCESS != MPI_Reduce(&f, &root_f, 1, 
                                      MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD))
            MPI_Abort(MPI_COMM_WORLD,1);
        if (m_rank==0) 
        {
            iflag = lbfgs_run(opt, m_weights.get(), &root_f, root_gradient);
            // BUGFIX: allocated with new[], so it must be released with
            // delete[] -- scalar delete on an array is undefined behaviour
            delete [] root_gradient;
        }
       
        // Broadcast the new weights from the root to all other processes
        if (MPI_SUCCESS != MPI_Bcast(
                m_weights.get(), len_weights, MPI_DOUBLE, 0, MPI_COMM_WORLD))
        {
            /*if (m_rank == 0)*/ std::cerr << "\n\nMPI_BCAST failed" << std::endl;
            MPI_Abort(MPI_COMM_WORLD,1);
        }

        // Broadcast the current lbfgs state
        int result = MPI_Bcast(&iflag, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (result != MPI_SUCCESS)
        {
            if (m_rank == 0) std::cerr << "\n\nMPI_BCAST failed" << std::endl;
            MPI_Abort(MPI_COMM_WORLD,1);
        }

        MPI_Barrier(MPI_COMM_WORLD);
        if (iflag < 0) 
        {
            // optimiser failure: clean up on the root and abort every rank
            if (m_rank == 0)
            {   
                lbfgs_destory(opt);
                std::cerr << "\n\nlbfgs routine stops with an error" << std::endl;
            }
            MPI_Abort(MPI_COMM_WORLD,1);
        } 
        else if (iflag == 0) 
        {
            // converged: all ranks exit the loop together
            if (m_rank == 0)
            {
                set_iteration(opt->niter, root_f, acc);
                std::cout << "\n\nConverged after " << opt->niter 
                    << " iterations" << std::endl;
            }
            break;
        } 
        else if (m_rank == 0 && opt->niter % 10 == 0)
                set_iteration(opt->niter, root_f, acc);

        // keep every rank's loop counter in step with the root's optimiser
        if (m_rank == 0) curr_iteration = opt->niter;
        if (MPI_SUCCESS != MPI_Bcast(
                    &curr_iteration, 1, MPI_INT, 0, MPI_COMM_WORLD))
        {
            std::cerr << "\n\nMPI_BCAST failed" << std::endl;
            MPI_Abort(MPI_COMM_WORLD,1);
        }
    }

    if (m_rank == 0) lbfgs_destory(opt);
    if (m_rank == 0) pp("\n",true);
}

// Construct a CRF trainer over the given event stream and parameters.
// Caches this process's MPI rank and the communicator size, enables debug
// tracing only on rank 0 (when param->debug is set), and zero-initialises a
// weight vector of length param->n.  The iteration cap is hard-coded to 30.
// NOTE(review): `param` is a borrowed pointer -- it must outlive this object.
CRFMinable::CRFMinable(EventsPtr data, GPParameter *param)
: m_iterations(30), m_param(param), m_data(data)
{
	MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &m_size);

	// only the root rank ever prints debug trace
	m_debug = m_rank == 0 && param->debug;

	if(m_debug) 
		std::cout << "-- Creating CRFMinable: " << m_param->m << " " 
			<< m_param->k << " " << m_param->n << std::endl;

	// initialise the weights vector
	int len_weights = m_param->n;
	m_weights = boost::shared_array<double>(new double [len_weights]);
	bzero(m_weights.get(), sizeof(double)*len_weights);

	if (m_debug) std::cout << "-- done" << std::endl;
}

// Nothing to release explicitly: heap data is held through
// boost::shared_array members, which clean themselves up.
CRFMinable::~CRFMinable() 
{
}

// Compute the regularised negative log-likelihood (into `objective`) and its
// gradient (into `gradient`) over this rank's portion of the data, returning
// the accuracy of max-marginal decoding (correct positions / total length).
// Rank 0 seeds both with the Gaussian-prior and empirical-count terms;
// every other rank starts from zero so that the MPI_Reduce performed by the
// caller (minimise()) sums each global term exactly once.
// NOTE(review): the `alpha` parameter is never read here -- the body uses
// m_weights directly; confirm before relying on it.
double CRFMinable::objective_and_gradient(
	boost::shared_array<double> alpha, double &objective, 
	boost::shared_array<double> gradient)
{
	if (m_debug)  
		std::cout << "-- Calculating objective and gradient" << std::endl;
	time_t start_t = time(0);

	objective = 0;
    if (m_rank == 0)
    {
		// global gradient contributions
		// inverse variance of the Gaussian prior (0 disables the prior)
		double inv_ss = 0.0;
		if (m_param->sigma_sqr != 0.0)
			inv_ss = 1.0 / m_param->sigma_sqr;

		// add the constant terms to G
		// m_emperical_count was accumulated NEGATED (see
		// _set_emperical_kernel_count), so adding the model expectations
		// later yields E_model[f] - E_empirical[f] plus the prior term
		for(size_t x=0; x < m_param->n; ++x)
			gradient[x] = m_emperical_count[x] + inv_ss*m_weights[x];

		// global prior contributions
		double theta_norm = 0.0;
		for(size_t x=0; x < m_param->n; ++x)
			theta_norm += m_weights[x]*m_weights[x];
		objective = 0.5 * inv_ss * theta_norm; // the prior
	}
	else
		bzero(gradient.get(), sizeof(double)*m_param->n);

	double correct=0, total=0, acc=0;
	// stream through this rank's events, accumulating objective, marginals
	// and gradient per event
	EventPtr e = m_data->next();
	while (e.get() != 0)
	{
		int num_labels = e->nodes[0][0]->features.size();
		// posterior marginals indexed [position][order][prev label][label]
		Array4d marginals(
			boost::extents[e->length][e->order][num_labels][num_labels]);

		// calculate the log-likelihood with gaussian prior
		correct += m_calculate_objective(e,objective,marginals);
		total += e->length;

		// calculate gradients
		m_calculate_gradient(e, marginals, gradient); 

		e = m_data->next();
	}
	acc = correct / total;

//	  ////////////////////////////////////
//	  std::cout << "Probs: " << std::endl;
//	  for (unsigned int i=0; i < 9; ++i)
//		  std::cout << " " << probs[i];
//	  std::cout << std::endl;
//	  ////////////////////////////////////
//	  //////////////////////////////////////
//	  std::cout << "Gradient: " << std::endl;
//	  for (unsigned int i=0; i < 106; ++i)
//		  std::cout << " " << gradient[i];
//	  std::cout << std::endl;
//	  //////////////////////////////////////
//	  //////////////////////////////////////
//	  std::cout << "Weights: " << std::endl;
//	  for (unsigned int i=0; i < 106; ++i)
//		  std::cout << " " << m_weights[i];
//	  std::cout << std::endl;
//	  //////////////////////////////////////
	
	// record some trace data
	time_t end_t = time(0);
	double elapsed_time = difftime(end_t,start_t);
	m_it_data.iteration_secs += (int) elapsed_time;
	m_it_data.total_secs += (int) elapsed_time;
	m_it_data.function_evals += 1;

	if (m_debug) 
		std::cout << "-- done in: " <<	elapsed_time << " secs" << std::endl;

	return acc;
}

// Accumulate the empirical (gold-standard) feature counts over this rank's
// data, then MPI_Reduce them onto rank 0 into m_emperical_count.  Counts are
// accumulated NEGATED (-=) so that objective_and_gradient can simply add the
// model expectations to obtain the gradient E_model - E_empirical.
// On non-root ranks m_emperical_count is left unallocated; passing its null
// pointer as the MPI_Reduce receive buffer is legal since only the root
// receives.
void CRFMinable::_set_emperical_kernel_count( void )
{
	if (m_debug) std::cout << "  ->setting emperical counts" << std::endl;
	time_t start_t = time(0);

	boost::shared_array<double> local_emperical_count(new double[m_param->n]);
	bzero(local_emperical_count.get(), sizeof(double)*m_param->n);

	// for each event
	SparseArrayPtr sa;
	EventPtr e = m_data->next();
	while (e.get() != 0)
	{
		// count gold node features
		int previous_gold = -1;
		for (int i=0; i < (int)e->length; ++i)
		{
			// check each segement ending at this index to see if
			// it's gold
			for (int o=0; (i-o)>=0 &&  o<(int)e->order; ++o)
			{
				int start = i-o;
				NodePtr np = e->nodes[o][start];
				if (np->gold >= 0)
				{
					// add the node feature contribution
					sa = np->features[np->gold];
					for(int j=0; j < sa->active(); ++j)
						local_emperical_count[sa->_index[j]] -= sa->_values[j];
					// add the clique feature contribution
					if (start>0)
					{
						assert(previous_gold >= 0);
						BaseCliquePtr cp = e->cliques[o][start-1];
						sa = cp->labelling(previous_gold,np->gold);
						for(int j=0; j < sa->active(); ++j)
							local_emperical_count[sa->_index[j]] -= sa->_values[j];
					}
					// can only have one gold node per index
					previous_gold = np->gold;
					break;
				}
			}
		}
		e = m_data->next();
	}
 
	if (m_rank == 0)
		m_emperical_count = boost::shared_array<double>(new double[m_param->n]);
	// NOTE: the MPI_Abort below is the body of this if (despite the
	// misleading indentation)
	if (MPI_SUCCESS != MPI_Reduce(local_emperical_count.get(), 
		m_emperical_count.get(), m_param->n, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD))
	MPI_Abort(MPI_COMM_WORLD,1);

//	//////////////////////////////////////////////
//	std::cout << "Emperical Counts:" << std::endl;
//	for (unsigned int i=0; i < m_param->n && i < 10; ++i)
//		std::cout << " " << m_emperical_count[i];
//	std::cout << std::endl;
//	//////////////////////////////////////////////

	time_t end_t = time(0);
	double elapsed_time = difftime(end_t,start_t);
	if (m_debug) 
		std::cout << "\n <-done in: " << elapsed_time << " secs" << std::endl;
}

// Compute this event's contribution to the negative log-likelihood (added
// into R) and fill `marginals` with the posterior segment-labelling
// probabilities needed by m_calculate_gradient.  Adds the log partition
// function to R and subtracts the gold labelling's scores.  Returns the
// number of positions whose max-marginal label matches the gold label.
double 
CRFMinable::m_calculate_objective(EventPtr e, double &R, Array4d &marginals)
{
	// calculate the log-likelihood with gaussian prior
	int correct=0;//, total=0;
	int event_length = e->length;
	int num_labels = e->nodes[0][0]->features.size();

	// calculate the forward backward matrices
	Array2d alpha(boost::extents[event_length][num_labels]);
	Array2d beta(boost::extents[event_length][num_labels]);

	// calculate the dot-products
	Array3d node_phi(boost::extents[event_length][e->order][num_labels]);
	Array4d clique_phi(boost::extents[event_length-1][e->order][num_labels][num_labels]);
//	m_dot_products(e, node_phi, clique_phi);
	BP::calculate_dot_products(e, m_weights, node_phi, clique_phi);
	
	// do forward backward
//	m_forward_backward(e, node_phi, clique_phi, alpha, beta, false);
	BP::forward_backward(e, node_phi, clique_phi, alpha, beta, false);

//	//////////////////////////////////////////////
//	std::cout << "Dots:" << std::endl;
//	for (unsigned int i=0; i < 9; ++i)
//		std::cout << " " << dot_products[i];
//	std::cout << std::endl;
//	//////////////////////////////////////////////

	/////////////////////////////////////////////////////
	// calculate global Z
	// log-sum over the final forward column, starting from the log-space
	// "zero" -FLT_MAX
	double log_partition = -FLT_MAX;
	for (int l=0; l<num_labels; ++l)
		log_partition = log_sum(log_partition,alpha[event_length-1][l]);
	R += log_partition; // the global normalisation

	/////////////////////////////////////////////////////
	/////////////////////////////////////////////////////
	// calculate the marginals
	int previous_gold=-1;
	for (int n=0; n<event_length; ++n)
	{
//		if (max_a == gold.first && max_b == gold.second) correct++;
//		total++;
		for (int o=0; (n-o)>=0 && o<(int)e->order; ++o)
		{
			int start = n-o;
			int gold = e->nodes[o][start]->gold;
			int best_tag = 0; double best_marginal = - DBL_MAX;
			for (int b=0; b<num_labels; ++b)
			{
				double node_marginal = node_phi[n][o][b]+beta[n][b];

				// if this node has no predecessor, only record the node
				// marginal
				double this_marginal=0;
				if (start==0)
				{
					marginals[n][o][0][b] = exp(node_marginal-log_partition);
//					std::cout << marginals[n][o][0][b] << " ";
					this_marginal += marginals[n][o][0][b];
				}
				// otherwise we need to include alpha and clique contributions 
				// for each previous labelling
				else
					for (int a=0; a<num_labels; ++a)
					{
						// add the marginal to the array
						marginals[n][o][a][b] = exp(alpha[start-1][a] 
							+ clique_phi[n-1][o][a][b] 
							+ node_marginal - log_partition);
//						std::cout << marginals[n][o][a][b] << " ";
						this_marginal += marginals[n][o][a][b];
					}
//				std::cout << std::endl;
				// track the label with the highest total marginal mass for
				// the accuracy count below
				if (this_marginal > best_marginal)
				{
					best_marginal = this_marginal;
					best_tag = b;
				}
				// handle the gold labelling's contribution to the objective
				if (gold == b)
				{
					R -= node_phi[n][o][gold];
					if (start > 0)
					{
						assert(previous_gold >= 0);
						assert(previous_gold < num_labels);
						R -= clique_phi[n-1][o][previous_gold][gold];
					}
					// note: updated inside the b-loop, as soon as the gold
					// label is reached
					previous_gold = gold;
				}
			}
			if (best_tag == gold) correct++;
//			std::cout << "---" << std::endl;
		}
//		std::cout << "+++" << std::endl;
	}
	/////////////////////////////////////////////////////

//	return (double)correct/(double)total;
	return correct;
}

// Accumulate the model-expected feature counts for one event into the
// gradient G.  Every active sparse feature is weighted by the posterior
// marginal of the labelling it fires under; the marginals array was filled
// in by m_calculate_objective.
void 
CRFMinable::m_calculate_gradient(EventPtr e, Array4d &marginals, 
	boost::shared_array<double> G)
{
	SparseArrayPtr feats;
	int num_labels = e->nodes[0][0]->features.size();
	for (int pos=0; pos<(int)e->length; ++pos)
		for (int ord=0; (pos-ord)>=0 && ord<(int)e->order; ++ord)
		{
			int seg_start = pos-ord;
			NodePtr node = e->nodes[ord][seg_start];
			for (int cur=0; cur<num_labels; ++cur)
			{
				// segment starting the sequence: only the node features
				// contribute (there is no predecessor labelling)
				if (seg_start==0)
				{
					feats = node->features[cur];
					for(int f=0; f < feats->active(); ++f)
						G[feats->_index[f]] += marginals[pos][ord][0][cur]*feats->_values[f];
					continue;
				}

				// interior segment: sum over every possible previous label,
				// adding both node and clique feature expectations
				for (int prev=0; prev<num_labels; ++prev)
				{
					// node features expectations
					feats = node->features[cur];
					for(int f=0; f < feats->active(); ++f)
						G[feats->_index[f]] 
							+= marginals[pos][ord][prev][cur]*feats->_values[f];

					// clique features expectations
					BaseCliquePtr clique = e->cliques[ord][seg_start-1];
					feats = clique->labelling(prev,cur);
					for(int f=0; f < feats->active(); ++f)
						G[feats->_index[f]] 
							+= marginals[pos][ord][prev][cur]*feats->_values[f];
				}
			}
		}
}

// Fill nodes[n][o][b] and cliques[n-1][o][a][b] with the weight/feature dot
// products (log potentials) for every candidate segment labelling of this
// event.
// NOTE(review): m_calculate_objective now appears to use
// BP::calculate_dot_products instead of this method -- confirm before
// removing.
void
CRFMinable::m_dot_products(EventPtr e, Array3d &nodes, Array4d &cliques)
{
	int event_length = e->length;
	int num_labels = e->nodes[0][0]->features.size();
	SparseArrayPtr feats;
	for (int pos=0; pos < event_length; ++pos)
	{
		for (int ord=0; (pos-ord)>=0 && ord<(int)e->order; ++ord)
		{
			int seg_start = pos-ord;
			NodePtr node = e->nodes[ord][seg_start];
			for (int cur=0; cur<num_labels; ++cur)
			{
				// node potential: weights . node features
				double score=0.0;
				feats = node->features[cur];
				for(int f=0; f < feats->active(); ++f)
					score += m_weights[feats->_index[f]]*feats->_values[f];
				nodes[pos][ord][cur] = score;

				// clique potentials exist only for segments that have a
				// predecessor
				if (seg_start!=0)
				{
					BaseCliquePtr clique = e->cliques[ord][seg_start-1];
					for (int prev=0; prev<num_labels; ++prev)
					{
						score=0.0;
						feats = clique->labelling(prev,cur);
						for(int f=0; f < feats->active(); ++f)
							score += m_weights[feats->_index[f]]*feats->_values[f];
						cliques[pos-1][ord][prev][cur] = score;
					}
				}
			}
		}
	}
}

// Log-space forward-backward over the lattice: fills `alpha` with forward
// log scores (summed over all segment orders ending at each index) and
// `beta` with backward log scores.  -FLT_MAX is used as the log-space zero.
// When `print` is set, both tables are dumped to stdout for debugging.
// NOTE(review): m_calculate_objective now appears to call
// BP::forward_backward instead of this method -- confirm before removing.
void
CRFMinable::m_forward_backward(EventPtr v, Array3d &nodes, Array4d &cliques,
		Array2d &alpha, Array2d &beta, bool print)
{
	// assume every labelled clique is the same size
	size_t width = v->nodes[0][0]->features.size();
	size_t num_nodes = v->length;

	/////////////////////////////////////////////////////////////////
	// initialisation step
	// start with a uniform distribution over initial labels
	// (log 1 = 0 for the final beta column; alpha[0] is initialised in
	// the forward loop below)
	for (size_t i=0; i < width; ++i)
	{
//		alpha[0][i] = 0.0; 
		beta[num_nodes-1][i] = 0.0; 
	}

	/////////////////////////////////////////////////////////////////

	/////////////////////////////////////////////////////////////////
	// main sum product(forward) loop
	if (print) std::cout << "Forward pass:" << std::endl;
	for (int i=0; i < (int)num_nodes; ++i)
	{
		// start each column at the log-space zero before accumulating
		for (size_t b=0; b < width; ++b)
			alpha[i][b] = -FLT_MAX;

		// sum contributions from every segment order ending at index i
		for (int o=0; (i-o)>=0 && o<(int)v->order; ++o)
		{
			int start = i-o;
			for (size_t b=0; b < width; ++b)
			{
				// node_contribution only
				if (start == 0)
				{
					alpha[i][b] = log_sum(alpha[i][b], nodes[i][o][b]);
				}	
				// clique and nodes contribution
				else
				{
					for (size_t a=0; a < width; ++a)
						alpha[i][b] = log_sum(alpha[i][b],
							alpha[start-1][a]+cliques[i-1][o][a][b]+nodes[i][o][b]);
				}
			}
		}
		if (print)
		{
			for (size_t j=0; j < width; ++j)
				std::cout << " " << std::setprecision(3) << std::setw(6) 
					<< alpha[i][j];
			std::cout << std::endl;
		}
	}
	if (print) std::cout << std::endl;

	// main sum product(backward) loop
	if (print) std::cout << "Backward pass:" << std::endl;
	if (print)
	{
		for (size_t j=0; j < width; ++j)
			std::cout << " " << std::setprecision(2) << std::setw(6) 
				<< beta[num_nodes-1][j];
		std::cout << std::endl;
	}
	// walk backwards from the penultimate index, summing over every
	// segment order that starts at index i
	for (int i=num_nodes-2; i >= 0; --i)
	{
		for (size_t a=0; a < width; a++)
		{
			beta[i][a] = -FLT_MAX;
			for (int o=0; (i+o) < (int)num_nodes-1 && o<(int)v->order; ++o)
			{
				int end = i+o+1;
				for (size_t b=0; b < width; b++)
				{
					beta[i][a] = log_sum(beta[i][a],
						beta[end][b]+cliques[end-1][o][a][b]+nodes[end][o][b]);
				}
			}
		}
		if (print)
		{
			for (size_t j=0; j < width; ++j)
					std::cout << " " << std::setprecision(3) << std::setw(6) << beta[i][j];
			std::cout << std::endl;
		}
	}
	if (print) std::cout << std::endl;

	return;
}

///////////////////////////////////////////////////////////////////////////////
// log(exp(a) + exp(b)) computed without overflow: the larger argument is
// factored out so exp() is only ever applied to a non-positive value.
// log1p() is used instead of log(1 + x) for better accuracy when the
// smaller term is many orders of magnitude below the larger one.
///////////////////////////////////////////////////////////////////////////////
inline double
log_sum(double a, double b)
{
	if (a>b) return a + log1p(exp(b-a));
	else	 return b + log1p(exp(a-b));
}
