#include "mpi_trees.h"

#ifdef ENABLE_MPI_TREES

//
// returns the mpi_message (an integer) that was broadcast
//
//
// Broadcasts mpi_message (and, unless it is GOODBYE, the packed tree+model)
// from rank 0 to all workers. Everyone — root included — unpacks the buffer
// so that all processes hold identical tree/model state afterwards.
//
// Returns the mpi_message (an integer) that was broadcast.
//
int Broadcast_mpi_msg(t_tree *tree, int mpi_message)
{
#ifdef TIME
	// BUGFIX: clock() returns clock_t, not time_t; storing it in time_t
	// could truncate on platforms where the two types differ.
	clock_t time_broadcast_mpi_msg_start = clock();
#endif
	int packed_size = 0;
	char *packbuf = NULL;
	if (Global_myRank == 0 && mpi_message != GOODBYE)
	{	// Root packs into a generously-sized scratch buffer; the real
		// packed size is broadcast below so workers can size exactly.
		int buffersize = 1000000;
		packbuf = (char *)mCalloc(buffersize,sizeof(char));
		packed_size = Pack_mpi_msg(tree, packbuf, buffersize, mpi_message);
	}

	if (Global_myRank == 0 && mpi_message == GOODBYE)
	{	// GOODBYE is signalled in-band through the size field; no payload.
		packed_size = GOODBYE;
	}

	MPI_Bcast( &packed_size, 1, MPI_INT, 0, MPI_COMM_WORLD ); // send the size of the packbuf

	if (packed_size == GOODBYE)
	{	// Nothing was allocated on this path (neither root nor workers).
		return GOODBYE;
	}

	// The workers now build a packbuf with the appropriate size
	if (Global_myRank != 0)
	{
		// Build a buffer with just enough size.
		packbuf = (char *)mCalloc(packed_size,sizeof(char));
	}

	// Do the broadcast:
	MPI_Bcast( packbuf, packed_size, MPI_PACKED, 0, MPI_COMM_WORLD ); // now send the packbuf

	// Everyone (including the root process) unpacks the message
	// to ensure that everyone has the same tree, model, etc.
	mpi_message = Unpack_mpi_msg(packbuf, tree, packed_size);

	if (mpi_message == GOODBYE)
	{	// BUGFIX: packbuf was leaked on this early-return path (both root
		// and workers have an allocation live at this point).
		Free(packbuf);
		return mpi_message;
	}

	//printf("\n. mpi_boot 530 - Worker %d received mpi_message %d", Global_myRank, mpi_message);
	//fflush(NULL);

	Set_Model_Parameters(tree->mod);
	Free(packbuf);

#ifdef TIME
	time_broadcast_mpi_msg += clock() - time_broadcast_mpi_msg_start;
#endif

	return mpi_message;
}


//
// Given a tree, this method packs the entire tree and model into an array of floats.
// This is necessary for transmission via MPI.
//
//
// Given a tree, this method packs the entire tree and model into an MPI
// packed buffer for transmission. The pack order MUST mirror the unpack
// order in Unpack_mpi_msg exactly, field for field.
//
// Returns the number of bytes packed into buffer.
//
int Pack_mpi_msg(t_tree *tree, char* buffer, int buffersize, int mpi_message)
{
	int packsize = 0;
	MPI_Pack(&mpi_message, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);

	// Serialize the topology as a Newick string (length-prefixed, no NUL).
#ifdef ENABLE_MBL
	char *s_tree = (char *)Write_Tree(tree,2);
#endif
#ifndef ENABLE_MBL
	char *s_tree = (char *)Write_Tree(tree);
#endif
	int s_tree_len = (int)strlen(s_tree);
	MPI_Pack(&s_tree_len, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(s_tree, s_tree_len, MPI_CHAR, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	Free(s_tree);

	MPI_Pack(&tree->n_pattern, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->both_sides, 1, MPI_INT, buffer, buffersize,&packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->init_lnL, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->c_lnL, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);

	// pack the model:
	MPI_Pack(&tree->mod->ns, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->n_catg, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->n_rr_branch,1,MPI_INT,buffer, buffersize, &packsize, MPI_COMM_WORLD);
#ifdef ENABLE_MBL
	// BUGFIX: Unpack_mpi_msg unpacks blprops_init under ENABLE_MBL, but this
	// pack was commented out, desynchronizing every field after this point.
	MPI_Pack(&tree->mod->blprops_init,1,MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
#endif
	MPI_Pack(&tree->mod->whichmodel, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	if (tree->io->datatype == NT && (tree->mod->whichmodel == GTR || tree->mod->whichmodel == CUSTOM))
	{
		MPI_Pack(tree->mod->rr_num, 6, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
		// NOTE(review): &rr / &rr_val pack the field's storage directly; if these
		// are pointer fields this transmits an address, not data — confirm types.
		MPI_Pack(&tree->mod->rr, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
		MPI_Pack(&tree->mod->rr_val, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	}
#ifdef ENABLE_MBL
	MPI_Pack(tree->mod->bl_props, tree->mod->n_l, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
#endif
	MPI_Pack(&tree->mod->invar, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->gamma_median, 1, MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	int ns = tree->mod->ns;
	MPI_Pack(tree->mod->pi, ns, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->gamma_r_proba, tree->mod->n_catg, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->gamma_rr, tree->mod->n_catg, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->kappa, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->lambda, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->alpha, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(&tree->mod->pinvar, 1, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	// Pij_rr is sized by whichever category count is in effect (rate
	// categories take precedence over branch-length categories).
	int ncats = 1;
#ifdef ENABLE_MBL
	if (tree->mod->n_l > 1){
		ncats = tree->mod->n_l;
	}
#endif
	if (tree->mod->n_catg > 1){
		ncats = tree->mod->n_catg;
	}
	MPI_Pack(tree->mod->Pij_rr, ncats*ns*ns,MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->qmat,ns*ns, MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);

	// pack the Eigen object
	MPI_Pack(&tree->mod->eigen->size,1,MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->q,ns*ns,MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->space_int,2*ns,MPI_INT, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->e_val,ns,MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->e_val_im,ns,MPI_DOUBLE, buffer, buffersize,&packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->r_e_vect,ns*ns,MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->r_e_vect_im,ns*ns,MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);
	MPI_Pack(tree->mod->eigen->l_e_vect,ns*ns,MPI_DOUBLE, buffer, buffersize, &packsize, MPI_COMM_WORLD);

	return packsize;
}

//
// Given a packed double buffer from MPI messaging, this method fills the tree.
//
// Returns the integer mpi_message that was included in the message.
//
//
// Given a packed buffer from MPI messaging, this method fills the tree.
// The unpack order MUST mirror the pack order in Pack_mpi_msg exactly.
//
// Returns the integer mpi_message that was included in the message.
//
int Unpack_mpi_msg(char* buffer, t_tree* tree, int packsize)
{
#ifdef ENABLE_MBL
	int n_l = tree->mod->n_l;
#endif
#ifndef ENABLE_MBL
	int n_l = 1;
#endif

	int position = 0;
	int mpi_message;
	MPI_Unpack(buffer, packsize, &position, &mpi_message, 1, MPI_INT,  MPI_COMM_WORLD);

	int s_tree_len;
	MPI_Unpack(buffer, packsize, &position, &s_tree_len, 1, MPI_INT, MPI_COMM_WORLD);

	// BUGFIX: allocate s_tree_len+1 bytes. The packed string carries no NUL
	// terminator, so the original exact-size allocation left the string
	// unterminated and Read_Tree_Nomalloc would read past the buffer.
	// mCalloc zero-fills, so the final byte is the terminator.
	char *s_tree = (char *)mCalloc(s_tree_len+1,sizeof(char));
	MPI_Unpack(buffer, packsize, &position, s_tree, s_tree_len, MPI_CHAR, MPI_COMM_WORLD);

	// We clear this stuff here, and rebuild it in Prepare_Tree_For_Lk
	Free_Spr_List(tree);
	Free_One_Spr(tree->best_spr);
	Free_Tree_Pars(tree);
	Free_Tree_Lk(tree);
	Free_Triplet(tree->triplet_struct);
	Read_Tree_Nomalloc(s_tree, tree, n_l);
	Free(s_tree);

	MPI_Unpack(buffer, packsize, &position,&tree->n_pattern, 1, MPI_INT,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->both_sides, 1, MPI_INT,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->init_lnL, 1, MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->c_lnL, 1, MPI_DOUBLE,  MPI_COMM_WORLD);

	// The message type implies how work is partitioned across ranks.
	if (mpi_message == GO_LK)
	{	tree->io->parallelism_type = PARALLEL_OVER_SITES;
	}
	if (mpi_message == GO_NNI || mpi_message == GO_DFUNC)
	{	tree->io->parallelism_type = PARALLEL_OVER_SWAPS;
	}

	// unpack the model. . .
	MPI_Unpack(buffer, packsize, &position,&tree->mod->ns, 1, MPI_INT,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->mod->n_catg, 1, MPI_INT,  MPI_COMM_WORLD);
#ifdef ENABLE_MBL
	// n_l is a local property; restore the value saved before unpacking.
	tree->mod->n_l = n_l;
#endif
	MPI_Unpack(buffer, packsize, &position,&tree->mod->n_rr_branch,1,MPI_INT, MPI_COMM_WORLD);
#ifdef ENABLE_MBL
	MPI_Unpack(buffer, packsize, &position,&tree->mod->blprops_init,1,MPI_INT, MPI_COMM_WORLD);
#endif

	// . . and then fill the model will real values.
	MPI_Unpack(buffer, packsize, &position,&tree->mod->whichmodel, 1, MPI_INT,  MPI_COMM_WORLD);

	if (tree->io->datatype == NT && (tree->mod->whichmodel == GTR ||tree->mod->whichmodel == CUSTOM))
	{
		MPI_Unpack(buffer, packsize, &position,tree->mod->rr_num, 6, MPI_INT,  MPI_COMM_WORLD);
		// NOTE(review): if rr / rr_val are pointer fields this overwrites the
		// pointer with the sender's address — confirm the field types.
		MPI_Unpack(buffer, packsize, &position,&tree->mod->rr, 1, MPI_DOUBLE, MPI_COMM_WORLD);
		MPI_Unpack(buffer, packsize, &position,&tree->mod->rr_val, 1, MPI_DOUBLE,  MPI_COMM_WORLD);
	}

#ifdef ENABLE_MBL
	MPI_Unpack(buffer, packsize, &position,tree->mod->bl_props, tree->mod->n_l, MPI_DOUBLE,  MPI_COMM_WORLD);
#endif /* ENABLE_MBL — BUGFIX: bare tokens after #endif are non-standard */
	MPI_Unpack(buffer, packsize, &position,&tree->mod->invar, 1, MPI_INT,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->mod->gamma_median, 1, MPI_INT,  MPI_COMM_WORLD);
	int ns = tree->mod->ns;

	MPI_Unpack(buffer, packsize, &position,tree->mod->pi, ns, MPI_DOUBLE, MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->gamma_r_proba, tree->mod->n_catg, MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->gamma_rr, tree->mod->n_catg, MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->mod->kappa, 1, MPI_DOUBLE, MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->mod->lambda, 1, MPI_DOUBLE, MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->mod->alpha, 1, MPI_DOUBLE, MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,&tree->mod->pinvar, 1, MPI_DOUBLE, MPI_COMM_WORLD);
	// Mirror the ncats sizing logic used on the pack side.
	int ncats = 1;
#ifdef ENABLE_MBL
	if (tree->mod->n_l > 1){
		ncats = tree->mod->n_l;
	}
#endif
	if (tree->mod->n_catg > 1){
		ncats = tree->mod->n_catg;
	}

	MPI_Unpack(buffer, packsize, &position,tree->mod->Pij_rr, ncats*ns*ns,MPI_DOUBLE, MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->qmat,ns*ns, MPI_DOUBLE, MPI_COMM_WORLD);

	// unpack the Eigen object
	MPI_Unpack(buffer, packsize, &position,&tree->mod->eigen->size,1,MPI_INT,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->q,ns*ns,MPI_DOUBLE, MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->space_int,2*ns,MPI_INT,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->e_val,ns,MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->e_val_im,ns,MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->r_e_vect,ns*ns,MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->r_e_vect_im,ns*ns,MPI_DOUBLE,  MPI_COMM_WORLD);
	MPI_Unpack(buffer, packsize, &position,tree->mod->eigen->l_e_vect,ns*ns,MPI_DOUBLE, MPI_COMM_WORLD);

	// here are the individual bits of Prepare_Tree....
	Order_Tree_CSeq(tree,tree->data);
	Fill_Dir_Table(tree);
	Update_Dirs(tree);
	// Victor's note: when I re-make the Lk structures, I somehow
	// break NNI's ability to work.
	Make_Tree_4_Pars(tree,tree->data,tree->data->init_len);
	Make_Tree_4_Lk(tree,tree->data,tree->data->init_len);
	tree->triplet_struct = Make_Triplet_Struct(tree->mod);
	Br_Len_Not_Involving_Invar(tree);
	Make_Spr_List(tree);
	Make_Best_Spr(tree);

	return mpi_message;
}

//
// Element-wise sums the per-process derivative vectors onto rank 0.
// On the root, the caller's array is overwritten with the summed totals;
// on every other rank the array is left untouched.
//
void Reduce_derivatives(phydbl *derivatives, int n)
{
	phydbl *summed = (phydbl *)mCalloc(n,sizeof(phydbl ));

	MPI_Reduce( derivatives, summed, n, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD );

	// Only the root copies the reduced result back into the caller's buffer.
	if (Global_myRank == 0)
	{
		int idx;
		for(idx = n - 1; idx >= 0; idx--)
		{
			derivatives[idx] = summed[idx];
		}
	}

	Free(summed);
}


//
// Gathers the NNI scores computed by each worker (each rank evaluated the
// edges [Global_myStartEdge, Global_myStopEdge]) onto rank 0 via a MAX
// reduction; slots a rank did not fill are UNLIKELY, so the owner's real
// values win. Rank 0 then writes the merged results back into the tree.
//
// Per-edge row layout:
//   [0]=score [1..4]=swap node ids (-1 if NULL) [5..7]=lk0..lk2
//   [8]=best_conf [9..]=best_l (n_l entries under MBL, 1 otherwise) [last]=l0
//
void Reduce_NNI_Lk_Estimates(t_tree *tree)
{
	//printf("\n. mpinode %d entered Reduce_NNI_Lk...", Global_myRank);
	//fflush(NULL);

	int i,j;
#ifdef ENABLE_MBL
	// BUGFIX: offset was (9+n_l), but l0 occupies one more slot after the
	// n_l best_l entries. With n_l==1 the unconditional read of slot +10
	// walked off the end of the row (out of bounds on the last edge), and
	// with n_l>1 it aliased best_l[1].
	int offset = (10+tree->mod->n_l);
#endif
#ifndef ENABLE_MBL
	int offset = 11;
#endif
	phydbl *nni_best_lnls_in = (phydbl *)mCalloc((2*tree->n_otu-3)*offset,sizeof(phydbl));
	phydbl *nni_best_lnls_out = (phydbl *)mCalloc((2*tree->n_otu-3)*offset,sizeof(phydbl));

	// Pre-fill with UNLIKELY so the MAX reduction ignores unowned rows.
	for(i=0;i<2*tree->n_otu-3;i++)
	{
		for(j=0;j<offset;j++)
		{
			nni_best_lnls_in[(i*offset)+j] = UNLIKELY;
			nni_best_lnls_out[(i*offset)+j] = UNLIKELY;
		}
	}

	// Fill in the rows for the edges this rank evaluated.
	for(i = Global_myStartEdge; i <= Global_myStopEdge; i++ )
	{
		nni_best_lnls_in[(i*offset)] = tree->t_edges[i]->nni->score;

		if (tree->t_edges[i]->nni->swap_node_v1 == NULL) nni_best_lnls_in[(i*offset)+1] = -1.0;
		else nni_best_lnls_in[(i*offset)+1] = (phydbl)tree->t_edges[i]->nni->swap_node_v1->num;

		if (tree->t_edges[i]->nni->swap_node_v2 == NULL) nni_best_lnls_in[(i*offset)+2] = -1.0;
		else nni_best_lnls_in[(i*offset)+2] = (phydbl)tree->t_edges[i]->nni->swap_node_v2->num;

		if (tree->t_edges[i]->nni->swap_node_v3 == NULL) nni_best_lnls_in[(i*offset)+3] = -1.0;
		else nni_best_lnls_in[(i*offset)+3] = (phydbl)tree->t_edges[i]->nni->swap_node_v3->num;

		if (tree->t_edges[i]->nni->swap_node_v4 == NULL) nni_best_lnls_in[(i*offset)+4] = -1.0;
		else nni_best_lnls_in[(i*offset)+4] = (phydbl)tree->t_edges[i]->nni->swap_node_v4->num;

		nni_best_lnls_in[(i*offset)+5] = tree->t_edges[i]->nni->lk0;
		nni_best_lnls_in[(i*offset)+6] = tree->t_edges[i]->nni->lk1;
		nni_best_lnls_in[(i*offset)+7] = tree->t_edges[i]->nni->lk2;

		nni_best_lnls_in[(i*offset)+8] = (phydbl)tree->t_edges[i]->nni->best_conf;
#ifdef ENABLE_MBL
		For(j,tree->mod->n_l)
		{	nni_best_lnls_in[(i*offset)+9+j] = tree->t_edges[i]->nni->best_l[j];
		}
		// BUGFIX: l0 was never stored under ENABLE_MBL, yet rank 0 reads it
		// back below; place it in the slot after the best_l entries.
		nni_best_lnls_in[(i*offset)+9+tree->mod->n_l] = tree->t_edges[i]->nni->l0;
#endif
#ifndef ENABLE_MBL
		nni_best_lnls_in[(i*offset)+9] = tree->t_edges[i]->nni->best_l;

		nni_best_lnls_in[(i*offset)+10] = tree->t_edges[i]->nni->l0;
#endif
	}

	MPI_Reduce( nni_best_lnls_in, nni_best_lnls_out, (2*tree->n_otu-3)*offset, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD );

	if (Global_myRank == 0)
	{
		For(i,2*tree->n_otu-3)
		{
			tree->t_edges[i]->nni->score = nni_best_lnls_out[(i*offset)];
			// NOTE(review): a -1 id indexes noeud[-1]; the guards were
			// deliberately commented out upstream — confirm intent.
			tree->t_edges[i]->nni->swap_node_v1 = tree->noeud[ (int)nni_best_lnls_out[(i*offset)+1] ];
			tree->t_edges[i]->nni->swap_node_v2 = tree->noeud[ (int)nni_best_lnls_out[(i*offset)+2] ];
			tree->t_edges[i]->nni->swap_node_v3 = tree->noeud[ (int)nni_best_lnls_out[(i*offset)+3] ];
			tree->t_edges[i]->nni->swap_node_v4 = tree->noeud[ (int)nni_best_lnls_out[(i*offset)+4] ];

			tree->t_edges[i]->nni->lk0 = nni_best_lnls_out[(i*offset)+5];
			tree->t_edges[i]->nni->lk1 = nni_best_lnls_out[(i*offset)+6];
			tree->t_edges[i]->nni->lk2 = nni_best_lnls_out[(i*offset)+7];

			tree->t_edges[i]->nni->best_conf = (int)nni_best_lnls_out[(i*offset)+8];
#ifdef ENABLE_MBL
			For(j,tree->mod->n_l)tree->t_edges[i]->nni->best_l[j] = nni_best_lnls_out[(i*offset)+9+j];
			tree->t_edges[i]->nni->l0 = nni_best_lnls_out[(i*offset)+9+tree->mod->n_l];
#endif
#ifndef ENABLE_MBL
			tree->t_edges[i]->nni->best_l = nni_best_lnls_out[(i*offset)+9];
			tree->t_edges[i]->nni->l0 = nni_best_lnls_out[(i*offset)+10];
#endif
		}
	}

	Free(nni_best_lnls_in);
	Free(nni_best_lnls_out);
}

void Print_MPI_Timing()
{
	PhyML_Printf("\n. Time total %f", (phydbl)time_total/CLOCKS_PER_SEC);
	PhyML_Printf("\n. Time building trees %f", (phydbl)time_building_tree/CLOCKS_PER_SEC);
	PhyML_Printf("\n\n. Time sending NNI message %f", (phydbl)time_nni_bcast/CLOCKS_PER_SEC);
	PhyML_Printf("\n. Time in NNI all edges loop %f", (phydbl)time_nni_loop/CLOCKS_PER_SEC);
	PhyML_Printf("\n. Time gathering NNI score %f", (phydbl)time_nni_gather/CLOCKS_PER_SEC);
	PhyML_Printf("\n. Time in method Check_NNI_Five_Branches %f", (phydbl)time_nni_five_branches/CLOCKS_PER_SEC);
	PhyML_Printf("\n\n. Time optimizing free parameters %f", (phydbl)time_optimize_all_free_params/CLOCKS_PER_SEC);
}

#endif
