/*-------------------------------------------------------------------------*/
/**
  @file		mlp.c
  @author	Renaud Wanchoor
  @date	July 2004
  @version	$Revision: 2.16 $
  @brief	multilevel graph partitioning
  @update date	March 2007 by Xiaochen Wu
  
*/
/*--------------------------------------------------------------------------*/
#include "pagrid.h"

/**
 * \brief multilevel graph partitioning
 *
 * Partition the graph file onto the grid using info params
 * \param graph application graph
 * \param grid computational grid
 * \param info parameters for partitioning
 */

void MultiLevelPartition(GraphType *graph, GridType *grid, PartInfoType *info){
	/* Search-control constants: the inside-cluster linear search advances by
	   cnpro/FracLinear processors per step, clamped to [MinLinear, MaxLinear]. */
	int MinLinear = 2;
	int MaxLinear = 8;
	int FracLinear = 8;
	int i;
	int *guide = NULL;		/* random vertex visit order for refinement */
	int dummy;			/* sink for unserialized fields that are ignored */
	char *nullchar = NULL;
	double e_limit_incluster = 0.1;	/* min efficiency gain to keep adding processors inside a cluster */
	double e_limit_cluster = 0.1;	/* min efficiency gain to keep adding whole clusters */
	int t_weight = graph->nvtxs;	/* total work; assumes unit vertex weights -- TODO confirm */
	double e = 1;			/* efficiency of the latest processor increment */

	info->rtimer[1] = gettimes();
	info->level = -1;
	info->currLevel = -1;

	/**************************************/
	/*                                    */
	/*        Coarsening Stage            */
	/*                                    */
	/**************************************/
	/* unserialize matching */
	if(info->out[IO_SER] == IO_SER_USE){
	  graph = Pagrid_Unserialize(info->F_serialize,graph,
				     &(nullchar),&(nullchar),
				     &(info->level), 
				     &(info->CType), &(info->WType), &dummy,
				     &info->CSeed, &dummy, &dummy);
	/* compute matchings */
	}else{
	  ran_start(info->CSeed);
	  graph = Coarsen(graph, grid, info);
	}

	/*****************************************/
	/*                                       */
	/*       Initial Partitioning Phase      */
	/*                                       */
	/*****************************************/

	int ncluster = grid->ncluster;
	int *cnpro = grid->cnpro;	/* processors per cluster */
	int npro = grid->npro;
	int nedges = grid->nedges;
	int snpro;			/* number of processors in the current sub-grid */
	int gspro, subgspro;
	int c;
	int* corder = U_mallocset(ncluster,int, -1, "MultiLevelPartition Error Code 6\n");
	int* gstartp = U_malloc(ncluster, int, "MultiLevelPartition Error Code 7\n");
	int* subgstartp = U_malloc(ncluster, int, "MultiLevelPartition Error Code 8\n");

	/* the clusters are ordered by their computational power, 
	   which is represented by the maximum estimated execution time among all the processors in the cluster */
	orderCluster(corder, grid, graph);
	/* first processor index of each cluster, in original grid order (gstartp)
	   and in the power-sorted sub-grid order (subgstartp) */
	gspro = 0;
	subgspro = 0;
	for(i = 0; i < ncluster; i++){
	  gstartp[i] = gspro;
	  subgstartp[i] = subgspro;
	  gspro += cnpro[i];
	  subgspro += cnpro[corder[i]];
	}
	GridType* subgrid;
	subgrid = LIBN_Init();
	subgrid->app = LIBG_Init();
	subgrid->app->nvtxs = npro;
	/* twice the edge count of a complete directed graph on npro processors;
	   presumably headroom for the edges inserted by buildGrid -- TODO confirm */
	subgrid->app->nedges = npro*(npro-1)*2;
	LIBG_Allocate(subgrid->app);

	/* Initialize the grid */
	initGrid(subgrid, npro, nedges);

	/* Build ratio, latency, latency1 by the cluster order */
	orderGrid(grid, subgrid, corder, gstartp, subgstartp);

	/* Cluster Level Search 
	   Stop when the added number of processors can not get efficient performance improvement */
	snpro = 0;
	c = 0;
	double mintmax = INFINITY;	/* best (lowest) tmax seen so far */
	int mintmax_npro, minc;		/* processor / cluster counts achieving mintmax */
	double currenttmax = INFINITY;
	mintmax_npro = 0;
	mintmax = INFINITY;
	minc = 0;
	do{
	  /* try the first c+1 clusters in power order */
	  snpro = subgstartp[c]+cnpro[corder[c]];
	  /* clean the grid information */
	  clearGrid(subgrid, npro, nedges);
	  /* build the sub grid */
	  buildGrid(grid, subgrid, snpro, corder, gstartp, subgstartp);
	  ran_start(info->ISeed);
	  switch(info->RType){
	  case IO_UNCO_KLEXEC:
	    currenttmax = InitialPartExec(graph, subgrid);
	    break;
	  }
	  /* efficiency of the added processors: relative tmax improvement per
	     added processor.  On the first pass mintmax_npro == 0 makes this
	     NaN, which the "c == 0" guard below absorbs. */
	  e = (mintmax - currenttmax) * mintmax_npro / ((snpro - mintmax_npro) * mintmax);
	  if(e > e_limit_cluster || c == 0){
	    mintmax = currenttmax;
	    mintmax_npro = snpro;
	    minc = c+1;
	  }
	  c++;
	}while(c == minc && c < ncluster);

	/* Inside Cluster Level Search */	
	int prange;			/* step size of the linear search */
	c = minc-1;
	/* linear search cluster minc-1*/
	prange = (int)(cnpro[corder[c]] / FracLinear);
	if(prange < MinLinear){
	  prange = MinLinear;
	}else{
	  if(prange > MaxLinear){
	    prange = MaxLinear;
	  }
	}
	/* if it is the linear search for the first cluster in the cluster order, 
	   it is treated as a homogeneous grid, and it is treated as a search on a smooth tmax curve. 
	   Therefore, we need to find out a number of processors with the cost effectively best performance. */
	if(minc == 1){
	  mintmax = INFINITY;
	  mintmax_npro = 0;
	  for(snpro = subgstartp[c]+prange; snpro <= subgstartp[c]+cnpro[corder[c]]; snpro+=prange){
	    clearGrid(subgrid, npro, nedges);
	    buildGrid(grid, subgrid, snpro, corder, gstartp, subgstartp);
	    currenttmax = InitialPartExec(graph, subgrid);
	    /* marginal throughput per added processor; on the first iteration
	       mintmax == INFINITY zeroes the second term, so at least one step
	       is always accepted */
	    e = (t_weight / currenttmax - t_weight / mintmax) / (snpro - mintmax_npro);
	    if(e < e_limit_incluster){
	      break;
	    }else{
	      mintmax = currenttmax;
	      mintmax_npro = snpro;
	      minc = c+1;
	    }
	  }
	/* or just find the number of processors with the lowest tmax */
	}else{
	  for(snpro = subgstartp[c]+prange; snpro <= subgstartp[c]+cnpro[corder[c]]; snpro+=prange){
	    clearGrid(subgrid, npro, nedges);
	    buildGrid(grid, subgrid, snpro, corder, gstartp, subgstartp);
	    currenttmax = InitialPartExec(graph, subgrid);
	    if(currenttmax < mintmax){
	      mintmax = currenttmax;
	      mintmax_npro = snpro;
	      minc = c+1;
	    }
	  }
	}
	/*linear search cluster minc, if it does not exceed the number of clusters*/
	c++;
	if(c < ncluster){
	  prange = (int)(cnpro[corder[c]] / FracLinear);
	  if(prange < MinLinear){
	    prange = MinLinear;
	  }else{
	    if(prange > MaxLinear){
	      prange = MaxLinear;
	    }
	  }
	/* find the number of processors with the lowest tmax */
	  for(snpro = subgstartp[c]+prange; snpro <= subgstartp[c]+cnpro[corder[c]]; snpro+=prange){
	    clearGrid(subgrid, npro, nedges);
	    buildGrid(grid, subgrid, snpro, corder, gstartp, subgstartp);
	    currenttmax = InitialPartExec(graph, subgrid);
	    if(currenttmax < mintmax){
	      mintmax = currenttmax;
	      mintmax_npro = snpro;
	      minc = c+1;
	    }
	  }
	}
	/* after the number of processors that the most cost effectively benefit the coarsest graph, 
	   the sub-grid is rebuilt and repartitioned preparing for the uncoarsening phase. */
	/* NOTE(review): this clear covers only the first mintmax_npro processors,
	   unlike the npro used above -- verify that entries beyond mintmax_npro
	   are never read once subgrid->npro == mintmax_npro */
	clearGrid(subgrid, mintmax_npro, nedges);
	buildGrid(grid, subgrid, mintmax_npro, corder, gstartp, subgstartp);
	currenttmax = InitialPartExec(graph, subgrid);
	info->rtimer[3] = gettimes();

	/* print out the sub-grid */
	printf("The best sub grid has %d clusters with %d processors with minimum tmax %f\n", minc, mintmax_npro, mintmax);
	printf("The sub grid is: \n");
	c = 0;
	snpro = 0;
	while(c < minc-1){
	  printf("Cluster #%d :\t", corder[c]);
	  for(i = gstartp[corder[c]]; i < gstartp[corder[c]]+cnpro[corder[c]]; i++){
	    printf("%d\t", i);
	  }
	  printf("\n");
	  snpro += cnpro[corder[c]];
	  c++;
	}
	/* the last selected cluster may be used only partially */
	printf("Cluster #%d :\t", corder[minc-1]);
	for(i = gstartp[corder[minc-1]]; i < (gstartp[corder[minc-1]] + mintmax_npro - snpro); i++){
	  printf("%d\t", i);
	}
	printf("\n");

	/* serialize matching */
	if(info->out[IO_SER] == IO_SER_DO){
	  Pagrid_Serialize(graph,info->F_serialize,info->out[IO_RESULT],
			   info->out[IO_ID],
			   info->F_graph,info->F_grid,
			   info->CType,info->WType,info->RType,
			   info->CSeed,info->ISeed,info->RSeed);
	}

	
	/* serialize refinement*/
	if(info->out[IO_SER] != IO_SER_NONE)
	  Pagrid_SerializeWhere(graph,info->F_output,info->out[IO_ID],
				info->out[IO_RESULT],info->out[IO_SER]);

	/**************************************/
	/*                                    */
	/*       Refinement stage             */
	/*                                    */
	/**************************************/
	/*                                    */
	/*       Init                         */
	/*                                    */
	/**************************************/

	ran_start(info->RSeed);
	switch(info->RType){	
	case IO_UNCO_KLEXEC:
	  /** -- REMOVE -- **/
	  /* debug dump; presumably the coarsest graph has one vertex per
	     processor, so vwgt can be indexed by i -- TODO confirm */
	  for (i=0; i<subgrid->npro; i++){
	    printf("processor %2d : %5d vertices %10.2f execution time\n", graph->pro[i], graph->vwgt[i], subgrid->exec[graph->pro[i]]);
	  }
	  printf("\n");
	  /** -- END REMOVE -- **/
	  printf("---------------------------------------------------------------------------------------------------------------------------\n");
	  break;
	}

	/**************************************/
	/*                                    */
	/*       Uncoarsening phase           */
	/*                                    */
	/**************************************/
	for (i=info->level; i>0; i--){
	  /***********************/
	  /* serialize refinement*/
	  if(info->out[IO_SER] != IO_SER_NONE)
	    Pagrid_SerializeWhere(graph,info->F_output,info->out[IO_ID],
				  info->out[IO_RESULT],info->out[IO_SER]);
	  /***********************/

	  info->currLevel = i;

	  /*Project Back the coarser graph*/
	  ProjBack(graph, subgrid, info);	
	  
	  graph = graph->father;
	  
	  /* fresh random visit order for this level's refinement */
	  guide = U_malloc(graph->nvtxs,int,"MultiLevelPartition error code 5\n");
	  randnum(graph->nvtxs, guide);

	  /*Refinement*/
	  switch(info->RType){
	  case IO_UNCO_KLEXEC:
	    Init_Connect(graph, subgrid);
	    SwapProcessors(subgrid,graph);
	    execRefine(graph,subgrid,i-1,guide,info);	
	    break;
	  }
	  U_free("MultiLevelPartition error code 6\n",&guide,LEND);
	}
	printf("---------------------------------------------------------------------------------------------------------------------------\n");
	/***********************/
	/* serialize refinement*/
	if(info->out[IO_SER] != IO_SER_NONE)
	  Pagrid_SerializeWhere(graph,info->F_output,info->out[IO_ID],
				info->out[IO_RESULT],info->out[IO_SER]);
	/***********************/
	/** -- REMOVE -- **/
	/* debug block: report per-processor execution time including full
	   latency weight, plus the adjacency of each processor */
	{
	  
	  double* volet;
	  double volmet;
	  int* npro      = subgrid->app->pro;
	  int* nadjidx   = subgrid->app->adjidx;
	  int* nadjncy   = subgrid->app->adjncy;
	  int** connect  = subgrid->connect;
	  int** latency1 = subgrid->latency1;
	  int j,jstart,jend;

	  switch(info->RType){
	  case IO_UNCO_KLEXEC:
	    ExecTime_FullWeight(graph, subgrid);
	    IO_statVol(TOOL_PAGRID,graph,subgrid,&volmet,&volet);
	    /* add one latency charge per connected processor pair */
	    for(i = 0; i < subgrid->npro; i++){
	      for(j = 0; j < subgrid->npro; j++){
	        if(connect[i][j] > 0){
	          volet[i] += latency1[i][j];
	        }
	      }
	    }

	    printf("\n");
	    for (i=0; i<subgrid->npro; i++){
	      printf("processor %2d : %5d vertices %10.2f (%10.2f) execution time adj : ", 
		     npro[i], subgrid->app->vwgt[i], subgrid->exec[npro[i]], volet[npro[i]]);
	      
	      jstart = nadjidx[i];
	      jend   = nadjidx[i+1];
	      
	      for(j=jstart;j<jend;j++){
		printf("%2d ",npro[nadjncy[j]]);
	      }
	      printf("\n");
	    }
	    printf("\n");
	    U_free("mlp.c volmet debug free error\n",&volet,LEND);
	    break;
	  }
	}
	/* final tmax = maximum per-processor estimated execution time */
	currenttmax = 0;
	for(i = 0; i < subgrid->npro; i++){
	  if(subgrid->exec[i] > currenttmax)
	    currenttmax = subgrid->exec[i];
	}
	printf("final tmax is %f\n", currenttmax);

	/** -- END REMOVE -- **/
	/**************************************/
	/*                                    */
	/*              Finalize              */
	/*                                    */
	/**************************************/	
	info->rtimer[4] = gettimes();

	U_free("MultiLevelPartition Error Code 6\n", &corder, &gstartp, &subgstartp, LEND);
	/**************************************/
	/*                                    */
	/*       output                       */
	/*                                    */
	/**************************************/	
   
	/* NOTE(review): rtimer[2] is never written in this function -- verify it
	   is set elsewhere before cputime[2] is consumed */
	info->cputime[0] = info->rtimer[4] - info->rtimer[0]; /* total */
	info->cputime[1] = info->rtimer[1] - info->rtimer[0]; /* coarsening */
	info->cputime[2] = info->rtimer[3] - info->rtimer[2]; /* initial partitioning */
	info->cputime[3] = info->rtimer[4] - info->rtimer[3]; /* refinement */
}

/**
 * \brief Initialize the sub-grid g
 *
 * Mainly allocates the memory needed for the sub-grid.
 * \param g computational sub-grid
 * \param npro number of processors
 * \param nedges number of edges
 */
void initGrid(GridType* g, int npro, int nedges){
  Mem *mem;
  mem = &(g->mem);
  /* Reserve storage, matched one-for-one against the MEM_Request calls below:
     - 7 npro x npro int matrices (realnetcost, netcost, connect, latency,
       latency1, convtxs, convtxs2)
     - 1 npro x npro double matrix (ratio)
     - 5 npro-length double arrays (proratio, proportion, prospeed, exec,
       cprospeed)
     - int pool: 6 npro-length arrays (rpro, conproc, procluster, cnpro,
       cewgt, clatency) + 2 (npro+1)-length index arrays (adjidx, cadjidx)
       + 8 nedges-length arrays = 8*npro + 8*nedges + 2 ints.
       This was previously reserved as 5*npro + 8*nedges + 2, which
       under-counted by 3*npro. */
  MEM_Needxy(mem, 7*npro, 7*npro, int);
  MEM_Needxy(mem, npro, npro, double);
  MEM_Need  (mem, 5*npro,        double);
  MEM_Need  (mem, 8*npro + 8*nedges + 2, int);

  MEM_Allocate(mem);

  g->realnetcost = MEM_Requestxy(mem, npro, npro, int   );
  g->netcost     = MEM_Requestxy(mem, npro, npro, int   );
  g->proratio    = MEM_Request  (mem, npro,       double);
  g->proportion  = MEM_Request  (mem, npro,       double);
  g->prospeed    = MEM_Request  (mem, npro,       double);
  g->ratio       = MEM_Requestxy(mem, npro, npro, double);

  g->adjncy      = MEM_Request  (mem, nedges,     int   );
  g->adjwgt      = MEM_Request  (mem, nedges,     int   );
  g->rpro        = MEM_Request  (mem, npro,       int   );
  g->exec        = MEM_Request  (mem, npro,       double);
  g->adjidx      = MEM_Request  (mem, npro+1,     int   );

  g->connect     = MEM_Requestxy(mem, npro, npro, int   );
  g->adjlcy      = MEM_Request  (mem, nedges,     int   );
  g->latency     = MEM_Requestxy(mem, npro, npro, int   );
  g->latency1    = MEM_Requestxy(mem, npro, npro, int   );
  g->convtxs     = MEM_Requestxy(mem, npro, npro, int   );
  g->convtxs2    = MEM_Requestxy(mem, npro, npro, int   );
  g->conproc     = MEM_Request  (mem, npro,       int   );

  g->cprospeed   = MEM_Request  (mem, npro,       double);
  g->procluster  = MEM_Request  (mem, npro,       int   );
  g->cnpro       = MEM_Request  (mem, npro,       int   );
  g->cewgt       = MEM_Request  (mem, npro,       int   );
  g->clatency    = MEM_Request  (mem, npro,       int   );
  g->cadjidx     = MEM_Request  (mem, npro+1,     int   );
  g->cadjncy     = MEM_Request  (mem, nedges,     int   );
  g->cadjwgt     = MEM_Request  (mem, nedges,     int   );
  g->cadjlatency = MEM_Request  (mem, nedges,     int   );
  g->cadjfrom    = MEM_Request  (mem, nedges,     int   );
  g->cadjto      = MEM_Request  (mem, nedges,     int   );

}

/**
 * \brief Clear the grid information before building it
 *
 * Different sub-grids carry different information; before another sub-grid
 * is built, the information of the older sub-grid is cleared.
 * \param g computational sub-grid
 * \param npro number of processors
 * \param nedges number of edges
 */
void clearGrid(GridType* g, int npro, int nedges){
  int p, q, e;

  /* Reset the processor-by-processor matrices together with the
     per-processor vectors; counts/costs go to 0, index-like entries to -1.
     Note: only the first npro entries of adjidx are touched here; buildGrid
     rewrites the index arrays from scratch afterwards. */
  for(p = 0; p < npro; p++){
    for(q = 0; q < npro; q++){
      g->realnetcost[p][q] = 0;
      g->netcost[p][q]     = 0;
      g->connect[p][q]     = 0;
      g->convtxs[p][q]     = 0;
      g->convtxs2[p][q]    = 0;
    }
    g->proratio[p]   = 0;
    g->proportion[p] = 0;
    g->rpro[p]       = -1;
    g->exec[p]       = 0;
    g->adjidx[p]     = -1;
    g->conproc[p]    = 0;
    g->clatency[p]   = 0;
  }

  /* Reset the per-edge arrays of both the processor graph and the
     cluster graph. */
  for(e = 0; e < nedges; e++){
    g->adjncy[e]      = -1;
    g->adjwgt[e]      = -1;
    g->cadjncy[e]     = -1;
    g->cadjwgt[e]     = -1;
    g->adjlcy[e]      = 0;
    g->cadjlatency[e] = 0;
    g->cadjfrom[e]    = -1;
    g->cadjto[e]      = -1;
  }
}

/**
 * \brief Build the sub-grid
 *
 * Builds the sub-grid according to the number of processors and the cluster order.
 * \param g computational grid
 * \param subg computational sub-grid
 * \param npro number of processors
 * \param corder the cluster order
 * \param gspro start processor numbers for each cluster in the grid
 * \param subgspro start processor numbers for each cluster in the sub-grid
 */
void buildGrid(GridType* g, GridType* subg, int npro, int* corder, int* gspro, int* subgspro){
  /* Build the sub-grid's processor graph and cluster graph in CSR form by
     taking clusters from g in power order (corder) until npro processors
     have been placed; the last cluster taken may be only partially used.
     Each cluster contributes a complete intra-cluster subgraph, and edges
     between already-placed clusters are spliced in afterwards. */
  subg->npro = npro;
  int c, i, j, k, jstart, jend;
  int snedges = 0;              /* running edge count of the sub-grid */
  int ncpro;                    /* processors taken from the current cluster */
  int nnpro = npro;             /* processors still to place */
  int *gcnpro = g->cnpro;
  int *gcewgt = g->cewgt;
  int *gcadjncy = g->cadjncy;
  int *gcadjwgt = g->cadjwgt;
  int *gcadjfrom = g->cadjfrom;
  int *gcadjto = g->cadjto;
  int *gcadjidx = g->cadjidx;

  int *sgcnpro = subg->cnpro;
  int *sgadjidx = subg->adjidx;
  int *sgadjncy = subg->adjncy;
  int *sgadjwgt = subg->adjwgt;
  int *sgcadjidx = subg->cadjidx;
  int *sgcadjncy = subg->cadjncy;
  int *sgcadjwgt = subg->cadjwgt;
  int *sgcadjfrom = subg->cadjfrom;
  int *sgcadjto = subg->cadjto;
  int p, pstart;
  int idxend, startidx;
  int endp;
  int subfrom, subto;

  pstart = 0;                   /* first sub-grid index of the current cluster */
  c = 0;                        /* sub-grid cluster counter */
  sgadjidx[0] = 0;
  sgcadjidx[0] = 0;
  while(nnpro > 0){
    /* take the whole cluster, or whatever remains of the processor budget */
    if(nnpro >= gcnpro[corder[c]]){
      ncpro = gcnpro[corder[c]];
    }else{
      ncpro = nnpro;
    }
    sgcnpro[c] = ncpro;
    snedges += ncpro * (ncpro-1);
    nnpro -= ncpro;
    /* complete intra-cluster subgraph: each processor is adjacent to the
       other ncpro-1 processors of its cluster, all edges weighted gcewgt[c] */
    for(i = 0; i < ncpro; i++){
      sgadjidx[pstart+i+1] = sgadjidx[pstart+i] + ncpro - 1;
      k = 0;
      jstart = sgadjidx[pstart+i];
      for(j = 0; j < ncpro; j++){
        if(i != j){
          sgadjncy[jstart+k] = pstart + j;
          sgadjwgt[jstart+k] = gcewgt[c];
          k++;
        }
      }
    }
    sgcadjidx[c+1] = sgcadjidx[c];
    idxend = sgadjidx[pstart+ncpro];  /* current end of the CSR edge lists */
    endp = pstart + ncpro;
    /* splice in the edges between cluster c and every earlier cluster i */
    for(i = 0; i < c; i++){
      jstart = gcadjidx[corder[i]];
      jend = gcadjidx[corder[i]+1];
      for(j = jstart; j < jend; j++){
        if(gcadjncy[j] == corder[c]){
          snedges += 2;
          p = gcadjfrom[j];     /* NOTE(review): dead store -- p is never read */
          /* translate the grid endpoints into sub-grid processor indices */
          subfrom = subgspro[i] + gcadjfrom[j] - gspro[corder[i]];
          subto = subgspro[c] + gcadjto[j] - gspro[corder[c]];
          /* insert edge subfrom -> subto: shift the tail of the CSR arrays
             right by one, then bump the index entries after subfrom */
          startidx = sgadjidx[subfrom + 1];
          for(k = idxend; k > startidx; k--){
            sgadjncy[k] = sgadjncy[k-1];
            sgadjwgt[k] = sgadjwgt[k-1];
          }
          sgadjncy[startidx] = subto;
          sgadjwgt[startidx] = gcadjwgt[j];
          idxend++;
          for(k = subfrom+1; k <= endp; k++){
            sgadjidx[k]++;
          }
          p = gcadjto[j];       /* NOTE(review): dead store -- p is never read */
          /* insert the reverse edge subto -> subfrom the same way */
          startidx = sgadjidx[subto + 1];
          for(k = idxend; k > startidx; k--){
            sgadjncy[k] = sgadjncy[k-1];
            sgadjwgt[k] = sgadjwgt[k-1];
          }
          sgadjncy[startidx] = subfrom;
          sgadjwgt[startidx] = gcadjwgt[j];
          idxend++;
          for(k = subto+1; k <= endp; k++){
            sgadjidx[k]++;
          }
          /* mirror the insertion in the cluster-level CSR: edge i -> c ... */
          for(k = sgcadjidx[c+1]; k > sgcadjidx[i+1]; k--){
            sgcadjncy[k] = sgcadjncy[k-1];
            sgcadjwgt[k] = sgcadjwgt[k-1];
            sgcadjfrom[k] = sgcadjfrom[k-1];
            sgcadjto[k] = sgcadjto[k-1];
          }
          sgcadjncy[sgcadjidx[i+1]] = c;
          sgcadjwgt[sgcadjidx[i+1]] = gcadjwgt[j];
          sgcadjfrom[sgcadjidx[i+1]] = subfrom;
          sgcadjto[sgcadjidx[i+1]] = subto;
          for(k = i+1; k <= c+1; k++){
            sgcadjidx[k]++;
          }
          /* ... and edge c -> i appended at the end of cluster c's list */
          sgcadjncy[sgcadjidx[c+1]] = i;
          sgcadjwgt[sgcadjidx[c+1]] = gcadjwgt[j];
          sgcadjfrom[sgcadjidx[c+1]] = subto;
          sgcadjto[sgcadjidx[c+1]] = subfrom;
          sgcadjidx[c+1]++;
        }
      }
    }
    pstart += ncpro;
    c++;
  }
  subg->nedges = snedges;
  subg->ncluster = c;
}

/**
 * \brief Settle down the unchanged information of the sub-grid
 *
 * When the sub-grid is built, some information is unchanged, such as the
 * latency and bandwidth information. This function builds the latency and
 * bandwidth information; when the sub-grid is rebuilt, this information is
 * no longer updated.
 * \param g computational grid
 * \param subg computational sub-grid
 * \param corder the cluster order
 * \param gspro the start processor numbers for each cluster in the grid
 * \param subgspro the start processor numbers for each cluster in the sub-grid
 */
/**
 * \brief Copy one rectangular block of the ratio/latency/latency1 matrices
 *        from the full grid into the sub-grid.
 *
 * \param g      computational grid (source)
 * \param subg   computational sub-grid (destination)
 * \param subrow first destination row in the sub-grid matrices
 * \param subcol first destination column in the sub-grid matrices
 * \param grow   first source row in the grid matrices
 * \param gcol   first source column in the grid matrices
 * \param nrows  number of rows to copy
 * \param ncols  number of columns to copy
 */
static void copyGridBlock(GridType* g, GridType* subg,
                          int subrow, int subcol, int grow, int gcol,
                          int nrows, int ncols){
  int j, k;
  for(j = 0; j < nrows; j++){
    for(k = 0; k < ncols; k++){
      subg->ratio[subrow+j][subcol+k]    = g->ratio[grow+j][gcol+k];
      subg->latency[subrow+j][subcol+k]  = g->latency[grow+j][gcol+k];
      subg->latency1[subrow+j][subcol+k] = g->latency1[grow+j][gcol+k];
    }
  }
}

void orderGrid(GridType* g, GridType* subg, int* corder, int* gspro, int* subgspro){
  int *gcnpro = g->cnpro;
  int ncluster = g->ncluster;
  double *gcprospeed = g->cprospeed;
  int *gcewgt = g->cewgt;

  int *subgcewgt = subg->cewgt;
  double *subgprospeed = subg->prospeed;
  double *subgcprospeed = subg->cprospeed;
  int *subgprocluster = subg->procluster;

  int i, c, j, l;

  /* reference communication ratio: between the first two processors of the
     most powerful cluster (assumes that cluster has at least two
     processors -- TODO confirm) */
  subg->r_from = 0;
  subg->r_to = 1;
  subg->r_val = g->ratio[gspro[corder[0]]][gspro[corder[0]]+1];

  for(i = 0; i < ncluster; i++){
    c = corder[i];
    subgcewgt[i] = gcewgt[c];
    subgcprospeed[i] = gcprospeed[c];

    /* per-processor data of cluster c, plus its diagonal (intra-cluster)
       block of the communication matrices */
    for(j = 0; j < gcnpro[c]; j++){
      subgprospeed[subgspro[i]+j] = gcprospeed[c];
      subgprocluster[subgspro[i]+j] = i;
    }
    copyGridBlock(g, subg, subgspro[i], subgspro[i], gspro[c], gspro[c],
                  gcnpro[c], gcnpro[c]);

    /* off-diagonal blocks against every previously placed cluster.
       The original code branched on (c < corder[l]), but both branches
       performed the same pair of copies (in opposite order); since the two
       destination blocks are disjoint, one unconditional pair suffices. */
    for(l = 0; l < i; l++){
      copyGridBlock(g, subg, subgspro[i], subgspro[l],
                    gspro[c], gspro[corder[l]],
                    gcnpro[c], gcnpro[corder[l]]);
      copyGridBlock(g, subg, subgspro[l], subgspro[i],
                    gspro[corder[l]], gspro[c],
                    gcnpro[corder[l]], gcnpro[c]);
    }
  }
}

/**
 * \brief Order clusters by their computational power
 *
 * \param corder the cluster order
 * \param grid computational grid
 * \param graph application graph
 */
void orderCluster(int* corder, GridType* grid, GraphType* graph){
  int GSIZE = 8;		/* number of parts used by the probe partition */
  int ncluster = grid->ncluster;
  int* cnpro = grid->cnpro;
  int* cewgt = grid->cewgt;
  double* cprospeed = grid->cprospeed;
  int* clatency = grid->clatency;
  int* cadjidx = grid->cadjidx;
  int* cadjwgt = grid->cadjwgt;
  int* cadjncy = grid->cadjncy;
  double** nratio = grid->ratio;

  /* single cluster: nothing to order */
  if(ncluster <= 1){
    corder[0] = 0;
    return;
  }

  double* cpower;	/* estimated execution time of each cluster (lower = more powerful) */
  double* gain;
  int* region;		/* 1 once a cluster has been placed in the order */
  cpower = U_malloc(ncluster, double, "Cluster order error 1\n");
  gain = U_malloc(ncluster, double, "Cluster order error 2\n");
  region = U_malloc(ncluster, int, "Cluster order error 3\n");

  int c,i,j, cn, spro;
  int adjc;
  int jstart, jend;
  int istart, iend;
  int v, prov, w, prow;
  double tmpgain;
  double tmax, tmpmax;

  /* (removed dead code: a min-scan over cprospeed/cewgt into maxP/maxE whose
     results were never used; it also initialized an int from INFINITY, which
     is undefined behavior for an out-of-range float->int conversion) */

  int *subnpro, *subecut;	/* per-part vertex weight / edge cut of the probe partition */
  subnpro = U_mallocset(GSIZE, int, 0, "Order cluster error code 4\n");
  subecut = U_mallocset(GSIZE, int, 0, "Order cluster error code 5\n");

  int* gpro = graph->pro;
  int* gvwgt = graph->vwgt;
  int* gadjidx = graph->adjidx;
  int* gadjncy = graph->adjncy;
  int* gadjwgt = graph->adjwgt;
  int nvtxs = graph->nvtxs;
  int start_v;
  int t_weight = 0;
  InitCtrl ICtrl;
  Mem mem;
  MEM_Init(&mem);
  MEM_Need(&mem, 3*nvtxs, int);
  MEM_Allocate(&mem);

  ICtrl.glayers = MEM_Request(&mem, nvtxs, int);
  ICtrl.vtxorder = MEM_Request(&mem, nvtxs, int);
  ICtrl.gainorder = MEM_Request(&mem, nvtxs, int);

  /* probe partition: cut the whole graph into GSIZE pieces once, then cost
     that same partition on each cluster to estimate the cluster's power */
  for(i = 0; i < nvtxs; i++){
    gpro[i] = 0;
    t_weight += gvwgt[i];
  }
  start_v = PseudoPeripherialVertex(0, 0, &ICtrl, graph);
  PartExec(graph, 0, GSIZE-1, start_v, 0, t_weight, &ICtrl);

  /* accumulate per-part vertex weight and edge cut of the probe partition */
  for(v = 0; v < nvtxs; v++){
    prov = gpro[v];
    subnpro[prov] += gvwgt[v];
    istart = gadjidx[v];
    iend = gadjidx[v+1];
    for(i = istart; i < iend; i++){
      w = gadjncy[i];
      prow = gpro[w];
      if(prov != prow){
        subecut[prov] += gadjwgt[i];
      }
    }
  }

  /* estimated execution time of the probe partition on each cluster;
     clusters with a processor count other than GSIZE are rescaled */
  spro = 0;
  for(i = 0; i < ncluster; i++){
    tmax = 0;
    for(j = 0; j < GSIZE; j++){
      tmpmax = cprospeed[i] * subnpro[j] + subecut[j] * nratio[spro][spro+1] + (GSIZE-1)*clatency[i];
      if(tmax < tmpmax){
        tmax = tmpmax;
      } 
    }
    if(cnpro[i] == GSIZE){
      cpower[i] = tmax;
    }else{
      cpower[i] = (tmax*GSIZE-t_weight)*(cnpro[i]-1)/((GSIZE-1)*cnpro[i]) + t_weight/cnpro[i];
    }
    spro += cnpro[i];
  }

  /* seed the order with the most powerful (lowest estimated time) cluster */
  cn = 0;
  for(i = 1; i < ncluster; i++){
    if(cpower[cn] > cpower[i]){
      cn = i;
    }
  }

  for(i = 0; i < ncluster; i++){
    gain[i] = INFINITY;
    region[i] = 0;
  }

  /* greedily grow the order: update each neighbour's gain (cluster power
     weighted by the connecting edge), then pick the unplaced cluster with
     the smallest gain */
  region[cn] = 1;
  corder[0] = cn;
  c = 1;
  while(c < ncluster){
    jstart = cadjidx[cn];
    jend = cadjidx[cn+1];
    for(j = jstart; j < jend; j++){
      adjc = cadjncy[j];
      tmpgain = cpower[adjc] * cadjwgt[j];
      if(region[adjc] == 0 && tmpgain < gain[adjc]){
        gain[adjc] = tmpgain;
      }
    }
    /* first unplaced cluster as the starting candidate ... */
    for(i = 0; i < ncluster; i++){
      if(region[i] == 0){
        cn = i;
        break;
      }
    }
    /* ... then take the unplaced cluster with the minimum gain */
    for(i = cn+1; i < ncluster; i++){
      if(gain[cn] > gain[i] && region[i] == 0){
        cn = i;
      }
    }
    corder[c] = cn;
    gain[cn] = INFINITY;
    region[cn] = 1;
    c++;
  }

  MEM_Free(&mem);
  U_free("Cluster order error 6\n", &cpower, &gain, &region, &subnpro, &subecut, LEND);
}

/**
 * \brief Compute the estimated execution time with full latency weight
 *
 * \param graph application graph
 * \param grid computational grid
 */
void ExecTime_FullWeight(GraphType* graph, GridType* grid){
  /* Recompute grid->exec for every processor: computation cost of its
     vertices, communication cost of every cut edge, plus one latency1
     charge per connected processor pair. */
  int     nproc     = grid->npro;
  double *exec      = grid->exec;
  double *speed     = grid->prospeed;
  double **ratio    = grid->ratio;
  int   **connect   = grid->connect;
  int   **latency1  = grid->latency1;

  int *xadj  = graph->adjidx;
  int *adj   = graph->adjncy;
  int *ewgt  = graph->adjwgt;
  int *part  = graph->pro;
  int *vwgt  = graph->vwgt;
  int  nvtxs = graph->nvtxs;

  int p, q, vtx, nbr, e;

  /* reset the accumulators */
  for(p = 0; p < nproc; p++){
    exec[p] = 0;
  }

  /* computation plus per-edge communication for every cut edge */
  for(vtx = 0; vtx < nvtxs; vtx++){
    p = part[vtx];
    exec[p] += vwgt[vtx]*speed[p];
    for(e = xadj[vtx]; e < xadj[vtx+1]; e++){
      nbr = adj[e];
      q = part[nbr];
      if(q != p){
        exec[p] += ewgt[e] * ratio[p][q];
      }
    }
  }

  /* one latency charge per connected processor pair */
  for(p = 0; p < nproc; p++){
    for(q = 0; q < nproc; q++){
      if(connect[p][q] > 0){
        exec[p] += latency1[p][q];
      }
    }
  }
}

