/*
>> Mapping evaluation in the light of dependent operations
*/

#define _ISOC99_SOURCE

#include <math.h>
#include <values.h>
#include <log.h>
#include "globals.h"
#include "mapping.h"
#include "evaluate.h"
#include "evaldeps.h"

extern table_t scm_procs, scm_ops, scm_streams, /* owned, indexed */
               scm_links;                       /* owned */
/* BUGFIX: was `extern map_uses_affinity;` — implicit int is invalid in
 * C99, which this file targets (_ISOC99_SOURCE, // comments, fmax).
 * Flag: nonzero when the mapping must respect operation affinities. */
extern int map_uses_affinity;

//#define SC_EVL_DEBUG

#define MYEPS 0.0001

#define VERTEXSIZE  sizeof(vertex_t)
#define EDGESIZE    sizeof(edge_t)
#define ALLOCATE(x) (eptr += (x), eptr-(x))

#define WORK(l, p)   work[(l)*processors + (p)]
#define DEP(l1, l2)  dep[(l1)*labels+(l2)]

#define DEP_GRAPH(set, bulk) (((bulk) || ((set)==SC_GRAPH_TDG)) && !((set)==SC_GRAPH_TIG))

/* Bump arena for graph storage: ALLOCATE() advances eptr through emem. */
static unsigned char       emem[MAX_VERTICES*VERTEXSIZE + MAX_EDGES*EDGESIZE], *eptr;
/* vertex[]: vertices by id; topord[]: same vertices in topological order */
static vertex_t           *vertex[MAX_VERTICES], *topord[MAX_VERTICES];
static edge_t             *edge[MAX_EDGES];
/* Current counts of graph elements, labels, and distinct processors used */
static int                 vertices, edges, labels, processors;
static float              *work;  /* labels x processors work matrix, see WORK() */
static unsigned short int *dep;   /* labels x labels dependency matrix, see DEP() */

/* Dump the current task graph in Graphviz DOT syntax to stdout:
 * one cluster per processor, labelled vertices, edges annotated
 * with their bulk flag. */
void printGraph(void)
{
  int p, n;

  printf("digraph G {\n");

  /* One cluster per processor, listing the vertices mapped onto it */
  for (p = 0; p < processors; p++)
  {
    printf("subgraph cluster_processor%d {\n", p);
    printf("label = \"%d\"\n", p);

    for (n = 0; n < vertices; n++)
    {
      vertex_t *v = vertex[n];

      if (v->processor != p)
        continue;

      if (v->label < 0)
        printf("v%d [label=\"%d: %fs\"]\n", v->id, v->id, v->work);
      else
        printf("v%d [label=\"%d: %fs\" style=filled fillcolor=\"%f 1 1\"]\n", v->id, v->id, v->work, (float)v->label/labels);
    }

    printf("}\n");
  }

  /* All edges, with the bulk flag as edge label */
  for (n = 0; n < edges; n++)
  {
    edge_t *e = edge[n];

    printf("v%d -> v%d [label=\"%d\"]\n", e->start->id, e->end->id, e->bulk);
  }

  printf("}\n");

  fflush(stdout);
}

/*
 * Build the dependency graph for the current mapping.
 *
 * One vertex is created per mapped operation, carrying benchmark
 * time/energy (optionally perturbed by settings.benchmarknoise).  For
 * every stream, edges connect the producer to its consumers; a stream
 * passing through several processors additionally gets one transport
 * vertex per processor, charged half the transport cost on the source
 * and the destination side.  iop scales transport cost per byte.
 * Results land in the file-static emem arena and the vertices/edges/
 * processors counters.
 *
 * NOTE(review): the table cursors id/id2/id3 are reused across passes
 * without being reset to -1 — this assumes tblgetnextelement() leaves
 * the cursor ready for a fresh iteration once it returns false; verify.
 */
static void generateGraph(double iop)
{
  int id=-1, id2=-1, id3=-1;
  scm_op_t *o;
  scm_arg_t *a;
  scm_stream_t *s;
  scm_proc_t *p;
  scm_transport_t *t;
  vertex_t *u, *v, *w, *transvertex[MAX_PROCESSORS];
  edge_t *e;

  /* proc[] maps target processor ids to dense indices 0..processors-1 */
  int proc[MAX_PROCESSORS];
  memset(proc, -1, MAX_PROCESSORS*sizeof(int));

  vertices = edges = processors = 0;
  eptr = emem;

  /* Prepare vertices: one per mapped operation */
  while (tblgetnextelement(&scm_ops, &id, &o))
  {
    if (o->processor)
    {
      o->_id = vertices++;
      v = vertex[o->_id] = (vertex_t*) ALLOCATE(VERTEXSIZE);

      v->id = o->_id;
      v->edges = 0;
      v->delay = 0;
      v->label = -1;

      v->work = fmax(0, o->target_operation->time);
      v->energy = fmax(0, o->target_operation->energy);

      /* Optional noise: scale work/energy by 1 +/- benchmarknoise */
      if (settings.benchmarknoise > 0)
      {
        v->work *= 1+scuRnd()*2*settings.benchmarknoise-settings.benchmarknoise;
        v->energy *= 1+scuRnd()*2*settings.benchmarknoise-settings.benchmarknoise;
      }

      if (proc[o->processor->target->id] >= 0)
        v->processor = proc[o->processor->target->id];
      else
        v->processor = proc[o->processor->target->id] = processors++;
    }
  }

  /* Generate graph structure */
  while (tblgetnextelement(&scm_ops, &id, &o))
  {
    if (o->processor)
    {
      v = vertex[o->_id];

      while (tblgetnextelement(&o->streams, &id2, &s))
      if (s->producer && s->producer->operation == o)
      {
        if (tblgetelements(&s->processors) > 1)
        {
          /* Create a vertex on all processors the stream passes through */
          while (tblgetnextelement(&s->processors, &id3, &p))
          {
            w = vertex[vertices] = (vertex_t*) ALLOCATE(VERTEXSIZE);

            w->id = vertices++;
            w->edges = 0;
            w->delay = 0;
            w->label = -1;
            w->work = 0;
            w->energy = 0;

            if (proc[p->target->id] >= 0)
              w->processor = proc[p->target->id];
            else
              w->processor = proc[p->target->id] = processors++;

            transvertex[p->target->id] = w;
          }

          /* Create edge from source to transport on source processor */
          e = edge[edges] = (edge_t*) ALLOCATE(EDGESIZE);
          w = transvertex[o->processor->target->id];

          v->edge[v->edges++] = e;
          w->edge[w->edges++] = e;

          e->id    = edges++;
          e->start = v;
          e->end   = w;
          e->bulk  = DEP_GRAPH(scs_mapper.graph, s->producer->bulk);

          /* Create edges from transport on destination processor to consumers */
          while (tblgetnextelement(&s->consumers, &id3, &a))
          {
            if (a->operation->processor)
            {
              e = edge[edges] = (edge_t*) ALLOCATE(EDGESIZE);

              /* A consumer on the producer's processor reads straight
               * from the producer vertex, not from a transport */
              if (a->operation->processor == o->processor)
                u = v;
              else
                u = transvertex[a->operation->processor->target->id];

              w = vertex[a->operation->_id];

              u->edge[u->edges++] = e;
              w->edge[w->edges++] = e;

              e->id    = edges++;
              e->start = u;
              e->end   = w;
              e->bulk  = DEP_GRAPH(scs_mapper.graph, a->bulk);
            }
          }

          /* Connect transports */
          while (tblgetnextelement(&s->processors, &id3, &p))
          {
            if (p != o->processor)
            {
              e = edge[edges] = (edge_t*) ALLOCATE(EDGESIZE);
              u = transvertex[o->processor->target->id];
              w = transvertex[p->target->id];

              u->edge[u->edges++] = e;
              w->edge[w->edges++] = e;

              e->id    = edges++;
              e->start = u;
              e->end   = w;
              e->bulk  = 0;
            }
          }

          /* Add one half transportation time to each stream source and
           * destination */
          while (tblgetnextelement(&s->alltransports, &id3, &t))
          {
            p = t->source;
            u = transvertex[p->target->id];
            u->work += t->streams * fmax(0, p->transport_operation->time*iop*s->bytes/2);
            /* BUGFIX: transport energy was accumulated into u->work;
             * account it to u->energy, mirroring the time/energy split
             * used for operation vertices above */
            u->energy += t->streams * fmax(0, p->transport_operation->energy*iop*s->bytes/2);

            p = t->destination;
            u = transvertex[p->target->id];
            u->work += t->streams * fmax(0, p->transport_operation->time*iop*s->bytes/2);
            u->energy += t->streams * fmax(0, p->transport_operation->energy*iop*s->bytes/2);
          }
        }
        else
        {
          /* Just create edges between producer and consumers */
          while (tblgetnextelement(&s->consumers, &id3, &a))
          {
            if (a->operation->processor)
            {
              e = edge[edges] = (edge_t*) ALLOCATE(EDGESIZE);
              w = vertex[a->operation->_id];

              v->edge[v->edges++] = e;
              w->edge[w->edges++] = e;

              e->id    = edges++;
              e->start = v;
              e->end   = w;
              e->bulk  = DEP_GRAPH(scs_mapper.graph, a->bulk | s->producer->bulk);
            }
          }
        }
      }
    }
  }
}

/*
>> http://www.ics.uci.edu/~eppstein/161/960208.html
>> Algorithm 7: (topological ordering, detailed implementation)
*/

/* Fill topord[] with the vertices in topological order using Kahn's
 * algorithm: seed a queue with indegree-zero vertices, then repeatedly
 * emit one and release its successors. */
static void orderTopologically(void)
{
  /* Scratch: K is the queue of currently source vertices, I[v] counts
   * the not-yet-processed incoming edges of vertex v. */
  static vertex_t *K[MAX_VERTICES];
  static int I[MAX_VERTICES];

  vertex_t **out = topord;
  int head = 0, tail = 0, written = 0;
  int n, k;

  /* Count incoming edges per vertex and enqueue the sources */
  for (n = 0; n < vertices; n++)
  {
    vertex_t *v = vertex[n];
    int indeg = 0;

    for (k = 0; k < v->edges; k++)
      if (v->edge[k]->end == v)
        indeg++;

    I[n] = indeg;

    if (indeg == 0)
      K[tail++] = v;
  }

  /* Drain the queue */
  while (head != tail)
  {
    vertex_t *v = K[head++];

    /* Every outgoing edge releases one incoming edge of its target;
     * targets reaching indegree zero become sources themselves */
    for (k = 0; k < v->edges; k++)
    {
      edge_t *e = v->edge[k];

      if (e->start == v && --I[e->end->id] == 0)
        K[tail++] = e->end;
    }

    out[written++] = v;
  }
}

/* Compute each vertex's delay: the maximum over incoming edges of the
 * predecessor's delay plus the edge's bulk flag.  Visiting vertices in
 * topological order guarantees predecessor delays are final. */
static void findDelays(void)
{
  int n, k;

  for (n = 0; n < vertices; n++)
  {
    vertex_t *v = topord[n];

    v->delay = 0;

    for (k = 0; k < v->edges; k++)
    {
      edge_t *e = v->edge[k];

      if (e->end != v)
        continue;

      v->delay = MAX(v->delay, e->start->delay + e->bulk);
    }
  }
}

/* Recursive flood-fill: give v the label, then spread it to every
 * neighbour (following edges in either direction) that is still
 * unlabelled and has the same delay as v. */
static void assignLabel(int label, vertex_t *v)
{
  int k;

  v->label = label;

  for (k = 0; k < v->edges; k++)
  {
    edge_t *e = v->edge[k];
    vertex_t *other = (e->start == v) ? e->end : e->start;

    if (other->label < 0 && other->delay == v->delay)
      assignLabel(label, other);
  }
}

/* Partition the graph into labels: walk the vertices in topological
 * order and start a fresh label at each unlabelled one, flooding it
 * through the vertex's equal-delay neighbourhood. */
static void assignLabels(void)
{
  int n;

  labels = 0;

  for (n = 0; n < vertices; n++)
    if (topord[n]->label < 0)
      assignLabel(labels++, topord[n]);
}

/* Expand label-level dependencies to task-level edges, then dissolve
 * the labels by giving every vertex its own.  Used when each task is
 * to be scheduled individually rather than per label. */
static void separateLabels(void)
{
  int i;

  /* Create dependent edges between each two tasks for which
   * there is a dependent edge between their two labels */
   
  for (i=0; i < edges; i++)
  {
    edge_t *e = edge[i];
  
    if (e->bulk)
    {
      int j;
    
      /* For every vertex pair (v, w) spanning the same label pair as e */
      for (j=0; j < vertices; j++)
      {
        vertex_t *v = vertex[j];
        
        if (v->label == e->start->label)
        {
          int k;
        
          for (k=0; k < vertices; k++)
          {
            vertex_t *w = vertex[k];
            
            if (w->label == e->end->label)
            {
              int l, found=0;
            
              /* Skip the pair if a bulk edge v->w already exists */
              for (l=0; l < v->edges; l++)
              {
                edge_t *f = v->edge[l];
              
                if (f->start == v && f->end == w && f->bulk)
                {
                  found = 1;
                  break;
                }
              }
            
              /* Otherwise materialize the missing bulk edge.
               * NOTE(review): this grows `edges` while the outer loop
               * iterates up to it, so freshly added edges are examined
               * again; the found-check keeps this idempotent, but
               * confirm the extra passes are intentional. */
              if (!found)
              {
                edge_t *f;
                
    	        f = edge[edges] = (edge_t*) ALLOCATE(EDGESIZE);
  	    
  	        v->edge[v->edges++] = f;
  	        w->edge[w->edges++] = f;
  	    
  	        f->id    = edges++;
  	        f->start = v;
  	        f->end   = w;
  	        f->bulk  = 1;
  	      }
            }
          }
        }
      }
    }
  }
  
  /* Create a separate label for each task */
  for (i=0; i < vertices; i++)
  {
    vertex_t *v = vertex[i];
    
    v->label = i;
  }
  
  labels = i;
}

/* Flatten the labelled graph into the WORK (labels x processors) and
 * DEP (labels x labels) matrices consumed by evaluateMapping().
 * NOTE(review): the calloc results are used unchecked — confirm
 * whether this module has an out-of-memory policy. */
static void convertGraphToMatrix(void)
{
  int n;

  work = (float*) calloc(labels*processors, sizeof(float));
  dep  = (unsigned short int*) calloc(labels*labels, sizeof(unsigned short int));

  /* Accumulate each vertex's work into its (label, processor) cell */
  for (n = 0; n < vertices; n++)
  {
    vertex_t *v = topord[n];

    WORK(v->label, v->processor) += v->work;
  }

  /* A bulk edge makes the end label depend on the start label */
  for (n = 0; n < edges; n++)
  {
    edge_t *e = edge[n];

    if (e->bulk)
      DEP(e->end->label, e->start->label) = 1;
  }
}

/* Debug dump for evaluateMapping(): per runnable label its lock flag
 * and speed, then per processor the current load next to the work
 * remaining there. */
void _printStructures(char *title, int *locked, float *speed, float *load, float **runwork, int runlabels)
{
  int lbl, cpu;

  printf("%s\n", title);

  /* Header row: one column per processor */
  printf("  # l speed ");
  for (cpu = 0; cpu < processors; cpu++)
    printf("%5d (l/w) ", cpu);
  printf("\n");

  for (lbl = 0; lbl < runlabels; lbl++)
  {
    printf("%3d %1d %5.2f ", lbl, locked[lbl], speed[lbl]);

    for (cpu = 0; cpu < processors; cpu++)
      printf("%5.2f/%5.2f ", load[lbl*processors+cpu], runwork[lbl][cpu]);

    printf("\n");
  }

  printf("\n");
}

#ifndef SC_EVL_DEBUG
#define printStructures(a, b, c, d, e, f)
#else
#define printStructures(a, b, c, d, e, f) _printStructures(a, b, c, d, e, f) 
#endif

/* qsort comparator ordering pload_t entries by ascending load.
 *
 * BUGFIX: the previous version returned ((d > 0)<<1)-1, i.e. -1 even
 * when both loads are equal, so cmp(a,b) and cmp(b,a) could both be
 * negative.  An inconsistent comparator makes qsort's behavior
 * undefined (C11 7.22.5).  Return a proper three-way result instead. */
int ploadsort(const void *a, const void *b)
{
  float la = ((pload_t*)a)->load;
  float lb = ((pload_t*)b)->load;

  return (la > lb) - (la < lb);
}

/* *** FLOATING POINT RESOLUTION *** */
/*
 * Simulate executing the WORK/DEP matrices on `processors` processors
 * and return the simulated makespan.
 *
 * Each round: collect the runnable labels (no unsatisfied
 * dependencies, remaining work > 0), give each label a speed that
 * maximally loads its busiest processor, then repeatedly throttle the
 * labels on the most overcommitted processor until no processor's
 * load exceeds 1.  Time then advances by the completion time of the
 * first finishing label part, finished work is removed, and
 * dependencies of completed labels are cleared.  Terminates when no
 * label is runnable.
 */
static double evaluateMapping(void)
{
  float *runwork[labels];         /* WORK rows of the runnable labels        */
  int   runlabels;                /* number of runnable labels               */
  float speed[labels];            /* execution speed factor per label        */
  int   locked[labels];           /* speed pinned by a saturated processor   */
  float load[labels][processors]; /* load[l][p] = speed[l] * runwork[l][p]   */
  float t=0, msl, store, delta;
  int bottleneck, processes, members;
  pload_t process[labels];

  int l, p, l2;

  while (1)
  {
    /* Find runnable labels: no dependencies, but with work to do */
    runlabels = 0;
    for (l=0; l < labels; l++)
    {
      int i = 0;
      unsigned short int *dp = &DEP(l, 0);

      for (l2=0; l2 < labels; l2++, dp++)
        i += *dp;

      if (!i)
      {
        float f = 0;
        float *wp = &WORK(l, 0);

        for (p=0; p < processors; p++, wp++)
          f += *wp;

        if (f)
          runwork[runlabels++] = &WORK(l, 0);
      }
    }

    /* Nothing left to run: the simulation is complete */
    if (!runlabels)
      return t;

    /* Initialize speeds to load one processor maximally */
    for (l=0; l < runlabels; l++)
    {
      float s = 0;

      for (p=0; p < processors; p++)
        s = fmax(s, runwork[l][p]);

      /* s > 0 here: labels with zero total work were filtered above */
      speed[l] = s = 1/s;

      for (p=0; p < processors; p++)
        load[l][p] = s*runwork[l][p];
    }

    /* Start with all labels unlocked */
    memset(locked, 0, runlabels*sizeof(int));

    printStructures("start", locked, speed, &load[0][0], runwork, runlabels);

    while (1)
    {
      /* Maximum summed load over all processors */
      msl = 0;

      for (p=0; p < processors; p++)
      {
        float sumload=0;

        for (l=0; l < runlabels; l++)
          sumload += load[l][p];

        if (sumload > msl)
          msl = sumload;
      }

      if (msl < 1+MYEPS)
        break;

      /* Limit loads due to processor overcommitment */
      while (1)
      {
        msl = 0;
        bottleneck = 0;

        for (p=0; p < processors; p++)
        {
          float sumload=0;

          for (l=0; l < runlabels; l++)
            sumload += load[l][p];

          if (sumload > msl)
          {
            msl = sumload;
            bottleneck = p;
          }
        }

        if (msl < 1+MYEPS)
          break;

        /* Get processes running on this processor */
        processes = 0;

        for (l=0; l < runlabels; l++)
          if (runwork[l][bottleneck])
          {
            process[processes].id   = l;
            process[processes++].load = load[l][bottleneck];
          }

        /* Do the load distribution thingy: hand out the processor's
         * capacity of 1, smallest loads first, scaling down the speed
         * of every label whose share got cut */
        qsort(process, processes, sizeof(pload_t), ploadsort);
        store = 1;
        members = processes;
        for (l2=0; l2 < processes; l2++)
        {
          float newload, s;
          l = process[l2].id;

          newload = fmin(process[l2].load, store/members);

          store -= newload;
          members--;
          s = speed[l] *= newload / process[l2].load;

          /* Recalculate load */
          for (p=0; p < processors; p++)
            load[l][p] = s*runwork[l][p];
        }

        printStructures("limited", locked, speed, &load[0][0], runwork, runlabels);
      }

      /* Lock labels running on fully loaded processors */
      for (p=0; p < processors; p++)
      {
        float sumload = 0;

        for (l=0; l < runlabels; l++)
          sumload += load[l][p];

        if (sumload > 1-MYEPS)
          for (l=0; l < runlabels; l++)
            if (runwork[l][p] && !locked[l])
              locked[l] = 1;
      }

      /* Reset speed of unlocked labels */
      for (l=0; l < runlabels; l++)
        if (!locked[l])
        {
          float s = 0;

          for (p=0; p < processors; p++)
            s = fmax(s, runwork[l][p]);

          speed[l] = s = 1/s;

          /* Recalculate loads */
          for (p=0; p < processors; p++)
            load[l][p] = s*runwork[l][p];
        }

      printStructures("reset", locked, speed, &load[0][0], runwork, runlabels);
    }

    /* Find completion time of first label.
     * BUGFIX: delta is a float; converting the out-of-range MAXDOUBLE
     * to float is undefined behavior (C11 6.3.1.5) — use MAXFLOAT. */
    delta = MAXFLOAT;

    for (l=0; l < runlabels; l++)
      for (p=0; p < processors; p++)
        if (runwork[l][p] && load[l][p] > 0)
          delta = fmin(delta, runwork[l][p] / load[l][p]);

    /* Advance label completion */
    for (l=0; l < runlabels; l++)
      for (p=0; p < processors; p++)
      {
        runwork[l][p] -= delta * load[l][p];
        if (runwork[l][p] < MYEPS)
          runwork[l][p] = 0;
      }

    /* Satisfy dependencies of completed labels */
    for (l=0; l < labels; l++)
    {
      float sumwork=0;
      float *wp = &WORK(l, 0);

      for (p=0; p < processors; p++, wp++)
        sumwork += *wp;

      if (!sumwork)
      {
        for (l2=0; l2 < labels; l2++)
          DEP(l2, l) = 0;
      }
    }

    printStructures("completed", locked, speed, &load[0][0], runwork, runlabels);

    t += delta;
  }
}

/* Check that the current mapping respects operation affinities.
 * Returns 1 on a violation, 0 when the mapping is acceptable. */
static int testAffinity(void)
{
  int affinity_proc[MAX_PROCESSORS], id = -1, i;
  int proc_affinity[MAX_PROCESSORS];
  scm_op_t *o;

  for (i = 0; i < MAX_PROCESSORS; i++)
    affinity_proc[i] = -1;

  /* All operations with the same affinity should be mapped to the
   * same processor */
  while (tblgetnextelement(&scm_ops, &id, &o))
  {
    if (!o->processor || o->affinity < 0)
      continue;

    if (affinity_proc[o->affinity] < 0)
      affinity_proc[o->affinity] = o->processor->processor->id;
    else if (affinity_proc[o->affinity] != o->processor->processor->id)
      return 1;
  }

  if (settings.hardaffinity)
  {
    /* No two affinities may be mapped to the same processor */
    for (i = 0; i < MAX_PROCESSORS; i++)
      proc_affinity[i] = -1;

    for (i = 0; i < MAX_PROCESSORS; i++)
    {
      if (affinity_proc[i] < 0)
        continue;

      if (proc_affinity[affinity_proc[i]] < 0)
        proc_affinity[affinity_proc[i]] = i;
      else
        return 1;
    }
  }

  return 0;
}

/*
 * Evaluate the current mapping with dependency-aware simulation.
 *
 * iop scales the per-byte transport cost.  Builds and labels the task
 * graph, simulates it with evaluateMapping(), adds idle power for
 * powered targets, and folds makespan, summed and squared
 * per-processor work and energy into one weighted cost using the
 * scs_mapper.* coefficients.  Returns 1/cost (higher is better), or
 * 0 when the mapping violates operation affinities.
 */
double scEvlMappingDeps(double iop)
{
  double result;

  tmr_t timer = tmrInit();
  float t;
  float totalwork[MAX_PROCESSORS];
  double maxtime=0, totaltime=0, totalsqtime=0, totalenergy=0, maxtotaltime=0;
  int i, id=-1;
  scs_target_t *st;
  
  /* Affinity-violating mappings get the worst possible fitness */
  if (map_uses_affinity && testAffinity()) return 0;

  tmrStart(&timer);
  generateGraph(iop);
  orderTopologically();
  findDelays();
  assignLabels();
  convertGraphToMatrix();
  
#ifdef SC_EVL_DEBUG
  printGraph();
#endif

  /* Per-processor work totals and total dynamic energy */
  memset(totalwork, 0, processors*sizeof(float));

  for (i=0; i < vertices; i++)
  {
    vertex_t *v = topord[i];
    totalwork[v->processor] += v->work;
    totalenergy += v->energy;
  }
    
  if (logEdit(scl.crawl, LOG_STREAM_GET_ENABLED))
  {
    lprintf(STREAM(scl.crawl), "Evaluating performance for %d labels on %d processors",
	    labels, processors);

#ifdef SC_EVL_DEBUG
    printf("WORK:\n");
    for (i=0; i < labels; i++)
    {
      int j;
      
      printf("%2d: ", i);

      for (j=0; j < processors; j++)
	printf("%6.2f ", WORK(i, j));

      printf("\n");
    }

    printf("\nDEP:\n");

    for (i=0; i < labels; i++)
    {
      int j;
    
      printf("%2d: ", i);

      for (j=0; j < labels; j++)
	printf("%d ", DEP(i, j));

      printf("\n");
    }
#endif
  }

  t = evaluateMapping();

  /* The matrices were allocated by convertGraphToMatrix() */
  free(work);
  free(dep);

  maxtime = t;
  
  /* Add idle power consumption */
  while (tblgetnextelement(&scs_targets, &id, &st))
    if (st->address && st->power > 0)
      totalenergy += st->power * maxtime;
  
  for (i=0; i < processors; i++)
  {
    maxtotaltime = fmax(maxtotaltime, totalwork[i]);
    totaltime += totalwork[i];
    totalsqtime += totalwork[i]*totalwork[i];
  }
  totalsqtime = sqrt(totalsqtime);

  if (scs_mapper.maxtime == 0 && scs_mapper.avgtime == 0 && scs_mapper.avgsqtime == 0 && scs_mapper.avgenergy == 0 && scs_mapper.avgresources == 0)
  {
    /* No dependencies */
    result = maxtotaltime;
    last_makespan_prediction = maxtotaltime;
    last_energy_prediction = totalenergy;
  }
  else
  {
    result = scs_mapper.maxtime*maxtime + scs_mapper.avgtime*totaltime + scs_mapper.avgsqtime*totalsqtime + scs_mapper.avgenergy * totalenergy;
    last_makespan_prediction = maxtime;
    last_energy_prediction = totalenergy;
  }

  tmrStop(&timer);

  lprintf(STREAM(scl.debug), "Mapping evaluated to {%f*%f + %f*%f + %f*%f + %f*%f} = %f in %.0f us",
          scs_mapper.maxtime, maxtime, scs_mapper.avgtime, totaltime,
          scs_mapper.avgsqtime, totalsqtime, scs_mapper.avgenergy, totalenergy,
          result, tmrGetValueInUs(&timer));
          
  /* NOTE(review): a result of 0 (e.g. an empty mapping with all
   * coefficients zero) makes this a division by zero — confirm
   * callers cannot reach that case */
  return 1.0/result;
}
