/**  
 * Copyright (c) 2010 University of Pennsylvania.
 *     All rights reserved.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an "AS
 *  IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 *  express or implied.  See the License for the specific language
 *  governing permissions and limitations under the License.
 *
 */
 
void init_mquerycomp()         // initialize structures
{
  /* Reset every multicast cache slot to the all-zero (unused) state. */
  memset(&multicast_cache[0], 0, sizeof multicast_cache);
}

/* Forward an already-marshaled buffer of `size` bytes to the join target at
 * queryjointarget_buffer[join_index], using that entry's stored source route
 * (path_nodes/path_nodecount). The forward is destructive and reports
 * ROUTE_SUCCESS; no multicast payload is attached (0, NULL).
 * Ownership of `buffer` stays with the caller. */
void sendto_joinnode(int join_index, uint8_t size, uint8_t *buffer)
{
DO_STACKTEST

  init_forward(TOS_NODE_ID, TOS_NODE_ID, queryjointarget_buffer[join_index].id,
    queryjointarget_buffer[join_index].path_nodecount, queryjointarget_buffer[join_index].path_nodes,
    FORWARD_DESTRUCTIVE, size, buffer, 0, NULL, ROUTE_SUCCESS);
}

/* Marshal a tuple message and ship it to the base station's join index.
 * Called from the window join layer once joined tuples have been computed.
 * The temporary wire buffer is sized by the marshaling estimate, filled,
 * forwarded, and released before returning. */
void send_tuplemsg(TUPLE_MSGHI *msg)
{
  uint8_t *wire_buffer;
  int wire_size;

DO_STACKTEST

  DBGOUT("Sending %d aggregate tuples to Base", msg->page.tuple_count);
  wire_buffer = (uint8_t *) mmalloc(marshalest_tuplemsghi(msg));
  wire_size = marshal_tuplemsghi(msg, wire_buffer);
  sendto_joinnode(JOININDEX_BASE, wire_size, wire_buffer);
  mfree(wire_buffer);
}

/* Wrap a single rejected tuple into a fresh TUPLE_MSGHI page, mark it so
 * receivers skip join processing (DISABLEJOIN), and queue it for UART
 * delivery tagged with source_node. NOTE: mutates the caller's tuple
 * in place (the DISABLEJOIN bit stays set after return). */
void enqueue_invalidtuple(ADDRESS source_node, uint8_t tuple_len, ATTRIBUTE *tuple)
{
  TUPLE_MSGHI msg;
  uint8_t *buffer;
  int size;

DO_STACKTEST

  DBGOUT("Marking invalid tuple as disabled join and adding to UART");

  msg.cmd = CMDHI_TUPLE;
  msg.cache_multicast = 0;          // diagnostics traffic never uses multicast caching
  page_init(&msg.page);
  TUPLESET_DISABLEJOIN(tuple, 1);   // flag tuple so it will not be joined downstream
  addtuple_wrap(&msg.page, tuple_len, tuple);

  buffer = (uint8_t *) mmalloc(marshalest_tuplemsghi(&msg));   // size by estimate, then marshal
  size = marshal_tuplemsghi(&msg, buffer);
  enqueue_uartmsg(1, size, buffer, source_node);
  mfree(buffer);
}

/* Handler for received tuple messages at a join node (or at Base).
 * Unmarshals the page from `data`, then walks each tuple:
 *   - multi-relation, non-joined tuples (REL != REL_J) with neither
 *     SENDTOROOT nor PRUNE set are handed to place_tuple(); a non-zero
 *     return (placement rejected) bounces the tuple back via
 *     enqueue_invalidtuple().
 *   - otherwise, if this node is the base station, the whole raw message
 *     is forwarded to the UART once (the i == 0 guard ensures the
 *     data/data_len pair is enqueued only on the first iteration). */
void handle_tuplemsghi(ADDRESS source_node, ADDRESS dest_node, uint8_t path_nodecount,
  ADDRESS *path_nodes, uint8_t data_len, uint8_t *data)
{                                 // handler for received tuples at the join node (or at Base)
  int i;
  TUPLE_MSGHI msg_in;
  uint8_t tuple_len;
  ATTRIBUTE tuple[MAX_TUPLELEN];

DO_STACKTEST

  unmarshal_tuplemsghi(&msg_in, data, source_node);
  DBGOUT("Received %d tuples from node %d", msg_in.page.tuple_count, source_node);
  for (i = 0; i < msg_in.page.tuple_count; i++)
  {
    gettuple_wrap(&msg_in.page, i, &tuple_len, tuple);

    DBGOUT("Received tuple from node %d, query %d, seq %d, rel %d", TUPLEGET_NODE(tuple),
      TUPLEGET_QUERY(tuple), TUPLEGET_SEQ(tuple), TUPLEGET_REL(tuple));
    print_tuple(&tuple[TUPLEID_LEN], tuple_len-TUPLEID_LEN, "Tuple");   // skip the id prefix when printing

    if (query_buffer[TUPLEGET_QUERY(tuple)].spec.srcrel_count > 1 && TUPLEGET_REL(tuple) != REL_J &&
      (query_buffer[TUPLEGET_QUERY(tuple)].options & (QUERYOPT_SENDTOROOT | QUERYOPT_PRUNE)) == 0)
    {
      // place_tuple() returning non-zero means the tuple could not be placed; report it back
      if (place_tuple(tuple_len, tuple))
        enqueue_invalidtuple(source_node, tuple_len, tuple);
    } else if (i == 0 && TOS_NODE_ID == BASESTATION_ADDR)
    {
      DBGOUT("Adding tuples to UART");
      msgsent_extern += msg_in.page.tuple_count;      // account all tuples in this message
      enqueue_uartmsg(1, data_len, data, source_node);  // forward the raw message once
    }

#ifdef TOSSIM
    log_queryjoinin(source_node, tuple_len, tuple);
#endif
  }
}

/* Failure handler invoked at a source/target or a join node when a tuple
 * message doesn't make it to its join node (or to Base).
 * For each failed tuple:
 *   - at a source/target (rel != REL_J): fall back to joining at Base,
 *     rebuild the multicast plan, and mark the relation improved so the
 *     history is resent next cycle via check_hotstart.
 *   - at a join node (rel == REL_J): we have lost our path to Base; notify
 *     every joined source/target exactly once (all_notified latch) to stop
 *     sending to us. If we ourselves are one of the sources/targets,
 *     instead disable the self-join pairs and recompute the plan locally. */
void handlefail_tuplemsghi(ADDRESS source_node, ADDRESS dest_node, uint8_t path_nodecount,
  ADDRESS *path_nodes, uint8_t data_len, uint8_t *data)
{                                        // handler at a source/target or a join node, when a tuple doesn't make it to join/Base
  int i, j, k;
  TUPLE_MSGHI msg_in, msg_out;
  int size;
  uint8_t *buffer;
  uint8_t tuple_len, rel_id;
  ATTRIBUTE tuple[MAX_TUPLELEN];
  QUERY_ENTRY *p;
  uint8_t all_notified = 0;            // ensure the notification fan-out runs at most once

DO_STACKTEST

  unmarshal_tuplemsghi(&msg_in, data, source_node);
  DBGOUT("Failed to send tuples to node %d", source_node);
  for (j = 0; j < msg_in.page.tuple_count; j++)
  {
    gettuple_wrap(&msg_in.page, j, &tuple_len, tuple);
#ifdef TOSSIM
    print_tuple(tuple, tuple_len, "Failed to send tuple");
#endif

    p = &query_buffer[TUPLEGET_QUERY(tuple)];
    rel_id = TUPLEGET_REL(tuple);
    if (rel_id != REL_J)    // if we are a source/target and not a join point ...
    {
      resetjoin_base(source_node);                      // use Base instead of source_node for join
      optimize_plan();                                  // rebuild multicast tree, since join points have changed
      p->rel[rel_id].multicast.improved = 1;            // tuples will be sent next sampling cycle, "check_hotstart"
    } else if (all_notified == 0)
    {
      DBGOUT("Cannot send results of join!");    // we're the join node, and we've lost our path to Base

      all_notified = 1;
      for (i = 0; i < MAX_JOINNODES; i++)               // notify all sources and targets we're joining to send to Base instead
        if (windowjoin_nodes[i].used)
        {
          msg_out.cmd = CMDHI_TUPLE;
          msg_out.cache_multicast = 0;
          page_init(&msg_out.page);

          tuple_len = TUPLEID_LEN;                      // id-only notification tuple (no payload attributes)
          TUPLESET(tuple, TOS_NODE_ID, 1, 0, p->query_id, REL_S, 0); // send back S tuple, doesn't matter as long as not J
          addtuple_wrap(&msg_out.page, tuple_len, tuple);
          if (windowjoin_nodes[i].node_id == TOS_NODE_ID)  // if we are one of the sources or targets
          {                                           // disable pairs for which we join at ourselves (we=source and join node)
            for (k = 0; k < QUERYSOURCEBUFFER_SIZE; k++)                                     // or we = target and join node
              if (querysource_buffer[k].used && querysource_buffer[k].jointarget_index >= 0 &&
                queryjointarget_buffer[querysource_buffer[k].jointarget_index].id == TOS_NODE_ID)
              {
                DBGOUT("Disabling join with %d because of failed base station!",
                  querysource_buffer[k].id);
                querysource_buffer[k].jointarget_index = -1;   // -1 == no join target
              }

            optimize_plan();                         // recompute multicast tree
            for (k = 0; k < SRCREL_COUNT; k++)
              p->rel[k].multicast.improved = 1;      // renew multicast trees for all relations
          } else {                                   // otherwise we notify source/target to stop sending to us
            DBGOUT("Notifying source %d to stop sending", windowjoin_nodes[i].node_id);

            buffer = (uint8_t *) mmalloc(marshalest_tuplemsghi(&msg_out));
            size = marshal_tuplemsghi(&msg_out, buffer);
            init_forward(TOS_NODE_ID, TOS_NODE_ID, windowjoin_nodes[i].node_id, windowjoin_nodes[i].path_len,
              windowjoin_nodes[i].path, FORWARD_DESTRUCTIVE, size, buffer, 0, NULL, ROUTE_FAIL);
            mfree(buffer);
          }
        }
    }
  }
}

/* Search the join-target buffer for an entry whose stored path can take a
 * shortcut: `node` must appear on the path at or before `dest_node`, and
 * `neighbor_node` must NOT already be on the path (otherwise the shortcut
 * adds nothing).
 * On success returns 0 and fills *join_index (buffer slot), *path_index
 * (position of `node` on the path) and *dest_index (position of
 * `dest_node`). Returns -1 if no entry qualifies; the out-parameters may
 * hold partial results from rejected candidates in that case, so callers
 * must only read them on a 0 return. */
int find_shortcutpos(ADDRESS dest_node, ADDRESS node, ADDRESS neighbor_node, int *join_index,
  int *path_index, int *dest_index)
{
  int i;

DO_STACKTEST

  for (i = 0; i < QUERYJOINTARGETBUFFER_SIZE; i++)
    if (queryjointarget_buffer[i].used && jointarget_anymultiplicity(i) > 0)
    {
      *path_index = find_pathpos(queryjointarget_buffer[i].path_nodecount,
        queryjointarget_buffer[i].path_nodes, node);
      if (*path_index < 0)
        continue;                              // node not on this path

      *dest_index = find_pathpos(queryjointarget_buffer[i].path_nodecount,
        queryjointarget_buffer[i].path_nodes, dest_node);
      if (*dest_index < 0 || *path_index > *dest_index)
        continue;                              // dest missing, or node lies past it

      if (find_pathpos(queryjointarget_buffer[i].path_nodecount,
        queryjointarget_buffer[i].path_nodes, neighbor_node) >= 0)
        continue;                              // neighbor already on the path: no shortcut

      *join_index = i;
      return 0;
    }

  return -1;
}

/* Handler for path collapse messages: each hint proposes splicing a
 * neighbor/alternate path into stored join-target routes to shorten them.
 * Per hint the handler repeatedly applies shortcuts (find_shortcutpos),
 * building a candidate path in `path[]`, swapping it into
 * queryjointarget_buffer while stashing the old path in restore_buffer.
 * It then re-optimizes and compares total remaining multicast cost:
 * an improvement keeps the new paths (old ones freed, relations flagged
 * improved); otherwise the change is reverted from restore_buffer.
 * Hints with path_len == 0 are multicast-merge hints and are mirrored
 * (source<->neighbor, dest1<->dest2) so both directions are tried —
 * hence hints[] is sized 2*MAX_PATHCOLLAPSEHINTS. */
void handle_pathcollapsemsghi(ADDRESS source_node, ADDRESS dest_node, uint8_t path_nodecount,
  ADDRESS *path_nodes, uint8_t data_len, uint8_t *data)  // handler for path collapse messages
{
  int i, k, l, join_index, join_index1, path_index, path_index1, dest_index, dest_index1;
  PATHCOLLAPSE_MSGHI msg;
  uint8_t path_len;
  ADDRESS path[MAX_PATHLEN], t;
  uint8_t path_changed, only_improvement;
  uint32_t oldcost_sum, newcost_sum;
  struct {
    uint8_t path_len;          // saved previous path, for rollback
    ADDRESS *path;
  } PACKING_MACRO restore_buffer[QUERYJOINTARGETBUFFER_SIZE];
  struct {
    ADDRESS source_node, dest_node1, dest_node2, neighbor_node;
    uint8_t path_len;
    ADDRESS *path;             // NULL for multicast-merge hints (path_len == 0)
  } PACKING_MACRO hints[2*MAX_PATHCOLLAPSEHINTS];
  uint8_t hint_count;
  QUERY_ENTRY *p;

DO_STACKTEST

  unmarshal_pathcollapsemsghi(&msg, data, source_node);
  DBGOUT("Path collapse message received from %d, %d hints", source_node, msg.hint_count);

  // copy hints out of the message into the (larger) local array
  hint_count = msg.hint_count;
  memset(hints, 0, sizeof(hints));
  for (l = 0; l < hint_count; l++)
  {
    hints[l].source_node = msg.hints[l].source_node;
    hints[l].dest_node1 = msg.hints[l].dest_node1;
    hints[l].dest_node2 = msg.hints[l].dest_node2;
    hints[l].neighbor_node = msg.hints[l].neighbor_node;
    hints[l].path_len = msg.hints[l].path_len;
    hints[l].path = (hints[l].path_len > 0) ? msg.hints[l].path : NULL;
  }

  for (l = 0; l < msg.hint_count; l++)
    if (hints[l].path_len == 0)               // consider multicast hints in both directions
    {
      memcpy(&hints[hint_count], &hints[l], sizeof(hints[l]));

      // mirror the hint: swap source/neighbor and the two destinations
      t = hints[hint_count].source_node;
      hints[hint_count].source_node = hints[hint_count].neighbor_node;
      hints[hint_count].neighbor_node = t;
      t = hints[hint_count].dest_node1;
      hints[hint_count].dest_node1 = hints[hint_count].dest_node2;
      hints[hint_count].dest_node2 = t;

      DBGOUT("Added multicast hint (%d, %d, %d, %d)", hints[hint_count].source_node,
        hints[hint_count].dest_node1, hints[hint_count].neighbor_node, hints[hint_count].dest_node2);
      hint_count++;
    }

  for (l = 0; l < hint_count; l++)
  {
#ifdef TOSSIM
    DBGOUT("Source: %d, Dest: %d, Neighbor: %d", hints[l].source_node, hints[l].dest_node1,
      hints[l].neighbor_node);
    if (hints[l].path_len > 0)
      print_path(hints[l].path, hints[l].path_len, "Path");
    else DBGOUT("Other dest: %d", hints[l].dest_node2);
#endif

    memset(restore_buffer, 0, sizeof(restore_buffer));
    path_changed = 0; only_improvement = 1;
    for (;;)                                // apply the shortcut as many times as possible
    {
      if (find_shortcutpos(hints[l].dest_node1, hints[l].source_node, hints[l].neighbor_node,
        &join_index, &path_index, &dest_index) < 0)
      {
        DBGOUT("Cannot apply shortcut further");
        break;
      }

      DBGOUT("Join target: %d, Path index: %d", join_index, path_index);
      if (hints[l].path_len == 0)  // multicast merge
      {
        // need a matching shortcut on the mirrored side too
        if (find_shortcutpos(hints[l].dest_node2, hints[l].neighbor_node, hints[l].source_node,
          &join_index1, &path_index1, &dest_index1) < 0)
        {
          DBGOUT("Cannot apply shortcut further");
          break;
        }

        DBGOUT("Join target1: %d, Path index1: %d", join_index1, path_index1);
        // candidate path = prefix of path[join_index] up to path_index,
        // followed by the tail of path[join_index1] from path_index1 on
        path_len = path_index+1+queryjointarget_buffer[join_index1].path_nodecount-path_index1;
        memcpy(path, queryjointarget_buffer[join_index].path_nodes, (path_index+1)*sizeof(ADDRESS));
        memcpy(&path[path_index+1], &queryjointarget_buffer[join_index1].path_nodes[path_index1],
          (queryjointarget_buffer[join_index1].path_nodecount-path_index1)*sizeof(ADDRESS));

        join_index = join_index1;   // the merged path replaces entry join_index1 below
        only_improvement = 1;   // demand strict improvement in cost
      } else {                 // other source merge
        // candidate path = prefix up to path_index, then neighbor, then the
        // hint's path, then the remainder of the original path past dest_index
        path_len = path_index+2+hints[l].path_len+
          queryjointarget_buffer[join_index].path_nodecount-1-dest_index;

        memcpy(path, queryjointarget_buffer[join_index].path_nodes, (path_index+1)*sizeof(ADDRESS));
        path[path_index+1] = hints[l].neighbor_node;
        memcpy(&path[path_index+2], hints[l].path, hints[l].path_len*sizeof(ADDRESS));

        if (dest_index < queryjointarget_buffer[join_index].path_nodecount-1)
          memcpy(&path[path_index+2+hints[l].path_len],
            &queryjointarget_buffer[join_index].path_nodes[dest_index+1],
            (queryjointarget_buffer[join_index].path_nodecount-dest_index-1)*sizeof(ADDRESS));

        only_improvement = 0;  // cost could be the same
      }

      // swap in the candidate path, keeping the old one for possible rollback
      path_changed = 1;
      restore_buffer[join_index].path_len = queryjointarget_buffer[join_index].path_nodecount;
      restore_buffer[join_index].path = queryjointarget_buffer[join_index].path_nodes;

      queryjointarget_buffer[join_index].path_nodecount = path_len;
      queryjointarget_buffer[join_index].path_nodes = (ADDRESS *) mcalloc(path_len, sizeof(ADDRESS));
      memcpy(queryjointarget_buffer[join_index].path_nodes, path, path_len*sizeof(ADDRESS));

#ifdef TOSSIM
      print_path(queryjointarget_buffer[join_index].path_nodes,
        queryjointarget_buffer[join_index].path_nodecount, "Updated path");
#endif
    }

    if (path_changed)
    {
      // total multicast cost over all live queries, weighted by remaining lifetime
      oldcost_sum = 0; 
      for (k = 0; k < QUERYBUFFER_SIZE; k++)
        if (query_buffer[k].used)
          for (i = 0; i < SRCREL_COUNT; i++)
            oldcost_sum += query_buffer[k].rel[i].multicast.innet_cost*query_percentremaining(k)/100;

      optimize_plan();

      newcost_sum = 0;
      for (k = 0; k < QUERYBUFFER_SIZE; k++)
        if (query_buffer[k].used)
          for (i = 0; i < SRCREL_COUNT; i++)
            newcost_sum += query_buffer[k].rel[i].multicast.innet_cost*query_percentremaining(k)/100;

      DBGOUT("Old remaining cost: %d, New remaining cost: %d", oldcost_sum, newcost_sum);
      // only_improvement == 1 tightens the acceptance test by one cost unit
      if (newcost_sum > oldcost_sum-only_improvement)
      {
        DBGOUT("Not an improvement. Reverting...");
        for (i = 0; i < QUERYJOINTARGETBUFFER_SIZE; i++)
          if (restore_buffer[i].path && queryjointarget_buffer[i].used)
          {
            mfree(queryjointarget_buffer[i].path_nodes);   // drop the candidate path
            queryjointarget_buffer[i].path_nodecount = restore_buffer[i].path_len;
            queryjointarget_buffer[i].path_nodes = restore_buffer[i].path;
          }

        optimize_plan();
      } else {
        for (i = 0; i < QUERYJOINTARGETBUFFER_SIZE; i++)
          if (restore_buffer[i].path)
            mfree(restore_buffer[i].path);                 // change accepted; old paths no longer needed

        for (k = 0; k < QUERYBUFFER_SIZE; k++)
          if (query_buffer[k].used)
          {
            p = &query_buffer[k];
            for (i = 0; i < SRCREL_COUNT; i++)
            {
              // mark improved when the expected remaining saving exceeds 5% of the last-sent cost
              p->rel[i].multicast.improved |= ((int) p->rel[i].multicast.lastsent_cost/20 <
                ((int) p->rel[i].multicast.lastsent_cost-p->rel[i].multicast.cost)*
                query_percentremaining(k)/100);
              DBGOUT("Query %d: Rel: %d, Improved: %d, Last cost: %d, Current cost: %d, remaining: %d%%",
                k, i, p->rel[i].multicast.improved, p->rel[i].multicast.lastsent_cost,
                p->rel[i].multicast.cost, query_percentremaining(k));
            }
          }
      }
    }
  }
}

/* Handler for statistics messages. RESPONSE messages arriving at the base
 * station are forwarded to the UART; RESET sets the reset request flag;
 * every other type is a stats request: the matching log is written to an
 * in-memory file, compressed, and staged in file_buffer for fragment-wise
 * transmission (send_statsfragment is driven later by the query timer).
 * Requests not addressed to this node (is_dest) are ignored. */
void handle_statsmsghi(ADDRESS source_node, ADDRESS dest_node, uint8_t path_nodecount,
  ADDRESS *path_nodes, uint8_t data_len, uint8_t *data)
{
  STATS_MSGHI msg_in;
#ifdef USE_MEMFILES  
  FILE *f;
  char s[256];
  uint8_t *compr_buffer;
  int16_t compr_len;
#endif

DO_STACKTEST
  
  DBGOUT("Stats message received from %d", source_node);
  unmarshal_statsmsghi(&msg_in, data);
  
  if (msg_in.type != STATTYPE_RESPONSE && !is_dest(msg_in.dest_node))
  {
    DBGOUT("Request addressed to %d. Skipping.", msg_in.dest_node);
    return;
  }
  
  switch (msg_in.type) 
  {
    case STATTYPE_RESET:
       DBGOUT("Requesting reset...");
       reset_requested = RESET_CODE;
       break;
     
    case STATTYPE_RESPONSE:
      if (TOS_NODE_ID == BASESTATION_ADDR)
      {
        DBGOUT("Sending stats to UART");
        enqueue_uartmsg(1, data_len, data, source_node);
      }
      break;

    default:                    // any STATTYPE_REQUEST* variant
#ifdef USE_MEMFILES    
      if (filebuffer_alloc > 0)   // already generating stats
      {
        DBGOUT("Already generating stats.\n");
        break;
      }
  
      init_filebuffer(MAX_STATLOGFILESIZE);  // maximum size of log file

      snprintf(s, sizeof(s), "stats%d-%d.txt", msg_in.type, TOS_NODE_ID);
      f = file_open(s, "a");
      
      switch (msg_in.type) {      
        case STATTYPE_REQUESTROUTESTATS:
          dolog_routestats(f);
          break;
        case STATTYPE_REQUESTLINKSTATE:   
          dostore_link(f);    
          break;      
        case STATTYPE_REQUESTTREESTATE:   
          dostore_trees(f);
          break;    
        case STATTYPE_REQUESTJOINSTATE:   
          dostore_joinstate(f);            
          break;
      }
      fclose(f);
      
      if (filebuffer_size > 0)
      {
        // NOTE(review): plain malloc here while the rest of the file uses
        // mmalloc/mfree — confirm reset_filebuffer releases file_buffer with
        // the matching deallocator (mismatched allocator pairs are UB).
        compr_buffer = (uint8_t *) malloc(filebuffer_size);
        compressASCII(file_buffer, filebuffer_size, compr_buffer, &compr_len);    
        reset_filebuffer();
        filebuffer_pos = 0;
        filebuffer_size = compr_len;      // stage compressed stats for fragmented sending
        file_buffer = compr_buffer;      
        DBGOUT("Sending stats to Base station in %d fragments", DIV_CEIL(filebuffer_size, MAX_STATSIZE));    // timer_querycomp will call send_statsfragment below
      } else reset_filebuffer();             
#else
      DBGOUT("Not compiled with MemFile support.");
#endif      
      break;            
  }
}

#ifdef USE_MEMFILES  
/* Send the next fragment of the staged stats buffer toward the base
 * station. No-op when nothing is staged. filebuffer_pos advances by
 * MAX_STATSIZE per call (last fragment may be shorter), so
 * DIV_CEIL(filebuffer_pos, MAX_STATSIZE) yields the current 0-based
 * fragment index. After the final fragment the buffer is released. */
void send_statsfragment()
{
  STATS_MSGHI msg_out;
  uint8_t buffer[sizeof(msg_out)];
  int size;
  
DO_STACKTEST

  if (filebuffer_size == 0)
    return;

  msg_out.cmd = CMDHI_STATS;      
  msg_out.type = STATTYPE_RESPONSE;
  msg_out.fragment_count = DIV_CEIL(filebuffer_size, MAX_STATSIZE);                
  msg_out.fragment_index = DIV_CEIL(filebuffer_pos, MAX_STATSIZE);
  DBGOUT("Sending stats fragment %d/%d", msg_out.fragment_index+1, msg_out.fragment_count);         
    
  msg_out.stat_size = MIN(MAX_STATSIZE, filebuffer_size-filebuffer_pos);  // last fragment may be partial
  memcpy(msg_out.stats, &file_buffer[filebuffer_pos], msg_out.stat_size);
  size = marshal_statsmsghi(&msg_out, buffer);
  init_forward(TOS_NODE_ID, TOS_NODE_ID, BASESTATION_ADDR, 0, NULL, FORWARD_DESTRUCTIVE, size, buffer, 0, NULL, ROUTE_SUCCESS);
  filebuffer_pos += msg_out.stat_size;    

  if (msg_out.fragment_index+1 == msg_out.fragment_count)
    reset_filebuffer();     // all fragments sent: free the staged buffer
}
#endif
  
/* Look up a cached multicast by (source node, query, relation).
 * Returns the cache slot index, or -1 when no matching entry exists. */
int find_multicastcacheentry(uint8_t query_id, uint8_t rel_id, ADDRESS node)
{
  int slot;

  for (slot = 0; slot < MULTICASTCACHE_SIZE; slot++)
  {
    if (!multicast_cache[slot].used)
      continue;
    if (multicast_cache[slot].source_node != node)
      continue;
    if (multicast_cache[slot].query_id != query_id || multicast_cache[slot].rel_id != rel_id)
      continue;
    return slot;
  }

  return -1;
}

/* Return the index of the first unused slot in the multicast cache,
 * or -1 when the cache is full. */
int find_freemulticastcacheentry()
{
  int slot = 0;

  while (slot < MULTICASTCACHE_SIZE)
  {
    if (multicast_cache[slot].used == 0)
      return slot;
    slot++;
  }

  return -1;
}

/* Intermediate-hop handler for forwarded tuple payloads: manages the
 * multicast cache. Only acts when this node is the message destination and
 * the message requests caching (cache_multicast != 0).
 * cache_multicast encoding as used here: bit 1 set (value >= 2) means
 * "store/update the attached multicast buffer"; value exactly 1 means
 * "look up the previously cached buffer and attach it". The cache key is
 * (source_node, query_id, rel_id) taken from the first tuple in the page. */
void handleforwardinter_tuple(FORWARD_MSG *msg)
{                                     // perform intermediate operation during forwarding when carrying tuple payload
  int cache_index;                   // handle caching of multicast
  TUPLE_MSGHI msghi;
  uint8_t query_id, rel_id;
  uint8_t tuple_len;
  ATTRIBUTE tuple[MAX_TUPLELEN];
  MULTICASTCACHE_ENTRY *p;

DO_STACKTEST

  unmarshal_tuplemsghi(&msghi, msg->data, msg->source_node);
  if (msg->dest_node == TOS_NODE_ID && msghi.cache_multicast != 0)
  {
    gettuple_wrap(&msghi.page, 0, &tuple_len, tuple);   // cache key comes from the first tuple
    query_id = TUPLEGET_QUERY(tuple);
    rel_id = TUPLEGET_REL(tuple);

    if (msghi.cache_multicast >> 1)        // store/update branch (value >= 2)
    {
      if (msg->multicastbuffer_len == 0)
      {
        DBGERR("Cannot cache empty multicast!");
        pause_sim();
        return;
      }

      cache_index = find_multicastcacheentry(query_id, rel_id, msg->source_node);
      if (cache_index >= 0)                       // check if we are updating a previously cached multicast
      {
        p = &multicast_cache[cache_index];
        if (msg->multicastbuffer_len == p->buffer_len &&
          memcmp(msg->multicast_buffer, p->buffer, msg->multicastbuffer_len) == 0)
        {
          DBGERR("Ignored redundant update to multicast");
          // downgrade the message to "use cached copy" and re-marshal in place
          msghi.cache_multicast = 1;
          marshal_tuplemsghi(&msghi, msg->data);
          return;
        }

        p->buffer_len = 0;
        mfree(p->buffer);    // reset entry
      } else {
        cache_index = find_freemulticastcacheentry();          // we're caching a new multicast
        if (cache_index < 0)
        {
          DBGERR("Cannot cache multicast!");
          pause_sim();
          return;
        }
        p = &multicast_cache[cache_index];
      }

      DBGOUT("Multicast cached at position %d", cache_index);
      p->used = 1;
      p->source_node = msg->source_node;
      p->query_id = query_id;
      p->rel_id = rel_id;
      p->buffer_len = msg->multicastbuffer_len;
      p->buffer = (uint8_t *) mmalloc(msg->multicastbuffer_len);
      copy:
      memcpy(p->buffer, msg->multicast_buffer, msg->multicastbuffer_len);
    } else {                               // lookup branch (value == 1)
      if (msg->multicastbuffer_len > 0)
      {
        DBGERR("Message already has multicast!");
        pause_sim();
        return;
      }

      cache_index = find_multicastcacheentry(query_id, rel_id, msg->source_node);  // ignore endpoints of tree
      if (cache_index < 0)
      {
        DBGERR("Cannot find cached multicast!");
        pause_sim();
        return;
      }
      p = &multicast_cache[cache_index];

      DBGOUT("Multicast found in cache at position %d", cache_index);
      msg->multicastbuffer_len = p->buffer_len;   // attach the cached buffer to the message
      memcpy(msg->multicast_buffer, p->buffer, msg->multicastbuffer_len);
    }
  }
}

/* Decide whether the multicast buffer must travel with this tuple message.
 * The buffer is needed unless cache_multicast is exactly 1 ("use the copy
 * cached at intermediate nodes"): 0 means no caching at all, and values
 * >= 2 mean the buffer is being stored/updated — both require it on the
 * wire. Equivalent to the original (x == 0) || (x >> 1). */
uint8_t handlesendmulticast_tuple(FORWARD_MSG *msg)
{
  TUPLE_MSGHI msghi;

DO_STACKTEST

  unmarshal_tuplemsghi(&msghi, msg->data, msg->source_node);
  return msghi.cache_multicast != 1;
}

/* Strip the multicast-caching request from a marshaled tuple message:
 * unmarshal, zero cache_multicast, and re-marshal in place over `data`,
 * updating *data_len to the new marshaled size. */
void clearmulticastpayload_tuple(uint8_t *data_len, uint8_t *data, ADDRESS source_node)
{
  TUPLE_MSGHI msghi;

DO_STACKTEST

  DBGOUT("Clearing multicast payload");
  unmarshal_tuplemsghi(&msghi, data, source_node);
  msghi.cache_multicast = 0;
  *data_len = marshal_tuplemsghi(&msghi, data);
}

/* Try to aggregate two tuples into one. Returns 1 on success (with
 * *result_width/result filled), 0 when the tuples cannot be combined:
 * different query/relation, query has no aggregate fields, aggregation
 * would happen before the join node, inconsistent tuple sizes, or
 * mismatching non-aggregate (group-by) attributes.
 * Tuples may arrive raw or already aggregated (AGGR flag); raw tuples get
 * their aggregate fields computed on the fly via eval_stack. If the merged
 * result equals one input's raw aggregates, the AGGR flag and extra fields
 * are dropped again so the tuple stays in its compact form. */
uint8_t check_canaggr(uint8_t tuple_width1, ATTRIBUTE *tuple1, uint8_t tuple_width2, ATTRIBUTE *tuple2,
  uint8_t *result_width, ATTRIBUTE *result)
{
  int i;
  uint8_t query_id, rel_id, nonaggr_count;
  ATTRIBUTE aggr1[MAX_TUPLELEN], aggr2[MAX_TUPLELEN];
  QUERY_ENTRY *p;
  RELSPEC_ENTRY *pr, *pj;

DO_STACKTEST

  query_id = TUPLEGET_QUERY(tuple1);
  rel_id = TUPLEGET_REL(tuple1);
  if (query_id != TUPLEGET_QUERY(tuple2) || rel_id != TUPLEGET_REL(tuple2))
    return 0;                // must be same query and same relation

  p = &query_buffer[query_id];
  pr = &p->spec.rel[REL_DYNAMIC][rel_id];        // evaluations at source or join nodes,
  pj = &p->spec.rel[REL_STATIC][REL_J];          // evaluations at base

  if (pj->eval_aggrcount == 0 || (p->spec.srcrel_count > 1 && rel_id != REL_J))
    return 0;              // aggr. fields not present or trying to aggregate before join node

  // widths must match the expected raw or aggregated tuple length for this relation
  if ((TUPLEGET_AGGR(tuple1) == 0 && tuple_width1 != p->tuple_len[rel_id]) ||
    (TUPLEGET_AGGR(tuple1) && tuple_width1 != p->aggrtuple_len[rel_id]) ||
    (TUPLEGET_AGGR(tuple2) == 0 && tuple_width2 != p->tuple_len[rel_id]) ||
    (TUPLEGET_AGGR(tuple2) && tuple_width2 != p->aggrtuple_len[rel_id]))
  {
    DBGERR("Invalid tuple sizes (%d, %d, %d, %d, %d, %d)", tuple_width1, tuple_width2,
      p->tuple_len[rel_id], p->aggrtuple_len[rel_id], query_id, rel_id);
    pause_sim();
    return 0;
  }

  for (i = 0; i < pr->eval_count; i++)
    if (pr->eval[i].is_aggr == 0 && tuple1[TUPLEID_LEN+i] != tuple2[TUPLEID_LEN+i])
      return 0;                                              // compare non-exclusively aggregation subresults

  nonaggr_count = pj->eval_count-pj->eval_aggrcount;   // aggregate evals follow the non-aggregate ones
                    
  if (TUPLEGET_AGGR(tuple1))                                    // if tuples already aggr., copy aggr. fields to result
    memcpy(aggr1, &tuple1[p->tuple_len[rel_id]], pj->eval_aggrcount*sizeof(ATTRIBUTE));
  else for (i = 0; i < pj->eval_aggrcount; i++)                 // generate aggregation fields for tuple1
    aggr1[i] = eval_stack(&pj->eval[i+nonaggr_count].eval,
      (rel_id == REL_S) ? &tuple1[TUPLEID_LEN] : NULL,
      (rel_id != REL_S) ? &tuple1[TUPLEID_LEN] : NULL, 0);

  if (TUPLEGET_AGGR(tuple2))
    memcpy(aggr2, &tuple2[p->tuple_len[rel_id]], pj->eval_aggrcount*sizeof(ATTRIBUTE));
  else for (i = 0; i < pj->eval_aggrcount; i++)
    aggr2[i] = eval_stack(&pj->eval[i+nonaggr_count].eval,
      (rel_id == REL_S) ? &tuple2[TUPLEID_LEN] : NULL,
      (rel_id != REL_S) ? &tuple2[TUPLEID_LEN] : NULL, 0);

  *result_width = p->aggrtuple_len[rel_id];       // perform actual aggregation
  memcpy(result, tuple1, p->tuple_len[rel_id]*sizeof(ATTRIBUTE));
  TUPLESET_AGGR(result, 1);

  // combine aggregate fields; operator is the top of each eval stack
  for (i = 0; i < pj->eval_aggrcount; i++)
    switch (pj->eval[i+nonaggr_count].eval.stack[pj->eval[i+nonaggr_count].eval.stack_len-1].value)
    {
      case OP_AGGRCOUNT:       /* fallthrough: COUNT merges by addition, same as SUM */
      case OP_AGGRSUM:   result[p->tuple_len[rel_id]+i] = aggr1[i]+aggr2[i]; break;
      case OP_AGGRMIN:   result[p->tuple_len[rel_id]+i] = MIN(aggr1[i], aggr2[i]); break;
      case OP_AGGRMAX:   result[p->tuple_len[rel_id]+i] = MAX(aggr1[i], aggr2[i]); break;
      default:
        DBGERR("Invalid operator: %d", pj->eval[i+nonaggr_count].eval.stack[pj->eval[i+nonaggr_count].eval.stack_len-1].value);
        pause_sim();
        return 0;
    }

  if ((TUPLEGET_AGGR(tuple1) == 0 &&
    memcmp(aggr1, &result[p->tuple_len[rel_id]], pj->eval_aggrcount*sizeof(ATTRIBUTE)) == 0) ||
    (TUPLEGET_AGGR(tuple2) == 0 &&
    memcmp(aggr2, &result[p->tuple_len[rel_id]], pj->eval_aggrcount*sizeof(ATTRIBUTE)) == 0))
  {
    *result_width = p->tuple_len[rel_id];  // check for equality of result to either tuple, if so remove aggr. fields
    TUPLESET_AGGR(result, 0);
  }

  return 1;
}

/* Merging function for tuple payloads: attempts to combine the pages of
 * msg1 and msg2 into msg1 (msg2 is left untouched). Each tuple of page 1
 * is aggregated with at most one not-yet-used tuple of page 2
 * (check_canaggr); unmatched tuples from both pages are carried over
 * verbatim. Returns 1 on success with msg1 rewritten; returns 0 — leaving
 * msg1 unmodified — when either message carries a multicast-caching
 * request or the merged page would not fit. */
uint8_t merge_tuple(FORWARD_MSG *msg1, FORWARD_MSG *msg2)   // merging function for tuple payloads
{                                                           // if returns 1, then merge occurred, otherwise merge not possible
  TUPLE_MSGHI msghi1, msghi2;
  int i, j;
  uint8_t tuple_width1, tuple_width2, result_width;
  ATTRIBUTE tuple1[MAX_TUPLELEN], tuple2[MAX_TUPLELEN], result[MAX_TUPLELEN];
  PAGE p;
  uint8_t aggregated, p2aggr[MAX_TUPLES];   // remember which tuple of page 2 was aggregated

DO_STACKTEST

  unmarshal_tuplemsghi(&msghi1, msg1->data, msg1->source_node);
  unmarshal_tuplemsghi(&msghi2, msg2->data, msg2->source_node);
  if (msghi1.cache_multicast != 0 || msghi2.cache_multicast != 0)
    return 0;                               // never merge messages carrying multicast-cache state

  page_init(&p);
  memset(p2aggr, 0, sizeof(p2aggr));
  for (i = 0; i < msghi1.page.tuple_count; i++)
  {
    aggregated = 0;
    gettuple_wrap(&msghi1.page, i, &tuple_width1, tuple1);
    for (j = 0; j < msghi2.page.tuple_count; j++)
    {
      gettuple_wrap(&msghi2.page, j, &tuple_width2, tuple2);
      if (p2aggr[j] == 0 && check_canaggr(tuple_width1, tuple1, tuple_width2, tuple2, &result_width, result))
      {
#ifdef TOSSIM
        DBGOUT("Aggregated tuple %d page 1 with tuple %d page 2", i, j);
        print_tuple(tuple1, tuple_width1, "Tuple 1");
        print_tuple(tuple2, tuple_width2, "Tuple 2");
        print_tuple(result, result_width, " Result");
#endif
        aggregated = 1;
        if (addtuple_wrap(&p, result_width, result))   // non-zero return == page overflow
        {
          DBGOUT("Page too large");
          return 0;
        }

        p2aggr[j] = 1;                      // each page-2 tuple is consumed at most once
        break;
      }
    }

    if (aggregated == 0 && addtuple_wrap(&p, tuple_width1, tuple1))
    {
      DBGOUT("Page too large");
      return 0;
    }
  }

  for (i = 0; i < msghi2.page.tuple_count; i++)   // carry over page-2 tuples that were not aggregated
    if (p2aggr[i] == 0)
    {
      gettuple_wrap(&msghi2.page, i, &tuple_width2, tuple2);
      if (addtuple_wrap(&p, tuple_width2, tuple2))
      {
        DBGOUT("Page too large");
        return 0;
      }
    }

  memcpy(&msghi1.page, &p, sizeof(PAGE));
  msg1->data_len = marshal_tuplemsghi(&msghi1, msg1->data);   // rewrite msg1 with the merged page
  return 1;
}

/* Merging function for path-collapse payloads: appends msg2's hints to
 * msg1's hint array and re-marshals msg1 in place. Returns 1 on success;
 * returns 0 (both messages unchanged) when the combined hint count would
 * exceed MAX_PATHCOLLAPSEHINTS. */
uint8_t merge_pathcollapse(FORWARD_MSG *msg1, FORWARD_MSG *msg2)  // merging function for path collapsing payloads
{
  PATHCOLLAPSE_MSGHI msghi1, msghi2;

DO_STACKTEST

  unmarshal_pathcollapsemsghi(&msghi1, msg1->data, msg1->source_node);
  unmarshal_pathcollapsemsghi(&msghi2, msg2->data, msg2->source_node);

  if (msghi1.hint_count + msghi2.hint_count > MAX_PATHCOLLAPSEHINTS)
    return 0;

  memcpy(&msghi1.hints[msghi1.hint_count], msghi2.hints, sizeof(msghi2.hints[0])*msghi2.hint_count);
  msghi1.hint_count += msghi2.hint_count;
  msg1->data_len = marshal_pathcollapsemsghi(&msghi1, msg1->data);

  return 1;
}

/* Resend recent tuple history for (query q, relation rel_id) to the join
 * target at join_index, so a newly chosen join node can catch up ("hot
 * start"). Only triggers when the target is more than one tuple behind;
 * at most one window (PARAM_WINDOWSIZE) of tuples is replayed from the
 * relation's history_buffer, each flagged DISABLEJOIN. Always records the
 * current tuples_sent count for the target, even when nothing is sent. */
void sendhistory_joinnode(QUERY_ENTRY *q, int join_index, uint8_t rel_id)
{
  int i, size;
  uint8_t *buffer;
  TUPLE_MSGHI msg;
  uint8_t tuple_count, tuple_len;
  ATTRIBUTE tuple[MAX_TUPLELEN];

DO_STACKTEST

  if (q->rel[rel_id].tuples_sent > queryjointarget_buffer[join_index].tuples_sent[rel_id]+1)
  {
    msg.cmd = CMDHI_TUPLE;
    msg.cache_multicast = 0;
    page_init(&msg.page);

    // replay at most one window's worth of the backlog
    tuple_count = MIN(q->spec.params[PARAM_WINDOWSIZE],
      q->rel[rel_id].tuples_sent-queryjointarget_buffer[join_index].tuples_sent[rel_id]);
    if (tuple_count > 0)
    {
      tuple_len = q->tuple_len[rel_id];
      for (i = 0; i < tuple_count; i++)
      {
        memcpy(tuple, &q->rel[rel_id].history_buffer[i*tuple_len], tuple_len*sizeof(ATTRIBUTE));
        TUPLESET_DISABLEJOIN(tuple, 1);           // history tuples must not be re-joined
        addtuple_wrap(&msg.page, tuple_len, tuple);
      }

      DBGOUT("Sending history of %d to join node %d for query %d", msg.page.tuple_count,
        queryjointarget_buffer[join_index].id, q->query_id);
      buffer = (uint8_t *) mmalloc(marshalest_tuplemsghi(&msg));
      size = marshal_tuplemsghi(&msg, buffer);
      sendto_joinnode(join_index, size, buffer);
      mfree(buffer);
    }
  }

  queryjointarget_buffer[join_index].tuples_sent[rel_id] = q->rel[rel_id].tuples_sent;   // mark target as caught up
}

/* Decide where history for (q, rel_id) should be replayed. With
 * SENDTOROOT everything goes straight to Base. Otherwise, for each active
 * join target used by this relation: PRUNE (or a Base group-opt decision)
 * redirects the single replay to Base and stops; otherwise each target
 * receives its own replay via sendhistory_joinnode. */
void check_hotstart(QUERY_ENTRY *q, uint8_t rel_id)
{
  int i;

DO_STACKTEST

  if (q->options & QUERYOPT_SENDTOROOT)
    sendhistory_joinnode(q, JOININDEX_BASE, rel_id);
  else for (i = 0; i < QUERYJOINTARGETBUFFER_SIZE; i++)
    if (queryjointarget_buffer[i].used && jointarget_multiplicity(q, rel_id, i) > 0)
    {
      if (q->options & QUERYOPT_PRUNE || q->groupopt.decision == DECISION_BASE)
      {
        sendhistory_joinnode(q, JOININDEX_BASE, rel_id);
        break;                           // Base replay happens once, not per target
      } else
        sendhistory_joinnode(q, i, rel_id);
    }    
}

/* Marshal a tuple message once and unicast it to the appropriate join
 * node(s): straight to Base when SENDTOROOT is set or the query has a
 * single source relation; otherwise to each active join target for
 * (q, rel_id), redirecting to Base (once) under PRUNE or a Base
 * group-opt decision. Mirrors the routing policy of check_hotstart. */
void sendvalue_standard(TUPLE_MSGHI *msg, QUERY_ENTRY *q, uint8_t rel_id)
{
  int i, size;
  uint8_t *buffer;

DO_STACKTEST

  msg->cache_multicast = 0;
  buffer = (uint8_t *) mmalloc(marshalest_tuplemsghi(msg));
  size = marshal_tuplemsghi(msg, buffer);
  /* BUG FIX: was "(q->options & QUERYOPT_SENDTOROOT) == 1", which compares
   * the masked flag to the literal 1 and is false whenever the flag's bit
   * value is not 0x01. Every other test of this flag in this file
   * (handle_tuplemsghi, check_hotstart, the PRUNE test below) uses a plain
   * non-zero test, so do the same here. */
  if ((q->options & QUERYOPT_SENDTOROOT) != 0 || q->spec.srcrel_count == 1)
  {
    DBGOUT("Sending to Base for query %d (naive)", q->query_id);
    sendto_joinnode(JOININDEX_BASE, size, buffer);
  } else for (i = 0; i < QUERYJOINTARGETBUFFER_SIZE; i++)
    if (queryjointarget_buffer[i].used && jointarget_multiplicity(q, rel_id, i) > 0)
    {
      if (q->options & QUERYOPT_PRUNE || q->groupopt.decision == DECISION_BASE)
      {                                  // send to base if we have at least one join node
        DBGOUT("Sending to Base for query %d (base)", q->query_id);
        sendto_joinnode(JOININDEX_BASE, size, buffer);
        break;                           // Base receives the message once
      } else {
        DBGOUT("Sending to join node %d for query %d", queryjointarget_buffer[i].id, q->query_id);
        sendto_joinnode(i, size, buffer);
      }
    }  

  mfree(buffer);
}

void sendvalue_multicast(TUPLE_MSGHI *msg, QUERY_ENTRY *q, uint8_t rel_id)
{                              // send one tuple message to the join nodes along the relation's multicast path
  int size;
  uint8_t *buffer;
  MULTICAST_ENTRY *p;

DO_STACKTEST

  p = &q->rel[rel_id].multicast;
  // cache_multicast flag: 0 = caching disabled, 1 = receivers may use their
  // cached path, 3 = path just improved (anything != 1 carries the full path)
  msg->cache_multicast = (q->options & QUERYOPT_CACHEMULTICAST) ? (p->improved ? 3 : 1) : 0;
  if (p->improved)
  {
    DBGOUT("Multicast has been improved");
    p->improved = 0;
    p->lastsent_cost = p->cost;

    // snapshot the improved route into prev_*; forwarding below always uses
    // the prev_* copy, i.e. the path as of the most recent improvement
    p->prev_node = p->node;
    if (p->prev_path)
    {
      mfree(p->prev_path);               // drop the previous snapshot before replacing it
      p->prev_path = NULL;
    }

    p->prev_pathlen = p->path_len;
    if (p->prev_pathlen > 0)
    {
      p->prev_path = (ADDRESS *) mcalloc(p->path_len, sizeof(ADDRESS));
      memcpy(p->prev_path, p->path, p->path_len*sizeof(ADDRESS));
    }
  }

  DBGOUT("Sending %scached multicast message to join nodes for query %d",
    (msg->cache_multicast == 1) ? "" : "un", q->query_id);
  buffer = (uint8_t *) mmalloc(marshalest_tuplemsghi(msg));
  size = marshal_tuplemsghi(msg, buffer);
  // when sending uncached (cache_multicast != 1) the multicast buffer is
  // attached to the forward so receivers can (re)build their cache
  init_forward(TOS_NODE_ID, TOS_NODE_ID, p->prev_node, p->prev_pathlen, p->prev_path,
    FORWARD_DESTRUCTIVE, size, buffer, (msg->cache_multicast != 1) ? p->buffer_len : 0,
    (msg->cache_multicast != 1) ? p->buffer : NULL, ROUTE_SUCCESS);
  mfree(buffer);
}

uint8_t query_percentremaining(uint8_t query_id)
{                              // percentage (0..100) of the query's lifetime still ahead
  uint16_t begin_cycle, end_cycle;

  begin_cycle = query_buffer[query_id].spec.params[PARAM_BEGIN];
  end_cycle = query_buffer[query_id].spec.params[PARAM_END];

  if (query_buffer[query_id].used == 0 || timer_cycles >= end_cycle)
    return 0;                  // slot unused or query already finished
  if (timer_cycles < begin_cycle)
    return 100;                // query has not started yet

  // here begin_cycle <= timer_cycles < end_cycle, so the divisor is nonzero
  return 100*(end_cycle-timer_cycles)/(end_cycle-begin_cycle);
}

uint8_t check_queriesrunning()   // returns 1 if at least one query is still executing
{
  int slot;

  for (slot = 0; slot < QUERYBUFFER_SIZE; slot++)
  {
    if (!query_buffer[slot].used)
      continue;
    // 5-cycle grace period past the nominal end before a query counts as done
    if (timer_cycles-5 < query_buffer[slot].spec.params[PARAM_END])
      return 1;
  }

  return 0;
}

void update_localselest(QUERY_ENTRY *q, uint8_t rel_id, uint8_t have_tuple)
{                              // update the local selectivity estimate for one relation of a query
  int i;
  uint8_t r, sel_est;
  uint32_t t, t1;
  uint8_t jp_updated;

  // saturating count of tuples produced since est_starttime
  if (q->rel[rel_id].est_tuplecount < MAX_ESTTUPLECOUNT)
    q->rel[rel_id].est_tuplecount += have_tuple;

  t1 = q->rel[rel_id].nextsampling_cycle - q->rel[rel_id].est_starttime;  // cycles elapsed in the estimation window
  t = t1/q->spec.params[PARAM_SAMPLEINTERVAL];                            // sampling intervals elapsed
  if (t > 0 && (t % EST_WINDOW) == 0)  // re-estimate once every EST_WINDOW intervals
  {
    // selectivity estimate = intervals elapsed per tuple produced, clamped to
    // [MIN_SEL, MAX_SEL]; no tuples at all maps to the worst case MAX_SEL
    sel_est = (q->rel[rel_id].est_tuplecount == 0) ? MAX_SEL :
      MAX(MIN_SEL, MIN(MAX_SEL, DIV_ROUND(t1, q->spec.params[PARAM_SAMPLEINTERVAL]*q->rel[rel_id].est_tuplecount)));

    DBGOUT("Local selectivity for relation %d updated to %d", rel_id, sel_est);
    // r != 0 when the new estimate moved by more than roughly a factor of
    // 1/EST_SENS(rel_id) relative to the stored one, in either direction
    r = (sel_est > (EST_SENS(rel_id)+1)*q->sel_est[rel_id]/EST_SENS(rel_id) ||
      sel_est < (EST_SENS(rel_id)-1)*q->sel_est[rel_id]/EST_SENS(rel_id));
    q->sel_est[rel_id] = sel_est;
    if (r && rel_id == REL_T)  // only significant changes on the T relation propagate
    {
      DBGOUT("Significant update");
      jp_updated = 0;
      // push the new estimate to every join node serving this query's T relation
      for (i = 0; i < QUERYSOURCEBUFFER_SIZE; i++)
        if (querysource_buffer[i].used && querysource_buffer[i].query_id == q->query_id && querysource_buffer[i].rel_id == REL_T)
          jp_updated |= doupdate_joinnode(q, i);

      if (jp_updated)          // any join-node change triggers a fresh group optimization
        group_opt(q->query_id, -1);
    }

    if ((t % EST_RENORMALIZE) == 0)
    {
      // age the estimator: halve the tuple count and pull the window start
      // halfway toward the present, so old behavior gradually loses weight
      q->rel[rel_id].est_starttime = (timer_cycles+q->rel[rel_id].est_starttime)/2;
      q->rel[rel_id].est_tuplecount /= 2;
      DBGOUT("Renormalized local tuple count to %d, start time to %d", q->rel[rel_id].est_tuplecount,
        q->rel[rel_id].est_starttime);
    }    
  }
}

void schedule_nextsamplingcycle(QUERY_ENTRY *q, uint8_t rel_id, uint8_t have_tuple, uint8_t do_est)
{                              // record the cycle's outcome and advance the relation's sampling schedule
  set_globalstatus(have_tuple ? GS_TUPLESENT : GS_IDLE);

  // optionally fold this cycle's result into the local selectivity estimate
  if (do_est && (q->options & QUERYOPT_ESTSEL) != 0)
    update_localselest(q, rel_id, have_tuple);

  // advance to the next interval unless a queued attribute still applies to this query
  if (attrqueue_size == 0 || attrqueue_check(q->spec.token_list.attr_used) == 0)
    q->rel[rel_id].nextsampling_cycle += q->spec.params[PARAM_SAMPLEINTERVAL];
}

void timer_querycomp()           // called periodically from the OS
{                                    // generate a sample and send it to all join nodes for all queries
  int i, j, k;
  TUPLE_MSGHI msg;
  uint8_t tuple_len, result;
  ATTRIBUTE tuple[MAX_TUPLELEN];
  QUERY_ENTRY *p;
  uint8_t tuple_sent;

DO_STACKTEST

#ifdef USE_MEMFILES
  // piggyback a statistics upload while the node has nothing else to do
  if (filebuffer_size >= 0 && node_idle())
    send_statsfragment();
#endif   

  tuple_sent = 0;
  for (k = 0; k < QUERYBUFFER_SIZE; k++)
    if (query_buffer[k].used)
    {
      p = &query_buffer[k];

#ifdef TOSSIM
      if (config.start_route)                    // if in simulation, ignore query start times
#else
      if (timer_cycles < p->spec.params[PARAM_BEGIN] && timer_cycles + QUERYINIT_FACTOR*trees[0].deepest_hops >= p->spec.params[PARAM_BEGIN])
#endif                                           // give the query some time to initialize before begin sampling
        if (p->initiated == 0)
          query_sourceinitiate(k);

      // every 15 cycles, refresh the group-optimization cost and decision
      if (timer_cycles % 15 == 0)
      {
        groupopt_coordcost(k);
        groupopt_coorddecision(k);
      }
      
#ifdef TOSSIM
      // in simulation, skip sampling until the join phase is enabled
      if (config.start_route && config.start_join == 0)
        continue;
#endif

      if (timer_cycles >= p->spec.params[PARAM_END])
      {        
        stop_query(p);                           // query lifetime expired
        continue;
      }

      update_windowjoin(p);

      if (p->spec.srcrel_count > 1 && (p->options & QUERYOPT_SENDTOROOT) == 0 && source_count(k) == 0)
        continue;  // for two relations, not sending to root, check static join predicate, equivalent to having join pairs

      for (i = 0; i < SRCREL_COUNT; i++)
        if (p->spec.srcrel_count > 1 || p->spec.rel[REL_DYNAMIC][i].eval_count > 0)
        {                    // only consider non-empty relations
          if (timer_cycles < p->rel[i].nextsampling_cycle)
            continue;        // not yet due for sampling

          set_globalstatus(GS_TUPLEEVAL);

          // step 1: static predicate; failure skips the cycle without touching
          // the selectivity estimate
          DBGOUT("Query %d, rel %d, static", k, i);
          result = eval_stack(&p->spec.rel[REL_STATIC][i].pred, NULL, NULL, 0);
          if (eval_info.need_sampling)
            continue;        // a sensor sample is pending; retry next tick without advancing
          if (result == 0)
          {
            schedule_nextsamplingcycle(p, i, 0, 0);  // do not estimate local sel. if we fail static predicate
            continue;
          }

          // step 2: dynamic predicate; failure counts toward the selectivity estimate
          DBGOUT("Query %d, rel %d, dynamic", k, i);
          result = eval_stack(&p->spec.rel[REL_DYNAMIC][i].pred, NULL, NULL, p->spec.params[PARAM_SAMPLEINTERVAL]/2);
          if (eval_info.need_sampling)
            continue;
          if (result == 0)
          {
            schedule_nextsamplingcycle(p, i, 0, 1);
            continue;
          }

          // step 3: build the tuple; header carries node, sequence, query and relation ids
          msg.cmd = CMDHI_TUPLE;
          msg.cache_multicast = 0;
          page_init(&msg.page);
          TUPLESET(tuple, TOS_NODE_ID, 0, p->rel[i].tuples_sent & 0x7f, p->query_id, i, 0);

          // evaluate each projected attribute expression into the tuple body
          tuple_len = p->tuple_len[i];
          for (j = TUPLEID_LEN; j < tuple_len; j++)
          {
            tuple[j] = eval_stack(&p->spec.rel[REL_DYNAMIC][i].eval[j-TUPLEID_LEN].eval, NULL, NULL,
              p->spec.params[PARAM_SAMPLEINTERVAL]/2);
            if (eval_info.need_sampling)
              break;
          }
          if (eval_info.need_sampling)
            continue;

          schedule_nextsamplingcycle(p, i, 1, 1);

          // record the tuple in the relation's circular history buffer
          addtuple_wrap(&msg.page, tuple_len, tuple);
          j = p->rel[i].tuples_sent % p->spec.params[PARAM_WINDOWSIZE];
          memcpy(&p->rel[i].history_buffer[j*tuple_len], tuple, tuple_len*sizeof(ATTRIBUTE));

          if (config.enable_send)
          {
            tuple_sent = 1;
#ifdef TOSSIM
            print_tuple(&tuple[TUPLEID_LEN], tuple_len-TUPLEID_LEN, "Sending tuple");
#endif

            // choose delivery path: multicast only for in-network multi-relation
            // joins with a known multicast route; otherwise the standard path
            check_hotstart(p, i);
            if (p->spec.srcrel_count > 1 && p->groupopt.decision == DECISION_INNET &&
              (p->options & (QUERYOPT_SENDTOROOT | QUERYOPT_PRUNE)) == 0 &&
              p->rel[i].multicast.buffer_len > 0 && (p->options & QUERYOPT_MULTICAST) != 0)
              sendvalue_multicast(&msg, p, i);
            else sendvalue_standard(&msg, p, i);
          } else DBGOUT("Sending is disabled for this node");

          p->rel[i].tuples_sent++;
        }       
    }
    
  // a queued attribute is consumed once any query has sent a tuple this cycle
  if (tuple_sent && attrqueue_size > 0)
    attrqueue_pop();
}

uint8_t addtuple_wrap(PAGE *p, uint8_t tuple_len, ATTRIBUTE *tuple)
{                              // add a tuple to a page, stripping the node-ID attribute if the relation has one
  int r, i, id_index = query_buffer[TUPLEGET_QUERY(tuple)].spec.rel[REL_DYNAMIC][TUPLEGET_REL(tuple)].id_index;

  if (id_index < 0)            // no designated ID attribute: store the tuple as-is
    return page_addtuple(p, tuple_len, tuple);

  // shift the attributes after the ID slot left by one, dropping it
  for (i = TUPLEID_LEN+id_index; i < tuple_len-1; i++)
    tuple[i] = tuple[i+1];
  DBGOUT("Removed ID attribute from tuple");

  r = page_addtuple(p, tuple_len-1, tuple);

  for (i = tuple_len-1; i > TUPLEID_LEN+id_index; i--)     // make sure we dont change the tuple
    tuple[i] = tuple[i-1];
  // NOTE(review): the ID slot is refilled from the tuple header's node field,
  // which assumes that attribute always equals TUPLEGET_NODE(tuple) -- confirm
  tuple[TUPLEID_LEN+id_index] = TUPLEGET_NODE(tuple);

  return r;                    // page_addtuple's status for the shortened tuple
}

uint8_t gettuple_wrap(PAGE *p, uint8_t tuple_index, uint8_t *tuple_len, ATTRIBUTE *tuple)
{                              // read a tuple from a page, restoring the stripped node-ID attribute (inverse of addtuple_wrap)
  int i, id_index;
  uint8_t r = page_gettuple(p, tuple_index, tuple_len, tuple);

  if (r)
    return r;                  // propagate page_gettuple's failure code

  id_index = query_buffer[TUPLEGET_QUERY(tuple)].spec.rel[REL_DYNAMIC][TUPLEGET_REL(tuple)].id_index;
  if (id_index >= 0)
  {
    // shift right to reopen the ID slot, then refill it from the header node
    // field; NOTE(review): writes tuple[*tuple_len], so the caller's buffer
    // must have room for one extra attribute -- presumably MAX_TUPLELEN covers this
    for (i = *tuple_len; i > TUPLEID_LEN+id_index; i--)
      tuple[i] = tuple[i-1];
    (*tuple_len)++;
    tuple[TUPLEID_LEN+id_index] = TUPLEGET_NODE(tuple);
    DBGOUT("Restored ID attribute to tuple");
  }

  return 0;
}


