/**  
 * Copyright (c) 2010 University of Pennsylvania.
 *     All rights reserved.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an "AS
 *  IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 *  express or implied.  See the License for the specific language
 *  governing permissions and limitations under the License.
 *
 */
 
void init_mforward()    // reset every forwarding-related buffer to a clean initial state
{
  memset(path_cache, 0, sizeof(path_cache));
  memset(repair_path, 0, sizeof(repair_path));
  memset(repair_queue, 0, sizeof(repair_queue));
  memset(forward_buffer, 0, sizeof(forward_buffer));
  memset(subscribe_buffer, 0, sizeof(subscribe_buffer));
  memset(pathcollapse_buffer, 0, sizeof(pathcollapse_buffer));

  memset(merge_buffer, 0, sizeof(merge_buffer));
  mergebuffer_size = 0;   // merge buffer occupancy counter starts empty
}

int find_repairpathindex(ADDRESS neighbor_node, ADDRESS dest_node)
{                   // locate the repair path entry for (neighbor, destination); -1 when absent
  int slot;

  for (slot = 0; slot < REPAIRPATH_SIZE; slot++)
  {
    if (!repair_path[slot].used)
      continue;
    if (repair_path[slot].neighbor_node == neighbor_node &&
        repair_path[slot].dest_node == dest_node)
      return slot;
  }

  return -1;
}

void stepforward_forwardmsg(ADDRESS neighbor_node, FORWARD_MSG *p)
{                  // advance the path vector by one hop, used during normal forwarding
  int pos;

  if (p->path_len == 0)
    return;

  for (pos = 1; pos < p->path_len; pos++)        // shift each remaining hop toward the front
    p->path[pos-1] = p->path[pos];

  if (p->destructive == FORWARD_NONDESTRUCTIVE)
    p->path[p->path_len-1] = neighbor_node;      // rotate: record the hop just taken at the tail
  else
    p->path_len--;                               // destructive forwarding simply consumes the hop
}

void stepback_forwardmsg(ADDRESS neighbor_node, FORWARD_MSG *p)
{                 // roll the path vector back one hop, used during repair
  int pos;

  // nothing to roll back for a destructive message to the basestation with an empty path
  if (p->destructive != FORWARD_NONDESTRUCTIVE && p->dest_node == BASESTATION_ADDR &&
      p->path_len == 0)
    return;

  if (p->destructive == FORWARD_DESTRUCTIVE)
    p->path_len++;                            // destructive paths shrank on forward; grow back

  for (pos = p->path_len-1; pos > 0; pos--)   // open a slot at the front of the vector
    p->path[pos] = p->path[pos-1];
  p->path[0] = neighbor_node;                 // re-insert the hop we are stepping back from
}

uint8_t returnfailed_forwardmsg(FORWARD_MSG *msg, ADDRESS neighbor_node)
{              // rewrite msg in place as a "failed" reply headed back to its sender; 0 = ok, 1 = error
  int endpos_index;
  ADDRESS t;

#ifdef TOSSIM
  log_repairforward(0, msg->source_node, msg->dest_node, neighbor_node, neighbor_node, 0, msg->path,
    msg->path_len, NULL, 0);
#endif

  if (msg->destructive)
    msg->path_len = 0;    // destructive path state is useless for the return trip; discard it
  else {
    // nondestructive: the path was rotated while forwarding, so reverse it to point home
    reverse_pathvec(msg->path, msg->path_len);
    endpos_index = find_pathpos(msg->path_len, msg->path, msg->dest_node);
    if (endpos_index < 0)
    {
      DBGERR("Cannot find end position in path!");
      pause_sim();
      return 1;
    }

    msg->path[endpos_index] = msg->source_node;   // old destination slot now leads to the sender
    stepforward_forwardmsg(TOS_NODE_ID, msg);     // step past ourselves on the reversed path
  }

  msg->failed = 1;          // mark failed: a second failure drops the msg instead of bouncing again
  t = msg->dest_node;       // swap endpoints so the reply travels back toward the original source
  msg->dest_node = msg->source_node;
  msg->source_node = t;

  return 0;
}

void forward_unreachable(FORWARD_MSG *msg)
{              // next hop of the path vector is not among the current neighbor set
#ifdef TOSSIM
  forward_logunreachable(msg);
#endif
  // bounce back to the sender as failed, but only for nondestructive messages
  // with a path that have not already failed once
  if (msg->path_len == 0 || msg->destructive != 0 || msg->failed != 0)
    return;
  if (returnfailed_forwardmsg(msg, msg->dest_node) != 0)
    return;
  merge_forward(msg, TOS_NODE_ID, TOS_NODE_ID, 0);
}

void handlerepair_forwardmsgs(uint8_t repair_success, ADDRESS neighbor_node, ADDRESS dest_node)
{                // after repair is finished, with success or not, flush repair queue
  int i;
  FORWARD_MSG msg_out;
  uint8_t return_failed = 1;    // recomputed per entry whenever the repair was unsuccessful

DO_STACKTEST

  DBGOUT("Flushing repair queue for neighbor: %d, destination: %d, success: %d", neighbor_node,
    dest_node, repair_success);

  // release every queued message that was waiting on this (neighbor, destination) repair
  for (i = 0; i < REPAIRQUEUE_SIZE; i++)
    if (repair_queue[i].used && repair_queue[i].neighbor_node == neighbor_node &&
      repair_queue[i].dest_node == dest_node)
    {
      unmarshal_forwardmsg(&msg_out, repair_queue[i].msg);
      if (repair_success != ROUTE_SUCCESS)   // if repair is unsuccessfull, mark as failed, reverse source and target
        return_failed = (returnfailed_forwardmsg(&msg_out, neighbor_node) == 0);
      if (return_failed || repair_success == ROUTE_SUCCESS || msg_out.destructive == 0)
      {
        DBGOUT("Sending entry %d", i);
        // on success resume from the recorded previous hop; on failure the bounce starts here
        merge_forward(&msg_out,
          (repair_success == ROUTE_SUCCESS) ? repair_queue[i].prev_node : TOS_NODE_ID, TOS_NODE_ID, 0);
      }                        //if msg failed, send only if destructive or in computation; else give up.

      mfree(repair_queue[i].msg);
      repair_queue[i].msg = NULL;
      repair_queue[i].used = 0;
    }
}

uint8_t checkallow_forwardshortcut(FORWARD_MSG *msg)
{              // path-vector shortcutting is permitted for everything except repair messages
  if (msg->data_len == 0)
    return 1;
  return (msg->data[0] & CMDHI_MASK) != CMDHI_REPAIR;
}

uint8_t checkallow_forwardbufferupdate(FORWARD_MSG *msg)
{              // forwarding state is learned only from destructive, non-failed, non-loopback, non-repair msgs
  if (msg->failed != 0 || !msg->destructive || msg->source_node == msg->dest_node)
    return 0;
  return (msg->data_len == 0 || (msg->data[0] & CMDHI_MASK) != CMDHI_REPAIR);
}

uint8_t merge_forwardmsgs(FORWARD_MSG *msg1, FORWARD_MSG *msg2)     // decide whether two forward msgs can share one packet
{
  int cmd_class;

  // flags, multicast trees, and payload presence must all agree before merging
  if (msg1->destructive != msg2->destructive)
    return 0;
  if (msg1->failed != msg2->failed)
    return 0;
  if (msg1->multicastbuffer_len != msg2->multicastbuffer_len ||
      memcmp(msg1->multicast_buffer, msg2->multicast_buffer, msg1->multicastbuffer_len) != 0)
    return 0;
  if ((msg1->data_len == 0) != (msg2->data_len == 0))
    return 0;

  if (msg1->data_len == 0)           // both payload-free: trivially mergeable
    return 1;

  cmd_class = msg1->data[0] & CMDHI_MASK;
  if (cmd_class != (msg2->data[0] & CMDHI_MASK))
    return 0;
  if (msg1->data_len + msg2->data_len > MAX_FORWARDDATASIZE)
    return 0;

  // only these payload classes know how to combine their data
  if (cmd_class == CMDHI_TUPLE)
    return merge_tuple(msg1, msg2);
  if (cmd_class == CMDHI_PATHCOLLAPSE)
    return merge_pathcollapse(msg1, msg2);
  return 0;
}

uint8_t check_forwardsendmulticast(FORWARD_MSG *msg)   // main multicast send handler
{
  if (msg->data_len == 0 || (msg->data[0] & CMDHI_MASK) != CMDHI_TUPLE)
    return 1;                 // only tuple payloads can veto the multicast send
  return handlesendmulticast_tuple(msg);
}

void clearmulticast_payload(uint8_t *data_len, uint8_t *data, ADDRESS source_node)
{                      // strip multicast-specific payload; only tuple messages carry any
  if (*data_len == 0)
    return;
  if ((data[0] & CMDHI_MASK) == CMDHI_TUPLE)
    clearmulticastpayload_tuple(data_len, data, source_node);
}

void handle_forwardintermediate(FORWARD_MSG *msg)  // payload-dependent hook run at every intermediate hop
{
  uint8_t has_payload = (msg->data_len > 0);

  if (has_payload && (msg->data[0] & CMDHI_MASK) == CMDHI_TUPLE)
    handleforwardinter_tuple(msg);   // only tuple payloads need intermediate processing
}

uint8_t init_forward(ADDRESS source_node, ADDRESS prev_node, ADDRESS dest_node, uint8_t node_count,
  ADDRESS *nodes, uint8_t destructive, uint8_t data_len, uint8_t *data, uint8_t multicastbuffer_len,
  uint8_t *multicast_buffer, uint8_t failed)
{                         // new source routing request, on a given path vector, payload, and optionally multicast tree
  FORWARD_MSG msg;        // "failed" means msgs returns back to source after failures, will be dropped on another failure

DO_STACKTEST

  if (check_congestion())
  {
    DBGOUT("Dropping forward message to %d because of congestion", dest_node);
#ifdef TOSSIM    
    log_dropcmd(1, &dest_node, data, data_len, 4);
#endif    
    return 1;
  }

  DBGOUT("Initiating forwarding from node %d to node %d using prev_node %d", source_node,
    dest_node, prev_node);

  // validate caller-supplied sizes against the fixed-capacity fields of FORWARD_MSG
  if (data_len > MAX_FORWARDDATASIZE)
  {
    DBGERR("Data size too large: %d", data_len);
    pause_sim();
    return 1;
  }

  if (node_count > MAX_PATHLEN)
  {
    DBGERR("Path length too large: %d", node_count);
    pause_sim();
    return 1;
  }

  if (multicastbuffer_len > MULTICASTBUFFER_SIZE)
  {
    DBGERR("Multicast buffer too large: %d", multicastbuffer_len);
    pause_sim();
    return 1;
  }

  msg.cmd = CMD_FORWARD;
  msg.source_node = source_node;
  msg.dest_node = dest_node;
  msg.failed = failed;
  msg.destructive = destructive;

  msg.path_len = node_count;
  if (node_count > 0)        // fix: guard memcpy against a NULL `nodes` pointer with count 0 (UB per C std)
    memcpy(msg.path, nodes, node_count*sizeof(ADDRESS));

  msg.data_len = data_len;
  if (data_len > 0)          // fix: same guard for the payload pointer (callers may pass NULL with len 0)
    memcpy(msg.data, data, data_len);

  msg.multicastbuffer_len = multicastbuffer_len;
  if (multicastbuffer_len > 0)
  {
    memcpy(msg.multicast_buffer, multicast_buffer, multicastbuffer_len);
#ifdef TOSSIM
    log_multicastrequest(node_count, nodes, multicastbuffer_len, multicast_buffer);
#endif
  }

#ifdef TOSSIM
  if (config.disable_msglog == 0)
    log_sendcmdhi(data, data_len);    
  forward_request(&msg);
#endif

  merge_forward(&msg, prev_node, TOS_NODE_ID, 0);

  return 0;
}

int find_lowerlevelpeer()       // pick a healthy neighbor one hop closer to the root,
{                               // or on the same level with a lower id and a different parent
  int peer;

  for (peer = 0; peer < MAX_PEERLINKS; peer++)  // first preference: strictly closer to the base
  {
    if (!peer_link[peer].used || !peerlink_statusok(peer))
      continue;
    if (peer_link[peer].tree.trees[0].hops < trees[0].hops)
      return peer;
  }

  for (peer = 0; peer < MAX_PEERLINKS; peer++)  // fallback: same level, smaller id, different parent
  {
    if (!peer_link[peer].used || !peerlink_statusok(peer))
      continue;
    if (peer_link[peer].id < TOS_NODE_ID &&
        peer_link[peer].tree.trees[0].hops == trees[0].hops &&
        peer_link[peer].tree.trees[0].parent != trees[0].parent)
      return peer;
  }

  return -1;
}

int find_extneighborshortcut(ADDRESS extneighbor_node)
{          // find an extended-neighborhood entry reaching extneighbor_node through a healthy peer link
  int i, peer_index;

  for (i = 0; i < EXTNEIGHBORHOODBUFFER_SIZE; i++)
    if (extneighborhood_buffer[i].used && extneighborhood_buffer[i].extneighbor_node == extneighbor_node)
    {
      peer_index = id_to_peerlink(extneighborhood_buffer[i].neighbor_node);
      // fix: was `peer_index > 0`, which wrongly rejected the valid peer-link index 0;
      // every other call site in this file treats only negative results as invalid
      return (peer_index >= 0 && peerlink_statusok(peer_index)) ? i : -1;
    }

  return -1;
}

void update_mergebuffer()  // called periodically from OS, packs at most MERGE_TROTTLE data value msgs and sends to phys. buffer
{
  int k, i, peerlink_index, peer_index;
  FORWARD_MSG msg;
  ADDRESS next_node;

  for (i = 0; i < MERGEBUFFER_THROTTLE; i++)
  {
    if (mergebuffer_size == 0)       // must be inside the loop, check if we have any more msgs left
      break;

    // pick a random occupied slot, then walk to the head of its same-path chain so
    // messages on one path are processed in submission order
    do {
      k = rand() % MERGEBUFFER_SIZE;
    } while (merge_buffer[k].used == 0);
    while (merge_buffer[k].prev >= 0)
      k = merge_buffer[k].prev;

    unmarshal_forwardmsg(&msg, merge_buffer[k].msg);

    next_node = merge_buffer[k].next_node;
    if (next_node == TOS_NODE_ID)    // addressed to this node: deliver locally
    {
      merge_buffer[k].msg_len = 0;
      mfree(merge_buffer[k].msg);  // prevent attempts for merging with this message, while calling forward_message
      merge_buffer[k].msg = NULL;

      DBGOUT("Processing incoming message at merge buffer position %d", k);
      forward_message(merge_buffer[k].prev_node, &msg);
    } else {
      peerlink_index = id_to_peerlink(next_node);
      if (peerlink_index < 0)
      {
        DBGERR("Invalid node: %d", next_node);
        pause_sim();
        // NOTE(review): this return leaves slot k allocated and counted in
        // mergebuffer_size -- confirm whether leaking the slot here is intended
        return;
      }
      if (peer_link[peerlink_index].unicast_msgindex >= 0)   // link to neighbor is busy sending other messages
        continue;                                            // entry stays queued for a later round

      if (peer_link[peerlink_index].congestion && startup_cycles > peer_link[peerlink_index].lastmsgrcv_time+1000)
      {
        DBGOUT("Resetting congestion bit for %d", next_node);
        peer_link[peerlink_index].congestion = 0;            // allow for recovery from congestion
      }
      if (peer_link[peerlink_index].congestion)
      {
        DBGOUT("Congestion at peer %d", next_node);
        // reroute around congestion: basestation traffic via a lower-level peer, other
        // traffic via an extended-neighborhood shortcut toward the next path hop
        peer_index = (msg.dest_node == BASESTATION_ADDR) ? find_lowerlevelpeer() :
          ((next_node != msg.dest_node && msg.path_len > 0) ? find_extneighborshortcut(msg.path[0]) : -1);
        if (peer_index < 0)
          continue;
        else {
          next_node = peer_link[peer_index].id;
          DBGOUT("Rerouting through %d", next_node);
        }
      }

      DBGOUT("Processing outgoing message at merge buffer position %d", k);
      send_genericmsg(merge_buffer[k].msg, merge_buffer[k].msg_len, MSG_CONFIRM, 1, alloc_addr(next_node), -1,
        PEER_DEFAULTPOWER, 1);

      msgsent_forward++;
      bytesent_forward += merge_buffer[k].msg_len;
    }

    // release the slot and unlink it from the head of its chain
    if (merge_buffer[k].msg)
      mfree(merge_buffer[k].msg);   // might be already freed above
    merge_buffer[k].msg_len = 0;

    if (merge_buffer[k].next >= 0)
      merge_buffer[merge_buffer[k].next].prev = -1;
    mergebuffer_size--;
    merge_buffer[k].used = 0;
  }
}

int find_freemergeentry()          // first unused slot in the merge buffer, or -1 when full
{
  int slot = 0;

  while (slot < MERGEBUFFER_SIZE)
  {
    if (!merge_buffer[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

void handledrop_forward(FORWARD_MSG *msg, ADDRESS next_node)
{                   // message could not be buffered; log the drop (simulation builds only)
#ifdef TOSSIM
  int marshalled_len;
  uint8_t marshalled[sizeof(FORWARD_MSG)];

  DBGERR("No space in merge buffer. Dropping message");
  marshalled_len = marshal_forwardmsg(msg, marshalled);
  log_dropcmd(1, &next_node, marshalled, marshalled_len, 5);
#endif
}

int find_samepathmergeentry(FORWARD_MSG *msg, ADDRESS next_node)
{                   // find the chain-tail entry (next == -1) of a queued message on the same path
  int slot;

  for (slot = 0; slot < MERGEBUFFER_SIZE; slot++)
  {
    if (!merge_buffer[slot].used || merge_buffer[slot].next != -1)
      continue;
    if (merge_buffer[slot].dest_node == msg->dest_node &&
        merge_buffer[slot].next_node == next_node)
      return slot;
  }

  return -1;
}

void merge_forward(FORWARD_MSG *msg, ADDRESS prev_node, ADDRESS next_node, uint8_t allow_merge)  
{                                                       // submit a message to the merge buffer
  int merge_index, prev_index;
  FORWARD_MSG msg1;

DO_STACKTEST

  // first try to piggyback onto the newest queued message on the same (dest, next-hop) path
  prev_index = find_samepathmergeentry(msg, next_node);
  if (prev_index >= 0 && merge_buffer[prev_index].msg && merge_buffer[prev_index].allow_merge &&
    (next_node != TOS_NODE_ID || allow_merge)) 
  {                                               // disallow merging for certain msgs, but maintain processing order
    unmarshal_forwardmsg(&msg1, merge_buffer[prev_index].msg);
    DBGOUT("Attempting to merge with message at position %d", prev_index);
    if (merge_forwardmsgs(&msg1, msg))
    {                        // if we've found a message on the same path, try to merge payload with it
      DBGOUT("Merged outgoing message with message at position %d", prev_index);
      print_list(msg1.data, msg1.data_len, "Updated data");

      // NOTE(review): mrealloc's result overwrites the pointer directly; if mrealloc can
      // return NULL on failure this loses the original buffer -- confirm mrealloc semantics
      merge_buffer[prev_index].msg = mrealloc(merge_buffer[prev_index].msg, marshalest_forwardmsg(&msg1));
      merge_buffer[prev_index].msg_len = marshal_forwardmsg(&msg1, merge_buffer[prev_index].msg);

      return;
    }
  }

  // no merge possible: claim a fresh slot for the marshalled message
  merge_index = find_freemergeentry();
  if (merge_index >= 0)
  {
    DBGOUT("Placing %s message in merge buffer at position %d",
      (next_node == TOS_NODE_ID) ? "incoming" : "outgoing", merge_index);

    merge_buffer[merge_index].msg = mmalloc(marshalest_forwardmsg(msg));
    if (merge_buffer[merge_index].msg == NULL)
    {
      handledrop_forward(msg, next_node);   // out of memory: drop and log
      return;
    }
    merge_buffer[merge_index].msg_len = marshal_forwardmsg(msg, merge_buffer[merge_index].msg);

    merge_buffer[merge_index].used = 1;
    merge_buffer[merge_index].allow_merge = allow_merge;
    merge_buffer[merge_index].next_node = next_node;
    merge_buffer[merge_index].prev_node = prev_node;
    merge_buffer[merge_index].dest_node = msg->dest_node;

    merge_buffer[merge_index].next = -1;
    if (prev_index >= 0)   // if we have found a message on the same path, make sure we go after it
    {
      merge_buffer[merge_index].prev = prev_index;
      merge_buffer[prev_index].next = merge_index;
    } else
      merge_buffer[merge_index].prev = -1;

    mergebuffer_size++;
    if (mergebuffer_size > maxusage_mergebuffer)   // track high-water mark for diagnostics
      maxusage_mergebuffer = mergebuffer_size;

    return;
  }

  handledrop_forward(msg, next_node);   // buffer full: drop and log
}

int findfree_repairpathindex()          // returns a free slot index (if any) in the repair path buffer
{
  int slot = 0;

  while (slot < REPAIRPATH_SIZE)
  {
    if (!repair_path[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

void enqueuerepair_forwardmsg(ADDRESS source_id, ADDRESS dest, FORWARD_MSG *p)
{                         // place a message to some failed neighbor dest in the repair queue, until ongoing repair is finished
  int i;

DO_STACKTEST

  for (i = 0; i < REPAIRQUEUE_SIZE; i++)
    if (repair_queue[i].used == 0)
    {
      repair_queue[i].msg = (uint8_t *) mmalloc(marshalest_forwardmsg(p));
      if (repair_queue[i].msg == NULL)
      {                         // fix: previously marshalled into a NULL buffer on OOM
        DBGERR("Repair queue allocation failed. Dropped message");
        return;
      }

      DBGOUT("Message queued at position %d", i);
      repair_queue[i].used = 1;
      repair_queue[i].neighbor_node = dest;       // the failed neighbor this msg was headed to
      repair_queue[i].dest_node = p->dest_node;
      repair_queue[i].prev_node = source_id;      // hop to resume from once repair succeeds
      repair_queue[i].msg_len = marshal_forwardmsg(p, repair_queue[i].msg);

      return;
    }

  DBGERR("Repair queue full. Dropped message");
}

void checkrepair_forwardmsg(ADDRESS source_id, ADDRESS dest, FORWARD_MSG *p)
{                         // checks if forwarding directly to dest is possible, or need to queue up or change paths
  int dest_index, repairpath_index;
  ADDRESS path[MAX_PATHLEN];
#ifdef TOSSIM
  uint8_t old_pathlen;
  ADDRESS old_path[MAX_PATHLEN];
#endif

DO_STACKTEST

  repairpath_index = find_repairpathindex(dest, p->dest_node);
  if (repairpath_index >= 0)       // a failure is recorded for this (neighbor, destination) pair
  {
    DBGOUT("Cannot forward directly to %d because of failure", dest);
    if (p->failed)
    {
      DBGERR("Droped failed forward message!");   // already-failed msgs are never repaired twice
      return;
    }

    if (repair_path[repairpath_index].path_status == PATHSTATUS_REPAIRED)
    {
      DBGOUT("Repair path exists.");
#ifdef TOSSIM
      old_pathlen = p->path_len;   // keep the pre-repair path for logging
      memcpy(old_path, p->path, sizeof(ADDRESS)*p->path_len);
#endif

      if (p->destructive == 0)
      {
        // nondestructive: splice repair segment in front of the portion after dest_node
        dest_index = find_pathpos(p->path_len, p->path, p->dest_node);
        if (dest_index < 0)
        {
          DBGERR("Cannot find destination!");
          pause_sim();
          return;
        }

        memcpy(path, repair_path[repairpath_index].path,     // apply repairs to path
          repair_path[repairpath_index].path_len*sizeof(ADDRESS));
        memcpy(&path[repair_path[repairpath_index].path_len], &p->path[dest_index+1],
          (p->path_len-dest_index-1)*sizeof(ADDRESS));

        p->path_len = repair_path[repairpath_index].path_len+p->path_len-dest_index-1;
        memcpy(p->path, path, p->path_len*sizeof(ADDRESS));
      } else {
        // destructive: the repair path simply replaces the remaining route
        p->path_len = repair_path[repairpath_index].path_len;  // apply repairs to path
        memcpy(p->path, repair_path[repairpath_index].path, p->path_len*sizeof(ADDRESS));
      }
      dest = p->path[0];           // forward along the repaired route instead

#ifdef TOSSIM
      print_path(repair_path[repairpath_index].path, repair_path[repairpath_index].path_len,
        "Repair path");
      print_path(p->path, p->path_len, "Repaired path");
      
      log_repairforward(1, p->source_node, p->dest_node,
        repair_path[repairpath_index].neighbor_node, dest, p->path_len-old_pathlen, old_path,
        old_pathlen, p->path, p->path_len);
#endif
    } else {
      // repair still pending (or already failed): park the message in the repair queue
      DBGOUT("Repair path does not exist. Queueing message");
      enqueuerepair_forwardmsg(source_id, dest, p);

      if (repair_path[repairpath_index].path_status == PATHSTATUS_BROKEN)
      {
        DBGOUT("Repair has failed for this path."); 
        handlerepair_forwardmsgs(ROUTE_FAIL, dest, p->dest_node);   // flush queue as failed
      }

      return;
    }
  }

  DBGOUT("Forwarding to node %d", dest);
  stepforward_forwardmsg(dest, p);
  merge_forward(p, source_id, dest, 1);
}

int find_forwardbufferentry(ADDRESS source_node, ADDRESS dest_node)
{                               // look up a flow; 0xffff acts as a wildcard on either endpoint
  int slot;

  for (slot = 0; slot < FORWARDBUFFER_SIZE; slot++)
  {
    if (!forward_buffer[slot].used)
      continue;
    if (source_node != 0xffff && forward_buffer[slot].source_node != source_node)
      continue;
    if (dest_node != 0xffff && forward_buffer[slot].dest_node != dest_node)
      continue;
    return slot;
  }

  return -1;
}

int find_freeforwardbufferentry()      // returns a free slot in the forward buffer (if any)
{
  int slot = 0;

  while (slot < FORWARDBUFFER_SIZE)
  {
    if (forward_buffer[slot].used == 0)
      return slot;
    slot++;
  }

  return -1;
}

void update_forwardbuffer(ADDRESS source_id, ADDRESS dest, FORWARD_MSG *p)
{                                    // update the forward buffer info, with this forwarding message
  int forwardbuffer_entry;

  forwardbuffer_entry = find_forwardbufferentry(p->source_node, p->dest_node);
  if (forwardbuffer_entry < 0)       // no entry for this flow yet: create one
  {
    forwardbuffer_entry = find_freeforwardbufferentry();
    if (forwardbuffer_entry < 0)
      forwardbuffer_entry = rand() % FORWARDBUFFER_SIZE;   // buffer full: evict a random victim

    forward_buffer[forwardbuffer_entry].used = 1;
    forward_buffer[forwardbuffer_entry].source_node = p->source_node;
    forward_buffer[forwardbuffer_entry].dest_node = p->dest_node;
    forward_buffer[forwardbuffer_entry].hops = 255;        // sentinel: any real path_len is shorter
    forward_buffer[forwardbuffer_entry].modified = 1;

    DBGOUT("Added forward buffer entry (%d, %d)", p->source_node, p->dest_node);
  }

  forward_buffer[forwardbuffer_entry].msg_count++;
  // record hop neighbors only when this message carries the shortest path seen so far
  if (p->path_len < forward_buffer[forwardbuffer_entry].hops)
  {
    forward_buffer[forwardbuffer_entry].modified = 1;
    forward_buffer[forwardbuffer_entry].hops = p->path_len;
    forward_buffer[forwardbuffer_entry].next_node = dest;
    forward_buffer[forwardbuffer_entry].prev_node = source_id;

    DBGOUT("Updated forward buffer entry (%d, %d, %d, %d) with hops %d", p->source_node,
      source_id, dest, p->dest_node, p->path_len);
  }
}

int find_pathcacheentry(ADDRESS dest_node)
{                        // index of the cached path toward dest_node, or -1 when not cached
  int slot;

  for (slot = 0; slot < PATHCACHE_SIZE; slot++)
  {
    if (!path_cache[slot].used)
      continue;
    if (path_cache[slot].dest_node == dest_node)
      return slot;
  }

  return -1;
}

int find_freepathcacheentry()         // find a free slot in the path cache (if any)
{
  int slot = 0;

  while (slot < PATHCACHE_SIZE)
  {
    if (!path_cache[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

void update_pathcache(FORWARD_MSG *p)           // cache the path vector of this forwarding message
{
  int pathcache_index;

  // insert only when no entry for this destination exists yet; existing entries are kept as-is
  pathcache_index = find_pathcacheentry(p->dest_node);
  if (pathcache_index >= 0)
    return;

  pathcache_index = find_freepathcacheentry();
  if (pathcache_index < 0)
    pathcache_index = rand() % PATHCACHE_SIZE;    // cache full: evict a random victim

  if (path_cache[pathcache_index].path)           // release the evicted entry's path storage
    mfree(path_cache[pathcache_index].path);

  path_cache[pathcache_index].path = (ADDRESS *) mcalloc(p->path_len, sizeof(ADDRESS));
  if (path_cache[pathcache_index].path == NULL)
  {                          // fix: previously memcpy'd into a NULL buffer on allocation failure
    path_cache[pathcache_index].used = 0;
    return;
  }

  DBGOUT("Path cache entry %d updated with path to %d", pathcache_index, p->dest_node);
  path_cache[pathcache_index].used = 1;
  path_cache[pathcache_index].dest_node = p->dest_node;
  path_cache[pathcache_index].path_len = p->path_len;
  memcpy(path_cache[pathcache_index].path, p->path, sizeof(ADDRESS)*p->path_len);
}

void check_subscribebuffer(FORWARD_MSG *p)   // check if any neighbors are waiting for the path vector of this message
{
  PATHRESPOND_MSG msg;
  int i, size;
  uint8_t buffer[sizeof(msg)];

DO_STACKTEST

  // answer every subscription matching this message's (source, destination) pair
  for (i = 0; i < SUBSCRIBEBUFFER_SIZE; i++)
    if (subscribe_buffer[i].used && subscribe_buffer[i].source_node2 == p->source_node &&
      subscribe_buffer[i].dest_node == p->dest_node)
    {
      DBGOUT("Match in subscribe buffer. Sending path vector to node %d",
        subscribe_buffer[i].subscriber_node);

      // build a path-respond message carrying this message's current path vector
      msg.cmd = CMD_PATHRESPOND;
      msg.source_node1 = subscribe_buffer[i].source_node1;
      msg.source_node2 = subscribe_buffer[i].source_node2;
      msg.dest_node = subscribe_buffer[i].dest_node;
      msg.path_len = p->path_len;
      memcpy(msg.path, p->path, sizeof(ADDRESS)*p->path_len);

      size = marshal_pathrespondmsg(&msg, buffer);
      send_genericmsg(buffer, size, MSG_CONFIRM, 1, alloc_addr(subscribe_buffer[i].subscriber_node), -1, 
        PEER_DEFAULTPOWER, 1);

      subscribe_buffer[i].used = 0; // do not reply to this subscription anymore 
    }
}

uint8_t forward_resolvepath(ADDRESS dest_node, ADDRESS *next_hop, ADDRESS *path, uint8_t *path_len)
{                    // resolve a route via the path cache first, the query planner second
  int cache_index = find_pathcacheentry(dest_node);

  if (cache_index < 0 || path_cache[cache_index].path_len == 0)
    return queryplan_resolvepath(dest_node, next_hop, path, path_len);

  *path_len = path_cache[cache_index].path_len;
  memcpy(path, path_cache[cache_index].path, (*path_len)*sizeof(ADDRESS));
  *next_hop = path[0];
  DBGOUT("Next hop retrieved from path cache: %d", *next_hop);

  return 1;
}

uint8_t resolve_path(ADDRESS source_id, FORWARD_MSG *p, ADDRESS *next_hop)
{           // resolve a route for p when its next hop is unknown; returns 1 on success, 0 otherwise
  int destpath_index = find_pathpos(p->path_len, p->path, p->dest_node), entry_index;
  ADDRESS temp_path[MAX_PATHLEN], test_node;
  uint8_t temp_pathlen;

DO_STACKTEST

  DBGOUT("Resolving path");
  // start from the final destination and walk backwards along the existing path vector,
  // trying progressively nearer waypoints until one of them can be resolved
  test_node = (destpath_index < 0) ? p->dest_node : p->path[destpath_index];
  for (;;)
  {
    DBGOUT("Checking destination %d", test_node);
    if (forward_resolvepath(test_node, next_hop, temp_path, &temp_pathlen))
    {
      print_path(temp_path, temp_pathlen, "Resolution path");
      
      if (p->path_len == 0)
        p->path_len = temp_pathlen;        // no prior path: adopt the resolved one wholesale
      else if (destpath_index >= 0)      // handles cases for both destructive and nondestructive forwarding
      {
        // splice the resolved prefix in front of the path remainder past the waypoint
        memcpy(&temp_path[temp_pathlen-1], &p->path[destpath_index],
          (p->path_len-destpath_index)*sizeof(ADDRESS));
        temp_pathlen += (p->path_len-destpath_index-1);
        p->path_len = temp_pathlen;
      }

      memcpy(p->path, temp_path, temp_pathlen*sizeof(ADDRESS));
      print_path(p->path, p->path_len, "Updated path");

      return 1;
    }

    // fall back to the forward buffer: a flow heading to test_node tells us its next hop...
    entry_index = find_forwardbufferentry(0xffff, test_node);
    if (entry_index >= 0)
    {
      *next_hop = forward_buffer[entry_index].next_node;
      if (id_to_peerlink(*next_hop) >= 0 && *next_hop != source_id)   // must be a live neighbor, not a loop back
      {
        DBGOUT("Next hop retrieved from forward buffer: %d", *next_hop);
        return 1;
      }
    }

    // ...or a flow coming from test_node tells us the hop it arrived through
    entry_index = find_forwardbufferentry(test_node, 0xffff);
    if (entry_index >= 0)
    {
      *next_hop = forward_buffer[entry_index].prev_node;
      if (id_to_peerlink(*next_hop) >= 0 && *next_hop != source_id)
      {                                      // ...follow next hop from cache if found
        DBGOUT("Next hop retrieved from forward buffer: %d", *next_hop);
        return 1;
      }
    }

    if (destpath_index <= 0)     // ran out of earlier waypoints to try
      break;
    destpath_index--;
    test_node = p->path[destpath_index];
  };

  return 0;
}

void send_forwardmsg(ADDRESS source_id, FORWARD_MSG *p)  // send this previously initialized forward message to the next hop
{
  ADDRESS dest;
  uint8_t path_resolved = 0;

  // pick the next hop: basestation traffic follows the tree parent; otherwise use the path
  // vector head, resolving a fresh route when the path is empty or its head is not a neighbor
  if ((p->path_len == 0 && p->dest_node == BASESTATION_ADDR) || (p->path_len > 0 && p->path[0] == BASESTATION_ADDR))
  {
    dest = trees[0].parent;
  }
  else if (p->path_len == 0 || id_to_peerlink(p->path[0]) < 0)
  {
    // braces added: the original unbraced chain relied on dangling-else pairing,
    // which is correct but fragile under edits (goto-fail class hazard)
    if (resolve_path(source_id, p, &dest))
    {
      DBGOUT("Path successfully resolved");
      path_resolved = 1;
    } else {
      DBGERR("Cannot resolve path!");
      if (p->path_len > 0)
        pause_sim();
      forward_unreachable(p);
      return;
    }
  }
  else
  {
    dest = p->path[0];
  }

  if (path_resolved == 0 && checkallow_forwardbufferupdate(p))
  {                    // for certain kinds of messages, update the forward buffers, path cache and subscribe buffer
    update_forwardbuffer(source_id, dest, p);            // do not update buffers if path was resolved using them
    if (p->dest_node != BASESTATION_ADDR && p->path_len > 0)
    {
      if (p->path_len > 1)
        update_pathcache(p);
      check_subscribebuffer(p);
    }
  }

  checkrepair_forwardmsg(source_id, dest, p);
}

void forward_reached(FORWARD_MSG *msg)   // greedy forwarding arrived at its destination
{
  uint8_t return_pathlen;
  ADDRESS return_path[MAX_PATHLEN];

DO_STACKTEST

  return_pathlen = msg->path_len;
  memcpy(return_path, msg->path, return_pathlen*sizeof(ADDRESS));
  if (return_pathlen > 0)
  {
    // build the reverse route: flip all but the last hop, then terminate at the sender
    reverse_pathvec(return_path, return_pathlen-1);
    return_path[return_pathlen-1] = msg->source_node;
  }

  enqueue_payload(msg->source_node, msg->dest_node, return_pathlen, return_path, msg->data_len,
    msg->data, msg->failed);
#ifdef TOSSIM
  finish_forward(msg);
#endif
}

uint8_t doshortcut_forwardmsg(FORWARD_MSG *msg)     // find shortcuts in source route; returns 1 if a shortcut was applied
{
  int i, j, t, curr_pos, end_pos;
  ADDRESS path[MAX_PATHLEN];
  uint8_t path_len;
  int best_shortcutlen, shortcut_len, best_shortcuttype = 0, best_peer = 0, best_index = 0;
  int max_index, min_index;

DO_STACKTEST

  // reconstruct the full route (including this node) into a linear working path
  path_len = msg->path_len+1;
  if (msg->destructive == 0)
  {
    // nondestructive paths are stored rotated; un-rotate around the destination position
    end_pos = find_pathpos(msg->path_len, msg->path, msg->dest_node);
    if (end_pos < 0)
    {
      DBGERR("Cannot find destination!");
      pause_sim();
      return 0;
    }

    path[0] = msg->source_node;
    memcpy(&path[1], &msg->path[end_pos+1], (path_len-2-end_pos)*sizeof(ADDRESS));
    memcpy(&path[path_len-end_pos-1], msg->path, (end_pos+1)*sizeof(ADDRESS));
  } else {
    path[0] = TOS_NODE_ID;     // destructive: remaining path already starts after this node
    memcpy(&path[1], msg->path, (path_len-1)*sizeof(ADDRESS));
  }

  // scan all healthy peers against every path position, keeping the longest cut found
  curr_pos = find_pathpos(path_len, path, TOS_NODE_ID);
  best_shortcutlen = 0;
  for (j = 0; j < MAX_PEERLINKS; j++)
    if (peer_link[j].used && peerlink_statusok(j))
      for (i = 0; i < path_len; i++)
      {
        if (peer_link[j].id == path[i])      // check for single hop shortcuts
        {
          shortcut_len = abs(curr_pos-i)-1;
          if (shortcut_len > best_shortcutlen)
          {
            best_shortcutlen = shortcut_len;
            best_index = i;
            best_shortcuttype = 1;
            best_peer = j;
          }
        }

        for (t = 0; t < config.trees_enabled; t++)
          if (peer_link[j].tree.trees[t].parent == path[i])  // check for double hop shortcuts
          {
            shortcut_len = abs(curr_pos-i)-2;
            if (shortcut_len > best_shortcutlen)
            {
              best_shortcutlen = shortcut_len;
              best_index = i;
              best_shortcuttype = 2;
              best_peer = j;
            }
          }

        // two-hop shortcut through a peer's extended neighborhood (path[i] not a direct neighbor)
        if (id_to_peerlink(path[i]) < 0 && find_extneighborhoodentry(peer_link[j].id, path[i]) >= 0)
        {
          shortcut_len = abs(curr_pos-i)-2;
          if (shortcut_len > best_shortcutlen)
          {
            best_shortcutlen = shortcut_len;
            best_index = i;
            best_shortcuttype = 3;
            best_peer = j;
          }
        }
      }

  if (best_shortcutlen > 0)
  {
    DBGOUT("Shortcut len: %d, Shortcut type: %d, Peer: %d, Index: %d", best_shortcutlen,
      best_shortcuttype, peer_link[best_peer].id, best_index);
    print_path(path, path_len, "Old path");

    if (curr_pos < best_index)
    {
      min_index = curr_pos;
      max_index = best_index;
    } else {
      max_index = curr_pos;
      min_index = best_index;
    }

    // collapse the bypassed segment, inserting the intermediate peer for 2-hop cuts
    switch (best_shortcuttype)
    {
      case 1:
        for (i = max_index; i < path_len; i++)
          path[i+min_index-max_index+1] = path[i];
        break;

      case 2:
      case 3:
        for (i = max_index; i < path_len; i++)
          path[i+min_index-max_index+2] = path[i];
        path[min_index+1] = peer_link[best_peer].id;
        break;

      default:
        DBGERR("Invalid shortcut type: %d", best_shortcuttype);
        pause_sim();
        return 0;
    }

    // re-encode the shortened route back into the message's rotated representation
    path_len -= best_shortcutlen;
    curr_pos = find_pathpos(path_len, path, TOS_NODE_ID);
    msg->path_len = path_len-1;
    memcpy(msg->path, &path[curr_pos+1], (path_len-curr_pos-1)*sizeof(ADDRESS));
    memcpy(&msg->path[path_len-curr_pos-1], &path[1], curr_pos*sizeof(ADDRESS));

    print_path(path, path_len, "New path");
    print_path(msg->path, msg->path_len, "Encoded");
    
    return 1;
  }
  
  return 0;
}

void shortcut_forwardmsg(FORWARD_MSG *msg)
{                 // destructive msgs get one shortcut pass; nondestructive ones up to two
  if (msg->destructive)
  {
    doshortcut_forwardmsg(msg);
    return;
  }

  if (doshortcut_forwardmsg(msg))    // try again only if the first pass found a shortcut
    doshortcut_forwardmsg(msg);
}

/* We have arrived at a multicast branch (split) node: decode the multicast
 * tree carried in msg->multicast_buffer, deliver locally if the tree root is
 * flagged send_here, and re-initiate one destructive forward per subtree.
 * source_id is the node the message arrived from; it is passed through to
 * init_forward for each spawned copy. */
void handle_multicast(ADDRESS source_id, FORWARD_MSG *msg)    // check if we've arrived at a multicast branch node
{
  int i;
  uint8_t destpath_nodecount;
  ADDRESS destpath_nodes[MAX_PATHLEN];
  uint8_t multicastbuffer_len;
  uint8_t *p, multicast_buffer[MULTICASTBUFFER_SIZE];
  MULTICAST_NODE *root = NULL;
  uint8_t send_multicast;
  uint8_t data_len, data[MAX_FORWARDDATASIZE];

DO_STACKTEST

  /* Parse the serialized multicast tree into a node structure rooted at
   * root (allocated by unmarshal_multicast — freed at the bottom). */
  p = msg->multicast_buffer;
  unmarshal_multicast(&p, &root, 0);

  DBGOUT("Parsed multicast:");
  multicast_print(root, 0);

  if (root->send_here)
  {
    /* This node is itself a multicast recipient: deliver the payload here
     * in addition to forwarding it down the subtrees below. */
    DBGOUT("Send here. Done forwarding");
    forward_reached(msg);
  }

  send_multicast = check_forwardsendmulticast(msg);
  for (i = 0; i < root->child_count; i++)             // for each multicast subtree, forward a copy of the payload
  {
    destpath_nodecount = root->children[i]->path_len;
    if (destpath_nodecount == 0)
    {
      /* An empty subtree path means the encoded tree is malformed. */
      DBGERR("destpath_nodecount = 0!");
      pause_sim();
      return;
    }

    /* Copy the subtree's path vector and a private copy of the payload so
     * each spawned forward owns its own buffers. */
    memcpy(destpath_nodes, root->children[i]->path, destpath_nodecount*sizeof(ADDRESS));
    data_len = msg->data_len;
    memcpy(data, msg->data, data_len);
    
    if (root->children[i]->child_count > 0)
    {
      /* Subtree branches again further on: re-serialize it so the next
       * split node can repeat this process. */
      p = multicast_buffer;
      marshal_multicast(&p, root->children[i], 0);
      multicastbuffer_len = p-multicast_buffer;
    } else {
      /* Leaf subtree: no more splits, and the payload's multicast marker
       * is cleared (presumably so downstream nodes treat it as unicast —
       * confirm against clearmulticast_payload). */
      multicastbuffer_len = 0;
      clearmulticast_payload(&data_len, data, msg->source_node);
    }

    /* Destination is the last node of the subtree path; forwarding is
     * always destructive for the spawned copies. */
    init_forward(msg->source_node, source_id, destpath_nodes[destpath_nodecount-1],
      destpath_nodecount, destpath_nodes, FORWARD_DESTRUCTIVE, data_len, data,
      send_multicast ? multicastbuffer_len : 0, send_multicast ? multicast_buffer : NULL, ROUTE_SUCCESS);
  }

#ifndef DISABLE_FREE
  multicast_free(root);
#endif  
}

/* Main handler for a greedy-forwarding request received from source_id.
 * Optionally shortcuts the path, applies payload-dependent processing,
 * terminates (or multicast-splits) at the destination, tries a one-hop
 * neighbor shortcut when the path vector is exhausted, and otherwise
 * passes the message on via send_forwardmsg. */
void forward_message(ADDRESS source_id, FORWARD_MSG *msg)    // main handler for greedy forwarding requests
{
  int i, peer_index;

DO_STACKTEST

  DBGOUT("Forwarding message received from node %d", source_id);        // display some debugging info
  DBGOUT("Source: %d, Dest: %d, Data len: %d, Destruct: %d, Failed: %d",
    msg->source_node, msg->dest_node, msg->data_len, msg->destructive, msg->failed);
  if (msg->path_len > 0)
    print_path(msg->path, msg->path_len, "Path");
  if (msg->data_len > 0)
    print_list(msg->data, msg->data_len, "Data");
  if (msg->multicastbuffer_len > 0)
    print_list(msg->multicast_buffer, msg->multicastbuffer_len, "Multicast");
    
                                      // this corner case arises sometimes during repair
  if (msg->destructive && msg->path_len == 1 && msg->path[0] == BASESTATION_ADDR)
    msg->path_len = 0;

#if ATTRDOMAIN_BITS <= ATTR_BITS
  /* Shortcut only paths long enough to potentially benefit (>2 hops) and
   * only when the payload allows it. */
  if (msg->path_len > 2 && checkallow_forwardshortcut(msg))
    shortcut_forwardmsg(msg);    
#endif

  if (msg->data_len > 0)        // perform some payload-dependent operations on the request
    handle_forwardintermediate(msg);

  if (msg->dest_node == TOS_NODE_ID)
  {
    if (msg->multicastbuffer_len == 0 || msg->failed) // we're done if we are not at a multicast split node
    {                                                 // do not process a failed message with multicast
      DBGOUT("Done forwarding!");                                             
      if (msg->failed == 0 && msg->destructive && msg->source_node != msg->dest_node)
        update_forwardbuffer(source_id, TOS_NODE_ID, msg);  // we still need to update the flow buffer
      forward_reached(msg);
    } else
      handle_multicast(source_id, msg);   // we are a multicast split point  

    return;
  }

  if (msg->path_len == 0 && msg->dest_node != BASESTATION_ADDR)
  {                    // reached end of path vector and still no destination -> check neighbors
    peer_index = neighborshortcut_index(msg->dest_node);
    if (peer_index >= 0)
    {
      /* NOTE(review): path_len is 0 in this branch, so this shift loop
       * never executes — path[0] is simply set below.  Harmless, but the
       * loop looks like leftover generality; confirm before removing. */
      for (i = msg->path_len; i > 0; i--)
        msg->path[i] = msg->path[i-1];
      msg->path[0] = peer_link[peer_index].id;
      msg->path_len++;
    }
  }

  send_forwardmsg(source_id, msg);
}
                                    
/* First-level handler for an incoming forwarding message: decode the wire
 * buffer, account for the traffic, and hand the message to merge_forward
 * for local processing. */
void handle_forwardmsg(ADDRESS source_id, uint8_t *p)
{
  FORWARD_MSG msg;
  uint8_t msg_size;

DO_STACKTEST

  msg_size = unmarshal_forwardmsg(&msg, p);

  /* Update receive statistics before processing; the counters and
   * merge_forward are independent of each other. */
  msgrcv_forward++;
  bytercv_forward += msg_size;

  merge_forward(&msg, source_id, TOS_NODE_ID, 1);
}

/* Send-completion handler for a forwarding message.  Only failures are
 * acted on (send_success == 0): depending on configuration this either
 * gives up, tries a specialized peer detour toward the base station, or
 * starts the general RepairRoute machinery — creating a repair-path entry,
 * queueing the failed message, and launching init_repairroute.
 * nodes[0] is expected to be the single neighbor that could not be reached. */
void handlesent_forwardmsg(uint8_t msg_len, uint8_t *p, uint8_t send_success, uint8_t node_count,
  ADDRESS *nodes, uint8_t confirm_seq)                           // handler for failure to send a forwarding message
{
  FORWARD_MSG msg;
  uint8_t repair_pathlen;
  int repair_pathindex, dest_index, peer_index;
  REPAIR_MSGHI msg_out;
  uint8_t buffer[sizeof(msg_out)];
  int size;

DO_STACKTEST

  if (send_success == 0)                  // currently only handling failure is interesting
  {
    DBGOUT("Cannot send forwarding message!");
    if (node_count != 1)
    {
      /* Repair logic below assumes exactly one failed neighbor. */
      DBGERR("Failed nodes = %d!", node_count);
      return;
    }

    if (config.repair_level == REPAIR_NONE)
    {
      DBGERR("Ignored because repair is disabled");
      return;
    }

    unmarshal_forwardmsg(&msg, p);
    if (msg.path_len == 0)
    {
      if (msg.dest_node == BASESTATION_ADDR)          // try specialized repair for route to Base
      {
        /* Messages to Base carry no path vector; instead of repairing,
         * detour through a peer that is closer to the Base (lower level). */
        peer_index = find_lowerlevelpeer();
        if (peer_index >= 0)
        {
          DBGOUT("Will forward to peer %d to get to Base station", peer_link[peer_index].id);
#ifdef TOSSIM
          log_repairforward(1, msg.source_node, msg.dest_node, nodes[0], peer_link[peer_index].id,
            peer_link[peer_index].tree.trees[0].hops+1-trees[0].hops, NULL, 0, NULL, 0);
#endif

          /* Re-send the original serialized message unchanged to the peer. */
          send_genericmsg(p, msg_len, MSG_CONFIRM, 1, alloc_addr(peer_link[peer_index].id), -1, PEER_DEFAULTPOWER, 1);
          return;
        }
      } else if (msg.dest_node != nodes[0])  // we probably must differentiate btw. last hop of destructive msgs 
      {                                      // and no-path vector msgs (use soft state only)
        DBGERR("Failed to send a message to %d using soft state only.", msg.dest_node);
        return;                                          // give up if we have no path and we're more than 1 hop away from dest
      }
    }  

    if (msg.failed)
    {
      DBGERR("Ignored failed forward message"); // do not try to repair failed payloads
      return;
    }

    if (config.repair_level == REPAIR_SINGLETREE)
    {
      DBGERR("Ignored because repair is single tree");
      return;
    }

    /* Undo the hop advance that was applied before the failed send so the
     * path vector again includes the unreachable neighbor. */
    stepback_forwardmsg(nodes[0], &msg);
    print_path(msg.path, msg.path_len, "Stepped back path");
                                             // we begin general RepairRoute here
    repair_pathindex = find_repairpathindex(nodes[0], msg.dest_node);
    if (repair_pathindex < 0)
    {
      DBGOUT("Repair buffer entry does not exist");
      if (msg.destructive == 0)
      {
        /* Non-destructive paths keep the full vector; repair only the
         * prefix up to and including the destination. */
        dest_index = find_pathpos(msg.path_len, msg.path, msg.dest_node);
        if (dest_index < 0)
        {
          DBGERR("Cannot find destination %d", msg.dest_node);
          pause_sim();
          return;
        }

        repair_pathlen = dest_index+1;
      } else
        repair_pathlen = msg.path_len;

      repair_pathindex = findfree_repairpathindex();
      if (repair_pathindex < 0)
      {
        DBGERR("Cannot add repair route!");
        pause_sim();
        return;
      }

      /* Record the broken (neighbor, destination) pair along with the path
       * segment being repaired. */
      repair_path[repair_pathindex].used = 1;
      repair_path[repair_pathindex].path_status = PATHSTATUS_REPAIRING;
      repair_path[repair_pathindex].neighbor_node = nodes[0];
      repair_path[repair_pathindex].dest_node = msg.dest_node;

      if (msg.dest_node == BASESTATION_ADDR)
      {
        /* Base-station routes have no real path vector; store a one-entry
         * placeholder path containing just the Base address. */
        repair_pathlen = 1;
        repair_path[repair_pathindex].path_len = 1;
        repair_path[repair_pathindex].path = (ADDRESS *) mcalloc(repair_pathlen, sizeof(ADDRESS));
        repair_path[repair_pathindex].path[0] = BASESTATION_ADDR;
      } else {
        repair_path[repair_pathindex].path_len = repair_pathlen;
        repair_path[repair_pathindex].path = (ADDRESS *) mcalloc(repair_pathlen, sizeof(ADDRESS));
        memcpy(repair_path[repair_pathindex].path, msg.path, repair_pathlen*sizeof(ADDRESS));
      }

      /* High-level repair request that travels with the repair route. */
      msg_out.cmd = CMDHI_REPAIR;
      msg_out.path_found = 0;
      msg_out.path_index = 0;
      msg_out.neighbor_node = nodes[0];
      msg_out.dest_node = msg.dest_node;

      /* Park the failed message until the repair completes. */
      DBGOUT("Enqueueing message in repair queue");
      enqueuerepair_forwardmsg(TOS_NODE_ID, nodes[0], &msg);

      size = marshal_repairmsghi(&msg_out, buffer);
      init_repairroute(msg.dest_node, nodes[0], repair_pathlen, msg.path, size, buffer);
    } else {
      /* A repair for this (neighbor, dest) pair is already in progress;
       * re-forward the stepped-back message — merge_forward/forwarding
       * will route it into the existing repair flow. */
      DBGOUT("Repair buffer entry exists");
      forward_message(TOS_NODE_ID, &msg);
    }
  }
}

/* Periodic OS timer callback: broadcast up to MAX_EXCHANGECOUNT modified
 * flow-buffer entries to neighbors so they can detect path-collapse and
 * merge opportunities.  Entries where this node is an endpoint or where
 * next/prev already coincide with the flow endpoints are skipped, as are
 * flows toward the base station (no path vectors exist there). */
void timer_forwardbufferexchange()   // called from the OS periodically, exchange flow information between neighbors
{
  FORWARDBUFFEREXCHANGE_MSG msg;
  int i, size;
  uint8_t buffer[sizeof(msg)];

DO_STACKTEST

  msg.cmd = CMD_FORWARDBUFFEREXCHANGE;
  msg.entry_count = 0;
  for (i = 0; i < FORWARDBUFFER_SIZE; i++)
    if (forward_buffer[i].used && forward_buffer[i].modified &&
      forward_buffer[i].source_node != TOS_NODE_ID && forward_buffer[i].dest_node != TOS_NODE_ID &&
      forward_buffer[i].next_node != TOS_NODE_ID && forward_buffer[i].prev_node != TOS_NODE_ID &&
      forward_buffer[i].next_node != forward_buffer[i].dest_node &&
      forward_buffer[i].prev_node != forward_buffer[i].source_node && forward_buffer[i].hops > 0 &&
      forward_buffer[i].dest_node != BASESTATION_ADDR) // dont exchange paths v. to base, we dont have path vectors there
    {
      /* Clear the dirty flag so the entry is announced only once per change. */
      forward_buffer[i].modified = 0;
      msg.entries[msg.entry_count].source_node = forward_buffer[i].source_node;
      msg.entries[msg.entry_count].dest_node = forward_buffer[i].dest_node;
      msg.entries[msg.entry_count].next_node = forward_buffer[i].next_node;
      msg.entries[msg.entry_count].hops = forward_buffer[i].hops;

      msg.entry_count++;
      if (msg.entry_count == MAX_EXCHANGECOUNT)
        break;
    }

  if (msg.entry_count > 0)
  {
    /* Unconfirmed broadcast (NULL address list) to all neighbors. */
    DBGOUT("Sending forward buffer exchange message with %d modified entries", msg.entry_count);
    size = marshal_forwardbufferexchangemsg(&msg, buffer);
    send_genericmsg(buffer, size, MSG_NOCONFIRM, 0, NULL, -1, PEER_DEFAULTPOWER, 1);
  }
}

/* Search the flow buffer for an in-use entry whose next hop equals
 * next_node and which shares either the source or the destination with
 * the given flow.  Returns the entry index, or -1 if none matches. */
int forwardbuffer_findflow(ADDRESS source_node, ADDRESS dest_node, ADDRESS next_node)
{
  int entry = 0;

  while (entry < FORWARDBUFFER_SIZE)
  {
    if (forward_buffer[entry].used && forward_buffer[entry].next_node == next_node)
    {
      if (forward_buffer[entry].source_node == source_node ||
          forward_buffer[entry].dest_node == dest_node)
        return entry;
    }
    entry++;
  }

  return -1;
}

/* Look up the path-collapse buffer for an in-use entry matching the given
 * (common_node, neighbor_node) pair.  Returns its index or -1. */
int find_pathcollapseentry(ADDRESS common_node, ADDRESS neighbor_node)
{
  int entry;

  for (entry = 0; entry < PATHCOLLAPSEBUFFER_SIZE; entry++)
  {
    if (!pathcollapse_buffer[entry].used)
      continue;
    if (pathcollapse_buffer[entry].common_node != common_node)
      continue;
    if (pathcollapse_buffer[entry].neighbor_node == neighbor_node)
      return entry;
  }

  return -1;
}

/* Return the index of the first unused path-collapse buffer slot,
 * or -1 when the buffer is full. */
int find_freepathcollapseentry()
{
  int slot;

  for (slot = 0; slot < PATHCOLLAPSEBUFFER_SIZE; slot++)
  {
    if (!pathcollapse_buffer[slot].used)
      return slot;
  }

  return -1;
}

/* Try to register the (common_node, neighbor_node) pair as a detected
 * path-collapse opportunity.  Returns 1 only when the pair was newly
 * recorded; returns 0 when it was already known or no slot is free. */
uint8_t pathcollapse_allow(ADDRESS common_node, ADDRESS neighbor_node)
{
  int slot;

  if (find_pathcollapseentry(common_node, neighbor_node) >= 0)
    return 0;                      // already tracked: suppress duplicate handling

  slot = find_freepathcollapseentry();
  if (slot < 0)
    return 0;                      // buffer full: cannot track, so disallow

  pathcollapse_buffer[slot].used = 1;
  pathcollapse_buffer[slot].common_node = common_node;
  pathcollapse_buffer[slot].neighbor_node = neighbor_node;
  return 1;
}

/* Core of the path-collapse heuristic.  Compares one announced or snooped
 * flow (source_node -> dest_node via next_node, hops away) against every
 * local flow-buffer entry, looking for either (a) a shared source that
 * could multicast over a single link, or (b) a shared destination whose
 * paths could be merged.  On a hit it sends a path-collapse hint (soft-state
 * forward to the flow source) or a path-subscribe request to source_id.
 * is_snooped distinguishes overheard traffic from explicit exchanges;
 * path/path_len carry the snooped path vector when available. */
void handle_forwardbufferexchange(ADDRESS source_id, ADDRESS source_node, ADDRESS dest_node,
  ADDRESS next_node, uint8_t hops, uint8_t is_snooped, uint8_t path_len, ADDRESS *path)
{
  int i, size;
  PATHSUBSCRIBE_MSG msg_out;
  PATHCOLLAPSE_MSGHI msghi;
  uint8_t *buffer;

DO_STACKTEST

  /* Ignore flows that involve this node, flows to the base station, and
   * flows we already track through source_id. */
  if (next_node == TOS_NODE_ID || dest_node == TOS_NODE_ID || dest_node == BASESTATION_ADDR ||
    forwardbuffer_findflow(source_node, dest_node, source_id) >= 0)
    return;

  for (i = 0; i < FORWARDBUFFER_SIZE; i++)
    if (forward_buffer[i].used && forward_buffer[i].dest_node != BASESTATION_ADDR &&
      forward_buffer[i].prev_node != TOS_NODE_ID &&
      forward_buffer[i].next_node != source_id && forward_buffer[i].prev_node != source_id)
    {  
      /* Case (a): same source, different destinations -> possible multicast
       * split over the link with source_id.  dest_node < entry's dest_node
       * breaks ties so only one side of a symmetric detection reacts. */
      if (source_node == forward_buffer[i].source_node && id_to_peerlink(next_node) < 0 &&
        find_extneighborhoodentry(forward_buffer[i].prev_node, source_id) < 0 &&
        dest_node < forward_buffer[i].dest_node)  // tiebreaking
      {
        DBGOUT("Detected possibility to multicast from %d using link with %d to %d and %d",
          forward_buffer[i].source_node, source_id, forward_buffer[i].dest_node, dest_node);
        if (is_snooped && pathcollapse_allow(source_node, source_id) == 0)
        {
          DBGOUT("Seem to have detected this possibility already");
          continue;
        }

        /* Build a single-hint collapse message; path_len 0 means the
         * recipient must discover the path itself. */
        msghi.cmd = CMDHI_PATHCOLLAPSE;
        msghi.hint_count = 1;
        msghi.hints[0].source_node = TOS_NODE_ID;
        msghi.hints[0].dest_node1 = forward_buffer[i].dest_node;
        msghi.hints[0].dest_node2 = dest_node;
        msghi.hints[0].neighbor_node = source_id;
        msghi.hints[0].path_len = 0;

        DBGOUT("Sending path collapse message to %d using soft state", forward_buffer[i].source_node);
        buffer = (uint8_t *) mmalloc(marshalest_pathcollapsemsghi(&msghi));
        size = marshal_pathcollapsemsghi(&msghi, buffer);
        init_forward(TOS_NODE_ID, TOS_NODE_ID, forward_buffer[i].source_node, 0, NULL,
          FORWARD_DESTRUCTIVE, size, buffer, 0, NULL, ROUTE_SUCCESS);
        mfree(buffer);
      } else if (dest_node == forward_buffer[i].dest_node && hops+is_snooped > 1 &&
        hops+is_snooped < forward_buffer[i].hops && forward_buffer[i].next_node != TOS_NODE_ID &&  // if we are not the target...
        source_node < forward_buffer[i].source_node)     // tiebreaking
      {
        /* Case (b): same destination from different sources and the other
         * flow is strictly shorter -> merge paths through source_id. */
        DBGOUT("Detected possibility to merge paths to %d from sources %d and %d using %d",
          forward_buffer[i].dest_node, forward_buffer[i].source_node, source_node, source_id);
        if (is_snooped && pathcollapse_allow(dest_node, source_id) == 0)
        {
          DBGOUT("Seem to have detected this possibility already");
          continue;
        }

        if (path_len > 0)
        {
          /* We know the snooped path already: send a complete collapse
           * hint with next_node prepended to the snooped vector. */
          msghi.cmd = CMDHI_PATHCOLLAPSE;
          msghi.hint_count = 1;
          msghi.hints[0].source_node = TOS_NODE_ID;
          msghi.hints[0].dest_node1 = forward_buffer[i].dest_node;
          msghi.hints[0].dest_node2 = forward_buffer[i].dest_node;
          msghi.hints[0].neighbor_node = source_id;
          msghi.hints[0].path_len = path_len+1;
          msghi.hints[0].path[0] = next_node;
          memcpy(&msghi.hints[0].path[1], path, path_len*sizeof(ADDRESS));

          DBGOUT("Sending path collapse message to %d using soft state", forward_buffer[i].source_node);
          buffer = (uint8_t *) mmalloc(marshalest_pathcollapsemsghi(&msghi));
          size = marshal_pathcollapsemsghi(&msghi, buffer);
          init_forward(TOS_NODE_ID, TOS_NODE_ID, forward_buffer[i].source_node, 0, NULL,
            FORWARD_DESTRUCTIVE, size, buffer, 0, NULL, ROUTE_SUCCESS);
          mfree(buffer);
        } else {
          /* No path available: ask source_id to send us one via a
           * confirmed path-subscribe request. */
          msg_out.cmd = CMD_PATHSUBSCRIBE;
          msg_out.source_node1 = forward_buffer[i].source_node;
          msg_out.source_node2 = source_node;
          msg_out.dest_node = forward_buffer[i].dest_node;

          DBGOUT("Sending path subscribe message to %d for path to %d", source_id, forward_buffer[i].dest_node);
          buffer = (uint8_t *) mmalloc(marshalest_pathsubscribemsg(&msg_out));
          size = marshal_pathsubscribemsg(&msg_out, buffer);
          send_genericmsg(buffer, size, MSG_CONFIRM, 1, alloc_addr(source_id), -1, PEER_DEFAULTPOWER, 1);
          mfree(buffer);
        }
      }
    }    
}

/* Main handler for a flow-exchange message: decode it and evaluate every
 * announced flow entry for path-collapse opportunities.  Announced entries
 * are never snooped and carry no path vector, hence the trailing 0, 0, NULL. */
void handle_forwardbufferexchangemsg(ADDRESS source_id, uint8_t *p)
{
  FORWARDBUFFEREXCHANGE_MSG msg_in;
  int entry;

DO_STACKTEST

  unmarshal_forwardbufferexchangemsg(&msg_in, p);
  DBGOUT("Received forward buffer exchange message from %d", source_id);

  for (entry = 0; entry < msg_in.entry_count; entry++)
  {
    handle_forwardbufferexchange(source_id, msg_in.entries[entry].source_node,
      msg_in.entries[entry].dest_node, msg_in.entries[entry].next_node,
      msg_in.entries[entry].hops, 0, 0, NULL);
  }
}

/* Handler for an overheard (snooped) forwarding message between source_id
 * and dest_node.  When snoop-based path merging is enabled (bit 0x02 of
 * config.merge_paths), destructive tuple messages are fed into the same
 * path-collapse detection as explicit flow exchanges, with is_snooped = 1.
 * For multi-fragment messages only the header is decoded, so the path
 * vector is unavailable and 0/NULL are passed instead. */
void handlesnoop_forwardmsg(ADDRESS source_id, ADDRESS dest_node, uint8_t *p,
  uint8_t fragment_count)
{
  FORWARD_MSG msg_in;

DO_STACKTEST

  if ((config.merge_paths & 0x02) == 0)
    return;

  /* Fragmented messages carry only the header in this fragment. */
  if (fragment_count > 1)
    unmarshal_forwardmsg_head(&msg_in, p);
  else unmarshal_forwardmsg(&msg_in, p);

  if ((msg_in.data[0] & CMDHI_MASK) == CMDHI_TUPLE && msg_in.destructive)
  {                          // consider only tuple messages with destructive pathvector
    /* For destructive messages the remaining path length doubles as the
     * hop count to the destination. */
    DBGOUT("Snooped forward msg from %d to %d, source: %d, dest: %d, hops: %d", source_id,
      dest_node, msg_in.source_node, msg_in.dest_node, msg_in.path_len);
    handle_forwardbufferexchange(source_id, msg_in.source_node, msg_in.dest_node, dest_node,
      msg_in.path_len, 1, (fragment_count == 1) ? msg_in.path_len : 0,
      (fragment_count == 1) ? msg_in.path : NULL);
  }
}

/* Search the path-subscribe buffer for an in-use entry matching both
 * sources, the destination, and the subscriber.  Returns its index,
 * or -1 when no such subscription exists. */
int find_subscribebufferentry(ADDRESS source_node1, ADDRESS source_node2, ADDRESS dest_node,
  ADDRESS subscriber_node)
{
  int entry;

  for (entry = 0; entry < SUBSCRIBEBUFFER_SIZE; entry++)
  {
    if (!subscribe_buffer[entry].used)
      continue;
    if (subscribe_buffer[entry].source_node1 == source_node1 &&
        subscribe_buffer[entry].source_node2 == source_node2 &&
        subscribe_buffer[entry].dest_node == dest_node &&
        subscribe_buffer[entry].subscriber_node == subscriber_node)
      return entry;
  }

  return -1;
}

/* Return the index of the first free slot in the path-subscribe buffer,
 * or -1 when every slot is occupied. */
int find_freesubscribebufferentry()
{
  int slot;

  for (slot = 0; slot < SUBSCRIBEBUFFER_SIZE; slot++)
  {
    if (!subscribe_buffer[slot].used)
      return slot;
  }

  return -1;
}

/* Main handler for a path-subscribe message from source_id.  If the
 * requested destination's path is already cached, reply immediately with
 * a confirmed path-respond message; otherwise record the subscription so
 * a later path discovery can satisfy it.  When the subscribe buffer is
 * full, a random existing entry is evicted. */
void handle_pathsubscribemsg(ADDRESS source_id, uint8_t *p)
{
  PATHSUBSCRIBE_MSG msg_in;
  PATHRESPOND_MSG msg_out;
  int subscribebuffer_entry, pathcache_entry, size;
  uint8_t buffer[sizeof(msg_out)];

DO_STACKTEST

  unmarshal_pathsubscribemsg(&msg_in, p);
  /* Fix: the original log said "sources" (plural) but printed only
   * source_node2; both sources are part of the subscription key. */
  DBGOUT("Path subscribe received from node %d on sources %d, %d and destination %d", source_id,
    msg_in.source_node1, msg_in.source_node2, msg_in.dest_node);

  pathcache_entry = find_pathcacheentry(msg_in.dest_node);
  if (pathcache_entry >= 0)
  {
    DBGOUT("Path found in cache at position %d", pathcache_entry);

    /* Answer right away with the cached path, echoing back the flow key. */
    msg_out.cmd = CMD_PATHRESPOND;
    msg_out.source_node1 = msg_in.source_node1;
    msg_out.source_node2 = msg_in.source_node2;
    msg_out.dest_node = msg_in.dest_node;
    msg_out.path_len = path_cache[pathcache_entry].path_len;
    memcpy(msg_out.path, path_cache[pathcache_entry].path,
      sizeof(ADDRESS)*path_cache[pathcache_entry].path_len);

    size = marshal_pathrespondmsg(&msg_out, buffer);
    send_genericmsg(buffer, size, MSG_CONFIRM, 1, alloc_addr(source_id), -1, PEER_DEFAULTPOWER, 1);
    return;
  }

  DBGOUT("Path not found in cache. Adding path subscribe entry");

  subscribebuffer_entry = find_freesubscribebufferentry();
  if (subscribebuffer_entry < 0)
    subscribebuffer_entry = rand() % SUBSCRIBEBUFFER_SIZE;  // buffer full: evict a random entry

  subscribe_buffer[subscribebuffer_entry].used = 1;
  subscribe_buffer[subscribebuffer_entry].source_node1 = msg_in.source_node1;
  subscribe_buffer[subscribebuffer_entry].source_node2 = msg_in.source_node2;
  subscribe_buffer[subscribebuffer_entry].dest_node = msg_in.dest_node;
  subscribe_buffer[subscribebuffer_entry].subscriber_node = source_id;
}

/* Main handler for a path-respond message: the neighbor source_id has
 * supplied a path to msg.dest_node.  Wrap it in a single-hint
 * path-collapse message and forward it (soft-state, destructive) to
 * msg.source_node1 — the flow source that should collapse its path. */
void handle_pathrespondmsg(ADDRESS source_id, uint8_t *p)
{
  PATHRESPOND_MSG msg;
  PATHCOLLAPSE_MSGHI msghi;
  int size;
  uint8_t buffer[sizeof(msghi)];

DO_STACKTEST

  unmarshal_pathrespondmsg(&msg, p);
  DBGOUT("Path respond message received from node %d. Notifying source %d", source_id, msg.source_node1);
  print_path(msg.path, msg.path_len, "Path");

  /* Both dest fields carry the same destination: this hint merges two
   * paths to one node rather than enabling a multicast split. */
  msghi.cmd = CMDHI_PATHCOLLAPSE;
  msghi.hint_count = 1;
  msghi.hints[0].source_node = TOS_NODE_ID;
  msghi.hints[0].dest_node1 = msg.dest_node;
  msghi.hints[0].dest_node2 = msg.dest_node;
  msghi.hints[0].neighbor_node = source_id;
  msghi.hints[0].path_len = msg.path_len;
  memcpy(msghi.hints[0].path, msg.path, msg.path_len*sizeof(ADDRESS));

  DBGOUT("Sending path collapse message to %d using soft state", msg.source_node1);
  size = marshal_pathcollapsemsghi(&msghi, buffer);
  init_forward(TOS_NODE_ID, TOS_NODE_ID, msg.source_node1, 0, NULL, FORWARD_DESTRUCTIVE,
    size, buffer, 0, NULL, ROUTE_SUCCESS);
}


