/**  
 * Copyright (c) 2010 University of Pennsylvania.
 *     All rights reserved.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing,
 *  software distributed under the License is distributed on an "AS
 *  IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 *  express or implied.  See the License for the specific language
 *  governing permissions and limitations under the License.
 *
 */
 
void init_mlink()                        // initialize structures
{
  int i;

  // Channel contention estimate: start pessimistic under TOSSIM so the
  // simulated backoff logic kicks in immediately; real hardware starts at 0.
#ifdef TOSSIM
  channel_contention = 1;
#else
  channel_contention = 0;
#endif

  // Hop distance to the time source: the base station is the source (0),
  // everyone else starts "infinitely" far away (0xff) until a sync arrives.
  time_hops = (TOS_NODE_ID == BASESTATION_ADDR) ? 0 : 0xff;
    
  discoverymsg_sending = 0;

  // Output queue bookkeeping: current occupancy and highest slot in use.
  outputbuffer_size = 0; outputbuffer_maxindex = 0;
  sendbroadcast_confirmseq = 1; // start from one, 0 is reserved for no confirmation

  outputbuffer_sending = 0;
  memset(output_buffer, 0, sizeof(output_buffer));

  // Scheduler heap over the output buffer (see outputschedule_* below).
  outputschedule_size = 0;
  memset(output_schedule, 0, sizeof(output_schedule));
  
  memset(fragment_buffer, 0, sizeof(fragment_buffer));
  fragmentbuffer_size = 0;

  // Per-peer link state; -1 message indices mean "no pending dependent message".
  memset(peer_link, 0, sizeof(peer_link));
  broadcast_msgindex = -1;
  for (i = 0; i < MAX_PEERLINKS; i++)
  {
    peer_link[i].response_time = 1;
    
    peer_link[i].unicast_msgindex = -1;
    peer_link[i].sendunicast_confirmseq = 1;   // 0 is reserved, same as broadcast
    peer_link[i].rcvunicast_confirmseq = 0;
    peer_link[i].rcvbroadcast_confirmseq = 0;
    
    peer_link[i].rcvunicast_dupcount = 0;
    peer_link[i].rcvbroadcast_dupcount = 0;
  }

  // State of the physical message currently being assembled (see update_buffer).
  msgout_groupsize = 0; msgout_destaddr = 0;
  memset(msgout_grouplist, 0, sizeof(msgout_grouplist)); 
  
  memset(discovery_buffer, 0, sizeof(discovery_buffer));
  reliable_peercount = 0;
}

int id_to_peerlink(ADDRESS node_id)     // translate from node_id to index into peer_link
{
  int slot;

  // Linear scan over the peer table; only occupied slots are candidates.
  for (slot = 0; slot < MAX_PEERLINKS; slot++)
  {
    if (!peer_link[slot].used)
      continue;
    if (peer_link[slot].id == node_id)
      return slot;
  }

  return -1;   // node_id is not a known peer
}

uint8_t peerlink_statusok(int peer_index)   // return 1 if id exists in peer lists with status OK, 0 otherwise
{
  /* Bug fix: this function returns uint8_t, so the previous "return -1" was
   * converted to 255 — a truthy value — making a missing or unused peer look
   * "status OK" to any boolean caller.  Return 0 instead, matching the
   * documented contract above. */
  if (peer_index < 0 || peer_index >= MAX_PEERLINKS || peer_link[peer_index].used == 0)
    return 0;

  // A peer is usable only when its link is OK and it is not congested.
  return (peer_link[peer_index].status == PEERLINK_OK && peer_link[peer_index].congestion == 0);
}

void outputschedule_swapentries(int i, int j)   // realizes priority-queue for output queue scheduler
{
  // Exchange two heap slots via plain struct assignment.
  OUTPUTSCHEDULE_ENTRY tmp;

  tmp = output_schedule[i];
  output_schedule[i] = output_schedule[j];
  output_schedule[j] = tmp;
}

void outputschedule_heapify(int i)              // realizes priority-queue for output queue scheduler
{
  int left, right, smallest;

  if (i < 0 || i >= outputschedule_size)
  {
    DBGERR("Index %d does not exist!", i);
    pause_sim();
    return;
  }

  // Iterative sift-down of a min-heap keyed on nexttry_time.
  // NOTE(review): this uses 1-based child arithmetic (2i, 2i+1) on a 0-based
  // array, so index 0's "left child" computes to 0 itself (harmlessly
  // compared against itself) and its only real child is index 1.  This is
  // internally consistent with the i/2 parent rule in
  // outputschedule_decreasetime, so do not "fix" one without the other.
  for (;;)
  {
    left = 2*i; right = 2*i+1;

    if (left < outputschedule_size && output_schedule[left].nexttry_time < output_schedule[i].nexttry_time)
      smallest = left;
    else smallest = i;

    if (right < outputschedule_size &&
      output_schedule[right].nexttry_time < output_schedule[smallest].nexttry_time)
      smallest = right;

    if (smallest == i)   // heap property restored at this node
      break;

    outputschedule_swapentries(i, smallest);
    i = smallest;        // continue sifting down from the swapped position
  }
}

void outputschedule_decreasetime(int i, uint32_t new_time)  // realizes priority-queue for output queue scheduler
{
  if (i < 0 || i >= outputschedule_size)
  {
    DBGERR("Index %d does not exist!", i);
    pause_sim();
    return;
  }

  // Classic decrease-key: overwrite the key, then bubble the entry up while
  // it beats its parent.  Parent of i is i/2, mirroring the 2i/2i+1 child
  // rule used by outputschedule_heapify (see note there).
  // NOTE(review): the caller is expected to pass new_time <= the current key;
  // a larger value would silently violate the heap — not checked here.
  output_schedule[i].nexttry_time = new_time;
  while (i > 0 && output_schedule[i/2].nexttry_time > output_schedule[i].nexttry_time)
  {
    outputschedule_swapentries(i, i/2);
    i /= 2;
  }
}

void outputschedule_getmin(int *n, uint32_t *t)    // realizes priority-queue for output queue scheduler
{
  /* Peek at the earliest scheduled entry without removing it.
   * On success *n receives the output-buffer index and *t its airing time.
   * On an empty schedule both out-parameters are left untouched, so callers
   * must check outputschedule_size first (update_buffer does).
   * Consistency fix: the sibling scheduler routines pause the simulation on
   * this invalid state; do the same here instead of returning silently. */
  if (outputschedule_size == 0)
  {
    DBGERR("Output schedule is empty!");
    pause_sim();
    return;
  }

  *n = output_schedule[0].outputbuffer_index;
  *t = output_schedule[0].nexttry_time;
}

void outputschedule_popmin()            // retrieve earliest remaining message to be aired
{
  if (outputschedule_size == 0)
  {
    DBGERR("Output schedule is empty!");
    pause_sim();
    return;
  }

  // Move the last entry into the root slot, shrink the heap, then restore
  // the heap property from the top (unless the heap became empty).
  outputschedule_size--;
  outputschedule_swapentries(0, outputschedule_size);
  if (outputschedule_size != 0)
    outputschedule_heapify(0);
}

void outputschedule_schedule(int n, uint32_t nexttry_time)   // schedule message in output buffer to aired at a given time
{
  int slot;

  // Time 0 is reserved as the internal minimum key (outputschedule_delete
  // uses it to float an entry to the root), so it may not be scheduled.
  if (nexttry_time == 0)
  {
    DBGERR("Cannot schedule index %d for time 0", n);
    pause_sim();
    return;
  }

  // Append at the end of the heap, then let decrease-key bubble it into place.
  slot = outputschedule_size;
  outputschedule_size++;
  output_schedule[slot].outputbuffer_index = n;
  outputschedule_decreasetime(slot, nexttry_time);
}

int outputschedule_getindex(int n)   // find the heap slot holding output-buffer entry n, -1 if absent
{
  int slot = 0;

  while (slot < outputschedule_size)
  {
    if (output_schedule[slot].outputbuffer_index == n)
      return slot;
    slot++;
  }

  return -1;
}

void outputschedule_delete(int n)         // remove a message from scheduler after successful confirmation
{
  int slot = outputschedule_getindex(n);

  if (slot >= 0)
  {
    // Decrease-key to the reserved minimum (0) floats the entry to the heap
    // root, where popmin can remove it.
    outputschedule_decreasetime(slot, 0);
    outputschedule_popmin();
    return;
  }

  DBGERR("Index %d does not exist!", n);
}

ADDRESS compute_destaddr(uint8_t node_count, ADDRESS *nodes)  // compute virtual address of a set of neighbors
{
#ifdef USE_VIRTUALADDR
  int i;  
  ADDRESS addr;

  // Degenerate cases: no destinations means broadcast; a single destination
  // is addressed directly by its real-node address.
  if (node_count == 0)
    return AM_BROADCAST_ADDR;
  else if (node_count == 1)
    return REALNODE_MASK(nodes[0]);

  // Multiple destinations: OR the per-node bitmask of each destination into
  // one virtual address (assumes VIRTUALNODE_MASK yields one bit per node —
  // TODO confirm against the macro's definition).
  addr = 0;
  for (i = 0; i < node_count; i++)
    addr |= VIRTUALNODE_MASK(nodes[i]);
  addr = VIRTUALNODE_ADDR(addr);

  return addr;
#else
  // Without virtual addressing only unicast or broadcast is expressible.
  return (node_count == 1) ? nodes[0] : AM_BROADCAST_ADDR;
#endif  
}

uint8_t is_dest(ADDRESS addr)             // check if current node is in the virtual address specified by the sender
{
#ifdef USE_VIRTUALADDR
  // Accept if: broadcast, or a real (non-virtual) address equal to this node,
  // or a virtual address whose bitmask includes this node's bit.
  return (addr == AM_BROADCAST_ADDR) ||
    (IS_VIRTUAL(addr) == 0 && REALNODE_MASK(addr) == TOS_NODE_ID) ||
    (IS_VIRTUAL(addr) && (addr & VIRTUALNODE_MASK(TOS_NODE_ID)));
#else
  return (addr == AM_BROADCAST_ADDR || addr == TOS_NODE_ID);
#endif    
}

void handlesent_discoverymsg(uint8_t msg_len, uint8_t *p, uint8_t send_success, uint8_t node_count,
  ADDRESS *nodes, uint8_t confirm_seq)
{                        // send-done hook for discovery messages
  if (confirm_seq != 0)
    return;

  // An unconfirmed discovery finished: permit enqueuing the next one.
  discoverymsg_sending = 0;
}

void handle_discoverymsg(ADDRESS source_id, uint8_t *p)
{               // handler for incoming discovery logical message
  DISCOVERY_MSG msg;
  int i, peer_index = -1;

  unmarshal_discoverymsg(&msg, p);

  // A plain (non-timesync) discovery from our tree parent while the tree is
  // fully built means the parent rebooted: propagate the reset downstream.
  if (!msg.is_timesync && tree_phase == TREE_COUNT && source_id == trees[0].parent)
  {  
    reset_requested = RESET_CODE;  // Our parent has reset, initiate reset in turn     
    return;
  }

  if (msg.has_time)
  {                                                 
    // Adopt the sender's clock only if it is strictly closer to the time
    // source than we are (fewer hops) and its fields look sane.
    if (msg.time_hops < time_hops && msg.time > 0 && msg.main_timer > 0)
    {                                           // update current time and time_hops
      time_hops = msg.time_hops+1;
      // NOTE(review): the -1-(TOS_NODE_ID % 2) offset presumably compensates
      // for propagation/processing delay and staggers odd/even nodes — TODO
      // confirm against the timer update logic.
      timer_cycles = msg.time-1-(TOS_NODE_ID % 2);    
      update_attr(ATTR_MAINTIMER, msg.main_timer, timer_cycles);
    }    
    if (msg.is_timesync)   // pure timesync messages carry no discovery payload
      return;  
  }
    
  for (i = 0; i < MAX_PEERLINKS; i++)    // check if the peer currently exists
    if (peer_link[i].used)
    {
      if (peer_link[i].id == source_id)     // if yes, exit
        return;
    } else
      peer_index = i;                  // maintain a free slot

  if (peer_index != -1) //new peer and also there is space to add it to the list
  {
    DBGOUT("New discovery message received from %d", source_id);
    peer_link[peer_index].used = 1;
    peer_link[peer_index].id = source_id;
  } else {
    DBGERR("Cannot add %d to peer list. List full", source_id);
    pause_sim();
  }
}

void handle_sentmessage(uint8_t msg_len, uint8_t *msg, uint8_t send_success, uint8_t node_count,
  ADDRESS *nodes, uint8_t confirm_seq)            // invoked after message confirmed or timed out
{
  int i, peer_index;

#ifdef TOSSIM
  if (config.disable_msglog == 0)
    log_sendcmdconf(0, NULL, msg, msg_len, send_success);
#endif

  if (send_success == 0)
  {
    DBGOUT("Message timed out!");
#ifdef TOSSIM
    print_path(nodes, node_count, "Nodes not sent to");
#endif

    for (i = 0; i < node_count; i++)
    {
      peer_index = id_to_peerlink(nodes[i]);
      /* Bug fix: this previously tested "peer_link < 0" — the global array,
       * whose address is never negative — so an unknown node id fell through
       * and indexed peer_link[-1] below.  Test the looked-up index instead. */
      if (peer_index < 0)
      {
        DBGERR("Invalid node id: %d", nodes[i]);
        pause_sim();
        return;
      }

      if (peer_link[peer_index].status != PEERLINK_FAILED)  // in case of failure, mark peers as failed and notify upper layers
      {
        DBGOUT("Marked node %d as failed", nodes[i]);
        peer_link[peer_index].status = PEERLINK_FAILED;
      }
    }
  }

  if (msg_len > 0)
  {
    // Dispatch the send-done event to the per-command handler.  The command
    // lives in the low bits of the first payload byte.
    if ((msg[0] & CMD_MASK) >= CMD_COUNT)
    {
      DBGERR("Invalid command %d", msg[0] & CMD_MASK);
      pause_sim();
      return;
    } else switch (msg[0] & CMD_MASK)
    {
      case CMD_DISCOVERY:      handlesent_discoverymsg(msg_len, msg, send_success, node_count, nodes, confirm_seq); break;
      case CMD_TREECONSTRUCT:  handlesent_treeconstructmsg(msg_len, msg, send_success, node_count, nodes, confirm_seq); break;
      case CMD_SEQROUTE:       handlesent_seqroutemsg(msg_len, msg, send_success, node_count, nodes, confirm_seq); break;
      case CMD_FORWARD:        handlesent_forwardmsg(msg_len, msg, send_success, node_count, nodes, confirm_seq); break;
    }
  }  
}

int check_alreadyconfirming(GENERIC_MSG *msg, uint8_t node_count, ADDRESS *nodes)
{                // avoid queueing multiple confirmations for the same message
  int i;
  OUTPUTBUFFER_ENTRY *e;

  // A match is only possible for a single-destination confirmation.
  if (node_count != 1)
    return 0;

  for (i = 0; i < OUTPUTBUFFER_SIZE; i++)
  {
    e = &output_buffer[i];

    if (!e->used)
      continue;
    if (e->msg.msg_len != 0)                       // only confirmations (empty payload)
      continue;
    if (e->msg.confirm_seq != msg->confirm_seq)    // same sequence number
      continue;
    if (e->node_count == 1 && e->nodes[0] == nodes[0])   // same single destination
      return 1;
  }

  return 0;
}

int find_freeoutputbufferindex()          // returns a free index in the output queue, -1 if none
{
  int slot = 0;

  while (slot < OUTPUTBUFFER_SIZE)
  {
    if (!output_buffer[slot].used)
      return slot;
    slot++;
  }

  return -1;   // queue is full
}

int8_t enqueue_message(GENERIC_MSG *msg, uint8_t confirm_send, uint8_t node_count, ADDRESS *nodes,
  uint8_t integrate_next)        // enqueues a generic physical message in the output queue
{                                // integrate_next specifies if to reintegrate and return the split logical message in the end
  // Ownership: on success the queue entry takes ownership of 'nodes' (freed
  // by release_bufferentry); on the duplicate-confirmation path 'nodes' is
  // freed here.  NOTE(review): on the error returns below 'nodes' is neither
  // stored nor freed — presumably the caller still owns it; verify callers.
  // Returns 0 on success, -1 on error.
  int free_index, peer_index;
  uint8_t confirm_seq;
  uint32_t t;
  OUTPUTBUFFER_ENTRY *entry;

DO_STACKTEST

  if (msg->msg_len > MAX_GENERICPAYLOAD)
  {
    DBGERR("Invalid message size: %d", msg->msg_len);
    pause_sim();
    return -1;
  }

  // msg_len == 0 means this is a confirmation; skip it if an identical
  // confirmation is already queued.
  if (msg->msg_len > 0 || check_alreadyconfirming(msg, node_count, nodes) == 0)
  {
    free_index = find_freeoutputbufferindex();
    if (free_index >= 0)
    {
      // Occupancy statistics (high-water marks for diagnostics).
      outputbuffer_size++;
      if (free_index > outputbuffer_maxindex)
        outputbuffer_maxindex = free_index;
      if (outputbuffer_size > maxusage_outputbuffer)
        maxusage_outputbuffer = outputbuffer_size;

      entry = &output_buffer[free_index];
      entry->used = 1;
      entry->retry_count = 0;

      entry->node_count = node_count;
      entry->nodes = nodes;

      entry->integrate_next = integrate_next;
      entry->fragment_msglen = 0;
      entry->fragment_msg = NULL;

      // Doubly linked chain of dependent messages (fragments / same-peer
      // ordering); -1 terminates the chain.
      entry->next = -1;
      entry->prev = -1;

      memcpy(&entry->msg, msg, sizeof(GENERIC_MSG));
      entry->msg.source_id = TOS_NODE_ID;

      if (msg->msg_len > 0)
      {
        if (confirm_send == MSG_CONFIRM)
        {
          if (node_count == 1)
          {
            // Unicast: draw the next per-peer sequence number (1..127; 0 is
            // reserved for "no confirmation").
            peer_index = id_to_peerlink(nodes[0]);
            if (peer_index < 0)
            {
              DBGERR("Cannot find node %d in peer list", nodes[0]);
              pause_sim();
              return -1;
            }

            confirm_seq = peer_link[peer_index].sendunicast_confirmseq;
            peer_link[peer_index].sendunicast_confirmseq =
              (peer_link[peer_index].sendunicast_confirmseq % 127)+1;

            if (peer_link[peer_index].unicast_msgindex != -1)   // maintain list of dependent messages
            {
              entry->prev = peer_link[peer_index].unicast_msgindex;
              output_buffer[peer_link[peer_index].unicast_msgindex].next = free_index;
            }
            peer_link[peer_index].unicast_msgindex = free_index;

            DBGOUT("Issued unicast sequence %d to %d (%d, %d)", confirm_seq, nodes[0], entry->prev, free_index);
          } else {
            // Broadcast: global sequence counter; the high bit marks the
            // sequence as a broadcast one.
            confirm_seq = sendbroadcast_confirmseq | 0x80;
            sendbroadcast_confirmseq = (sendbroadcast_confirmseq % 127)+1;

            if (broadcast_msgindex != -1)   // maintain list of dependent messages
            {
              entry->prev = broadcast_msgindex;
              output_buffer[broadcast_msgindex].next = free_index;
            }
            broadcast_msgindex = free_index;

            DBGOUT("Issued broadcast sequence %d", confirm_seq);
#ifdef TOSSIM
            if (node_count > 0)
              print_path(nodes, node_count, "Destinations");
#endif
          }

          entry->msg.confirm_seq = confirm_seq;
        } else entry->msg.confirm_seq = 0;
      }
      
      if (entry->prev == -1)  // dont schedule dependent messages immediately
      {
        if (msg->msg_len > 0)
        {
          // Jitter proportional to scheduler load; +1 keeps t != 0 (reserved).
          t = startup_cycles + rand() % (outputschedule_size+1) + 1;
          DBGOUT("Scheduled message for time %d", t);
          outputschedule_schedule(free_index, t);
        } else {
          // Confirmations get a small fixed jitter; guard against t == 0.
          t = startup_cycles + (rand() % 3);
          if (t == 0)
            t++;
          DBGOUT("Scheduled confirmation for time %d", t);
          outputschedule_schedule(free_index, t);
        }
      }   
    } else {
      DBGERR("No space to enqueue message!");
      return -1;
    }
  } else {
    DBGOUT("Already have confirmation enqueued for this sequence");
    mfree(nodes);
  }

  return 0;
}

void enqueue_confirmation(ADDRESS addr, uint8_t confirm_seq)  // queues a confirmation message - msg_len = 0
{
  GENERIC_MSG msg;

  DBGOUT("Sending confirmation to %d for sequence %d", addr, confirm_seq);
  
  // A confirmation is an empty-payload message carrying only the sequence
  // number being acknowledged; msg_len == 0 marks it throughout the module.
  msg.msg_len = 0;
  msg.msg = NULL;
  msg.confirm_seq = confirm_seq;
  msg.time_ofs = -1;                   // no embedded timestamp
  msg.send_power = PEER_DEFAULTPOWER;
  msg.allow_merge = 1;                 // may piggyback on other outgoing messages
  // alloc_addr() transfers ownership of the single-address list to the queue.
  enqueue_message(&msg, MSG_NOCONFIRM, 1, alloc_addr(addr), 0);

  msgsent_conf++;   // statistics: confirmations issued
}

int8_t send_genericmsg(uint8_t *msg, uint8_t msg_len, uint8_t confirm_send, uint8_t node_count,
  ADDRESS *nodes, int16_t time_ofs, uint8_t send_power, uint8_t allow_merge)      // splits and queues a maximum 256 byte logical message 
{
  // Splits a logical message into MAX_GENERICPAYLOAD-1 sized fragments, each
  // prefixed with one byte encoding (fragment_index << 4) | fragment_count,
  // and enqueues them.  time_ofs (if >= 0) marks the byte offset within the
  // logical message where the current time must be patched in at send time.
  // Ownership: 'nodes' is consumed (stored with the last fragment or freed on
  // error); each fragment before the last gets its own copy of the list.
  // Returns 0 on success, -1 on error.
  uint8_t i, fragment_count, length;
  GENERIC_MSG gm;
  ADDRESS *n;

  if (node_count == 0)
    confirm_send = MSG_NOCONFIRM;   // broadcasts cannot be confirmed
  
  if (time_ofs == 0) // time field cannot overlap with command byte
    time_ofs = -1;
    
  if (msg_len == 0 || msg_len > MAX_PAYLOAD_LONG)
  {
    DBGERR("Invalid message size: %d", msg_len);
#ifdef TOSSIM
    log_dropcmd(node_count, nodes, msg, msg_len, 1);
#endif
    mfree(nodes);

    return -1;
  }

  // Refuse if the fragments would not fit in the output queue at all.
  fragment_count = DIV_CEIL(msg_len, MAX_GENERICPAYLOAD-1);
  if (fragment_count == 0 || fragment_count > MAX_FRAGMENTCOUNT ||
    fragment_count+outputbuffer_size > OUTPUTBUFFER_SIZE)
  {
    DBGERR("Cannot enqueue %d fragments into output buffer!", fragment_count);
#ifdef TOSSIM
    log_dropcmd(node_count, nodes, msg, msg_len, 1);
#endif
    mfree(nodes);

    return -1;
  }

  msgsent_split += fragment_count;
  if (msg_len > 0 && (msg[0] & CMD_MASK) == CMD_FORWARD)
    msgsent_splitforward += fragment_count;
#ifdef TOSSIM
  if (config.disable_msglog == 0)
    log_sendcmd(node_count, nodes, msg, msg_len);
#endif

  for (i = 0; i < fragment_count; i++)
  {
    if (fragment_count > 1)
      DBGOUT("Sending fragment %d of %d", i+1, fragment_count);

    length = MIN(msg_len, MAX_GENERICPAYLOAD-1);
    gm.msg_len = length+1;
    gm.msg = (uint8_t *) mmalloc(gm.msg_len);

    gm.msg[0] = ((i << 4) | (fragment_count & 0xf));   // append 1 byte describing fragment count and fragment index
    memcpy(&gm.msg[1], msg, length);     // add message itself

    if (i < fragment_count-1)  // copy node list to a new buffer; last fragment takes ownership of 'nodes'
    {
      n = (ADDRESS *) mcalloc(node_count, sizeof(ADDRESS));
      memcpy(n, nodes, node_count*sizeof(ADDRESS));
    } else n = nodes;

    // Track which fragment (if any) contains the time offset; adjust it for
    // bytes consumed by earlier fragments and for the 1-byte fragment header.
    if (time_ofs < 0)
      gm.time_ofs = -1;
    else if (time_ofs >= msg_len)
    {
      gm.time_ofs = -1;
      time_ofs -= length;
    } else {
      gm.time_ofs = time_ofs+1;     // compensate for added byte with fragment info
      time_ofs = -1;
    }

    gm.send_power = send_power;
    gm.allow_merge = allow_merge;
    if (enqueue_message(&gm, confirm_send, node_count, n, i < fragment_count-1) != 0)
    {
      DBGERR("Cannot enqueue fragment %d of %d!", i+1, fragment_count);
#ifdef TOSSIM
      log_dropcmd(node_count, nodes, msg, msg_len, 1);
#endif
      // NOTE(review): on this path gm.msg (and, for non-last fragments, the
      // node-list copy 'n') appear to leak, and already-enqueued earlier
      // fragments stay queued — verify against enqueue_message's error paths.
      mfree(nodes);

      return -1;
    }

    msg += length;       // advance to the next fragment's payload
    msg_len -= length;
  }

  return 0;
}

void send_discoverymsg(ADDRESS addr, uint8_t confirm, uint8_t has_time, uint8_t time_sync)    // queues a discovery message
{
  DISCOVERY_MSG msg;
  uint8_t buffer[sizeof(msg)], len;

  // Reflect discovery progress in the global status LED/state: no time sync
  // yet and no peers -> plain discovery; peers but no sync -> discovery_n;
  // already synced -> discovery_r.
  if (!time_sync)
    set_globalstatus((time_hops == 0xff) ? ((reliable_peercount == 0) ? GS_DISCOVERY : GS_DISCOVERY_N) : GS_DISCOVERY_R); 

  // Only one unconfirmed discovery may be in flight; the flag is cleared in
  // handlesent_discoverymsg once it leaves the queue.
  if (confirm == MSG_NOCONFIRM)
    discoverymsg_sending = 1;

  msg.cmd = CMD_DISCOVERY;
  msg.has_time = has_time;
  msg.is_timesync = time_sync;  
  if (has_time)
  {
    // Embed our current clock so receivers closer to the source can sync.
    msg.time = timer_cycles;
    msg.main_timer = TIMER1_INTERVAL;
    msg.time_hops = time_hops;
  }  
  
  len = marshal_discoverymsg(&msg, buffer);  
  if (addr == AM_BROADCAST_ADDR)        
    send_genericmsg(buffer, len, confirm, 0, NULL, (has_time ? 1 : -1), MAX_RFPOWER, 0); 
  else send_genericmsg(buffer, len, confirm, 1, alloc_addr(addr), (has_time ? 1 : -1), MAX_RFPOWER, 0); // update time@offset 1, right after cmd byte
}

int8_t release_bufferentry(uint16_t k, uint8_t send_success)
{                                      // release an entry from the output queue, with a success indicator
  // Frees slot k of the output queue.  If the entry is a fragment of a split
  // logical message, its payload is appended to the accumulated reassembly
  // buffer; when the chain ends, handle_sentmessage is invoked with the
  // reassembled logical message.  Returns 0 on success, -1 on invalid slot.
  int peer_index;
  uint8_t *p;
  OUTPUTBUFFER_ENTRY *entry;

  if (k >= OUTPUTBUFFER_SIZE || outputbuffer_size == 0 || output_buffer[k].used == 0)
    return -1;

  entry = &output_buffer[k];
  // Unhook this entry from the per-peer / broadcast dependent-message chain.
  if (entry->msg.msg_len > 0 && entry->msg.confirm_seq != 0 && entry->node_count > 0)
  {
    if (entry->next == -1)    
    {
      // Tail of the chain: clear the chain head pointer.  The high bit of
      // confirm_seq distinguishes broadcast (set) from unicast sequences.
      if ((entry->msg.confirm_seq & 0x80) == 0)
      {
        peer_index = id_to_peerlink(entry->nodes[0]);
        if (peer_index < 0)
        {
          DBGERR("Invalid peer_index");
          pause_sim();
          return -1;
        }
        peer_link[peer_index].unicast_msgindex = -1;
      } else broadcast_msgindex = -1;
    } else output_buffer[entry->next].prev = -1;   // successor becomes chain head
  }

  if (entry->msg.msg_len > 0)   // here we are reintegrating the split logical message, which the current entry
  {                                       // is a fragment of
    // Append this fragment's payload (minus the 1-byte fragment header) to
    // the bytes accumulated from earlier fragments.
    p = (uint8_t *) mmalloc(entry->fragment_msglen + entry->msg.msg_len - 1);
    memcpy(p, entry->fragment_msg, entry->fragment_msglen);
    memcpy(&p[entry->fragment_msglen], &entry->msg.msg[1], entry->msg.msg_len-1);
    entry->fragment_msglen += (entry->msg.msg_len-1);

    if (entry->fragment_msg != NULL)
    {
      mfree(entry->fragment_msg);
      entry->fragment_msg = NULL;
    }

    if (entry->next != -1 && entry->integrate_next)
    {                  // more fragments follow: hand the partial reassembly to the next one
      output_buffer[entry->next].fragment_msg = p;
      output_buffer[entry->next].fragment_msglen = entry->fragment_msglen;
    } else {
      // Last fragment (or a non-integrating message): deliver the complete
      // logical message to the send-done dispatcher, then free it.
      handle_sentmessage(entry->fragment_msglen, p, send_success, entry->node_count, entry->nodes, entry->msg.confirm_seq);
      mfree(p);
    }
  }

  if (entry->nodes != NULL)
  {
    mfree(entry->nodes);
    entry->nodes = NULL;
  }

  if (entry->msg.msg != NULL)
  {
    mfree(entry->msg.msg);
    entry->msg.msg = NULL;
  }

  entry->used = 0;
  outputbuffer_size--;

  // Shrink the high-water index past any trailing free slots.
  while (output_buffer[outputbuffer_maxindex].used == 0 && outputbuffer_maxindex > 0)
    outputbuffer_maxindex--;

  return 0;
}

void schedule_nextmessage(int buffer_index)  // buffer_index has been sent, and schedule next message after it if any
{
  int next = output_buffer[buffer_index].next;

  if (next == -1)
    return;   // no dependent successor queued behind this entry

  DBGOUT("Scheduled next message for time %d", startup_cycles+1+outputschedule_size);
  outputschedule_schedule(next, startup_cycles+1+outputschedule_size);
}

void abort_message(int buffer_index)     // this entry has been tried too many times, give up
{
  int next_index;

  // Walk the fragment chain: release each entry with failure status, but
  // only follow the chain when the fragments form one logical message.
  do {
    schedule_nextmessage(buffer_index);

    next_index = -1;
    if (output_buffer[buffer_index].integrate_next)
      next_index = output_buffer[buffer_index].next;

    outputschedule_delete(buffer_index);
    release_bufferentry(buffer_index, 0);     // 0 = failure

    buffer_index = next_index;
  } while (buffer_index >= 0);
}

uint16_t peer_responsetime(int outputbuffer_index)   // compute response time using confirmations from this peer
{
  int j, slot;
  uint16_t count, total = 0;

  // Average the recorded response times of every destination of this entry.
  count = output_buffer[outputbuffer_index].node_count;
  for (j = 0; j < count; j++)
  {
    slot = id_to_peerlink(output_buffer[outputbuffer_index].nodes[j]);
    if (slot < 0)
    {
      DBGERR("Invalid peer_index!");
      pause_sim();
      return 0;
    }

    total += peer_link[slot].response_time;
  }

  if (count == 0)
    return 0;   // broadcast entry: no per-peer data to average

  return total/count;
}

uint8_t get_destpower(uint8_t node_count, ADDRESS *nodes)   // radio power needed to reach every listed peer
{
  int j, slot;
  uint8_t candidate, best = MIN_RFPOWER;
  
  if (node_count == 0)
    return MAX_RFPOWER;   // broadcast: use maximum power

  for (j = 0; j < node_count; j++)
  {
    slot = id_to_peerlink(nodes[j]);
    if (slot < 0)
      return MAX_RFPOWER;   // unknown peer: be conservative

    // Per-peer power plus a penalty for each observed duplicate reception.
    candidate = peer_link[slot].send_power +
      2*(peer_link[slot].rcvunicast_dupcount + peer_link[slot].rcvbroadcast_dupcount);
    if (candidate > best)
      best = candidate;
  }  
  
  return best;
}

int8_t update_buffer(ADDRESS *addr, uint8_t *msg_buf, uint8_t *msg_len, uint8_t *send_power)  
{                                                     // called from the os, generate an outgoing phys. message
  // Picks the earliest scheduled fragment, merges other compatible queued
  // fragments into the same physical frame, marshals everything into msg_buf
  // with a trailing CRC word, and reschedules/releases the fragments.
  // Out-params: *addr destination, *msg_len frame length, *send_power TX power.
  // Returns 0 when a frame was produced, non-zero (1..7) when nothing to send
  // or on error.
  int i, k, peerlink_index, outputbuffer_index;
  uint8_t *p, *p1, congestion_bit, tp;
  uint32_t nexttry_time;
#ifdef USE_VIRTUALADDR  
  ADDRESS msg_addr;
#endif

  // On real hardware, probabilistically back off while the channel is busy;
  // the contention estimate decays by half each call.
#ifndef TOSSIM
  channel_contention /= 2;
  if (rand() % (channel_contention+1))
    return 1;
#endif
  
  if (outputschedule_size == 0 && outputbuffer_size > 0)
  {
    DBGERR("Buffer not empty but schedule empty!");
    pause_sim();
    return 1;
  }

  if (outputschedule_size == 0 || outputbuffer_sending)
    return 2;   // nothing queued, or a transmission is already in flight

  outputschedule_getmin(&outputbuffer_index, &nexttry_time);   // retrieve next fragment to send
  if (outputbuffer_index < 0 || outputbuffer_index > outputbuffer_maxindex ||
    output_buffer[outputbuffer_index].used == 0)
  {
    DBGERR("Invalid outputbuffer_index: %d (%d)", outputbuffer_index, outputbuffer_maxindex);
    pause_sim();
    return 3;
  }

  if (nexttry_time > startup_cycles)
    return 4;   // earliest entry is not due yet

  outputschedule_popmin();                      // remove next fragment to send from scheduler

  // A unicast to a failed peer is pointless: abort the whole chain now.
  if (output_buffer[outputbuffer_index].node_count == 1)
  {
    peerlink_index = id_to_peerlink(output_buffer[outputbuffer_index].nodes[0]);
    if (peerlink_index < 0)
      DBGOUT("Sending to node not in peer link");
    else if (peer_link[peerlink_index].status != PEERLINK_OK)
    {
      DBGERR("Node %d's status id %d. Aborting", output_buffer[outputbuffer_index].nodes[0],
        peer_link[peerlink_index].status);
      abort_message(outputbuffer_index);
      return 6;
    }
  }

  output_buffer[outputbuffer_index].lasttry_time = startup_cycles;
  output_buffer[outputbuffer_index].retry_count++;
  if (output_buffer[outputbuffer_index].retry_count > config.retry_count)
  {
    DBGERR("Failed to send message %d too many times! (%d) Aborting", outputbuffer_index,
      config.retry_count);
    abort_message(outputbuffer_index);
    return 7;
  }

  p = msg_buf;
  *msg_len = 1+sizeof(ADDRESS)+output_buffer[outputbuffer_index].msg.msg_len;

  // The selected fragment becomes group member 0; compatible fragments found
  // below are appended to msgout_grouplist to share this physical frame.
  msgout_groupsize = 1;
  msgout_grouplist[0].outputbuffer_index = outputbuffer_index;
  msgout_grouplist[0].confirm_seq = output_buffer[outputbuffer_index].msg.confirm_seq;
  msgout_grouplist[0].is_conf = (output_buffer[outputbuffer_index].msg.msg_len == 0);
  msgout_grouplist[0].dest_addr = compute_destaddr(output_buffer[outputbuffer_index].node_count,
    output_buffer[outputbuffer_index].nodes);

  // Merge criteria: used, not a dependent fragment (prev == -1), fits in the
  // remaining frame space, and confirmed-ness matches the lead fragment.
  if (output_buffer[outputbuffer_index].msg.allow_merge)
    for (i = 0; i < OUTPUTBUFFER_SIZE; i++)         // look for other fragments to fit into the outgoing message
      if (i != outputbuffer_index && output_buffer[i].used && output_buffer[i].prev == -1 &&
        *msg_len+2+sizeof(ADDRESS)+output_buffer[i].msg.msg_len <= MAX_MSGLEN-sizeof(uint16_t) &&
        (output_buffer[outputbuffer_index].msg.confirm_seq == 0) == (output_buffer[i].msg.confirm_seq == 0) &&
        output_buffer[i].msg.allow_merge)
      {
        *msg_len += 2+sizeof(ADDRESS)+output_buffer[i].msg.msg_len;

        outputschedule_delete(i);
        output_buffer[i].lasttry_time = startup_cycles;
        output_buffer[i].retry_count++;

        msgout_grouplist[msgout_groupsize].outputbuffer_index = i;
        msgout_grouplist[msgout_groupsize].confirm_seq = output_buffer[i].msg.confirm_seq;
        msgout_grouplist[msgout_groupsize].is_conf = (output_buffer[i].msg.msg_len == 0);
        msgout_grouplist[msgout_groupsize].dest_addr = compute_destaddr(
          output_buffer[msgout_grouplist[msgout_groupsize].outputbuffer_index].node_count,
          output_buffer[msgout_grouplist[msgout_groupsize].outputbuffer_index].nodes);

        msgout_groupsize++;
        if (msgout_groupsize == MAX_GROUPSIZE)
          break;
      }

  *addr = msgout_grouplist[0].dest_addr;                        // compute outgoing virtual address
#ifdef USE_VIRTUALADDR
  // OR the per-member destination masks into one virtual address covering
  // every group member's destinations.
  for (i = 1; i < msgout_groupsize; i++)
  {
    msg_addr = msgout_grouplist[i].dest_addr;
    if (msg_addr != *addr)
    {
      if (IS_VIRTUAL(*addr) == 0)
        *addr = VIRTUALNODE_ADDR(VIRTUALNODE_MASK(*addr));
      if (IS_VIRTUAL(msg_addr) == 0)
        msg_addr = VIRTUALNODE_ADDR(VIRTUALNODE_MASK(msg_addr));
      *addr |= msg_addr;      
    }
  }
#else
  // Without virtual addressing, differing destinations force a broadcast.
  if (*addr != AM_BROADCAST_ADDR)
    for (i = 1; i < msgout_groupsize; i++)
      if (msgout_grouplist[i].dest_addr != *addr)
      {
        *addr = AM_BROADCAST_ADDR;
        break;
      }
#endif  
  msgout_destaddr = *addr;

  *send_power = MIN_RFPOWER;
  for (i = 0; i < msgout_groupsize; i++)                  // marshal the outgoing message & compute radio power
  {
    // Per-member layout: [dest_addr if grouped+virtual] confirm_seq
    // [msg_len unless last member] payload.
    if (msgout_groupsize > 1 && IS_VIRTUAL(*addr))
      marshal_word(&p, msgout_grouplist[i].dest_addr);

    k = msgout_grouplist[i].outputbuffer_index;
    marshal_byte(&p, output_buffer[k].msg.confirm_seq);
    if (i < msgout_groupsize-1)  // last length can be recostructed from total length
      marshal_byte(&p, output_buffer[k].msg.msg_len);

    if (output_buffer[k].msg.time_ofs >= 0)              // if there is timer ofset, update current time in message
    {
      p1 = &output_buffer[k].msg.msg[output_buffer[k].msg.time_ofs];
      marshal_long(&p1, timer_cycles);
    }

    marshal_buffer(&p, output_buffer[k].msg.msg, output_buffer[k].msg.msg_len);
    
    // Power grows with retries; PEER_DEFAULTPOWER defers to per-peer stats.
    tp = 2*output_buffer[k].retry_count + ((output_buffer[k].msg.send_power == PEER_DEFAULTPOWER) ? 
      get_destpower(output_buffer[k].node_count, output_buffer[k].nodes) : output_buffer[k].msg.send_power);    
    if (*send_power < tp)
      *send_power = tp;   
  }
  if (*send_power > MAX_RFPOWER)
    *send_power = MAX_RFPOWER;

  // Trailer word: low 11 bits CRC (seeded with group size, congestion bit,
  // address and source id), bit 11 congestion, top 4 bits group size.
  congestion_bit = check_congestion();
  marshal_word(&p, (compute_crc(((uint16_t) msgout_groupsize) ^ ((uint16_t) congestion_bit) ^
    ((uint16_t) (*addr)) ^ ((uint16_t) output_buffer[outputbuffer_index].msg.source_id),
    msg_buf, p-msg_buf) & 0x07ff) | ((uint16_t) congestion_bit << 11) | ((uint16_t) msgout_groupsize << 12));
  *msg_len = p-msg_buf;

  if (*msg_len > MAX_MSGLEN)
  {
    DBGERR("Message too long: %d", *msg_len);
    pause_sim();
    return 1;
  }

  // Traffic statistics.
  msgsent_total++;
  bytesent_payload += *msg_len;
  bytesent_total += *msg_len+OS_OVERHEAD;
  if (output_buffer[outputbuffer_index].msg.msg_len > 0)
    bytesent_totalnoconf += *msg_len+OS_OVERHEAD;
  msggroup_sent += msgout_groupsize;
  if (msgout_groupsize > maxusage_group)
    maxusage_group = msgout_groupsize;

#ifdef TOSSIM                                           // logging and stat maintenance
  if (config.disable_msglog == 0)
    log_sendmsg(*addr, msg_buf, *msg_len);
#endif

  DBGOUT("Sent sequence %d to %d, retry %d, power %d", output_buffer[outputbuffer_index].msg.confirm_seq,
    *addr, output_buffer[outputbuffer_index].retry_count, *send_power);
  for (i = 0; i < msgout_groupsize; i++)
    print_path(output_buffer[msgout_grouplist[i].outputbuffer_index].nodes,
      output_buffer[msgout_grouplist[i].outputbuffer_index].node_count, "Destinations");

  for (i = 0; i < msgout_groupsize; i++)              // update schedules for outgoing fragments
  {
    k = msgout_grouplist[i].outputbuffer_index;
    // Unconfirmed messages and confirmations are done after one send;
    // confirmed ones are rescheduled with a contention/response-time backoff.
    if (output_buffer[k].msg.confirm_seq == 0 || output_buffer[k].msg.msg_len == 0)
      release_bufferentry(k, 1);
    else {
#ifdef TOSSIM
      nexttry_time = startup_cycles+1+MIN(25, channel_contention)+
        output_buffer[k].retry_count*MIN(25, peer_responsetime(k));
#else
      nexttry_time = startup_cycles+1+MIN(15, channel_contention)+
        MIN(25, (uint32_t) output_buffer[k].retry_count*peer_responsetime(k));
#endif

      outputschedule_schedule(k, nexttry_time);
      DBGOUT("Rescheduled message for time %d", nexttry_time);
    }
  }

  return 0;
}

void update_result(error_t result)                    //OS-called handler, indicates if message successfully aired by OS
{
  if (result == SUCCESS)
  {
    outputbuffer_sending = 1;     // transmission in flight; block further sends
#ifdef TOSSIM
    if (channel_contention > 1)   // channel looks a little less busy
      channel_contention--;
#endif    
    return;
  }    

  // Radio refused the message: clear the in-flight flag and back off harder.
  outputbuffer_sending = 0;
  DBGERR("Failed to send message");
#ifdef TOSSIM
  channel_contention += 2;
#endif
}

void handle_senddone(uint8_t was_acked, error_t result)   // called by the OS when transmission is finished
{
#ifndef DISABLE_HARDWAREACK
  int i, peer_index;
#endif  

  outputbuffer_sending = 0;   // frame left the radio; allow the next send
  
#ifndef DISABLE_HARDWAREACK                        // translate hardware acks (if present) into software acks and handle them
  // Hardware acks are only meaningful for a real (non-virtual) unicast
  // destination: look up that single peer and treat the ack as a software
  // confirmation for every confirmed, non-confirmation group member.
  if (result == SUCCESS && was_acked && (IS_VIRTUAL(msgout_destaddr)) == 0)
  {
    peer_index = id_to_peerlink(msgout_destaddr);
    if (peer_index < 0)
    {
      DBGERR("Invalid dest addr: %d", msgout_destaddr);
      pause_sim();
      return;
    }

    for (i = 0; i < msgout_groupsize; i++)
      if (msgout_grouplist[i].confirm_seq != 0 && msgout_grouplist[i].is_conf == 0)
        handle_confirmation(peer_index, msgout_grouplist[i].confirm_seq);
  }
#endif
}

int handle_message(uint8_t peer_index, uint8_t msg_len, uint8_t *msg)
{
  // Dispatch an incoming logical message (from a known peer) to the handler
  // for the command encoded in the low bits of the first payload byte.
  // Returns 0 on success (or empty message), -1 on an unknown command.
  ADDRESS peer_id = peer_link[peer_index].id;
  
#ifdef TOSSIM
  if (config.disable_msglog == 0)
    log_rcvcmd(peer_id, msg, msg_len);
#endif

  // Unit tests may intercept all incoming messages via this hook.
#ifdef UNIT_TEST
  if (testhook_handlemessage)
  {
    (*testhook_handlemessage)(peer_index, msg_len, msg);
    return 0;
  }
#endif

  if (msg_len > 0)
    switch (msg[0] & CMD_MASK)
    {
      case CMD_DISCOVERY:             handle_discoverymsg(peer_id, msg); break;
      case CMD_TREECONSTRUCT:         handle_treeconstructmsg(peer_id, msg); break;
      case CMD_TREECOMMIT:            handle_treecommitmsg(peer_id, msg); break;
      case CMD_TREESUMMARY:           handle_treesummarymsg(peer_id, msg); break;
      case CMD_TREESUMMARYUPDATE:     handle_treesummaryupdatemsg(peer_id, msg); break;
      case CMD_TREEROOTNOTIFY:        handle_treerootnotifymsg(peer_id, msg); break;
      case CMD_SEQROUTE:              handle_seqroutemsg(peer_id, msg); break;
      // reliable and unreliable sequenced routing share one handler
      case CMD_SEQROUTEUNR:           handle_seqroutemsg(peer_id, msg); break;
      case CMD_FLOODROUTE:            handle_floodroutemsg(peer_id, msg); break;
      case CMD_BASEFLOOD:             handle_basefloodmsg(peer_id, msg); break;
      case CMD_FORWARD:               handle_forwardmsg(peer_id, msg); break;
      case CMD_FORWARDBUFFEREXCHANGE: handle_forwardbufferexchangemsg(peer_id, msg); break;
      case CMD_PATHSUBSCRIBE:         handle_pathsubscribemsg(peer_id, msg); break;
      case CMD_PATHRESPOND:           handle_pathrespondmsg(peer_id, msg); break;
      default:
        DBGERR("Invalid command received! (%d)", msg[0] & CMD_MASK);
        pause_sim();
        return -1;
    }

  return 0;
}

void handlesnoop_message(uint8_t peer_index, ADDRESS dest_node, uint8_t msg_len, uint8_t *msg,
  uint8_t fragment_count)
{                // overheard traffic: only forwarding messages are worth snooping
  if (msg_len == 0)
    return;
  if ((msg[0] & CMD_MASK) == CMD_FORWARD)
    handlesnoop_forwardmsg(peer_link[peer_index].id, dest_node, msg, fragment_count);
}

uint8_t checkaccept_msg(uint8_t peer_index, uint8_t msg_len, uint8_t *msg)
{                                   // main logical accept handler
  uint8_t cmd;
  ADDRESS peer_id = peer_link[peer_index].id;

  if (msg_len == 0)
    return 0;                       // empty payload is never accepted

  cmd = msg[0] & CMD_MASK;
  if (cmd >= CMD_COUNT)
    return 0;                       // unknown command: reject

  switch (cmd)
  {
    case CMD_TREECOMMIT:     return checkaccept_treecommit(peer_id, msg);
    case CMD_TREEROOTNOTIFY: return checkaccept_treerootnotifymsg(peer_id, msg);
    case CMD_FLOODROUTE:     return checkaccept_floodroutemsg(peer_id, msg);
    case CMD_BASEFLOOD:      return checkaccept_basefloodmsg(peer_id, msg);
    default:                 return 1;  // all other valid commands are accepted unconditionally
  }
}

int find_extneighborhoodentry(ADDRESS neighbor_node, ADDRESS extneighbor_node)
{                // locate the (neighbor, extended-neighbor) pair in the buffer; -1 if absent
  int slot;

  for (slot = 0; slot < EXTNEIGHBORHOODBUFFER_SIZE; slot++)
  {
    if (extneighborhood_buffer[slot].used == 0)
      continue;
    if (extneighborhood_buffer[slot].neighbor_node != neighbor_node)
      continue;
    if (extneighborhood_buffer[slot].extneighbor_node == extneighbor_node)
      return slot;
  }

  return -1;
}

int find_freeextneighborhoodentry()
{                // first unused slot in the extended neighborhood buffer; -1 when full
  int slot = 0;

  while (slot < EXTNEIGHBORHOODBUFFER_SIZE)
  {
    if (!extneighborhood_buffer[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

void add_extneighborhoodentry(ADDRESS neighbor_node, ADDRESS extneighbor_node)
{                // record that extneighbor_node is heard via neighbor_node; no-op if already known
  int slot = find_extneighborhoodentry(neighbor_node, extneighbor_node);

  if (slot >= 0)
    return;                               // pair already recorded

  slot = find_freeextneighborhoodentry();
  if (slot < 0)
  {                                       // buffer full: evict a random victim
    slot = rand() % EXTNEIGHBORHOODBUFFER_SIZE;
    DBGOUT("Extended neighborhood buffer full. Evicted entry %d", slot);
  }

  extneighborhood_buffer[slot].used = 1;
  extneighborhood_buffer[slot].neighbor_node = neighbor_node;
  extneighborhood_buffer[slot].extneighbor_node = extneighbor_node;
  DBGOUT("Added entry (%d, %d) to extended neighborhood buffer", neighbor_node, extneighbor_node);
}

void handlesnoop_conffragments(uint8_t peer_index, ADDRESS dest_node, GENERIC_MSG *msg)
{                // process an overheard confirmed message that is not addressed to us
  uint8_t fragment_count, fragment_index;

  // Dont add entries to the extended neighborhood buffer during discovery. Some neighbors are not reliable.
  if (peer_discovery == 0 && (msg->confirm_seq & 0x80) == 0)
    add_extneighborhoodentry(msg->source_id, dest_node);

  if (msg->msg_len == 0)
    return;

  if (msg->confirm_seq & 0x80)
  {                                       // bit 7 set marks a broadcast sequence number
    DBGOUT("Snooped broadcast seq %d from %d", msg->confirm_seq, peer_link[peer_index].id);
    peer_link[peer_index].rcvbroadcast_confirmseq = msg->confirm_seq;
  } else {
    DBGOUT("Snooped unicast seq %d from %d", msg->confirm_seq, peer_link[peer_index].id);
  }

  // Fragment header: low nibble = total count, high nibble = index.
  // Only the very first fragment of a well-formed message is snooped.
  fragment_count = msg->msg[0] & 0xf;
  fragment_index = msg->msg[0] >> 4;
  if (fragment_count > MAX_FRAGMENTCOUNT || fragment_index >= fragment_count || fragment_index > 0)
    return;

  handlesnoop_message(peer_index, dest_node, msg->msg_len-1, &msg->msg[1], fragment_count);
}

void cleanup_fragments(ADDRESS source_addr, uint8_t is_broadcast)
{                // when a new fragment received, removed erroneously received fragments from the same source before
  int slot;

  for (slot = 0; slot < FRAGMENTBUFFER_SIZE; slot++)  // cleanup only broadcast or only unicast msgs
  {
    FRAGMENTBUFFER_ENTRY *entry = &fragment_buffer[slot];

    if (entry->used == 0 || entry->source_addr != source_addr ||
      entry->is_broadcast != is_broadcast)
      continue;

    entry->used = 0;
    fragmentbuffer_size--;

    entry->msg_len = 0;
    if (entry->msg)
    {
      mfree(entry->msg);
      entry->msg = NULL;
    }
  }
}

int find_freefragmententry()                  // return a free entry in the fragment buffer, -1 if none
{
  int slot = 0;

  while (slot < FRAGMENTBUFFER_SIZE)
  {
    if (!fragment_buffer[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

int find_fragmententry(ADDRESS source_addr, uint8_t is_broadcast)  // search an entry in the fragment buffer by address
{
  int slot;

  for (slot = 0; slot < FRAGMENTBUFFER_SIZE; slot++)
  {
    if (fragment_buffer[slot].used == 0)
      continue;
    if (fragment_buffer[slot].source_addr == source_addr &&
      fragment_buffer[slot].is_broadcast == is_broadcast)
      return slot;
  }

  return -1;
}

void handle_fragments(uint8_t peer_index, GENERIC_MSG *msg)
{        // check if an incoming physical message needs to be placed in the fragment buffer, and integrate logical messages
  // Fragment header layout (msg->msg[0]): low nibble = total fragment count of
  // the logical message, high nibble = this fragment's index (0-based).
  int buffer_index;
  uint8_t fragment_count, fragment_index, is_broadcast;
  FRAGMENTBUFFER_ENTRY *entry;

  msgrcv_split++;                          // stats: physical messages entering reassembly

  is_broadcast = (msg->confirm_seq & 0x80) != 0;  // bit 7 of confirm_seq marks broadcast
  fragment_count = msg->msg[0] & 0xf;
  fragment_index = msg->msg[0] >> 4;
  if (fragment_count > MAX_FRAGMENTCOUNT || fragment_index >= fragment_count)
  {
    DBGERR("Invalid fragment in handle_fragments: (%d, %d)", fragment_count, fragment_index);
    return;
  }

  if (fragment_count == 1 || msg->confirm_seq == 0)  // single fragment and non-confirmed: always pass through
  {
    if (msg->confirm_seq != 0)                       // cleanup only on confirmed messages
      cleanup_fragments(msg->source_id, is_broadcast);
    // Deliver directly, skipping the fragment buffer; payload starts after the header byte.
    handle_message(peer_index, msg->msg_len-1, &msg->msg[1]);
  } else {
    DBGOUT("Received %s fragment index %d from node %d", is_broadcast ? "broadcast" : "unicast",
      fragment_index, msg->source_id);

    if (fragment_index == 0)                // first fragment of logical message, find a free entry
    {
      // Discard any stale partial reassembly from the same source first.
      cleanup_fragments(msg->source_id, is_broadcast);
      buffer_index = find_freefragmententry();
      if (buffer_index < 0)
      {
        DBGERR("Cannot allocate fragment entry!");
        return;
      }

      fragmentbuffer_size++;
      if (fragmentbuffer_size > maxusage_inputbuffer)   // track high-water mark for diagnostics
        maxusage_inputbuffer = fragmentbuffer_size;

      // Initialize a fresh reassembly entry; msg/msg_len grow as fragments arrive.
      entry = &fragment_buffer[buffer_index];
      entry->used = 1;
      entry->source_addr = msg->source_id;
      entry->is_broadcast = is_broadcast;
      entry->fragment_index = 0;
      entry->msg_len = 0;
      entry->msg = NULL;
    } else {                                  // not the first fragment of a logical message
      buffer_index = find_fragmententry(msg->source_id, is_broadcast);      // find previous integrated fragments
      if (buffer_index < 0)
      {
        DBGERR("Cannot find previous fragments in buffer!");
        return;
      }

      entry = &fragment_buffer[buffer_index];
      // Fragments must arrive strictly in order; a gap aborts this fragment
      // (the stale entry is cleaned up when the next fragment-0 arrives).
      if (entry->fragment_index != fragment_index-1)
      {
        DBGERR("A fragment has been skipped!");
        return;
      }

      entry->fragment_index = fragment_index;
    }
                                                               
    // NOTE(review): mrealloc result is not checked before use — presumably the
    // heapman_available() guard in decode_message makes failure impossible; confirm.
    entry->msg = (uint8_t *) mrealloc(entry->msg, entry->msg_len + msg->msg_len-1);             // integrate with previous fragments
    memcpy(&entry->msg[entry->msg_len], &msg->msg[1], msg->msg_len-1);
    entry->msg_len += msg->msg_len-1;

    if (entry->fragment_index == fragment_count-1)
    {                                               // if this was the last fragment, invoke logical reception handlers
      DBGOUT("Completed fragment from node %d", entry->source_addr);
      handle_message(peer_index, entry->msg_len, entry->msg);

      // entry->msg is left dangling here, but used == 0 guards all other readers.
      if (entry->msg != NULL)
        mfree(entry->msg);
      entry->used = 0;
      fragmentbuffer_size--;
    }
  }
}

uint8_t docheckaccept_fragment(uint8_t peer_index, GENERIC_MSG *msg)
{                            // check if this physical message needs to be confirmed
  int buffer_index;
  uint8_t fragment_count = msg->msg[0] & 0xf;      // low nibble: total fragment count
  uint8_t fragment_index = msg->msg[0] >> 4;       // high nibble: this fragment's index

  if (fragment_count > MAX_FRAGMENTCOUNT || fragment_index >= fragment_count)
  {
    DBGERR("Invalid fragment: (%d, %d)", fragment_count, fragment_index);
    return 0;
  }

  // Single-fragment or leading fragment: bypass fragmentation layer and ask
  // the logical accept handlers directly.
  if (fragment_count == 1 || fragment_index == 0)
    return checkaccept_msg(peer_index, msg->msg_len-1, &msg->msg[1]);

  // Later fragment: confirm when it directly follows the fragment we integrated.
  buffer_index = find_fragmententry(msg->source_id, (msg->confirm_seq & 0x80) != 0);
  if (buffer_index >= 0 && fragment_buffer[buffer_index].fragment_index == fragment_index-1)
    return 1;

  // Otherwise confirm only if it was the last fragment of a logical message we
  // just integrated (entry already gone from the buffer): its sequence still
  // matches the most recently recorded confirm sequence.
  if (msg->confirm_seq == peer_link[peer_index].rcvunicast_confirmseq)
    return 1;
  return msg->confirm_seq == peer_link[peer_index].rcvbroadcast_confirmseq;
}

uint8_t checkaccept_fragment(uint8_t peer_index, GENERIC_MSG *msg) //only used to capture and display result of docheckaccept_fragment
{
  uint8_t accepted = docheckaccept_fragment(peer_index, msg);

  if (!accepted)
    DBGERR("Fragment rejected");

  return accepted;
}

void handle_duplicates(uint8_t peer_index, GENERIC_MSG *msg)
{                // drop duplicate physical messages before fragment reassembly
  if (msg->confirm_seq == 0)
  {                                          // non-confirmed traffic is never deduplicated
    handle_fragments(peer_index, msg);
    return;
  }

  if ((msg->confirm_seq & 0x80) == 0)
  {                                          // unicast: compare against last seen unicast sequence
    if (msg->confirm_seq == peer_link[peer_index].rcvunicast_confirmseq)
    {
      if (peer_link[peer_index].rcvunicast_dupcount < MAX_DUPCOUNT)
        peer_link[peer_index].rcvunicast_dupcount++;
      DBGOUT("Already received unicast %d from %d", msg->confirm_seq, msg->source_id);
      return;
    }

    DBGOUT("Received unicast %d from %d", msg->confirm_seq, msg->source_id);
    peer_link[peer_index].rcvunicast_confirmseq = msg->confirm_seq;
    peer_link[peer_index].rcvunicast_dupcount = 0;
    handle_fragments(peer_index, msg);
    return;
  }

  // Broadcast (bit 7 set): compare against last seen broadcast sequence.
  if (msg->confirm_seq == peer_link[peer_index].rcvbroadcast_confirmseq)
  {
    if (peer_link[peer_index].rcvbroadcast_dupcount < MAX_DUPCOUNT)
      peer_link[peer_index].rcvbroadcast_dupcount++;
    DBGOUT("Already received broadcast %d from %d", msg->confirm_seq, msg->source_id);
    return;
  }

  DBGOUT("Received broadcast %d from %d", msg->confirm_seq, msg->source_id);
  peer_link[peer_index].rcvbroadcast_confirmseq = msg->confirm_seq;
  peer_link[peer_index].rcvbroadcast_dupcount = 0;
  handle_fragments(peer_index, msg);
}

int handle_confirmation(uint8_t peer_index, uint8_t confirm_seq)
{                    // handles an incoming confirmation from a neighbor
  // Returns 1 when a matching pending message was found and updated, 0 otherwise.
  int i, j;

  // Find the pending output-buffer message carrying this confirm_seq, then the
  // confirming neighbor inside that message's destination list.
  for (i = 0; i < OUTPUTBUFFER_SIZE; i++)
    if (output_buffer[i].used && output_buffer[i].msg.confirm_seq == confirm_seq &&
      output_buffer[i].node_count > 0 && output_buffer[i].msg.msg_len > 0)
      for (j = 0; j < output_buffer[i].node_count; j++)
        if (output_buffer[i].nodes[j] == peer_link[peer_index].id)
        {                                         // find neighbor and right message in output_buffer
          msgrcv_conf++;
                                     
          // Exponentially-weighted update: new = (2*old + 1 + elapsed)/3, with
          // elapsed clamped to 0 when the timestamps look inconsistent.
          peer_link[peer_index].response_time = (2*peer_link[peer_index].response_time+1+
            ((startup_cycles > output_buffer[i].lasttry_time) ? startup_cycles-output_buffer[i].lasttry_time : 0))/3; 
          DBGOUT("Updated response time for peer %d to %d", peer_link[peer_index].id,
            peer_link[peer_index].response_time);

          if (output_buffer[i].node_count == 1)
          {                                  // last outstanding destination: retire the message
            schedule_nextmessage(i);         // update output queue and schedule
            outputschedule_delete(i);            
            release_bufferentry(i, 1);
          } else {
            // More destinations pending: swap-delete this neighbor from the list.
            output_buffer[i].nodes[j] = output_buffer[i].nodes[output_buffer[i].node_count-1];
            output_buffer[i].node_count--;
          }

#ifdef TOSSIM
          channel_contention >>= 1;          // halve the simulated contention estimate on success
#endif

          DBGOUT("Confirmation received for sequence %d from %d", confirm_seq,
            peer_link[peer_index].id);

          return 1;
        }

  return 0;
}


#ifdef USER_TOPOLOGY
uint8_t handle_usertop(ADDRESS src_node)
{                // user topology: returns nonzero when this link should drop the message
  int k;

  for (k = 0; k < sizeof(user_top)/sizeof(user_top[0]); k++)
  {
    if (user_top[k].n1 != src_node || user_top[k].n2 != TOS_NODE_ID)
      continue;
    return (rand() % 100) < user_top[k].loss_perc;   // probabilistic loss on a configured link
  }

  return 0;                       // link not listed: never drop
}
#endif

int find_freepeerlink()                     // find a free slot in peer_link, -1 if none
{
  int slot = 0;

  while (slot < MAX_PEERLINKS)
  {
    if (!peer_link[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

int handle_peerlink(ADDRESS source_addr, uint8_t send_power)
{ // before processing messages from this peer, check its status, add it to peer_link, etc
  // Returns the peer_link index for source_addr, or -1 when the peer is in a
  // bad state or the peer table is full.
  int peer_index;

  peer_index = id_to_peerlink(source_addr);
  if (peer_index >= 0 && peer_link[peer_index].status != PEERLINK_OK)
  {
    // BUGFIX: the two arguments were swapped relative to the format string,
    // printing the status as the node id and the node id as the status.
    DBGERR("Node %d status set to %d. Ignored.", source_addr, peer_link[peer_index].status);
    return -1;
  }

  if (peer_index < 0)
  {                                     // unknown peer: try to register it
    peer_index = find_freepeerlink();
    if (peer_index < 0)
    {
      DBGERR("Cannot add %d to peer link. List full", source_addr);
      return -1;
    }

    DBGOUT("Adding peer %d to peer link with send power %d", source_addr, send_power);
    peer_link[peer_index].used = 1;
    peer_link[peer_index].id = source_addr;
    peer_link[peer_index].status = PEERLINK_OK;
    peer_link[peer_index].send_power = send_power;
  }

  return peer_index;
}

int find_discoverybufferentry(ADDRESS node_id)
{                // locate the discovery-buffer slot tracking node_id; -1 if none
  int slot;

  for (slot = 0; slot < DISCOVERYBUFFER_SIZE; slot++)
  {
    if (discovery_buffer[slot].used == 0)
      continue;
    if (discovery_buffer[slot].id == node_id)
      return slot;
  }

  return -1;
}

int find_freediscoverybufferentry()
{                // first unused discovery-buffer slot; -1 when full
  int slot = 0;

  while (slot < DISCOVERYBUFFER_SIZE)
  {
    if (!discovery_buffer[slot].used)
      return slot;
    slot++;
  }

  return -1;
}

int check_discoverybuffer(ADDRESS src_addr, ADDRESS dest_addr, int8_t msg_power)
{
  // Link-discovery state machine. Returns the peer_link index once the link to
  // src_addr is (or becomes) established, -1 while the sender is still being
  // evaluated or has been rejected for this message.
  int entry, peer_index, rcv_power;
    
  peer_index = id_to_peerlink(src_addr);
  if (peer_index >= 0)
    return peer_index;               // already an established peer: nothing to do

  // Only consider discovery for messages addressed to us that arrive strong enough.
  if (is_dest(dest_addr) == 0 || msg_power < MIN_DISCOVERYPOWER)
    return -1;
    
  entry = find_discoverybufferentry(src_addr);
  if (entry < 0)
  {                                  // first contact: allocate a tracking slot
    entry = find_freediscoverybufferentry();
    if (entry < 0)
    {
      DBGERR("Cannot find free discovery buffer entry");
      pause_sim();
      return -1;
    }

    discovery_buffer[entry].used = 1;
    discovery_buffer[entry].id = src_addr;
    discovery_buffer[entry].time = startup_cycles;   // when we first heard this candidate
    discovery_buffer[entry].count = 0;               // messages heard so far
    discovery_buffer[entry].rcv_power = msg_power;
  }

  DBGOUT("Discovery candidate %d, power %hd", src_addr, msg_power);
  
  if (discovery_buffer[entry].count < MAX_DISCOVERYMSG)
    discovery_buffer[entry].count++;
  else {
    // Too many attempts without establishing the link: reset and start over.
    DBGOUT("Failed to establish link after %d cycles. Retrying...", MAX_DISCOVERYMSG);
    discovery_buffer[entry].used = 0;
    return -1;
  }
  
  // Exponentially-weighted received-power average (weights 4/5 old, 1/5 new).
  discovery_buffer[entry].rcv_power = (4*((int16_t)discovery_buffer[entry].rcv_power) + ((int16_t) msg_power)) / 5;
  rcv_power = discovery_buffer[entry].rcv_power;

  if (dest_addr == AM_BROADCAST_ADDR)
  {
    // Broadcast discovery beacon: the lower-id node responds with a directed
    // discovery message; the higher-id node drives link establishment.
    if (src_addr < TOS_NODE_ID)
    {
      DBGOUT("Responding to node %d (%d, %d)", src_addr, discovery_buffer[entry].count,
        startup_cycles-discovery_buffer[entry].time);
      send_discoverymsg(src_addr, MSG_NOCONFIRM, 0, 0);
      return -1;
    }
  } else {
    // Directed discovery response addressed to us.
    DBGOUT("Received response from %d (%d, %d)", src_addr, discovery_buffer[entry].count,
      startup_cycles-discovery_buffer[entry].time);
      
    // Accept the link when the peer has the lower id (it already accepted us),
    // or when we heard enough messages quickly enough to deem the link reliable.
    if (src_addr < TOS_NODE_ID || (discovery_buffer[entry].count >= MIN_DISCOVERYMSG &&
      startup_cycles-discovery_buffer[entry].time < MIN_DISCOVERYTIME))
    {
      DBGOUT("Link with %d now considered reliable", src_addr);
      // NOTE(review): rcv_power*rcv_power/28 looks like an empirical mapping from
      // received power to transmit power — confirm the constant against the radio docs.
      peer_index = handle_peerlink(src_addr, MIN(MAX_RFPOWER, ((rcv_power >= 0) ? MIN_RFPOWER : (rcv_power*rcv_power/28))));
      if (peer_index < 0)
      {
        DBGERR("Cannot add node %d to peer link", src_addr);
        pause_sim();
        return -1;
      }

      if (src_addr > TOS_NODE_ID)
      {
        // We drove the handshake: tell the lower-id peer the link is reliable.
        DBGOUT("Notifying %d for reliable link", src_addr);
        send_discoverymsg(src_addr, MSG_CONFIRM, 0, 0);
      }
      
      reliable_peercount++;

      return peer_index;
    }
  }

  return -1;
}

void decode_message(ADDRESS src_addr, ADDRESS dest_addr, uint8_t *msg_in, uint8_t msg_len, int8_t msg_power)
{          // OS-called, incoming physical message
  // Top-level receive path: validate the frame, verify its CRC, run link
  // discovery, then unmarshal and dispatch each element of the message group.
  int i, peer_index;
  uint8_t group_size, *p, congestion_bit;
  uint16_t crc_cong_group, crc_expected, crc_msg;
  GENERIC_MSG msg;
  ADDRESS addr;

  channel_contention++;              // every reception counts toward the contention estimate

  // Basic sanity filters: our own address or an out-of-range id is invalid;
  // too-short frames and too-weak signals are silently dropped.
  if (src_addr == TOS_NODE_ID || src_addr > ADDR_BITMASK)
  {
    DBGERR("Invalid node: %d. Ignored.", src_addr);
    return;
  }
  if (msg_len < MIN_MSGLEN)  // filter messages with minimum length required by the protocol
    return;
  if (msg_power < MIN_MSGPOWER)
    return;

#ifdef USER_TOPOLOGY
  if (handle_usertop(src_addr))   // ignore msg from link not allowed by user
    return;
#endif

  // Refuse work when the heap is nearly exhausted; the rest of the receive
  // path allocates without further checks.
  if (heapman_available() < CONG_MINHEAP)
  {
    DBGERR("Ignored message from %d, not enough memory: %d", src_addr, heapman_available());
    return;
  }

  // Frame trailer (last 16-bit word): group size in bits 15-12, congestion
  // flag in bit 11, and an 11-bit CRC in bits 10-0.
  p = &msg_in[msg_len-sizeof(uint16_t)];
  crc_cong_group = unmarshal_word(&p);
  group_size = (crc_cong_group >> 12) & 0x0f;
  congestion_bit = (crc_cong_group >> 11) & 0x01;
  // The CRC seed mixes in group size, congestion bit and both addresses, so a
  // frame reinterpreted under different header fields fails the check.
  crc_expected = (compute_crc(((uint16_t) group_size) ^ ((uint16_t) congestion_bit) ^ ((uint16_t) dest_addr) ^
    ((uint16_t) src_addr), msg_in, msg_len-sizeof(uint16_t)) & 0x07ff);
  crc_msg = (crc_cong_group & 0x07ff);
  if (crc_expected != crc_msg)    // ensure message integrity
  {
    DBGERR("CRC error: expected: %4x, got: %4x", crc_expected, crc_msg);
    msgrcv_crcerror++;
    return;
  }

  if (group_size > MAX_GROUPSIZE)
  {
    DBGERR("Invalid group size: %d", group_size);
    return;
  }

  msg.source_id = src_addr;
  // Establishes/advances link discovery; returns <0 while this sender is not
  // yet an established peer (message is then ignored).
  peer_index = check_discoverybuffer(src_addr, dest_addr, msg_power);
  if (peer_index < 0)
    return;

  // Track congestion-state transitions and link statistics for this peer.
  if (congestion_bit && peer_link[peer_index].congestion == 0)
    DBGOUT("Congestion at peer %d", peer_link[peer_index].id);
  else if (congestion_bit == 0 && peer_link[peer_index].congestion)
    DBGOUT("No longer congestion at peer %d", peer_link[peer_index].id);
  peer_link[peer_index].congestion = congestion_bit;
  peer_link[peer_index].lastmsgrcv_time = startup_cycles;
  // Exponentially-weighted received-power average (weights 4/5 old, 1/5 new).
  peer_link[peer_index].rcv_power = (4*((int16_t)peer_link[peer_index].rcv_power)+(int16_t)msg_power)/5;

  if (is_dest(dest_addr))        // keep stats and logs for msgs addressed to us
  {
    msgrcv_total++;
    bytercv_payload += msg_len;
    bytercv_total += msg_len+OS_OVERHEAD;
#ifdef TOSSIM
    if (config.disable_msglog == 0)
      log_rcvmsg(msg.source_id, msg_in, msg_len);
#endif
  }

  // Each group element: [dest addr — only for multi-element virtual groups]
  // [confirm_seq byte] [msg_len byte — omitted for the last element] [payload].
  p = msg_in;
  for (i = 0; i < group_size; i++)  // start unmarshalling the message
  {
    addr = (group_size > 1 && IS_VIRTUAL(dest_addr)) ? unmarshal_word(&p) : dest_addr;
    msg.confirm_seq = unmarshal_byte(&p);

    if (i < group_size-1)
      msg.msg_len = unmarshal_byte(&p);      // msg_len of last fragment is implied by the total size of the message
    else msg.msg_len = msg_len-sizeof(uint16_t)-(p-msg_in);
    if (msg.msg_len > msg_len-sizeof(uint16_t)-(p-msg_in))
    {                                        // declared length would run past the CRC trailer
      DBGERR("Message fragment too long: %d", msg.msg_len);
      return;
    }

    if (msg.msg_len > 0)
    {
      // NOTE(review): mmalloc result is used unchecked — presumably guaranteed
      // by the heapman_available() guard above; confirm.
      msg.msg = (uint8_t *) mmalloc(msg.msg_len);
      unmarshal_buffer(&p, msg.msg, msg.msg_len);
    } else msg.msg = NULL;
      
    if (is_dest(addr) == 0)
      handlesnoop_conffragments(peer_index, addr, &msg);    // not for us: snoop only
    else if (msg.msg_len == 0 && msg.confirm_seq != 0 && addr == TOS_NODE_ID)
    {
      // Empty payload with a sequence number = a pure confirmation for us.
      if (handle_confirmation(peer_index, msg.confirm_seq) == 0)
        DBGERR("Invalid confirmation received for sequence %d from node %d", msg.confirm_seq,
          msg.source_id);   // reject broadcast message with unicast confirm seq
    } else if (msg.msg_len > 0 && (addr == TOS_NODE_ID || msg.confirm_seq == 0 ||
      ((msg.confirm_seq & 0x80) && checkaccept_fragment(peer_index, &msg))))
      if ((msg.msg[0] & 0xf) == 1 ||     // fragment_count == 1
        fragmentbuffer_size < FRAGMENTBUFFER_SIZE-CONG_MINFRAGMENT ||
        find_fragmententry(msg.source_id, (msg.confirm_seq & 0x80) != 0) >= 0)
      {                            // do not respond if receiving a new multifragment msg and fragment buffer full
#ifdef DISABLE_HARDWAREACK
        if (msg.confirm_seq != 0)
#else
        if ((msg.confirm_seq & 0x80) != 0) // if hardware acks, confirm only broadcasts
#endif
          enqueue_confirmation(msg.source_id, msg.confirm_seq);

        handle_duplicates(peer_index, &msg);
      } else DBGERR("Dropped message: fragment buffer: %d, heap: %d", fragmentbuffer_size,
        heapman_available());
    else
      DBGERR("Invalid message! (%d, %d, %d)", msg.msg_len, addr, msg.confirm_seq);

    if (msg.msg)
      mfree(msg.msg);               // per-element payload buffer is owned by this loop iteration
  }
}


