/*
 * scpstp.c - Space Communications Protocol Standards Transport Protocol
 */
#include "scps.h"
#include "scpstp.h"
#include "scpsudp.h"
#include "scps_constants.h"
#include <stdio.h>
#include <sys/types.h>
#include "tp_sched.h"
#include <sys/uio.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include "tp_debug.h"
#include "np_scmp.h"
#include "scps_ip.h"
#include "rs_config.h"
#include "tp_timers.h"
extern GW_ifs gw_ifs;

#include "other_proto_handler.h"


tp_Socket *tp_allsocs;    /* Head of the singly linked list of all TP sockets */
udp_Socket *udp_allsocs;  /* Head of the singly linked list of all UDP sockets */
struct _timer rate_timer;  /* Rate Control timer */
procref timer_fn[TIMER_COUNT];  /* Array of timer callback functions, one per timer type */

struct llbuf *in_data;  /* inbound packet buffer (filled by nl_ind()) */
byte out_data[MAX_MTU];    /* outbound packet buffer */
struct msghdr out_msg;    /* the message for sndmsg */
struct iovec out_iov[20];  /* Handle up to 19 segments + 1 raw header */

extern struct gwio *sock_interface; 
extern struct gwio *divert_gwio;

/* Round-robin scan state for the socket service loop in tp().
 * start_s  - socket the current pass began at
 * init_s   - socket to resume at after a rate-credit stall
 * current_s- socket most recently serviced */
tp_Socket *start_s = NULL;
tp_Socket *init_s  = NULL;
tp_Socket *current_s = NULL;
int num_loops = 0;  /* full passes over tp_allsocs in the current scan */

#include "rt_alloc.h"

uint32_t local_addr;  // Local IP address

/* receives counts packets pulled off the network layer in the current
 * iteration; max_receives caps that count so timers/sends still run. */
int receives, max_receives;

/*
 * IP identification numbers
 */
unsigned short tp_id;
unsigned short udp_id;

/* TP's concept of current time (advanced externally; units appear to be
 * microseconds given the c_netstat_interval * 1000 * 1000 comparison in tp() —
 * TODO confirm) */
uint32_t tp_now;
uint32_t last_cluster_check;  /* tp_now value of the last cluster/socket census */

/* inbound packet's requirements structure
 * NOTE(review): tp() declares a local of the same name that shadows this. */
scps_np_rqts rqts_in;

/* Timer definitions */

int ls;
int ll_read_avail;    /* Lower layer socket id */

short global_conn_ID;    /* Connection identifier for compression -
         * each socket uses an ID unique to this
         * machine */

/* Instrumentation counters for writes/sends on TP and UDP paths */
int write_count;
int send_count;
int udp_write_count;
int udp_send_count;

int delayed_sent = 0;  /* count of delayed ACKs emitted by tp() */

int TS_ARRAY_LEN = 1;  /* logical length of ts_array below */

/* Timestamp/RTT sample storage (single slot; TS_ARRAY_LEN tracks it) */
struct _times {
  struct timeval t;
  uint32_t rtt;
}ts_array[1];


/* Rate-control state machine: GW_LOST_AND_REGAINED_RATE -> GW_USING_RATE ->
 * GW_LOST_RATE, driven by per-route credit checks in tp(). */
int rate_condition = GW_LOST_AND_REGAINED_RATE;
extern struct _clust_mem_map clust_mem_map;

/*
 * tp() - main busy-wait service loop for the SCPS transport layer.
 *
 * Never returns under normal operation. Each iteration:
 *   1. Drains up to max_receives inbound packets from the network layer and
 *      dispatches them by protocol (TCP, compressed SCPS-TP, UDP, ICMP).
 *   2. Services expired timers and runs tp_Timers() once per TP_TICK.
 *   3. Round-robins over every TP socket: emits delayed ACKs, pushes queued
 *      data, arms the retransmit and TIME-WAIT timers, reclaims orphaned
 *      clusters, moves gateway peer data, and handles aborts.
 *   4. Flushes UDP sockets whose rate credit permits, emits other-protocol
 *      queues, and services the raw interfaces directly (select()-based
 *      draining is not possible with the tiny-threads model; see comment
 *      near the end).
 *
 * NOTE(review): the only exit path is the bare `return;` taken when
 * tp_BuildHdr() fails (mbuff exhaustion) — that terminates the entire
 * transport loop, not just this socket's ACK. Verify this is intentional.
 */
void tp (void)
{
  uint32_t start;                 /* snapshot of tp_now for this iteration */
  byte proto;                     /* transport protocol id of the inbound packet */
  static uint32_t timeout = 0;    /* next TP_TICK deadline; 0 means "unset" */
  static int send_delay = 0;      /* iterations since last forced send */
  static int max_send_delay = 40; /* force a send attempt every 40 iterations */
  tp_Socket *s = NULL;
  udp_Socket *us;
  int cc;                         /* inbound byte count from nl_ind() */
  uint32_t bytes_sent;
  tp_Header *tp;
  scps_np_rqts rqts_in;           /* NOTE(review): shadows the file-scope rqts_in */
  struct mbuff *mbuffer;          /* header-only mbuff for delayed ACKs */
  int offset = 0;                 /* offset of the transport header in in_data */
  struct timeval mytime;          /* scratch timeval (also shadowed below) */
  int already_freed;              /* set when the handler consumed in_data itself */
  int  clust_check = 0;           /* iteration counter gating the cluster sweep */
  max_receives = 5;               /* cap on packets drained per iteration */

  mytime.tv_sec = 0;

  LOG("tp start\n");

  while (1) {
    while (1) {
      start = tp_now;  /* clock_ValueRough (); */
      if (!timeout)
        timeout = start + TP_TICK;
      receives = 0;

      /* This needs to loop through interfaces and pull data off them */
      /* Drain inbound packets, at most max_receives per iteration so the
       * timer/send work below still gets CPU time. */
      while ((receives < max_receives) && (scheduler.gwio_data) 
        && ((cc = nl_ind (&rqts_in, MAX_MTU, &offset)) > 0)) {
        tp = (tp_Header *) ((void *) in_data->data + offset);
        proto = rqts_in.tpid;
        rqts_in.divert_port_number = in_data->divert_port_number;
        already_freed = 0;
        LOG("proto=%d\n", proto);
        switch (proto) {
        case IPPROTO_TCP:
          tp_Handler (&rqts_in, cc, tp);
          break;
        case SCPSCTP:
          /* SCPS header-compressed TP segment */
          tp_CompressedHandler (&rqts_in, cc, tp);
          break;
        case IPPROTO_UDP:
          udp_Handler (&rqts_in, cc, tp);
          break;
        case IPPROTO_ICMP:
          /* ICMP handler wants the IP header, so rewind to the start of the
           * buffer; it also takes ownership of in_data (frees it itself). */
          tp = (tp_Header *) ((void *) in_data->data);
          icmp_Handler (&rqts_in, cc, (ip_template *) tp, offset);
          already_freed = 1; 
          break;
        default:
          break;
        }
        receives++;
        /* Free the buffer */
        if (!already_freed) {
          free_llbuf (rqts_in.gwio, in_data);
        }
      }

      // If there is nothing in the receive queue, check 
      // timers and issue delayed acks for all sockets.
      // Check timers and issue delayed acks for each socket
      if (scheduler.timers.expired_queue.head) {
        service_timers ();
      }
      /* Once per TP_TICK (sequence-number-safe comparison), advance tp_now
       * and run the periodic timer pass. */
      if (SEQ_GT (start, timeout)) {
        tp_now = start;
        /* service_timers(); */
        tp_Timers ();
        timeout = start + TP_TICK;
        if (!timeout)
          timeout++;  /* reserve 0 value timeout */
      }

   
      /* Re-seed the scan pointers from the socket list if any of them were
       * invalidated (e.g. socket creation/teardown since last pass). */
      if (!init_s) {
        init_s = tp_allsocs;
      }
      if (!start_s) {
        start_s = tp_allsocs;
      }
         
      if (!s) {
        s = tp_allsocs;
      }
      s = start_s;  /* NOTE(review): overwrites s unconditionally; the
                     * assignment just above is dead, as is this one (the
                     * rate_condition branch below sets s = init_s on both
                     * arms). Preserved as-is pending confirmation. */

      if (rate_condition == GW_LOST_AND_REGAINED_RATE) {
        /* Rate credit was lost and has returned: resume the scan at the
         * socket that stalled so it gets first use of the new credit. */
        s = init_s;
        rate_condition = GW_USING_RATE;
      } else {
        s = init_s;
      }
   
      if (!s) {
        s = tp_allsocs;
      }
      start_s = s;
      /* After two full passes, reset the scan to the head of the list. */
      if (num_loops == 2) {
        s = tp_allsocs;
        init_s = tp_allsocs;
        start_s = tp_allsocs;
      }
      num_loops = 0;

      /* Per-socket service pass: iterate from start_s until we wrap back to
       * it or complete two full list traversals. */
      do {
        if (s) {
          /* Make sure the socket has a route before trying to send on it. */
          if (!s->rt_route) {
            s->rt_route = route_rt_lookup_s (s);
          }
          /* Emit a pending (delayed) ACK if one is due. */
          if (s->sockFlags & (SOCK_ACKNOW | SOCK_CANACK)) {
            s->flags = tp_FlagACK;
            if (!(mbuffer = tp_BuildHdr (s, NULL, 0))) {
              /* NOTE(review): out of mbuffs — this exits tp() entirely. */
              return;
            }
            if ((tp_NewSend (s, mbuffer, FALSE)) > 0)  {
              /* ACK went out: clear all delayed-ACK state and the timer. */
              delayed_sent++;
              s->sockFlags &=~(SOCK_ACKNOW | SOCK_CANACK | SOCK_DELACK);
              s->unacked_segs = 0;
              s->lastack = s->acknum;
              clear_timer (s->otimers[Del_Ack], 1);
              s->lastuwe = s->acknum + s->rcvwin;
              s->ack_delay = 0;
            }    /* If NewSend returned non-zero */
            free_mbuff (mbuffer);
          }    /* If SOCK_ACKNOW set */
          /* Push any unacknowledged or queued data. */
          if (s->send_buff->snd_una || s->send_buff->send) {
            tp_NewSend (s, NULL, FALSE);
          }
          /* Every max_send_delay iterations, force a send attempt even if
           * the condition above did not fire. */
          if (send_delay >= max_send_delay)  {
            send_delay = 0;
            if (s->send_buff->send) {
              tp_NewSend (s, NULL, FALSE);
            }
          }  else  {
            send_delay++;
          }
         /* Need to do this to push out final data holding
          * up a FIN because of lack of mbuffs
          */
          if (sys_memory.clust_in_use + 10 < sys_memory.fclist.max_size) {
            if (s->state == tp_StateWANTTOCLOSE || s->state == tp_StateWANTTOLAST)
                tp_Flush (s);
          }

          /* Periodic allocation census, gated by c_netstat_interval
           * (configured in seconds; tp_now compared in microseconds). */
          {     
            if ( (gw_ifs.c_netstat_interval)  && ((tp_now - last_cluster_check) > (gw_ifs.c_netstat_interval * 1000 * 1000))) {
              int i; 
              int cluster_count = 0;
              int socket_count  = 0;

              last_cluster_check = tp_now;
              for (i = 0; i < MAX_CLUST; i++) {
                if (clust_mem_map.clust_list [i].used == 1) {
                  cluster_count ++;
                }
              }
              LOG ("Gateway: cluster allocation: %d\n", cluster_count);

              for (i = 0; i < MAX_SCPS_SOCKET; i++) {
                if (scheduler.sockets[i].ptr != NULL) {
                  socket_count ++;
                }
              }
              LOG ("Gateway: socket allocation: %d\n", socket_count);
              /* NOTE(review): these log 0/1 (queue non-empty), not a count,
               * despite the "buffers available" wording. */
              if (sock_interface)
                LOG ("Gateway: interface buffers available : %d\n", !TAILQ_EMPTY(&sock_interface->available));
              if (divert_gwio)
                LOG ("Gateway: divert interface buffers available : %d\n", !TAILQ_EMPTY(&divert_gwio->available));
            }
          }
          /* Arm the retransmit timer when data is outstanding and neither
           * the persist nor the rexmit timer is already running. */
          if (!s->otimers[Persist]->set) {
            if ((!s->otimers[Rexmit]->set) && ( (s->send_buff->snd_una) || SEQ_GT (s->max_seqsent, s->snduna)) 
              && (s->state != tp_StateCLOSED) && (s->otimers[Rexmit]->expired == 0) ) {
              struct timeval mytime;  /* NOTE(review): shadows the outer mytime */
              mytime.tv_sec = 0x0;
              /* RTO = srtt + max(500ms, 4*rttvar), clamped to [RTOMIN, RTOMAX]
               * (classic Jacobson/Karels-style computation). */
              mytime.tv_usec =((s->t_srtt>>TP_RTT_SHIFT) + max (500000,((s->t_rttvar>>TP_RTTVAR_SHIFT) << 2)));
              mytime.tv_usec = max (mytime.tv_usec, s->RTOMIN);
              mytime.tv_usec = min (mytime.tv_usec, s->RTOMAX);
              set_timer (&mytime, s->otimers[Rexmit], 1);
            }
          }
          /* Arm the 2*MSL TIME-WAIT timer on entry to TIME-WAIT. */
          if ((!s->otimers[TW]->set) && (s->state ==tp_StateTIMEWT)) {
            s->timeout = s->TWOMSLTIMEOUT;
            mytime.tv_sec = s->TWOMSLTIMEOUT;
            mytime.tv_usec = 0;
            set_timer (&mytime, s->otimers[TW], 1);
          }
          /* Every 50000 iterations, sweep the cluster map and free clusters
           * that are marked used but no longer linked to anything. */
          {
            int i;
            clust_check ++;
            if (clust_check > 50000) {
              clust_check = 0;

              for (i = 0; i < MAX_CLUST; i++) {
                if ((clust_mem_map.clust_list [i].clust) && 
                  (clust_mem_map.clust_list [i].used == 1) && 
                  (clust_mem_map.clust_list [i].clust->parent == 0x0) &&
                  (clust_mem_map.clust_list [i].clust->c_next == 0x0) &&
                  (clust_mem_map.clust_list [i].clust->c_prev == 0x0)) {
                  free_mclus (clust_mem_map.clust_list [i].clust);  
                }
              }
            }
          }
          /* Gateway mode: shuttle data between this socket and its peer. */
          if ((s) && (s->peer_socket) && (s->gateway_flags & (GATEWAY_MORE_TO_WRITE))) {
            gateway_move_data (s, s->peer_socket);
          }

          /* Gateway-requested abort: tear down both sides, then continue
           * the scan from the next socket (saved before the abort frees s). */
          if ((s) && (s->gateway_flags & (GATEWAY_ABORT_NOW))) {
            tp_Socket *ttmp;
            if (s->peer_socket) {
              tp_Abort (s->peer_socket->sockid);
            }
            ttmp = s->next;
            tp_Abort (s->sockid);
            s = ttmp;
            /* NOTE(review): s advances again below, so the socket after an
             * aborted one is skipped this pass — confirm intentional. */
          }
        }
        current_s = s;
        if (s)  
          s = s -> next;
        if (!s) {
          /* Wrapped past the tail: restart at the head and count the pass. */
          s = tp_allsocs;
          num_loops ++;
        }

        /* If this socket exhausted its rate credit (credit <= one MTU),
         * record where to resume once credit returns: at this socket if it
         * still has data to move and hasn't had its fairness turn, else at
         * the next one. */
        if ((current_s) && (current_s->rt_route) &&
          (current_s->rt_route->current_credit <= current_s->rt_route->MTU) &&  
          (rate_condition == GW_USING_RATE) ) {
          if (((current_s->send_buff->send) || (current_s->send_buff->holes) ) &&
            (!current_s->gateway_fairness_ctr)) {
            init_s = current_s;
            rate_condition = GW_LOST_RATE;
          } else {
            init_s = s;
            rate_condition = GW_LOST_RATE;
          }
          current_s->gateway_fairness_ctr = 0;
        }
      } while (s != start_s && num_loops != 2);

      /* See if any UDP datagrams need to be kicked out the door */
      for (us = udp_allsocs; us != NULL; us = us->next) {
        /* Wake any thread blocked in select() whose deadline has passed. */
        if ((us->select_timer) && (SEQ_GEQ (tp_now, us->select_timer))) {
          us->select_timer = 0;
          us->thread->status = Ready;
        }
        /* Flush a full buffer once the link is up and rate credit covers
         * payload + UDP header + security preamble + 20-byte IP header. */
        if ((us->buff_full) &&
          (us->rt_route && us->rt_route->flags & RT_LINK_AVAIL) &&
          (us->rt_route->current_credit >= ((int) us->send_buff->start->m_ext.len + UDP_HDR_LEN + us->sp_size + 20))) {    /* this is hard-coded for IP! */
          udp_Coalesce (us, &bytes_sent);
          us->buff_full = FALSE;
        }
      }
      other_proto_emit ();
      other_proto_non_ip_emit ();
      other_proto_ipv6_emit ();
      // Servicing the interface every 10ms just doesn't
      // cut if for running faster than ~7Mbps; So we need
      // to do it here too :o(
      // If you are running at slower speeds, there is no
      // need to service the interface here!
      // 
      // We need a dedicated thread that will select() on
      // the interfaces and drain them as data becomes 
      // available; This is not possible with our tiny 
      // threads because I have no way of letting the 
      // select() block waiting for input without *all* the 
      // threads blocking too; POSIX threads are probably 
      // the portable way to do this, but they aren't 
      // necessarily available (or stable) on all platforms.
      // 
      //    Solaris     ~yes;
      //    SunOS       No (but LWP do exist)
      //    Linux       ~yes;
      //    Irix        ~yes(?)
      //    {Foo}BSD    ???
      //   NT          No (but there is Win32 threads)
      //    OS/2        No (but there is some threading capability)
      //
      // I'm working on a more acceptable solution than this,
      // but it's requiring thought...
      if (sock_interface)
        service_interface (sock_interface);
      if (divert_gwio)
        service_interface (divert_gwio);
    }      /* while allsocs */
    /* NOTE(review): unreachable — the inner while(1) has no break. */
    fflush (stdout);
    timeout = 0;
  }        /* while(1) */
}

