#include <signal.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>  /* for memset */
#include "scps_constants.h"
#include "scps.h"
#include "scpstp.h"
#include "scpsudp.h"
#include "np_scmp.h"
#include "tp_socket.h"
#include "route.h"
#include "tp_timers.h"
#include "tp_debug.h"
#include "tap.h"
#include "other_proto_handler.h"

int scps_np_get_template (scps_np_rqts * rqts, scps_np_template * templ);

#include "rs_config.h"
#include <stdlib.h>
extern GW_ifs gw_ifs;

extern tp_Socket *start_s;
extern tp_Socket *init_s;

extern route *route_list_head;
extern route *def_route;
extern route *other_route;
extern uint32_t tp_now;
extern tp_Socket *tp_allsocs;
extern udp_Socket *udp_allsocs;
extern struct _timer rate_timer;
extern struct msghdr out_msg;
extern struct iovec out_iov[8];

extern int write_count;
extern int send_count;
extern int udp_write_count;
extern int udp_send_count;
extern procref timer_fn[];
extern unsigned short tp_id;
extern unsigned short udp_id;
extern short global_conn_ID;
extern int ll_read_avail;
extern fd_set llfd_set;
extern int ll_max_socket;

extern struct _times {
    struct timeval t;
    uint32_t rtt;
} ts_array[1];

int tp_is_running = 0;

#include "ll.h"

/*
 * Return the network-layer header size, in bytes, for the protocol
 * selected in np_rqts.  Unknown protocols are logged and treated as
 * IPv4 (20 bytes).
 */
int np_hdr_size (scps_np_rqts np_rqts)
{
  if (np_rqts.nl_protocol == NL_PROTOCOL_IPV4)
    return (20);

  if (np_rqts.nl_protocol == NL_PROTOCOL_IPV6)
    return (40);

  if (np_rqts.nl_protocol == NL_PROTOCOL_NP)
    {
      /* NP header size depends on the negotiated template. */
      scps_np_template np_templ;
      return (scps_np_get_template (&np_rqts, &np_templ));
    }

  LOG ("WARNING protocol = %d\n", np_rqts.nl_protocol);
  return (20);
}

/* Size, in bytes, of the fixed (option-free) transport header. */
int tp_hdr_size ()
{
  return 20;
}

/* Size, in bytes, of the security-protocol header (none in this build). */
int sp_hdr_size ()
{
  return 0;
}

/*
 * Initialize the tp implementation
 */
/*
 * One-time initialization of the SCPS transport implementation:
 * sets up the divert/tap interfaces, routing tables, the global rate
 * timer, the timer dispatch table, and then applies per-interface
 * socket options taken from the gw_ifs configuration (interface "a"
 * on route_sock, interface "b" on route_sock2).
 */
void scps_Init (void)
{
  int i;
  struct mbcluster *mbcluster;
  

  /* No link-layer sockets registered yet. */
  ll_max_socket = 0;
  FD_ZERO (&llfd_set);

  out_msg.msg_iov = out_iov;
   

  /* Learn the local IP address used as the source of outgoing traffic. */
  get_local_internet_addr ((char *) &local_addr);
  LOG("get local address %08x\n", local_addr);


  LOG("other_proto_init start\n");
  other_proto_init ();

  LOG("create_divert_interface start\n");
  create_divert_interface (0, (short) 0);

  LOG("gateway_tap_rules start\n");
  gateway_tap_rules ();

  /* Clear the RTT timestamp sample array. */
  memset (ts_array, 0, sizeof (ts_array));

  tp_allsocs = NULL;
  udp_allsocs = NULL;
  
  LOG("route_initialize start\n"); 
  route_initialize ();

  LOG("init_default_routes start\n"); 
  init_default_routes ();
  
  /* Start the 100 ms rate-control timer; SIGALRM is blocked while the
   * timer structures are being manipulated. */
  struct timeval mytime = {.tv_sec = 0, .tv_usec = 100000};
  sigprocmask (SIG_BLOCK, &alarmset, 0x0);    

  LOG("create_timer tp_TFRate start\n");       
  create_timer (&tp_TFRate, route_list_head, 1, &mytime, &rate_timer, -1);
  sigprocmask (SIG_UNBLOCK, &alarmset, 0x0);           

  tp_id = 0;
  udp_id = 0;

  /* Pseudo-random starting connection ID derived from the clock. */
  global_conn_ID = (short) clock_ValueRough ();

  write_count = send_count = 0;
  udp_write_count = udp_send_count = 0;

  /* 
   * Initialize the timer function pointer array.
   */

  for (i = 0; i < TIMER_COUNT; i++)
    timer_fn[i] = 0;

  timer_fn[Del_Ack] = (procref) tp_TFDelayedAck;
  timer_fn[Rexmit] = (procref) tp_TFRetransmit;
  timer_fn[Persist] = (procref) tp_TFPersist;
  timer_fn[Vegas] = (procref) tp_TFVegas;
  timer_fn[BE_Recv] = (procref) tp_TFBERecv;
  timer_fn[Select] = (procref) tp_TFSelect;
  timer_fn[TW] = (procref) tp_TFTimeWT;
  timer_fn[KA] = (procref) tp_TFKeepAlive;

  route_list_head->time = clock_ValueRough ();    /* turn on timer */
  /*
   * Do it here rather than in 
   * tp_mss() to make UDP happy 
   */
/* NOTE(review): PRELOAD is 0, so this cluster-preload loop is currently
 * compiled out. */
#define PRELOAD 0
  for (i = 0; i < PRELOAD; i++)
    {
      mbcluster = alloc_mbclus (1);
      mbcluster->c_count = 1;
      free_mclus (mbcluster);
    }

  /* Get the busted routing socket */
  route_sock = def_route -> route_sock_id;
  {
    extern GW_ifs gw_ifs;
    int rate_control;
    int mtu;
    int mss_ff;
    int irto;
    int cc;
    uint32_t addr;
    int      port;
    int32_t rand_number;
    int one = 1;
    void *s2;
    int rc;

    route_sock2 = other_route -> route_sock_id;
    s2 = scheduler.sockets[route_sock2].ptr;

    ((tp_Socket *) s2)->rt_route = other_route;
    LOG ("route_sock = %d rouet_sock2 %d\n", route_sock, route_sock2);

    /* ---- Interface "a" options, applied to route_sock ---- */
    if (gw_ifs.a.mtu) {
      mtu = gw_ifs.a.mtu ;
      LOG("aif_mtu start\n");
      rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_MTU, &mtu, sizeof (mtu));
      LOG("rc=%d\n", rc);
    }

    if (gw_ifs.a.smtu) {
        mtu = gw_ifs.a.smtu ; 
        LOG("aif_smtu start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_SMTU, &mtu, sizeof (mtu));
      LOG("rc=%d\n", rc);
    }  

    /* Congestion-control mode: exactly one of the four options below is
     * applied, based on the configured cc value. */
    if (gw_ifs.a.cc)  {
        cc = gw_ifs.a.cc ; 

  if (cc == NO_CONGESTION_CONTROL) {
    int zero = 0;
    LOG("SCPSTP_CONGEST option start\n");
    rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPSTP_CONGEST, &zero, sizeof (zero));
    LOG("rc=%d\n", rc);
  }

  if (cc == VJ_CONGESTION_CONTROL) {
    int one = 1;
    LOG("SCPSTP_VJ_CONGEST option start\n");
    rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPSTP_VJ_CONGEST, &one, sizeof (one));
      LOG("rc=%d\n", rc);
  }

  if (cc == VEGAS_CONGESTION_CONTROL) {
    int one = 1;
    LOG("SCPSTP_VEGAS_CONGEST option start\n");
    rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPSTP_VEGAS_CONGEST, &one, sizeof (one));
      LOG("rc=%d\n", rc);
  }

  if (cc == FLOW_CONTROL_CONGESTION_CONTROL) {
    int one = 1;
    LOG("SCPSTP_FLOW_CONTROL_CONGEST option start\n");
    rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPSTP_FLOW_CONTROL_CONGEST, &one, sizeof (one));
      LOG("rc=%d\n", rc);
  }
    }  

    if (gw_ifs.a.mss_ff) {
        mss_ff = gw_ifs.a.mss_ff; 
        LOG("aif_mss_ff start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_MSS_FF, &mss_ff, sizeof (mss_ff));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.irto) {
        irto = gw_ifs.a.irto; 
        LOG("aif_irto start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_IRTO, &irto, sizeof (irto));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.tcponly) {
        LOG("aif_tcponly start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_TCPONLY, &one, sizeof (one));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.div_addr) {
        addr = gw_ifs.a.div_addr; 
        LOG("aif_div_addr start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_DIV_ADDR, &addr, sizeof (addr));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.div_port) {
        port = gw_ifs.a.div_port; 
        LOG("aif_div_port start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_DIV_PORT, &port, sizeof (port));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.name) {
        LOG("aif_name start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_IFNAME, &gw_ifs.a.name, strlen (gw_ifs.a.name));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.scps_security) {
        LOG("aif_scps_security start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_SP_RQTS, &gw_ifs.a.scps_security, sizeof (gw_ifs.a.scps_security));
      LOG("rc=%d\n", rc);
    }  

    /* Rate control is always set: configured value or the default. */
    if (gw_ifs.a.rate) {
      rate_control = gw_ifs.a.rate;
      LOG("aif_rate start\n");
      rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_RATE, &rate_control,
       sizeof (rate_control));
      LOG("rc=%d\n", rc);
    } else {
      rate_control = GATEWAY_DEFAULT_RATE;
      LOG("SCPS_RATE option start\n");
      rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_RATE, &rate_control,
       sizeof (rate_control));
      LOG("rc=%d\n", rc);
    }


    if (gw_ifs.a.encrypt_ipsec_downstream) {
  int value;
        value = gw_ifs.a.encrypt_ipsec_downstream; 
        LOG("aif_encrypt_ipsec_downstream start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE,
                         SCPS_ENCRYPT_IPSEC,
                 &value, sizeof (value));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.encrypt_pre_overhead) {
      int value;
        value = gw_ifs.a.encrypt_pre_overhead; 
        LOG("aif_encrypt_pre_overhead start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE,
                         SCPS_ENCRYPT_PRE_OVERHEAD,
            &value, sizeof (value));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.encrypt_block_size) {
      int value;
      value = gw_ifs.a.encrypt_block_size; 
      LOG("aif_encrypt_block_size start\n");
      rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_ENCRYPT_BLOCK_SIZE, &value, sizeof (value));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.a.encrypt_post_overhead) {
  int value;
        value = gw_ifs.a.encrypt_post_overhead; 
        LOG("aif_encrypt_post_overhead start\n");
        rc = scps_setsockopt (route_sock, SCPS_ROUTE, SCPS_ENCRYPT_POST_OVERHEAD,
            &value, sizeof (value));
      LOG("rc=%d\n", rc);
    }  

    /* ---- Interface "b" options, applied to route_sock2 ---- */
    if (gw_ifs.b.mtu) {
      mtu = gw_ifs.b.mtu;
      LOG("bif_mtu start\n");
      rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_MTU, &mtu, sizeof (mtu));
      LOG("rc=%d\n", rc);
    }

    if (gw_ifs.b.smtu) {
        mtu = gw_ifs.b.smtu ;
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_SMTU, &mtu, sizeof (mtu));
      LOG("rc=%d\n", rc);
    }
    
    if (gw_ifs.b.cc)  {
        cc = gw_ifs.b.cc ; 

  if (cc == NO_CONGESTION_CONTROL) {
    int zero = 0;
          rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPSTP_CONGEST, &zero, sizeof (zero));
      LOG("rc=%d\n", rc);
  }

  if (cc == VJ_CONGESTION_CONTROL) {
    int one = 1;
          rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPSTP_VJ_CONGEST, &one, sizeof (one));
      LOG("rc=%d\n", rc);
  }

  if (cc == VEGAS_CONGESTION_CONTROL) {
    int one = 1;
          rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPSTP_VEGAS_CONGEST, &one, sizeof (one));
      LOG("rc=%d\n", rc);
  }

  if (cc == FLOW_CONTROL_CONGESTION_CONTROL) {
    int one = 1;
          rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPSTP_FLOW_CONTROL_CONGEST, &one, sizeof (one));
      LOG("rc=%d\n", rc);
  }
    }  

    if (gw_ifs.b.mss_ff) {
        mss_ff = gw_ifs.b.mss_ff ; 
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_MSS_FF, &mss_ff, sizeof (mss_ff));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.irto) {
        irto = gw_ifs.b.irto; 
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_IRTO, &irto, sizeof (irto));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.tcponly) {
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_TCPONLY, &one, sizeof (one));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.div_addr) {
        addr = gw_ifs.b.div_addr; 
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_DIV_ADDR, &addr, sizeof (addr));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.div_port) {
      port = gw_ifs.b.div_port; 
      rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_DIV_PORT, &port, sizeof (port));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.name) {
      rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_IFNAME, &gw_ifs.b.name, strlen (gw_ifs.b.name));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.scps_security) {
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_SP_RQTS, &gw_ifs.b.scps_security, sizeof (gw_ifs.b.scps_security));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.rate) {
      rate_control = gw_ifs.b.rate;
      rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_RATE, &rate_control, sizeof (rate_control));
      LOG("rc=%d\n", rc);
    } else {
      rate_control = GATEWAY_DEFAULT_RATE;
      rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_RATE, &rate_control, sizeof (rate_control));
      LOG("rc=%d\n", rc);
    }


    if (gw_ifs.b.encrypt_ipsec_downstream) {
      int value;
        value = gw_ifs.b.encrypt_ipsec_downstream; 
        rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_ENCRYPT_IPSEC, &value, sizeof (value));
      LOG("rc=%d\n", rc);
    }  

    if (gw_ifs.b.encrypt_pre_overhead) {
      int value;
      value = gw_ifs.b.encrypt_pre_overhead; 
      rc = scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_ENCRYPT_PRE_OVERHEAD,&value, sizeof (value));
      LOG("rc=%d\n", rc);

    }  

    /* NOTE(review): unlike every other option above, the next two calls
     * ignore the scps_setsockopt return value and log nothing — confirm
     * whether that is intentional. */
    if (gw_ifs.b.encrypt_block_size) {
        int value;
        value = gw_ifs.b.encrypt_block_size; 
        scps_setsockopt (route_sock2, SCPS_ROUTE,
                         SCPS_ENCRYPT_BLOCK_SIZE,
            &value, sizeof (value));
    }  

    if (gw_ifs.b.encrypt_post_overhead) {
      int value = gw_ifs.b.encrypt_post_overhead; 
      scps_setsockopt (route_sock2, SCPS_ROUTE, SCPS_ENCRYPT_POST_OVERHEAD,
            &value, sizeof (value));
    }  

    /* Seed the PRNG; rand_number ends up in [5000, 9999] but is not
     * used further in this function. */
    rand_number = clock_ValueRough ();
    srandom (rand_number);
    rand_number = random ();
    rand_number = rand_number % 5000;
    rand_number += 5000;
  }
  /* Drop the initial clusters that chain_init gave the routing socket's
   * application buffers; they are never used for data. */
  mbcluster = deq_mclus (((tp_Socket *) (scheduler.sockets[route_sock].ptr))->app_sbuff);
  free_mclus (mbcluster);
  mbcluster = deq_mclus (((tp_Socket *) (scheduler.sockets[route_sock].ptr))->app_rbuff);
  free_mclus (mbcluster);

  /* Kick in rate control timers here */
  route_list_head->prev_time = 1;

  /* 
   * We created a socket, keep running until we've 
   * closed out all the protocol sockets
   */

  tp_is_running = 1;

  scps_np_init ();

}

/*
 * Common socket initialization shared by the connect/listen paths.
 * Validates the socket state, assigns a pseudo file descriptor,
 * initializes all per-socket tunables, capabilities, buffers and
 * timers, and threads the socket onto tp_allsocs.
 *
 * Returns the socket id on success, -1 on failure (SET_ERR gives the
 * reason; on allocation failure all partially-acquired resources are
 * released).
 *
 * Fix: in the SockAlloc_Failure path the app_rbuff drain loop was
 * missing braces, so the clusters were never freed inside the loop and
 * free_mclus() was then called once with a NULL pointer after the loop
 * exhausted the chain.  The loop now matches the app_sbuff drain loop.
 */
int tp_Common (tp_Socket * s)
{
  int temp;
  struct timeval mytime;
  tp_Socket *sp;
  struct mbcluster *mbcluster;
  int i;

  /* Only brand-new or fully-closed sockets may be (re)initialized. */
  if (!((s->state == tp_StateNASCENT) || (s->state == tp_StateCLOSED)))
    {
      SET_ERR (SCPS_ESOCKOUTSTATE);
      return (-1);
    }

  /* Already initialized: just make sure the scratch mbuff exists. */
  if (s->Initialized)
    {
      if (!(s->scratch_buff))
        {
          s->scratch_buff = alloc_mbuff (MT_HEADER);
        }
      return (0);
    }

  /* Refuse a socket that is already threaded on the global list. */
  for (sp = tp_allsocs; sp != NULL; sp = sp->next) {
    if (s == sp) {
      SET_ERR (SCPS_ESOCKINUSE);
      return (-1);
    }
  }

  /* Get a pseudo-file descriptor for our socket */
  for (temp = 0; temp < MAX_SCPS_SOCKET; temp++) {
    if (scheduler.sockets[temp].ptr == NULL) {
      scheduler.sockets[temp].ptr = (caddr_t) s;
      s->sockid = temp;
      break;
    }
  }

  if (temp == MAX_SCPS_SOCKET) {
    SET_ERR (SCPS_ENOBUFS);
    LOG ("We are temporary out of sockets\n");
    return (-1);
  }

  /* Initialize blocking related parameters */
  s->read = s->write = 0;
  s->read_prev = s->read_next = 0x0;
  s->write_prev = s->write_next = 0x0;
  s->read_parent = s->write_parent = 0x0;

  s->display_now = 0x00;

  s->snack_delay = 0;  /* The default is to not delay the response to a SNACK */
  /* Copy local copies of the settable constants */
  s->ACKDELAY = TP_ACKDELAY;
  s->ACKFLOOR = TP_ACKFLOOR;
  s->RTOMIN = TP_RTOMIN;
  s->RTOMAX = TP_RTOMAX;
  s->MAXPERSIST_CTR = TP_MAXPERSIST_CTR;
  s->RTOPERSIST_MAX = TP_RTOPERSIST_MAX;
  s->RETRANSMITTIME = TP_RETRANSMITTIME;
  s->PERSISTTIME = TP_PERSISTTIME;
  s->TIMEOUT = TP_TIMEOUT;
  s->LONGTIMEOUT = TP_LONGTIMEOUT;
  s->TWOMSLTIMEOUT = TP_2MSLTIMEOUT;
  s->KATIMEOUT = TP_KATIMEOUT;
  s->bets_recv_timeout = BETS_RECEIVE_TIMEOUT;
  s->scratch_buff = alloc_mbuff (MT_HEADER);
  s->ack_freq = DEFAULT_ACK_FREQ;  /* set default ack behavior */
  s->cong_algorithm = VEGAS_CONGESTION_CONTROL;    /* set default cong-contr */
#ifdef MFX_TRANS
  s->MFX_SETTING = 3;
#else /* MFX_TRANS */
  s->MFX_SETTING = 0;
#endif /* MFX_TRANS */
  s->mfx_snd_una = 8;
  s->rt_route = NULL;
  s->rel_seq_num_urg_ptr = 0;
  s->gateway_runt_ack_ctr = 0;
  s->gateway_runt_ctr = 0;

  /* Congestion epoch control variables. */
  s->high_seq = 0;
  s->high_congestion_seq = 0;

  /* Use for persist expontential time out values */
  s->persist_shift = 0;
  s->maxpersist_ctr = 0;

  /* TCP-Vegas related parameters */
  s->VEGAS_ALPHA = ALPHA;
  s->VEGAS_BETA = BETA;
  s->VEGAS_GAMMA = GAMMA;
  s->VEGAS_SS   = 0;

  mytime.tv_sec = mytime.tv_usec = 0;

  s->capabilities = 0;    /* Defaults to just Window-Scaling */

  /* default to running with Timestamps and SNACK1 */

  /* Clear the ECBS (extended capability) option state. */
  s->ecbs1 = s->ecbs1_req = 0;
  s->ecbs2 = s->ecbs2_req = 0;
  s->ecbs1_len = s->ecbs1_req_len = 0;
  s->ecbs2_len = s->ecbs2_req_len = 0;
  { int iii;
    for (iii = 0; iii < MAX_ECBS_VALUE; iii++) {
    s->ecbs1_value [iii] = s->ecbs1_req_value [iii] = 0;
    s->ecbs2_value [iii] = s->ecbs2_req_value [iii] = 0;
    }
  }
  
  s->capabilities |= CAP_JUMBO;

  s->capabilities |= CAP_TIMESTAMP;
  s->sockFlags |= TF_REQ_TSTMP;

#ifdef OPT_SNACK1
  s->capabilities |= CAP_SNACK;
  s->sockFlags |= TF_REQ_SNACK1;
#endif /* OPT_SNACK1 */

  s->capabilities |= CAP_CONGEST;

#ifdef MFX_TRANS
  s->capabilities |= CAP_MFX;
#endif /* MFX_TRANS */

  s->thread = scheduler.current;  /* process id of this thread */
  s->timeout = TP_LONGTIMEOUT;  /* max retransmissions */
  tp_now = clock_ValueRough ();

  s->total_data = s->last_total_data = 0;

  s->funct_flags = 0x0;

/*
 * Note:  initializing lastuwein to seqnum results 
 * in a window size of 0 on the initial SYN. This 
 * should be at least 1 to allow the SYN out, but 
 * in our tests, we use SEQ_LEQ rather than SEQ_LT, 
 * which is correct.  When/if we support transaction
 * TP, in which data can accompany the SYN, we must 
 * set lastuwein to seqnum + the default window size, 
 * which should be at least 1 mss.
 */
  s->seqnum = s->snduna = s->seqsent = s->max_seqsent =
    s->old_seqsent = s->lastuwein = tp_now;
  /* s->lastuwein = s->seqnum++; */
  s->initial_seqnum = s->seqnum;
  s->high_hole_seq = s->seqnum;

  if (s->capabilities & CAP_BETS)
    s->BETS.InSndSeq = s->seqnum + 1;
  s->ack_delay = 0;
  s->rttcnt = -1;
  s->rtt = 0;
  /* s->t_rxtcur = 0; */
  s->flags = tp_FlagSYN;
  s->th_off = temp = MBLEN - TP_MAX_HDR;

  /* Set some default network layer parameters */
  s->np_rqts.tpid = IPPROTO_TCP;
  s->np_rqts.ipv4_dst_addr = 0;
  s->np_rqts.ipv4_src_addr = 0;
  s->np_rqts.timestamp.format = 0;
  s->np_rqts.timestamp.ts_val[0] = s->np_rqts.timestamp.ts_val[1] = 0;
  s->np_rqts.bqos.precedence = 0;
  s->np_rqts.bqos.routing = 0;
  s->np_rqts.bqos.pro_specific = 0;
  s->np_rqts.eqos.ip_precedence = 0;
  s->np_rqts.eqos.ip_tos = 0;
  s->np_rqts.cksum = 1;
  s->np_rqts.int_del = 0;

  if (!s->np_rqts.nl_protocol) {
      s->np_rqts.nl_protocol =  NL_DEFAULT;
  }

  /* Pre-fill the pseudo-header for checksumming. */
  if ( (s->np_rqts.nl_protocol == NL_PROTOCOL_IPV4) ||
       (s->np_rqts.nl_protocol == NL_PROTOCOL_NP) ) {
        s->ph.nl_head.ipv4.src = local_addr;
          s->ph.nl_head.ipv4.mbz = 0;
          s->ph.nl_head.ipv4.protocol = IPPROTO_TCP;
  }

  if (s->np_rqts.nl_protocol == NL_PROTOCOL_IPV6) {
          s->ph.nl_head.ipv6.protocol = IPPROTO_TCP;
  }

  if (!(s->receive_buff))
    {
      s->receive_buff = buff_init (MAX_MBUFFS, s);
      s->Out_Seq = buff_init (MAX_MBUFFS, s);
    }
  if (!(s->send_buff))
    s->send_buff = buff_init (MAX_MBUFFS, s);

  if ((!(s->receive_buff)) || (!(s->Out_Seq)) || (!(s->send_buff)))
    goto SockAlloc_Failure;

  /* 
   * With the addition of socket options, it is possible to
   * setup your send and receive buffers prior to doing a
   * tp_Connect() or tp_Listen(); We can (and probably should)
   * think about implementing a socket() call to a globally 
   * managed pool of sockets - the application then just has
   * to deal with a file-descriptor, rather than a socket.
   */

  if (!(s->app_rbuff))
    {
      if (!(s->app_rbuff = chain_init (BUFFER_SIZE)))
        goto SockAlloc_Failure;
    }

  if (!(s->app_sbuff))
    {
      if (!(s->app_sbuff = chain_init (BUFFER_SIZE)))
        goto SockAlloc_Failure;
    }


  /* Create our timers (SIGALRM blocked while the timer list changes) */
  sigprocmask (SIG_BLOCK, &alarmset, 0x0);
  for (i = 0; i < TIMER_COUNT; i++)
    {
      if (!(s->otimers[i] = create_timer ((void *) timer_fn[i], s, 0, NULL,
            NULL, i)))
        {
          sigprocmask (SIG_UNBLOCK, &alarmset, 0x0);
          SET_ERR (SCPS_ENOBUFS);
          return (-1);
        }
    }
  sigprocmask (SIG_UNBLOCK, &alarmset, 0x0);

  s->rcvwin = s->app_rbuff->max_size - 1;

  s->rttbest = 0;

  s->sockFlags |= TF_REQ_SCALE;  /* say we'll scale our windows */
  s->request_r_scale = 0;
  while ((s->request_r_scale < TP_MAX_WINSHIFT) &&
   ((TP_MAXWIN << s->request_r_scale) < s->app_rbuff->max_size))
    s->request_r_scale++;

  s->mbuff_fails = 0;
  s->cb_datin_fails = 0;

  s->thread = scheduler.current;

  /* Thread onto the head of the global TCP socket list. */
  s->Initialized = IPPROTO_TCP;
  s->next = tp_allsocs;
  if (s->next)
    s->next->prev = s;
  s->prev = NULL;

  tp_allsocs = s;
  return (s->sockid);

SockAlloc_Failure:
  /*
   * Give back any dynamic memory we grabbed so far...
   */

  if (s->scratch_buff)
    free_mbuff (s->scratch_buff);
  if (s->receive_buff)
    free (s->receive_buff);
  if (s->Out_Seq)
    free (s->Out_Seq);
  if (s->send_buff)
    free (s->send_buff);
  if (s->app_sbuff) {
    while ((mbcluster = deq_mclus (s->app_sbuff))) {
      mbcluster->c_count = 1;
      free_mclus (mbcluster);
    }
    free (s->app_sbuff);
  }
  if (s->app_rbuff) {
    /* BUGFIX: braces were missing here, leaking every cluster and then
     * calling free_mclus() on NULL once the chain was empty. */
    while ((mbcluster = deq_mclus (s->app_rbuff))) {
      mbcluster->c_count = 1;
      free_mclus (mbcluster);
    }
    free (s->app_rbuff);
  }

  scheduler.sockets[s->sockid].ptr = 0x0;
  SET_ERR (SCPS_ENOMEM);
  return (-1);
}

/*
 * Unthread a TP socket from the socket list, if it's there
 */
/*
 * Unthread a TP socket from the socket list, if it's there.
 * Releases every buffer, cluster and timer owned by the socket, fixes
 * up the prev/next links and all the auxiliary lists (fair-gateway
 * start/init pointers, select read/write queues, listen accept queues,
 * peer back-pointer), then frees the socket itself.
 *
 * Fix: the drain of ds->app_sbuff was guarded by "if (ds->app_rbuff)",
 * so a socket with a receive chain but no send chain would call
 * deq_mclus(NULL).  It now tests the buffer it drains.
 */
void tp_Unthread (tp_Socket * ds)
{
  int i;
  tp_Socket *s, **sp;
  struct mbcluster *mbcluster;

  ds->Initialized = 0;
  sp = &tp_allsocs;

  /* Release the pseudo file descriptor. */
  scheduler.sockets[ds->sockid].ptr = NULL;

  for (;;)
    {
      s = *sp;
      if (s == ds)
  {

    /*
     * We deallocate any mbuffs & clusters still owned by
     * the socket prior to unthreading the socket from
     * tp_allsocs. Note: At this point if we are closing
     * normally, There should not be anything left there
     * anyway!
     */

    kill_bchain (ds->send_buff->start);
    kill_bchain (ds->receive_buff->start);
    kill_bchain (ds->Out_Seq->start); 

    s->state = tp_StateCLOSED;
          sigprocmask (SIG_BLOCK, &alarmset, 0x0);
    /* Delete all our timers */
    for (i = 0; i < TIMER_COUNT; i++) {
      delete_timer (s->otimers[i], 0);
     }
          sigprocmask (SIG_UNBLOCK, &alarmset, 0x0);

    /*
     * Any data still in clusters should either be freed
     * by the kill_bchain() calls, or it is being
     * retained by the application (through the reference
     * counts)
     *
     * Except for the cases where the initial cluster provided
     * by chain_init is never used!
     */

  /* BUGFIX: this drained app_sbuff but tested app_rbuff. */
  if (ds->app_sbuff) {
    while ((mbcluster = deq_mclus (ds->app_sbuff))) {
      mbcluster->c_count = 1;
      free_mclus (mbcluster);
    }
  }

  if (ds->app_rbuff) {
    while ((mbcluster = deq_mclus (ds->app_rbuff))) {
      mbcluster->c_count = 1;
      free_mclus (mbcluster);
    }
  }

    /* Remove our buffer structures */
    free (ds->receive_buff);
    free (ds->send_buff);
    free (ds->app_sbuff);
    free (ds->app_rbuff);
    free (ds->Out_Seq);
    free_mbuff (ds->scratch_buff);

    if (ds->hole_ptr)
      free_mbuff (ds->hole_ptr);

    /* Prevent us from trying to free a phantom cluster */
    if (ds->scratch_buff)
             ds->scratch_buff->m_ext.offset = ds->scratch_buff->m_ext.len = 0;
    /* free_mbuff(ds->scratch_buff); */
    ds->scratch_buff = 0x0;
    *sp = s->next;
    /* Maintain the integrity of the prev pointer. */
    if ( *sp && (*sp)->next ) (*sp)->next->prev = *sp;

    if (*sp) {
      if (s->prev) {
      s->prev->next = *sp;
      }
          } else {
      if (s->prev) {
      s->prev->next = NULL;
      }
    }

    if (*sp) {
      if (s->prev) {
        (*sp) ->prev = s->prev;
      } else {
        (*sp) -> prev = NULL;
      }
    }

    /* 
     * When using the fair gateway, make sure that we don't leave a dangling
     * pointer to a freed socket!
     */
    if ( start_s==ds ) {
      start_s = ds->next?ds->next:tp_allsocs;
    }
    if ( init_s==ds ) {
      init_s = ds->next?ds->next:tp_allsocs;
    }
    /*
     * If gateway_select is on, we'd better get ourselves out of the
     * readable/writable lists.  The important cases handled by these
     * macros is when ds is the head of the read or write queue.
     */
    REMOVE_READ(ds);
    REMOVE_WRITE(ds);

    /*
     * If this socket is unlucky enough to be on the queue of partially connected
     * sockets or the queue of connected sockets listening for accept, we have
     * to get him off.
     */
    if ( ds->qhead ) {
      /*
       * Unlink myself from the partially connected and fully connected
       * lists of my qhead.  Note that q0 is the forward pointer for the
       * partially connected list and q is the forward pointer for the
       * fully connected list.
       */
      if ( ds->q0 ) ds->q0->q = ds->q;
      if ( ds->q ) ds->q->q0 = ds->q0;
      if ( ds->qhead->q0 == ds ) {
        ds->qhead->q0 = ds->q0;
      }
      if ( ds->qhead->q == ds ) {
        ds->qhead->q = ds->q;
      }
      tp_Abort(ds->qhead->sockid);
    } else {
      /*
       * When freeing a listening socket, kill off everything related to it.
       */
      if ( ds->state == tp_StateLISTEN ) {
        while ( ds->q0 ) tp_Abort(ds->q0->sockid);
        while (ds->q ) tp_Abort(ds->q->sockid);
      } else {
              if (ds->q0) {
    LOG ("q0 is set to something\n");
              }
              if (ds->q) {
    LOG ("q is set to something\n");
              }
            }
    }

    /* Clear the peer's back-pointer so it can't reach freed memory. */
          if ( (ds->peer_socket) && (ds->peer_socket->peer_socket == ds)) {
        ds->peer_socket->peer_socket = 0x0;
         }

    free (ds);
    ds = NULL;

    break;
  }
      if (s == NULL)
  break;
      sp = &s->next;
    }
  Validate_Thread ();
}

/*
 * Send pending TP data
 */
/*
 * Drain pending application data from s->app_sbuff into the transport
 * send queue (s->send_buff), segmenting it into mbuffs with headers,
 * and append a FIN segment if the socket is in WANTTOCLOSE/WANTTOLAST.
 * Finishes by kicking tp_NewSend().  Returns 0 on success, -1 when an
 * mbuff could not be allocated or the send queue is full for the FIN.
 */
int tp_Flush (tp_Socket * s)
{
  struct mbuff *mbuffer = NULL;
  int len;

  /*
   * Note: this is a dumb thing, check to see whether 
   * we check the send_buff.start, send_buff.snd_una 
   * or send_buff.send as the test...
   */

  /* if there is pending data, send it... but don't 
   * bother to even allocate an mbuffer if the read_head 
   * is NULL (no data to send)
   */

  /* if (((s->send_buff->start) && (s->app_sbuff->read_head) &&  */
  if (((s->app_sbuff->read_head) &&
       (s->send_buff->b_size < s->send_buff->max_size)) ||
      (s->state == tp_StateWANTTOLAST) ||
      (s->state == tp_StateWANTTOCLOSE))
    {
      /* We can enqueue data to go out, give it  a shot */

      if (s->app_sbuff->size > 0)
  {

    s->flags |= tp_FlagPUSH;

    /* While we can enqueue another mbuff, allocate 
     * another mbuff AND we can attach data to an 
     * mbuff, build a header and enqueue it to go 
     * out. Continue until we fail. 
     */

    while ((s->send_buff->b_size < s->send_buff->max_size) &&
     (mbuffer = alloc_mbuff (MT_HEADER)) &&
     ((len = mcput (mbuffer, s->app_sbuff->read_head,
        s->app_sbuff->read_off,
        s->maxdata, 1)) > 0))
      {
        /* Short segment: mark it a runt and realign the writer. */
        if (len < s->maxdata)
    {
      write_align (s->app_sbuff, 0, 1);
      mbuffer->m_flags |= M_RUNT;
    }

        tp_BuildHdr (s, mbuffer, 1);
        enq_mbuff (mbuffer, s->send_buff);
        read_align (s->app_sbuff, len, 1);
        s->app_sbuff->size -= len;
        s->app_sbuff->run_length -= len;
        if (!(s->send_buff->send))
    s->send_buff->send = s->send_buff->last;
      }
    /* NOTE(review): if the loop exits because b_size filled up,
     * mbuffer still points at the last ENQUEUED mbuff, and the
     * free_mbuff(mbuffer) paths below will then touch a buffer that
     * send_buff owns.  Whether that is safe depends on free_mbuff's
     * reference counting — confirm against the mbuff implementation. */
  }

      else
  mbuffer = alloc_mbuff (MT_HEADER);

      /* If we failed because we couldn't grab an mbuff, 
       * there is no point in even considering whether 
       * or not a FIN needs to go out.
       */

      if (!(mbuffer))
  {
    SET_ERR (SCPS_ENOBUFS);
    return (-1);
  }

      /* If we're here, we've got an mbuffer so we we've 
       * managed to enqueue all our data to send. Build 
       * a FIN if necessary.
       */
      if ((s->state == tp_StateWANTTOCLOSE) ||
    (s->state == tp_StateWANTTOLAST))
  {

    if (s->send_buff->b_size >= s->send_buff->max_size)
      {
        free_mbuff (mbuffer);
        SET_ERR (SCPS_ENOMEM);
        return (-1);
      }

    /*
     * Must allocate mbuffer before calling tp_BuildHdr 
     * and pass it in.  This is a hack to say that this 
     * segment will be queued for transmission, not sent 
     * immediately.  As a result, the maximum sequence 
     * number BUILT, not SENT should be used.  (If this 
     * were an ACK, we would allow tp_BuildHdr to allocate 
     * the mbuffer, and use the sequence number most 
     * recently SENT in the header.)
     */

    s->flags = tp_FlagACK | tp_FlagFIN;
    s->lastack = s->acknum;

    s->lastuwe = s->acknum + s->rcvwin;
    /* s->sockFlags &= ~SOCK_DELACK; */

    (void) tp_BuildHdr (s, mbuffer, 0);
    enq_mbuff (mbuffer, s->send_buff);
    if (!(s->send_buff->send))
      s->send_buff->send = s->send_buff->last;
    if (s->state == tp_StateWANTTOCLOSE)
      {
        s->state_prev = s->state;
        s->state = tp_StateFINWT1PEND;
       }
    else
      {
        s->state_prev = s->state;
        s->state = tp_StateLASTACKPEND;
       }
  }
      else
  free_mbuff (mbuffer);

      tp_NewSend (s, NULL, FALSE);
    }
  return (0);
}

/*
 * Fill in the acknowledgment number and receive-window fields of an
 * outgoing header.  The advertised window is the current rcvwin,
 * subject to: silly-window avoidance (advertise 0 rather than a tiny
 * window), the maximum representable window for our scale factor,
 * never shrinking below what was previously advertised, and a forced
 * zero window while the gateway peer's window has not opened.
 *
 * Fix: removed the dead "(s) &&" null test — s had already been
 * dereferenced unconditionally above it, so the check could never
 * catch a NULL pointer.
 */
void tp_WinAck (tp_Socket * s, tp_Header * th)
{
  uint32_t long_temp;

  th->acknum = htonl (s->acknum);
  long_temp = s->rcvwin;

  /* Silly-window avoidance: a window smaller than both a quarter of
   * the receive buffer and one segment is advertised as zero. */
  if ((long_temp < (uint32_t) (s->app_rbuff->max_size / 4)) &&
      (long_temp < (uint32_t) (s->maxseg)))
    long_temp = 0;
  /* Clamp to the largest window our scale factor can encode. */
  if (long_temp > (uint32_t) (TP_MAXWIN << s->rcv_scale))
    long_temp = (uint32_t) (TP_MAXWIN << s->rcv_scale);
  /* Never retract a window edge we have already advertised. */
  if (long_temp < (uint32_t) (s->lastuwe - s->acknum))
    long_temp = (uint32_t) (s->lastuwe - s->acknum);

  if (s->gateway_flags & GATEWAY_PEER_WIN_NOT_OPENED)
    {
      long_temp = 0;
    }

  /*
   * This is to prevent being bitten by Little Endian 
   * machines when we want to advertise a window greater 
   * than 65535, but we don't have window-scaling option 
   * available 
   */
  if ((long_temp > 0xFFFF) && (!(s->rcv_scale)))
    {
      th->window = 0xFFFF;
    }
  else
    {
      th->window = htons ((u_short) (long_temp >> s->rcv_scale));
    }
}

void Validate_Thread (void)
{
  tp_Socket *s;
  // If the only sockets on allsocs are now Routing sockets, 
  // and we want to terminate if tp_is_running = 1;
  for (s = tp_allsocs; s; s = s->next) {
    if (s->Initialized == IPPROTO_TCP)
        return;
  }

  for (s = (tp_Socket *) udp_allsocs; s; s = s->next) {
    if (s->Initialized == IPPROTO_UDP)
      return;
  }
  tp_is_running = 0;
  return;
}

/*
 * React to a source-quench: collapse the congestion window back to a
 * single segment, set ssthresh to two segments, and leave the linear
 * (congestion-avoidance) growth mode.
 */
void tp_quench (tp_Socket * s)
{
  s->snd_prevcwnd = s->maxseg;
  s->snd_cwnd = s->maxseg;
  s->snd_ssthresh = s->maxseg * 2;
  s->sockFlags &= ~TF_CC_LINEAR;
}

/*
 * Dispatch an SCMP notification to the TCP socket whose 4-tuple
 * matches the offending packet ("source" = same direction as the
 * packet, "dest" = reversed direction).
 *
 * Fix: the original loop relied on the loop condition to stop after a
 * match, but the "s = s->next" increment runs BEFORE the condition is
 * re-evaluated, so s ended up pointing at the socket AFTER the match
 * (or NULL) and tp_quench() was applied to the wrong socket, possibly
 * dereferencing NULL.  The loop now breaks on the match so s stays on
 * the matching socket.
 */
void tp_notify (int type, scps_np_rqts * rqts, tp_Header * tp)
{
  tp_Socket *s;
  int source = 0;
  int dest = 0;

  /* Walk the socket list to see which socket this message applies */
  for (s = tp_allsocs; s != NULL; s = s->next) {
    if ((s->np_rqts.ipv4_dst_addr == rqts->ipv4_dst_addr) &&
        (s->np_rqts.ipv4_src_addr == rqts->ipv4_src_addr) &&
        (s->myport == tp->srcPort) && (s->hisport == tp->dstPort)) {
      source = 1;
      break;
    }
    if ((s->np_rqts.ipv4_dst_addr == rqts->ipv4_src_addr) &&
        (s->np_rqts.ipv4_src_addr == rqts->ipv4_dst_addr) &&
        (s->myport == tp->dstPort) && (s->hisport == tp->srcPort)) {
      dest = 1;
      break;
    }
  }

  if (!(source || dest))
    return;

  switch (type) {
  case SCMP_SOURCEQUENCH:
    tp_quench (s);
    break;
  case SCMP_CORRUPT:
    break;
  default:
    break;
  }
}


