/******************************************************************************
 * fake-linux.c
 * 
 * Miscellaneous functions pulled from the Linux kernel that are too long to 
 * go in fake-linux.h. This file differs from the other tc_xxx.c files in 
 * that the functions defined here may have been hacked around!
 * 
 * Copyright (c) 1999-2000, K A Fraser
 */
/* Sections of this file require the following:
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include "fake-linux.h"
#include "multipath.h"
#include <stdio.h>
/* NOTE(review): these prototypes deviate from the standard C library
 * (strncmp takes size_t n; strtoul returns unsigned long) -- presumably
 * matched to this project's libc shim; TODO confirm against fake-linux.h. */
int strncmp(const char *s1, const char *s2, int n);
u32 strtoul(const char *nptr, char **endptr, int base);

/* Interface bookkeeping for the user-level stack. */
__u8 ultcp_cnt,ultcp_default;
struct net_device **ultcp_interface; // -P2

// #define BYTES_PER_BUFFER (8192+52) //(0x3000 - 176)
// #define NUM_BUFSW        32
// #define NUM_BUFSR        32

// #define BYTES_PER_BUFFER (1608+52) //(0x3000 - 176)
/* Fixed buffer geometry: payload + header slack per ring buffer. */
#define BYTES_PER_BUFFER (8192+52) //(0x3000 - 176)

#define NUM_BUFSW        1099  // MUST be same as ../private.h: #define BUFFERS_PER_RING
#define NUM_BUFSR        1099

extern char * eth; // the interface on which sender is connected to the next node... added by Manpreet. 

/* Stub/zeroed kernel statistic objects the borrowed code references. */
struct icmp_err icmp_err_convert[0];
struct ipv4_devconf ipv4_devconf = { 0 };
struct icmp_mib icmp_statistics = { 0 };
struct linux_mib net_statistics = { 0 };
atomic_t inet_sock_nr;
/* Socket buffer limits sized to exactly one full buffer ring each. */
__u32 sysctl_rmem_default = (BYTES_PER_BUFFER*NUM_BUFSR); /* SK_RMEM_MAX */
__u32 sysctl_wmem_default = (BYTES_PER_BUFFER*NUM_BUFSW); /* SK_WMEM_MAX */
__u32 sysctl_rmem_max = (BYTES_PER_BUFFER*NUM_BUFSR); /* SK_RMEM_MAX */
__u32 sysctl_wmem_max = (BYTES_PER_BUFFER*NUM_BUFSW); /* SK_WMEM_MAX */


/* Helper functions provided outside the stack. */
u_char *alloc_fixed_header(struct user_pcb *pcb);
void free_fixed_header(struct user_pcb *pcb, u_char *hdr);
u_char *alloc_fixed_data(struct user_pcb *pcb, int size);
void free_fixed_data(struct user_pcb *pcb, u_char *data, int size);
int tx_skb(struct user_pcb *pcb, struct sk_buff *skb);
//void upload_rx_buf(struct user_pcb *pcb, struct sk_buff *skb); - P2

/* tcp_recalc_ssthresh: recompute the slow-start threshold after a loss
 * event.  With COUPLE_ALL, subflows of a multipath connection share a
 * coupled congestion response; otherwise (or for single-subflow
 * connections) the standard "half of min(FlightSize, cwnd)" rule
 * applies (RFC 2581 style, floored at 2 segments). */
__u32 tcp_recalc_ssthresh(struct tcp_opt *tp,struct multipath_pcb* mpcb)
{

#if COUPLE_ALL
  /* FlightSize in segments, capped by the retransmit-aware estimate. */
  u32 FlightSize = (tp->snd_nxt - tp->snd_una)/tp->mss_cache;
  FlightSize = min(FlightSize, tcp_packets_in_flight(tp));

  if (!mpcb||mpcb->cnt_established<=1){
    /* Single flow: classic halving rule. */
    return max(min(FlightSize, tp->snd_cwnd) >> 1, 2);
  }
  else {
    //return max(min(FlightSize,tp->snd_cwnd) - get_total_cwnd(mpcb)/2,2);

    /* Coupled response: subtract a fraction of the aggregate window
     * (recomputed with this subflow's current value substituted in)
     * rather than halving this subflow alone.  B is the coupling
     * shift defined elsewhere -- assumed a small constant; TODO confirm. */
    int d = get_total_cwnd(mpcb);

    int crt = min(FlightSize,tp->snd_cwnd);

    d -= get_crt_cwnd(tp);
    d += crt;

    d >>= B;

    int res;

    if (crt<d)
      res = 2;
    else 
      res = max(crt-d,2);

    //printf("New CWND %d, old CWND %d, total CWND %d\n",res,crt,d<<B);
    return res;
  }
#else 
    /* Uncoupled build: standard Reno-style ssthresh. */
    u32 FlightSize = (tp->snd_nxt - tp->snd_una)/tp->mss_cache;
    FlightSize = min(FlightSize, tcp_packets_in_flight(tp));
    return max(min(FlightSize, tp->snd_cwnd) >> 1, 2);
#endif
}

/* skb_queue_order: insert skb into 'list' keeping it sorted by
 * ascending multipath data_seq.  Walks backwards from the tail, so the
 * common case (in-order arrival) is O(1).
 *
 * NOTE(review): the 'crt != list' comparison relies on the kernel
 * sk_buff_head trick where the list head doubles as the terminating
 * node of the prev/next ring -- reaching it means every queued skb has
 * a larger data_seq, so the new skb goes at the head. */
void skb_queue_order(struct sk_buff_head* list,struct sk_buff* skb){
  struct sk_buff* crt = skb_peek_tail(list);

  if (!crt){
    /* Empty list: trivial append. */
    __skb_queue_tail(list, skb);
  }
  else {
    /* Scan back past every entry with a larger data_seq. */
    while (crt!=list&&TCP_SKB_CB(crt)->data_seq>TCP_SKB_CB(skb)->data_seq)
      crt = crt->prev;

    if (crt==list){
      __skb_queue_head(list, skb);
    }
    else {
      /* Insert immediately after the first smaller-or-equal entry. */
      __skb_append(crt,skb);
    }
  }
}

/* slim_tcp_snd_test: lightweight variant of tcp_snd_test() used when no
 * skb exists yet -- decides whether 'max_len' more bytes may be sent on
 * this subflow.  Returns 1 if sending is allowed, 0 otherwise.
 *
 * Fix vs. original: mpcb was dereferenced unconditionally, but
 * mpcb_from_sock() can return NULL for a regular (non-multipath)
 * socket; tcp_snd_test() already guards this with 'mpcb ? ... : 1',
 * so do the same here. */
int slim_tcp_snd_test(struct sock* sk, int max_len){
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    struct multipath_pcb* mpcb;
    int cnt_subflows;

    mpcb = mpcb_from_sock(sk->socket);
    cnt_subflows = mpcb ? mpcb->cnt_subflows : 1;

    /* Collapse cwnd after an idle period longer than the RTO (same
     * rule as tcp_snd_test()). */
    if (tp->packets_out==0 && (s32)(tcp_time_stamp - tp->rcv_tstamp) > tp->rto){
      tp->snd_cwnd = min(tp->snd_cwnd, 2);
    }

    //cwnd and snd_wnd throttling
    if (!((tcp_packets_in_flight(tp) < tp->snd_cwnd))){
      return 0; 
    }
    
    /* Each subflow may only claim an equal share of the receive window. */
    if (tp->snd_wnd / cnt_subflows <= (tp->snd_nxt - tp->snd_una)+max_len){
      printf("RCV window limited snd_wnd %d cnt_subflows %d , available %d !\n",tp->snd_wnd, cnt_subflows,(tp->snd_nxt - tp->snd_una)+max_len);
      return 0;
    }

    /* Never push new data while a retransmit is outstanding. */
    if (!(tp->retransmits == 0)){
      printf("Retransmit limited!\n");
      return 0; 
    }

    return 1; 
}

/* tcp_snd_test: decide whether 'skb' may be transmitted now on sk.
 * Returns 1 to send, 0 to queue.  Applies (in order): Nagle (disabled
 * in this port), idle-restart cwnd collapse, cwnd throttling (FIN is
 * exempt), per-subflow receive-window share, and a retransmit hold. */
int tcp_snd_test(struct sock *sk, struct sk_buff *skb)
{
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    struct multipath_pcb* mpcb;
    int nagle_check = 1;

    mpcb = mpcb_from_sock(sk->socket);

    //    printf("\n[snd_test] sock rcv buf is %d, send buf is %d", sk->rcvbuf, sk->sndbuf); 

    /*	RFC 1122 - section 4.2.3.4
     *
     *	We must queue if
     *
     *	a) The right edge of this frame exceeds the window
     *	b) There are packets in flight and we have a small segment
     *	   [SWS avoidance and Nagle algorithm]
     *	   (part of SWS is done on packetization)
     *	c) We are retransmiting [Nagle]
     *	d) We have too many packets 'in flight'
     *
     * 	Don't use the nagle rule for urgent data (or
     *	for the final FIN -DaveM).
     */

    //DISABLE NAGLE / CORK CHECKS
    /*if (
	(sk->nonagle == 2 && (skb->len < tp->mss_cache)) ||
	(
	 !sk->nonagle &&
	 skb->len < (tp->mss_cache >> 1) &&
	 tp->packets_out &&
	 !(TCP_SKB_CB(skb)->flags & (TCPCB_FLAG_URG|TCPCB_FLAG_FIN))
	 )
	)
	nagle_check = 0;*/



    /* FIN always passes the (disabled) Nagle test. */
    if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
	nagle_check = 1;

    /*
     * Reset CWND after idle period longer rto. Actually, it would
     * be better to save last send time, but VJ in SIGCOMM'88 proposes
     * to use keepalive timestamp. Well, it is not good, certainly,
     * because SMTP is still broken, but it is better than nothing yet.
     */
    if (tp->packets_out==0 && (s32)(tcp_time_stamp - tp->rcv_tstamp) > tp->rto){

      /*#ifdef CWND_TEST
    snd_test_count[tp->id]++; 
    printf("\n\nsnd test message for connection %d for %d times... Reducing cwnd from %u to 2\n", tp->id, snd_test_count[tp->id], tp->snd_cwnd); 
    #endif*/

      tp->snd_cwnd = min(tp->snd_cwnd, 2);

      /*#ifdef PLOT_CWND
      fprintf(cwnd_file[tp->id], "%d\t%u\t%u\t%u\t%u\t%u\t%u\t%u\n", tp->id, (unsigned long) jiffies, tp->snd_cwnd, tp->snd_ssthresh, tp->packets_out, tp->snd_wnd - (tp->snd_nxt - tp->snd_una), tp->snd_wnd, tp->snd_nxt - tp->snd_una); 
      fflush(cwnd_file[tp->id]); 

#ifdef PLOT_AGGREGATE
      fprintf(all_cwnd_file, "%u\t%d\t", (unsigned long) jiffies, aggregate->count); 
      //      fprintf(all_cwnd_file, "%u\t", (unsigned long) jiffies); 
      tot_cwnd = 0; 
      for (ddd=0; ddd<aggregate->count; ddd++)
	{
	  fprintf(all_cwnd_file, "%d\t%u\t%u", aggregate->connections[ddd]->id, aggregate->connections[ddd]->snd_cwnd, aggregate->connections[ddd]->packets_out); 
	  tot_cwnd += aggregate->connections[ddd]->snd_cwnd; 
	}
      fprintf(all_cwnd_file, "%u\n", tot_cwnd); 
      
#endif
#endif
*/
    }

    /* Don't be strict about the congestion window for the
     * final FIN frame.  -DaveM
     */

    // tst1 && (tst2 || tst3) && tst4 && tst5 

    if (!(nagle_check))
      {
	return 0; 
      }

    //cwnd and snd_wnd throttling
    if (!((tcp_packets_in_flight(tp) < tp->snd_cwnd) || (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)))
      {
	//	printf("yo: CWND throttled me, conn=%d ..., jiffies=%u, packets_in flight=%d, snd_cwnd =%u\n", tp->id, (unsigned long) jiffies, tcp_packets_in_flight(tp), tp->snd_cwnd); 
	return 0; 
      }
    

    
    /* Each subflow gets an equal 1/cnt_subflows share of snd_wnd
     * (whole window for a non-multipath socket). */
    if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd/(mpcb?mpcb->cnt_subflows:1)))
      {
	//	printf("yo: snd_wnd killed me, conn=%d ..., jiffies=%u, trying to send %u, una=%u, snd_wnd=%u\n", tp->id, (unsigned long) jiffies, TCP_SKB_CB(skb)->end_seq, tp->snd_una, tp->snd_wnd); 
	printf("\nReceive window limited end_seq is %u snd_una %u snd_wnd is %u, cnd subflows %d!\n",TCP_SKB_CB(skb)->end_seq, tp->snd_una,tp->snd_wnd,mpcb?mpcb->cnt_subflows:1);
	return 0; 
      }

    /* Hold new data while a retransmission is pending. */
    if (!(tp->retransmits == 0))
      {
	//	printf("yo: non-zero retransmits... jiffies=%u, conn=%d\n", (unsigned long) jiffies, tp->id); 
	return 0; 
      }

    //    printf("all tests passed...jiffies=%u, packets_out=%u\n", (unsigned long) jiffies, tp->packets_out); 
    return 1; 
}




/* multipath_tcp_select_window: choose the receive window to advertise
 * for a multipath connection.  The window is selected at the mpcb
 * level and then mirrored into every established subflow so that all
 * subflows advertise the same shared window.  Returns the window
 * right-shifted by the connection's RFC1323 scale factor. */
u16 multipath_tcp_select_window(struct multipath_pcb* mpcb){
  u32 cur_win = multipath_tcp_receive_window(&(mpcb->opt));
  u32 new_win = __multipath_tcp_select_window(mpcb);
  u32 new = new_win;

    /* Never shrink the offered window */
  if(new_win < cur_win) {
    /* Danger Will Robinson!
     * Don't update rcv_wup/rcv_wnd here or else
     * we will not be able to advertise a zero
     * window in time.  --DaveM
     */
    new_win = cur_win;
  } else {
    subflow_t tmp;

    mpcb->opt.rcv_wnd = new_win;
    mpcb->opt.rcv_wup = mpcb->opt.rcv_nxt;
    //    printf("RCV Window %d, rcv_next %d\n",mpcb->opt.rcv_wnd,mpcb->opt.rcv_nxt);

    /* Propagate the committed window to each established subflow
     * (state 1 == TCP_ESTABLISHED -- presumably; TODO confirm). */
    for (tmp = mpcb->connection_list;tmp;tmp=tmp->next){
      if (tmp->pcb->sk->state!=1)
	continue;
      struct tcp_opt *tp = &(tmp->pcb->sk->tp_pinfo.af_tcp);      
      tp->rcv_wnd = new_win;
      tp->rcv_wup = tp->rcv_nxt;
    }
  }
  //  printf("CURWIN %d %d %d\n",cur_win,new,new_win>>mpcb->opt.rcv_wscale);

  return new_win >> mpcb->opt.rcv_wscale;
}


/* tcp_select_window: pick the receive window to advertise on an
 * outgoing segment.  Multipath sockets delegate to the mpcb-wide
 * routine; plain sockets follow the classic Linux rule of never
 * shrinking an already-offered window.  Returns the window after
 * RFC1323 scaling. */
u16 tcp_select_window(struct sock *sk)
{
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    struct multipath_pcb *mpcb = mpcb_from_sock(sk->socket);

    if (mpcb)
        return multipath_tcp_select_window(mpcb);

    u32 offered = tcp_receive_window(tp);
    u32 chosen  = __tcp_select_window(sk);

    if (chosen >= offered) {
        /* Window grew (or held steady): commit it. */
        tp->rcv_wnd = chosen;
        tp->rcv_wup = tp->rcv_nxt;
    } else {
        /* Never shrink the offered window.
         * Danger Will Robinson!  Don't update rcv_wup/rcv_wnd here or
         * else we will not be able to advertise a zero window in
         * time.  --DaveM
         */
        chosen = offered;
    }

    /* RFC1323 scaling applied */
    return chosen >> tp->rcv_wscale;
}

unsigned int tcp_current_mss(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    struct dst_entry *dst = sk->dst_cache;
    int mss_now = tp->mss_cache; 

    if (dst && dst->pmtu != tp->pmtu_cookie)
	mss_now = tcp_sync_mss(sk, dst->pmtu);

    if(tp->sack_ok && tp->num_sacks)
	mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
		    (tp->num_sacks * TCPOLEN_SACK_PERBLOCK));

    if (mpcb_from_sock(sk->socket))
      mss_now -= 8;
    return mss_now > 8 ? mss_now : 8;
}

/* tcp_dump_stats: dump out useful info for a connection... */
void tcp_dump_stats(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    printf("\nDumping stats for conn=%d: ", tp->id); 
    printf("write=%d error=%d receive=%d ooo=%d\t", 
           sk->write_queue.qlen, sk->error_queue.qlen, 
           sk->receive_queue.qlen, tp->out_of_order_queue.qlen);
    printf("RX SIDE: rcv_nxt=%08x rcv_wup=%08x rcv_wnd=%d copied_seq=%08x\t",
           tp->rcv_nxt, tp->rcv_wup, tp->rcv_wnd, tp->copied_seq); 
    printf("TX SIDE: snd_una=%08x snd_nxt=%08x snd_wnd=%d write_seq=%08x\t",
           tp->snd_una, tp->snd_nxt, tp->snd_wnd, tp->write_seq);
    printf("wmem_alloc=%d sndbuf=%d (diff=%d)\t",
           atomic_read(&sk->wmem_alloc), sk->sndbuf, 
           sk->sndbuf-atomic_read(&sk->wmem_alloc));
    printf("InSegs=%ld, OutSegs=%ld, Retrans=%ld, InErr=%ld, OutRst=%ld\n\n",
           tcp_statistics.TcpInSegs, tcp_statistics.TcpOutSegs,
           tcp_statistics.TcpRetransSegs, tcp_statistics.TcpInErrs,
           tcp_statistics.TcpOutRsts);
           
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

/*
 * sock_setsockopt: generic socket-level option handling.
 *
 * In this user-level port virtually every SO_* option has been stubbed
 * out (the original Linux 2.4 implementation sat here commented out);
 * the live behaviour is:
 *   - SO_DONTLINGER (when defined): clear sk->linger and return 0;
 *   - any other option: validate the arguments, then silently accept
 *     (SO_SNDLOWAT etc. are not settable per 1003.1g 5.3).
 *
 * Returns 0 on success, -EINVAL for a bad optlen, -EFAULT if the value
 * cannot be read from optval.
 *
 * Fixes vs. original: the dead 200+ line commented-out option table and
 * the unused CONFIG_FILTER variable were removed, and the optlen check
 * now rejects negative lengths (the old signed/unsigned comparison
 * 'optlen < sizeof(int)' treated negative optlen as huge and let it
 * through to get_user()).
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char *optval, int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		sk->linger = 0;
		return 0;
	}
#endif

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;
	(void)valbool;	/* kept for when option handling is re-enabled */

	lock_sock(sk);

	/* All remaining options are intentionally ignored in this port:
	 * accept them without error so applications keep working. */

	release_sock(sk);
	return ret;
}


/*
 * sock_getsockopt: generic socket-level option query.
 *
 * Only SO_SNDBUF and SO_RCVBUF are actually supported by this
 * user-level port; any other option is reported on stdout and ignored.
 * (The full Linux 2.4 option table sat here commented out and has been
 * removed.)
 *
 * Returns 0 on success, -EFAULT if the length cannot be read from
 * *optlen, -EINVAL for a negative length.
 *
 * Fix vs. original: 'len' was declared unsigned int, which made the
 * 'len < 0' sanity check unconditionally false; it is now signed so a
 * negative user-supplied length is rejected.
 */
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char *optval, int *optlen)
{
	struct sock *sk = sock->sk;
	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SO_SNDBUF:
		*(int *)optval = sk->sndbuf;
		*optlen = sizeof(int);
		break;

	case SO_RCVBUF:
		*(int *)optval = sk->rcvbuf;
		*optlen = sizeof(int);
		break;

	default:
		printf("Other option in sock)getsockopt %d!\n", optname);
	}

	/* Clamp the caller's length to what we can provide. */
	if ((unsigned int)len > lv)
		len = lv;

	return 0;
}


/******************************************************************************
 * SKBUFF HACKS............
 */

/* Slab cache for sk_buff heads, defined elsewhere in the port. */
extern kmem_cache_t *skbuff_head_cache;

/* XXX Corresponds to definition in ipv4.h */
#define MAX_HEADER_LEN 176

/*
 * alloc_skb: allocate an sk_buff head from the slab cache and its data
 * area from the per-connection fixed buffer pool, laying out either a
 * split header/data pair (HDR_SPLIT) or a single buffer with
 * MAX_HEADER_LEN reserved at the front.
 *
 * Fixes vs. original:
 *  - '#ifdef HDR_SLPIT' and '#ifdef HR_SLPIT' were typos for
 *    HDR_SPLIT, so with the split layout enabled the header was never
 *    allocated and never freed on the error path;
 *  - the error path freed skb->h_head, which is not yet initialised at
 *    that point -- it must free the freshly allocated 'hdr'.
 *
 * Returns the new skb, or NULL on allocation failure.
 */
struct sk_buff *alloc_skb(struct sock *sk, unsigned int size)
{
    struct sk_buff *skb;
    u8 *hdr, *data;

    ASSERT(sk != NULL);

    /* Get the HEAD */
    if ( !(skb = kmem_cache_alloc(skbuff_head_cache, 0)) ) goto nohead;

    /* Get the DATA. Size must match skb_add_mtu(). */
    size = ((size + 15) & ~15);
#ifdef HDR_SPLIT
    if ( !(hdr  = alloc_fixed_header(sk->pcb)) ) goto nohdr;
#endif
    if ( !(data = alloc_fixed_data(sk->pcb, size + sizeof(atomic_t))) )
        goto nodata;

    /*
     * Allow flow control based on memory uage (KAF)
     */
    skb->truesize = BYTES_PER_BUFFER; //size + sizeof(struct sk_buff);

    /* Load the data pointers. */
#ifdef HDR_SPLIT
    skb->d_head = skb->d_data = skb->d_tail = data;
    skb->d_end  = data + size;
    skb->h_head = skb->h_data = skb->h_tail = hdr;
    skb->h_end  = hdr + MAX_HEADER_LEN;
#else
    skb->h_head = skb->h_data = skb->h_tail = data;
    skb->d_head = skb->d_data = skb->d_tail = skb->h_end =
        data + MAX_HEADER_LEN;
    skb->d_end  = data + size;
#endif

    /* Set up other state */
    skb->len     = 0;
    skb->cloned  = 0;
    skb->rx_buf  = 0;
    skb->data_sk = sk; // KAF -- for when it comes to clean up time.

    atomic_set(&skb->users, 1);
    atomic_set(skb_datarefp(skb), 1);

    return(skb);

 nodata:
#ifdef HDR_SPLIT
    free_fixed_header(sk->pcb, hdr);
 nohdr:
#endif
    kmem_cache_free(skbuff_head_cache, skb);
 nohead:
    return NULL;
}


/*
 *	Slab constructor for a skb head. 
 */ 
/*
 * Slab constructor for an skb head: reset every field a freshly
 * allocated sk_buff is expected to have cleared.
 */
static inline void skb_headerinit(void *p, kmem_cache_t *cache,
				  unsigned long flags)
{
    struct sk_buff *skb = p;

    skb->next = NULL;
    skb->prev = NULL;
    skb->list = NULL;
    skb->sk = NULL;
    skb->dst = NULL;
    skb->destructor = NULL;
    skb->ip_summed = 0;
    skb->pkt_type = PACKET_HOST;	/* Default type */
    memset(skb->cb, 0, sizeof(skb->cb));
}


/*
 * Free an skbuff by memory without cleaning the state. 
 */
void kfree_skbmem(struct sock *sk, struct sk_buff *skb)
{
  //bug here is sk is null. Not sure how to change it.
  if ( (!skb->cloned || atomic_dec_and_test(skb_datarefp(skb))) && sk->pcb )
    {
      if ( skb->rx_buf )
        {
            //upload_rx_buf(sk->pcb, skb); - P2
	    free(skb->h_head);
        }
        else
        {
#ifdef HDR_SPLIT
            free_fixed_header(sk->pcb, skb->h_head);
            free_fixed_data(sk->pcb, skb->d_head, skb->d_end - skb->d_head);
#else
            free_fixed_data(sk->pcb, skb->h_head, skb->d_end - skb->h_head);
#endif
        }
    }     
   
    kmem_cache_free(skbuff_head_cache, skb);
}

/*
 * Free an sk_buff. Release anything attached to the buffer. Clean the state.
 */
void __kfree_skb(struct sk_buff *skb)
{
    struct sock *sk = skb->data_sk; // KAF -- we are interested in data owner.

#ifndef NDEBUG
    if (skb->list) {
        printk(KERN_WARNING "Warning: kfree_skb passed an skb still "
               "on a list (from %p).\n", NET_CALLER(skb));
        *(int*)0 = 0;
    }
#endif

    dst_release(skb->dst);
    if ( skb->destructor ) skb->destructor(skb);
    skb_headerinit(skb, NULL, 0);  /* clean state */
    kfree_skbmem(sk, skb);
}


/*
 * Duplicate an sk_buff. The new one is not owned by a socket.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{
    struct sk_buff *n;

    //    printf("\ngoing for kmem_cache_alloc..."); fflush(stdout); 
    if ( (n = kmem_cache_alloc(skbuff_head_cache, gfp_mask)) ) 
    {
      //      printf("\nkmem_cache_alloc returned..."); fflush(stdout); 
        memcpy(n, skb, sizeof(*n));
        atomic_inc(skb_datarefp(skb));
        skb->cloned = 1;
        
        dst_clone(n->dst);
        n->cloned = 1;
        n->next = n->prev = NULL;
        n->list = NULL;
        n->sk = NULL;
        atomic_set(&n->users, 1);
        n->destructor = NULL;
    }

    return n;
}


/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable. 
 */
/* skb_over_panic: fatal -- a put ran past the end of the buffer.
 * Logs the offending function, then crashes deliberately via a null
 * store so a backtrace is produced. */
void skb_over_panic(struct sk_buff *skb, int sz, char *fn)
{
	printk("skput:over: %s:%d put:%d\n", fn, skb->len, sz);
	*(int*)0 = 0;	/* deliberate crash */
}

/* skb_under_panic: fatal -- a push ran past the start of the buffer.
 * Same deliberate-crash strategy as skb_over_panic(). */
void skb_under_panic(struct sk_buff *skb, int sz, char *fn)
{
        printk("skput:under: %s:%d put:%d\n", fn, skb->len, sz);
	*(int*)0 = 0;	/* deliberate crash */
}


/*
 * skb_realloc_headroom: unported -- the header/payload split layout of
 * this stack means headroom is pre-allocated, so this should never be
 * needed; panic if it is ever reached.
 *
 * Fix vs. original: removed the unused locals 'n' and 'offset'.
 */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, int newheadroom)
{
        /*
         * XXX KAF: is this only called for header space? We should alloc
         * enough already -- possibly a big warning, followed by shifting
         * header data down in memory to make more headroom.
         */
        panic("skb_realloc_headroom: not yet converted for h/p slit"); 
        return(NULL);
}




/******************************************************************************
 * SOCK HACKS......
 */

/*
 * Allocate a skb from the socket's send buffer.
 */
/*
 * Allocate an skb charged to the socket's send buffer.  Unless 'force'
 * is set, fail when the write-memory accounting is already at the
 * sndbuf limit.  'priority' is unused in this port.  Returns NULL on
 * failure.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	struct sk_buff *skb;

	/* Respect the send-buffer limit unless the caller forces it. */
	if (!force && atomic_read(&sk->wmem_alloc) >= sk->sndbuf)
		return NULL;

	skb = alloc_skb(sk, size);
	if (skb == NULL)
		return NULL;

	skb_set_owner_w(skb, sk);
	return skb;
}


/* 
 * Write buffer destrtuctor automatically called from kfree_skb. 
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
        if ( atomic_read(skb_datarefp(skb)) == 1 )
        {
	    // added by Wen Xu
	    if (atomic_read(&sk->wmem_alloc) >= skb->truesize) {
              /* In case it might be waiting for more memory. */
              atomic_sub(skb->truesize, &sk->wmem_alloc);
              sk->write_space(sk);
	    }
        }
	sock_put(sk);
}


/* 
 * Read buffer destructor automatically called from kfree_skb. 
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->rmem_alloc);
}


extern kmem_cache_t *sk_cachep;

/*
 *	All socket objects are allocated here. This is for future
 *	usage.
 */
/*
 * Allocate a struct sock from the slab cache.  When zero_it is set the
 * object is cleared, stamped with the address family, and its lock is
 * initialised.  Returns NULL if the cache allocation fails.
 */
struct sock *sk_alloc(int family, int priority, int zero_it)
{
	struct sock *sk = kmem_cache_alloc(sk_cachep, priority);

	if (sk == NULL)
		return NULL;

	if (zero_it) {
		memset(sk, 0, sizeof(*sk));
		sk->family = family;
		sock_lock_init(sk);
	}

	return sk;
}

/* sk_free: destroy a socket object -- run its destructor, unlink it
 * from any listener's active-connection list, warn about option-memory
 * leaks, and return it to the slab cache. */
void sk_free(struct sock *sk)
{
	if (sk->destruct)
		sk->destruct(sk);

        /*
         * Yech! Ought to lock listener here (we use listener lock as lock
         * for modifying it's active conn queue as well).
         * 
         * No problem while we're using a non-preemptive thread lib though!
         */
        if ( sk->lprev ) sk->lprev->lnext = sk->lnext;
        if ( sk->lnext ) sk->lnext->lprev = sk->lprev;

	if (atomic_read(&sk->omem_alloc))
            printk(KERN_DEBUG "sk_free: optmem leakage (%d bytes) detected.\n", atomic_read(&sk->omem_alloc));

	kmem_cache_free(sk_cachep, sk);
}


/* __release_sock: drain the socket backlog, feeding each queued skb to
 * the protocol's backlog_rcv handler, then reset the queue.
 * NOTE(review): the do/while dereferences sk->backlog.head before any
 * NULL check -- presumably callers only invoke this with a non-empty
 * backlog; TODO confirm at the call sites. */
void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->backlog.head;
	do {
		struct sk_buff *next = skb->next;
		skb->next = NULL;
		sk->backlog_rcv(sk, skb);
		skb = next;
	} while(skb != NULL);
	sk->backlog.head = sk->backlog.tail = NULL;
}


/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think, these locks should be removed for datagram sockets.
 */
static void sock_wait_for_wmem(struct sock * sk)
{
    sk->socket->flags &= ~SO_NOSPACE;
    
    bh_lock_sock(sk);

    for (;;) {
        if ( signal_pending(current) ||
             (atomic_read(&sk->wmem_alloc) < sk->sndbuf) ||
             (sk->shutdown & SEND_SHUTDOWN) ||
             sk->err )
        {
            break;
        }

        /* Tell the NIC we're waiting for tx space. */
        //request_tx_space_upcall(sk->pcb);
        pth_cond_await(sk->sleep, &(sk->lock.m), NULL); // KAF: _not_ sock_sleep!
    }

    bh_unlock_sock(sk);
}


/*
 *	Generic send/receive buffer handlers
 */
/*
 *	Generic send/receive buffer handlers
 *
 * sock_alloc_send_skb: allocate an skb of 'size' bytes charged to sk's
 * send buffer, optionally retrying with the smaller 'fallback' size,
 * and blocking (unless noblock) until write memory is available.  On
 * failure returns NULL with the error in *errcode.
 */
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 
			unsigned long fallback, int noblock, int *errcode)
{
	int err;
	struct sk_buff *skb;

	while (1) {
		unsigned long try_size = size;

		err = sock_error(sk);
		if (err != 0)
			goto failure;

		/*
		 *	We should send SIGPIPE in these cases according to
		 *	1003.1g draft 6.4. If we (the user) did a shutdown()
		 *	call however we should not. 
		 *
		 *	Note: This routine isnt just used for datagrams and
		 *	anyway some datagram protocols have a notion of
		 *	close down.
		 */

		err = -EPIPE;
		if (sk->shutdown&SEND_SHUTDOWN)
			goto failure;

		if (fallback) {
                    /* The buffer get won't block, or use the atomic queue.
                     * It does produce annoying no free page messages still.
                     */
                    skb = sock_wmalloc(sk, size, 0, 0/*XXXKAF GFP_BUFFER*/);
			if (skb)
                            break;
			/* Full size failed: retry below at fallback size. */
			try_size = fallback;
		}
		skb = sock_wmalloc(sk, try_size, 0, sk->allocation);
		if (skb)
			break;

		/*
		 * This means we have too many buffers for this socket already.
		 */

		sk->socket->flags |= SO_NOSPACE;
		err = -EAGAIN;
		if (noblock)
			goto failure;
		err = -ERESTARTSYS;
		if (signal_pending(current))
			goto failure;
		/* Sleep until write memory frees up, then retry the loop. */
		sock_wait_for_wmem(sk);
	}

	return skb;

failure:
	*errcode = err;
	return NULL;
}

/*
 *	Default Socket Callbacks
 */

/*
 * Default state-change callback: wake any thread sleeping on a live
 * socket.  (The callback_lock of the original kernel code is not taken
 * in this port.)
 */
void sock_def_wakeup(struct sock *sk)
{
    if (sk->dead)
        return;
    wake_up_interruptible(sk->sleep);
}

/*
 * Default error-report callback: wake sleepers on a live socket and
 * post an asynchronous POLL_ERR notification.
 */
void sock_def_error_report(struct sock *sk)
{
    if (sk->dead)
        return;
    wake_up_interruptible(sk->sleep);
    sock_wake_async(sk->socket,0,POLL_ERR); 
}

/*
 * Default data-ready callback: wake readers on a live socket and post
 * an asynchronous POLL_IN notification.  'len' (bytes queued) is
 * unused here.
 */
void sock_def_readable(struct sock *sk, int len)
{
    if (sk->dead)
        return;
    wake_up_interruptible(sk->sleep);
    sock_wake_async(sk->socket,1,POLL_IN);
}

/*
 * Default write-space callback: wake a writer once at least half of
 * the send buffer is free ("significant progress" -- DaveM), and post
 * POLL_OUT if the socket is writeable.
 *
 * Fix vs. original: the opening read_lock() had been commented out but
 * the matching read_unlock(&sk->callback_lock) was still executed,
 * unbalancing the lock on every call; the unlock is now disabled to
 * match.
 */
void sock_def_write_space(struct sock *sk)
{
    //read_lock(&sk->callback_lock);

    /* Do not wake up a writer until he can make "significant"
     * progress.  --DaveM
     */
    if(!sk->dead &&
       ((atomic_read(&sk->wmem_alloc) << 1) <= sk->sndbuf)) {
        wake_up_interruptible(sk->sleep);

        /* Should agree with poll, otherwise some programs break */
        if (sock_writeable(sk))
            sock_wake_async(sk->socket, 2, POLL_OUT);
    }
    //read_unlock(&sk->callback_lock);
}

void sock_def_destruct(struct sock *sk)
{
    if (sk->protinfo.destruct_hook)
        kfree(sk->protinfo.destruct_hook);
}


/* sock_init_data: initialise a freshly allocated struct sock --
 * queues, timer, buffer limits (sized to one full buffer ring each),
 * default state, back-pointers to the owning socket/pcb, and the
 * default callback set. */
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->receive_queue);
	skb_queue_head_init(&sk->write_queue);
	skb_queue_head_init(&sk->error_queue);

	spin_lock_init(&sk->timer_lock);
	init_timer(&sk->timer);
	
	sk->allocation	=	GFP_KERNEL;
	sk->rcvbuf	=	sysctl_rmem_default;
	sk->sndbuf	=	sysctl_wmem_default;

	printf("Socket bufs w:%d r%d\n",sk->sndbuf,sk->rcvbuf);
	sk->state 	= 	TCP_CLOSE;
	sk->zapped	=	1;
	sk->socket	=	sock;

	if(sock)
	{
		sk->type	=	sock->type;
		sk->sleep	=	&sock->wait;
		sock->sk	=	sk;
                sk->pcb         =       pcb_from_sock(sock);
	} else
		sk->sleep	=	NULL;

	sk->callback_lock	=	RW_LOCK_UNLOCKED;

	/* Default callbacks; protocols may override these later. */
	sk->state_change	=	sock_def_wakeup;
	sk->data_ready		=	sock_def_readable;
	sk->write_space		=	sock_def_write_space;
	sk->error_report	=	sock_def_error_report;
	sk->destruct            =       sock_def_destruct;

        sk->lprev = NULL;
        sk->lnext = NULL; // KAF: not on a listener queue yet

	atomic_set(&sk->refcnt, 1);
}





    
/******************************************************************************
 * TIMER HACKS...........
 */

/*
 * Timer event checking frequency in milliseconds. See ipv4/ipv4.h for some
 * comments about this value.
 */
#define COARSE_TIMEOUT_PERIOD 50

/* Serialises all access to the timer heap below. */
pth_mutex_t timerlist_lock;

/* 1-indexed binary min-heap of pending timers, ordered by 'expires';
 * heap_limit is the index of the last occupied slot (0 == empty). */
struct timer_list **timer_heap;
int heap_limit;

/*
 * Sift the timer destined for heap slot <i> downwards until the
 * min-heap property (parent expires no later than either child) is
 * restored.  timer_heap is 1-based; heap_limit indexes the last entry.
 */
static __inline__ void sink_timer(struct timer_list *timer, int i)
{
    int child;

    for ( ; ; )
    {
        child = i << 1;
        if ( child > heap_limit ) break;
        /* Compare against the earlier-expiring of the two children. */
        if ( (child < heap_limit) &&
             (timer_heap[child+1]->expires < timer_heap[child]->expires) )
            child++;
        if ( timer->expires <= timer_heap[child]->expires ) break;
        /* Pull the child up one level and keep descending. */
        timer_heap[i] = timer_heap[child];
        timer_heap[i]->index = i;
        i = child;
    }

    timer_heap[i] = timer;
    timer->index  = i;
}

/*
 * Sift the timer destined for heap slot <i> upwards while it expires
 * earlier than its parent, restoring the min-heap property.
 */
static __inline__ void float_timer(struct timer_list *timer, int i)
{
    int parent;

    for ( ; i > 1; i = parent )
    {
        parent = i >> 1;
        if ( timer->expires >= timer_heap[parent]->expires ) break;
        /* Push the parent down one level and keep ascending. */
        timer_heap[i] = timer_heap[parent];
        timer_heap[i]->index = i;
    }

    timer_heap[i] = timer;
    timer->index  = i;
}

/*
 * Allocate the global timer heap and initialise its lock.
 * Returns 0 on success, -ENOMEM if the heap cannot be allocated.
 *
 * NOTE(review): the heap capacity is fixed (TIMER_HEAP_SLOTS-1 pending
 * timers, slot 0 being unused by the 1-based heap) and add_timer()/
 * mod_timer() do not bound-check against it.
 */
int init_timers(void)
{
    enum { TIMER_HEAP_SLOTS = 2000 };  /* was a bare magic number */

    timer_heap = malloc(TIMER_HEAP_SLOTS * sizeof(*timer_heap));
    if ( timer_heap == NULL )
        return(-ENOMEM);
    pth_mutex_init(&timerlist_lock);
    heap_limit = 0;
    return(0);
}

/*
 * Release the global timer heap.  The pointer is cleared so that a
 * stale add_timer()/mod_timer() after cleanup faults predictably
 * instead of scribbling on freed memory.
 */
void cleanup_timers(void)
{
    free(timer_heap);
    timer_heap = NULL;
}

/*
 * Insert <timer> into the global timer heap.  The caller must not add
 * a timer that is already pending (use mod_timer for that).
 * NOTE(review): there is no check against the capacity allocated in
 * init_timers(); too many pending timers overruns the heap array.
 */
void add_timer(struct timer_list *timer)
{
    pth_mutex_acquire(&timerlist_lock, 0, NULL);
    float_timer(timer, ++heap_limit);
    pth_mutex_release(&timerlist_lock);
}

/*
 * (Re)arm <timer> to fire at <expires>.  If the timer is not currently
 * on the heap (index == -1) it is inserted; otherwise it is moved up
 * or down from its current slot as the new expiry requires.
 */
void mod_timer(struct timer_list *timer, unsigned long expires)
{
    int slot;

    pth_mutex_acquire(&timerlist_lock, 0, NULL);

    timer->expires = expires;

    slot = timer->index;
    if ( slot == -1 )
    {
        /* Not pending: insert as a fresh heap entry. */
        float_timer(timer, ++heap_limit);
    }
    else if ( (slot > 1) && (expires < timer_heap[slot>>1]->expires) )
    {
        /* Now earlier than its parent: bubble it up. */
        float_timer(timer, slot);
    }
    else
    {
        /* Otherwise it can only need to move down. */
        sink_timer(timer, slot);
    }

    pth_mutex_release(&timerlist_lock);
}

/*
 * Remove <timer> from the heap if it is pending.  The vacated slot is
 * refilled with the last heap entry, which is then floated or sunk to
 * restore the heap property.
 *
 * Returns 1 if the timer was pending and has been removed, 0 if not.
 * (The original returned 1 unconditionally once the lock was taken,
 * even when the locked re-check found the timer already expired.)
 */
int del_timer(struct timer_list * timer)
{
    int i, was_pending = 0;
    struct timer_list *bt;

    /* Cheap unlocked fast path; re-checked under the lock below. */
    if ( timer->index == -1 ) return(0);

    pth_mutex_acquire(&timerlist_lock, 0, NULL);

    if ( (i = timer->index) != -1 )
    {
        bt = timer_heap[heap_limit--];
        if ( heap_limit >= i )
        {
            /* Re-seat the displaced last entry at slot i. */
            (((i > 1) && (bt->expires < timer_heap[i>>1]->expires)) ? 
             float_timer : sink_timer) (bt, i);
        }
        timer->index = -1;
        was_pending = 1;
    }

    pth_mutex_release(&timerlist_lock);

    return(was_pending);
}

/*
 * process_tcp_timers: NEW FUNCTION (KAF 16/12/99)
 *
 * Fire every timer due within the next coarse-timer tick.  The heap
 * root (slot 1) is always the soonest timer; it is detached and its
 * handler invoked with the list lock RELEASED, because handlers may
 * re-arm timers and would deadlock against timerlist_lock otherwise.
 * NOTE(review): <now> narrows jiffies to int and there is no
 * wrap-around handling -- confirm jiffies stays within int range here.
 */
void process_tcp_timers(void)
{
    int now = jiffies;
    struct timer_list *t;

    pth_mutex_acquire(&timerlist_lock, 0, NULL);
    while ( heap_limit && 
            ((t = timer_heap[1])->expires < 
             (now + COARSE_TIMEOUT_PERIOD/(2000/HZ))) )
    {
        void (*fn)(unsigned long) = t->function;
        unsigned long data        = t->data;
        /* Replace the root with the last entry and re-heapify. */
        sink_timer(timer_heap[heap_limit--], 1);
        t->index = -1;
        /* Run the handler unlocked; it may add/mod/del timers. */
        pth_mutex_release(&timerlist_lock);
        fn(data);
        pth_mutex_acquire(&timerlist_lock, 0, NULL);
    }
    pth_mutex_release(&timerlist_lock);
}






/******************************************************************************
 * IOVEC HACKS................
 */

/*
 * Copy <len> bytes from <kdata> into the iovec chain <iov>.  Entries
 * are consumed in place: iov_base is advanced and iov_len decremented
 * as bytes are written (zero-length entries are skipped).  The caller
 * must supply enough total iovec space for <len> bytes.
 * Always returns 0.
 */
int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
{
    while (len > 0)
    {
        if (iov->iov_len)
        {
            /* Clamp the chunk to whichever is smaller: this entry's
             * remaining room or the bytes still to copy. */
            size_t chunk = iov->iov_len;
            if (chunk > (size_t)len)
                chunk = (size_t)len;
            memcpy(iov->iov_base, kdata, chunk);
            kdata += chunk;
            len   -= (int)chunk;
            iov->iov_len  -= chunk;
            iov->iov_base  = (char *)iov->iov_base + chunk;
        }
        iov++;
    }
    return(0);
}


/*
 *	For use with ip_build_xmit.
 *
 *	Copy <len> bytes out of an iovec chain into <kdata>, starting
 *	<offset> bytes into the chain.  The iovec array itself is not
 *	modified.  Returns 0 on success, -EFAULT if a copy faults.
 */
int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
			int len)
{
	int err = -EFAULT;

	/* Advance past whole iovecs covered by <offset>. */
	for ( ; offset >= iov->iov_len; iov++ )
		offset -= iov->iov_len;

	while (len > 0)
	{
		u8 *src = iov->iov_base + offset;
		int chunk = min(len, iov->iov_len - offset);

		offset = 0;	/* only the first iovec is entered mid-way */
		if (copy_from_user(kdata, src, chunk))
			goto out;
		len   -= chunk;
		kdata += chunk;
		iov++;
	}
	err = 0;
out:
	return err;
}

/*
 *	And now for the all-in-one: copy and checksum from a user iovec
 *	directly to a datagram
 *	Calls to csum_partial but the last must be in 32 bit chunks
 *
 *	ip_build_xmit must ensure that when fragmenting only the last
 *	call to this function will be unaligned also.
 *
 *	Returns 0 on success (final checksum stored through <csump>) or
 *	-EFAULT if a user copy faults.  <partial_cnt> tracks the 1-3
 *	bytes left over when an iovec ends mid 32-bit word: they are
 *	copied but not checksummed until the word completes, keeping
 *	every csum_partial call except the last on a 4-byte boundary.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				 int offset, unsigned int len, int *csump)
{
	int csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len)
	{
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0)
	{
		u8 *base = iov->iov_base + offset;
		unsigned int copy = min(len, iov->iov_len - offset);

		offset = 0;
		/* There is a remnant from previous iov. */
		if (partial_cnt)
		{
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base  += copy;
				partial_cnt += copy;
				len   -= copy;
				iov++;
				if (len)
					continue;
				/* Input exhausted mid-word: fold the tail
				 * bytes in as the final (unaligned) block. */
				*csump = csum_partial(kdata - partial_cnt,
							 partial_cnt, csum);
				goto out;
			}
			/* Complete the pending 4-byte word, then fold it. */
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base  += par_len;
			copy  -= par_len;
			len   -= par_len;
			partial_cnt = 0;
		}

		/* Not the last chunk: hold back copy%4 bytes (copied but
		 * unchecksummed) so the checksummed span stays 4-aligned. */
		if (len > copy)
		{
			partial_cnt = copy % 4;
			if (partial_cnt)
			{
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
				 		partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
							csum, &err);
			if (err)
				goto out;
		}
		len   -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
        *csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}



/******************************************************************************
 * CMSG HACKS..............
 */

/*
 * Append one control message (level/type + <len> bytes of <data>) to
 * the user cmsg buffer described by <msg>.  On insufficient space the
 * MSG_CTRUNC flag is set and the data may be truncated to fit.
 * msg_control/msg_controllen are advanced past the CMSG_SPACE-padded
 * message on success.  Returns 0 on success or truncation, -EFAULT on
 * a faulted copy.
 */
int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
{
	struct cmsghdr *cm = (struct cmsghdr*)msg->msg_control;
	struct cmsghdr cmhdr;
	int cmlen = CMSG_LEN(len);
	int err;

	if (cm==NULL || msg->msg_controllen < sizeof(*cm)) {
		msg->msg_flags |= MSG_CTRUNC;
		return 0; /* XXX: return error? check spec. */
	}
	if (msg->msg_controllen < cmlen) {
		/* Not enough room: deliver what fits, flag truncation. */
		msg->msg_flags |= MSG_CTRUNC;
		cmlen = msg->msg_controllen;
	}
	cmhdr.cmsg_level = level;
	cmhdr.cmsg_type = type;
	cmhdr.cmsg_len = cmlen;

	err = -EFAULT;
	if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
		goto out; 
	if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
		goto out;
	cmlen = CMSG_SPACE(len);
	msg->msg_control += cmlen;
	msg->msg_controllen -= cmlen;
	err = 0;
out:
	return err;
}





/******************************************************************************
 * ETHERNET HACKS............
 */

/******************************************************************************
 * eth_output:
 *   Main output routine called by the Linux stack. This sets things up
 *   so that external routines can do the real work without knowing the
 *   internals of a load of kernel structures.
 *   Returns whatever tx_skb() returns for the owning pcb.
 */
int eth_output(struct sk_buff *skb)
{
/* we send an IP pkt on the raw sock, letting the linux kernel put in the mac hdr - P2 */
//    skb->mac.ethernet = (struct ethhdr *)skb_push_h(skb, ETH_HLEN);
//    memcpy(skb->mac.ethernet, skb->dst->hh->hh_data, ETH_HLEN);
//    display_packet(skb);

    /* Hand the bare IP packet to the transmit path of the owning pcb. */
    return(tx_skb(skb->sk->pcb, skb));
}

// P2
//char* sprint_ip(int ip);

/*
 * Debug helper: dump the IP and TCP headers of an skb to stdout.
 * Assumes skb->h_data points at the IP header with the TCP header
 * immediately following (i.e. no IP options).
 */
void display_packet(struct sk_buff *skb)
{
    struct iphdr *iph;
    struct tcphdr *th;

    iph = (struct iphdr *)(skb->h_data);
    th = (struct tcphdr *)((char *)(skb->h_data) + sizeof(struct iphdr));

    /* sprint_ip() may return a static buffer, so print the two
     * addresses with separate printf calls; the original passed both
     * results to one printf, which would show the same address twice
     * if the buffer is static. */
    printf("pkt contents : \n IP : saddr %s", sprint_ip(iph->saddr));
    printf(" daddr %s len %d proto %d \n", sprint_ip(iph->daddr),
           ntohs(iph->tot_len), iph->protocol);
    /* seq/ack travel in network byte order: convert before printing
     * (the original printed the raw big-endian words). */
    printf(" TCP : sport %d dport %d seq %u ack %u syn %d fin %d rst %d \n",
           ntohs(th->source), ntohs(th->dest),
           ntohl(th->seq), ntohl(th->ack_seq), th->syn, th->fin, th->rst);
    printf(" total length: %d\n", ntohs(skb->nh.iph->tot_len));
}




/******************************************************************************
 * DATAGRAM HACKS (used by udp only)............
 */

/*
 * Wait for a packet..
 *
 * Block until <sk> has data on its receive queue, returning 0 when it
 * does.  A pending socket error, RCV_SHUTDOWN or a pending signal
 * aborts the wait: the error code is stored through <err> and also
 * returned.
 */ 
static int wait_for_packet(struct sock * sk, int *err)
{
	int error;

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out;

	if (!skb_queue_empty(&sk->receive_queue))
		goto ready;

	/* Socket shut down? */
	if (sk->shutdown & RCV_SHUTDOWN)
		goto out;

	/* handle signals */
	error = -ERESTARTSYS;
	if (signal_pending(current))
		goto out;

	/* Nothing queued: sleep until a data_ready callback wakes us. */
        sock_sleep(sk);

ready:  return 0; 

out:    *err = error;
	return error;
}
            
/*
 * Dequeue (or, with MSG_PEEK, reference without dequeuing) one skb
 * from sk's receive queue, blocking unless <noblock> is set.
 * Returns the skb, or NULL with the failure stored through <err>
 * (-EAGAIN for an empty queue in non-blocking mode, a pending socket
 * error, or the result of an interrupted/shut-down wait).
 */
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err)
{
	int error;
	struct sk_buff *skb;

	/* Caller is allowed not to check sk->err before skb_recv_datagram() */
	error = sock_error(sk);
	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing interrupt level
		   will suddenly eat the receive_queue.

		   Look at current nfs client by the way...
		   However, this function was corrent in any case. 8)
		 */
		if (flags & MSG_PEEK)
		{
			unsigned long cpu_flags;

			/* Peek leaves the skb queued; take an extra reference
			   under the queue lock so it cannot vanish while the
			   caller inspects it. */
			spin_lock_irqsave(&sk->receive_queue.lock, cpu_flags);
			skb = skb_peek(&sk->receive_queue);
			if(skb!=NULL)
				atomic_inc(&skb->users);
			spin_unlock_irqrestore(&sk->receive_queue.lock, cpu_flags);
		} else
			skb = skb_dequeue(&sk->receive_queue);

		if (skb)
			return skb;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (noblock)
			goto no_packet;

	} while (wait_for_packet(sk, err) == 0);

	return NULL;

no_packet:
	*err = error;
	return NULL;
}


/* Release one reference on a datagram obtained from skb_recv_datagram
 * (the <sk> argument is unused here but kept for API compatibility). */
void skb_free_datagram(struct sock * sk, struct sk_buff *skb)
{
	kfree_skb(skb);
}


/*
 *	Copy a datagram to an iovec.
 *	Note: the iovec is modified during the copy.
 *	Copies <size> bytes starting <offset> bytes into the skb's raw
 *	transport data; returns memcpy_toiovec's result (always 0).
 */  
int skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to,
			    int size)
{
	return memcpy_toiovec(to, skb->h.raw + offset, size);
}





/******************************************************************************
 * MEMORY HACKS..................
 */

// #define ITEMS_PER_CACHE_BLOCK 500
#define ITEMS_PER_CACHE_BLOCK 5000

/*
 * Create a fixed-size object cache.  The cache header and its first
 * block of ITEMS_PER_CACHE_BLOCK items come from a single malloc;
 * further blocks are added on demand by __kmem_cache_alloc.
 * Returns the cache, or NULL on allocation failure.
 */
extern kmem_cache_t *__kmem_cache_create(int size)
{
    kmem_cache_t *c;
    free_item_t  *f; 
    int i;

    /*
     * Round the item size up to a 16-byte multiple so that, given a
     * 16-byte-aligned first item, _every_ item is 16-byte aligned.
     */
    size = (size+15)&~15;

    /*
     * The first block includes the cache header.
     */
    c = malloc(((sizeof(kmem_cache_t)+15)&~15) + ITEMS_PER_CACHE_BLOCK*size);
    if ( !c ) return(NULL);

    c->size = size;
    c->fb   = NULL;  // no extra data blocks

    /*
     * Align the first item up to a 16-byte boundary.  The pointer is
     * round-tripped through unsigned long rather than u_int: on LP64
     * targets the old u_int cast truncated the pointer to 32 bits.
     */
    c->fl = f = (free_item_t *)((((unsigned long)(c+1))+15)&~15UL);

    /* Thread the block's remaining items onto the free list. */
    for ( i = 1; i < ITEMS_PER_CACHE_BLOCK; i++ )
    {
        f->nf = (free_item_t *)(((char*)f) + size);
        f     = f->nf;
    }
    f->nf = NULL;

    return(c);
}


/*
 * Allocate one item from <cache>.  The fast path pops the head of the
 * free list; otherwise a fresh block of ITEMS_PER_CACHE_BLOCK items is
 * malloc'd, its first item is handed out and the rest are threaded
 * onto the free list.  Returns NULL if the block malloc fails.
 *
 * NOTE(review): extra blocks are never recorded (cache->fb stays
 * NULL), so __kmem_cache_destroy cannot free them -- they leak.
 * (Dead commented-out debug printf scaffolding removed.)
 */
extern void *__kmem_cache_alloc(kmem_cache_t *cache)
{
    void *p;
    free_item_t *f;
    int i;

    if ( (p = cache->fl) )
    {
        /* Satisfy request from head of free list. */
        cache->fl = cache->fl->nf;
        return(p);
    }

    /* Free list empty: grab a new data block. */
    if ( !(p = malloc(ITEMS_PER_CACHE_BLOCK*cache->size)) ) return(NULL);

    /* The block's first item is the one returned; link the others
     * (items 1..ITEMS_PER_CACHE_BLOCK-1) onto the free list. */
    cache->fl = f = (free_item_t *)(((char*)p) + cache->size);
    for ( i = 2; i < ITEMS_PER_CACHE_BLOCK; i++ )
    {
        f->nf = (free_item_t *)(((char*)f) + cache->size);
        f     = f->nf;
    }
    f->nf = NULL;

    return(p);
}


/*
 * Return <p> to <cache> by pushing it onto the head of the free list.
 * <p> must have been obtained from __kmem_cache_alloc on the same
 * cache and must not be NULL (the free-list link is written into it).
 */
extern void __kmem_cache_free(kmem_cache_t *cache, void *p)
{
    /* Add item to head of free list */
    free_item_t *f = p;
    f->nf = cache->fl;
    cache->fl = f;
}


/*
 * Free the cache header together with its first (embedded) data block.
 * NOTE(review): data blocks added later by __kmem_cache_alloc are not
 * tracked anywhere (cache->fb is never set), so they leak here --
 * confirm this is acceptable for the cache lifetimes in this port.
 */
extern void __kmem_cache_destroy(kmem_cache_t *cache)
{
    free(cache);
}
  






/******************************************************************************
 * ROUTE/FIB HACKS.............
 */

struct net_device *dev_list = NULL;

int close(int);
int ioctl(int, int, ...);

/******************************************************************************
 * ip_rt_init:
 *   We get information on every Ethernet interface, and store it away in
 *   fake net_device structures.
 */
/*
 * Return the local interface whose address matches <ip>.  A wildcard
 * lookup (ip == 0) hands interfaces out round-robin via the
 * ultcp_default cursor.  Returns NULL when nothing matches.
 */
struct net_device* get_interface(__u32 ip){
  int idx;

  for (idx = 0; idx < ultcp_cnt; idx++) {
    if (ultcp_interface[idx]->ip_addr == ip)
      return ultcp_interface[idx];
  }

  if (ip == 0 && ultcp_cnt > 0) {
    __u8 pick = ultcp_default;
    /* Advance the round-robin cursor for the next wildcard request. */
    ultcp_default = (ultcp_default + 1) % ultcp_cnt;
    printf("Setting default interface:");
    print_ip(ultcp_interface[pick]->ip_addr);
    printf("\n");
    return ultcp_interface[pick];
  }

  printf("Cant find iface for ip ");
  print_ip(ip);

  return NULL;
}

/*
 * Return the ultcp_interface index of the interface whose address
 * matches <ip>, or -1 if no interface matches.
 */
int get_interface_loc(__u32 ip){
  int idx = 0;

  while (idx < ultcp_cnt) {
    if (ultcp_interface[idx]->ip_addr == ip)
      return idx;
    idx++;
  }

  return -1;
}

/*
 * ip_rt_init:
 *   Probe every network interface on the host via SIOCGIFCONF and
 *   friends, and record the up, non-loopback INET interfaces in fake
 *   net_device structures (on dev_list and in the ultcp_interface
 *   table).  Silently returns on failure; partial results stand.
 */
void ip_rt_init(void)
{
    u32 ip_addr;
    int s;
    int len = 100 * sizeof(struct ifreq);
    char *buf, *ptr;
    struct ifreq *ifr, ifrcopy;
    struct ifconf ifc;
    struct net_device *dev;
    
    if ( (s = socket(AF_INET, SOCK_DGRAM, 0)) < 0 ) return;

    /*
     * Get information about every network interface. This convoluted
     * code is required because SIOCGIFCONF only reports how much it
     * wrote: grow the buffer until a call leaves slack, which proves
     * every interface was seen.
     */
    for ( ; ; )
    {
        if ( (buf = malloc(len)) == NULL ) goto e1;

        ifc.ifc_len = len;
        ifc.ifc_buf = buf;
        if ( ioctl(s, SIOCGIFCONF, &ifc) < 0 ) goto e2;

        /* If it didn't fill the buffer we gave it, we must have all if's. */
        if ( ifc.ifc_len != len ) break;

        /* Otherwise, try again with a slightly larger buffer. */
        len += 10 * sizeof(struct ifreq);
        free(buf);
    }
    
    ultcp_interface = (struct net_device**)malloc(MAX_INTERFACE * sizeof (struct net_device*));
    if ( ultcp_interface == NULL ) goto e2;  /* was unchecked */
    ultcp_cnt = 0;
    ultcp_default = 0;

    /*
     * Parse each interface, filtering out loopback/non-INET/down ones
     * and storing away address information for the rest.
     */
    for ( ptr = buf; ptr < buf + ifc.ifc_len; ptr += sizeof(struct ifreq) )
    {
        ifr = (struct ifreq *)ptr;

        if ( (strlen(ifr->ifr_name) > MAX_IFNAME_LEN) 
	     ||  strcmp(ifr->ifr_name, "lo")==0 )
        {
            /* Loopback, or a name too long for us to store. */
            continue;
        }

        ifrcopy = *ifr;
        if ((ioctl(s, SIOCGIFADDR, &ifrcopy) < 0) ||
             (ifrcopy.ifr_addr.sa_family != AF_INET))
        {
            /*
             * Not an INET interface, so not much use to us!
             */
            continue;
        }
        ip_addr = ((struct sockaddr_in *)&(ifrcopy.ifr_addr))->sin_addr.s_addr;

        if ( (ioctl(s, SIOCGIFFLAGS, &ifrcopy) < 0) ||
             ((ifrcopy.ifr_flags & IFF_UP) == 0) )
        {
            /* Interface not currently available. */
            continue;
        }

        /* Don't overrun the fixed-size ultcp_interface table
         * (was unchecked: >MAX_INTERFACE devices wrote past it). */
        if ( ultcp_cnt >= MAX_INTERFACE ) break;

        /* Okay, it's a valid interface, so get the required info. */

        if ( (dev = malloc(sizeof(struct net_device))) == NULL ) continue;
        memset(dev, 0, sizeof( struct net_device));
        strcpy(dev->name, ifrcopy.ifr_name);
        dev->next = dev_list;
        dev_list = dev;

        if ( ioctl(s, SIOCGIFINDEX, &ifrcopy) < 0 ) continue;
        dev->ifindex = ifrcopy.ifr_ifindex;

	/* Every usable interface is registered for multipath use - P2 */
	ultcp_interface[ultcp_cnt] = dev;
	ultcp_cnt ++;

        if ( ioctl(s, SIOCGIFHWADDR, &ifrcopy) < 0 ) continue;
        memcpy(dev->dev_addr, ifrcopy.ifr_hwaddr.sa_data, ETH_ALEN);
        dev->addr_len = ETH_ALEN;

        if ( ioctl(s, SIOCGIFMTU, &ifrcopy) < 0 ) continue;
        dev->mtu = ifrcopy.ifr_mtu;

        dev->hard_header_len = ETH_HLEN;
        dev->ip_addr = ip_addr; /* network byte ordering */
    }

e2: free(buf);
e1: close(s);
}


/******************************************************************************
 * strtoip:
 *   Converts an IP address in dotted decimal format to a 32-bit value
 *   with the first octet in the lowest byte (network byte order on a
 *   little-endian host).  Places the final string pointer in <rs>.
 */

static inline u32 strtoip(char *s, char **rs)
{
    u32 addr = 0;
    int shift;

    for ( shift = 0; shift <= 24; shift += 8 )
    {
        if ( shift ) s++;               /* step over the dot */
        addr |= strtoul(s, &s, 10) << shift;
    }
    *rs = s;
    return(addr);
}


/******************************************************************************
 * create_new_route:
 *  Creates a new routing table entry with the given parameters.
 *  Emphasis is on quick and dirty here! :-) 
 *  Returns the new entry (also linked onto dev->rt_list), or NULL on
 *  allocation failure.
 */
static struct rtable *create_new_route(u32 remote, struct net_device *dev)
{        
    struct rtable *rt;
    struct eth_ipv4_route_req_rsp req_rsp;
    extern int tcp_socket;

    if ( (rt = malloc(sizeof(struct rtable))) == NULL ) goto e0;
    memset(rt, 0, sizeof(struct rtable));

    rt->rt_dst = remote;
    rt->rt_src = dev->ip_addr;
    rt->rt_iif = dev->ifindex;
    
    rt->u.dst.dev    = dev;
    rt->u.dst.output = eth_output;

    if ( (rt->u.dst.hh = malloc(sizeof(struct hh_cache))) == NULL ) goto e1;
    memset(rt->u.dst.hh, 0, sizeof(struct hh_cache));

    /* Fill in hardware header information, except for MAC dest address. */
    rt->u.dst.hh->hh_len  = ETH_HLEN;
    rt->u.dst.hh->hh_type = ETH_P_IP;
    /* Source MAC at byte offset 6 of the cached ethernet header. */
    memcpy((char*)(rt->u.dst.hh->hh_data)+6, dev->dev_addr, ETH_ALEN);
    /* NOTE(review): the EtherType is written via &hh_data[3], which is
     * byte offset 12 only if hh_data's element type is 4 bytes wide --
     * verify against this port's struct hh_cache definition. */
    *((u_short *)&(rt->u.dst.hh->hh_data[3])) = htons(ETH_P_IP);

    if ( (rt->u.dst.ops = malloc(sizeof(struct dst_ops))) == NULL ) goto e2;
    memset(rt->u.dst.ops, 0, sizeof(struct dst_ops));    

    /*
     * Now get information about the gateway and path MTU. To do this
     * we make use of the TCP control socket. We get one of these by
     * creating an AF_USER socket of type SOCK_RAW: this will act precisely
     * as an INET raw socket, allowing thinsg like RSTs to be sent, but
     * will use the AF_USER ioctl() routine.
     */
    req_rsp.saddr   = dev->ip_addr;
    req_rsp.daddr   = remote;
    req_rsp.ifindex = dev->ifindex;
    //if ( ioctl(tcp_socket, IOCTL_GET_ROUTE_INFO, &req_rsp) ) goto e3;
    //rt->rt_gateway = req_rsp.gateway; - not needed P2 
    rt->rt_gateway = 0;
    /* NOTE(review): req_rsp.mac_addr is read below although the ioctl
     * that would fill req_rsp is commented out -- the destination MAC
     * copied here is uninitialized stack data; confirm it is patched
     * up elsewhere before transmission. */
    memcpy(rt->u.dst.hh->hh_data, req_rsp.mac_addr, ETH_ALEN);
    //rt->u.dst.pmtu = rt->u.dst.advmss = req_rsp.pmtu; - not needed P2
    rt->u.dst.pmtu = rt->u.dst.advmss = 1460;

    rt->next = dev->rt_list;
    dev->rt_list = rt;

    return(rt);

 e3:free(rt->u.dst.ops);
 e2:free(rt->u.dst.hh);
 e1:free(rt);
 e0:return(NULL);

}


/******************************************************************************
 * ip_route_find:
 *   Looks up a routing entry for the given (src,dst) IP address pair for
 *   the given output interface <oif>, creating one on demand.
 *   Returns 0 on success (entry stored through <rp>), -ENODEV if no
 *   device matches, -EHOSTUNREACH if a new route cannot be created.
 */
int ip_route_find(struct rtable **rp, u32 dst, u32 src, int oif)
{
    struct net_device *dev;
    struct rtable *rt;

    /* Find a device matching either the requested ifindex or source IP. */
    for ( dev = dev_list; dev != NULL; dev = dev->next )
    {
        if ( (dev->ifindex == oif) || (dev->ip_addr == src) ) break;
    }

    if ( dev == NULL ) return(-ENODEV);

    /* Both constraints, when given, must agree with the device found. */
    if ( (oif && (dev->ifindex != oif)) || (src && (dev->ip_addr != src)) )
    {
        /* Was panic("") -- an empty message made this undiagnosable. */
        panic("ip_route_find: oif/src constraints match different devices");
    }

    /* Scan this device's routing entries for the destination... */
    for ( rt = dev->rt_list; rt != NULL; rt = rt->next )
    {
        if ( rt->rt_dst == dst ) break;
    }

    /* ...creating a new entry on demand. */
    if ( (*rp = rt) == NULL )
    {
        if ( (*rp = create_new_route(dst, dev)) == NULL )
            return(-EHOSTUNREACH);
    }

    return(0);
}


/******************************************************************************
 * ip_route_input:
 *   Looks up a dst_entry for the given (src,dst) IP address pair for
 *   the given input device <devin>, attaching it to skb->dst.
 *   Returns 0 on success, -ENODEV / -EHOSTUNREACH on failure.
 */
int ip_route_input(struct sk_buff *skb, 
                   u32 dst, 
                   u32 src, 
                   u8 tos, 
                   struct net_device *devin)
{
    struct net_device *dev;
    struct rtable *rt;

    /*
     * KAF (12/1/00): this is by far the most common case. We can do this
     * because the NIC has already demuxed to a socket.
     */
    //if ( (skb->dst = skb->sk->dst_cache) ) return(0); - we demux when standard tcp does -P2

    /* No input device given: find the one owning the destination address. */
    dev = devin;
    if ( dev == NULL )
    {
        dev = dev_list;
        while ( (dev != NULL) && (dev->ip_addr != dst) )
            dev = dev->next;
        if ( dev == NULL ) return(-ENODEV);
    }

    /* Reverse route: entries on an input device are keyed by source. */
    rt = dev->rt_list;
    while ( (rt != NULL) && (rt->rt_dst != src) )
        rt = rt->next;

    if ( rt == NULL )
    {
        rt = create_new_route(src, dev);
        if ( rt == NULL ) return(-EHOSTUNREACH);
    }

    skb->dst = &(rt->u.dst);
    return(0);
}


/*
 * Classify <addr>: broadcast/multicast by address class, RTN_LOCAL if
 * it belongs to one of our interfaces, otherwise plain unicast.
 */
unsigned inet_addr_type(u32 addr)
{
    struct net_device *dev;

    if ( ZERONET(addr) || BADCLASS(addr) )
        return(RTN_BROADCAST);
    if ( MULTICAST(addr) )
        return(RTN_MULTICAST);

    for ( dev = dev_list; dev; dev = dev->next )
        if ( dev->ip_addr == addr )
            return(RTN_LOCAL);

    return(RTN_UNICAST);
}


#include "multipath.h"
#include "hash.h"

/*
 * Drop one reference on the listening-hash user count; the last user
 * wakes any thread waiting for exclusive access to the listen hash.
 * The notify mutex makes the dec-and-test and the wakeup atomic with
 * respect to the waiter going to sleep.
 */
__inline__ void tcp_listen_unlock(void)
{
    extern pth_mutex_t tcp_lhash_notify_lock;
    pth_mutex_acquire(&tcp_lhash_notify_lock, 0, NULL);
    if (atomic_dec_and_test(&tcp_lhash_users)){
        wake_up(&tcp_lhash_wait);
	}
    pth_mutex_release(&tcp_lhash_notify_lock);
}


/*
 * Write the TCP option words for a SYN/SYN-ACK into <ptr>:
 * always MSS; then timestamps (with SACK-permitted folded into the
 * same word when <sack>); an optional window-scale offer; and, when a
 * multipath pcb is supplied, either the initial multipath option with
 * local token and address list (<is_first>) or a new-subflow option
 * carrying both tokens.  The caller must size the header for exactly
 * the options requested.
 */
__inline__ void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent, struct multipath_pcb* mpcb,int is_first)
{
    /* We always get an MSS option.
     * The option bytes which will be seen in normal data
     * packets should timestamps be used, must be in the MSS
     * advertised.  But we subtract them from tp->mss_cache so
     * that calculations in tcp_sendmsg are simpler etc.
     * So account for this fact here if necessary.  If we
     * don't do this correctly, as a receiver we won't
     * recognize data packets as being full sized when we
     * should, and thus we won't abide by the delayed ACK
     * rules correctly.
     * SACKs don't matter, we never delay an ACK when we
     * have any of those going out.
     */
     
  *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
  if (ts) {
    if(sack)
      *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
    else
      *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
    *ptr++ = htonl(tstamp);		/* TSVAL */
    *ptr++ = htonl(ts_recent);	/* TSECR */
  } 
  else if(0)//(sack) 
    /* Standalone SACK-permitted option is disabled in this port. */
    *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);

  if (offer_wscale)
    *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
	
  if (mpcb) {
    //printf("Adding data to socket options %x %x %d!\n",mpcb,mpcb->connection_list,mpcb->cnt_local_addr);
    if (is_first){
      /* Initial multipath option: kind + length covering the local
       * token word and one word per advertised local address. */
      int sz = 6 + 4*mpcb->cnt_local_addr;
      // if (sz%4){
      //sz = 4+((sz>>2)<<2);
      //}
      //printf("Opsize is %d\n",sz);

      *ptr++ = htonl((TCPOPT_NOP<<24)|(TCPOPT_NOP<<16)|(TCPOPT_MULTIPATH<<8) | sz);
      *ptr++ = htonl(mpcb->tk_local);
      /* NOTE(review): ptr is not advanced past the address list that
       * add_ip_list writes -- fine only if this is the last option. */
      add_ip_list(ptr,mpcb);
    }
    else{
      /* Join option: both connection tokens, fixed 10-byte length. */
      *ptr++ = htonl((TCPOPT_NOP<<24)|(TCPOPT_NOP<<16)|(TCPOPT_NEW_SUBFLOW<<8) | 10);
      /* NOTE(review): if tk_remote is an unsigned type this test can
       * never fire -- confirm its declared type. */
      if (mpcb->tk_remote<0){
	printf("Tk remote uninitialized!\n");exit(1);
      }
      *ptr++ = htonl(mpcb->tk_local);
      *ptr++ = htonl(mpcb->tk_remote);
    }
  }
}
